text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` from mmcv import Config cfg = Config.fromfile('../configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py') import mmcv mmcv.__version__ from mmcv.runner import set_random_seed # Modify dataset type and path cfg.dataset_type = 'VideoDataset' cfg.data_root = '/data2/phap/datasets/train/' cfg.data_root_val = '/data2/phap/datasets/val/' cfg.ann_file_train = '/data2/phap/datasets/label_train.txt' cfg.ann_file_val = '/data2/phap/datasets/label_val.txt' cfg.ann_file_test = '/data2/phap/datasets/label_val.txt' cfg.data.test.type = 'VideoDataset' cfg.data.test.ann_file = '/data2/phap/datasets/label_val.txt' cfg.data.test.data_prefix = '/data2/phap/datasets/val/' cfg.data.train.type = 'VideoDataset' cfg.data.train.ann_file = '/data2/phap/datasets/label_train.txt' cfg.data.train.data_prefix = '/data2/phap/datasets/train/' cfg.data.val.type = 'VideoDataset' cfg.data.val.ann_file = '/data2/phap/datasets/label_val.txt' cfg.data.val.data_prefix = '/data2/phap/datasets//val/' # Modify num classes of the model in cls_head cfg.model.cls_head.num_classes = 2 # We can use the pre-trained TSN model cfg.load_from = '../checkpoints/slowfast_r50_4x16x1_256e_kinetics400_rgb_20200704-bcde7ed7.pth' #cfg.load_from = None # Set up working dir to save files and logs. cfg.work_dir = '../work_dirs/violence_v2' # The original learning rate (LR) is set for 8-GPU training. # We divide it by 8 since we only use one GPU. 
cfg.data.video_per_gpu = 4 cfg.data.workers_per_gpu=1 #cfg.data.videos_per_gpu = cfg.data.videos_per_gpu // 8 #cfg.optimizer.lr = cfg.optimizer.lr / 8 / 16 cfg.total_epochs = 30 # We can set the checkpoint saving interval to reduce the storage cost cfg.checkpoint_config.interval = 10 # We can set the log print interval to reduce the the times of printing log #cfg.log_config.interval = 5 # Set seed thus the results are more reproducible cfg.seed = 0 set_random_seed(0, deterministic=True) cfg.gpu_ids = range(1) import os.path as osp from mmaction.datasets import build_dataset from mmaction.models import build_model from mmaction.apis import train_model import mmcv # Build the dataset datasets = [build_dataset(cfg.data.train)] # Build the recognizer model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) # Create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) train_model(model, datasets, cfg, distributed=False, validate=True) from mmaction.apis import single_gpu_test from mmaction.datasets import build_dataloader from mmcv.parallel import MMDataParallel # Build a test dataloader dataset = build_dataset(cfg.data.test, dict(test_mode=True)) data_loader = build_dataloader( dataset, videos_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False) model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader) eval_config = cfg.evaluation eval_config.pop('interval') eval_res = dataset.evaluate(outputs, **eval_config) for name, val in eval_res.items(): print(f'{name}: {val:.04f}') ```
github_jupyter
# Experiments with Dense Vectors ``` import os import sys import numpy as np from numba import jit sys.path.append(os.path.abspath('../')) # needed to import from permutations sibling directory import permutations.dense_permutations as p # Generate some vectors a = p.get_random_vector(100) b = p.get_random_vector(100) c = p.get_random_vector(100) # Generate some predicates p_a = p.get_random_permutation(100) p_b = p.get_random_permutation(100) p_c = p.get_random_permutation(100) # In theory, this should give us the ability to deduce a permutation from two source vectors; untested def deduceperm(permuted, unpermuted): return np.argsort(np.argsort(permuted)[np.argsort(np.argsort(unpermuted))]) # Changes a permutation to be like another permutation, where howmuch is the number of dimensions the input perm # should have in common with tobelike. In theory it should never become disjoint with range(dimension), but I haven't # rigorously tested it. Assuming it's fed with a permutation derived from range(dimension), that is. def trainpermutation(perm, tobelike, howmuch): likeness = np.sum(perm == tobelike) perm1 = np.copy(perm) while likeness < howmuch: tochange = np.random.choice(np.where(perm1 != tobelike)[0]) perm1[np.where(perm1 == tobelike[tochange])] = perm1[tochange] perm1[tochange] = tobelike[tochange] likeness = np.sum(perm1 == tobelike) return perm1 def trainpermutation(perm, tobelike, howmuch): perm1 = np.copy(perm) for i in np.where(perm1 == p.get_random_permutation(100))[0]: #move away from noise idx = np.random.randint(0,perm1.shape[0]) val = perm1[i] perm1[i] = perm[idx] perm1[idx] = val canchange = np.clip(howmuch, 0, len(np.where(perm1 != tobelike)[0])) # how many spaces can we actually change? 
for i in np.argsort(np.abs(perm1 - tobelike))[::-1][:canchange]: # move towards the thing you want to be like perm1[np.where(perm1 == tobelike[i])] = perm1[i] perm1[i] = tobelike[i] return perm1 p_ta = trainpermutation(p.get_random_permutation(100), p_a, 50) # make a permutation with 50 dimensions in common with p_a print(np.setdiff1d(np.arange(100), p_ta)) # should be an empty list print(np.sum(p_ta == p_a)) # should be precisely ~50 print(p.cosine_similarity(a[p_a], a[p_b])) # basically random print(p.cosine_similarity(a[p_ta], a[p_a])) # same vector, more similar permutations, should be more similar print(p.cosine_similarity(a,b)) #should be identical to below permutation print(p.cosine_similarity(a[p_a], b[p_a])) # dissimilar vectors, same permutation, should be (identically to unpermuted) dissimilar np.sum(deduceperm(a[p_a], a) == p_a) # should be 100 (e.g. every dimension should match) ``` ## Training Things If we want to train, we should go for something like `sigmoid(vec[permutation].dot(vec2))` Ideally, we want the sigmoid to be very close to one for vectors in context, and far from one for negative samples. 
``` def sigmoid(x): return np.exp(-np.logaddexp(0, -x)) ``` Let's make a toy dataset: ``` dataset = { 'orange': { 'isa': 'fruit', 'color': 'orange', 'colour': 'orange', 'shape': 'round', 'origin': 'plant', 'growson': 'tree' }, 'apple': { 'isa': 'fruit', 'colour':'red', 'color': 'red', 'shape': 'round', 'origin': 'plant', 'growson': 'tree' }, 'broccoli': { 'isa': 'vegetable', 'color': 'green', 'shape': 'floret', 'origin': 'plant', 'growson': 'bush' }, 'chicken': { 'isa': 'animal', 'makesnoise': 'cuccoo', 'has': 'feathers,', 'eats': 'grain', 'drinks': 'water' }, 'bicycle': { 'typeof':'transportation', 'origin':'man-made', 'color':'varies', 'has':'wheels', 'used-by':'humans' } } svecs = {x:p.get_random_vector(100) for x in dataset.keys()} #generate subject vectors preds = {x:p.get_random_permutation(100) for y in dataset.keys() for x in dataset[y].keys()} # generate permutation vectors cvecs = {x:p.get_random_vector(100) for y in dataset.keys() for x in dataset[y].values()} # generate context vectors alpha = 0.1 for i in range(100): # epochs alpha = alpha - 0.001 # monotonic decrease in alpha for subject in dataset.keys(): for predicate in dataset[subject]: context = dataset[subject][predicate] subjectvector = svecs[subject] svectorcopy = svecs[subject].copy() # copy for context to move to predicatevector = preds[predicate] contextvector = cvecs[dataset[subject][predicate]] cvectorcopy = cvecs[dataset[subject][predicate]].copy() #print(f"{subject}-{predicate}-{context} similarity prior to training: {p.cosine_similarity(p.permute_vector(predicatevector, subjectvector), contextvector):.2f}") shiftoward = 1-sigmoid(p.permute_vector(predicatevector, subjectvector).dot(contextvector)) shiftaway = -sigmoid(p.permute_vector(predicatevector, subjectvector).dot(p.get_random_vector(100))) # generate a random noise vector to move away from subjectvector += alpha*shiftoward*contextvector[p.inverse_permutation(predicatevector)] contextvector += 
alpha*shiftoward*p.permute_vector(predicatevector, svectorcopy) subjectvector -= alpha*shiftaway*contextvector[p.inverse_permutation(predicatevector)] contextvector -= alpha*shiftaway*p.permute_vector(predicatevector, svectorcopy) #predicatevector = trainpermutation(predicatevector, deduceperm(cvectorcopy, svectorcopy), 5) #print(f"{subject}-{predicate}-{context} similarity after training: {p.cosine_similarity(p.permute_vector(predicatevector, subjectvector), contextvector):.2f}") #normalize vectors at end of every epoch for vec in svecs: svecs[vec] = p.normalize(svecs[vec]) for vec in cvecs: cvecs[vec] = p.normalize(cvecs[vec]) #don't normalize predicate vectors as that would break things def real_similarity_search(query, vectorstore): similarities = [(i, p.cosine_similarity(vectorstore[i], vectorstore[query])) for i in vectorstore] similarities.sort(key=lambda x: x[1], reverse=True) for i in similarities[:5]: print(f"{i[0]+':':<10}\t{i[1]:.4f}") real_similarity_search('orange', svecs) #should be very similar to apples, a little similar to broccoli, and random to others real_similarity_search('orange', cvecs) #should be similar to other colors, especially red def permutation_similarity_search(query, permstore): similarities = [(i, np.sum(permstore[query] == permstore[i])) for i in permstore] similarities.sort(key=lambda x: x[1], reverse=True) for i in similarities[:5]: print(f"{i[0]+':':<10}\t{i[1]}") #May want to do something like below '''def permutation_similarity_search(query, permstore): comparevec = svecs['term that is probably related by query to svec'] similarities = [(i, p.cosine_similarity(comparevec[permstore[query]], comparevec[permstore[i]])) for i in permstore] similarities.sort(key=lambda x: x[1], reverse=True) for i in similarities[:5]: print(f"{i[0]+':':<10}\t{i[1]:.4f}")'''; permutation_similarity_search('color', preds) #if preds aren't trained, should recover cue term and be random to other things def bound_permutation_search(contextquery, 
subjectquery, contextstore, subjectstore, permstore): similarities = [(i, np.sum(deduceperm(contextstore[contextquery], subjectstore[subjectquery]) == permstore[i])) for i in permstore] similarities.sort(key=lambda x: x[1], reverse=True) for i in similarities[:5]: print(f"{i[0]+':':<10}\t{i[1]}") bound_permutation_search('orange','orange', cvecs, svecs, preds) # should return color print() bound_permutation_search('red','apple', cvecs, svecs, preds) # should return color bound_permutation_search('fruit','orange', cvecs, svecs, preds) # should return isa bound_permutation_search('wheels','bicycle', cvecs, svecs, preds) # should return has bound_permutation_search('fruit','bicycle', cvecs, svecs, preds) # should return random, especially without permutation training ``` Now let's train on a slightly larger toy data set derived from geonames: ``` # Load countryfacts file (derived from geonames) # Note that some countries reported as NONE for capitals may actually have capitals, I just didn't resolve them # Antarctica has no capital and no official currency with open('../countryfacts.txt','r') as infile: facts = [line.strip().split('\t') for line in infile] facts = np.asarray(facts) subjects = set([x[0] for x in facts]) predicates = set([x[1] for x in facts]) objects = set([x[2] for x in facts]) svecs = {x:p.get_random_vector(100) for x in subjects} #generate subject vectors preds = {x:p.get_random_permutation(100) for x in predicates} # generate permutation vectors cvecs = {x:p.get_random_vector(100) for x in objects} # generate context vectors ``` Let's train based on how the attributes are related to each subject in the dataset: ``` alpha = 0.1 for i in range(10): # epochs # Shuffle the order of the examples: np.random.shuffle(facts) alpha = alpha - 0.001 # monotonic decrease in alpha for fact in facts: subjectvector = svecs[fact[0]] svectorcopy = svecs[fact[0]].copy() # copy for context to move to predicatevector = preds[fact[1]] contextvector = cvecs[fact[2]] 
cvectorcopy = cvecs[fact[2]].copy() #print(f"{subject}-{predicate}-{context} similarity prior to training: {p.cosine_similarity(p.permute_vector(predicatevector, subjectvector), contextvector):.2f}") shiftoward = 1-sigmoid(p.permute_vector(predicatevector, subjectvector).dot(contextvector)) shiftaway = -sigmoid(p.permute_vector(predicatevector, subjectvector).dot(p.get_random_vector(100))) # generate a random noise vector to move away from subjectvector += alpha*shiftoward*contextvector[p.inverse_permutation(predicatevector)] contextvector += alpha*shiftoward*p.permute_vector(predicatevector, svectorcopy) subjectvector -= alpha*shiftaway*contextvector[p.inverse_permutation(predicatevector)] contextvector -= alpha*shiftaway*p.permute_vector(predicatevector, svectorcopy) #predicatevector = trainpermutation(predicatevector, deduceperm(contextvector, subjectvector), 75) #print(f"{subject}-{predicate}-{context} similarity after training: {p.cosine_similarity(p.permute_vector(predicatevector, subjectvector), contextvector):.2f}") #normalize vectors at end of every epoch #for vec in svecs: # svecs[vec] = p.normalize(svecs[vec]) #for vec in cvecs: # cvecs[vec] = p.normalize(cvecs[vec]) #don't normalize predicate vectors as that would break things ``` Let's do some querying to make sure that it worked: ``` real_similarity_search('United States', svecs) # should return other countries that are in NA and use "Dollar" as currency real_similarity_search('Dollar', cvecs) # should return other currencies used in NA / other currencies permutation_similarity_search('Continent', preds) #preds aren't trained, so should recover cue term and be random to other things bound_permutation_search('Dollar','United States', cvecs, svecs, preds) # should return currencyname print() bound_permutation_search('Peso','Mexico', cvecs, svecs, preds) # should return currencyname bound_permutation_search('San Salvador','El Salvador', cvecs, svecs, preds) # should return Capital print() 
bound_permutation_search('Mexico City','Mexico', cvecs, svecs, preds) print() bound_permutation_search('Washington','United States', cvecs, svecs, preds) print() bound_permutation_search('London', 'United Kingdom', cvecs, svecs, preds) ```
github_jupyter
## Step 6: partition the files to follow the conventions KGTK uses for Wikidata ``` import sys sys.path.insert(0, 'tutorial') from tutorial_setup import * from generate_report import run %cd {output_path} ``` We'll use the partition-wikidata notebook to complete this step. This notebook expects an input file that includes all edges and qualifiers together. We also need to specify a directory where partitioned files should be created, and a directory where temporary files can be sent (this should be different from our temp directory as the partition notebook will clear any existing files in this folder). ``` !mkdir -p $OUT/parts ``` Combine the main edges with the qualifiers ``` !$kgtk cat -i $OUT/all.tsv.gz -i $OUT/Q154.qualifiers.tsv.gz -o $TEMP/all_and_qualifiers.tsv.gz !zcat < $TEMP/all_and_qualifiers.tsv.gz | head pm.execute_notebook( os.environ["EXAMPLES_DIR"] + "/partition-wikidata.ipynb", os.environ["TEMP"] + "/partition-wikidata.out.ipynb", parameters=dict( wikidata_input_path = os.environ["TEMP"] + "/all_and_qualifiers.tsv.gz", wikidata_parts_path = os.environ["OUT"] + "/parts", temp_folder_path = os.environ["OUT"] + "/parts/temp", sort_extras = "--buffer-size 30% --temporary-directory $OUT/parts/temp", verbose = False ) ) ; ``` The partition-wikidata notebook created the following partitioned kgtk-files: ``` !ls $OUT/parts !$kypher -i $OUT/parts/claims.tsv.gz \ --match '(n1)-[]->()' \ --return 'count(distinct n1)' ``` ## Step 7 Run Useful files Notebook ``` pm.execute_notebook( os.environ["USECASE_DIR"] + "/Wikidata Useful Files.ipynb", os.environ["TEMP"] + "/Wikidata Useful Files Out.ipynb", parameters=dict( output_path = os.environ["OUT"], output_folder = "useful_files", temp_folder = "temp.useful_files", wiki_root_folder = os.environ["OUT"] + "/parts/", cache_path = os.environ["OUT"] + "/temp.useful_files", languages = 'en', compute_pagerank = True, delete_database = True ) ) ; ``` The useful files notebook created the following files ``` !ls -lh 
$OUT/useful_files ``` Look at the distribution of out degrees ``` pd.read_table(os.environ['OUT']+'/useful_files/statistics.out_degree.distribution.tsv') ``` ## Step 8 Run the Knowledge Graph Profiler ``` # the ; at the end suppresses the output of this cell which is a very large json object output of executing the profiler notebook pm.execute_notebook( os.environ["USECASE_DIR"] + "/Knowledge-Graph-Profiler.ipynb", "Knowledge-Graph-Profiler.out.ipynb", parameters=dict( wikidata_parts_folder = os.environ["OUT"] + "/parts", cache_folder = os.environ['TEMP'] + "/profiler_temp", output_folder = os.environ["OUT"] + "/profiler", compute_graph_statistics = "true" ) ) ; ``` [Knowledge Graph Profiler output](Knowledge-Graph-Profiler.out.ipynb) ### Generate a report on Profiler output ``` run(f'{os.environ["OUT"]}/profiler') ``` See the [profiler report](report.html) of the main classes and properties in our KG.
github_jupyter
# Tuesday Exams ### TYPE: Practical session ### DATE: 03/11/2020 Vamos a codificar un juego sencillo que consiste en adivinar una secuencia de colores. Estas son las reglas: 1. El código genera una secuencia de dos colores (Rojo y Verde) aleatoria 2. El usuario tiene que averiguarla 3. Si el usuario mete una secuencia que ya ha metido antes, tiene que recibir un aviso 4. El código debe de dar pistas al usuario sobre si la distribución de colores (número de rojos y número de verdes) coincide con el de la secuencia secreta y sobre el número de colores que ha acertado 5. Cuando el usuario acierte la secuencia secreta, el código debe imprimir el número de intentos Por ejemplo: ![image.png](attachment:image.png) ## En la última celda de este notebook tienes el código del Juego casi completo. Échale un vistazo antes de empezar para intentar entenderlo. El examen consiste en completar las funciones que usa el juego #### Ej1. Programa la función gen_secret. Esta función debe generar una secuencia secreta aleatoria de "R"s y "G"s con una longitud igual al valor de la variable **size** que se le pasa como argumento. Esta función recive la variable **size** como argumento y debe devolver la secuencia aleatoria (e.g. "RRGG") ``` def gen_secret(size) return secret ``` #### SOLUTION. ``` def gen_secret(size): options=["R","G"] pattern=[] for i in range(size): pattern.append(options[rnd.randint(0,1)]) secret="".join(pattern) return secret ``` #### Ej2. Programa la función input_play. Este método le pide al usuario que introduzca una secuencia STRING de Rojos y Verdes (e.g. "RRGG"). El código debe seguir preguntando al usuario en caso de que no introduzca el número de colores requerido o en caso de que utilize letras diferentes de "R" o "G". Esta función recibe la longitud de la secuencia en la variable **size** y debe devolver la secuencia de colores en formato STRING ``` def input_play(size): return sequence ``` #### SOLUTION. 
``` def input_play(size): sequence=input("\nInput a sequence of R's and G's with length {}: ".format(size)) while (sequence.count("R")+sequence.count("G")!=size) or (len(sequence)!=size): print("The sequence can only contains G's and R's and must have length {}".format(size)) sequence=input("Guess a sequence of R's and G's with length {}: ".format(size)) return sequence sequence="RRRR" condicion_logitud=(len(sequence)!=4) print(condicion_logitud) condicion_letras=(sequence.count("R")+sequence.count("G")!=4) print(condicion_letras) condicion_letras or condicion_logitud print(condicion_letras or condicion_logitud) ``` #### Ej3. Programa el código de la función plot_play. Este método recibe una secuencia de colores en formato STRING (e.g. "RRGG") e imprimie esa secuencia con puntos de colores. Esta función no tiene que devolver nada, sólo imprimir los puntos. Para imprimir texto en color necesitamos usar la librería TERMCOLOR. Prueba este código en una celda: from termcolor import colored print(colored("●","red")) Si el IMPORT no reconoce la librería ejecuta este comando en otra celda para instalarla (sólo tienes que ejecutarlo una vez): ! pip install --user termcolor ``` def plot_play(play): ``` #### SOLUTION. ``` def plot_play(play): for color in play: if color=="R": print(colored("●","red"),end="") elif color=="G": print(colored("●","green"),end="") else: print("●",end="") print() ``` #### Ej4. Utiliza la función check_distribution. Os proporcionamos la función **check_distribution**. Este método recibe la secuencia de colores del usuario y la secuencia de colores secreta y chequea si el número de Rojos y de Verdes coincide entre ambas secuencias. La función debe devolver una variable BOOLEANA indicando si el número de colores es el mismo. 
For example: secret="RRGG" y guess="RGRG" debería devolver TRUE secret="RRGG" y guess="RGGG" debería devolver FALSE ``` def check_distribution(guess,secret): if guess.count("R")==secret.count("R") and guess.count("G")==secret.count("G"): return True else: return False ``` Para resolver este ejercicio tienes que invocar este método en el código del juego que está al final del notebook. Tienes que añadir las instrucciones necesarias para darle al usuario pistas respecto a la distribución de colores. La zona en la que tienes que añadir el código tiene esta pinta: ![image.png](attachment:image.png) #### Ej5. Programa la función check_positions. Esta función chequeará las posiciones de los colores de la secuencia del usuario y de la secuencia secreta. Si las posiciones y los colores coinciden, la función deberá devolver una variable BOOLEANA con valor TRUE, y si no coinciden, con valor FALSE. Por ejemplo: secret="RRGG" y guess="RRGG" debería devolver TRUE secret="RRGG" y guess="GGRR" debería devolver FALSE La función recibe como argumentos las dos secuencias y devuelve la variable booleana con el resultado de la comparación ``` def check_positions(guess,secret): return boolean ``` #### SOLUTION. ``` def check_positions(guess,secret): if guess==secret: return True else: return False ``` #### Ej6. Programa el código de la función check_coincidences. Este método debe contar el número de aciertos entre la secuencia de colores introducida por el usuario y la secuencia secreta. Por ejemplo: secret="RRGG" y guess="RRGG" debe devolver 4 secret="RRGG" y guess="GGGR" debe devolver 1 La función recibe como argumentos las dos secuencias y devuelve una variable entera con el número de aciertos ``` def check_coincidences(guess,secret): return number_of_coincidences ``` #### SOLUTION. 
``` def check_coincidences(guess,secret): out=[] for i in range(len(guess)): if guess[i]==secret[i]: out.append(1) else: out.append(0) number_of_coincidences=sum(out) return number_of_coincidences ``` ## GAME CODE Este es el código del juego completo. Sólo falta el bloque correspondiente a la función **check_distribution**. El examen consiste en implementar las funciones del juego y invocar correctamente la función **check_distribution** en el espacio reservado para tener un juego funcional. ``` from termcolor import colored import random as rnd size=4 guesses=[] attempts=0 # Generates a secret print("Generate secret with length: {}".format(size)) print("......") secret=gen_secret(size) while True: # Ask the user for a guess guess=input_play(size) attempts=attempts+1 # Plot the sequence of colored points plot_play(guess) # Inform the user if that guess was already used if guess in guesses: print(" - Combination already used") else: guesses.append(guess) # Check if we found the solution if check_positions(guess, secret)==True: print("Congrats!!") break else: # ..................................................... # ..................................................... # ..............USE THIS SPACE TO CALL THE METHOD...... # ..............check_distribution(guess, secret)...... # ..............and give the user some feedback ....... # ..............about the guess ....................... # ..................................................... # ..................................................... # Check the number of coincidences print("Hits: {}".format(check_coincidences(guess, secret))) print("You needed {} attempts".format(attempts)) ``` #### SOLUTION. 
``` size=4 guesses=[] attempts=0 # Generates a secret print("Generate secret with length: {}".format(size)) print("......") secret=gen_secret(size) while True: # Ask the user for a guess guess=input_play(size) attempts=attempts+1 # Plot the sequence of colored points plot_play(guess) # Inform the user if that guess was already used if guess in guesses: print(" - Combination already used") else: guesses.append(guess) # Check if we found the solution if check_positions(guess, secret)==True: print("Congrats!!") break else: # Check if we found the right R's and G's distribution if check_distribution(guess, secret)==True: print("Distribution: OK. ") else: print("Distribution: WRONG. ") # Check the number of coincidences print("Hits: {}".format(check_coincidences(guess, secret))) print("You needed {} attempts".format(attempts)) ```
github_jupyter
# Introduction # In Lessons 2 and 3, we treated forecasting as a simple regression problem with all of our features derived from a single input, the time index. We could easily create forecasts for any time in the future by just generating our desired trend and seasonal features. When we added lag features in Lesson 4, however, the nature of the problem changed. Lag features require that the lagged target value is known at the time being forecast. A lag 1 feature shifts the time series forward 1 step, which means you could forecast 1 step into the future but not 2 steps. In Lesson 4, we just assumed that we could always generate lags up to the period we wanted to forecast (every prediction was for just one step forward, in other words). Real-world forecasting typically demands more than this, so in this lesson we'll learn how to make forecasts for a variety of situations. # Defining the Forecasting Task # There are two things to establish before designing a forecasting model: - what information is available at the time a forecast is made (features), and, - the time period during which you require forecasted values (target). The **forecast origin** is time at which you are making a forecast. Practically, you might consider the forecast origin to be the last time for which you have training data for the time being predicted. Everything up to he origin can be used to create features. The **forecast horizon** is the time for which you are making a forecast. We often describe a forecast by the number of time steps in its horizon: a "1-step" forecast or "5-step" forecast, say. The forecast horizon describes the target. <figure style="padding: 1em;"> <img src="https://i.imgur.com/xwEgcOk.png" width=500, alt=""> <figcaption style="textalign: center; font-style: italic"><center>A three-step forecast horizon with a two-step lead time, using four lag features. The figure represents what would be a single row of training data -- data for a single prediction, in other words. 
</center></figcaption> </figure> The time between the origin and the horizon is the **lead time** (or sometimes *latency*) of the forecast. A forecast's lead time is described by the number of steps from origin to horizon: a "1-step ahead" or "3-step ahead" forecast, say. In practice, it may be necessary for a forecast to begin multiple steps ahead of the origin because of delays in data acquisition or processing. # Preparing Data for Forecasting # In order to forecast time series with ML algorithms, we need to transform the series into a dataframe we can use with those algorithms. (Unless, of course, you are only using deterministic features like trend and seasonality.) We saw the first half of this process in Lesson 4 when we created a feature set out of lags. The second half is preparing the target. How we do this depends on the forecasting task. Each row in a dataframe represents a single forecast. The time index of the row is the first time in the forecast horizon, but we arrange values for the entire horizon in the same row. For multistep forecasts, this means we are requiring a model to produce multiple outputs, one for each step. ``` import numpy as np import pandas as pd N = 20 ts = pd.Series( np.arange(N), index=pd.period_range(start='2010', freq='A', periods=N, name='Year'), dtype=pd.Int8Dtype, ) # Lag features X = pd.DataFrame({ 'y_lag_2': ts.shift(2), 'y_lag_3': ts.shift(3), 'y_lag_4': ts.shift(4), 'y_lag_5': ts.shift(5), 'y_lag_6': ts.shift(6), }) # Multistep targets y = pd.DataFrame({ 'y_step_3': ts.shift(-2), 'y_step_2': ts.shift(-1), 'y_step_1': ts, }) data = pd.concat({'Targets': y, 'Features': X}, axis=1) data.head(10).style.set_properties(['Targets'], **{'background-color': 'LavenderBlush'}) \ .set_properties(['Features'], **{'background-color': 'Lavender'}) ``` The above illustrates how a dataset would be prepared similar to the *Defining a Forecast* figure: a three-step forecasting task with a two-step lead time using five lag features. 
The original time series is `y_step_1`. The missing values we could either fill-in or drop. # Multistep Forecasting Strategies # There are a number of strategies for producing the multiple target steps required for a forecast. We'll outline four common strategies, each with strengths and weaknesses. ### Multioutput model Use a model that produces multiple outputs naturally. Linear regression and neural networks can both produce multiple outputs. This strategy is simple and efficient, but not possible for every algorithm you might want to use. XGBoost can't do this, for instance. <figure style="padding: 1em;"> <img src="https://i.imgur.com/uFsHiqr.png" width=300, alt=""> <figcaption style="textalign: center; font-style: italic"><center> </center></figcaption> </figure> ### Direct strategy Train a separate model for each step in the horizon: one model forecasts 1-step ahead, another 2-steps ahead, and so on. Forecasting 1-step ahead is a different problem than 2-steps ahead (and so on), so it can help to have a different model make forecasts for each step. The downside is that training lots of models can be computationally expensive. <figure style="padding: 1em;"> <img src="https://i.imgur.com/HkolNMV.png" width=900, alt=""> <figcaption style="textalign: center; font-style: italic"><center> </center></figcaption> </figure> ### Recursive strategy Train a single one-step model and use its forecasts to update the lag features for the next step. With the recursive method, we feed a model's 1-step forecast back in to that same model to use as a lag feature for the next forecasting step. We only need to train one model, but since errors will propagate from step to step, forecasts can be inaccurate for long horizons. 
<figure style="padding: 1em;"> <img src="https://i.imgur.com/sqkSFDn.png" width=300, alt=""> <figcaption style="textalign: center; font-style: italic"><center> </center></figcaption> </figure> ### DirRec strategy A combination of the direct and recursive strategies: train a model for each step and use forecasts from previous steps as *new* lag features. Step by step, each model gets an additional lag input. Since each model always has an up-to-date set of lag features, the DirRec strategy can capture serial dependence better than Direct, but it can also suffer from error propagation like Recursive. <figure style="padding: 1em;"> <img src="https://i.imgur.com/B7KAvAO.png" width=900, alt=""> <figcaption style="textalign: center; font-style: italic"><center> </center></figcaption> </figure> # Example - Flu Trends # In this example we'll apply the MultiOutput and Direct strategies to the *Flu Trends* data from Lesson 4, this time making true forecasts for multiple weeks beyond the training period. We'll define our forecasting task to have an 8-week horizon with a 1-week lead time. In other words, we'll be forecasting eight weeks of flu cases starting with the following week. The hidden cell sets up the example and defines a helper function `plot_multistep`. 
``` from pathlib import Path from warnings import simplefilter import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor simplefilter("ignore") # Set Matplotlib defaults plt.style.use("seaborn-whitegrid") plt.rc("figure", autolayout=True, figsize=(11, 4)) plt.rc( "axes", labelweight="bold", labelsize="large", titleweight="bold", titlesize=16, titlepad=10, ) plot_params = dict( color="0.75", style=".-", markeredgecolor="0.25", markerfacecolor="0.25", ) %config InlineBackend.figure_format = 'retina' def plot_multistep(y, every=1, ax=None, palette_kwargs=None): palette_kwargs_ = dict(palette='husl', n_colors=16, desat=None) if palette_kwargs is not None: palette_kwargs_.update(palette_kwargs) palette = sns.color_palette(**palette_kwargs_) if ax is None: fig, ax = plt.subplots() ax.set_prop_cycle(plt.cycler('color', palette)) for date, preds in y[::every].iterrows(): preds.index = pd.period_range(start=date, periods=len(preds)) preds.plot(ax=ax) return ax data_dir = Path("../input/ts-course-data") flu_trends = pd.read_csv(data_dir / "flu-trends.csv") flu_trends.set_index( pd.PeriodIndex(flu_trends.Week, freq="W"), inplace=True, ) flu_trends.drop("Week", axis=1, inplace=True) ``` First we'll prepare our target series (weekly office visits for the flu) for multistep forecasting. Once this is done, training and prediction will be very straightforward. 
``` def make_lags(ts, lags, lead_time=1): return pd.concat( { f'y_lag_{i}': ts.shift(i) for i in range(lead_time, lags + lead_time) }, axis=1) # Four weeks of lag features y = flu_trends.FluVisits.copy() X = make_lags(y, lags=4).fillna(0.0) def make_multistep_target(ts, steps): return pd.concat( {f'y_step_{i + 1}': ts.shift(-i) for i in range(steps)}, axis=1) # Eight-week forecast y = make_multistep_target(y, steps=8).dropna() # Shifting has created indexes that don't match. Only keep times for # which we have both targets and features. y, X = y.align(X, join='inner', axis=0) ``` ### Multioutput model We'll use linear regression as a MultiOutput strategy. Once we have our data prepared for multiple outputs, training and prediction is the same as always. ``` # Create splits X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=False) model = LinearRegression() model.fit(X_train, y_train) y_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns) y_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns) ``` Remember that a multistep model will produce a complete forecast for each instance used as input. There are 269 weeks in the training set and 90 weeks in the test set, and we now have an 8-step forecast for each of these weeks. 
``` train_rmse = mean_squared_error(y_train, y_fit, squared=False) test_rmse = mean_squared_error(y_test, y_pred, squared=False) print((f"Train RMSE: {train_rmse:.2f}\n" f"Test RMSE: {test_rmse:.2f}")) palette = dict(palette='husl', n_colors=64) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 6)) ax1 = flu_trends.FluVisits[y_fit.index].plot(**plot_params, ax=ax1) ax1 = plot_multistep(y_fit, ax=ax1, palette_kwargs=palette) _ = ax1.legend(['FluVisits (train)', 'Forecast']) ax2 = flu_trends.FluVisits[y_pred.index].plot(**plot_params, ax=ax2) ax2 = plot_multistep(y_pred, ax=ax2, palette_kwargs=palette) _ = ax2.legend(['FluVisits (test)', 'Forecast']) ``` ### Direct strategy XGBoost can't produce multiple outputs for regression tasks. But by applying the Direct reduction strategy, we can still use it to produce multi-step forecasts. This is as easy as wrapping it with scikit-learn's `MultiOutputRegressor`. ``` from sklearn.multioutput import MultiOutputRegressor model = MultiOutputRegressor(XGBRegressor()) model.fit(X_train, y_train) y_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns) y_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns) ``` XGBoost here is clearly overfitting on the training set. But on the test set it seems it was able to capture some of the dynamics of the flu season better than the linear regression model. It would likely do even better with some hyperparameter tuning. 
``` train_rmse = mean_squared_error(y_train, y_fit, squared=False) test_rmse = mean_squared_error(y_test, y_pred, squared=False) print((f"Train RMSE: {train_rmse:.2f}\n" f"Test RMSE: {test_rmse:.2f}")) palette = dict(palette='husl', n_colors=64) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 6)) ax1 = flu_trends.FluVisits[y_fit.index].plot(**plot_params, ax=ax1) ax1 = plot_multistep(y_fit, ax=ax1, palette_kwargs=palette) _ = ax1.legend(['FluVisits (train)', 'Forecast']) ax2 = flu_trends.FluVisits[y_pred.index].plot(**plot_params, ax=ax2) ax2 = plot_multistep(y_pred, ax=ax2, palette_kwargs=palette) _ = ax2.legend(['FluVisits (test)', 'Forecast']) ``` To use the DirRec strategy, you would only need to replace `MultiOutputRegressor` with another scikit-learn wrapper, `RegressorChain`. The Recursive strategy we would need to code ourselves. # Your Turn # [**Create a forecasting dataset**](https://www.kaggle.com/kernels/fork/20667477) for *Store Sales* and apply the DirRec strategy. --- *Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/time-series/discussion) to chat with other learners.*
github_jupyter
# Numpy Tutorial Numpy is a computational library for Python that is optimized for operations on multi-dimensional arrays. In this notebook we will use numpy to work with 1-d arrays (often called vectors) and 2-d arrays (often called matrices). For a the full user guide and reference for numpy see: http://docs.scipy.org/doc/numpy/ ``` import numpy as np # importing this way allows us to refer to numpy as np ``` # Creating Numpy Arrays New arrays can be made in several ways. We can take an existing list and convert it to a numpy array: ``` mylist = [1., 2., 3., 4.] mynparray = np.array(mylist) mynparray ``` You can initialize an array (of any dimension) of all ones or all zeroes with the ones() and zeros() functions: ``` one_vector = np.ones(4) print one_vector # using print removes the array() portion one2Darray = np.ones((2, 4)) # an 2D array with 2 "rows" and 4 "columns" print one2Darray zero_vector = np.zeros(4) print zero_vector ``` You can also initialize an empty array which will be filled with values. This is the fastest way to initialize a fixed-size numpy array however you must ensure that you replace all of the values. ``` empty_vector = np.empty(5) print empty_vector ``` # Accessing array elements Accessing an array is straight forward. For vectors you access the index by referring to it inside square brackets. Recall that indices in Python start with 0. ``` mynparray[2] ``` 2D arrays are accessed similarly by referring to the row and column index separated by a comma: ``` my_matrix = np.array([[1, 2, 3], [4, 5, 6]]) print my_matrix print my_matrix[1, 2] ``` Sequences of indices can be accessed using ':' for example ``` print my_matrix[:, 2] # recall 0:2 = [0, 1] print my_matrix[0, :] ``` You can also pass a list of indices. 
``` fib_indices = np.array([1, 1, 2, 3]) random_vector = np.random.random(10) # 10 random numbers between 0 and 1 print random_vector print random_vector[fib_indices] ``` You can also use true/false values to select values ``` my_vector = np.array([1, 2, 3, 4]) select_index = np.array([True, False, True, False]) print my_vector[select_index] ``` For 2D arrays you can select specific columns and specific rows. Passing ':' selects all rows/columns ``` select_cols = np.array([True, False, True]) # 1st and 3rd column select_rows = np.array([False, True]) # 2nd row print my_matrix[select_rows, :] # just 2nd row but all columns print my_matrix[:, select_cols] # all rows and just the 1st and 3rd column ``` # Operations on Arrays You can use the operations '\*', '\*\*', '\\', '+' and '-' on numpy arrays and they operate elementwise. ``` my_array = np.array([1., 2., 3., 4.]) print my_array*my_array print my_array**2 print my_array - np.ones(4) print my_array + np.ones(4) print my_array / 3 print my_array / np.array([2., 3., 4., 5.]) # = [1.0/2.0, 2.0/3.0, 3.0/4.0, 4.0/5.0] ``` You can compute the sum with np.sum() and the average with np.average() ``` print np.sum(my_array) print np.average(my_array) print np.sum(my_array)/len(my_array) ``` # The dot product An important mathematical operation in linear algebra is the dot product. When we compute the dot product between two vectors we are simply multiplying them elementwise and adding them up. In numpy you can do this with np.dot() ``` array1 = np.array([1., 2., 3., 4.]) array2 = np.array([2., 3., 4., 5.]) print np.dot(array1, array2) print np.sum(array1*array2) ``` Recall that the Euclidean length (or magnitude) of a vector is the squareroot of the sum of the squares of the components. 
This is just the squareroot of the dot product of the vector with itself: ``` array1_mag = np.sqrt(np.dot(array1, array1)) print array1_mag print np.sqrt(np.sum(array1*array1)) ``` We can also use the dot product when we have a 2D array (or matrix). When you have an vector with the same number of elements as the matrix (2D array) has columns you can right-multiply the matrix by the vector to get another vector with the same number of elements as the matrix has rows. For example this is how you compute the predicted values given a matrix of features and an array of weights. ``` my_features = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]]) print my_features my_weights = np.array([0.4, 0.5]) print my_weights my_predictions = np.dot(my_features, my_weights) # note that the weights are on the right print my_predictions # which has 4 elements since my_features has 4 rows ``` Similarly if you have a vector with the same number of elements as the matrix has *rows* you can left multiply them. ``` my_matrix = my_features my_array = np.array([0.3, 0.4, 0.5, 0.6]) print np.dot(my_array, my_matrix) # which has 2 elements because my_matrix has 2 columns ``` # Multiplying Matrices If we have two 2D arrays (matrices) matrix_1 and matrix_2 where the number of columns of matrix_1 is the same as the number of rows of matrix_2 then we can use np.dot() to perform matrix multiplication. ``` matrix_1 = np.array([[1., 2., 3.],[4., 5., 6.]]) print matrix_1 matrix_2 = np.array([[1., 2.], [3., 4.], [5., 6.]]) print matrix_2 print np.dot(matrix_1, matrix_2) ``` # Good Luck !!!
github_jupyter
``` import spacy import re import textstat import numpy as np import pandas as pd import sklearn.ensemble import seaborn as sns import matplotlib.pyplot as plt import sklearn.metrics from sklearn.metrics import ConfusionMatrixDisplay from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from nltk import WhitespaceTokenizer from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from scipy import sparse ``` # Loading journal text entries ``` users = pd.read_csv('../data/users.csv') journals = pd.read_csv('../data/journals.csv') # Dropping NAN entries in sentiment/journal/date journals_sub = journals.dropna(subset=['sentiment', 'journal', 'date']) journals_sub['sentiment_level'] = journals_sub[[ 'sentiment' ]].apply(lambda x: pd.cut(x, [-np.inf, -.5, .5, np.inf], labels=['negative', 'neutral', 'positive'])) ``` # Feature Engineering ``` journals_sub['date'] = pd.to_datetime(journals_sub['date']) journals_sub['year'] = pd.Series(journals_sub['date'].dt.year, dtype='category') journals_sub['day'] = journals_sub['date'].dt.day_name() journals_sub['month'] = journals_sub['date'].dt.month_name() journals_sub['month_trans'] = np.cos( (journals_sub['date'].dt.month - 6.0) / 6.0) journals_sub['timeofday'] = journals_sub['date'].dt.hour journals_sub['timeofday_trans'] = np.cos( (journals_sub['date'].dt.hour - 12.0) / 12.0) plt.figure(figsize=(15, 8)) journals_sub.hist(figsize=(20, 10), ylabelsize=15, xlabelsize=15, xrot=20, bins=20, edgecolor='white') ``` To observe if there are any big fluctuations in sentiment levels across the years so that we can make reasonable assumptions about the distribution of the data. 
``` sns.countplot(x='year', hue='sentiment_level', data=journals_sub, orient='horizontal', edgecolor='white') ``` # Time of day effect on mood This shows that numerous negative/positive entries are usually made at later/earlier hours in the day whereas neutral entries are made during mid-day. However as seen in the chart above the number of observations are also skewed between the different sentiment bins, which might indicate that "most entries" (and not sentiment-based associations) are made at earlier/later hours in the day rather than mid-day. ``` plt.figure(figsize=(10, 8)) # plt.subplot(2,2,1) ax = sns.boxplot(x="sentiment_level", y='timeofday_trans', data=journals_sub) ax.set_xticklabels(ax.get_xticklabels(), rotation=20) # plt.subplot(2,2,3) # plt.scatter(x=np.arange(24), y= np.cos((np.arange(24)-12)/12)) # plt.axhline(y=0.75) ``` # Seasonal effect Do Seasons influence mood predictions? Especially in the context of seasonal affective disorders ## Monthly fluctuations This is an attempt to see if there is any seasonality effect on mood. A classic example is Seasonal Affective Disorders which might indicate more negative sentiment entries during Winter Season. When binning by month and using "continuous" sentiment scores, there is a "dip" around January and "peak" around May, which is an interesting observation. When using "continuous" month transformed variable (cosine-transformation) and binning by sentiment scores (neutral/positive/negative) this trend is less obvious. 
``` plt.figure(figsize=(15, 15)) plt.subplot(3, 1, 1) ax = sns.boxplot(x="sentiment_level", y='month_trans', data=journals_sub) ax.set_xticklabels(ax.get_xticklabels(), rotation=20) plt.subplot(3, 1, 2) ax = sns.boxplot(y="sentiment", x='month', data=journals_sub) ax.set_xticklabels(ax.get_xticklabels(), rotation=20) plt.subplot(3, 1, 3) plt.scatter(x=np.arange(12), y=np.cos((np.arange(12) - 6) / 6)) ``` ## Seasonal fluctuations ``` Winter = ['December', 'January', 'February'] Spring = ['March', 'April', 'May'] Summer = ['June', 'July', 'August'] Fall = ['September', 'October', 'November'] seasons = dict( zip(Winter + Spring + Summer + Fall, ['Winter'] * 3 + ['Spring'] * 3 + ['Summer'] * 3 + ['Fall'] * 3)) journals_sub['season'] = journals_sub['month'].map(seasons) seasons_color = dict( zip(Winter + Spring + Summer + Fall, ['skyblue'] * 3 + ['lightgreen'] * 3 + ['#FFD43B'] * 3 + ['Red'] * 3)) ``` Winter seems to have the lowest mood predictions; though the difference compared to other seasons seems not too big of an effect. This impact may be masked by the clumping of months together and seems more evident in the monthly binning of sentiment scores above. ``` plt.figure(figsize=(15, 8)) ax = sns.boxplot(y="sentiment", x='season', data=journals_sub, palette={ 'Winter': 'skyblue', 'Fall': 'Red', 'Spring': 'lightgreen', 'Summer': '#FFD43B' }) ax.set_xticklabels(ax.get_xticklabels()) ``` # Content-based features ## Personal and Non-personal pronouns This is to test the hypothesis that usage of pronouns might differ depending on mood where people may talk about themselves more often or less compared to other people depending on their mood/personality. 
``` def get_pprn(doc): inner_pprn = ('i', 'me', 'my', 'mine', 'myself') outer_pprn = ('we', 'you', 'your', 'yourself', 'yours', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'they', 'them', 'their', 'theirs', 'themselves') inner_count = 0 outer_count = 0 for token in doc: if token.text in inner_pprn: inner_count += 1 elif token.text in outer_pprn: outer_count += 1 return inner_count, outer_count ``` These is a lot of variation in the length of the journal entry where longer journal entries are observed less often than shorter journal entries ``` journals_sub['word_len'] = journals_sub['journal'].apply(lambda x: len(x.split(' '))) journals_sub['word_len'].plot(kind='hist', logy=True, edgecolor='white') # print(sum(journals_sub['word_len']>50)) plt.xlabel('# Words in Entry') nlp = spacy.load("en_core_web_sm") i_count_all = [] o_count_all = [] for entry in journals_sub['journal']: doc = nlp(entry.lower()) i_count, o_count = get_pprn(doc) i_count_all.append(i_count) o_count_all.append(o_count) journals_sub['inner_pprn'] = np.array(i_count_all) journals_sub['outer_pprn'] = np.array(o_count_all) journals_sub[ 'sum_pprn'] = journals_sub['outer_pprn'] + journals_sub['inner_pprn'] journals_sub[ 'inner_frac'] = journals_sub['inner_pprn'] / journals_sub['sum_pprn'] journals_sub['inner_frac'] = journals_sub['inner_frac'].fillna(0) journals_sub[ 'outer_frac'] = journals_sub['outer_pprn'] / journals_sub['sum_pprn'] journals_sub['outer_frac'] = journals_sub['outer_frac'].fillna(0) journals_sub['pprn_ratio'] = np.array(i_count_all) / (np.array(o_count_all) + 1) ``` This figure shows that there is an association between sentiment, total pronouns used and the ratio of pronouns used to describe self vs. pronouns used to describe others (PPRN/Personal Pronoun Ratio Numeric). You can see this teased apart using the measure (Number of I,Me used/Total personal pronouns) where positive sentiments are seen more when this fraction is higher i.e. 
when people use a lot of personal pronouns and when people talk about themselves more the sentiment is more positive. ``` sns.boxplot(x='sentiment_level', y='inner_frac', data=journals_sub[journals_sub['sum_pprn'] > 10]) plt.ylabel('(Number of I,Me used/Total personal pronouns)') plt.xlabel('') ``` This effect is also there when looking at the ratio of pronouns used to describe self vs. pronouns used to describe others (PPRN/Personal Pronoun Ratio Numeric). When we threshold the journal entries that use at least 10 personal pronouns and bin sentiment scores, the median PPRN is higher for positive journal entries. ``` sns.boxplot(x='sentiment_level', y='pprn_ratio', data=journals_sub[journals_sub['sum_pprn'] > 10]) plt.ylabel('Personal pronouns ratio (I,me vs. She/He/Them)') plt.xlabel('') stop_words = set(stopwords.words('english')) filtered_sentences = journals_sub['journal'].apply( lambda x: ' '.join([i for i in WhitespaceTokenizer().tokenize(x)])) corpus = journals_sub['journal'] vectorizer = TfidfVectorizer(stop_words=stopwords.words('english'), token_pattern=r'(?u)\b[A-Za-z]+\b') mat = vectorizer.fit_transform(corpus) ``` ## Readability scores This effect of sentiment can also be observed using polysyllable count, where people with positive sentiment scores tend to use more difficult/long polysyllable-count words compared to those with negative sentiments. 
``` journals_sub['polysyllable_count'] = journals_sub['journal'].apply( lambda x: textstat.textstat.polysyllabcount(x)) journals_sub['difficult_words'] = journals_sub['journal'].apply( lambda x: textstat.textstat.difficult_words(x)) journals_sub['cl_index'] = journals_sub['journal'].apply( lambda x: textstat.textstat.coleman_liau_index(x)) sns.boxplot(x='sentiment_level', y=np.sqrt(journals_sub[journals_sub.sum_pprn>20].polysyllable_count), data=journals_sub) plt.ylabel('Polysyllable count') plt.xlabel('') ``` ## Users-frequency of posting There is also a user behavior element in predictions where some users have logged more entries than others. For this analysis we don't have access to how long the user has been using the app, which may be a contributing factor for this. Here we try to see what the median sentiment score looks like for users that log more journal entries than others. ``` journals_sub['user'] = journals_sub['path'].apply( lambda x: re.sub('"', '', x.split(',')[1].strip())) # Number of journal entries varies per user plt.figure(figsize=(10, 8)) plt.hist(journals_sub['user'].value_counts(), bins=20, edgecolor='white') plt.yscale('log') plt.xlabel('# Journal Entries') plt.ylabel('# Users') ``` As seen, the higher the number of entries by a user, the higher the median sentiment score of these journal entries. This can be due to (i) 'happier' users tend to log more entries/use the app more, (ii) users tend to be happier the longer they use the app, or (iii) sentiment scores are unreliable and these associations don't show any meaningful signals. ``` # Variance in sentiment scores may also be associated with # entries by a user plt.hexbin(y=journals_sub.groupby('user').median()['sentiment'], x=journals_sub['user'].value_counts(), gridsize=20, cmap='Blues', bins='log') plt.xlabel('# Entries by user') plt.ylabel('Median(Sentiment)') plt.colorbar() ```
github_jupyter
``` #hide #all_slow ``` # Inference of ensemble models > Inference with all the models trained. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/marcomatteo/steel_segmentation/blob/master/dev_nbs/21_ensemble_unet_fpn_resnet34.ipynb) ``` #hide %load_ext autoreload %autoreload 2 %matplotlib inline from steel_segmentation.all import * from fastai.vision.all import * import torch import torch.nn as nn import torch.nn.functional as F import segmentation_models_pytorch as smp import warnings import random import os import cv2 import pandas as pd import numpy as np from tqdm import tqdm from matplotlib import pyplot as plt seed_everything() torch.device("cuda:0").type torch.cuda.is_available() torch.cuda.empty_cache() !nvidia-smi print_competition_data(models_dir) device = torch.device("cuda") # device = torch.device("cpu") # testset = get_test_dls(batch_size=2) testset = get_test_dls(root=train_path, df=train_multi, batch_size=4) name, x = next(iter(testset)) x.shape ``` ## UNET model ``` dls = get_segmentation_dls(4, (256, 1600), with_btfms=False) unet_trainer = unet_learner(dls=dls, arch=resnet34, metrics=seg_metrics, pretrained=True) unet_trainer.model_dir = models_dir unet_trainer = unet_trainer.load("fastai-UNET-ResNet34-256-stage5") unet_model = unet_trainer.model unet_model.to(device) unet_model = unet_model.eval() unet_preds = torch.sigmoid(unet_model(x.to(device))) unet_preds.shape unet_preds[:, 1:].shape ``` ## FPN model ``` fpn_model = smp.FPN("resnet34", encoder_weights='imagenet', classes=4, activation=None) loaded_dict = torch.load(models_dir/"kaggle-FPN-ResNet34.pth") fpn_model.load_state_dict(loaded_dict["state_dict"], strict=True) fpn_model.to(device) fpn_model = fpn_model.eval() fpn_preds = torch.sigmoid(fpn_model(x.to(device))) fpn_preds.shape ``` ## Ensemble As an ensemble we build a specific `nn.Module` class to get the predictions. 
```
class Ensemble(nn.Module):
    """Average the sigmoid probabilities of several segmentation models.

    Every sub-model must produce logits of the same shape for a given
    input (batch, classes, H, W); the ensemble output has that same shape.
    """

    def __init__(self, models):
        super().__init__()
        # nn.ModuleList (not a plain list) registers the sub-models so that
        # .to(device), .eval() and state_dict() propagate to all of them.
        self.models = nn.ModuleList(models)

    def forward(self, x):
        # One forward pass per model; clone the input so no model can
        # mutate it in place before the next one sees it.
        preds = [model(x.clone()) for model in self.models]
        # Bug fix: `f.sigmoid` referenced an undefined name (the functional
        # module is imported as `F`); use torch.sigmoid as elsewhere here.
        probs = [torch.sigmoid(p) for p in preds]
        # Bug fix: average over the *model* dimension while keeping the
        # class dimension intact. The previous
        # torch.cat(..., axis=1).mean(axis=1) collapsed the per-class
        # channels into a single map.
        return torch.stack(probs, dim=0).mean(dim=0)


models = [fpn_model, unet_model]
# NOTE: forward() still fails for this particular pair because UNET outputs
# 5 classes and FPN 4; the manual averaging loop below aligns them by
# slicing away the UNET background channel instead.
ensemble = Ensemble(models)
```

## Inference

```
def post_process(probability, threshold, min_size):
    """Binarize a probability mask and drop small connected components.

    Components with fewer pixels than `min_size` are ignored. Returns the
    cleaned (256, 1600) float32 mask and the number of kept components.
    """
    mask = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
    num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
    predictions = np.zeros((256, 1600), np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num


best_threshold = 0.5
min_size = 3000
# Per-class minimum component sizes (defect classes 1-4).
min_sizes = [3000, 3000, 3000, 3000]

# start prediction on validation set
# NOTE(review): this loop iterates `testset` — confirm it is meant to run
# on the validation split rather than the test split.
predictions = []
for i, batch in enumerate(tqdm(testset)):
    fnames, images = batch
    images = images.to(device)
    # FPN (4-class output)
    fpn_preds = torch.sigmoid(fpn_model(images))
    fpn_preds = fpn_preds.detach().cpu().numpy()
    # UNET: drop channel 0 (background) so shapes match the 4-class FPN
    unet_preds = torch.sigmoid(unet_model(images))
    unet_preds = unet_preds[:, 1:].detach().cpu().numpy()
    # Simple mean of the two models' probabilities.
    batch_preds = (fpn_preds + unet_preds) / 2
    for fname, preds in zip(fnames, batch_preds):
        for cls, pred in enumerate(preds):
            min_size = min_sizes[cls]
            pred, num = post_process(pred, best_threshold, min_size)
            rle = mask2rle(pred)
            name = fname + f"_{cls+1}"
            predictions.append([name, rle])

# save predictions to submission.csv
df = pd.DataFrame(predictions, columns=['ImageId_ClassId', 'EncodedPixels'])
df.to_csv(sub_path/"ensemble_validation.csv", index=False)

# start prediction on test set
predictions = []
for i, batch in enumerate(tqdm(testset)):
    fnames, images = batch
    images = images.to(device)
    # FPN
    fpn_preds = torch.sigmoid(fpn_model(images))
    fpn_preds = fpn_preds.detach().cpu().numpy()
    # UNET
    unet_preds = torch.sigmoid(unet_model(images))
    unet_preds = unet_preds[:, 1:].detach().cpu().numpy()
    batch_preds = (fpn_preds + unet_preds) / 2
    for fname, preds in zip(fnames, batch_preds):
        for cls, pred in enumerate(preds):
            min_size = min_sizes[cls]
            pred, num = post_process(pred, best_threshold, min_size)
            rle = mask2rle(pred)
            name = fname + f"_{cls+1}"
            predictions.append([name, rle])

# save predictions to submission.csv
df = pd.DataFrame(predictions, columns=['ImageId_ClassId', 'EncodedPixels'])
df.to_csv(sub_path/"ensemble_submission.csv", index=False)
df.head()
```

## References
github_jupyter
# Chapter 7 - Data Exploration and Summary Statistics ``` import swat conn = swat.CAS('server-name.mycompany.com', 5570, 'username', 'password') conn organics = conn.CASTable('ORGANICS') organics = conn.CASTable('ORGANICS') ``` ## Summarizing Continuous Variables ``` conn.caslibinfo() conn.tableinfo() ``` ### Summary ``` organics.summary() ``` Specify input variables ``` varlist = ['DemAge', 'Purchase_12mon', 'Purchase_6mon'] organics.summary(inputs=varlist) varlist = ['Purchase_3mon', 'Purchase_6mon', 'Purchase_9mon', 'Purchase_12mon'] result = organics.summary(inputs=varlist) print(result.performance.cpu_system_time) print(result.performance.cpu_user_time) list(result.keys()) df = result['Summary'] df.columns ``` Interfact with result table using stack() ``` df.index = df['Column'] stackedDf = df[['Min','Mean','Max']].stack() print(stackedDf) ``` Create bar chart ``` from bokeh.charts import Bar, output_file, output_notebook, show p = Bar(df, 'Column', values='Mean', color='#1f77b4', agg='mean', title='', xlabel='', ylabel='Frequency') output_notebook() #output_file('visual1.html') show(p) ``` Group By ``` organics.groupby = 'DemGender' result = organics.summary(inputs='DemAge') list(result.keys()) result['ByGroupInfo'] ``` Concatenate result tables from different by groups ``` result2 = result.concat_bygroups() result2['Summary'][['N','Min','Mean','Max','Std']] varlist = 'DemAge' organics.groupby = ['DemGender','DemHomeowner'] result = organics.summary(inputs=varlist).concat_bygroups() result['Summary'][['N','Min','Mean','Max','Std']] result['Summary'].index ``` Remove the group by parameter ``` organics.del_param('groupby') organics.summary() ``` ### Histograms ``` result = organics.histogram( reqpacks=[{'nicebinning':False, 'nbins':10}], inputs=['Purchase_3mon'] ) result['BinDetails'] p = Bar(result['BinDetails'], 'MidPoint', values='Percent', color='#1f77b4', agg='mean', title='', legend=None, xlabel = 'Purchase_3mon', ylabel = 'Percent' ) output_notebook() 
#output_file('bar.html') show(p) ``` nicebining = True ``` result = organics.histogram( reqpacks=[{'nicebinning':True, 'nbins':10}], inputs=['Purchase_3mon'] ) p = Bar(result['BinDetails'], 'MidPoint', values='Percent', color='#1f77b4', agg='mean', title='', legend=None, xlabel = 'Purchase_3mon', ylabel = 'Percent' ) output_notebook() #output_file('bar.html') show(p) ``` nicebining = True, nbins = 25 ``` result = organics.histogram( reqpacks=[{'nicebinning':True, 'nbins':25}], inputs=['Purchase_3mon'] ) p = Bar(result['BinDetails'], 'MidPoint', values='Percent', color='#1f77b4', agg='mean', title='', legend=None, xlabel = 'Purchase_3mon', ylabel = 'Percent' ) output_notebook() #output_file('bar.html') show(p) ``` Specify binwidth directly ``` result = organics.histogram( reqpacks=[{'binwidth':50}], inputs=['Purchase_3mon'] ) p = Bar(result['BinDetails'], 'MidPoint', values='Percent', color='#1f77b4', agg='mean', title='', legend=None, xlabel = 'Purchase_3mon', ylabel = 'Percent' ) output_notebook() #output_file('bar.html') show(p) ``` Group by ``` organics.groupby = ['DemGender', 'DemAgeGroup'] result = organics.histogram( reqpacks=[{'nicebinning':False, 'nbins':20}], inputs=['DemAffl'] ) list(result.keys()) result['ByGroupInfo'] all_df = { 'Gender=Female, AgeGroup=Middle': result['ByGroup1.BinDetails'], 'Gender=Female, AgeGroup=Senior': result['ByGroup2.BinDetails'], 'Gender=Female, AgeGroup=Unknown': result['ByGroup3.BinDetails'], 'Gender=Female, AgeGroup=Young' : result['ByGroup4.BinDetails'] } all_pic = [] for this_title in all_df: this_pic = Bar(all_df[this_title], 'MidPoint', values='Percent', color='#1f77b4', agg='mean', title=this_title, legend=None, xlabel='DemAffl', ylabel='Percent') all_pic.append(this_pic) from bokeh.io import gridplot p = gridplot([[all_pic[0], all_pic[1]],[all_pic[2], all_pic[3]]]) output_notebook() #output_file('grid.html') show(p) ``` ### Percentile ``` conn.loadactionset('percentile') del organics.groupby 
organics.percentile(inputs='DemAge') result = organics.percentile(inputs='DemAge', values=list(range(5,100,5))) result ``` Group by percentile ``` organics.groupby = ['DemGender'] result = organics.percentile(inputs='DemAge', values=list(range(5,100,5))) df = result.concat_bygroups()['Percentile'] df.reset_index(level=0, inplace=True) from bokeh.charts import Scatter p = Scatter(df, x='Pctl', y='Value', legend='top_center', marker='DemGender') output_notebook() #output_file('scatter.html') show(p) ``` ### Correlation ``` organics = conn.CASTable('ORGANICS') organics.correlation() ``` Disable output of simple statistics ``` varlist = ['DemAffl', 'DemAge', 'purchase_3mon'] organics.correlation(inputs=varlist, simple=False) ``` ## Summarizing Categorical Variables ## Distinct Counts ``` organics = conn.CASTable('ORGANICS') organics.distinct() ``` Skip exact counting for high cardinality variables. ``` organics.distinct(maxnvals=500) result = conn.CASTable('distinctOutput', replace=True) organics.distinct(maxnvals=500, casout=result) result.head() ``` Use columninfo and distinct actions together to decide which variables should be used as categorical or continous ``` from pprint import pprint import pandas as pd out1 = organics.columninfo()['ColumnInfo'] out2 = organics.distinct(maxnvals=1000)['Distinct'] out3 = pd.merge(out1, out2, left_on='Column', right_on='Column', how='left') varList = { 'cats': [], 'conts': [], 'others' : [] } for index, row in out3.iterrows(): varname = row['Column'].lower() vartype = row['Type'].lower() if (vartype == 'char' and row['NDistinct'] <= 128): varList['cats'].append(varname) elif (vartype == 'double' and row['NDistinct'] <= 16): varList['cats'].append(varname) elif (vartype == 'double' and row['NDistinct'] > 16): varList['conts'].append(varname) else: varList['others'].append(varname) pprint(varList) ``` ### Frequency ``` varlist = ['TargetBuy'] organics.freq(inputs=varlist) ``` Another way to subset the data for freq() ``` 
organics[['TargetBuy']].freq() ``` Create bar chart ``` df = organics['TargetBuy'].freq() p = Bar(df['Frequency'], 'FmtVar', values='Frequency', color='#1f77b4', agg='mean', title='', legend=None, xlabel='TargetBuy', ylabel='Frequency' ) output_notebook() #output_file('bar.html') show(p) ``` Request more than one frequency tables ``` organics[['TargetBuy','DemAgeGroup','DemHomeowner']].freq() ``` Use freq() to mimic histogram ``` df = organics['DemAge'].freq(includemissing=False) p = Bar(df['Frequency'], 'FmtVar', values='Frequency', color='#1f77b4', agg='mean', title='', legend=None, xlabel='Age', ylabel='Frequency', bar_width=1, plot_width=1200, plot_height=600 ) output_notebook() #output_file('bar.html') show(p) ``` Turn on includingmissing option ``` df = organics['DemAge'].freq(includemissing=True) p = Bar(df['Frequency'], 'FmtVar', values='Frequency', color='#1f77b4', agg='mean', title='', legend=None, xlabel='Age', ylabel='Frequency', bar_width=1, plot_width=1200, plot_height=600 ) output_notebook() #output_file('bar.html') show(p) ``` ### topK ``` organics['purchase_12mon'].topk(topk=5, bottomk=0) result = organics['purchase_12mon'].topK(topk=5, bottomk=0) for df in result: print(df + ' table has: ' + ' '.join(result[df].columns.values)) ``` Return both top 5 and bottom 5 values ``` organics[['purchase_12mon','DemAge']].topK(topk=5, bottomk=5) ``` Use weight variable ``` organics['DemTVReg'].topk(topk=3, bottomk=3, weight='DemAffl', agg='mean') organics['DemTVReg'].topK(topk=3, bottomk=3, weight='purchase_3mon', agg='sum') ``` ### Cross Tabulations ``` result = organics.crosstab(row='DemAgeGroup', col='DemGender') result ``` The levels of the column variable (DemGender) are actually stored in colinfo ``` result['Crosstab'].colinfo ``` Rename the columns of result table using the levels of DemGender ``` df = result['Crosstab'] labels = {k: v.label for k, v in df.colinfo.items() if v.label} df = df.rename(columns = labels) df ``` Request associations and 
chi-square test ``` organics.crosstab(row='DemAgeGroup', col='DemGender',association=True, chisq=True) ``` Display the two way frequency table in a clustered bar chart ``` from bokeh.charts import Bar, output_file, show from bokeh.charts.operations import blend from bokeh.charts.attributes import cat, color result1 = organics.crosstab(row='DemGender', col='DemAgeGroup') # rename output columns result1['Crosstab'].columns = ['DemGender','middle','senior','unknown','young'] bar = Bar(result1['Crosstab'], values=blend('middle','senior','unknown','young', name='counts', labels_name='AgeGroup'), label='DemGender', group='AgeGroup', xlabel = 'Gender', ylabel = 'Frequency', legend='top_right') output_notebook() #output_file('bar.html') show(bar) ``` Crosstab with weight variable ``` result = organics.crosstab(row='DemAgeGroup', col='DemGender', weight='purchase_3mon', aggregators='sum') df = result['Crosstab'] labels = {k: v.label for k, v in df.colinfo.items() if v.label} df = df.rename(columns = labels) df ``` Crosstab with binning ``` result = organics.crosstab(row='DemAgeGroup', col='purchase_3mon', colnbins=4, chisq=True) df = result['Crosstab'] labels = {k: v.label for k, v in df.colinfo.items() if v.label} df = df.rename(columns = labels) df ``` Crosstab with group by ``` organics.groupby='DemReg' result2 = organics.crosstab(row='DemAgeGroup', col='DemGender',chisq=True) for table_name in result2: df = result2[table_name] if 'ChiSq' in table_name: print(df[df['Statistic']=='Chi-Square']) ``` ## Variable Transformation and Dimension Reduction ``` conn.loadactionset('datapreprocess') conn.help(actionset='dataPreprocess') ``` Bucket binning ``` outData = conn.CASTable('cas.binnedData', replace=True) organics = conn.CASTable('Organics') result = organics.binning( inputs = 'purchase_3mon', tech = 'bucket', casout = outData, nBinsArray = 10 ); outData.head(10) result ``` Specify number of bins ``` result = organics.binning( inputs = ['purchase_3mon', 'purchase_6mon', 
'purchase_9mon', 'purchase_12mon'], tech = 'bucket', casout = outData, nBinsArray = [4, 10, 20, 6] ); outData.head(10) ``` Quantile binning ``` outData2 = conn.CASTable('binnedData2', replace=True) result = organics.binning( inputs = ['purchase_3mon', 'purchase_6mon', 'purchase_9mon', 'purchase_12mon'], tech = 'Quantile', casout = outData2, copyallvars = True, nBinsArray = [4, 4, 4, 4] ) outData2.columns ``` Intepret the result table from quantile binning ``` outData2.crosstab(row='bin_purchase_3mon', col='bin_purchase_12mon') ``` Output score code ``` result = organics.binning( inputs = 'purchase_3mon', tech = 'bucket', casout = outData, code = {'comment': True, 'tabform': True}, nBinsArray = 10 ) df = result['CodeGen'] df ``` ### Variable Imputation Check which variables have missing values ``` df = organics.distinct()['Distinct'] df[df['NMiss'] > 0] ``` Impute missing values of PromTime ``` organics[['PromTime']].impute() ``` Impute using medianand create a new table with imputed values ``` outData = conn.CASTable('imputedData1') outData.replace = True impute1 = organics['PromTime'].Impute() impute1.methodcontinuous = 'Median' impute1.copyallvars = True impute1.casout = outData impute1() ``` Impute using constant ``` impute1.methodcontinuous = 'Value' impute1.valuescontinuous = 0 impute1() ``` Output score code ``` impute1.techforcont = 'Median' impute1.code = {'comment':True} impute1()['CodeGen'] conn.close() ```
github_jupyter
# Fire up graphlab create
```
import graphlab
```
# Load some house value vs. crime rate data

Dataset is from Philadelphia, PA and includes average house sales price in a number of neighborhoods. The attributes of each neighborhood we have include the crime rate ('CrimeRate'), miles from Center City ('MilesPhila'), town name ('Name'), and county name ('County').
```
sales = graphlab.SFrame.read_csv('Philadelphia_Crime_Rate_noNA.csv/')
sales
```
# Exploring the data

The house price in a town is correlated with the crime rate of that town. Low crime towns tend to be associated with higher house prices and vice versa.
```
graphlab.canvas.set_target('ipynb')
sales.show( view="Scatter Plot", x="CrimeRate", y="HousePrice")
```
# Fit the regression model using crime as the feature
```
crime_model = graphlab.linear_regression.create( sales, target='HousePrice', features=['CrimeRate'], validation_set=None, verbose=False)
```
# Let's see what our fit looks like

Matplotlib is a Python plotting library that we will use to visualize the regression fit. You can install it with: 'pip install matplotlib'
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(sales['CrimeRate'],sales['HousePrice'],'.', sales['CrimeRate'],crime_model.predict(sales),'-');
```
Above: blue dots are original data, green line is the fit from the simple regression.

# Remove Center City and redo the analysis

Center City is the one observation with an extremely high crime rate, yet house prices are not very low. This point does not follow the trend of the rest of the data very well. A question is how much including Center City is influencing our fit on the other datapoints. Let's remove this datapoint and see what happens.
``` sales_noCC = sales[sales['MilesPhila'] != 0.0] sales_noCC.show( view="Scatter Plot", x="CrimeRate", y="HousePrice") ``` ### Refit our simple regression model on this modified dataset: ``` crime_model_noCC = graphlab.linear_regression.create( sales_noCC, target='HousePrice', features=['CrimeRate'], validation_set=None, verbose=False) ``` ### Look at the fit: ``` plt.plot(sales_noCC['CrimeRate'], sales_noCC['HousePrice'],'.', sales_noCC['CrimeRate'], crime_model.predict(sales_noCC),'-'); ``` # Compare coefficients for full-data fit versus no-Center-City fit Visually, the fit seems different, but let's quantify this by examining the estimated coefficients of our original fit and that of the modified dataset with Center City removed. ``` crime_model.get('coefficients') crime_model_noCC.get('coefficients') ``` Above: We see that for the "no Center City" version, per unit increase in crime, the predicted decrease in house prices is 2,287. In contrast, for the original dataset, the drop is only 576 per unit increase in crime. This is significantly different! ### High leverage points: Center City is said to be a "high leverage" point because it is at an extreme x value where there are not other observations. As a result, recalling the closed-form solution for simple regression, this point has the *potential* to dramatically change the least squares line since the center of x mass is heavily influenced by this one point and the least squares line will try to fit close to that outlying (in x) point. If a high leverage point follows the trend of the other data, this might not have much effect. On the other hand, if this point somehow differs, it can be strongly influential in the resulting fit. ### Influential observations: An influential observation is one where the removal of the point significantly changes the fit. As discussed above, high leverage points are good candidates for being influential observations, but need not be. 
Other observations that are *not* leverage points can also be influential observations (e.g., strongly outlying in y even if x is a typical value). # Remove high-value outlier neighborhoods and redo analysis Based on the discussion above, a question is whether the outlying high-value towns are strongly influencing the fit. Let's remove them and see what happens. ``` sales_nohighend = sales_noCC[sales_noCC['HousePrice'] < 350000] crime_model_nohighend = graphlab.linear_regression.create( sales_nohighend, target='HousePrice', features=['CrimeRate'], validation_set=None, verbose=False) ``` ### Do the coefficients change much? ``` crime_model_noCC.get('coefficients') crime_model_nohighend.get('coefficients') plt.plot(sales_noCC['CrimeRate'], sales_noCC['HousePrice'],'.', sales_noCC['CrimeRate'], crime_model_nohighend.predict(sales_noCC),'-'); ``` Above: We see that removing the outlying high-value neighborhoods has *some* effect on the fit, but not nearly as much as our high-leverage Center City datapoint.
github_jupyter
# Assignment of identity-barcodes In this tutorial, we'll be walking through the steps to assignment of identity barcodes for multiple indexing experiments. In this experiment, A549 lung carcinoma cells were transduced with a pool containing 93 total sgRNAs (90 sgRNAs targeting 45 different genes and 3 control sgRNAs). Cells were split into 6 conditions, receiving no treatment or treatments of DZNep, Trichostatin A, Valproic Acid, Kinetin, or, Resveratrol. Before sequencing, cells were multiplexed at equal proportions with 1 CMO per sample type. The original dataset is downloaded from 10x genomics [dataset](https://www.10xgenomics.com/resources/datasets/30-k-a-549-lung-carcinoma-cells-treatments-transduced-with-a-crispr-pool-multiplexed-6-cm-os-3-1-standard-6-0-0). <div class="alert alert-info"> Note You need to install [scar](https://scar-tutorials.readthedocs.io/en/latest/) to run this notebook on your own device. You can also run it on Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novartis/scAR/blob/main/docs/tutorials/scAR_tutorial_identity_barcode.ipynb) </div> ``` # Run this cell to install scar in Colab # Skip this cell if running on your own device %pip install scanpy %pip install git+https://github.com/Novartis/scAR.git %pip install matplotlib==3.1.3 # Specify this matplotlib version to avoid errors import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scanpy as sc from scar import model import warnings warnings.simplefilter("ignore") ``` ## Download data The raw count matrices (cellranger output: raw_feature_bc_matrix) can be downloaded from 10x Dataset. Filtered count matrices are not available for this experiment. 
``` A549_30K = sc.read_10x_h5(filename='CRISPR_A549_30K_Multiplex_count_raw_feature_bc_matrix.h5ad', gex_only=False, backup_url='https://cf.10xgenomics.com/samples/cell-exp/6.0.0/SC3_v3_NextGem_DI_CellPlex_CRISPR_A549_30K_Multiplex/SC3_v3_NextGem_DI_CellPlex_CRISPR_A549_30K_Multiplex_count_raw_feature_bc_matrix.h5'); A549_30K.var_names_make_unique(); ``` Raw Count matrix of cell tags (unfiltered droplets) ``` A549_30K_CMO_raw = A549_30K[:, A549_30K.var['feature_types']=='Multiplexing Capture'].to_df() ``` ## Estimate ambient profile Identify cell-containing and cell-free droplets using kneeplot of mRNA counts. ``` all_droplets = pd.DataFrame(A549_30K[:,A549_30K.var['feature_types']=='Gene Expression'].X.sum(axis=1), index = A549_30K.obs_names, columns=['total_counts']) all_droplets['droplets'] = 'cell-free droplets' all_droplets['droplets'] = all_droplets['droplets'].mask(all_droplets['total_counts']>200, 'others') all_droplets['droplets'] = all_droplets['droplets'].mask(all_droplets['total_counts']>500, 'cells') all_droplets = all_droplets.sort_values(by='total_counts', ascending=False).reset_index().rename_axis("rank").reset_index() all_droplets = all_droplets.loc[all_droplets['total_counts']>0] all_droplets = all_droplets.set_index('index').rename_axis('cells') ``` <div class="alert alert-info"> Note The thresholds (200 and 500) are experiment-specific. We currently manually determine them by examing the following kneeplot. 
</div> ``` plt.figure(figsize=(3, 1.8), dpi=150) ax = sns.lineplot(data = all_droplets, x='rank', y='total_counts', hue='droplets', hue_order=['cells', 'others', 'cell-free droplets'], palette=sns.color_palette()[-3:], markers=False, lw=2) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel('sorted droplets'); ax.legend(loc='lower left', ncol=1, title=None, frameon=False) ax.set_title(f'kneeplot: A549_30k'); sns.set_palette("muted") sns.set_style("ticks") sns.despine(offset=10, trim=False); ``` **Raw count matrix of cell tags (filtered droplets)** ``` A549_30K_filtered = A549_30K[A549_30K.obs_names.isin(all_droplets[all_droplets['droplets']=='cells'].index)] # equal to filtered population as cellranger output A549_30K_CMO_filtered = A549_30K_filtered[:, A549_30K_filtered.var['feature_types']=='Multiplexing Capture'].to_df() # pandas.DataFrame A549_30K_CMO_filtered.head() ``` **Ambient profile of cell tags (CMOs)** ``` cell_free_CMO = A549_30K_CMO_raw.loc[A549_30K_CMO_raw.index.difference(A549_30K_CMO_filtered.index)] ambient_profile_CMO = cell_free_CMO.sum()/cell_free_CMO.sum().sum() # pandas.Series ambient_profile_CMO = ambient_profile_CMO.to_frame("ambient profile") ambient_profile_CMO.head() ``` ## Training ``` CMO = model(raw_count = A549_30K_CMO_filtered, ambient_profile=ambient_profile_CMO, # In the case of default None, the ambient_profile will be calculated by averaging pooled cells feature_type='tag' # We use the 'tag' for cell tag/cell indexing experiments ) CMO.train(epochs=80, batch_size=64, verbose=True, ) # After training, we can infer the native true signal CMO.inference(cutoff=3) # by defaut, batch_size=None, set a batch_size if getting a GPU memory issue ``` Resulting assignment is saved in `CMO.feature_assignment`. 
If there are multiple guides detected after denoising, try to increase the cutoff and re-run `CMO.inference(cutoff=10)` Download metadata of CMOs and treatments ``` !wget https://cf.10xgenomics.com/samples/cell-exp/6.0.0/SC3_v3_NextGem_DI_CellPlex_CRISPR_A549_30K_Multiplex/SC3_v3_NextGem_DI_CellPlex_CRISPR_A549_30K_Multiplex_config.csv metadata = pd.read_csv('SC3_v3_NextGem_DI_CellPlex_CRISPR_A549_30K_Multiplex_config.csv', sep='delimiter') CMO2Tret = dict() for val in metadata.iloc[11:].values: array_txt = val[0].split(",") CMO2Tret.update({array_txt[1]: array_txt[2].split("v2_")[-1]}) CMO2Tret CMO.feature_assignment['treatment'] = CMO.feature_assignment['tag'].map(CMO2Tret) CMO.feature_assignment.head() ``` ## Visulization ### Cell clustering We can cluster the cells and visulize them with UMAP. ``` A549_30K_filtered.obs = A549_30K_filtered.obs.join(CMO.feature_assignment, how='left') A549_30K_mRNA_filtered = A549_30K_filtered[:, A549_30K_filtered.var['feature_types']=='Gene Expression'] random_state = 8 adata_out = A549_30K_mRNA_filtered.copy() sc.pp.filter_genes(adata_out, min_cells=20) sc.pp.filter_cells(adata_out, min_genes=200) sc.pp.normalize_total(adata_out, target_sum=1e4) sc.pp.log1p(adata_out) sc.tl.pca(adata_out, svd_solver='arpack', random_state=random_state) sc.pp.neighbors(adata_out, n_neighbors=15, n_pcs=25, random_state=random_state) sc.tl.umap(adata_out, random_state=random_state) ``` Let's filter out the cells with multiple CMOs ``` adata_out = adata_out[adata_out.obs['n_tag']==1] print("{:.2f}% cells are assigned with a single CMO".format(adata_out.shape[0]/A549_30K_mRNA_filtered.shape[0]* 100)) ``` ### UMAP ``` sc.settings.set_figure_params(dpi=150,figsize=(3.5, 3)) sc.pl.umap(adata_out, size=4, color=["treatment"], frameon=False, legend_loc="on data", legend_fontsize=6, ) ``` Cells assigned with a same treatment (e.g. DZNep, Trichostatin A, Valproic Acid, or Resveratrol) present similar transcriptomic profile.
github_jupyter
# Mean Shift This Code template is for the Cluster analysis using a simple Mean Shift(Centroid-Based Clustering using a flat kernel) Clustering algorithm and includes 2D and 3D cluster visualization of the Clusters. ### Required Packages ``` !pip install plotly import operator import warnings import itertools import numpy as np import pandas as pd import seaborn as sns import plotly.express as px import matplotlib.pyplot as plt from mpl_toolkits import mplot3d import plotly.graph_objects as go from sklearn.cluster import MeanShift, estimate_bandwidth warnings.filterwarnings("ignore") ``` ### Initialization Filepath of CSV file ``` file_path = "" ``` List of features which are required for model training ``` features=[] ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X. ``` X = df[features] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. 
``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ``` Calling preprocessing functions on the feature and target set. ``` x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) X.head() ``` ### Model Mean shift clustering using a flat kernel. Mean shift clustering aims to discover “blobs” in a smooth density of samples. It is a centroid-based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids. Seeding is performed using a binning technique for scalability. [More information](https://analyticsindiamag.com/hands-on-tutorial-on-mean-shift-clustering-algorithm/) #### Tuning Parameters 1. bandwidth float, default=None > Bandwidth used in the RBF kernel. If not given, the bandwidth is estimated using sklearn.cluster.estimate_bandwidth 2. seeds array-like of shape (n_samples, n_features), default=None > Seeds used to initialize kernels. If not set, the seeds are calculated by clustering.get_bin_seeds with bandwidth as the grid size and default values for other parameters. 3. bin_seeding bool, default=False > If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth. 4. min_bin_freq int, default=1 > To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds. 5. cluster_all bool, default=True > If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. 
If false, then orphans are given cluster label -1 6. n_jobs int, default=None > The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. 7. max_iter int, default=300 > Maximum number of iterations, per seed point before the clustering operation terminates [For more detail on API](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html) <br> <br> ####Estimate Bandwidth Estimate the bandwidth to use with the mean-shift algorithm. That this function takes time at least quadratic in n_samples. For large datasets, it’s wise to set that parameter to a small value. ``` bandwidth = estimate_bandwidth(X, quantile=0.18, n_samples=200) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(X) y_pred = ms.predict(X) ``` ### Cluster Analysis First, we add the cluster labels from the trained model into the copy of the data frame for cluster analysis/visualization. ``` ClusterDF = X.copy() ClusterDF['ClusterID'] = y_pred ClusterDF.head() ``` #### Cluster Records The below bar graphs show the number of data points in each available cluster. ``` ClusterDF['ClusterID'].value_counts().plot(kind='bar') ``` #### Cluster Plots Below written functions get utilized to plot 2-Dimensional and 3-Dimensional cluster plots on the available set of features in the dataset. Plots include different available clusters along with cluster centroid. 
``` def Plot2DCluster(X_Cols,df): for i in list(itertools.combinations(X_Cols, 2)): plt.rcParams["figure.figsize"] = (8,6) xi,yi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]) for j in df['ClusterID'].unique(): DFC=df[df.ClusterID==j] plt.scatter(DFC[i[0]],DFC[i[1]],cmap=plt.cm.Accent,label=j) plt.scatter(ms.cluster_centers_[:,xi],ms.cluster_centers_[:,yi],marker="^",color="black",label="centroid") plt.xlabel(i[0]) plt.ylabel(i[1]) plt.legend() plt.show() def Plot3DCluster(X_Cols,df): for i in list(itertools.combinations(X_Cols, 3)): xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2]) fig,ax = plt.figure(figsize = (16, 10)),plt.axes(projection ="3d") ax.grid(b = True, color ='grey',linestyle ='-.',linewidth = 0.3,alpha = 0.2) for j in df['ClusterID'].unique(): DFC=df[df.ClusterID==j] ax.scatter3D(DFC[i[0]],DFC[i[1]],DFC[i[2]],alpha = 0.8,cmap=plt.cm.Accent,label=j) ax.scatter3D(ms.cluster_centers_[:,xi],ms.cluster_centers_[:,yi],ms.cluster_centers_[:,zi], marker="^",color="black",label="centroid") ax.set_xlabel(i[0]) ax.set_ylabel(i[1]) ax.set_zlabel(i[2]) plt.legend() plt.show() def Plotly3D(X_Cols,df): for i in list(itertools.combinations(X_Cols,3)): xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2]) fig1 = px.scatter_3d(ms.cluster_centers_,x=ms.cluster_centers_[:,xi],y=ms.cluster_centers_[:,yi], z=ms.cluster_centers_[:,zi]) fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID']) fig3 = go.Figure(data=fig1.data + fig2.data, layout=go.Layout(title=go.layout.Title(text="x:{}, y:{}, z:{}".format(i[0],i[1],i[2]))) ) fig3.show() sns.set_style("whitegrid") sns.set_context("talk") plt.rcParams["lines.markeredgewidth"] = 1 sns.pairplot(data=ClusterDF, hue='ClusterID', palette='Dark2', height=5) Plot2DCluster(X.columns,ClusterDF) Plot3DCluster(X.columns,ClusterDF) Plotly3D(X.columns,ClusterDF) ``` #### [Created by Anu Rithiga](https://github.com/iamgrootsh7)
github_jupyter
# KIC 9651065 ``` import numpy as np import matplotlib.pyplot as plt import exoplanet as xo import pymc3 as pm from maelstrom import Maelstrom from astropy.stats import LombScargle from lightkurve import search_lightcurvefile import lightkurve as lk from maelstrom.orbit import Orbit from exoplanet.orbits import get_true_anomaly import pymc3 as pm import theano.tensor as tt from matplotlib import rcParams rcParams["figure.dpi"] = 150 rcParams["savefig.dpi"] = 300 t, y = np.loadtxt('https://raw.githubusercontent.com/danhey/maelstrom/master/paper/lc/9651065_lc.txt', usecols=(0,1)).T ms = Maelstrom(t, y, max_peaks=5, fmin=5, fmax=48) ms.first_look(); period_guess = 300 a_guess = 200 time, flux = ms.time, ms.flux freq = ms.freq weights = ms.get_weights(norm=False) pg = ms.period_search() periods = np.linspace(100, 300, 300) results = pg.fit(periods) ys = np.array([[r[0] for r in row] for row in results]) sm = np.sum(ys, axis=0) period_ind = np.argmax(sm) plt.plot(periods[:-2], sm[:-2]); from maelstrom.utils import unique_colors hh = unique_colors(len(ms.freq), cmap='Blues') plt.figure(figsize=mnras_size(240.)) ys = np.array([[np.exp(r[1]["logasini"]) for r in row] for row in results]) for i, c in zip(ys, hh): plt.plot(periods, i, alpha=1, linewidth=0.8, c=c); plt.xlabel('Period (d)') plt.ylabel(r'a$\sin{i}$ (s)') plt.ylim(0, None) plt.xlim(100,300) plt.axhline(184., c='r', linestyle='dashed', linewidth=0.7) plt.axvline(272., c='r', linestyle='dashed', linewidth=0.7) plt.savefig(overleaf_path + '9651065_period_search.pdf', dpi=300, bbox_inches='tight', pad_inches=0) ``` ## Maelstrom ``` ms.setup_orbit_model(period=period_guess) # opt = ms.optimize() pb1 = ms.pin_orbit_model() opt = pb1.optimize() opt # with pb1: # trace = pm.load_trace('traces/9651065_FINAL_VERSION2/') with pb1: trace = pm.sample( tune=1000, draws=2000, start=opt, chains=2, step=xo.get_dense_nuts_step(target_accept=0.9), ) pm.save_trace(trace, 'traces/NEW/9651065') with pb1: trace = 
# --- KIC 9651065: posterior time-delay analysis ------------------------------
# Reload the saved PyMC3 trace for this star and summarise it.
# NOTE(review): `pb1`, `trace`, `time`, `flux`, `blue`, `mnras_size` and
# `overleaf_path` come from earlier notebook cells not visible in this chunk.
pm.load_trace('traces/NEW/9651065')
pm.summary(trace)
from tqdm import tqdm
# Evaluate the orbital time delay (days -> seconds) for 1000 posterior draws
# to build a median curve and a 1-sigma band.
taus = []
with pb1:
    for samp in tqdm(xo.utils.get_samples_from_trace(trace, size=1000), total=1000):
        taus.append(xo.eval_in_model(pb1.orbit.get_time_delay(time), samp) * 86400)
med_td = np.median(taus, axis=0)
sd_td = np.std(taus, axis=0)
mean = np.mean(taus)
mean
np.random.seed(23)
# Publication figure: 10 posterior draws of the phase-folded time delay.
fig, ax = plt.subplots(figsize=mnras_size(240), constrained_layout=True)
ax.set_rasterized(True)
#ax.set_rasterization_zorder(1)
with pb1:
    for samp in xo.utils.get_samples_from_trace(trace, size=10):
        taumod = xo.eval_in_model(pb1.orbit.get_time_delay(time), samp) * 86400
        #ttime = (ms.time_mid + time - samp['tref']) % samp['period'] / samp['period']
        ttime = (time) % samp['PB1_period'] / samp['PB1_period']
        #ttime = ((ms.time_mid + time) + (samp['phi'] * samp['period'] / (2*np.pi))) % samp['period'] / samp['period']
        sort = np.argsort(ttime)
        ax.plot(ttime[sort], (taumod - np.mean(taumod))[sort], color=blue, linewidth=0.4, alpha=1,
                # rasterized=True,
                zorder=1)
    ax.set_xlabel('Orbital phase')
    ax.set_ylabel('Time delay (s)', c=blue)
    times = time  # + xo.eval_in_model(phi * period / (2*np.pi), samp)
    # Fold on the median posterior period and overlay the 1-sigma band.
    # NOTE(review): 108.12878089572754 looks like a hard-coded mean delay used
    # to centre the band on zero -- confirm it matches `mean` computed above.
    fold = times % np.median(trace['PB1_period']) / np.median(trace['PB1_period'])
    sort = np.argsort(fold)
    plt.fill_between(fold[sort], (med_td - sd_td - 108.12878089572754)[:,0][sort], (med_td+sd_td - 108.12878089572754)[:,0][sort], alpha=0.2, color=blue)
    ax.set_xlim(0, 1)
    plt.savefig(overleaf_path + '9651065.pdf', dpi=300, bbox_inches='tight', pad_inches=0)
    #plt.savefig('rast.pdf', dpi=300, bbox_inches='tight')
# Binary mass function from the posterior period and projected light-travel time.
from maelstrom.utils import mass_function
import astropy.units as u
rounding = 3
samples = pm.trace_to_dataframe(trace, varnames=['PB1_period', 'PB1_asini'])
mfs = mass_function(samples['PB1_period'].values * u.day, samples['PB1_asini'].values*u.s)
#mfs = np.array(mfs)
upper, med, lower = np.percentile(mfs.value, [84.13, 50, 15.86])
print('mass_func', ': ', np.round(med,rounding), ' + ', np.round(upper - med,rounding), ' - ', np.round(med - lower,rounding))
# Median and 1-sigma credible intervals for each orbital parameter.
varnames = ["period", "asini", "eccen", "omega", "phi"]
for var in varnames:
    percentiles = np.percentile(trace['PB1_' + var], q=[15.87, 50, 84.13])
    print(f'{var}: {percentiles[0]:.2f} + {percentiles[1] - percentiles[0]:.2f} - {percentiles[2] - percentiles[1]:.2f}')
```

## Subdividing

```
# Fit the pre-extracted ("subdivided") time delays directly with a PyMC3 model.
td_time, td_td, td_err = np.loadtxt('../data/kic9651065_uncertainties-plus-time-delay_Q99_llc.txt', delimiter=',', usecols=(0,1,2)).T
# Shift from reduced JD to the Kepler BJD - 2454833 time base.
td_time += 2400000
td_time -= 2454833
plt.scatter(td_time, td_td)
import theano.tensor as tt
from maelstrom.orbit import Orbit
with pm.Model() as subdivide_model:
    logP = pm.Normal("logP", mu=np.log(272), sd=1.0, testval=np.log(272))
    period = pm.Deterministic("period", pm.math.exp(logP))
    # The time of conjunction
    phi = xo.distributions.Angle("phi", testval=0.5691498)
    logs_lc = pm.Normal('logs_lc', mu=np.log(np.std(flux)), sd=10, testval=0.)
    # NOTE(review): `logasini` is defined but never used -- `asini` below is
    # what enters the orbit.
    logasini = pm.Normal('logasini', mu=np.log(184), sd=10, testval=np.log(184))
    asini = pm.Normal("asini", mu=184, sd=10, testval=184)
    # Linear drift term added to the delay model (s/day).
    drift = pm.Normal('drift', mu=0., sd=0.1, testval=0)
    # Periastron sampled from uniform angle
    omega = xo.distributions.Angle("omega", testval=-0.94)
    # Eccentricity
    eccen = pm.Uniform("eccen", lower=0, upper=0.9, testval=0.45)
    # The baseline flux
    mean = pm.Normal("mean", mu=0.0, sd=10.0, testval=0.003)
    # Here, we generate an Orbit instance and pass in our priors.
    orbit = Orbit(period=period, lighttime=asini, omega=omega, eccen=eccen, phi=phi, freq=0)
    # psi is defined to be negative but the light curve model takes 2*pi*f * (time - tau), so
    # we must flip tau here to phase it on the same values
    td = -1*tt.squeeze(orbit.get_time_delay(td_time) * 86400)  # Convert to s
    td += drift * td_time
    taumodel = pm.Deterministic('taumodel', td - tt.mean(td))
    pm.Normal('obs', mu=taumodel, sd=tt.exp(logs_lc), observed=td_td)
    plt.plot(td_time, xo.eval_in_model(taumodel))
    plt.plot(td_time, td_td)
with subdivide_model:
    opt = xo.optimize()
opt
with subdivide_model:
    trace = pm.sample(draws=2000, tune=2000, chains=2, start=opt)
pm.summary(trace)
# NOTE(review): 'phi' appears twice in this list, so it is printed twice.
varnames=['period', 'phi', 'eccen', 'asini', 'omega', 'phi', 'drift']
rounding = 2
for varname in varnames:
    upper, med, lower = np.percentile(trace[varname], [84.13, 50, 15.86])
    print(varname, ': ', np.round(med,rounding), ' + ', np.round(upper - med,rounding), ' - ', np.round(med - lower,rounding))
from maelstrom.utils import mass_function
import astropy.units as u
rounding = 3
samples = pm.trace_to_dataframe(trace, varnames=['period', 'asini'])
mfs = mass_function(samples['period'].values * u.day, samples['asini'].values*u.s)
#mfs = np.array(mfs)
upper, med, lower = np.percentile(mfs.value, [84.13, 50, 15.86])
print('mass_func', ': ', np.round(med,rounding), ' + ', np.round(upper - med,rounding), ' - ', np.round(med - lower,rounding))
plt.scatter(td_time % 272 / 272, np.median(trace['taumodel'], axis=0))
```
github_jupyter
``` import pandas as pd import numpy as np import geopandas as gpd import psycopg2 from geoalchemy2 import Geometry, WKTElement from sqlalchemy import * from shapely.geometry import MultiPolygon from zipfile import ZipFile import requests import sys import yaml with open('../../config/postgres.yaml') as f: engine_configs = yaml.load(f, Loader=yaml.FullLoader) try: engine = create_engine('postgresql://{username}:{password}@{host}:{port}/{dbname}'.format(**engine_configs)) except Exception as e: print("Uh oh, can't connect. Invalid dbname, user or password?") print(e) def process_geometry_SQL_insert(gdf): gdf['geom'] = gdf['geometry'].apply(lambda x: WKTElement((MultiPolygon([x]) if x.geom_type == 'Polygon' else x).wkt, srid=4326)) gdf = gdf.drop('geometry', 1) return gdf # Often when reading in a ShapeFile from Basemap, you'll get: "ValueError: readshapefile can only handle 2D shape types" # A trick can be to convert your geometry in your GeoPandas Dataframe and restoring the new flattened 2D geometry # series back into a shapefile and try again. 
# edit from http://stackoverflow.com/questions/33417764/basemap-readshapefile-valueerror
from shapely.geometry import Polygon, MultiPolygon, shape, Point
def convert_3D_2D(geometry):
    '''
    Takes a GeoSeries of 3D Multi/Polygons (has_z) and returns a list of 2D Multi/Polygons
    '''
    # NOTE(review): geometries that are already 2D (has_z False) are skipped
    # entirely, so the returned list can be shorter than the input; assigning
    # it back to a GeoSeries (as done below) only works if *every* input
    # geometry is 3D. Also, `for ap in p` iterates a MultiPolygon directly,
    # which was removed in shapely 2.0 (use p.geoms there) -- confirm the
    # pinned shapely version.
    new_geo = []
    for p in geometry:
        if p.has_z:
            if p.geom_type == 'Polygon':
                lines = [xy[:2] for xy in list(p.exterior.coords)]
                new_p = Polygon(lines)
                new_geo.append(new_p)
            elif p.geom_type == 'MultiPolygon':
                new_multi_p = []
                for ap in p:
                    lines = [xy[:2] for xy in list(ap.exterior.coords)]
                    new_p = Polygon(lines)
                    new_multi_p.append(new_p)
                new_geo.append(MultiPolygon(new_multi_p))
    return new_geo
# Pipeline constants: all rows are tagged with CITY; NEIGHBORHOOD_SIZE is the
# ego-neighborhood radius in meters (805 m ~ 0.5 mile, 1609 m ~ 1 mile).
CITY='chicago'
NEIGHBORHOOD_SIZE = 805  # 805 OR 1609
# City boundary -> 'boundary' table.
bounds_gdf = gpd.read_file('../../data/chicago/boundary/chicago_cook.geojson')
bounds_gdf = bounds_gdf[['geometry']]
bounds_gdf['city'] = CITY
bounds_gdf.head()
insert_gdf = process_geometry_SQL_insert(bounds_gdf)
insert_gdf.to_sql('boundary', engine, if_exists='append', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
```

## Spatial groups and blocks_group

```
# Census block groups: load, flatten to 2D, clip to the city boundary, and
# insert into 'blocks_group' (keeping only groups with >50% overlap).
block_groups_gdf = gpd.read_file('zip://../../data/chicago/blocks_group/cb_2014_17_bg_500k_edited.zip')
block_groups_gdf = block_groups_gdf[['GEOID', 'geometry']]
# NOTE(review): {'init': 'epsg:4326'} CRS dicts and sjoin's op= keyword are
# deprecated in newer pyproj/geopandas ('EPSG:4326', predicate=).
block_groups_gdf = block_groups_gdf.to_crs({'init': 'epsg:4326'})
block_groups_gdf.geometry = convert_3D_2D(block_groups_gdf.geometry)
block_groups_gdf.head()
block_groups_gdf = gpd.sjoin(block_groups_gdf, bounds_gdf, how="inner", op='intersects').drop('index_right', axis=1)
block_groups_gdf = block_groups_gdf.rename(columns={'GEOID': 'original_id'})
block_groups_gdf['city'] = CITY
insert_gdf = process_geometry_SQL_insert(block_groups_gdf)
insert_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ INSERT INTO blocks_group (original_id, city, geom) SELECT s.original_id, s.city, ST_Multi(ST_Intersection(s.geom, b.geom)) FROM temptable_{tempname} as s INNER JOIN boundary b ON ST_Intersects(s.geom, b.geom) AND NOT ST_Touches(s.geom, b.geom) AND s.city=b.city where s.city='{city}' and ST_Area(ST_Intersection(s.geom, b.geom))/ST_Area(s.geom) > 0.5; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

### Neighborhoods

```
# Build 'ego' spatial groups: each block group plus all groups within
# NEIGHBORHOOD_SIZE meters (or touching); tiny groups (<0.25 km^2) dropped.
sql = """INSERT INTO spatial_groups (city, core_geom, core_id, lower_ids, spatial_name, approx_geom) SELECT a.city, a.geom as core_geom, a.bid as core_id, array_agg(b.bid), 'ego', ST_multi(ST_Union(b.geom)) FROM blocks_group a INNER JOIN blocks_group b ON a.city = b.city AND (a.bid = b.bid OR ST_DWithin(a.geom::geography, ST_Centroid(b.geom)::geography, {distance}) OR st_touches(a.geom, b.geom)) where a.city='{city}' GROUP BY a.bid, a.geom, a.city; delete from spatial_groups where ST_Area(approx_geom::geography) < 250000 and spatial_name='ego'; """.format(city=CITY, tempname=CITY.lower(), distance=NEIGHBORHOOD_SIZE)
result = engine.execute(text(sql))
```

## Blocks

```
# Census blocks: assign each block to the block group with the largest overlap
# (ROW_NUMBER ... PARTITION BY geom keeps the best match), then clip to the
# boundary and drop block groups left without blocks.
block_gdf = gpd.read_file('zip://../../data/chicago/block/tl_2014_17_tabblock10.zip')
block_gdf = block_gdf[['geometry']]
block_gdf = block_gdf.to_crs({'init': 'epsg:4326'})
block_gdf.head()
insert_gdf = process_geometry_SQL_insert(block_gdf)
insert_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ insert into block (sp_id, geom, city, geog, greater_1sm) select bid, geom, city, geom::geography, ST_AREA(geom::geography)>2.59e+6 from( SELECT bid, st_multi(geom) as geom, city, ROW_NUMBER() OVER (PARTITION BY geom ORDER by area DESC) AS r from ( select b.bid, c.geom, b.city, ST_Area(ST_Intersection(b.geom, c.geom)) as area from temptable_{tempname} as c inner join blocks_group as b on ST_Intersects(b.geom, c.geom) where b.city = '{city}' ) as dtable ) x WHERE x.r = 1; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ UPDATE block AS b SET geom=ST_Multi(ST_Intersection(b.geom, s.geom)) FROM boundary AS s WHERE ST_Intersects(b.geom, s.geom) AND b.city=s.city AND s.city='{city}' AND NOT ST_Contains(s.geom, b.geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
## Some blocks_group do not have blocks
sql = """ DELETE FROM blocks_group bg WHERE NOT EXISTS(SELECT * FROM block b WHERE b.sp_id = bg.bid) AND bg.city='{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Census

```
# ACS employment (B23025), population (B01003) and dwellings (B25001) tables,
# merged on block-group GEOID and inserted into 'census'.
zip_file = ZipFile('../../data/chicago/employment/ACS_14_5YR_B23025.zip')
zip_file.infolist()
employment_df = pd.read_csv(zip_file.open('ACS_14_5YR_B23025_with_ann.csv'))
employment_df = employment_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD02': 'inforce', 'HD01_VD04': 'employed', 'HD01_VD06': 'armed'})
employment_df = employment_df[['original_id', 'inforce', 'employed', 'armed']]
# Skip first header line
employment_df = employment_df[employment_df['original_id'] != 'Id2']
employment_df['inforce'] = employment_df['inforce'].astype(int)
employment_df['employed'] = employment_df['employed'].astype(int)
employment_df['armed'] = employment_df['armed'].astype(int)
employment_df.head()
zip_file = ZipFile('../../data/chicago/population/ACS_14_5YR_B01003.zip')
zip_file.infolist()
pop_df = pd.read_csv(zip_file.open('ACS_14_5YR_B01003_with_ann.csv'))
pop_df = pop_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD01': 'population'})
pop_df = pop_df[['original_id', 'population']]
# Skip first header line
pop_df = pop_df[pop_df['original_id'] != 'Id2']
pop_df['population'] = pop_df['population'].astype(int)
pop_df.head()
zip_file = ZipFile('../../data/chicago/population/ACS_14_5YR_B25001.zip')
zip_file.infolist()
dwellings_df = pd.read_csv(zip_file.open('ACS_14_5YR_B25001_with_ann.csv'), dtype={'GEO.id2': str})
dwellings_df = dwellings_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD01': 'dwellings'})
dwellings_df = dwellings_df[['original_id', 'dwellings']]
dwellings_df['dwellings'] = dwellings_df['dwellings'].astype(int)
dwellings_df.head()
print(len(pop_df))
census_df = pd.merge(employment_df, pop_df, on='original_id')
census_df = pd.merge(census_df, dwellings_df, on='original_id')
print(len(census_df))
census_df.head()
census_df.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False)
sql = """ insert into census (bid, population, employed, inforce, tot_survey, dwellings, city) select b.bid, c.population, c.employed+c.armed, c.inforce, c.population, c.dwellings, '{city}' from temptable_{tempname} c inner join blocks_group b on b.original_id = c.original_id where b.city = '{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

### Residential stability

```
# ACS geographic mobility (B07201) + tenure (B25003) -> 'residential_stability'.
zip_file = ZipFile('../../data/chicago/residential_stability/ACS_14_5YR_B07201.zip')
zip_file.infolist()
stab_df = pd.read_csv(zip_file.open('ACS_14_5YR_B07201.csv'))
stab_df = stab_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD01': 'total', 'HD01_VD02': 'stable'})
stab_df = stab_df[['original_id', 'total', 'stable']]
# Skip first header line
stab_df = stab_df[stab_df['original_id'] != 'Id2']
stab_df['total'] = stab_df['total'].astype(int)
stab_df['stable'] = stab_df['stable'].astype(int)
stab_df.head()
zip_file = ZipFile('../../data/chicago/tenure/ACS_14_5YR_B25003.zip')
zip_file.infolist()
tenure_df = pd.read_csv(zip_file.open('ACS_14_5YR_B25003.csv'))
tenure_df = tenure_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD01': 'total2', 'HD01_VD02': 'owner'})
tenure_df = tenure_df[['original_id', 'total2', 'owner']]
# Skip first header line
tenure_df = tenure_df[tenure_df['original_id'] != 'Id2']
tenure_df['total2'] = tenure_df['total2'].astype(int)
tenure_df['owner'] = tenure_df['owner'].astype(int)
tenure_df.head()
res_stability_df = pd.merge(stab_df, tenure_df, on='original_id')
res_stability_df.head()
res_stability_df.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False)
sql = """ INSERT INTO residential_stability (bid, city, total, stable, total2, owner) SELECT b.bid, '{city}', c.total, c.stable, c.total2, c.owner FROM temptable_{tempname} c INNER JOIN blocks_group b ON b.original_id = c.original_id where b.city = '{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

### Ethnic diversity

```
# ACS race (B02001): the three "other" columns are summed into one 'other'.
zip_file = ZipFile('../../data/chicago/ethnic_diversity/ACS_14_5YR_B02001.zip')
zip_file.infolist()
eth_df = pd.read_csv(zip_file.open('ACS_14_5YR_B02001.csv'))
eth_df = eth_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD02': 'white', 'HD01_VD03': 'black', 'HD01_VD04': 'native', 'HD01_VD05': 'asian', 'HD01_VD06': 'native2', 'HD01_VD08': 'o1', 'HD01_VD09': 'o2', 'HD01_VD10': 'o3'})
eth_df = eth_df[['original_id', 'white', 'black', 'asian', 'native', 'native2', 'o1', 'o2', 'o3']]
# Skip first header line
eth_df = eth_df[eth_df['original_id'] != 'Id2']
for x in ['white', 'black', 'asian', 'native', 'native2', 'o1', 'o2', 'o3']:
    eth_df[x] = eth_df[x].astype(int)
eth_df['other'] = eth_df['o1'] + eth_df['o2'] + eth_df['o3']
eth_df = eth_df.drop(['o1', 'o2', 'o3'], axis=1)
eth_df.head()
eth_df.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False)
sql = """ INSERT INTO ethnic_diversity (bid, city, race1, race2, race3, race4, race5, race6) SELECT b.bid, '{city}', c.white, c.black, c.native, c.asian, c.native2, c.other FROM temptable_{tempname} c INNER JOIN blocks_group b ON b.original_id = c.original_id where b.city = '{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

### Poverty

```
# ACS income-to-poverty ratio (C17002): rows under 0.50 and 0.50-0.99 are
# summed into 'poors'.
zip_file = ZipFile('../../data/chicago/poverty/ACS_14_5YR_C17002.zip')
zip_file.infolist()
pov_df = pd.read_csv(zip_file.open('ACS_14_5YR_C17002_with_ann.csv'))
pov_df = pov_df.rename(columns={'GEO.id2': 'original_id', 'HD01_VD01': 'total', 'HD01_VD02': 'p50', 'HD01_VD03': 'p99'})
pov_df = pov_df[['original_id', 'total', 'p50', 'p99']]
# Skip first header line
pov_df = pov_df[pov_df['original_id'] != 'Id2']
for x in ['total', 'p50', 'p99']:
    pov_df[x] = pov_df[x].astype(int)
pov_df['poors'] = pov_df['p50'] + pov_df['p99']
pov_df = pov_df.drop(['p50', 'p99'], axis=1)
pov_df.head()
pov_df.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False)
sql = """ INSERT INTO poverty_index (bid, city, total, poors) SELECT b.bid, '{city}', c.total, c.poors FROM temptable_{tempname} c INNER JOIN blocks_group b ON b.original_id = c.original_id where b.city = '{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Buildings

```
# Building footprints: keep active, standard, addressable buildings >= 40 m^2,
# repair invalid geometries with a zero buffer, and assign each footprint to
# the block group with the largest overlap.
bld_gdf = gpd.read_file('zip://../../data/chicago/buildings/Building Footprints (deprecated August 2015).zip')
bld_gdf = bld_gdf[(bld_gdf["bldg_statu"] == 'ACTIVE') & (bld_gdf['non_standa'].isnull())]
bld_gdf = bld_gdf[~((bld_gdf["st_name1"].isnull()) & (bld_gdf["comments"].isnull()))]
bld_gdf = bld_gdf[['geometry', 'year_built']]
bld_gdf = bld_gdf[~(bld_gdf['geometry'].isnull())]
bld_gdf = bld_gdf.to_crs({'init': 'epsg:4326'})
bld_gdf.head()
insert_gdf = process_geometry_SQL_insert(bld_gdf)
insert_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ UPDATE temptable_{tempname} p SET geom=ST_Multi(ST_buffer(p.geom, 0.0)) WHERE NOT ST_ISValid(p.geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ INSERT INTO building (bid, city, geom, area) SELECT bid, '{city}', geom, barea FROM ( SELECT bid, geom, barea, ROW_NUMBER() OVER (PARTITION BY geom ORDER BY area DESC) AS r from ( SELECT p.geom, ST_Area(p.geom::geography) as barea, d.bid, ST_Area(ST_Intersection(p.geom, d.geom)) as area FROM temptable_{tempname} as p INNER JOIN blocks_group as d on ST_Intersects(p.geom, d.geom) AND NOT ST_Touches(p.geom, d.geom) WHERE d.city = '{city}' AND ST_Area(p.geom::geography) >= 40 ) as dtable order by area ) x WHERE x.r = 1; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Property age

```
# Same assignment as 'building', but keeps year_built (> 0 only) as the age.
sql = """ INSERT INTO property_age (bid, age, area, city) SELECT bid, age, area_building, '{city}' FROM ( SELECT bid, age, area_building, ROW_NUMBER() OVER (PARTITION BY geom ORDER BY area DESC) AS r from ( SELECT p.geom, p."year_built"::int as age, ST_Area(p.geom::geography) as area_building, d.bid, ST_Area(ST_Intersection(p.geom, d.geom)) as area FROM temptable_{tempname} as p INNER JOIN blocks_group as d on ST_Intersects(p.geom, d.geom) AND NOT ST_Touches(p.geom, d.geom) WHERE d.city = '{city}' AND ST_Area(p.geom::geography) >= 40 AND p."year_built"::int > 0 ) as dtable order by area ) x WHERE x.r = 1; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Land use

```
# Cook County parcels + land-use polygons; parcels get the land-use class of
# their largest overlap.
parcels_gdf = gpd.read_file('zip://../../data/chicago/land_use/ccgisdata - Parcel 2014.zip')
parcels_gdf = parcels_gdf.rename(columns={'pin14': 'pid'})
#land_gdf = land_gdf[['pid', 'sqftmain', 'usecode', 'usecode2', 'yearbuilt', 'geometry', 'value']]
parcels_gdf = parcels_gdf[~(parcels_gdf['geometry'].isnull())]
#land_gdf = land_gdf.to_crs({'init': 'epsg:4326'})
parcels_gdf.head()
ins_gdf = process_geometry_SQL_insert(parcels_gdf[['pid', 'geometry']].copy())
ins_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ UPDATE temptable_{tempname} p SET geom=ST_Multi(ST_buffer(p.geom, 0.0)) WHERE NOT ST_ISVALID(p.geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
land_gdf = gpd.read_file('zip://../../data/chicago/land_use/land_use.zip', dtype={'LANDUSE': str})
land_gdf = land_gdf.to_crs({'init': 'epsg:4326'})
land_gdf.head()
# Map CMAP LANDUSE codes onto coarse classes; anything unmatched stays 'none'.
land_gdf['landuse'] = 'none'
land_gdf.loc[(land_gdf['LANDUSE'].str[:2].isin({'11'})) | (land_gdf['LANDUSE'].isin({'1216'})), 'landuse'] = 'residential'
# Continue the LANDUSE-code mapping: commercial (minus a few excluded
# residential-support codes), recreational ('3x'), vacant ('4x').
land_gdf.loc[(land_gdf['LANDUSE'].str[:2].isin({'12', '13', '14', '15', '20'})) & (~land_gdf['LANDUSE'].isin({'1510', '1511', '1512', '1520', '1550', '1561', '1565'})), 'landuse'] = 'commercial'
land_gdf.loc[land_gdf['LANDUSE'].str[:1].isin({'3'}), 'landuse'] = 'recreational'
land_gdf.loc[land_gdf['LANDUSE'].str[:1].isin({'4'}), 'landuse'] = 'vacant'
land_gdf.head()
ins_gdf = process_geometry_SQL_insert(land_gdf[['landuse', 'LANDUSE', 'geometry']].rename(columns={'LANDUSE': 'use'}))
ins_gdf.to_sql('temptable2_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ UPDATE temptable2_{tempname} p SET geom=ST_Multi(ST_buffer(p.geom, 0.0)) WHERE NOT ST_ISVALID(p.geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
# NOTE(review): the inner join below hard-codes temptable2_chicago instead of
# temptable2_{tempname}; it works only because CITY='chicago'.
sql = """ DROP TABLE IF EXISTS temptable_parcels_{tempname}; CREATE TABLE temptable_parcels_{tempname} AS SELECT pid, landuse, geom FROM ( SELECT pid, landuse, geom, ROW_NUMBER() OVER (PARTITION BY pid ORDER BY area DESC) AS r from ( SELECT p.pid, p2.landuse, p.geom, ST_Area(ST_Intersection(p.geom, p2.geom)) as area FROM temptable_{tempname} as p INNER JOIN temptable2_chicago as p2 ON ST_Intersects(p.geom, p2.geom) AND NOT ST_Touches(p.geom, p2.geom) WHERE ST_Isvalid(p.geom) ) as dtable ) x WHERE x.r = 1; CREATE INDEX ON temptable_parcels_{tempname} USING GIST (geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
# Aggregate parcel areas per block group and land-use class.
sql = """ INSERT INTO land_uses (bid, city, use_type, area) SELECT bid, '{city}', landuse, SUM(area) FROM ( SELECT bid, landuse, area, ROW_NUMBER() OVER (PARTITION BY pid ORDER BY area DESC) AS r from ( SELECT p.pid, p.landuse, d.bid, ST_Area(ST_Intersection(p.geom, d.geom)::geography) as area FROM temptable_parcels_{tempname} as p INNER JOIN blocks_group as d on ST_Intersects(p.geom, d.geom) AND NOT ST_Touches(p.geom, d.geom) WHERE d.city = '{city}' AND p.landuse <> 'none' AND ST_Isvalid(p.geom) ) as dtable ) x WHERE x.r = 1 GROUP BY bid, landuse; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Land values

```
# Parcel attribute table (assessor CSV); only loaded/cleaned here.
zip_file = ZipFile('../../data/chicago/land_use/parcels.csv.zip')
zip_file.infolist()
parcels_df = pd.read_csv(zip_file.open('parcels.csv'), dtype={'pin14': str})
parcels_df = parcels_df[parcels_df.usecode > 0]
parcels_df = parcels_df.rename(columns={'pin14': 'pid'})
parcels_df['pid'] = parcels_df['pid'].astype(str)
parcels_df.head()
```

## Unused areas

```
# TIGER landmark polygons classified as parks (MTFCC K218x) and water bodies
# are stored as 'unused_areas'.
unused_gdf = gpd.read_file('zip://../../data/chicago/unused_areas/tl_2014_17_arealm.zip')
unused_gdf = unused_gdf[['geometry', 'MTFCC']]
unused_gdf = unused_gdf.to_crs({'init': 'epsg:4326'})
unused_gdf = unused_gdf[unused_gdf['MTFCC'].isin({'K2180', 'K2181', 'K2182', 'K2183', 'K2184', 'K2185', 'K2186', 'K2187', 'K2188', 'K2189', 'K2190'})].drop('MTFCC', axis=1)
unused_gdf['type'] = 'park'
unused_gdf['city'] = CITY
unused_gdf.head()
ins_gdf = process_geometry_SQL_insert(unused_gdf)
ins_gdf.to_sql('unused_areas', engine, if_exists='append', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
unused_gdf = gpd.read_file('zip://../../data/chicago/unused_areas/tl_2014_17031_areawater.zip')
unused_gdf = unused_gdf[['geometry']]
unused_gdf = unused_gdf.to_crs({'init': 'epsg:4326'})
unused_gdf['type'] = 'water'
unused_gdf['city'] = CITY
unused_gdf.head()
ins_gdf = process_geometry_SQL_insert(unused_gdf)
ins_gdf.to_sql('unused_areas', engine, if_exists='append', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ update unused_areas set geom=st_multi(st_buffer(geom, 0.0)) WHERE city = '{city}'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

```
# Parks/rivers layer: subtract already-stored unused areas, drop fully-covered
# polygons, then append the remainder to 'unused_areas'.
unused_gdf = gpd.read_file('../../data/chicago/unused_areas/parkandrivers.geojson')
unused_gdf = unused_gdf[['geometry']]
unused_gdf = unused_gdf.to_crs({'init': 'epsg:4326'})
unused_gdf['type'] = 'parksrivers'
unused_gdf['city'] = CITY
unused_gdf.head()
ins_gdf = process_geometry_SQL_insert(unused_gdf)
ins_gdf.to_sql('temptable_{tempname}'.format(tempname=CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ update temptable_{tempname} set geom=st_multi(st_buffer(geom, 0.0)) WHERE not st_isvalid(geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ DROP TABLE IF EXISTS temptable_unusedhelper_{tempname}; CREATE TEMPORARY TABLE temptable_unusedhelper_{tempname} AS SELECT ST_Union(geom) as geom FROM unused_areas u WHERE city='{city}'; DROP TABLE IF EXISTS temptable_unusedhelper_exp_{tempname}; CREATE TEMPORARY TABLE temptable_unusedhelper_exp_{tempname} AS SELECT (ST_Dump(geom)).geom FROM temptable_unusedhelper_{tempname} u; CREATE INDEX ON temptable_unusedhelper_exp_{tempname} USING GIST (geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ update temptable_{tempname} t set geom=ST_Multi(st_buffer(ST_Difference(t.geom, h.geom), 0.0)) FROM temptable_unusedhelper_{tempname} h WHERE st_intersects(t.geom, h.geom) AND (NOT ST_Touches(t.geom, h.geom)) AND ST_GeometryType(ST_Multi(ST_Difference(t.geom, h.geom))) <> 'ST_GeometryCollection'; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ DELETE FROM temptable_{tempname} t USING temptable_unusedhelper_exp_{tempname} h WHERE ST_Within(t.geom, h.geom) OR (st_intersects(t.geom, h.geom) AND (NOT ST_Touches(t.geom, h.geom)) AND ST_GeometryType(ST_Multi(ST_Difference(t.geom, h.geom))) = 'ST_GeometryCollection'); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ update temptable_{tempname} set geom=st_multi(st_buffer(geom, 0.0)); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ INSERT INTO unused_areas (geom, type, city) SELECT p.geom, p.type, p.city FROM temptable_{tempname} as p WHERE ST_Isvalid(p.geom) AND NOT EXISTS(SELECT * FROM unused_areas u WHERE ST_Intersects(u.geom, p.geom) AND u.city=p.city) """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Net area

```
# De-duplicate land-use polygons by centroid, remove those swallowed by unused
# areas, and compute each spatial group's usable area in km^2.
land_gdf_unique = land_gdf.copy()
land_gdf_unique.loc[:, 'x'] = land_gdf_unique.geometry.centroid.x
land_gdf_unique.loc[:, 'y'] = land_gdf_unique.geometry.centroid.y
land_gdf_unique = land_gdf_unique.drop_duplicates(subset=['x', 'y'])[['geometry', 'landuse']]
ins_gdf = process_geometry_SQL_insert(land_gdf_unique)
ins_gdf.to_sql('temptable_unique_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
sql = """ UPDATE temptable_unique_{tempname} p SET geom=ST_Multi(ST_buffer(p.geom, 0.0)) WHERE NOT ST_Isvalid(p.geom); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
## This deletes the blocks that are related to streets
sql = """ DELETE FROM block b WHERE city='{city}' and NOT EXISTS (select * from temptable_unique_{tempname} t where st_intersects(t.geom, b.geom) and t.landuse <> 'none'); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ DELETE FROM temptable_unique_{tempname} t USING unused_areas u WHERE u.city = '{city}' AND ST_Intersects(u.geom, t.geom) AND (NOT ST_Touches(u.geom, t.geom)) AND (ST_Contains(u.geom, t.geom) OR ST_AREA(ST_Intersection(t.geom, u.geom))/ST_Area(t.geom) > 0.5); """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """ INSERT INTO spatial_groups_net_area (sp_id, city, spatial_name, used_area) SELECT sp_id, city, spatial_name, SUM(ST_Area(ST_Intersection(s.approx_geom, t.geom)::geography))/1000000. FROM temptable_unique_{tempname} t INNER JOIN spatial_groups s ON ST_Intersects(s.approx_geom, t.geom) AND NOT ST_Touches(s.approx_geom, t.geom) WHERE s.city = '{city}' GROUP BY sp_id, city, spatial_name; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Vacuums

```
sql = """ REFRESH MATERIALIZED VIEW block_centroids; """
result = engine.execute(text(sql))
sql = """ REFRESH MATERIALIZED VIEW pois_requests; """
result = engine.execute(text(sql))
from collections import defaultdict
from joblib import Parallel, delayed
def make_trip(lon1, lat1, dest):
    # Query the local OSRM table service for walking distances (meters) from
    # one source point to a ';'-separated destination string; negative
    # (unroutable) entries are clamped to 0.
    # PERSONALIZE HERE
    r = requests.get(
        'http://localhost:5000/table/v1/foot/{lon1},{lat1};{dest}?annotations=distance&sources=0'.format(lon1=lon1, lat1=lat1, dest=dest))
    distances = r.json()['distances']
    distance = np.array(distances[0][1:], np.float32)
    distance[distance < 0] = 0
    return distance
def walkscore_list(bid, clon, clat, list_dests, ws, straight_distances):
    # Weighted average of straight-line/network distance ratios; -1 when the
    # weights are empty or sum to zero. Zero network distances become 1 m to
    # avoid division by zero.
    dists = make_trip(clon, clat, list_dests)
    straight_distances = np.array(straight_distances)
    #good_idxs = np.argwhere(dists < 1600)
    ws = np.array(ws)+ 0.00000001 #[good_idxs]
    zeros_idxs = np.argwhere(dists == 0)
    dists[zeros_idxs] = 1
    if np.sum(ws) == 0 or len(ws) == 0:
        return bid, -1
    return bid, np.average(straight_distances/dists, weights=ws) #[good_idxs] [good_idxs]
# Per-category amenity weights (Walk Score-style); list length = number of
# amenities of that category that count, in decreasing importance.
cat_weights = {
    'grocery': [3],
    'Food': [.75,.45,.25,.25,.225,.225,.225,.225,.2,.2],
    'Shops': [.5,.45,.4,.35,.3],
    'Schools': [1],
    'Entertainment': [1],
    'Parks and outside': [1],
    'Coffee': [1.25,.75],
    'Banks': [1],
    'Books': [1]
}
def walkscore(meters):
    # Distance-decay kernel: ~1 near 0 m, falling towards 0 past max_walk.
    max_walk = 1500
    score = np.exp(-5 * (meters / max_walk) ** 2.5)
    score = np.clip(score, 0, 1)
    return score
def walkscore2_list(bid, clon, clat, list_dests, c):
    # Category walk score: decay-score each destination, keep the best
    # len(cat_weights[c]) of them, and take the weighted sum.
    dists = make_trip(clon, clat, list_dests)
    #good_idxs = np.argwhere(dists < 1600)
    scores = np.sort(walkscore(dists))[::-1]
    n = len(cat_weights[c])
    d = np.zeros(n)
    d[:scores.shape[0]] = scores[:n]
    w = np.sum(np.array(d)*np.array(cat_weights[c]))
    assert w <= np.sum(cat_weights[c]) and w >= 0
    return bid, w #[good_idxs] [good_idxs]
```

```
# Fan the POI requests out over 10 workers, then normalise each block's summed
# category scores into a [0, 1] walkability index.
sql = """ SELECT bid, lon, lat, dests, parent_cat FROM pois_requests p WHERE p.city = '{city}' """.format(city=CITY, tempname=CITY.lower())
blocks_df = pd.read_sql_query(sql, con=engine)
blocks_df.head()
sql = """ SELECT bid, COUNT(*) as size FROM block_centroids b WHERE b.city = '{city}' GROUP BY bid ORDER BY bid """.format(city=CITY, tempname=CITY.lower())
n_blocks_df = pd.read_sql_query(sql, con=engine).set_index('bid')
n_blocks_df.head()
block_groups = defaultdict(list)
for index, row in blocks_df.iterrows():
    block_groups[row['bid']].append(row.values[1:])
from tqdm import tqdm
results = [(idx, score) for idx, score in Parallel(n_jobs=10)(delayed(walkscore2_list)(bid, req[0], req[1], req[2], req[3]) for bid, reqs in tqdm(block_groups.items()) for req in reqs)]
block_vacuum_index = defaultdict(list)
bid2size = {k: v['size'] for k, v in n_blocks_df.iterrows()}
for bid, score in results:
    block_vacuum_index[bid].append(score)
sum_cat_weights = np.sum([y for x in cat_weights.values() for y in x])
for bid, score in block_vacuum_index.items():
    if len(score) > 0:
        score = (np.sum(score)/bid2size[bid])/sum_cat_weights
        assert score <= 1.01
        # NOTE(review): values are internal numerics, but a parameterized
        # query would be safer than string formatting here.
        sql = "INSERT INTO walk_index (bid, score, city) VALUES ({}, {}, '{}')".format(bid, score, CITY)
        result = engine.execute(text(sql))
```

## Crime

```
# 2014 Chicago crime incidents: keep the needed columns, parse timestamps and
# split the "(lat, lng)" Location string into coordinates.
df = pd.read_csv('../../data/chicago/crime/Crimes_-_2014.csv')
df.head()
df = df[['Date', 'IUCR', 'Description', 'Latitude', 'Location', 'Community Area']]
df.head()
print(df.count())
df = df.dropna()
print(df.count())
df.head()
df['datetime'] = pd.to_datetime(df['Date'], format='%m/%d/%Y %I:%M:%S %p')
df.head()
# NOTE(review): str.replace historically defaults to regex patterns, where a
# bare '(' is invalid -- pass regex=False on newer pandas. Confirm the pinned
# version.
df['Location'] = df['Location'].str.replace('(', '')
df['Location'] = df['Location'].str.replace(')', '')
df['lng'] = df['Location'].str.split(', ').str[1]
df['lat'] = df['Location'].str.split(', ').str[0]
df['num'] = 1
df.head()
```

### Crime types

```
# IUCR -> index-crime flag: keep only 'I' (index) crimes, after reclassifying
# recoveries and larceny-theft as non-index.
crime_types_df = pd.read_csv('../../data/crime_types/Chicago_Police_Department_-_Illinois_Uniform_Crime_Reporting__IUCR__Codes.csv', dtype='str')
crime_types_df['IUCR'] = crime_types_df['IUCR'].str.zfill(4)
crime_types_df.head()
crime_types_df.loc[(crime_types_df['INDEX CODE'] == 'I') & (crime_types_df['SECONDARY DESCRIPTION'].str.contains('RECOVERY')), 'INDEX CODE'] = 'N'
crime_types_df[crime_types_df['INDEX CODE'] == 'I']
crime_types_df.loc[(crime_types_df['UCR1'] == 'Larceny-theft (except motor vehicle theft)'), 'INDEX CODE'] = 'N'
print(df['num'].count())
df = pd.merge(df, crime_types_df, on='IUCR')
print(df['num'].count())
df = df[df['INDEX CODE'] == 'I']
print(df['num'].count())
```

#### Subtypes of crimes

```
# Attach UCR part-1 subtype/category labels, build point geometries, and count
# incidents per block group (30 m buffer around each point; ROW_NUMBER
# dedupes incidents matching several block groups).
ucr_crimes_df = pd.read_csv('../../data/crime_types/UCR_crimes.csv')
ucr_crimes_df.head()
df_ucr1 = pd.merge(df, ucr_crimes_df.rename(columns={'Name': 'UCR1'}), on='UCR1')
a = set(df_ucr1['UCR1'].drop_duplicates().values)
b = set(df['UCR1'].drop_duplicates().values)
assert(a.intersection(b) == a)
# Categories not present in crime dataset
df_ucr1[~(df_ucr1['UCR1'].isin(b))]
df_ucr1['lng'] = df_ucr1['lng'].astype('float32')
df_ucr1['lat'] = df_ucr1['lat'].astype('float32')
df_2014 = df_ucr1[df_ucr1['datetime'].dt.year == 2014][['lng', 'lat', 'Description', 'num', 'UCR1', 'Category']]
df_2014 = df_2014.rename(columns={'Description': 'description'})
df_2014.count()
from geopandas import GeoDataFrame
from shapely.geometry import Point
geometry = [Point(xy) for xy in zip(df_2014.lng, df_2014.lat)]
# NOTE(review): 'lng' is listed twice and 'lat' is never dropped -- likely
# intended to be ['lng', 'lat'].
df_2014 = df_2014.drop(['lng', 'lng'], axis=1)
crs = {'init': 'epsg:4326'}
gdf = GeoDataFrame(df_2014, crs=crs, geometry=geometry)
gdf.head()
insert_gdf = process_geometry_SQL_insert(gdf)
insert_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=True, dtype={'geom': Geometry('Point', srid=4326)})
sql = """ insert into crime (sp_id, num, city, ucr1, ucr_category) select bid, SUM(num), '{city}', "UCR1", "Category" from( SELECT num, bid, "UCR1", "Category", ROW_NUMBER() OVER (PARTITION BY index) AS r from ( select c.index, c.num, b.bid, "UCR1", "Category" from temptable_{tempname} as c inner join blocks_group as b on ST_Intersects(b.geom, st_buffer(c.geom::geography, 30)::geometry) where b.city='{city}' ) as dtable ) x group by bid, "UCR1", "Category"; """.format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
```

## Refresh materialized views

```
sql = """ REFRESH MATERIALIZED VIEW spatial_groups_unused_areas; """
result = engine.execute(text(sql))
sql = """ REFRESH MATERIALIZED VIEW block_building; """
result = engine.execute(text(sql))
sql = """ REFRESH MATERIALIZED VIEW blocks_group_with_building; """
result = engine.execute(text(sql))
# NOTE(review): stray cell-output residue from the notebook dump; a harmless
# bare integer expression.
2
```
github_jupyter
``` from google.colab import drive drive.mount('/content/drive', force_remount=True) import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os # Installing Nvidia Apex ! pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" "drive/My Drive/kaggle/Jigsaw/repository/NVIDIA-apex-39e153a" from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import pkg_resources import seaborn as sns import time import scipy.stats as stats import gc import re import operator import sys from sklearn import metrics from sklearn import model_selection import torch import torch.nn as nn import torch.utils.data import torch.nn.functional as F from nltk.stem import PorterStemmer from sklearn.metrics import roc_auc_score %load_ext autoreload %autoreload 2 %matplotlib inline from tqdm import tqdm, tqdm_notebook import os from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import warnings warnings.filterwarnings(action='once') import pickle from apex import amp import shutil device=torch.device('cuda') MAX_SEQUENCE_LENGTH = 220 SEED = 13 EPOCHS = 1 Data_dir="drive/My Drive/kaggle/Jigsaw/" Input_dir = "drive/My Drive/kaggle/Jigsaw/" WORK_DIR = "drive/My Drive/kaggle/Jigsaw/working/" num_to_load=1804874 #Train size to match time limit valid_size= 0 #Validation Size TOXICITY_COLUMN = 'target' !pip install regex # # Add the Bert Pytorch repo to the PATH # # using files from: https://github.com/huggingface/pytorch-pretrained-BERT package_dir_a = "drive/My Drive/kaggle/Jigsaw/repository/pytorch-pretrained-BERT" sys.path.insert(0, package_dir_a) from pytorch_pretrained_bert import convert_tf_checkpoint_to_pytorch from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification,BertAdam from pytorch_pretrained_bert.modeling import BertForPreTraining, BertPreTrainedModel, 
BertModel, BertConfig, BertForMaskedLM, BertForSequenceClassification # Translate model from tensorflow to pytorch BERT_MODEL_PATH = 'drive/My Drive/kaggle/Jigsaw/uncased_L-12_H-768_A-12/' convert_tf_checkpoint_to_pytorch.convert_tf_checkpoint_to_pytorch(BERT_MODEL_PATH + 'bert_model.ckpt', BERT_MODEL_PATH + 'bert_config.json', WORK_DIR + 'pytorch_model.bin') shutil.copyfile(BERT_MODEL_PATH + 'bert_config.json', WORK_DIR + 'bert_config.json') # # This is the Bert configuration file from pytorch_pretrained_bert import BertConfig bert_config = BertConfig('drive/My Drive/kaggle/Jigsaw/uncased_L-12_H-768_A-12/'+'bert_config.json') #Reduce the memory usage def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('Mem. 
usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem)) return df # Converting the lines to BERT format # Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming def convert_lines(example, max_seq_length,tokenizer): max_seq_length -=2 all_tokens = [] longer = 0 for text in tqdm_notebook(example): tokens_a = tokenizer.tokenize(text) if len(tokens_a)>max_seq_length: tokens_a = tokens_a[:max_seq_length] longer += 1 one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a)) all_tokens.append(one_token) print(longer) return np.array(all_tokens) symbols_to_isolate = '.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁' symbols_to_delete = 
'\n🍕\r🐵😑\xa0\ue014\t\uf818\uf04a\xad😢🐶️\uf0e0😜😎👊\u200b\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\u202a\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\x81エンジ故障\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\ufeff\u2028😉😤⛺🙂\u3000تحكسة👮💙فزط😏🍾🎉😞\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\u200a🌠🐟💫💰💎эпрд\x95🖐🙅⛲🍰🤐👆🙌\u2002💛🙁👀🙊🙉\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\x13🚬🤓\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\uf0b7\uf04c\x9f\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\u202d💤🍇\ue613小土豆🏡❔⁉\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\x9c\x9d🗑\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\u2000үսᴦᎥһͺ\u2007հ\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\uf203\uf09a\uf222\ue608\uf202\uf099\uf469\ue607\uf410\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\uf10aლڡ🐦\U0001f92f\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼' from nltk.tokenize.treebank import TreebankWordTokenizer tokenizer1 = TreebankWordTokenizer() isolate_dict = {ord(c):f' {c} ' for c in symbols_to_isolate} remove_dict = {ord(c):f'' for c in symbols_to_delete} def handle_punctuation(x): x = x.translate(remove_dict) x = x.translate(isolate_dict) return x def handle_contractions(x): x = 
tokenizer1.tokenize(x) return x def fix_quote(x): x = [x_[1:] if x_.startswith("'") else x_ for x_ in x] x = ' '.join(x) return x def preprocess(x): x = handle_punctuation(x) x = handle_contractions(x) x = fix_quote(x) return x tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,do_lower_case=True) train_df = reduce_mem_usage(pd.read_csv("drive/My Drive/kaggle/Jigsaw/train.csv").sample(num_to_load+valid_size,random_state=SEED)) print('loaded %d records' % len(train_df)) # Make sure all comment_text values are strings train_df['comment_text'] = train_df['comment_text'].astype(str) # import multiprocessing as mp # pool = mp.Pool(processes= 2) # train_df['comment_text'] = pool.map(preprocess, list(train_df['comment_text'])) # pool.close sequences = convert_lines(train_df["comment_text"].fillna("DUMMY_VALUE"),MAX_SEQUENCE_LENGTH,tokenizer) train_df=train_df.fillna(0) # List all identities identity_columns = [ 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness'] y_columns=['target'] y_aux_train = train_df[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat','sexual_explicit']][:num_to_load] # Overall weights = np.ones((num_to_load+valid_size,)) / 4 # Subgroup weights += (train_df[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) / 4 # Background Positive, Subgroup Negative weights += (( (train_df['target'].values>=0.5).astype(bool).astype(np.int) + (train_df[identity_columns].fillna(0).values<0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4 # Background Negative, Subgroup Positive weights += (( (train_df['target'].values<0.5).astype(bool).astype(np.int) + (train_df[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4 loss_weight = 1.0 / weights.mean() train_df = train_df.drop(['comment_text'],axis=1) # convert 
target to 0,1 train_df['target']=(train_df['target']>=0.5).astype(float) y_aux_train = train_df[['severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat','sexual_explicit']][:num_to_load] # X = sequences[:num_to_load] y = train_df[y_columns].values[:num_to_load] X_val = sequences[num_to_load:] y_val = train_df[y_columns].values[num_to_load:] weights = weights[:num_to_load].reshape(num_to_load,1) y = np.hstack([y,weights]) y_train = np.hstack([y, y_aux_train]) test_df=train_df.tail(valid_size).copy() train_df=train_df.head(num_to_load) train_dataset = torch.utils.data.TensorDataset(torch.tensor(X,dtype=torch.long), torch.tensor(y_train,dtype=torch.float)) def custom_loss(data, targets): ''' Define custom loss function for weighted BCE on 'target' column ''' bce_loss_1 = nn.BCEWithLogitsLoss(weight=targets[:,1:2])(data[:,:1],targets[:,:1]) bce_loss_2 = nn.BCEWithLogitsLoss()(data[:,1:],targets[:,2:]) return (bce_loss_1 * loss_weight) + bce_loss_2 class BertForSequenceClassification1(BertPreTrainedModel): def __init__(self, config, num_labels): super(BertForSequenceClassification1, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits lr=2e-5 batch_size = 32 accumulation_steps=1 np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True model = 
BertForSequenceClassification1.from_pretrained("bert-base-uncased",cache_dir=None,num_labels=len(y_columns) + y_aux_train.shape[1]) model.zero_grad() model = model.to(device) param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] train = train_dataset num_train_optimization_steps = int(EPOCHS*len(train)/batch_size/accumulation_steps) optimizer = BertAdam(optimizer_grouped_parameters, lr=lr, warmup=0.05, t_total=num_train_optimization_steps) model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0) model = model.train() tq = tqdm_notebook(range(EPOCHS)) for epoch in tq: train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) avg_loss = 0. avg_accuracy = 0. 
lossf=None tk0 = tqdm(enumerate(train_loader),total=len(train_loader),leave=False) optimizer.zero_grad() for i,(x_batch, y_batch) in tk0: y_pred = model(x_batch.to(device), attention_mask=(x_batch>0).to(device), labels=None) # loss = F.binary_cross_entropy_with_logits(y_pred,y_batch.to(device)) loss = custom_loss(y_pred,y_batch.to(device)) loss = loss / accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if (i+1) % accumulation_steps == 0: # Wait for several backward steps optimizer.step() # Now we can do an optimizer step optimizer.zero_grad() if lossf: lossf = 0.98*lossf+0.02*loss.item() else: lossf = loss.item() tk0.set_postfix(loss = lossf) avg_loss += loss.item() / len(train_loader) avg_accuracy += torch.mean(((torch.sigmoid(y_pred[:,0])>0.5) == (y_batch[:,0]>0.5).to(device)).to(torch.float) ).item()/len(train_loader) tq.set_postfix(avg_loss=avg_loss,avg_accuracy=avg_accuracy) output_model_file = "bert_pytorch.bin" path = F"drive/My Drive/kaggle/Jigsaw/{output_model_file}" torch.save(model.state_dict(), path) # Run validation # The following 2 lines are not needed but show how to download the model for prediction model = BertForSequenceClassification(bert_config,num_labels=len(y_columns)) model.load_state_dict(torch.load(output_model_file )) model.to(device) for param in model.parameters(): param.requires_grad=False model.eval() valid_preds = np.zeros((len(X_val))) valid = torch.utils.data.TensorDataset(torch.tensor(X_val,dtype=torch.long)) valid_loader = torch.utils.data.DataLoader(valid, batch_size=32, shuffle=False) tk0 = tqdm_notebook(valid_loader) for i,(x_batch,) in enumerate(tk0): pred = model(x_batch.to(device), attention_mask=(x_batch>0).to(device), labels=None) valid_preds[i*32:(i+1)*32]=pred[:,0].detach().cpu().squeeze().numpy() # From baseline kernel def calculate_overall_auc(df, model_name): true_labels = df[TOXICITY_COLUMN]>0.5 predicted_labels = df[model_name] return metrics.roc_auc_score(true_labels, 
predicted_labels) def power_mean(series, p): total = sum(np.power(series, p)) return np.power(total / len(series), 1 / p) def get_final_metric(bias_df, overall_auc, POWER=-5, OVERALL_MODEL_WEIGHT=0.25): bias_score = np.average([ power_mean(bias_df[SUBGROUP_AUC], POWER), power_mean(bias_df[BPSN_AUC], POWER), power_mean(bias_df[BNSP_AUC], POWER) ]) return (OVERALL_MODEL_WEIGHT * overall_auc) + ((1 - OVERALL_MODEL_WEIGHT) * bias_score) SUBGROUP_AUC = 'subgroup_auc' BPSN_AUC = 'bpsn_auc' # stands for background positive, subgroup negative BNSP_AUC = 'bnsp_auc' # stands for background negative, subgroup positive def compute_auc(y_true, y_pred): try: return metrics.roc_auc_score(y_true, y_pred) except ValueError: return np.nan def compute_subgroup_auc(df, subgroup, label, model_name): subgroup_examples = df[df[subgroup]>0.5] return compute_auc((subgroup_examples[label]>0.5), subgroup_examples[model_name]) def compute_bpsn_auc(df, subgroup, label, model_name): """Computes the AUC of the within-subgroup negative examples and the background positive examples.""" subgroup_negative_examples = df[(df[subgroup]>0.5) & (df[label]<=0.5)] non_subgroup_positive_examples = df[(df[subgroup]<=0.5) & (df[label]>0.5)] examples = subgroup_negative_examples.append(non_subgroup_positive_examples) return compute_auc(examples[label]>0.5, examples[model_name]) def compute_bnsp_auc(df, subgroup, label, model_name): """Computes the AUC of the within-subgroup positive examples and the background negative examples.""" subgroup_positive_examples = df[(df[subgroup]>0.5) & (df[label]>0.5)] non_subgroup_negative_examples = df[(df[subgroup]<=0.5) & (df[label]<=0.5)] examples = subgroup_positive_examples.append(non_subgroup_negative_examples) return compute_auc(examples[label]>0.5, examples[model_name]) def compute_bias_metrics_for_model(dataset, subgroups, model, label_col, include_asegs=False): """Computes per-subgroup metrics for all subgroups and one model.""" records = [] for subgroup in 
subgroups: record = { 'subgroup': subgroup, 'subgroup_size': len(dataset[dataset[subgroup]>0.5]) } record[SUBGROUP_AUC] = compute_subgroup_auc(dataset, subgroup, label_col, model) record[BPSN_AUC] = compute_bpsn_auc(dataset, subgroup, label_col, model) record[BNSP_AUC] = compute_bnsp_auc(dataset, subgroup, label_col, model) records.append(record) return pd.DataFrame(records).sort_values('subgroup_auc', ascending=True) MODEL_NAME = 'model1' test_df[MODEL_NAME]=torch.sigmoid(torch.tensor(valid_preds)).numpy() TOXICITY_COLUMN = 'target' bias_metrics_df = compute_bias_metrics_for_model(test_df, identity_columns, MODEL_NAME, 'target') bias_metrics_df get_final_metric(bias_metrics_df, calculate_overall_auc(test_df, MODEL_NAME)) ```
github_jupyter
<h1 align="center"> BitCoin Heist Ransomware Address </h1> <h4 align="center"> <img alt="BitcoinHeist" title="#BitcoinHeist" src="https://exame.com/wp-content/uploads/2021/03/Dollar-bitcoin-1.jpg" width="400px;" /> </h4> ## 🔍 About Bitcoin has revolutionized currency in our world. Cryptocurrency is a virtual currency based on blockchain, a distributed system in which transactions are made with intriguing safety due to its decentralized, untraceable and immutable aspects. Recently, ransomware attackers have evolved in the way they charge to unlock attacked systems, asking for cryptocurrency as payment. As blockchain is hard to trace, this study aims to analyse how transactions take place and to predict whether a cryptocurrency address — in this case, a Bitcoin address — is being used with malicious intent or not. At this [link](https://archive.ics.uci.edu/ml/datasets/BitcoinHeistRansomwareAddressDataset), you can find the dataset that was used. We also made a Jupyter notebook available for a better comprehension of the topic. ## 🦸 Authors - Felipe Tavoni - Reynold N. Mazo # **Importing Libraries and Reading Data** ``` import sklearn import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, homogeneity_score from sklearn.cluster import KMeans from sklearn import metrics from sklearn.metrics.cluster import adjusted_rand_score !gdown --id 11ky8x9fhB6gPLUsQUKQzOV7K-ES5paof bitcoin_data = pd.read_csv('BitcoinHeistData.csv', delimiter=',') ``` # **Exploratory Data Analysis** ### **Dataset** > Below, we are simply exhibiting the values present in our dataset ``` bitcoin_data ``` --- ### **Unbalanced Samples** #### **Unique amount of labels** > The query below shows that our *dataset* has 29 unique labels.
```
# Check the number of unique values in the `label` attribute
bitcoin_data.groupby('label').nunique()
```

#### **Binarization**

> As we can observe in the queries below, the amount of ransomware equals 1.42% of the whole data. That said, we must reduce the amount of observations used to train our model, since the imbalance can affect the final performance.

> Here, we will convert those 29 unique labels into only 2 (is ransomware & is not / unknown ransomware). An under-sampling method was used, which consists of selecting a random amount of the majority class to match the amount of the minority class. No criterion was used, just random selection.

```
# Split the `label` values into: white & ransomware
# (.copy() so the label assignments below write to independent frames instead
#  of a view of bitcoin_data — avoids SettingWithCopyWarning and silent no-ops)
class_white = bitcoin_data[bitcoin_data['label'] == "white"].copy()
class_ransomware = bitcoin_data[bitcoin_data['label'] != "white"].copy()

# Binarize the `label` values: white = "0" & ransomware = "1"
class_white['label'] = 0
class_ransomware['label'] = 1

# Number of observations of each label
print('Total:', bitcoin_data.shape[0])
print('Class 0 (White):', class_white.shape[0])
print('Class 1 (Ransomware):', class_ransomware.shape[0])

# Plot the discrepancy between the classes
plot_concat = pd.concat([class_white, class_ransomware], axis=0)
plot_concat['label'].value_counts().plot(kind='bar', title='count (target)')

# Under-sampling process
# binary case
class_white = class_white.sample(n=class_ransomware.shape[0], random_state=49)
bitcoin_data_binary = pd.concat([class_white, class_ransomware], axis=0)
print('Novo Total:', bitcoin_data_binary.shape[0])
print("total label of 1 and 0:", bitcoin_data_binary['label'].value_counts())

# plot after under-sampling
bitcoin_data_binary['label'].value_counts().plot(kind='bar', title='count (target) for binary case')

# Under-sampling process
# multi case — re-split from the original data so the 29 string labels are kept
class_white = bitcoin_data[bitcoin_data['label'] == "white"]
class_ransomware = bitcoin_data[bitcoin_data['label'] != "white"]
class_white = class_white.sample(n=class_ransomware.shape[0], random_state=49)
bitcoin_data = pd.concat([class_white, class_ransomware], axis=0)
print('Novo Total:', bitcoin_data.shape[0])
print("total labels:", bitcoin_data['label'].value_counts())

# plot after under-sampling
bitcoin_data['label'].value_counts().plot(kind='bar', title='count (target) for multi case')
```

> Now our data is balanced, and we can obtain a better prediction without biasing our model!

---

### **Missing and Duplicate Values**

> As we can see in the queries below, there are no missing values in our dataset.

```
# Multi case
missing_values = bitcoin_data.isna().sum() / len(bitcoin_data)
print(missing_values)

# Binary case
# BUG FIX: the ratio was divided by len(bitcoin_data) (the multi-class frame)
# instead of the binary frame itself.
missing_values = bitcoin_data_binary.isna().sum() / len(bitcoin_data_binary)
print(missing_values)
```

> Also, we drop any duplicate rows

```
# multiclass
bitcoin_data = bitcoin_data.drop_duplicates()
# binary case
bitcoin_data_binary = bitcoin_data_binary.drop_duplicates()
```

---

### **Correlation Between Variables**

> A correlation analysis was also made on the *dataset* attributes. We can observe 3 similar behaviours: - *length* -- *count*: the interaction between length and count is explained, since length refers to a hidden Bitcoin address while the counter counts the merging addresses in Bitcoin's graph. - *count* -- *looped*: Again, loop refers to that division, transition and merging of Bitcoin addresses. - *weight* -- *neighbor*: Also, there is an interaction between those variables, since weight and neighbor are related to the fusion of addresses of their neighbors. Finally, all other variables are disregarded, since they present a correlation of less than 50%, which demonstrates that they are not related.
``` #Multi case plt.figure(figsize=(12,12)) # Checking correlation between all features sns.heatmap(bitcoin_data.corr(), annot=True); #Binary case plt.figure(figsize=(12,12)) # Checking correlation between all features sns.heatmap(bitcoin_data_binary.corr(), annot=True); ``` --- ### **Plotting all features** > And below is showed some graphical correlation, i.g., the interaction of a attribute with the others. ``` # Ploting all data plt.figure(figsize=(12,12)) g = sns.PairGrid(bitcoin_data, diag_sharey=False, corner=True) g.map_lower(sns.scatterplot) ``` # **Model Building, Training and Evaluation** > Next, we start to build our model. In the next steps, we'll use two approaches. One one them consists of creating a model with 29 labels as target. On the other hand, we'll build a model with only 2 labels, using a binary approach, the *is ransomware* and *isn't a ransomware/unknown*. ## **Multi class clustering (29 Labels)** ### **Building** #### **Setting up training data** ``` class_white = bitcoin_data[bitcoin_data['label'] == "white"] class_ransomware = bitcoin_data[bitcoin_data['label'] != "white"] class_white['label'] = 0 class_ransomware['label'] = class_ransomware['label'].map({ "montrealAPT": 1, 'montrealComradeCircle': 2, 'montrealCryptConsole': 3, 'montrealCryptXXX': 4, 'montrealCryptoLocker': 5, 'montrealCryptoTorLocker2015': 6, 'montrealDMALocker': 7, 'montrealDMALockerv3': 8, 'montrealEDA2': 9, 'montrealFlyper': 10, 'montrealGlobe': 11, 'montrealGlobeImposter': 12, 'montrealGlobev3': 13, 'montrealJigSaw': 14, 'montrealNoobCrypt': 15, 'montrealRazy': 16, 'montrealSam': 17, 'montrealSamSam': 18, 'montrealVenusLocker': 19, 'montrealWannaCry': 20, 'montrealXLocker': 21, 'montrealXLockerv5.0': 22, 'montrealXTPLocker': 23, 'paduaCryptoWall': 24, 'paduaJigsaw': 25, 'paduaKeRanger': 26, 'princetonCerber': 27, 'princetonLocky': 28 }) bitcoin_data = pd.concat([class_white, class_ransomware], axis=0) Y_real = bitcoin_data['label'] X_treino = bitcoin_data 
X_treino.drop('address', axis = 1, inplace = True) X_treino.drop('label', axis = 1, inplace = True) ``` #### **Finding best k value based on Elbow Method** > The elbow method runs the KMeans algorithm several times, returning a *clustering score*. With that in hand, we can analyse the graph, searching for its elbow — the optimal number of clusters. ``` clustering_score = [] for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'random', random_state = 42) kmeans.fit(X_treino) clustering_score.append(kmeans.inertia_) # inertia_ = Soma das distâncias # quadráticas das amostras até o # centro do cluster mais próximo. # Plota a curva característica plt.figure(figsize=(10,6)) plt.plot(range(1, 11), clustering_score) plt.scatter(2,clustering_score[1], s = 200, c = 'red', marker='*') plt.title('Método Elbow') plt.xlabel('No. de Clusters') plt.ylabel('Clustering Score') plt.show() ``` ### **Training** > As previously defined, the model will first be trained with 29 labels as target. Below, we are training it. ``` km = KMeans(n_clusters = 29, init = 'random', random_state = 42, n_init = 5) km.fit(X_treino) Y_clusters = km.predict(X_treino) ``` ### **Evaluating** > As the evaluation method, the Rand score (Rand index) is used.
``` metrics.rand_score(Y_real, Y_clusters) metrics.adjusted_rand_score(Y_real, Y_clusters) print(classification_report(Y_real, Y_clusters)) ``` ## **Binary clustering (2 Labels)** ### **Building** #### **Setting up training data** ``` Y_real_binary = bitcoin_data_binary['label'] X_treino_binary = bitcoin_data_binary X_treino_binary.drop('address', axis = 1, inplace = True) X_treino_binary.drop('label', axis = 1, inplace = True) ``` ### **Training** ``` km = KMeans(n_clusters = 2, init = 'random', random_state = 42, n_init = 5) km.fit(X_treino_binary) Y_clusters_binary = km.predict(X_treino_binary) ``` ### **Evaluating** ##### **Random Score** ``` metrics.rand_score(Y_real_binary, Y_clusters_binary) metrics.adjusted_rand_score(Y_real_binary, Y_clusters_binary) print(classification_report(Y_real_binary, Y_clusters_binary)) ``` ##### **Confusion matrix** ``` # Calcula a matrix de confusão cm = confusion_matrix(Y_real_binary,Y_clusters_binary) # Gera um gráfico da matrix de confusão do tipo heatmap ax = sns.heatmap(cm, annot=True, cmap='Blues', cbar=False, annot_kws={"size": 14}, fmt='g') # Dá nomes aos campos das matrix cmlabels = ['True Negatives', 'False Positives', 'False Negatives', 'True Positives'] for i,t in enumerate(ax.texts): t.set_text(t.get_text() + "\n" + cmlabels[i]) # Dá nome ao título e eixos plt.title('Confusion Matrix', size=15) plt.xlabel('Predicted Values', size=13) plt.ylabel('True Values', size=13) ``` # **Extra: Building And Training With All Samples** ## **29 Labels, 2 Clusters** ### **Building** ``` bitcoin_data_full = pd.read_csv('BitcoinHeistData.csv', delimiter=',') Y_real_full = bitcoin_data_full['label'] X_treino_full = bitcoin_data_full.copy() X_treino_full.drop('address', axis = 1, inplace = True) X_treino_full.drop('label', axis = 1, inplace = True) ``` ### **Training** ``` km = KMeans(n_clusters = 2, init = 'random', random_state = 42, n_init = 5) km.fit(X_treino_full) Y_clusters_full = km.predict(X_treino_full) ``` ### **Testing** 
``` metrics.rand_score(Y_real_full, Y_clusters_full) metrics.adjusted_rand_score(Y_real_full, Y_clusters_full) Y_real_full_list = Y_real_full.to_list() Y_real_full_list # Y_real_full_list = np.array(Y_real_full_list) # Y_real_full_list = Y_real_full_list.astype(int) for index, label in enumerate(Y_real_full_list): if label == 'white': Y_real_full_list[index] = 0 else: Y_real_full_list[index] = 1 print(classification_report(Y_real_full_list, Y_clusters_full)) ``` ## **29 Labels, 29 Clusters** ### **Building** ``` bitcoin_data_full = pd.read_csv('BitcoinHeistData.csv', delimiter=',') ``` Maping of labels for further comparison ``` bitcoin_data_full['label'] class_white = bitcoin_data_full[bitcoin_data_full['label'] == "white"] class_ransomware = bitcoin_data_full[bitcoin_data_full['label'] != "white"] class_white['label'] = 0 class_ransomware['label'] = class_ransomware['label'].map({ "montrealAPT": 1, 'montrealComradeCircle': 2, 'montrealCryptConsole': 3, 'montrealCryptXXX': 4, 'montrealCryptoLocker': 5, 'montrealCryptoTorLocker2015': 6, 'montrealDMALocker': 7, 'montrealDMALockerv3': 8, 'montrealEDA2': 9, 'montrealFlyper': 10, 'montrealGlobe': 11, 'montrealGlobeImposter': 12, 'montrealGlobev3': 13, 'montrealJigSaw': 14, 'montrealNoobCrypt': 15, 'montrealRazy': 16, 'montrealSam': 17, 'montrealSamSam': 18, 'montrealVenusLocker': 19, 'montrealWannaCry': 20, 'montrealXLocker': 21, 'montrealXLockerv5.0': 22, 'montrealXTPLocker': 23, 'paduaCryptoWall': 24, 'paduaJigsaw': 25, 'paduaKeRanger': 26, 'princetonCerber': 27, 'princetonLocky': 28 }) bitcoin_data_full = pd.concat([class_white, class_ransomware], axis=0) Y_real_full = bitcoin_data_full['label'] X_treino_full = bitcoin_data_full.copy() X_treino_full.drop('address', axis = 1, inplace = True) X_treino_full.drop('label', axis = 1, inplace = True) ``` ### **Training** ``` km = KMeans(n_clusters = 29, init = 'random', random_state = 42, n_init = 5) km.fit(X_treino_full) Y_clusters_full = km.predict(X_treino_full) ``` ### 
**Testing** ``` metrics.rand_score(Y_real_full, Y_clusters_full) metrics.adjusted_rand_score(Y_real_full, Y_clusters_full) #change the type to fit with the predict array Y_real_full = Y_real_full.astype(int) print(classification_report(Y_real_full, Y_clusters_full)) ``` ## **2 Labels, 2 Clusters** ### **Building** ``` Y_real_full = bitcoin_data_full['label'] X_treino_full = bitcoin_data_full X_treino_full.drop('address', axis = 1, inplace = True) X_treino_full.drop('label', axis = 1, inplace = True) ``` ### **Training** ``` km = KMeans(n_clusters = 2, init = 'random', random_state = 42, n_init = 5) km.fit(X_treino_full) Y_clusters_full = km.predict(X_treino_full) ``` ### **Evaluating** ``` metrics.rand_score(Y_real_full, Y_clusters_full) metrics.adjusted_rand_score(Y_real_full, Y_clusters_full) Y_real_full # Calcula a matrix de confusão cm = confusion_matrix(Y_real_binary,Y_clusters_binary) # Gera um gráfico da matrix de confusão do tipo heatmap ax = sns.heatmap(cm, annot=True, cmap='Blues', cbar=False, annot_kws={"size": 14}, fmt='g') # Dá nomes aos campos das matrix cmlabels = ['True Negatives', 'False Positives', 'False Negatives', 'True Positives'] for i,t in enumerate(ax.texts): t.set_text(t.get_text() + "\n" + cmlabels[i]) # Dá nome ao título e eixos plt.title('Confusion Matrix', size=15) plt.xlabel('Predicted Values', size=13) plt.ylabel('True Values', size=13) ``` # **Silhouette Technique** ``` from sklearn.metrics import silhouette_samples, silhouette_score range_n_clusters = [2, 3, 4, 5] silhouette_avg_n_clusters = [] for n_clusters in range_n_clusters: # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=42) cluster_labels = clusterer.fit_predict(X_treino) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X_treino, cluster_labels) print("For n_clusters =", n_clusters, "The avarage score of silhouette is :", silhouette_avg) silhouette_avg_n_clusters.append(silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X_treino, cluster_labels) ``` # **PCA** ``` # Importação do módulo PCA from sklearn.decomposition import PCA # Atribui 2 componentes principais pca = PCA(n_components=2) # Realiza a transformação PCA do dataset treino X_treino = bitcoin_data principalComponents = pca.fit_transform(X_treino) # Transforma o objeto numpy array em dataframe e atribui nomes às colunas principalDf = pd.DataFrame(data = principalComponents , columns = ['principal_component_1', 'principal_component_2']) # Concatena o atributo alvo ao novo dataset reduzido X_reduced = pd.concat([principalDf, Y_real_binary], axis = 1) X_reduced.head() # Plota o gráfico de dispersão do novo dataset com transformação PCA fig = plt.figure(figsize = (6,6)) fig.suptitle('Gráfico de dispersão dos componentes principais') sns.scatterplot(data = X_reduced, x = 'principal_component_1', y ='principal_component_2', hue = 'label' ) # Importa o módulo de padronização de escala from sklearn.preprocessing import StandardScaler # Realiza o agrupamento via K-means no novo datase X_reduced.drop('label', axis = 1, inplace = True) #Remove valores inválidos X_reduced.dropna(inplace=True) # Standardize the data X_std = StandardScaler().fit_transform(X_reduced) # Run local implementation of kmeans km = KMeans(n_clusters = 2, init = 'k-means++', random_state = 42) km.fit(X_std) # Armazena a localização dos centroids centroids = km.cluster_centers_ # Realiza a predição com o modelo treinado Y_clusters_2 = km.predict(X_std) # Plotagem do gráfico de dispersão dos clusters # Configura subplots fig, ax = plt.subplots(figsize=(12, 12)) # Plota a dispersão do 
primeiro cluster plt.scatter(X_std[km.labels_ == 0, 0], X_std[km.labels_ == 0, 1], c='green', label='cluster 1') # Plota a dispersão do segundo cluster plt.scatter(X_std[km.labels_ == 1, 0], X_std[km.labels_ == 1, 1], c='blue', label='cluster 2') # Marca os centróides com o símbolo estrela plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=300, c='r', label='centroid') plt.legend() plt.xlim([-5, 5]) plt.ylim([-5, 5]) plt.xlabel('principal_component_1') plt.ylabel('principal_component_2') plt.title('Visualization of clustered data', fontweight='bold') ax.set_aspect('equal'); ``` # **KNN Experiment for number of neighbors equal to 1** ``` from sklearn.neighbors import KNeighborsClassifier bitcoin_data_full = pd.read_csv('BitcoinHeistData.csv', delimiter=',') Y_real = bitcoin_data_full['label'] X_treino = bitcoin_data_full X_treino.drop('address', axis = 1, inplace = True) X_treino.drop('label', axis = 1, inplace = True) #Split the data X_train, X_test, Y_train, Y_test = train_test_split(X_treino, Y_real, test_size=0.3, random_state=42) neigh = KNeighborsClassifier(n_neighbors=1) neigh.fit(X_train, Y_train) ``` ##KNN Multi Case ``` #testing Y_predit = neigh.predict(X_test) neigh.score(X_test, Y_test) print(classification_report(Y_test, Y_predit)) ``` ##KNN Binary Case ``` #Split the data X_train_binary, X_test_binary, Y_train_binary, Y_test_binary = train_test_split(X_treino_binary, Y_real_binary, test_size=0.3, random_state=42) neigh = KNeighborsClassifier(n_neighbors=1) neigh.fit(X_train_binary, Y_train_binary) #testing Y_predit_binary = neigh.predict(X_test_binary) neigh.score(X_test_binary, Y_test_binary) print(classification_report(Y_test_binary, Y_predit_binary)) ```
github_jupyter
# Generalized Additive Models > Using pyGAM ``` import numpy as np import pandas as pd import statsmodels.api as sm import statsmodels.formula.api as smf from sklearn.metrics import r2_score from pygam import LinearGAM, s, f, l, te from pygam.datasets import wage def partialResidualPlot(model, df, outcome, feature, ax): y_pred = model.predict(df) copy_df = df.copy() for c in copy_df.columns: if c == feature: continue copy_df[c] = 0.0 feature_prediction = model.predict(copy_df) results = pd.DataFrame({ 'feature': df[feature], 'residual': df[outcome] - y_pred, 'ypartial': feature_prediction - model.params[0], }) results = results.sort_values(by=['feature']) smoothed = sm.nonparametric.lowess(results.ypartial, results.feature, frac=1/3) ax.scatter(results.feature, results.ypartial + results.residual) # ax.plot(smoothed[:, 0], smoothed[:, 1], color='gray') ax.plot(results.feature, results.ypartial, color='black') ax.set_xlabel(feature) ax.set_ylabel(f'Residual + {feature} contribution') return ax data = pd.read_csv("Fish.csv") data fig, ax = plt.subplots(figsize=(10, 8)) y = data.Weight x = data.Width plt.scatter(x,y) plt.ylabel("weight of fish in gram") plt.xlabel("diagonal width in cm") plt.grid() ``` # Polynomial ``` result_poly = smf.ols('Weight ~ Width +' + 'I(Width**2)', data=data).fit() print(result_poly.summary()) fig, ax = plt.subplots(figsize=(10, 8)) pred_poly = result_poly.predict(data.Width) plt.plot(data.Width, data.Weight,'o') plt.xlabel('Width') plt.ylabel('Weight') pfit = pd.DataFrame(columns = ['Width','Pred']) pfit.Width = data.Width pfit.Pred = pred_poly pfit = pfit.sort_values(by=['Width']) plt.plot(pfit.Width, pfit.Pred) plt.grid() ``` # Spline ``` formula = ('Weight ~ bs(Width, df=3, degree=2)') model_spline = smf.ols(formula=formula, data=data) result_spline = model_spline.fit() print(result_spline.summary()) fig, ax = plt.subplots(figsize=(10, 8)) partialResidualPlot(result_spline, data, 'Weight', 'Width', ax) plt.tight_layout() plt.grid() 
plt.xlabel('Width') plt.ylabel('Weight') ``` # GAM ``` data predictors = ['Width'] outcome = ['Weight'] x = data[predictors].values y = data[outcome] gam = LinearGAM(l(0)) gam.gridsearch(x, y) fig, ax = plt.subplots(figsize=(10, 8)) XX = gam.generate_X_grid(term=0) plt.plot(XX, gam.predict(XX), 'r--') plt.plot(XX, gam.prediction_intervals(XX, width=.95), color='b', ls='--') plt.scatter(X, y, facecolor='gray', edgecolors='none') predictors = ['Width'] outcome = ['Weight'] x = data[predictors].values y = data[outcome] gam = LinearGAM(s(0, n_splines=20)) gam.gridsearch(x, y) fig, ax = plt.subplots(figsize=(10, 8)) XX = gam.generate_X_grid(term=0) plt.plot(XX, gam.predict(XX), 'r--') plt.plot(XX, gam.prediction_intervals(XX, width=.95), color='b', ls='--') plt.scatter(X, y, facecolor='gray', edgecolors='none') ```
github_jupyter
# Management - Substituted Phenyl Set 1 ``` import qcportal as ptl import pandas as pd import datetime import time from management import * ``` ``` # connect with authentication, therefore write access # don't use unless you plan to submit things client = ptl.FractalClient.from_file() client ``` ## Dataset tracking ### 2020.06.27 01:00 UTC ``` dataset = "OpenFF Substituted Phenyl Set 1" ds = client.get_collection("TorsionDriveDataset", dataset) ds.list_specifications() res = get_unfinished_torsiondrive_optimizations(ds, 'default', client) res ds.status("default") resm = merge(res) incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE') next(incomplete) count_unique_optimization_error_messages(resm, full=True, pretty_print=True) restart_optimizations(resm, client) [tdr for tdr in ds.df.default if tdr.status == 'ERROR'] restart_torsiondrives(ds.df.default.tolist(), client) ``` Same errored TorsionDrives as previously... ### 2020.06.18 14:22 UTC ``` dataset = "OpenFF Substituted Phenyl Set 1" ds = client.get_collection("TorsionDriveDataset", dataset) ds.list_specifications() res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False) res ds.status("default") resm = merge(res) incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE') next(incomplete) count_unique_optimization_error_messages(resm, full=True, pretty_print=True) restart_optimizations(resm, client) [tdr for tdr in ds.df.default if tdr.status == 'ERROR'] restart_torsiondrives(ds.df.default.tolist(), client) ``` ### 2020.06.12 15:12 UTC ``` dataset = "OpenFF Substituted Phenyl Set 1" ds = client.get_collection("TorsionDriveDataset", dataset) ds.list_specifications() res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False) res ds.status("default") resm = merge(res) incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE') count_unique_optimization_error_messages(resm, full=True, pretty_print=True) 
restart_optimizations(resm, client) [tdr for tdr in ds.df.default if tdr.status == 'ERROR'] restart_torsiondrives(ds.df.default.tolist(), client) ``` ### 2020.06.10 16:30 ``` dataset = "OpenFF Substituted Phenyl Set 1" ds = client.get_collection("TorsionDriveDataset", dataset) ds.list_specifications() res = get_incomplete_torsiondrive_optimizations(ds, 'default', client, merged=False) res ds.status("default") [ts for ts in ds.df.default if ts.status != 'COMPLETE'] client.modify_services('restart', procedure_id='19953582') client.modify_services('restart', procedure_id='3745388') client.modify_services('restart', procedure_id='3745481') client.modify_services('restart', procedure_id='3745569') ds = client.get_collection("TorsionDriveDataset", dataset) ds.status("default") res = get_incomplete_torsiondrive_optimizations(ds, 'default', client, merged=False) res ``` ## Archived ``` ds.df [i for i in ds.df.default if i.status == 'ERROR'] ids = set(i.id for i in ds.df.default) res = client.query_procedures(ids) from collections import defaultdict angle_optimizations = defaultdict(set) for tdr in ds.df.default: if tdr.status == 'COMPLETE': continue for val in tdr.optimization_history.values(): angle_optimizations[tdr.id].update(set(val)) angle_optimizations client.modify_tasks(operation='restart', base_result='20576181') angle_optimizations_i = set() for i in angle_optimizations.values(): angle_optimizations_i.update(set(i)) angle_optimizations_i len(angle_optimizations_i) res_angle_opt = client.query_procedures(angle_optimizations_i) res_angle_opt review = ((i, i.get_error().error_message) for i in res_angle_opt if i.status == 'ERROR') print(next(review)) client.modify_tasks(operation='restart', base_result='20634721') client.modify_tasks(operation='restart', base_result='20634722') ``` Why are there only 2 failures? Don't we see 4 errors at the top level? Not what I would expect, but I could be thinking about this wrong? 
Only one of the torsion drive ids (`19953582`) shows up in our iteration with `optimaization_history` values. Do the others not? ``` for i in ds.df.default: if i.status == 'ERROR': print(i.optimization_history) ``` Guess some of these had no optimizations assigned? Need to check the original PR if this was noted. ``` failed = tuple(i.id for i in res_angle_opt if i.status == 'ERROR') failed for i in ds.df.default: if i.status == 'ERROR': for key, value in i.optimization_history.items(): for f in failed: if f in value: print(key) ``` Two angles failed it appears. Restart again? ``` angle_optimizations.keys() for tdrid in angle_optimizations.keys(): client.modify_services(operation='restart', procedure_id=tdrid) ds.status('default') ``` ## Problem cases (2020.05.27) From the 'OpenFF Substituted Phenyl Set 1' : job index `c1[cH:1]c:2[c:4]2[cH:3]cncc2` , job id: ``` TorsionDriveRecord(id='19953582', status='ERROR') . Two optimizations failed with the following Error messages: ComputeError(error_type='BrokenProcessPool', error_message='Caught Executor Error:\nTraceback (most recent call last):\n File "/opt/conda/envs/qcfractal/lib/python3.6/site-packages/qcfractal/queue/executor_adapter.py", line 15, in _get_future\n return future.result()\n File "/opt/conda/envs/qcfractal/lib/python3.6/concurrent/futures/_base.py", line 425, in result\n return self.__get_result()\n File "/opt/conda/envs/qcfractal/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result\n raise self._exception\nconcurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.\n') ``` ``` erred = [i for i in ds.df.default.values if i.status == 'ERROR'] erred ``` Start with the last one: ``` tdr = erred[-1] tdr tdr.fields tdr.get_stdout() tdr.id client.modify_services(operation='restart', procedure_id=tdr.id) ds = client.get_collection("TorsionDriveDataset", dataset) ds.list_specifications() ``` 'default' always means 
'b3lyp-d3bj' for us. ``` ds.status("default") ds.df ```
github_jupyter
In this tutorial we analyse 5k PBMC cells produced using 10x Genomics. The data is publicly available on their website. To download the data you just need to un-comment the cells below downloading 10x PBMC data ---------------------------------------- https://www.10xgenomics.com/resources/datasets/5-k-peripheral-blood-mononuclear-cells-pbm-cs-from-a-healthy-donor-next-gem-v-1-1-1-1-standard-2-0-0 ``` #!wget https://cf.10xgenomics.com/samples/cell-atac/2.0.0/atac_pbmc_5k_nextgem/atac_pbmc_5k_nextgem_filtered_peak_bc_matrix.h5 #!wget https://cf.10xgenomics.com/samples/cell-atac/2.0.0/atac_pbmc_5k_nextgem/atac_pbmc_5k_nextgem_fragments.tsv.gz #!wget https://cf.10xgenomics.com/samples/cell-atac/2.0.0/atac_pbmc_5k_nextgem/atac_pbmc_5k_nextgem_fragments.tsv.gz.tbi ``` downloading gtf file from gencode ---------------------------------------- https://www.gencodegenes.org/human/ ``` #!wget http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/gencode.v38.annotation.gtf.gz ``` # Loading Libraries ``` #!pip install git+https://github.com/colomemaria/epiScanpy.git@tss_enrichment #!pip install git+https://github.com/colomemaria/epiScanpy import episcanpy as epi # settings for the plots epi.set_figure_params(scanpy=True, dpi=80, dpi_save=800, frameon=True, vector_friendly=True, color_map="YlGnBu", format='pdf', transparent=False, ipython_format='png2x') ``` # Loading the ATAC data as Anndata ``` adata = epi.read_h5_atac("./atac_pbmc_5k_nextgem_filtered_peak_bc_matrix.h5") adata # save the raw read count information in a layer adata.layers['counts'] = adata.X # binarize the count matrix to only have open peak = 1 and closed peak = 0 epi.pp.binarize(adata) ``` ### preliminary filtering removing very lowly covered cells and features before running QC and a more detailed filtering ``` #filter out cell barcodes containing less than 10 peaks epi.pp.filter_cells(adata, min_features=10) adata # filter out peaks open in less than 2 cells epi.pp.filter_features(adata, 
min_cells=2) adata ``` # Quality Controls ## - TSS enrichment ``` # TSS enrichment & find genes epi.pp.tss_enrichment(adata, gtf="./gencode.v38.annotation.gtf.gz", fragments="./atac_pbmc_5k_nextgem_fragments.tsv.gz") adata # visualisation of the TSS enrichment metric epi.pl.tss_enrichment(adata) epi.pl.tss_enrichment_score(adata) # split cells between high TSS enrichment and low TSS enrichment scores. epi.tl.filter_enrichment_score(adata, score_threshold=8) epi.pl.tss_enrichment(adata, group_by='tss_enrichment_split') - ``` # Latent semantic Indexing ``` epi.pp.tfidf(adata) epi.pp.lsi(adata) epi.pp.neighbors(adata, use_rep='X_lsi') epi.tl.umap(adata) epi.pl.umap(adata) epi.tl.louvain(adata) epi.pl.umap(adata, color='louvain') !pip install louvain python-igraph louvain epi.pp.pca(adata) epi.tl.tsne(adata) ```
github_jupyter
### 1. What is the relationship between def statements and lambda expressions ? def keyword is used to create a named function in Python, Function naming follows the same rules of writing identifiers in Python Lambdas are one-line functions without a name, in other words using lambdas we can create anonymous functions in python. Lambda functions work practically the same as any other method in Python. Lambdas differ from normal Python methods because they can have only one expression, and they can't contain any statements and their return type is a function object ### 2. What is the benefit of lambda? Fewer Lines of Code Lambda functions are inline functions and thus execute comparatively faster Many times lambda functions make code much more readable by avoiding the logical jumps caused by function calls ### 3. Compare and contrast map, filter, and reduce. map applies a transformation to each element. The map() function iterates through all items in the given iterable and executes the function we passed as an argument on each of them. Syntax : map(function, iterable(s)) filter accumulates only elements matching a condition. filter() forms a new list that contains only elements that satisfy a certain condition, i.e. the function we passed returns True Syntax : filter(function, iterable(s)) reduce accumulates all elements to a single value, by using immutable values reduce() works by calling the function we passed for the first two items in the sequence. The result returned by the function is used in another call to function along with the next (third in this case) element Syntax : reduce(function, sequence[, initial]) ### 4. What are function annotations, and how are they used? Function annotation is the standard way to access the metadata with the arguments and the return value of the function. These are optional Python expressions that get applied to different parts of the function. 
They get evaluated only at compile time and have no significance at run time. They do not have any significance or meaning associated with them until accessed by some third-party libraries. They are used to type check the functions by declaring the type of the parameters and the return value for the functions. The string-based annotations help us to improve the help messages. Syntax : def func(a: 'int') -> 'int': pass Annotations for simple parameters: def func(x: 'float'=10.8, y: 'argument2'): In the above code the argument, ‘x’ of the function func, has been annotated to float data type and the argument ‘y’ has a string-based annotation. The argument can also be assigned to a default value using a ‘=’ symbol followed by the default value. These default values are optional to the code. Annotations for return values: def func(a: expression) -> 'int': The annotations for the return value are written after the ‘->’ symbol. ### 5. What are recursive functions, and how are they used? A recursive function is a function that calls itself during its execution. This means that the function will continue to call itself and repeat its behavior until some condition is met to return a result ``` # example def fact(x): if x == 1 : return 1 else : return x * fact(x-1) # recursion fact(3) ``` ### 6. What are some general design guidelines for coding functions? 1. Use 4-space indentation and no tabs. 2. Use docstrings 3. Wrap lines so that they don’t exceed 79 characters 4. Regular and updated comments are valuable to both the coders and users 5. Use of trailing commas : in case of tuple -> ('good',) 6. Use Python’s default UTF-8 or ASCII encodings and not any fancy encodings 7. Naming Conventions 8. Characters that should not be used for identifiers: * ‘l’ (lowercase letter el), * ‘O’ (uppercase letter oh), * ‘I’ (uppercase letter eye) as single character variable names as these are similar to the numerals one and zero. 9. 
Don’t use non-ASCII characters in identifiers 10. Name your classes and functions consistently 11. When defining instance methods, always use self for the first argument ### 7. Name three or more ways that functions can communicate results to a caller. * A function can return a single value * Can return multiple values as a tuple * can return a list or dictionary * can return a function object * can return a class object (ex: SQL Connection object) * can update an output variable * can update a global variable * can update/create a file
github_jupyter
# SLU03 - Git Basics In this notebook we will be covering the following: * (Painless) Introduction to Version Control - So what is a Version Control System? - Collaboration - Storing version properly - Auditability * Repositories: where it all begins - Creating a repository - README - .gitignore - Creating a repository not hosted on GitHub servers * Working with Git: the basics - Main git commands - git status - git add - git commit - git push - git pull - git log * Summary workflow * GitHub and other Version Control Systems ## (Painless) Introduction to Version Control **Imagine this scenario with me...** It's two AM and you've just finished your first programming project. Everything works, your "*hello worlds*" are all very hello'y, your *1*s and *0*s are all very *True* and *False*, and there's not a *ZeroDivisionError* to be seen in the land. Everything is wonderful and you go to sleep as a happy programmer, albeit a tired one... You wake up the next morning, relaxed and happy with no undereye bags to be seen on your lovely face. Your amazing programming project is due that afternoon and you go check if everything is running smoothly... but something terrible happened! All your beautiful code has been replaced by cat emojis! You look suspiciously to *Whiskers* who you once thought as your four-legged furry best friend. You haven't paied him as much attention as you should, with all the late-night coding... And he just did you good. **Never trust a cat.** If only there was a way to "*go back in time*" and save all your precious work... **With version control there is!** *And all is good in the coding land!* ### So what is a Version Control System? In short, [Version Control System (VCS)](https://en.wikipedia.org/wiki/Version_control) records changes to a file or set of files over time so that you can recall specific versions later. 
It allows you to *revert selected files to a previous state*, *revert the entire project to a previous state*, *compare changes over time*, *see who last modified something that might be causing a problem*, *who introduced an issue and when*, and more. There are a lot of decentralized services for VCS such as [GitHub](https://github.com/), [GitLab](https://about.gitlab.com/) and [Bitbucket](https://bitbucket.org/), and all of them rely on [Git](https://git-scm.com/), the VCS that is fueling all of these services and recording all the changes. *FYI: Git is free and open-source meaning that there are no barriers to take advantage of it. :)* You noticed that all of our [Prep Course Material](https://github.com/LDSSA/ds-prep-course-2021) is hosted on GitHub. This makes **our** lives easier as all changes to the learning materials are being tracked, and also who made them, and it also makes **your** life easier when accessing the materials. Saving your work from evil cats is not the only reason VCS is useful. There are three main reasons why VCS are used: * collaboration, * storing versions properly, * auditability. Let's try to understand all of them. ### Collaboration <img width="400" src="https://www.groovecommerce.com/hs-fs/hub/188845/file-4063238065-png/blog-files/version-control-comic.png"/> **Is this a familiar scenario to you?** Imagine this but with many *many* files in a software program... Words are not enough to explain the utter horror of such an event... Worse... Imagine collaborating on a software project *over email*. (>_<) Git (or any other VCS for that matter) was designed to solve the problem of **multiple people working on the same code**. While that won't happen during the prep course, it is still important for you to know how Git eases the development process when there are multiple people involved, since in the industry almost no developer works alone (*thankfully!*). 
### Storing versions properly Saving a version of your project after making changes is an **essential habit**. In VCS, we call this a **commit**. Just like in an *old-school* videogame, when you feel you have made some progress and a boss around the corner might take you down, you save your game. The same logic applies to a **commit**. (*Note for your future self who will thank me later: don't commit broken or unfinished work. Your future self will have zero clues about what the heck was going on back when you commmitted your code...*) Another important feature of the **commit**, which we will revisit later on, is that it asks you for a message. This is to help your future self, and others collaborating with you, know what is going on. A succint but clear message of what changes have happened is a very good practice (and a mandatory one!). In the end, when using a VCS, because everything is being stored in a server (such as GitHub), you **only** have one version on your disk of the project that you're currently working on. Everything else - all the past versions and variants - is neatly packed up inside the VCS. When/if you need them, you can request any version at any time, and you'll have a snapshot of the complete project right at hand, including the history of all the changes done thus far. :) ### Auditability When you're working with a team on a single project, sometimes it gets hard to know "who did what". VCS offers you an easy way to track "who made what change and when". It unlocks the ability to: - Debug more effectively by finding when a breaking change was introduced - Track the reason that certain changes were made - Find the person who made a change and ask them why they did it (not with violence, but with love!) <img width="600" src="https://miro.medium.com/max/1400/1*wQ2mtIZHzVkJ0Y2suuVGpQ.jpeg"> ## Repositories: where it all begins A repository is simply a **place that stores and tracks changes to the files contained within it**. 
It associates identities with changes to those files. Take a look at [a familiar repository](https://github.com/LDSSA/ds-prep-course-2021): <img width="600" src="https://imgur.com/2uKqRbF.png"/> This is the Prep Course public repository, from where all of you pull the SLUs every week. Under the hood, our lovely instructors (shown in **contributors**) work really hard to make the SLUs available to you every week. You can see that at the time of the creation of this Learning Notebook, **32 commits** had already been **pushed** (meaning saved) to the Prep Course repository, but more on this later. ### Creating a repository Congrats! You may have not realized it at the time, but you've already created a repository by following these instructions: https://github.com/LDSSA/ds-prep-course-2021#12-setup-git-and-github. When you followed these steps, you've: - Installed Git, a VCS - Created an account on GitHub, a development platform that implements Git - Set up your own (private) repository to save your progress throughout the Prep Course - Cloned the Prep Course repository to access the materials You're basically a master at this already! Now, you'll learn a little bit more about the README file and the .gitignore file. ### README The **README** is probably **the single most important document in a repository**. It is the starting point by which when someone arrives at the repo, they know what the repository is all about and what they should do next. It is the documentation of the repo. It uses [Markdown](https://pt.wikipedia.org/wiki/Markdown) (which you have learned in the Jupyter Notebook SLU). The .md extension identifies markdown files. 
(*Fun fact: this notebook is also written in markdown!*) ### .gitignore <img width="600" src="https://imgur.com/VmhvbAt.jpeg"> The **.gitignore** is a special file that contains information about the files, file types, and/or directories in your git repo that you never want to see committed (*just like that poor dude is being ignored, so will these files/directories*). It's very easy to accidentally **commit a file** that you never intended to make available in the repository (especially if you use `git add .` to stage all files in the current directory). That's where a .gitignore file comes in handy! It lets Git know that it should ignore certain files and not track them. So, what files would you normally not want to commit? - Any file with more than 2 MB, so always include the dataset that you're using - Log files - Files with API keys/secrets, credentials, or sensitive information - Useless system files such as the **annoying** mac .DS_store - Binaries - version control is all about tracking changes to text files and so tracking changes to large amounts of 1s and 0s will be unhelpful and crash the system - Dependencies which can be downloaded from a package manager. They already have their own VC systems and you only need a particular version of it. No need to do their job for them and track their changes! There's a nice website which tells you what to ignore depending on your operating system, text editor or IDE, languages, and frameworks: https://www.gitignore.io/ Additionally, when setting up a repository, GitHub allows you to add a pre-set .gitignore file depending on the language you're using (Python, R, C...). Very handy! To include files in the .gitignore, just type the name of the file. The same thing for directories, except that directories should end with a slash (/). **Ignoring any file with a given extension:** Some slightly more advanced ways of excluding files make use of the wildcard (`*`). The wildcard matches 0 or more characters. 
So, for example, if you want to exclude every .log file you would include *.log in your .gitignore. However, when using this rule, you may end up ignoring a specific file that you want to commit, e.g. you may want to commit a specific log named important.log and exclude all the others. If that happens, you can use a `!` to specifically negate a file that would be ignored. Our .gitignore would be like: *.log !important.log (*Note: The negation must be placed after the rule from which you're excluding the file*) ### Creating a repository not hosted on GitHub servers This is what you know until now: - How to create a repo in GitHub - How to create a README and understand why it is important - How to create a .gitignore and add files that you don't want to see committed But what if you wanted to benefit from the advantages of a version control system, but you don't want the content of your work to be hosted on GitHub servers? Imagine this: you're a writer and you're working on your new book. For obvious reasons you don't want the draft version of your book to be public, but actually you're not even interested in sharing it with anyone at this stage, so not even a private repo makes sense. Imagine that you delete some text as you're writing because you don't like it. But in the future, you often find yourself wanting to bring that text back. You know you can do that with a version control system, but you don't want neither a private or a public repository. What do you do? Luckily for you, there's a way to create a repo on your machine without hosting it on GitHub. Let's see how: - Open the terminal and go the folder that contains the files you would like to track - Run `git init --bare`. The bare option is crucial as by default git will create a repository with a remote server. So, you have to use it if you want to keep everything locally - Run `git remote add origin <path to the folder>`. 
You can get the path to the folder by running the `pwd` command while you're inside the folder. This command will create a local repository, which is something that you'll learn about in the next section of this SLU :) - Run `git checkout -b main`. You'll only learn about branches in the next SLU so for now don't think too much about this command :) And done! Whenever you feel like using the advantages of a Version Control System but you don't want to host anything on GitHub, you now know how to! ## Working with Git: the basics So... This is how Git works! Looks pretty confusing, right? <img width="700" src="https://imgur.com/JefNy7n.png"/> Let's break this diagram into parts, using the concepts that we've already acquired in this SLU. And remember, you've already done most of this stuff! The **remote repository** is the GitHub repository that is hosted on the GitHub servers. The first question you may ask is: **why do we need a workspace and a local repository? And why is there a staging area in between them?** Let me try to explain: - The **local repository** is the “container” that tracks the changes to your project files. It holds all the **commits** — a snapshot of all your files at a point in time — that have been made. - The **workspace** consists of files that you are currently working on. You can think of the workspace as a file system where you can view and modify files. - The **staging area** is where commits are prepared. The index compares the files in the workspace to the files in the local repository. When you make a change in the workspace, the index marks the file as "modified" before it is committed. Now, why is this important? Mainly for the purpose of **separating your commits**. Instead of committing all the changes you've made since the last commit, the stage lets you group related changes before actually committing it to the project history. 
This means you can make all sorts of edits to unrelated files, then go back and split them up into logical commits by adding related changes to the stage and commit them piece-by-piece. It's better to create small and frequent commits so that it’s easy to track down bugs and revert changes with minimal impact on the rest of the project. ### Main Git commands #### git status The `git status` command is very straightforward. It lets you see the status of your workspace and staging area files. The files in these two areas have 4 possible states: - **Staged**: files that you've recently used `git add` on - **Modified**: files that you've **not** recently used `git add` on, but that were committed in the past and were modified - **Unmodified**: files that you've **not** recently used `git add` on, that were committed in the past and were **not** modified - **Untracked**: files that were neither committed nor staged before In a graphical way: <img width="500" src="https://imgur.com/nBgRBls.png"/> **When you add a file it will show up as untracked. If you've never staged a file, it won't be tracked by git.** **When you stage a file (with the `git add` command), it becomes staged until the next commit (`git commit -m ""`).** **After the commit, it goes into the unmodified status, until the next change. After that, it will show up as modified.** And the cycle continues.. That's Git's *circle of life* for files! The image bellow shows an example of running a `git status` command where: - the Learning Notebook is staged; - the Exercise Notebook is modified; - a txt called *NotTracked* is untracked. <img width="600" src="https://imgur.com/MyB0RSS.png"/> For copy-pasting purposes: - `git status` #### git add <img width="500" src="https://imgur.com/Qit7nJ2.png"/> The `git add` command adds a change in the working directory to the staging area. It tells Git that you want to include updates to a particular file in the next commit. 
However, `git add` doesn't really affect the repository in any significant way — **changes are not actually recorded until you run `git commit`**. Example of running a `git add` command: <img width="600" src="https://imgur.com/8E810IF.png"/> In **Step 5** of the [guide to work on the learning materials](https://github.com/LDSSA/ds-prep-course-2021#22---working-on-the-learning-units), we ask you to stage all changes with the `git add .` command. **Useful command options:** - If you type `git add <file>` you'll stage all changes in file for the next commit - If you type `git add <file1> <file2>` you'll stage all changes in these two files for the next commit. You can extend this option to a higher number of files - If you type `git add <directory>` you'll stage all changes in the directory for the next commit - If you type `git add .` you'll stage **all changes** in your current directory for the next commit. For copy-pasting purposes: - `git add .` #### git commit <img width="500" src="https://imgur.com/Dh1vMsm.png"/> By now you already know what this command does, it simply commits the files with changes that you have in the staging area to the local repository. After using the command this window will appear: <img width="600" src="https://imgur.com/Q4BKk6N.png"/> You'll need to write a commit message, click in *ctrl + X* to exit, click on *Y* when asked to save the changes and then click on *Enter* to confirm the commit file name. If you want to **avoid this process**, you can write your commit message directly on the `git commit` command, like this: `git commit -m "commit message"`. In **Step 5** of the [guide to work on the learning materials](https://github.com/LDSSA/ds-prep-course-2021#22---working-on-the-learning-units), we ask you to commit your resolved notebook with the command `git commit -m "Exercises for Week <week number>"`, where you substitute the `<week number>` by the corresponding value. 
Adding an explicit message to the commit will make your life easier down the road to trace back to where you were at the time. **Other useful command options:** - If you type `git commit -a` you'll, for every file that’s currently in the index, check to see if the workspace copy is different, and if so, add that file to the index. Then, all files from the index are committed. - If you type `git commit --amend` you'll modify the last commit and staged changes (added with `git add`) will be added to it. #### git push The `git push` command is used to upload local repository content to a remote repository. Pushing is how you transfer commits from your local repository to a remote repo. After a local repository has been modified, a push is executed to share the modifications with remote team members. In **Step 5** of the [guide to work on the learning materials](https://github.com/LDSSA/ds-prep-course-2021#22---working-on-the-learning-units), you finish with the `git push` command to send all your changes to your working repository. #### git pull The `git pull` command is used to fetch and download content from a remote repository and immediately update the local repository to match that content. It’s an easy way to synchronize your local repository with upstream changes. In a way, the opposite of the `git push` command. We asked in [the guide of week 00](https://github.com/LDSSA/ds-prep-course-2021#21-weekly-setup---get-the-learning-materials) to start the set-up of the learning materials with a `git pull` of the [ds-prep-course-2021](https://github.com/LDSSA/ds-prep-course-2021). You already had to do it again for **Week 01 and Week 02** when you followed the [basic workflow to update the learning units](https://github.com/LDSSA/ds-prep-course-2021#3-updates-to-learning-units), so you're already very familiar with this command. Sometimes conflicts can arise when you have a conflicting file(s) with the remote repository, but don't worry about it now. 
We'll learn how to solve those in the next Git SLU! #### git log The `git log` command is a command to access the commit history of the local repo. It's a very important command which allows reverting to a previous commit, something that you'll also learn to do in the next Git SLU! For now, you can see what the output of a `git log` command looks like: <img width="500" src="https://imgur.com/Ys9IF79.png"/> **Other useful command options:** - If you type `git log -n <limit>` you'll limit the number of commits shown. For example, `git log -n 3` will display only the last 3 commits. - If you type `git log --stat`, the command output will include the information of which files were altered and the relative number of lines that were added or deleted from each of them, along with the normal information displayed by `git log`. - If you type `git log --author="<pattern>"` you can filter the commits, only viewing the commits of a particular author. For those of you that are familiar with regular expressions, you can also use a regular expression instead of a string. - If you type `git log --grep="<pattern>"` you can again filter the commits, this time by searching for a specific pattern in the commit. One example would be to filter commits that mention a specific detail you're interested in. Again, you can search for a string or use a regular expression. - If you type `git log <file>` you'll only see commits that include the specific file. ## Summary workflow Now you know what each command individually does. The **git status, add and commit** can be chained in a workflow that makes sense. A typical use-case would be: - use `git status` to check for any **Modified files**, - use `git add` to stage the Modified files, then `git status` again to check that those files are now Staged - use `git commit` to commit them. - finally, use `git push` to send all our changes to the remote repository So now you know how to chain these commands in a way that makes sense! 
With experience, you may end up dropping one or both of the `git status` commands as you'll intuitively know which states your files are in, but it's always ok to do a check once in a while. <img width="500" src="https://imgs.xkcd.com/comics/git_2x.png"/> So that's it! Hope you've enjoyed this Learning Notebook and please take your time to assimilate all the concepts. The Exercise Notebook consists of multiple choice questions, so it's really important that you have a good grasp of everything before starting it. **Good luck!**
github_jupyter
# Python 1 - Introduction to Python <br/> <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Learning Objectives </h2> </div> <ul> <li> Get used to the IPython Notebook. </li> <li> Understand what a library is and how to use it.</li> <li> Read tabular data from a file into a program.</li> <li> Assign values to variables.</li> <li> Select individual values and subsections from data.</li> <li> Perform operations on arrays of data.</li> <li> Display simple graphs.</li> </ul> </section> ## IPython Notebook IPython notebook is a web based interactive computational environment with code, text, mathematics, plots and execution all in the same place. It's how we'll be doing the whole course, though we wouldn't use it for more script based programming, for that we suggest you use Spyder or your pet text editor. A few shortcuts to make your life easier: - ctrl + enter - Run the contents of this cell - B - moves out from inside this cell and selects the entire cell - esc - moves from this window to select the whole box - enter - will move you back in - M - changes the whole box to markdown - Y - changes the whole box to code ## Libraries Words are useful, but what’s more useful are the sentences and stories we build with them. Similarly, while a lot of powerful tools are built into languages like Python, even more live in the libraries they are used to build. Numpy is one of the essential libraries we use in Python. We can import it like so: ``` from __future__ import division, print_function import numpy ``` We're going to demonstrate how to use the NumPy library with some statistics on arthritis patient inflammation. We can import csv files, where the data currently is, like so: ``` p_data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') ``` So let's examine this. By typing `numpy.loadtxt()` we're saying, from the numpy library use the loadtxt function. 
It is the equivalent of selecting a file from a directory, or saying thing.component, for example, car.tyres might say, from the car I would like to inspect the tyres. `loadtxt()` is a function call, which in this case has two arguments passed to it. The filename and the delimiter, both need to be input as character strings. ### Strings Character strings are sequences of characters enclosed in either `""` or `''`. We typically use these in keyword argument calls to functions, as above, and output to the console, more of which later. Now if we type the variable the file is attached to into the interpreter, we see the data held within it as an array, with the delimiter `,` separating all the values. ``` p_data ``` ### Variables The fact that this array is saved to a variable means that the array is stored in the computer's memory. Variables are an essential part of programming. Let's look at them in more detail. We can assign values to variables ``` temp = 34 ``` Reassign them ``` temp = 37 ``` Do some fancy printing with them, throwing in a bit of string usage too. <br/> <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Print Formatting </h2> </div> <br/> We use `.format` at the end of a string to insert variables which are not strings into a string. If we have multiple items to insert then we use `{0}, {1}, {2}... etc` and order them accordingly in the format `()`. 
</section> ``` print("The temperature of this spam is {0} please take it back".format(temp)) ``` We can also use variables to define other variables ``` temp_K = temp + 273 temp_K ``` and then we can change the temperature in Kelvin ``` temp_K = temp_K + 10 temp_K ``` Note that changing the temperature in Kelvin does not change the previous temperature we used to calculate it in the first place ``` temp ``` <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Tip </h2> </div> <br/> You can use the `%whos` command at any time to see what variables you have created and what modules you have loaded into the computer's memory. As this is an IPython command, it will only work if you are in an IPython terminal or the Jupyter Notebook. </section> ``` %whos ``` ## Arrays Now back to the patients. We have the patient information stored in memory as the variable `p_data`, and we can check it's still there with `type`, let's find out more about the array. ``` print("The data from the file is now {0} and has attributes {1}".format(type(p_data), p_data.shape)) ``` These two commands tell us that the variable is a NumPy array and then the extent of the array. In this case the rows are individual patients and the columns are their daily inflammation measurements. In this case we have 60 rows and 40 columns, known as the dimensions of the array. Using these attributes we can index the data to extract single data values: ``` print(p_data[12,15]) print(p_data[0,0]) ``` <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Indexing </h2> </div> <br/> What may also surprise you is that when Python displays an array, it shows the element with index [0, 0] in the upper left corner rather than the lower left. This is consistent with the way mathematicians draw matrices, but different from the Cartesian coordinates. 
The indices are (row, column) instead of (column, row) for the same reason, which can be confusing when plotting data. </section> We can also index a specific row or column. This can be done by using a colon in our indexing. For example, `p_data[:,15]` will give us column 15: ``` p_data[:,15] ``` and `p_data[15,:]` gives us row 15. The `:` says give me all the elements in this domain. ``` p_data[15,:] ``` Thus we can make the domain smaller and essentially crop the array by using the colon between the two limits: ``` crop_arr = p_data[10:20,10:20] ``` Arrays also know how to perform common mathematical operations on their values. The simplest operations with data are arithmetic: add, subtract, multiply, and divide. When you do such operations on arrays, the operation is done on each individual element of the array. Thus: ``` double_data = crop_arr * 2.0 ``` This will create a new array whose elements have the value of two times the value of the corresponding elements in `crop_arr`. However, if instead of taking an array and doing arithmetic with a single value (as above) you do the arithmetic operation with another array of the same shape, the operation will be done on corresponding elements of the two arrays. Thus: ``` trip_data = double_data + crop_arr trip_data ``` ### Arrays and Statistics Often, we want to do more than add, subtract, multiply, and divide values of data. Arrays also know how to do more complex operations on their values. If we want to find the average inflammation for all patients on all days, for example, we can just ask the array for its mean value ``` print(p_data.mean()) ``` `mean` is a method of the array, i.e., a function that belongs to it in the same way that the member `shape` does. If variables are nouns, methods are verbs: they are what the thing in question knows how to do. We need empty parentheses for `data.mean()`, even when we’re not passing in any parameters, to tell Python to go and do something for us. 
`data.shape` doesn’t need `()` because it is just a description but `data.mean()` requires the `()` because it is an action. ``` print( 'maximum inflammation:', p_data.max()) print('minimum inflammation:', p_data.min()) print('standard deviation:', p_data.std()) ``` When analyzing data, though, we often want to look at partial statistics, such as the maximum value per patient or the average value per day. One way to do this is to create a new temporary array of the data we want, then ask it to do the calculation: ``` patient_0 = p_data[0, :] # 0 on the first axis, everything on the second print( 'maximum inflammation for patient 0:', patient_0.max()) ``` We don’t actually need to store the row in a variable of its own. Instead, we can combine the selection and the method call: ``` print('maximum inflammation for patient 2:', p_data[2, :].max()) ``` What if we need the maximum inflammation for all patients, or the average for each day? In this case we need to average across an 'axis' of the array i.e. in x or y if we consider the data as a 2D array. To support this, most array methods allow us to specify the axis we want to work on. If we ask for the average across axis 0 (rows in our 2D example), we get: ``` print(p_data.mean(axis=0)) ``` As a quick check, we can ask this array what its shape is: ``` print(p_data.mean(axis=0).shape) ``` The expression `(40,)` tells us we have an N×1 vector, so this is the average inflammation per day for all patients. If we average across axis 1 (columns in our 2D example), we get: ``` print(p_data.mean(axis=1)) ``` which is the average inflammation per patient across all days. ## Matplotlib The mathematician Richard Hamming once said, “The purpose of computing is insight, not numbers,” and the best way to develop insight is often to visualize data. Visualization deserves an entire lecture (or course) of its own, but we can explore a few features of Python’s `matplotlib` library here. 
While there is no “official” plotting library, this package is the de facto standard. First, we will import the `pyplot` module from `matplotlib` and use two of its functions to create and display a heat map of our data: <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Inline with IPythonNB </h2> </div> <br/> If you’re using an IPython / Jupyter notebook, you’ll need to execute the following command in order for your matplotlib images to appear in the notebook when `show()` is called: `%matplotlib inline` The `%` indicates an IPython magic function - a function that is only valid within the notebook environment. Note that you only have to execute this function once per notebook. </section> ``` import matplotlib.pyplot image = matplotlib.pyplot.imshow(p_data) matplotlib.pyplot.show(image) ``` Let’s take a look at the average inflammation over time: ``` ave_infl = p_data.mean(axis=0) ave_plot = matplotlib.pyplot.plot(ave_infl) matplotlib.pyplot.show(ave_plot) ``` Here, we have put the average per day across all patients in the variable `ave_infl`, then asked `matplotlib.pyplot` to create and display a line graph of those values. The result is roughly a linear rise and fall, which is suspicious: based on other studies, we expect a sharper rise and slower fall. Let’s have a look at two other statistics: ``` max_plot = matplotlib.pyplot.plot(p_data.max(axis=0)) matplotlib.pyplot.show(max_plot) min_plot = matplotlib.pyplot.plot(p_data.min(axis=0)) matplotlib.pyplot.show(min_plot) ``` The maximum value rises and falls perfectly smoothly, while the minimum seems to be a step function. Neither result seems particularly likely, so either there’s a mistake in our calculations or something is wrong with our data. ### Subplots You can group similar plots in a single figure using subplots. The script below uses a number of new commands. 
The function `matplotlib.pyplot.figure()` creates a space into which we will place all of our plots. The parameter `figsize` tells Python how big to make this space. Each subplot is placed into the figure using the `subplot` command. The `subplot` command takes 3 parameters. The first denotes how many total rows of subplots there are, the second parameter refers to the total number of subplot columns, and the final parameter denotes which subplot your variable is referencing. Each `subplot` is stored in a different variable (axes1, axes2, axes3). Once a subplot is created, the axes can be labelled using the `set_xlabel()` command (or `set_ylabel()`). Here are our three plots side by side: ``` import numpy import matplotlib.pyplot data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0)) axes1 = fig.add_subplot(1, 3, 1) axes2 = fig.add_subplot(1, 3, 2) axes3 = fig.add_subplot(1, 3, 3) axes1.set_ylabel('average') axes1.plot(data.mean(axis=0)) axes2.set_ylabel('max') axes2.plot(data.max(axis=0)) axes3.set_ylabel('min') axes3.plot(data.min(axis=0)) fig.tight_layout() matplotlib.pyplot.show(fig) ``` The call to `loadtxt` reads our data, and the rest of the program tells the plotting library how large we want the figure to be, that we’re creating three sub-plots, what to draw for each one, and that we want a tight layout. (Perversely, if we leave out that call to `fig.tight_layout()`, the graphs will actually be squeezed together more closely.) <section class="objectives panel panel-warning"> <div class="panel-heading"> <h2><span class="fa fa-certificate"></span> Scientists dislike typing </h2> </div> <br/> We will always use the syntax `import numpy` to import NumPy. However, in order to save typing, it is often suggested to make a shortcut like so: `import numpy as np`. 
If you ever see Python code online using a NumPy function with `np` (for example, `np.loadtxt(...)`), it’s because they’ve used this shortcut. </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Make your own plot </h2> </div> <br/> What does the following program print out? <pre> first, second = 'Grace', 'Hopper' third, fourth = second, first print(third, fourth) </pre> </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Slicing strings </h2> </div> <br/> A section of an array is called a slice. We can take slices of character strings as well: <pre> element = 'oxygen' print('first three characters:', element[0:3]) print('last three characters:', element[3:6]) </pre> What is the value of element[:4]? What about element[4:]? Or element[:]? What is element[-1]? What is element[-2]? Given those answers, explain what element[1:-1] does. </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Thin Slices </h2> </div> <br/> The expression element[3:3] produces an empty string, i.e., a string that contains no characters. If data holds our array of patient data, what does data[3:3, 4:4] produce? What about data[3:3, :]? </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Check your understanding: plot scaling </h2> </div> <br/> Why do all of our plots stop just short of the upper end of our graph? </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Check your understanding: drawing straight lines </h2> </div> <br/> Why are the vertical lines in our plot of the minimum inflammation per day not perfectly vertical? 
</section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Make your own plot </h2> </div> <br/> Create a plot showing the standard deviation (`numpy.std`) of the inflammation data for each day across all patients. </section> <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Moving plots around </h2> </div> <br/> Modify the program to display the three plots on top of one another instead of side by side. </section>
github_jupyter
# Text classification process using Toloka running at Airflow Toloka offers a library of Airflow-integrated functions to facilitate crowdsourcing. This example illustrates how one can build the whole project using these blocks. This library provides Airflow tasks for Toloka. You can connect tasks by passing one task's result to another as argument. For more details see Airflow docs. Airflow can be run either locally or in docker with possibility for distribution. We recommend to use docker, and it will be used in our example. To get acquainted with Toloka tools for free, you can use the promo code **TOLOKAKIT1** on $20 on your [profile page](https://toloka.yandex.com/requester/profile?utm_source=github&utm_medium=site&utm_campaign=tolokakit) after registration. ## Airflow configuration First, you need to do basic configuration for running Airflow in docker. Follow the instruction below. If you have any troubles, see [docs](https://airflow.apache.org/docs/apache-airflow/stable/start/docker.html). 1. Install [Docker Community Edition (CE)](https://docs.docker.com/engine/install/) on your workstation. Depending on the OS, you may need to configure your Docker instance to use 4.00 GB of memory for all containers to run properly. Please refer to the Resources section if using [Docker for Windows](https://docs.docker.com/desktop/windows/) or [Docker for Mac](https://docs.docker.com/desktop/mac/) for more information. 2. Install [Docker Compose v1.29.1](https://docs.docker.com/compose/install/) and newer on your workstation. > Default amount of memory available for Docker on MacOS is often not enough to get Airflow up and running. **If enough memory is not allocated, it might lead to airflow webserver continuously restarting. You should at least allocate 4GB memory for the Docker Engine (ideally 8GB). You can check and change the amount of memory in [Resources](https://docs.docker.com/desktop/mac/)** 3. Create airflow folder ``` mkdir airflow && cd airflow ``` 4. 
Fetch [docker-compose.yaml](https://airflow.apache.org/docs/apache-airflow/stable/docker-compose.yaml) ``` curl -LfO 'https://airflow.apache.org/docs/apache-airflow/2.2.4/docker-compose.yaml' ``` 5. Create custom Dockerfile with commands for installation required packages: ``` FROM apache/airflow:2.2.4-python3.9 RUN pip install --no-cache-dir airflow-provider-toloka RUN pip install --no-cache-dir toloka-kit RUN pip install --no-cache-dir crowd-kit RUN pip install --no-cache-dir pandas ``` or can find Dockerfile in our files. To build custom docker-image invoke the command below: ``` docker build . -f Dockerfile --tag toloka-airflow-image ``` 6. Set docker image name in console ``` echo "AIRFLOW_IMAGE_NAME=toloka-airflow-image:latest" >> .env ``` or replace it in docker-compose file ``` ✕ image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.2.4} ✓ image: ${AIRFLOW_IMAGE_NAME:-toloka-airflow-image:latest} ``` 7. On Linux, the quick-start needs to know your host user id and needs to have group id set to 0. Otherwise the files created in dags, logs and plugins will be created with root user. You have to make sure to configure them for the docker-compose: ``` mkdir -p ./dags ./logs ./plugins echo -e "AIRFLOW_UID=$(id -u)" >> .env ``` For other operating systems, you will get warning that AIRFLOW_UID is not set, but you can ignore it. You can also manually create the .env file in the same folder your docker-compose.yaml is placed with this content to get rid of the warning: ``` AIRFLOW_UID=50000 ``` 8. Place file "text_classification.py" in `dags/` folder to let Airflow see it. 9. On all operating systems, you need to run database migrations and create the first user account. To do it, run. ``` docker-compose up airflow-init ``` 10. Now you can start all services: ``` docker-compose up ``` The Airflow will be available at http://localhost:8080/home. Default login and password are "airflow" and "airflow". 
### Store Toloka Credentials To use Toloka from Airflow you should pass a Toloka OAuth token to it. If you do not have it yet, you can obtain it from your Toloka account at the `Profile / External Services Integration` page by clicking at `Get OAuth token`. ![Account information](./images/some_account.png) You can store it in Airflow as a connection. ![Variable location](./images/connections.png) Add a new connection at `Admin / Connections` page. Name it `toloka_default` and write Toloka token in password field. Use fernet for proper security, see [docs](https://airflow.apache.org/docs/apache-airflow/stable/security/secrets/fernet.html). ![Token variable](./images/toloka_connection.png) You can choose any connection type if `toloka` doesn't exist. ## Text classification project Configuration is finished. Here you can see the whole "text_classification.py". ```python from datetime import timedelta import json from airflow.decorators import dag, task from airflow.utils.dates import days_ago import toloka_provider.tasks.toloka as tlk_tasks default_args = { 'owner': 'airflow', 'start_date': days_ago(5), 'retries': 0, } @dag(default_args=default_args, schedule_interval=None, catchup=False, tags=['example']) def text_classification(): @task def download_json(url): """Download and parse json config stored at given url.""" import requests response = requests.get(url) response.raise_for_status() return response.json() @task(multiple_outputs=True) def prepare_datasets(unlabeled_url: str, labeled_url: str): from sklearn.model_selection import train_test_split import pandas as pd labeled = pd.read_csv(labeled_url) labeled, exam_tasks = train_test_split(labeled, test_size=10, stratify=labeled.category) _, honeypots = train_test_split(labeled, test_size=20, stratify=labeled.category) main_tasks = pd.read_csv(unlabeled_url).sample(n=100) return { 'main_tasks': main_tasks.to_json(), 'exam_tasks': exam_tasks.to_json(), 'honeypots': honeypots.to_json() } @task def 
prepare_tasks(main_tasks): main_tasks = json.loads(main_tasks) return [{'input_values': {'headline': headline}} for headline in main_tasks['headline'].values()] @task def prepare_exam_tasks(exam_tasks): exam_tasks = json.loads(exam_tasks) return [{'input_values': {'headline': headline}, 'known_solutions': [{'output_values': {'category': category}}], 'message_on_unknown_solution': category} for headline, category in zip(exam_tasks['headline'].values(), exam_tasks['category'].values())] @task def prepare_honeypots(honeypots): honeypots = json.loads(honeypots) return [{'input_values': {'headline': headline}, 'known_solutions': [{'output_values': {'category': category}}]} for headline, category in zip(honeypots['headline'].values(), honeypots['category'].values())] @task def aggregate_assignments(assignments): from crowdkit.aggregation import DawidSkene from toloka.client import structure, Assignment import pandas as pd assignments = [Assignment.from_json(assignment) for assignment in assignments] tasks = [] labels = [] performers = [] for assignment in assignments: for task, solution in zip(assignment.tasks, assignment.solutions): tasks.append(task.input_values['headline']) labels.append(solution.output_values['category']) performers.append(assignment.user_id) assignments = { 'task': tasks, 'performer': performers, 'label': labels } assignments = pd.DataFrame.from_dict(assignments) df = DawidSkene(n_iter=20).fit_predict(assignments).to_frame().reset_index() df.columns = ['headline', 'category'] print('RESULT', df) project_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/project.json') exam_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/exam.json') pool_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/pool.json') project = 
tlk_tasks.create_project(project_conf) exam = tlk_tasks.create_exam_pool(exam_conf, project=project) pool = tlk_tasks.create_pool(pool_conf, project=project, exam_pool=exam, expiration=timedelta(days=1)) dataset = prepare_datasets( unlabeled_url='https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/data/not_known.csv', labeled_url='https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/data/known.csv', ) main_tasks, exam_tasks, honeypots = dataset['main_tasks'], dataset['exam_tasks'], dataset['honeypots'] tasks = prepare_tasks(main_tasks) exam_tasks = prepare_exam_tasks(exam_tasks) honeypots = prepare_honeypots(honeypots) _exam_upload = tlk_tasks.create_tasks(exam_tasks, pool=exam, additional_args={'open_pool': True, 'allow_defaults': True}) _honeypots_upload = tlk_tasks.create_tasks(honeypots, pool=pool, additional_args={'allow_defaults': True}) _tasks_upload = tlk_tasks.create_tasks(tasks, pool=pool, additional_args={'allow_defaults': True}) opened_pool = tlk_tasks.open_pool(pool) _waiting = tlk_tasks.wait_pool(opened_pool) assignments = tlk_tasks.get_assignments(pool, 'ACCEPTED') aggregate_assignments(assignments) [_exam_upload, _honeypots_upload, _tasks_upload] >> opened_pool _waiting >> assignments dag = text_classification() ``` Then if you did everything all right and ran docker properly you can go to http://localhost:8080/home and see `text_classification` dag that can be run. ![run button location](./images/run_button.png) Then you can see statuses of tasks in graph tab. ![dag representation](./images/dag_representation.png) When "aggregate_assignments" will be green it means that pipeline is finished. You can click on `aggregate_assignments` then clink on `Log` and you will see some results of aggregation at the bottom of logs. ### Step by step explanation. Basic configuration. `dag` decorator defines airflow dag. 
```python default_args = { 'owner': 'airflow', 'start_date': days_ago(5), 'retries': 0, } @dag(default_args=default_args, schedule_interval=None, catchup=False, tags=['example']) def text_classification(): ... ``` This code downloads json configuration for project and pools. We use it to not create project and pools by our own. ```python @task def download_json(url): """Download and parse json config stored at given url.""" import requests response = requests.get(url) response.raise_for_status() return response.json() ``` Here are some user-defined functions to prepare input data. ```python @task(multiple_outputs=True) def prepare_datasets(unlabeled_url: str, labeled_url: str): ... @task def prepare_tasks(main_tasks): ... @task def prepare_exam_tasks(exam_tasks): ... @task def prepare_honeypots(honeypots): ... ``` `aggregate_assignments` function aggregates results by DawidSkene method. Results will be printed in airflow logs. To aggregate the results, we recommend using the methods of the [crowd-kit](https://github.com/Toloka/crowd-kit) package. ```python @task def aggregate_assignments(assignments): from crowdkit.aggregation import DawidSkene from toloka.client import structure, Assignment import pandas as pd assignments = [Assignment.from_json(assignment) for assignment in assignments] tasks = [] labels = [] performers = [] for assignment in assignments: for task, solution in zip(assignment.tasks, assignment.solutions): tasks.append(task.input_values['headline']) labels.append(solution.output_values['category']) performers.append(assignment.user_id) assignments = { 'task': tasks, 'performer': performers, 'label': labels } assignments = pd.DataFrame.from_dict(assignments) df = DawidSkene(n_iter=20).fit_predict(assignments).to_frame().reset_index() df.columns = ['headline', 'category'] print('RESULT', df) ``` Here we define the topology of our pipeline. 
```python project_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/project.json') exam_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/exam.json') pool_conf = download_json( 'https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/configs/pool.json') project = tlk_tasks.create_project(project_conf) exam = tlk_tasks.create_exam_pool(exam_conf, project=project) pool = tlk_tasks.create_pool(pool_conf, project=project, exam_pool=exam, expiration=timedelta(days=1)) dataset = prepare_datasets( unlabeled_url='https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/data/not_known.csv', labeled_url='https://raw.githubusercontent.com/Toloka/airflow-provider-toloka/main/toloka_provider/example_dags/data/known.csv', ) main_tasks, exam_tasks, honeypots = dataset['main_tasks'], dataset['exam_tasks'], dataset['honeypots'] tasks = prepare_tasks(main_tasks) exam_tasks = prepare_exam_tasks(exam_tasks) honeypots = prepare_honeypots(honeypots) _exam_upload = tlk_tasks.create_tasks(exam_tasks, pool=exam, additional_args={'open_pool': True, 'allow_defaults': True}) _honeypots_upload = tlk_tasks.create_tasks(honeypots, pool=pool, additional_args={'allow_defaults': True}) _tasks_upload = tlk_tasks.create_tasks(tasks, pool=pool, additional_args={'allow_defaults': True}) opened_pool = tlk_tasks.open_pool(pool) _waiting = tlk_tasks.wait_pool(opened_pool) assignments = tlk_tasks.get_assignments(pool, 'ACCEPTED') aggregate_assignments(assignments) [_exam_upload, _honeypots_upload, _tasks_upload] >> opened_pool _waiting >> assignments ``` ### What's next? It was just an example of creating a workflow on Airflow with Toloka. 
Further it can be expanded in the following directions: * For production version you can configure custom XCom backend that lets pass heavy data across tasks. See example in `custom_xcom_backend.py`. * You can [schedule your flow runs with Timetables](https://airflow.apache.org/docs/apache-airflow/stable/howto/timetable.html). * You can configure [Logging and Monitoring](https://airflow.apache.org/docs/apache-airflow/stable/logging-monitoring/logging-architecture.html). * Apache Airflow aims to be a very Kubernetes-friendly project, you can run Airflow from within a Kubernetes cluster in order to take advantage of the increased stability and autoscaling options that Kubernetes provides, see [docs](https://airflow.apache.org/docs/apache-airflow/stable/kubernetes.html). * And, of course, you can build much more advanced process with crowdsourcing using Toloka.
github_jupyter
# Reading histograms One of the most common tasks will be translating [ROOT](https://root.cern.ch) histograms into the HEPData format. `hepdata_lib` will help you with that, and this notebook will demonstrate how to do that. As explained in the [Getting started notebook](Getting_started.ipynb), a `Submission` needs to exist or be created. Here, we'll just create one without any additional information: ``` from hepdata_lib import Submission submission = Submission() ``` The plot will be a `Table`, in this example Figure 4a from page 12 (upper left) of the [publication](https://cms-results.web.cern.ch/cms-results/public-results/publications/B2G-17-009/index.html), which shows the distribution of the reconstructed B quark mass for the data as well as the individual simulated processes. Let's add all this, some more details as well as the actual plot (for thumbnail creation) to the `Table`: ``` from hepdata_lib import Table table = Table("Figure 4a") table.description = "Distribution in the reconstructed B quark mass, after applying all selections to events with no forward jet, compared to the background distributions estimated before fitting. The plot refers to the low-mass mB analysis. The expectations for signal MC events are given by the blue histogram lines. Different contributions to background are indicated by the colour-filled histograms. The grey-hatched error band shows total uncertainties in the background expectation. The ratio of observations to background expectations is given in the lower panel, together with the total uncertainties prior to fitting, indicated by the grey-hatched band." table.location = "Data from Figure 4 (upper left), located on page 12." 
table.keywords["observables"] = ["N"] table.add_image("example_inputs/CMS-B2G-17-009_Figure_004-a.pdf") ``` The individual plot components are stored in different ROOT files, one for the individual background processes (one histogram per process plus the total), another one for the data, and the third for the signal process. All histograms here are of type [TH1](https://root.cern.ch/doc/master/classTH1.html), but you can also read in 2-dimensional [TH2](https://root.cern.ch/doc/master/classTH2.html) using `read_hist_2d(...)` instead of `read_hist_1d(...)`: ``` from hepdata_lib import RootFileReader reader = RootFileReader("example_inputs/mlfit_lm_1000.root") reader_data = RootFileReader("example_inputs/Data_cat0_singleH.root") reader_signal = RootFileReader("example_inputs/BprimeBToHB1000_cat0_singleH.root") TotalBackground = reader.read_hist_1d("shapes_prefit/cat0_singleH/total_background") TT = reader.read_hist_1d("shapes_prefit/cat0_singleH/TT") QCD = reader.read_hist_1d("shapes_prefit/cat0_singleH/QCDTT") WJets = reader.read_hist_1d("shapes_prefit/cat0_singleH/WJets") ZJets = reader.read_hist_1d("shapes_prefit/cat0_singleH/ZJets") Data = reader_data.read_hist_1d("h_bprimemass_SRlm") signal = reader_signal.read_hist_1d("h_bprimemass_SRlm") ``` The content of the histograms is stored as a dictionary, with keys `x` (bin center), `y` (bin value or for `TH2` the bin center of the 2nd dimension), `z` (`TH2` only: bin value), as well as the bin errors `dy` (`dz` for `TH2`). Furthermore, the lower and upper bin edges (`x_edges`, for `TH2` also `y_edges`) are stored for each bin: ``` TotalBackground.keys() ``` The `RootFileReader` automatically recognises if the histogram has symmetric or assymmetric errors based on [TH1::GetBinErrorOption()](https://root.cern.ch/doc/master/classTH1.html#ac6e38c12259ab72c0d574614ee5a61c7). 
Symmetric errors are returned if this returns `TH1::kNormal`, in this case (as for the example here) the errors are a plain `float` per bin, otherwise a `tuple` of `float`. The bin edges are always stored as `tuple`: ``` from __future__ import print_function for key in TotalBackground.keys(): print(key, type(TotalBackground[key]), type(TotalBackground[key][0])) ``` Now define the variables: ``` from hepdata_lib import Variable, Uncertainty # x-axis: B quark mass mmed = Variable("$M_{bH}$", is_independent=True, is_binned=False, units="GeV") mmed.values = signal["x"] # y-axis: N events sig = Variable("Number of signal events", is_independent=False, is_binned=False, units="") sig.values = signal["y"] totalbackground = Variable("Number of background events", is_independent=False, is_binned=False, units="") totalbackground.values = TotalBackground["y"] tt = Variable("Number of ttbar events", is_independent=False, is_binned=False, units="") tt.values = TT["y"] qcd = Variable("Number of qcd events", is_independent=False, is_binned=False, units="") qcd.values = QCD["y"] wjets = Variable("Number of wjets events", is_independent=False, is_binned=False, units="") wjets.values = WJets["y"] zjets = Variable("Number of zjets events", is_independent=False, is_binned=False, units="") zjets.values = ZJets["y"] data = Variable("Number of data events", is_independent=False, is_binned=False, units="") data.values = Data["y"] ``` For the data as well as the background total, we will also provide the associated uncertainties: ``` from hepdata_lib import Uncertainty unc_totalbackground = Uncertainty("total uncertainty", is_symmetric=True) unc_totalbackground.values = TotalBackground["dy"] unc_data = Uncertainty("Poisson errors", is_symmetric=True) unc_data.values = Data["dy"] totalbackground.add_uncertainty(unc_totalbackground) data.add_uncertainty(unc_data) ``` Now we can add the variables to the `Table` and the `Table` to the `Submission`, and create the files. 
Please refer to the [Getting started notebook](Getting_started.ipynb) for a complete example. ``` table.add_variable(mmed) table.add_variable(sig) table.add_variable(totalbackground) table.add_variable(tt) table.add_variable(qcd) table.add_variable(zjets) table.add_variable(wjets) table.add_variable(data) submission.add_table(table) submission.create_files("example_output") !head example_output/figure_4a.yaml ```
github_jupyter
# Example: Fast and accurate prediction of the regioselectivity of electrophilic aromatic substitution reactions RegioSQM method protonates all aromatic C–H carbon atoms and identifies those with the lowest free energies in **solvent** using the semiempirical quantum chemical **method** as the most nucleophilic center. As per the Regio2020 version, in this example we are using **xTB GFN1** in **Methanol** Reference - https://doi.org/10.1039/C7SC04156J - https://doi.org/10.1186/s13321-021-00490-7 - https://github.com/jensengroup/regiosqm - https://github.com/NicolaiRee/RegioSQM20 ``` %load_ext autoreload %autoreload 2 %matplotlib inline import logging import sys from tqdm import tqdm tqdm.pandas() # Show progress bars on pandas functions import numpy as np import pandas as pd from IPython.display import SVG from rdkit import Chem from rdkit.Chem import AllChem, PandasTools from rdkit.Chem.Draw import MolsToGridImage, MolToImage, rdMolDraw2D try: import ppqm except ModuleNotFoundError: import pathlib cwd = pathlib.Path().resolve().parent sys.path.append(str(cwd)) import ppqm # Set logging logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger("ppqm").setLevel(logging.INFO) logging.getLogger("xtb").setLevel(logging.INFO) show_progress = True # Set DataFrames visuals PandasTools.RenderImagesInAllDataFrames(images=True) pd.set_option('display.float_format','{:.2f}'.format) ``` ## Define protonation reactions with SMARTS ``` reaction1 = AllChem.ReactionFromSmarts("[C;R;H1:1]=[C,N;R;H1:2]>>[CH2:1][*H+:2]") reaction2 = AllChem.ReactionFromSmarts("[C;R;H1:1]=[C,N;R;H0:2]>>[CH2:1][*+;H0:2]") reaction1 reaction2 ``` ## Define a molecule you like ``` smiles = "Cc1cc(NCCO)nc(-c2ccc(Br)cc2)n1" # CHEMBL1956589 molobj = Chem.MolFromSmiles(smiles) molobj Chem.Kekulize(molobj, clearAromaticFlags=True) ``` ## Protonate all aromatic carbons ``` def get_target_atoms(molobj, target): """ Find target atom indices from SMART """ atoms = 
molobj.GetSubstructMatches(target) # convert tuple of tuple to one-dimensional list atoms = [element for tupl in atoms for element in tupl] return atoms molobjs = [] target_atoms = [] smarts_1 = Chem.MolFromSmarts("[C;R;H1:1]=[C,N;R;H1:2]") smarts_2 = Chem.MolFromSmarts("[C;R;H1:1]=[C,N;R;H0:2]") atoms_1 = get_target_atoms(molobj, smarts_1) atoms_2 = get_target_atoms(molobj, smarts_2) i = 0 products_1 = reaction1.RunReactants((molobj,)) for x in products_1: molobj_prime = x[0] smiles = Chem.MolToSmiles(molobj_prime) smiles = smiles.replace("NH2+", "N+") molobj_prime = Chem.MolFromSmiles(smiles) molobjs.append(molobj_prime) target_atoms.append(atoms_1[i]) i += 1 isav = i products_2 = reaction2.RunReactants((molobj,)) for x in products_2: molobj_prime = x[0] smiles = Chem.MolToSmiles(molobj_prime) smiles = smiles.replace("NH2+", "N+") molobj_prime = Chem.MolFromSmiles(smiles) molobjs.append(molobj_prime) target_atoms.append(atoms_2[2 * (i - isav) - 2]) i += 1 MolsToGridImage( molobjs, molsPerRow=3, subImgSize=(250, 250), useSVG=True, ) [Chem.MolToSmiles(m) for m in molobjs] ``` ## Now let's find out which are most stable using quantum chemistry ``` df = pd.DataFrame(molobjs, columns=["molobj"]) df["atom_index"] = target_atoms df xtb = ppqm.xtb.XtbCalculator(scr="_tmp_directory_", n_cores=2, cmd="xtb", show_progress=False) xtb ``` Let's define a function that we can map onto a pandas DataFrame on each row. We want to calculate the energy for each site which requires some conformer expansion. We are only interested in the lowest energy per conformer. 
``` def calculate_energy(molobj): """ For each protonated molecule - Generate conformers (max 20 conformers) - Minimize all conformers - Get the energy for each conformer - Return the lowest energy """ xtb_options = { "gfn": 1, "alpb": "Methanol", "opt": None, } # Generate conformers molobj = ppqm.tasks.generate_conformers(molobj, max_conformers=20) # Optimize with xTB results = xtb.calculate(molobj, xtb_options) # Collect energies and find lowest conformer_energies = [result["scc_energy"] for result in results] min_energy = np.min(conformer_energies) min_energy *= ppqm.units.hartree_to_kcalmol return min_energy # example usage: reference_energy = calculate_energy(molobj) df["energy"] = df["molobj"].progress_apply(calculate_energy) df["rel_energy"] = df["energy"].values - np.min(df["energy"].values) df # Define energy cutoffs cutoff1 = 1.0 # kcal/mol cutoff2 = 3.0 # kcal/mol # Define pretty colors colors = dict() colors["green"] = (119, 198, 110) colors["green"] = tuple(x/255 for x in colors["green"]) colors["red"] = (201, 43, 38) colors["red"] = tuple(x/255 for x in colors["red"]) # Find reactive centers and convert index type to int. 
# rdkit doesn't understand np.int green_indices = df[df["rel_energy"] < cutoff1]["atom_index"].values green_indices = [int(x) for x in green_indices] red_indices = df[df["rel_energy"] < cutoff2]["atom_index"].values red_indices = [int(x) for x in red_indices if x not in green_indices] # All highlights highlights = green_indices + red_indices # Map highlight to a color colormap = dict() colormap.update({key: [colors["green"]] for key in green_indices}) colormap.update({key: [colors["red"]] for key in red_indices}) # should be working, but does not respect colors # MolToImage( # molobj, # highlightAtoms=highlights, # highlightMap=colormap, # size=(500,500), # ) # http://rdkit.blogspot.com/2020/04/new-drawing-options-in-202003-release.html d2d = rdMolDraw2D.MolDraw2DSVG(500, 500) d2d.DrawMoleculeWithHighlights(molobj, "Regioselective site(s)", dict(colormap), {}, {}, {}) d2d.FinishDrawing() SVG(d2d.GetDrawingText()) ```
github_jupyter
### Imports ``` import sys sys.path.append('..') from PAINTeR import connectivity # in-house lib used for the RPN-signature from PAINTeR import plot # in-house lib used for the RPN-signature from PAINTeR import model # in-house lib used for the RPN-signature import numpy as np # hi old friend import pandas as pd from sklearn.preprocessing import StandardScaler from nilearn.connectome import ConnectivityMeasure from matplotlib.colors import ListedColormap from matplotlib.colors import Normalize import matplotlib.pyplot as plt import seaborn as sns sns.set_style("white") from sklearn.linear_model import ElasticNet, Ridge from sklearn.feature_selection import SelectKBest, f_regression from sklearn import preprocessing from sklearn.pipeline import Pipeline from sklearn.model_selection import LeaveOneOut, KFold, GroupKFold, LeavePGroupsOut from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score, explained_variance_score from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_predict from sklearn.model_selection import cross_validate import joblib ``` ### Processing parameters ``` thres_mean_FD = 0.15 # mm scrub_threshold = 0.15 # mm thres_perc_scrub = 30 # % scubbed out ``` ### Load all behavioral data ``` # load bochum data df_bochum = pd.read_csv("../res/bochum_sample_excl.csv") df_essen = pd.read_csv("../res/essen_sample_excl.csv") df_szeged = pd.read_csv("../res/szeged_sample_excl.csv") df_bochum['study']='bochum' df_essen['study']='essen' df_szeged['study']='szeged' df=pd.concat((df_bochum, df_essen, df_szeged), sort=False) df=df.reset_index() df_bochum.HPT.mean() ``` ### Load standardized scrubbed timeseries ``` timeseries = [] perc_scrubbed = [] for i, f in enumerate(df['ts_file']): f = '..' + f.split('/..')[1] f_scrub = f.split('.tsv')[0] + '-scrubbed.tsv' ts = pd.read_csv(f_scrub).iloc[:,1:] # here we can omit global signal... fd_file = df["fd_file"].values[i] fd_file = '..' 
+ fd_file.split('/..')[1] fd = pd.read_csv(fd_file).values.ravel().tolist() fd = [0] + fd perc_scrubbed.append(100 - 100*len(ts.shape)/len(fd) ) timeseries.append(ts.values) #region names labels=ts.columns.values l = pd.read_csv('../data/atlas_relabeled.tsv', sep="\t") modules=np.insert(l['modules'].values, 0, "GlobSig") ``` ### Calculate connectivity ``` correlation_measure = ConnectivityMeasure(kind='partial correlation', vectorize=True, discard_diagonal=True) X = correlation_measure.fit_transform(timeseries) # these are the features mat=correlation_measure.mean_ #mat=mat[1:, 1:] #fisrt row and column is global signal mat[range(mat.shape[0]), range(mat.shape[0])] = 0 # zero diag plot.plot_matrix(mat, labels, modules) # create groups to balance-out cross-validations plt.figure(figsize=(12, 0.3)) sns.heatmap([df.study.astype("category").cat.codes.values]).set_title('study center') plt.show() n_szeged = np.sum(df.study == 'szeged') # size of the smallest study n_essen = np.sum(df.study == 'essen') n_bochum = np.sum(df.study == 'bochum') print(n_bochum, n_essen, n_szeged) groups=np.zeros(len(df), dtype=int) g=0 i=0 while i < n_bochum: groups[i] = g #groups[i+1] = g i += 1 g += 1 g=0 i=n_bochum while i < n_bochum+n_essen: groups[i] = g #groups[i+1] = g i += 1 g += 1 g=0 i=n_bochum+n_essen while i < len(df): groups[i] = g i += 1 g += 1 plt.figure(figsize=(12, 0.3)) sns.heatmap([groups]).set_title('groups') plt.show() groups ``` ## Leave-one-study-out: nested ``` class PainSensitivity(): def __init__(self): self.cpt_mean = None self.cpt_std = None self.hpt_mean = None self.hpt_std = None self.mpt_mean = None self.mpt_std = None def _trial_means(self, df): #cpt = np.mean(df[['qst_cpt_2', 'qst_cpt_3', 'qst_cpt_4', 'qst_cpt_5', 'qst_cpt_6']].values, axis=1) #hpt = np.mean(df[['qst_cpt_2', 'qst_cpt_3', 'qst_cpt_4', 'qst_cpt_5', 'qst_cpt_6']].values, axis=1) #mpt = np.log(stats.gmean(df[['qst_mpt_2_pain', 'qst_mpt_2_no_pain', # 'qst_mpt_3_pain', 'qst_mpt_3_no_pain', # 
'qst_mpt_4_pain', 'qst_mpt_4_no_pain', # 'qst_mpt_5_pain', 'qst_mpt_5_no_pain']],axis=1)) #return cpt, hpt, mpt return df.CPT, df.HPT, df.MPT_log_geom def _pain_sens(self, cpt, hpt, mpt): return (+ (cpt-self.cpt_mean)/self.cpt_std\ - (hpt-self.hpt_mean)/self.hpt_std\ - (mpt-self.mpt_mean)/ self.mpt_std) / 3 def calculate(self, df): cpt, hpt, mpt = self._trial_means(df) self.cpt_mean = np.nanmean(cpt) self.cpt_std = np.nanstd(cpt) self.hpt_mean = np.nanmean(hpt) self.hpt_std = np.nanstd(hpt) self.mpt_mean = np.nanmean(mpt) self.mpt_std = np.nanstd(mpt) return self._pain_sens(cpt, hpt, mpt) def transform(self, df): cpt, hpt, mpt = self._trial_means(df) return self._pain_sens(cpt, hpt, mpt) # calculate pooled composite pain sensitivity ps_calc = PainSensitivity() y = ps_calc.calculate(df) def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), #p_grid = {'fsel__k': [10, 50, 100, 200, 500, 700, 1000, 2000, 3000, 4000, 5000, 'all'], 'model__alpha': [.001, .01, .1, 1, 10], 'model__l1_ratio': [0.001, .1, .3, .5, .7, .9, .999] p_grid = {'fsel__k': [25, 100, 500, 1000, 2000, 5000], 'model__alpha': [.001, .005, .01, .05, .1], 'model__l1_ratio': [.999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() outer_cv = LeavePGroupsOut(1) #Leave-one-sudy-out inner_cv = GroupKFold(30) # do 30-fold quasi-balanced splits within the other two studies for hyperparam optimization. 
clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) all_models = [] best_params = [] predicted = np.zeros(len(y)) nested_scores_train = np.zeros(outer_cv.get_n_splits(X, groups=df.study)) nested_scores_test = np.zeros(outer_cv.get_n_splits(X, groups=df.study)) print("model\tinner_cv mean score\touter vc score") i=0 for train, test in outer_cv.split(X, y, groups=df.study): print(test) group_train = groups[train] clf.fit(X[train], y[train], groups=group_train) print(str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X[test], y[test]))) all_models.append(clf.best_estimator_) best_params.append(clf.best_params_) predicted[test] = clf.predict(X[test]) nested_scores_train[i] = clf.best_score_ nested_scores_test[i] = clf.score(X[test], y[test]) i = i+1 print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean())) print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) )) print("Correlation: " + str(np.corrcoef(y, predicted)[0,1])) plot.plot_prediction(y, predicted, sd=True) study='bochum' plot.plot_prediction(y[df.study==study], predicted[df.study==study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), y[df.study==study]))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train[0].mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test[0].mean())) print("Explained Variance: " + str( 1- nested_scores_test[0].mean()/-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), 
y[df.study==study]) )) print("Correlation: " + str(np.corrcoef(y[df.study==study], predicted[df.study==study])[0,1])) study='essen' plot.plot_prediction(y[df.study==study], predicted[df.study==study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), y[df.study==study]))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train[1].mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test[1].mean())) print("Explained Variance: " + str( 1- nested_scores_test[1].mean()/-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), y[df.study==study]) )) print("Correlation: " + str(np.corrcoef(y[df.study==study], predicted[df.study==study])[0,1])) study='szeged' plot.plot_prediction(y[df.study==study], predicted[df.study==study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), y[df.study==study]))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train[2].mean())) print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test[2].mean())) print("Explained Variance: " + str( 1- nested_scores_test[2].mean()/-mean_squared_error(np.repeat(y[df.study==study].mean(), len(y[df.study==study])), y[df.study==study]) )) print("Correlation: " + str(np.corrcoef(y[df.study==study], predicted[df.study==study])[0,1])) ``` ### save finalized models and nested cv predictions ``` # save nested cv-predictions np.savetxt("nested_cv_pred_L1SO.csv", predicted, delimiter=",") # essen+szeged -> bochum joblib.dump(all_models[0], 'model_trained_on_essen+szeged.joblib') # bochum+szeged -> essen joblib.dump(all_models[1], 'model_trained_on_bochum+szeged.joblib') # bochum+essen -> szeged joblib.dump(all_models[2], 'model_trained_on_bochum+essen.joblib') ``` ## Leave-two-study-out ``` 
def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), #p_grid = {'fsel__k': [20, 25, 30, 35, 40, 45, 50, 60, 70, 80], 'model__alpha': [.001, .005, .01, .05, .1, .5], 'model__l1_ratio': [.999] } p_grid = {'fsel__k': [25], 'model__alpha': [.005], 'model__l1_ratio': [.999] } ): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() outer_cv = LeavePGroupsOut(2) #Leave-two-sudy-out inner_cv = LeaveOneOut() # do LeaveOneOut within the study studies for hyperparam optimization. clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) all_models = [] best_params = [] predicted = np.zeros(len(y)) nested_scores_train = np.zeros(outer_cv.get_n_splits(X, groups=df.study)) nested_scores_test = np.zeros(outer_cv.get_n_splits(X, groups=df.study)) print("model\tinner_cv mean score\touter vc score") i=0 for train, test in outer_cv.split(X, y, groups=df.study): print(test) group_train = groups[train] clf.fit(X[train], y[train], groups=group_train) print(str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X[test], y[test]))) all_models.append(clf.best_estimator_) best_params.append(clf.best_params_) predicted[test] += clf.predict(X[test]) # we sum it to later construct an average nested_scores_train[i] = clf.best_score_ nested_scores_test[i] = clf.score(X[test], y[test]) i = i+1 predicted /= 2 # we take the mean prediction from the two predictions we summarised so far in leave-two-out print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean())) print("** Mean Nested 
Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean())) print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) )) print("Correlation: " + str(np.corrcoef(y, predicted)[0,1])) plot.plot_prediction(y, predicted, sd=True) # bochum -> essen + szeged study='bochum' plot.plot_prediction(y[df.study!=study], predicted[df.study!=study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]))) print("Explained Variance: " + str( 1- nested_scores_test[0].mean()/-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]) )) print("Correlation: " + str(np.corrcoef(y[df.study!=study], predicted[df.study!=study])[0,1])) # essen -> bochum + szeged study='essen' plot.plot_prediction(y[df.study!=study], predicted[df.study!=study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]))) print("Explained Variance: " + str( 1- nested_scores_test[0].mean()/-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]) )) print("Correlation: " + str(np.corrcoef(y[df.study!=study], predicted[df.study!=study])[0,1])) # szeged -> essen + bochum study='szeged' plot.plot_prediction(y[df.study!=study], predicted[df.study!=study], sd=True) print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]))) print("Explained Variance: " + str( 1- nested_scores_test[0].mean()/-mean_squared_error(np.repeat(y[df.study!=study].mean(), len(y[df.study!=study])), y[df.study!=study]) )) print("Correlation: " + str(np.corrcoef(y[df.study!=study], predicted[df.study!=study])[0,1])) ``` ## save nested cv prediction ``` # save nested cv-predictions 
np.savetxt("nested_cv_pred_L2SO.csv", predicted, delimiter=",") ``` ## save models ``` # szeged -> bochum + essen joblib.dump(all_models[0], 'model_trained_on_szeged.joblib') # essen -> bochum + szeged joblib.dump(all_models[1], 'model_trained_on_essen.joblib') # bochum -> essen + szeged joblib.dump(all_models[2], 'model_trained_on_bochum.joblib') # RPN-signature ``` # full model with balanced nested cv ``` def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), p_grid = {'fsel__k': [2000], 'model__alpha': [.01], 'model__l1_ratio': [.999] #p_grid = {'fsel__k': [1000, 2000, 5000], 'model__alpha': [.001, .005, .01, .05, .1], 'model__l1_ratio': [.999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() outer_cv = GroupKFold(30) inner_cv = GroupKFold(30) clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) all_models = [] best_params = [] predicted = np.zeros(len(y)) nested_scores_train = np.zeros(outer_cv.get_n_splits(X)) nested_scores_test = np.zeros(outer_cv.get_n_splits(X)) print("model\tinner_cv mean score\touter vc score") i=0 for train, test in outer_cv.split(X, y, groups=groups): group_train = groups[train] clf.fit(X[train], y[train], groups=group_train) print(str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X[test], y[test]))) all_models.append(clf.best_estimator_) best_params.append(clf.best_params_) predicted[test] = clf.predict(X[test]) nested_scores_train[i] = clf.best_score_ nested_scores_test[i] = clf.score(X[test], y[test]) i = i+1 print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean())) print("** Mean Nested Crossvalidation Score 
(outer_cv):\t" + str(nested_scores_test.mean())) print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) )) print("Correlation: " + str(np.corrcoef(y, predicted)[0,1])) plot.plot_prediction(y, predicted, sd=True, covar=[]) # save nested cv-predictions np.savetxt("nested_cv_pred_full_GroupKFold30.csv", predicted, delimiter=",") # finalize and save model def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(), fsel=SelectKBest(f_regression), model=ElasticNet(max_iter=100000), #p_grid = {'fsel__k': [50, 100, 500, 700, 1000, 2000, 5000, 'all'], 'model__alpha': [.001, .005, .01, .05, .1, .5], 'model__l1_ratio': [0.001, .1, .5, .7, .9, .999] p_grid = {'fsel__k': [2000], 'model__alpha': [.01], 'model__l1_ratio': [.999] }): mymodel = Pipeline( [('scaler', scaler), ('fsel', fsel), ('model', model)]) return mymodel, p_grid model, p_grid = pipe_scale_fsel_elnet() cv = GroupKFold(30) clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=cv, scoring="neg_mean_squared_error", verbose=True, return_train_score=False, n_jobs=-1) clf.fit(X, y, groups=groups) print("**** Non-nested analysis ****") print("** Best hyperparameters: " + str(clf.best_params_)) print("** Score on full data as training set:\t" + str(-mean_squared_error(y_pred=clf.best_estimator_.predict(X), y_true=y))) print("** Score on mean as model: " + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y))) print("** Best Non-nested cross-validated score on test:\t" + str(clf.best_score_)) print("XXXXX Explained Variance: " + str( 1 - clf.best_score_ / -mean_squared_error(np.repeat(y.mean(), len(y)), y))) joblib.dump(clf.best_estimator_, 'model_trained_on_all.joblib') cv_pred = cross_val_predict(clf.best_estimator_, X, y, cv=cv, groups=groups, n_jobs=-1) plot.plot_prediction(y, cv_pred, sd=True, covar=[]) #for train_index, test_index in group_kfold.split(X, y, groups): # #print("TRAIN:", train_index, "TEST:", test_index) # 
#print(df.study[train_index].values) # print('test:', df.study[test_index].values) ```
github_jupyter
# 005 Clustering Results * Visualization (Fig1 a, Fig1 c, and Fig S3) * Qauntify hurricane impacts and neighborhood resilience (Table 1) * Contexualize demographics and socioeconmoic characteristics (Table 2) ``` import pandas as pd import numpy as np from collections import Counter import matplotlib.pyplot as plt %matplotlib inline import warnings warnings.filterwarnings("ignore") # Check package versions import types def imports(): for name, val in globals().items(): if isinstance(val, types.ModuleType): yield val.__name__ import pkg_resources root_packages = [i.split('.', 1)[0] for i in list(imports())] for m in pkg_resources.working_set: if m.project_name.lower() in root_packages: print (m.project_name, m.version) ``` # 1. Load clustering result outputs ``` df = pd.read_csv('../../outputs/clustering_resul.csv') # df.head() ``` # 2. Filtering grid cells * with at least 20% residential area and dropping schools and university areas (outliers) ``` # Filtering only grid cells with at least 20% residentail area and dropping schools or university areas res = pd.read_csv('../../data/harris_county_grid_pct_res.csv') df_res = pd.merge(df, res, on='cell_id', how='left') # df_res.to_csv('../../result/clustering_result_grid_level_pct_res_04082020.csv', index=False) df_res = df_res[df_res['pct_res']>0.2] remove_list = ['-95.62,29.78', '-95.34,29.72', '-95.34,29.71', '-95.7,29.85', '-95.27,29.66','-95.28,29.66'] df_res = df_res[~df_res['cell_id'].isin(remove_list)] print len(df_res) # df_res.head(2) ``` # 3. 
Visualize clustering results ``` myarray = df_res.iloc[:,1:55].as_matrix() labels = df_res.iloc[:, 55].as_matrix() # print (myarray.shape) # print (myarray[0][:3]) # print (labels.shape) # print (labels[:3]) labels_N = np.zeros([4,myarray.shape[0]]) for n in range(4): for i in range(len(myarray)): if labels[i]==n: labels_N[n][i]=1 else: labels_N[n][i]=0 meancenter4 = np.zeros([4,myarray.shape[1]]) std4 = np.zeros([4,myarray.shape[1]]) for n in range(len(labels_N)): for i in range(len(myarray[0])): A=[] for j in range(len(myarray)): a=myarray[j][i]*labels_N[n][j] A.append(a) meancenter4[n][i] = sum(A)/sum(labels_N[n]) std4[n][i] = np.std(np.trim_zeros(A)) yymmdd_list = range(54) yymmdd_list_str = ['2017-08-04', '2017-08-05', '2017-08-06', '2017-08-07', '2017-08-08', '2017-08-09', '2017-08-10', '2017-08-11', '2017-08-12', '2017-08-13', '2017-08-14', '2017-08-15', '2017-08-16', '2017-08-17', '2017-08-18', '2017-08-19', '2017-08-20', '2017-08-21', '2017-08-22', '2017-08-23', '2017-08-24', '2017-08-25', '2017-08-26', '2017-08-27', '2017-08-28', '2017-08-29', '2017-08-30', '2017-08-31', '2017-09-01', '2017-09-02', '2017-09-03', '2017-09-04', '2017-09-05', '2017-09-06', '2017-09-07', '2017-09-08', '2017-09-09', '2017-09-10', '2017-09-11', '2017-09-12', '2017-09-13', '2017-09-14', '2017-09-15', '2017-09-16', '2017-09-17', '2017-09-18', '2017-09-19', '2017-09-20', '2017-09-21', '2017-09-22', '2017-09-23', '2017-09-24', '2017-09-25', '2017-09-26'] ``` ### Creat Fig1. 
``` fig = plt.figure(figsize=(10,5)) plt.xticks(yymmdd_list,yymmdd_list_str, rotation=90, fontsize=9) colors = ['crimson', 'darkblue', 'grey', 'dodgerblue'] for i in range(len(myarray)): for n in range(4): if labels[i]==n: plt.plot(yymmdd_list, myarray[i], color=colors[n], alpha=0.1, linewidth=0.1) for i in range(len(labels_N)): plt.plot(yymmdd_list, meancenter4[i], marker='o' ,markersize=3, color=colors[i], linewidth=1.5, label = "%s neighborhoods"%(Counter(labels)[i])) # fill_between x1 = np.arange(len(std4[i])) f1 = meancenter4[i]+std4[i] f2 = meancenter4[i]-std4[i] plt.fill_between(x1, f1, f2, alpha=0.2, color=colors[i]) plt.plot(yymmdd_list, [0]*54, color='Black', linewidth = 1, linestyle='--', alpha=0.6) plt.legend(loc = 'lower left') plt.ylim(-0.75,0.75) plt.grid('off') # plt.savefig('../../figures/Fig1_a.svg', format='svg', dpi=300, bbox_inches='tight') plt.show() ``` ### By group ``` def plot_result_group(g): # plt.xticks(yymmdd_list,yymmdd_list_str, rotation=90, fontsize=9) colors = ['crimson', 'darkblue', 'grey', 'dodgerblue'] for i in range(len(myarray)): for n in range(4): if labels[i]==g: plt.plot(yymmdd_list, myarray[i], color=colors[g], alpha=0.1, linewidth=0.05) for i in range(len(labels_N)): if i == g: plt.plot(yymmdd_list, meancenter4[i], marker='o' ,markersize=3, color=colors[g], linewidth=1.5, label = "%s neighborhoods"%(Counter(labels)[i])) # fill_between x1 = np.arange(len(std4[i])) f1 = meancenter4[i]+std4[i] f2 = meancenter4[i]-std4[i] plt.fill_between(x1, f1, f2, alpha=0.2, color=colors[g]) plt.fill_between(x1, meancenter4[i], alpha=0.6, color='k') plt.plot(yymmdd_list, [0]*54, color='Black', linewidth = 1, linestyle='--', alpha=0.6) plt.legend(loc = 'upper right') plt.ylim(-0.75,0.75) plt.grid(linewidth=0.5, alpha=0.5) fig = plt.figure(figsize=(16,8)) plt.subplot(2,2,1) plot_result_group(2) plt.subplot(2,2,2) plot_result_group(0) plt.subplot(2,2,3) plot_result_group(3) plt.subplot(2,2,4) plot_result_group(1) plt.show() ``` ### Create 
FigS3. ``` fig = plt.figure(figsize=(15,4), dpi=300) plt.xticks(yymmdd_list,yymmdd_list_str, rotation=90, fontsize=9) for i in range(len(myarray)): plt.plot(yymmdd_list, myarray[i], color='grey', alpha=0.2, linewidth=0.2) plt.plot(yymmdd_list, [0]*54, color='Black', linewidth = 1, linestyle='--', alpha=0.6) plt.ylim(-1,1.1) plt.grid('off') # plt.savefig('../../figures/FigS3.svg', format='svg', dpi=300, # bbox_inches='tight') plt.show() # df_res.head() ``` ### Create Fig1. (case study) * Below needs a manual case study to identify neighborhood disaster response and resilience patterns. ``` # dec1: -95.37, 29.72 Riverside terrace (multifamily housing area) c = 90 # -95.4,29.9 Northfield place neighborhood (mixed residential area with lots of auto shops) c= 110 # -95.35,29.69 Greater OST / SOUTH UNION neighborhoods with multifamily (mostly lower income) housings c = 85 # stabel: -95.2,29.66 Pasadena Westside terrace (mix of residential + cormmercial) c=170 # -95.39,30.06 North Spring (c=390) # -95.45,29.82 Central northwest (c=490) mix of residentail + commercial # inc: -95.4,29.98 Imperial green neighborhood (single housing area) # -95.48,29.7 Shenandoah neighborhood (single housing area) # -95.53,29.76 Bunker Hill Village (single housing - like big mansions area) c = 355 # -95.53,29.77 Bunker Hill Village (single housing - like big mansions area) c = 356 # -95.68,29.9 Copper lakes neighborhood c = 500 # -95.76,30.01 Fairfield village south c=570 # dec2: -95.18,29.52 near clear creek c=16 (Forest creek neighborhood) # -95.18,30.04 near Kingwood neighborhood (near river area) # -95.19,29.59 near Turkey creek (southbelt/Ellington neighborhood) # -95.51,30.01 Cypress hill and courthouse area # -95.62,29.78 Reservoir area (shell/BP employee's residential areas) colors = ['crimson', 'darkblue', 'grey', 'dodgerblue'] fig = plt.figure(figsize=(10,5), dpi=300) plt.xticks(yymmdd_list,yymmdd_list_str, rotation=90, fontsize=9) # for i in range(len(myarray)): # 
plt.plot(yymmdd_list, myarray[i], color='grey', alpha=0.2, linewidth=0.2) # c = 356 # plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[0], marker='o', markersize=3, label = tmp.iloc[c,0]) tmp = df_res[df_res['label']==0] c = 356 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[0], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 500 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[0], marker='o', markersize=3, label = tmp.iloc[c,0]) # c = 570 # plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[0], marker='o', markersize=3, label = tmp.iloc[c,0]) tmp = df_res[df_res['label']==1] c = 16 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[1], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 28 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[1], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 3 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[1], marker='o', markersize=3, label = tmp.iloc[c,0]) tmp = df_res[df_res['label']==2] c = 170 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[2], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 390 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[2], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 490 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[2], marker='o', markersize=3, label = tmp.iloc[c,0]) tmp = df_res[df_res['label']==3] c = 90 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[3], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 110 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[3], marker='o', markersize=3, label = tmp.iloc[c,0]) c = 85 plt.plot(yymmdd_list, tmp.iloc[c,1:1+len(yymmdd_list)], color=colors[3], marker='o', markersize=3, label = tmp.iloc[c,0]) plt.plot(yymmdd_list, [0]*54, color='Black', linewidth = 1, linestyle='--', alpha=0.6) 
plt.ylim(-0.8,0.75) plt.grid(linewidth=0.5, alpha=0.5) # plt.ylabel("Community activity change (%)", fontsize=15) # plt.legend(bbox_to_anchor=(1.05,1.05)) # plt.savefig('../image/Fig1_c.svg', format='svg', dpi=300, bbox_inches='tight') plt.show() ``` # 4. Quantify hurricane impact ``` for i in range(4): print (df_res['height'][df_res['label']==i].mean()) print ('-----') for i in range(4): print (df_res[df_res['label']==i]['depth'].mean()) print ('-----') for i in range(4): print (df_res[df_res['label']==i]['auc_2'].mean()) ``` # 5. Contexualize demographic and socioeconomic characteristics ``` # Load demographic and socioeconomic data ch = pd.read_csv('../../data/independent_variables.csv') ch.head(2) # Load grid cell id and census tract id information gr_ct = pd.read_csv('../../data/grid_cell_ct_joined.csv') gr_ct = gr_ct[['cell_id', 'GEOID_Data']] gr_ct.head(2) df_res = pd.merge(df_res, gr_ct, how='left', on='cell_id') re = pd.merge(df_res[['GEOID_Data', 'cell_id','label','auc', 'auc_2', 'depth', 'height']], ch, on='GEOID_Data', how='left') re.head(2) col_list = [ 'pct_pop_age_lt_18', 'pct_pop_age_18_30', 'pct_pop_age_gt_65', 'med_building_age', 'pct_hh_computer', 'pct_pop_disability', 'pct_high_edu', 'pct_low_edu', 'pct_lim_Eng', 'pct_lim_Eng_Spanish', 'pct_lim_Eng_Euro', 'pct_lim_Eng_Asian', 'pct_white', 'pct_black', 'pct_asian', 'pct_other', 'hhi_race', 'pct_hh_food_stamp', 'pct_foreign', 'gini_coeff', 'pct_no_health_insu', 'pct_hh_kids_6', 'pct_hh_kids_6_18', 'pct_hh_alone', 'avg_hh_size', 'pct_hispanic', 'pct_hu_owned', 'pct_hu_mobile', 'pct_hh_income_lt_25k', 'pct_hh_income_25_75k', 'pct_hh_income_gt_75k', 'med_income', 'pct_hh_no_internet', 'pct_hh_poverty', 'med_room', 'pct_pop_unemp', 'pct_hu_vacant', 'pct_hh_vehicle', 'pct_pop_veteran', 'pop_den', 'pct_open_space', 'pct_single_housing', 'pct_multi_housing', 'pct_commercial', 'pct_industrial', 'pct_lower', 'pct_flood', 'dist_shelter', 'prox_road'] for c in col_list: if c in ['med_building_age', 
'pct_hh_computer', 'pct_high_edu', 'pct_low_edu', 'hhi_rage', 'gini_coeff', 'avg_hh_size', 'pct_hu_owned', 'pct_hh_income_lt_25k', 'pct_hh_income_25_75k', 'pct_hh_income_gt_75k', 'med_income', 'pct_hh_no_internet', 'med_room', 'pct_pop_unemp', 'pct_hh_vehicle', 'pop_den', 'pct_single_housing', 'pct_multi_housing', 'pct_commercial', 'dist_shelter', 'prox_road']: re[c] = re[c].replace(np.nan, re[c].mean()) else: re[c] = re[c].replace(np.nan, 0) re['cat_evacuation'][re['cat_evacuation']=='A'] = 1 re['cat_evacuation'][re['cat_evacuation']=='B'] = 1 re['cat_evacuation'][re['cat_evacuation']=='C'] = 1 re['cat_evacuation'][re['cat_evacuation']!=1] = 0 re['cat_evacuation'] = re['cat_evacuation'].astype(float) col_list = col_list + ['cat_evacuation'] ``` ### One-way ANOVA test ``` import scipy.stats as stats anova_value = [] anova_pvalue = [] for col in col_list: print (col) group = ['inc', 'dec2', 'stable', 'dec1'] inc = re[col][re['label']==0].values dec2 = re[col][re['label']==1].values stable = re[col][re['label']==2].values dec1 = re[col][re['label']==3].values print (stats.f_oneway(inc, dec2, stable, dec1)[0].round(4)) anova_value.append(stats.f_oneway(inc, dec2, stable, dec1)[0].round(4)) print (stats.f_oneway(inc, dec2, stable, dec1)[1].round(4)) anova_pvalue.append(stats.f_oneway(inc, dec2, stable, dec1)[1].round(4)) print ('_______________') ``` ### Tukey test ``` import statsmodels.stats.multicomp as multi # np.random.seed(0) # x = np.random.choice(['A','B','C'], 50) # y = np.random.rand(50) # mcDate = multi.MultiComparison(y,x) # Results = mcDate.tukeyhsd() # print (Results) re['label_'] = re['label'].astype(str) re['label_'] = 'Stable' re['label_'][re['label']==0] = 'Increasing' re['label_'][re['label']==1] = 'Decreasing2' re['label_'][re['label']==3] = 'Decreasing1' for col in col_list: print ("-------------",col,"----------------") mcDate = multi.MultiComparison(re[col],re['label_']) Results = mcDate.tukeyhsd() print (Results) ``` # 6. 
Create a summary table ``` mean_std_list_total = [] for col in col_list: #print col mean_std_list = [] for i in [0,1,2,3]: group = ['inc', 'dec2', 'stable', 'dec1'] mean = re[col][re['label']==i].mean() std = re[col][re['label']==i].std() mean_std = round(mean,4), round(std,4) mean_std_list.append(mean_std) mean_std_list_total.append(mean_std_list) df_summary = pd.DataFrame(mean_std_list_total) df_summary['feature'] = col_list df_summary.columns = ['inc', 'dec2', 'stable', 'dec1', 'feature'] df_summary = df_summary[['feature','stable', 'inc', 'dec1', 'dec2']] for c in ['stable', 'inc', 'dec1', 'dec2']: df_summary[c] = df_summary[c].astype(str).str.replace('(','') df_summary[c] = df_summary[c].astype(str).str.replace(', ',' (') # df_summary df_summary_anova = pd.DataFrame() df_summary_anova['feature'] = col_list df_summary_anova['anova'] = anova_value df_summary_anova['pvalue'] = anova_pvalue df_summary = pd.merge(df_summary, df_summary_anova, how='outer', on='feature') df_summary ``` # 7. Export a summary table ``` # df_summary.to_csv('../../outputs/cluster_groups_characteristics_summary_table.csv', index=False) ```
github_jupyter
## Evaluating Performance of a Regressor Regression Model produces numeric output. <i>How much is my home worth?</i> <i>How many passengers are going to travel by air this year?</i> To find out how good the model predictions are, we need to check predictions against previously unseen samples that were not used for training. Usually, 30% of the available samples are reserved for testing while remaining 70% of samples are used for training. By comparing predicted values against known results in test data, we can assess overall model performance<br> Common Techniques for evaluating performance:<br> <li>Visually observe using Plots</li> <li>Residual Histograms</li> <li>Evaluate with Metrics like Root Mean Square Error (RMSE)</li> <p>While Plots are good for humans to visually observe the results, we often need a single metric that can indicate quality of a model. This can be useful for programmatically identifying which model is performing better (for example: using automatic model tuning to select the best performing model)</p> Reference:<br>https://docs.aws.amazon.com/machine-learning/latest/dg/evaluating-model-accuracy.html<br> RMSE:<br>https://en.wikipedia.org/wiki/Root-mean-square_deviation<br> Mean Absolute Error:<br>https://en.wikipedia.org/wiki/Mean_absolute_error<br> ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error, mean_absolute_error ``` <h2>Air Passengers Data</h2> <h3>Columns</h3> <li>Passengers = Actual Number of passengers who traveled by air</li> <li>Model1_Prediction = Number of Passengers predicted by model 1</li> <li>Model2_Prediction = Number of Passengers predicted by model 2</li> <li>Model3_Prediction = Number of Passengers predicted by model 3</li> <li>Model4_Prediction = Number of Passengers predicted by model 4</li> <p>We are going to compare performance of these four models</p> World Bank Air Traffic Passengers Dataset:<br> 
https://data.worldbank.org/indicator/NY.GDP.MKTP.CD<br> https://data.worldbank.org/indicator/SP.POP.TOTL ``` models = ['Model 1','Model 2', 'Model 3', 'Model 4'] df_air = pd.read_csv('airpassengers_sample.csv',index_col=0) df_air ``` ## Plot Data Compare performance visually <br> ``` plt.figure(figsize=(10,10)) # Plot Actual versus predictions by each model # We are going to draw 4 plots in a 2 x 2 grid # 221 = 2 rows, 2 columns, 1st sub plot # 222 = 2 rows, 2 columns, 2nd sub plot # and so forth # Model 1 plt.subplot(221) plt.plot(df_air['Passengers'], label='Actual') plt.plot(df_air['Model1_Prediction'],label='Model 1') plt.xlabel('Year') plt.ylabel('Passengers (Millions)') plt.legend() # Model 2 plt.subplot(222) plt.plot(df_air['Passengers'], label='Actual') plt.plot(df_air['Model2_Prediction'],label='Model 2') plt.xlabel('Year') plt.ylabel('Passengers (Millions)') plt.legend() # Model 3 plt.subplot(223) plt.plot(df_air['Passengers'], label='Actual') plt.plot(df_air['Model3_Prediction'],label='Model 3') plt.xlabel('Year') plt.ylabel('Passengers (Millions)') plt.legend() # Model 4 plt.subplot(224) plt.plot(df_air['Passengers'], label='Actual') plt.plot(df_air['Model4_Prediction'],label='Model 4') plt.xlabel('Year') plt.ylabel('Passengers (Millions)') plt.legend() plt.show() # Same plot as above...more concise code plt.figure(figsize=(10,10)) for idx, model in enumerate(models): plt.subplot(2,2,idx+1) plt.plot(df_air['Passengers'], label='Actual') plt.plot(df_air[model.replace(' ','') + '_Prediction'], label=model) plt.xlabel('Year') plt.ylabel('Passengers (Millions)') plt.legend() ``` <p>From the plots, we can observe that Model 1 and Model 2 appears to be pretty close to actuals. Model 3 plot is not matching with actuals. Model 4 is predicting a constant value<p> ## Root Mean Square Error (RMSE) Compares Actual and Predicted values and arrives at a single metric.<br> Smaller RMSE value indicates better predictive quality. 
<br> Let's compute the RMSE metric for each of the models ``` # RMSE for model in models: print (model) mse = mean_squared_error(df_air['Passengers'], df_air[model.replace(' ','') + '_Prediction']) print(" Mean Squared Error: {0:.2f}".format(mse)) print(" Root Mean Square Error: {0:.2f}".format(mse**.5)) ``` We can confirm using RMSE that Model 2 produces best outcome ## Residual Histograms "A residual for an observation in the evaluation data is the difference between the true target and the predicted target. The histogram of the residuals on the evaluation data when distributed in a bell shape and centered at zero indicates that the model makes mistakes in a random manner and does not systematically over or under predict any particular range of target values"<br> Reference: https://docs.aws.amazon.com/machine-learning/latest/dg/regression-model-insights.html ``` plt.figure(figsize=(10,10)) for idx, model in enumerate(models): plt.subplot(2,2,idx+1) residual = df_air['Passengers'] - df_air[model.replace(' ','') + '_Prediction'] plt.hist(residual, label=model) plt.plot([0,0],[0,3]) plt.xlabel('Actual - Predicted') plt.ylabel('Count') plt.grid() plt.legend() # Let's print actual counts # How many under predictions and over predictions # Actual - Predicted # Positive Value indicates Actual is more than predicted (under estimation) # Negative Value indicates Actual is less than predicted (over estimation) # Since our test dataset has only 10 samples, it hard to find patterns. 
# But, even here, Model 3 appears to be different from other models # as it over predicting for larger number of samples for model in models: print (model) residual = df_air['Passengers'] - df_air[model.replace(' ','') + '_Prediction'] # Count number of values greater than zero and less than zero value_counts = (residual > 0).value_counts(sort=False) print(' Under Estimation: ', value_counts[True]) # difference is greater than 0 print(' Over Estimation: ', value_counts[False]) # difference is less than 0 ``` <h2>Summary</h2> In this example, Model 2 has the best performance followed by Model 1. We can confirm this by visual observation using plots and by comparing RMSE metrics
github_jupyter
# Introduction to Tensorflow and Sonnet By the end of this colab you will have trained a neural net to approximate the NXOR function based on some data. In the process you will have learnt about * some useful tensorflow tensor operations * building a model with *Tensorflow* and *Sonnet* * visualizing the model you built * getting the data into your model * backpropagation as implemented by tensorflow * debugging tensorflow models * how to actually train the network. Recall: you can use the outline on the right hand side to navigate the colab easier. ``` #@title Fetching (DM) sonnet from pip. Run this cell. !pip install dm-sonnet #@title Imports. Run this cell. from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import seaborn as sns import numpy as np import tensorflow as tf import sonnet as snt from matplotlib import pyplot as plt from google.colab import files from scipy.stats import multivariate_normal from IPython.display import clear_output, Image, display, HTML sns.set_style('ticks') #@title Utility functions. Run this cell. 
def get_data(num_examples): inputs = 2*np.random.random((num_examples, 2)) - 1 labels = np.prod(inputs, axis=1) labels[labels <= 0] = -1 labels[labels > 0] = 1 return inputs, labels def plot_nxor_data(inputs, labels, title): MARKER_COLORS = np.array([ [1.0, 0.0, 0.0], # red for -1 [0.0, 1.0, 0.0], # green for +1 ]) class_idx = (labels + 1 / 2.0).astype(np.int) plt.figure() plt.title(title) plt.scatter( x=inputs[:, 0], y=inputs[:, 1], c=MARKER_COLORS[class_idx], alpha=0.9) plt.legend() plt.show() def strip_consts(graph_def, max_const_size=32): """Strip large constant values from graph_def.""" strip_def = tf.GraphDef() for n0 in graph_def.node: n = strip_def.node.add() n.MergeFrom(n0) if n.op == 'Const': tensor = n.attr['value'].tensor size = len(tensor.tensor_content) if size > max_const_size: tensor.tensor_content = "<stripped %d bytes>"%size return strip_def def show_graph(graph_def=None, max_const_size=32): """Visualize TensorFlow graph. Default to the default graph.""" if graph_def is None: graph_def = tf.get_default_graph() if hasattr(graph_def, 'as_graph_def'): graph_def = graph_def.as_graph_def() strip_def = strip_consts(graph_def, max_const_size=max_const_size) code = """ <script src="//cdnjs.cloudflare.com/ajax/libs/polymer/0.3.3/platform.js"></script> <script> function load() {{ document.getElementById("{id}").pbtxt = {data}; }} </script> <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()> <div style="height:600px"> <tf-graph-basic id="{id}"></tf-graph-basic> </div> """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand())) iframe = """ <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe> """.format(code.replace('"', '&quot;')) display(HTML(iframe)) ``` ## The TensorFlow Paradigm This section is not necessarily a fully complete introduction to tensorflow. 
If you are not familiar with tensorflow or don't feel comfortable with some of the content consider using a third party tutorial or the tensorflow documentation. Instead this colab focuses on exploring the ideas underlying tensorflow and working with it, highlighting important concepts along the way. **There are two distinct phases when it comes to working with tensorflow:** 1. Constructing the computation graph, our model, 2. Running data through this graph. We soon see what this means. *Note:* that with TensorFlow *Eager mode* this is not the case anymore: there the two phases happen hand in hand. Here we work with *Graph mode*, however. ### Building and displaying graphs Let's build a simple computation graph: ``` a = tf.constant([5, 3, 1]) b = tf.constant([-1, 2, 5]) c = a + b c ``` Notice that `c` has no value associated. It is actually a (reference to a) node in the computation graph we just defined: tensorflow knows that to find the value of `c`, it needs to query the values of the nodes `a` and `b` and add them. **In tensorflow all computation is implemented as operations on tensors (or variables, etc), and this computation forms a graph.** * We add tensors and operations to a graph with our Python code and libraries. * The tensorflow API [docs](https://www.tensorflow.org/api_docs/python/) list all available operations. * In practice many -- if not most -- `numpy` operations have a tensorflow counterpart, though often not by that same name. We can visualize the graph we have built so far. `show_graph()` is a utility function we defined above<sup>1</sup>; it shows the tensorboard graph representation of the graph you pass to it, right here in colab. 
<small>1: The graph visualization code is from the [Jakub Arnold Blog](https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html#Using-a-cloud-hosted-TensorBoard-instance-to-do-the-rendering).</small> ``` show_graph(tf.get_default_graph()) ``` Note that in tensorflow you can have many graphs at the same time. By default, unless otherwise specified, we are building the so called "default graph" that we accessed with `tf.get_default_graph()`. ### Resetting the default graph Recall that colab cells run in arbitrary order, maintaining python state between them. Therefore, if you run a cell that adds some tensors or operations to the graph, you will add more and more copies of them to the graph. This is probably not what you want. **Try running the cell where we defined node `c` a few more times, then visualizing the graph.** You will see multiply copies the same nodes. To solve this issue, tensorflow has `tf.reset_default_graph()`, which clears everything from the default graph. ``` tf.reset_default_graph() a = tf.constant(5, name='a') b = tf.constant(-1, name='b') c = tf.add(a, b, name='c') show_graph(tf.get_default_graph()) ``` Whenever in doubt about your current graph, you can just reset it and rebuild it. By the way, notice that in the previous code cell we labelled nodes in the graph using the `name` argument. This can often help us interpret the graph. ### Running the graph Recall that `c` had no associated value -- we were merely informed that it is a tensor, it's shape, etc. **Tensors only have values when 'run' in a session**. ``` tf.reset_default_graph() a = tf.constant([5, 2], name='a') b = tf.constant([-1, 0], name='b') c = tf.add(a, b, name='c') with tf.Session() as session: print(session.run(c)) ``` What really happens is that when you pass a graph node (operation, tensor, etc) to `session.run()`, tensorflow figures out what is the minimal subset of the graph to run in order to satisfy your request, and runs only that. 
It's difficult to appreciate this in the context of the simple graphs we had so far, but we will see a good example shortly. You can run any node from your graph, or a combination of them. ``` with tf.Session() as session: print('a:', session.run(a)) # As noted above, in this case addition # (required to find the value of c) is not even # executed. print('[b, c]:', session.run([b, c])) print(session.run({'a': a, 'c': c})) ``` The data flows through the graph just once, but tensorflow runs all requested operations and tensors (along with their dependencies), returning their calculated values. We can easily illustrate how this work with tensors that get a new random value each time you run them. **Try predicting the pattern before inspecting the printed results!** ``` tf.reset_default_graph() r = tf.random_normal(shape=(3,), mean=0.0, stddev=1.0) x1 = r + 1 # Shifted +1 x2 = r - 1 # Shifted -1 with tf.Session() as session: print('x1, x2 run separately:', session.run(x1), session.run(x2)) print('x1, x2 run together :', session.run([x1, x2])) ``` Notice that * when x1 and x2 were run together, the difference between correpsonding entries is always 2, * while this is not the case when they were run separately. This is because when run together, `r` is sampled once, and both `x1` and `x2` use this same value. We now highlight what this means for neural network training implemented in tensorflow. ### A neural network example of tensorflow's computational model All computation required to train the network will be implemented as a tensorflow computation graph. In particular you will have tensor and operations like * `train`: take a training step on some data, * `loss`: calculate the loss on some data, * `outputs`: give you predictions on some data, * and so on. Given the computation model of tensorflow: * You will be able to `run(loss)` to calculate the loss, and **without triggering the training step computation**. 
* On the other hand, running `train` will calculate the `loss` since this is what it needs to optimize. If you `run([loss, train])`, tensorflow will take a training step and report the loss, **both based on the same data**. As a final note, the fact that only the **minimal required subset of nodes are run** is going to be crucial when using BatchNorm: the ops that update the statistics kept in BatchNorm are not dependencies of any other ops, therefore will not get run automatically. You will experiment with this in the ConvNets and Vision Lab. ### Running a graph with state and inputs Our examples so far have been silly in the sense that they were straightforward computation on constants, not warranting a computation graph. We now showcase a situation where the value of a tensor is not defined until it is run; this is because the value is dependent on data fed to the graph at running time. ``` tf.reset_default_graph() a = tf.placeholder(dtype=tf.int32, shape=(), name='input') b = tf.constant(-1, name='b') c = tf.add(a, b, name='c') with tf.Session() as session: print(session.run(c, feed_dict={a: 3})) print(session.run(c, feed_dict={a: 10})) ``` We used a `tf.placeholder`. These are tensors that have no value or computation associated to them by default, instead they simply take data so this data can be computed on by the rest of the graph. Note that, at the same time, **any tensor may be fed with some data**. Another strength of the computation graph approach is that some nodes may be stateful. The most common stateful node is a *variable*. **A variable is a tensor that remembers its value between run calls**. This also means **it must be initialized**. In the following example `a` will be a variable. We also define an `inc` operation that increments the value of `a` by 1 each time this operation is run. 
``` tf.reset_default_graph() a = tf.get_variable('counter', shape=(), dtype=tf.int64) inc = tf.assign(a, a+1) init_op = tf.global_variables_initializer() with tf.Session() as session: session.run(init_op) # Sets an initial value for a. print(session.run(a)) # By default, this is 0. print(session.run(a)) session.run(inc) session.run(inc) print(session.run(a)) # We see the variable was incremented (twice). # If you were to print the output of inc, you see that it actually # returns the value of a post-increment. This is a convenience feature # of tf.assign(). ``` Statefulness is highly relevant to us since the weights of our machine learning models are stored as variables and are updated by some operations in `session.run` calls during training. ### Quick Tour of Control dependencies and Race conditions *These topics do not often come up when training simple neural networks, but they are core concepts of tensorflow and you should be familiar with them.* With the introduction of stateful graph components we need to revisit the rule that tensorflow only executes the minimal set of operations required by a `run()` call. **Try predicting the output of the following cell.** ``` tf.reset_default_graph() x = tf.get_variable("x", shape=(), initializer=tf.zeros_initializer()) assign_x = tf.assign(x, 10.0) z = x + 1.0 init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) print(session.run(z)) ``` From tensorflow's perspective, * There is a variable `x`, which starts with value 0, * `z` is always `x+1`, * with `assign_x` you can set the value of `x` to 10. So if you simply ask for the value of `z`, tensorflow evaluates the minimal subset of the graph it needs and reports that `z = 0 + 1`. This is reflected in the graph as well. ``` show_graph() ``` If you want `x` incremented by 10 before using it to calculate `z`, you need to tell tensorflow. You can do so by specifying `assign_x` as a (control_)dependency of z. 
``` tf.reset_default_graph() x = tf.get_variable("x", shape=(), initializer=tf.zeros_initializer()) assign_x = tf.assign(x, 10.0) with tf.control_dependencies([assign_x]): z = x + 1.0 init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) print(session.run(z)) ``` To be precise, `tf.control_dependencies` ensures all operations / tensors passed to it are run before running the the operations defined inside its body. The other rule to keep in mind is that **Tensorflow is inherently parallel.** If there are computation subgraphs that do not depend on each other, they can -- and likely will be -- evaluated in parallel. We use the same generic example to illustrate this. ``` tf.reset_default_graph() x = tf.get_variable("x", shape=(), initializer=tf.zeros_initializer()) z = x + 1.0 assign_x10 = tf.assign(x, 10.0) assign_x5 = tf.assign(x, 5.0) init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) for _ in range(10): _, _, z_val = session.run([assign_x10, assign_x5, z]) print(z_val) ``` We can see that `z` can take various values: its value will depend on what order the different operations get run -- which we don't control. (If you do not see different values, re-run the cell until you do.) The lesson is that **if you care about the order of otherwise independent operations, you must be explicit about this**. ### Exercise: "Interactive Tensorflow Summing Machine" Write a tensorflow graph which keeps a running sum of the integers passed to it through a `feed_dict`. To make sure it works feed the machine a few numbers, printing the cumulative sum after each step. 
``` #@title Your Code #@title Solution tf.reset_default_graph() cumulative_sum = tf.get_variable('sum', shape=(), dtype=tf.int64) to_add = tf.placeholder(dtype=tf.int64, shape=(), name='input') add = tf.assign(cumulative_sum, cumulative_sum + to_add) init_op = tf.global_variables_initializer() with tf.Session() as session: session.run(init_op) # Sets an initial value for a. for i in range(1, 6): print('cumulative sum={}; adding {}.'.format(session.run(cumulative_sum), i)) session.run(add, feed_dict={to_add: i}) ``` ## A word (section) on tensorflow tensor shapes Tensors in Tensorflow have **static and dynamic shape**. * Static shape information is known or can be deduced at graph construction time, * Dynamic shape information is only available when data is available. **Static shape may be and is often only partially defined**. For example, we may know that our model expect a batch of examples, each of shape `2 x 2`, but not how large these batches are. This will allow us to feed the computation graph with batches of any size. Once data is fed the tensors will have a known **dynamic shape**. ``` tf.reset_default_graph() inputs = tf.placeholder(dtype=tf.int32, shape=(None, 2, 2), name='input') print('static shape:', inputs.shape) ``` We pass `None` for axes that we do not know the static length of when specifying a shape. When a tensor or its shape is printed, this is denoted by a question mark, `?`, as seen above. **Bug-alert:** Be careful not to confuse passing `(None)` vs `(None,)` as a desired shape. The next cell illustrates the consequences: ``` inputs_1 = tf.placeholder(dtype=tf.int32, shape=(None), name='input') inputs_2 = tf.placeholder(dtype=tf.int32, shape=(None,), name='input') print(inputs_1.shape) # Shape that we know nothing about, not even rank. print(inputs_2.shape) # Tensorflow will assert that the tensor is of rank 1, # albeit with unknwon length. 
``` The static shape information is used to * verify operations make sense (think matrix multiplication), * infer the static shape of tensors defined through operations (so they can also be checked) . **Example** 1. We take `batch (?) x 2 x 2`-shaped tensors, flatten each example in the batch to be a vector of length `4`. Tensorflow will infer the shape of the flattened tensor automatically. 2. Then we multiply the now `? x 4`-shaped tensor with a vector. Tensorflow will only allow this to happen if the vector is of length 4, as otherwise the operation makes no sense. (In practice the `tf.matmul` operation we use does not accept vectors, so we will use a `4 x 1` matrix instead.) ``` tf.reset_default_graph() inputs = tf.placeholder(dtype=tf.int32, shape=(None, 2, 2), name='input') flat_inputs = tf.contrib.layers.flatten(inputs) print('flat_inputs static shape', flat_inputs.shape) result = tf.matmul(flat_inputs, tf.constant([[0], [1], [2], [3]], name='ok')) print('result static shape', result.shape) # Uncomment and run to see # # ValueError: Dimensions must be equal, but are 4 and 3 for 'MatMul_4' # (op: 'MatMul') with input shapes: [?,4], [3,1]. # #tf.matmul(flat_inputs, tf.constant([[0], [1], [2]], name='shape_mismatch')) ``` It happens sometimes (e.g. for custom operations) that tensorflow is not be able to infer the static shape of the resulting tensor. f you know the expected shape, you can explicitly set it using `tensor.set_shape()`. This will allow tensorflow to infer and check later shapes. Finally, let us try working with the dynamic shape of a tensor. ``` print('dynamic shape:', tf.shape(inputs)) ``` The **dynamic shape itself is a tensor** and may (only) be evaluated or computed with once the graph is run in a session. 
``` shape = tf.shape(inputs) num_total_elements = tf.reduce_prod(shape) with tf.Session() as session: print(session.run([shape, num_total_elements], feed_dict={ inputs: np.array(np.random.random((3, 2, 2))) })) ``` ### Broadcasting Tensorflow automatically broadcasts operations, similarly to `numpy`. We covered broadcasting in detail in the `numpy` colab. Here we include three common examples. ``` tf.reset_default_graph() a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a') c = a - 1 # `1` is first turned into a constant, # then broadcast across the full tensor with tf.Session() as session: print(session.run(c)) tf.reset_default_graph() a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a') b = tf.constant([1000, 100, 10], name='b') c = a + b # a: 2 x 3 # b: 3 # --> b is copied over across the first axis to calculate c. with tf.Session() as session: print(session.run(c)) tf.reset_default_graph() a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a') b = tf.constant([100, 10], name='b') # a: 2 x 3 # b: 2 # --> a and b are not compatible; #a + b # Raises an error. # Instead, b can be defined as [[100], [10]] so that # a: 2 x 3 # b: 2 x 1 # --> b is copied across the last axis. ``` **As a general rule of thumb** * use broadcasting in the simple cases * prefer explicit broadcasting in complex situations. This will result in code that is **easier to read** and has **fewer bugs**. ## Building a simple network with Sonnet Instead of building our neural networks in plain Tensorflow, we use the [sonnet](https://github.com/deepmind/sonnet) library. **Sonnet uses an object-oriented approach, similar to Torch/NN.** * This allows modules to be created, which define the forward pass of some computation. * Modules are ‘called’ with some input Tensors, which adds ops to the Graph and returns output Tensors. We call this a **configure-then-connect principle**, which allows for easy reuse of complex modules. 
``` tf.reset_default_graph() # You can always clear the current graph and # add exactly what you need to it. ``` Start by creating a Linear module (dense layer). ``` linear = snt.Linear(output_size=5) linear ``` Our input will be batches of 2-long vectors, and we will feed that data to the graph using `feed_dict`s. ``` inputs_placeholder = tf.placeholder(tf.float32, shape=(None, 2), name='inputs') ``` As in tensorflow, we "call" the module on the tensor that we want it to compute on. This yields a tensor, the output of the calculation. ``` pre_activations = linear(inputs_placeholder) ``` To complete our model, we apply a ReLU non-linearity and add a final linear layer with just 1 output. ``` activations = tf.nn.relu(pre_activations) outputs = snt.Linear(output_size=1)(activations) outputs ``` We drop the final singleton axis so that `outputs` becomes a vector. ``` outputs = tf.squeeze(outputs, axis=-1) outputs ``` Let's see the graph we built. ``` show_graph() # With no arguments show_graph() shows the default graph. ``` You can explore the exact set of tensorflow operations that were created the sonnet code by expanding colored boxes. **We can verify that each linear layer implements $WX+b$ for $X$ inputs and $W$ weights and $b$ bias with basic tensorflow operations**. Let's pass some data through our model. We will use the data generating function we wrote in the numpy colab. (It is redefined at the top of this colab). ``` init_op = tf.global_variables_initializer() inputs_np, unused_labels_np = get_data(num_examples=8) with tf.Session() as session: session.run(init_op) # Initializes the weights in the network. outputs_np = session.run(outputs, feed_dict={ inputs_placeholder: inputs_np, }) outputs_np ``` You can rerun the above cell to see the output on new and new batches. The one thing that now remains is... ## Training a tensorflow model This is the same with or without sonnet. We will start by 1. Making the correct labels available to the graph, 2. 
Using these to define and calculate the loss on the output of the network. ``` labels_placeholder = tf.placeholder(tf.float32, shape=(None,), name='labels') ``` Here we will simply regress onto the labels with the squared loss. (It would be better to calculate a cross entropy.) ``` with tf.name_scope('element_wise_loss'): loss = tf.square(labels_placeholder - outputs) loss ``` The loss tensor now calculates the loss per example. We want one scalar to optimize: ``` loss = tf.reduce_mean(loss, name='batch_mean_loss') loss ``` We can verify on the graph that everything is as expected. The `name_scope` and `name` instructions make the graph easier to interpret. ``` show_graph() ``` We need to tell the computation graph that we want to minimize this loss. ``` optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) train_op = optimizer.minimize(loss) ``` **It is worth noting here the effect of this call on the graph.** ``` show_graph() ``` The minimization call added * gradient calculation operations * operations that update the weights based on these gradients. In fact, **we could have built the graph corresponding to `minimize()` manually** by * calculating the gradients of the loss with respect to the weights with the `tf.gradients(loss, [list of weights])` operation, * potentially scaling these gradients and adding them to the existing weights. By running the returned `train_op`, we take one gradient step, fitting the data just a bit better. Let's do this! But first some setup. ``` # Get some training data, and plot it. This is based on earlier exercises. inputs_np, true_labels_np = get_data(num_examples=128) plot_nxor_data(inputs_np, true_labels_np, title='Train data') # Show some statistics that can help with debugging print('Mean label on train data:', np.mean(true_labels_np)) init_op = tf.global_variables_initializer() ``` **The final training script.** This cell contains all training and some reporting code. 
For now you can just run it, but for the next exercise you will have to understand it. *Note that sometimes we can get a bad weight initialization, but in a few runs you can easily get below 5% error.* ``` RECORD_PERIOD = int(1e3) training_steps = 10000 #@param {'type': 'integer'} print('Losses:') with tf.Session() as session: session.run(init_op) # Initializes the weights in the network. for i in range(training_steps): _, loss_np = session.run( [train_op, loss], feed_dict={ inputs_placeholder: inputs_np, labels_placeholder: true_labels_np, }) if (i % RECORD_PERIOD) == 0: print(' ', loss_np) if loss_np < 0.01: print() print('Loss hit threshold after {} steps, stopping.'.format(i)) break print() # The model is ready to be evaluated. Fetch the predicted outputs. predictions_np = session.run(outputs, feed_dict={ inputs_placeholder: inputs_np, }) # Actual label predictions given as {-1, +1}. predictions_np[predictions_np <= 0] = -1 predictions_np[predictions_np > 0] = 1 # Prediction errors and plotting. num_correct = np.count_nonzero(np.isclose(predictions_np, true_labels_np)) num_examples = true_labels_np.shape[0] print('Prediction error:', (num_examples-num_correct)/num_examples) plot_nxor_data(inputs_np, predictions_np, title='Predictions') ``` Notice that the prediction error calculation was inside the `with tf.Session()` context manager. This because **the graph state (including weights) is only maintained on a per session basis**. It is possible to save (and load) graphs, including their weights, with a [`tf.train.Saver`](https://www.tensorflow.org/api_docs/python/tf/train/Saver). ## Exercise: Evaluate the trained model We have seen how to train the model -- that is -- we saw that the model can fit the training set well. But we are actually interested in generalizing to new examples from the same data distribution. 1. Define a training and a test dataset using our data generation function. 2. Fit the training data using the model we defined above. 3. 
Instead of reporting the prediction error only on the training set, also report it on the test set. 4. Plot the predictions on the test set using the pre-defined plotting function. For simplicity, the full model building code is included in the cell below: ``` tf.reset_default_graph() # Inputs. inputs_placeholder = tf.placeholder(tf.float32, shape=(None, 2), name='inputs') labels_placeholder = tf.placeholder(tf.float32, shape=(None,), name='labels') # All network and loss definition. activations = tf.nn.relu( snt.Linear(output_size=5)(inputs_placeholder)) outputs = tf.squeeze( snt.Linear(output_size=1)(activations), axis=-1) loss = tf.reduce_mean( tf.squared_difference(labels_placeholder, outputs)) # Optimizer and initializer. optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.03) train_op = optimizer.minimize(loss) init_op = tf.global_variables_initializer() #@title Your Code #@title Solution # The solution is very similar to the previous training script, except care # needs to be taken to have a separate train and test set. train_inputs_np, train_labels_np = get_data(num_examples=256) test_inputs_np, test_labels_np = get_data(num_examples=128) TRAINING_STEPS = int(2e4) RECORD_PERIOD = int(1e3) def _get_predictions(inputs): predictions_np = session.run(outputs, feed_dict={ inputs_placeholder: inputs, }) # Actual label predictions given as {-1, +1}. predictions_np[predictions_np <= 0] = -1 predictions_np[predictions_np > 0] = 1 return predictions_np def _get_error(predictions, true_labels): num_correct = np.count_nonzero(np.isclose(predictions, true_labels)) num_examples = true_labels.shape[0] return (num_examples-num_correct) / num_examples print('Losses:') with tf.Session() as session: session.run(init_op) # Initializes the weights in the network. 
for i in range(TRAINING_STEPS): _, loss_np = session.run( [train_op, loss], feed_dict={ inputs_placeholder: train_inputs_np, labels_placeholder: train_labels_np, }) if (i % RECORD_PERIOD) == 0: print(' ', loss_np) if loss_np < 0.01: print() print('Loss hit threshold after {} steps, stopping.'.format(i)) break print() # The model is ready to be evaluated. train_predictions = _get_predictions(train_inputs_np) train_error = _get_error(train_predictions, train_labels_np) test_predictions = _get_predictions(test_inputs_np) test_error = _get_error(test_predictions, test_labels_np) print('Train error:', train_error) print('Test error:', test_error) plot_nxor_data(test_inputs_np, test_predictions, title='Predictions') ``` ## Datasets So far we used a `feed_dict`s to pass data to the computation graph. Another, often more efficient solution is to have nodes in the graph read, maninpulate, and make data available. Tensorflow has a dedicated `tf.data` module. Tensorflow's [Importing Data Guide](https://www.tensorflow.org/guide/datasets) guide is a great resource for learning about it. **Read this guide up to and including the "Reading input data > Consuming NumPy arrays"** section. ### Exercise: define a tensorflow dataset 1. Use the `get_data` function from before to generate a training dataset of 1000 examples and a test dataset of 500 examples. 2. Using `from_tensor_slices()`, define a training and a test `tf.data.Dataset`. 3. Ensure that the train data is (a) fully shuffled (b) can be iterated infinitely (c) is batched with a batch size of 64. 4. We do not shuffle the test data and we only want to iterate it once. We still batch it up so that the amount of data we compute on is limited. **Write a function called `get_tf_dataset()` that returns a (`train_dataset, test_dataset`)-tuple according to these instructions.** Print the returned datasets in order to verify they are correctly defined. 
``` tf.reset_default_graph() #@title Your Code #@title Solution BATCH_SIZE = 64 train_data_np = get_data(1000) test_data_np = get_data(500) def get_tf_dataset(): train_dataset = tf.data.Dataset.from_tensor_slices(train_data_np) train_dataset = train_dataset.shuffle(1000).repeat().batch(BATCH_SIZE) test_dataset = tf.data.Dataset.from_tensor_slices(test_data_np) test_dataset = test_dataset.batch(BATCH_SIZE) return train_dataset, test_dataset print(get_tf_dataset()) ``` We need to access the data as tensors. We can do so by asking for an iterator over the dataset. We use the simplest iterator, which simply iterates over the dataset: ``` train_dataset, test_dataset = get_tf_dataset() train_data_iter = train_dataset.make_one_shot_iterator() (train_inputs, train_labels) = train_data_iter.get_next() train_inputs, train_labels test_data = test_dataset.make_one_shot_iterator().get_next() ``` Now we can use `train_inputs` and `train_labels` like any other tensor. Each time we use them in a `session.run()` the tensor will hold a new batch. ``` def _print_some(np_array, descr): print(descr + ':') print(' shape: {}'.format(np_array.shape)) print(' first examples in batch: {}'.format(np_array[:4])) with tf.Session() as session: # Train data. for _ in range(2): train_inputs_np, train_labels_np = session.run([train_inputs, train_labels]) _print_some(train_inputs_np, 'train_inputs') _print_some(train_labels_np, 'train_labels') print() # Test data. test_inputs_np, test_labels_np = session.run(test_data) _print_some(test_inputs_np, 'test_inputs') _print_some(test_labels_np, 'test_labels') ``` We defined the test dataset to supply data for exacly one full iteration of the test dataset. We can fetch data until tensorflow lets us know there is no more data. 
``` with tf.Session() as session: counter = 0 while True: try: test_inputs_np, test_labels_np = session.run(test_data) counter += 1 except tf.errors.OutOfRangeError: break print('Counted {} batches of test examples.'.format(counter)) ``` The `make_one_shot_iterator()` function returns an iterator that, when exhausted, cannot be restarted. There are many utility functions in the `tf.data` both for reading in and manipulating data; chances are, whatever you would like to do it is already available there. ### Queues In earlier versions of tensorflow datasets had to be manipulated with so called [Queues](https://www.tensorflow.org/api_guides/python/threading_and_queues). They allowed data loading and preprocessing to be asynchronous, making the input pipeline faster. Their use for input pipelines is now deprecated, if you are interested in increasing the performance of your input pipeline read the [official guide on this topic](https://www.tensorflow.org/performance/datasets_performance). Queues are still used for pushing data between different threads, potentially on different machines, but we will not cover them in this lab. ## The Power of Sonnet The Sonnet library has two key selling points: * Complex networks are easily reused. * Variable sharing is handled transparently by automatically reusing variables on subsequent calls to the same module. We will now see these features in action. We start by defining a sonnet module corresponding to the classifier we have been working with. The section on [defining your own submodules](https://deepmind.github.io/sonnet/#defining-your-own-modules) in the sonnet documentation is both helpful and precise. The key points are: * Inherit from snt.AbstractModule * Call superclass constructor * Implement the `_build()` method The `_build()` method is meant to construct all computation graph corresponding to this module. It takes as argument the inputs to the module, and returns the outputs. 
``` class MySimpleModule(snt.AbstractModule): def __init__(self, num_hidden, nonlinearity=tf.nn.relu, name="my_simple_module"): super(MySimpleModule, self).__init__(name=name) self._num_hidden = num_hidden self._nonlinearity = nonlinearity def _build(self, inputs): # Inputs has shape batch_size x ?. pre_activations = snt.Linear(output_size=self._num_hidden)(inputs) activations = self._nonlinearity(pre_activations) outputs = snt.Linear(output_size=1)(activations) return tf.squeeze(outputs, axis=-1) # Shape: [batch_size]. ``` Aside: since this module is simply a sequence of other modules and tensorflow ops (e.g. the non-linearity), the module could have been made using the `snt.Sequential()` wrapper. We can make a particular instance of the module we defined like so: ``` tf.reset_default_graph() model = MySimpleModule(num_hidden=5) ``` No graph has actually been created so far, since only the constructor of the class ran. Let's connect this module to the training data. *Note that while it is encouraged to only create graph in the `_build()` method, some sonnet modules may already do so in their constructor.* ``` train_dataset, test_dataset = get_tf_dataset() train_inputs, train_labels = train_dataset.make_one_shot_iterator().get_next() train_ouputs = model(train_inputs) ``` The connection triggered the `_build()` function and we can see the graph corresponding to the model is built. ``` show_graph() ``` The beauty of sonnet is that we can **connect the same `model` instance to the test data tensor and it will automatically share variables**. ``` test_inputs, test_labels = test_dataset.make_one_shot_iterator().get_next() test_outputs = model(test_inputs) ``` Of course creating another instance will not share variables. Can you tell, based on the graph (not considering the names) which modules share weights? 
``` unshared_test_outputs = MySimpleModule(num_hidden=5, name='unshared_simple_module')(test_inputs) show_graph() ``` The fact that `train_outputs` and `test_outputs` use shared variables means that training based on `train_outputs` will improve the quality of `test_ouputs` as well. We show this next. We base the training script here based on our previous one. Some modifications are required: * The references to the dataset must be updated. We do not use `feed_dicts`, but we must take care to run `test_outputs` or `train_outputs`. * In order to get the true (test) labels, we need to run the `test_labels` tensor. * We need to iterate over the full test dataset. Another change is that now each training step uses a different batch of data, while our earlier version used the full (smaller) dataset. ``` # CHANGED HERE: loss = tf.reduce_mean(tf.squared_difference(train_labels, train_ouputs)) # Optimizer and initializer. optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.03) train_op = optimizer.minimize(loss) init_op = tf.global_variables_initializer() TRAINING_STEPS = int(2e4) RECORD_PERIOD = int(1e3) def _num_correct(predictions_np, true_labels_np): # Actual label predictions given as {-1, +1}. predictions_np[predictions_np <= 0] = -1 predictions_np[predictions_np > 0] = 1 # Count correct predictions. return np.count_nonzero(np.isclose(predictions_np, true_labels_np)) print('Losses:') with tf.Session() as session: session.run(init_op) # Initializes the weights in the network. for i in range(TRAINING_STEPS): _, loss_np = session.run([train_op, loss]) # CHANGED HERE. if (i % RECORD_PERIOD) == 0: print(' ', loss_np) print() # The model is ready to be evaluated. Fetch the predicted outputs. num_correct = 0 num_elements = 0 while True: try: # CHANGES HERE. 
predictions_np, true_labels_np = session.run([test_outputs, test_labels]) num_elements += predictions_np.shape[0] num_correct += _num_correct(predictions_np, true_labels_np) except tf.errors.OutOfRangeError: break print('The prediction error on the test set:', (num_elements - num_correct) / num_elements) ``` We will see another convenient feature of Sonnet when working with generative models in the VAE and GAN lab. ## Debugging Tensorflow Debugging tensorflow code and models can be challenging when compared to debugging 1) simple python code or even 2) other machine learning code. This is due to the separate building and running phases* of tensorflow: * You cannot simply just stop the computation midway in a `run()` call and inspect what is going on. ** * If an error is only revealed in a `session.run()` call, Tensorflow may often be unable to point you to the python code that generated the offending operation. * Race conditions may occur. These can be hard to detect because the race condition may only occur very very infrequently. In this section we list some practical advice to debugging tensorflow. <small>*&ast;Tensorflow's Eager mode removes this separation, making debugging simpler.</small><br /> <small>*&ast;*&ast;There is a [tensorflow debugger](https://www.tensorflow.org/programmers_guide/debugger) that tries to address this problem.*</small> * **Check your shapes**. It is possible that something is not of the shape you expect, but due to broadcasting the graph still computes something -- but not what you want. * **Check the graph with tensorboard**. Does it do what you wanted it to? * **Print and/or assert values of tensors**. While you cannot stop your graph mid-computation, you can print the values going through them. Unfortunately this [does not currently work](https://www.tensorflow.org/api_docs/python/tf/Print) in notebooks. ## Not covered: Control Flow In tensorflow you can define logical operations such as conditionals, loops, etc. 
In fact, Tensorflow is Turing-complete. We do not cover them as these operations are not usually required for training neural nets, and it is better to avoid them unless really needed, due to their added complexity.
github_jupyter
``` import h3 import pandas as pd import keplergl as KeplerGl import requests import json url_1 = 'https://nominatim.openstreetmap.org/search.php' params_2 = { 'q':f'amsterdam', 'polygon_geojson':1, 'format':'geojson' } geo_object = requests.get(url_1, params_2) geo_json_object = json.loads(geo_object.content) geo_feature = geo_json_object.get('features')[0].get('geometry') #print(geo_feature) # geo_feature gives back a multiPolygon # We can't work with it so we manipulate the dict manipulated_geo_feature = { 'type': 'Polygon', 'coordinates': geo_feature["coordinates"][0] } ROME_h3_from_polyfill = h3.polyfill_geojson(manipulated_geo_feature, res=9) ROME_h3_list =[] for i in ROME_h3_from_polyfill: ROME_h3_list.append(i) # Make DF for Munich for POI ROME_dict = { 'city': 'ROME', 'h3_code': ROME_h3_list } ROME_poi_df = pd.DataFrame(ROME_dict) h3_All = h3.polyfill_geojson(manipulated_geo_feature, res=9) df_rome = pd.DataFrame(h3_All) df_rome.to_csv('AMSTERDAM_all.csv', sep=',') ``` ### #4 Get ROME all POI ``` #find the center of every h3_code ROME_geo_center_list = [] for i in ROME_h3_list: k = list(h3.h3_to_geo(i)) ROME_geo_center_list.append(k) ROME_geo_center_list_string = [] def make_string_geo(): for i in ROME_geo_center_list: string = str(i[0]) + ',' + str(i[1]) ROME_geo_center_list_string.append(string) make_string_geo() api_key= 'Z1XB-w87ksYmy5gJnj4gfLPJUJRkiyyPDLEtyjnMYy0' # url_range = f'https://browse.search.hereapi.com/v1/browse?at={GEO}&in=circle:{GEO};r=700&limit=100&apiKey={api_key}' ROME_title_list = [] ROME_cat_id_list = [] ROME_cat_name_list = [] ROME_h3_source_list = [] count = 0 for index, GEO in enumerate(ROME_geo_center_list_string): resp = requests.get(url = f'https://browse.search.hereapi.com/v1/browse?at={GEO}&in=circle:{GEO};r=200&limit=100&apiKey={api_key}') data= resp.json() for i in data['items']: if 'categories' not in i: continue else: ROME_title_list.append(str(i['title'])) ROME_cat_id_list.append(str(i['categories'][0]['id'])) 
ROME_cat_name_list.append(str(i['categories'][0]['name'])) ROME_h3_source_list.append(ROME_h3_list[index]) count +=1 print('POI`s found: ' + str(count)) ROME_here_dict = { 'title': ROME_title_list, 'cat_id': ROME_cat_id_list, 'cat_name': ROME_cat_name_list, 'h3_source': ROME_h3_source_list} ROME_poi_df = pd.DataFrame(ROME_here_dict) def make_cat_def(row): if row['cat_id'][:1] == '1': return 'Eat & Drink' elif row['cat_id'][:1] == '2': return 'Going Out-Entertainment' elif row['cat_id'][:1] == '3': return 'Sights and Museums' elif row['cat_id'][:1] == '4': return 'Transport' elif row['cat_id'][:1] == '5': return 'Accommodations' elif row['cat_id'][:1] == '6': return 'Shopping' elif row['cat_id'][:1] == '7': return 'Business and Services' elif row['cat_id'][:1] == '8': return 'Facilities' elif row['cat_id'][:1] == '9': return 'Areas and Buildings' ROME_poi_df['cat_definition'] = ROME_poi_df.apply(lambda row : make_cat_def(row), axis=1) ROME_poi_df = ROME_poi_df[[ 'h3_source', 'cat_definition', 'cat_name', 'title' ]] ROME_poi_df.to_csv('AMSTERDAM_poi_df.csv', sep=',') ```
github_jupyter
# Optimization Methods Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this: <img src="images/cost.jpg" style="width:650px;height:300px;"> <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption> **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`. To get started, run the following code to import the libraries you will need. ``` import numpy as np import matplotlib.pyplot as plt import scipy.io import math import sklearn import sklearn.datasets from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset from testCases import * %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' ``` ## 1 - Gradient Descent A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. **Warm-up exercise**: Implement the gradient descent update rule. 
# GRADED FUNCTION: update_parameters_with_gd

def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Perform one step of vanilla (batch) gradient descent on every parameter.

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    num_layers = len(parameters) // 2  # each layer contributes a (W, b) pair

    # Parameter keys are 1-based ("W1", "b1", ...), so iterate layers 1..L.
    for layer in range(1, num_layers + 1):
        parameters[f"W{layer}"] -= learning_rate * grads[f"dW{layer}"]
        parameters[f"b{layer}"] -= learning_rate * grads[f"db{layer}"]

    return parameters
parameters = update_parameters(parameters, grads) ``` - **Stochastic Gradient Descent**: ```python X = data_input Y = labels parameters = initialize_parameters(layers_dims) for i in range(0, num_iterations): for j in range(0, m): # Forward propagation a, caches = forward_propagation(X[:,j], parameters) # Compute cost cost = compute_cost(a, Y[:,j]) # Backward propagation grads = backward_propagation(a, caches, parameters) # Update parameters. parameters = update_parameters(parameters, grads) ``` In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this: <img src="images/kiank_sgd.png" style="width:750px;height:250px;"> <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption> **Note** also that implementing SGD requires 3 for-loops in total: 1. Over the number of iterations 2. Over the $m$ training examples 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$) In practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples. <img src="images/kiank_minibatch.png" style="width:750px;height:250px;"> <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. 
Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption> <font color='blue'> **What you should remember**: - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step. - You have to tune a learning rate hyperparameter $\alpha$. - With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large). ## 2 - Mini-Batch Gradient descent Let's learn how to build mini-batches from the training set (X, Y). There are two steps: - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. <img src="images/kiank_shuffle.png" style="width:550px;height:300px;"> - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this: <img src="images/kiank_partition.png" style="width:550px;height:300px;"> **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches: ```python first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size] second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size] ... 
# GRADED FUNCTION: random_mini_batches

def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
         (label matrices with more than one row are also supported)
    mini_batch_size -- size of the mini-batches, integer
    seed -- RNG seed, so the "random" minibatches are reproducible

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)       # To make your "random" minibatches the same as ours
    m = X.shape[1]             # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y) with one shared column permutation, so that after
    # shuffling the i-th example is still paired with the i-th label.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    # Generalization: preserve Y's own row count instead of hard-coding a
    # single label row, so multi-row label matrices work too. For the
    # documented (1, m) input this is byte-identical to reshape((1, m)).
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))

    # Step 2: Partition into consecutive chunks. Stepping by mini_batch_size
    # automatically yields a smaller final mini-batch whenever m is not an
    # exact multiple of mini_batch_size, so no separate end-case is needed.
    for k in range(0, m, mini_batch_size):
        mini_batch_X = shuffled_X[:, k : k + mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k : k + mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches
## 3 - Momentum Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations. Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. <img src="images/opt_momentum.png" style="width:400px;height:250px;"> <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center> **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is: for $l =1,...,L$: ```python v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)]) v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)]) ``` **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop. 
``` # GRADED FUNCTION: initialize_velocity def initialize_velocity(parameters): """ Initializes the velocity as a python dictionary with: - keys: "dW1", "db1", ..., "dWL", "dbL" - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters. Arguments: parameters -- python dictionary containing your parameters. parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl Returns: v -- python dictionary containing the current velocity. v['dW' + str(l)] = velocity of dWl v['db' + str(l)] = velocity of dbl """ L = len(parameters) // 2 # number of layers in the neural networks v = {} # Initialize velocity for l in range(L): ### START CODE HERE ### (approx. 2 lines) v["dW" + str(l+1)] = np.zeros_like(parameters["W" + str(l+1)]) v["db" + str(l+1)] = np.zeros_like(parameters["b" + str(l+1)]) ### END CODE HERE ### return v parameters = initialize_velocity_test_case() v = initialize_velocity(parameters) print("v[\"dW1\"] = " + str(v["dW1"])) print("v[\"db1\"] = " + str(v["db1"])) print("v[\"dW2\"] = " + str(v["dW2"])) print("v[\"db2\"] = " + str(v["db2"])) ``` **Expected Output**: <table style="width:40%"> <tr> <td > **v["dW1"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db1"]** </td> <td > [[ 0.] [ 0.]] </td> </tr> <tr> <td > **v["dW2"]** </td> <td > [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]] </td> </tr> <tr> <td > **v["db2"]** </td> <td > [[ 0.] [ 0.] [ 0.]] </td> </tr> </table> **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$: $$ \begin{cases} v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\ W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}} \end{cases}\tag{3}$$ $$\begin{cases} v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\ b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}} \end{cases}\tag{4}$$ where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. 
# GRADED FUNCTION: update_parameters_with_momentum

def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Update parameters using Momentum

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            grad_key = f"d{prefix}{layer}"
            # Velocity: exponentially weighted average of past gradients.
            v[grad_key] = beta * v[grad_key] + (1 - beta) * grads[grad_key]
            # Step in the direction of the smoothed gradient.
            parameters[f"{prefix}{layer}"] -= learning_rate * v[grad_key]

    return parameters, v
- If $\beta = 0$, then this just becomes standard gradient descent without momentum. **How do you choose $\beta$?** - The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much. - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default. - Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$. <font color='blue'> **What you should remember**: - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent. - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$. ## 4 - Adam Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. **How does Adam work?** 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). 3. It updates parameters in a direction based on combining information from "1" and "2". 
# GRADED FUNCTION: initialize_adam

def initialize_adam(parameters):
    """
    Initializes v and s as two python dictionaries with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl

    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...
    """
    v = {}
    s = {}

    # Both moment estimates start at zero, with one "dWl"/"dbl" entry per
    # parameter; derive the keys directly from the parameter names.
    for name, value in parameters.items():
        v["d" + name] = np.zeros_like(value)
        s["d" + name] = np.zeros_like(value)

    return v, s
# GRADED FUNCTION: update_parameters_with_adam

def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999,  epsilon = 1e-8):
    """
    Update parameters using Adam

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam step counter (number of updates taken so far), used for bias correction
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    num_layers = len(parameters) // 2
    v_corrected = {}  # bias-corrected first moment estimate
    s_corrected = {}  # bias-corrected second moment estimate

    # The bias-correction denominators depend only on t, so compute them once.
    v_scale = 1 - beta1 ** t
    s_scale = 1 - beta2 ** t

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = f"d{prefix}{layer}"
            # First moment: moving average of the gradients.
            v[key] = beta1 * v[key] + (1 - beta1) * grads[key]
            v_corrected[key] = v[key] / v_scale
            # Second moment: moving average of the squared gradients.
            s[key] = beta2 * s[key] + (1 - beta2) * np.square(grads[key])
            s_corrected[key] = s[key] / s_scale
            # Combine both corrected moments for the parameter step.
            parameters[f"{prefix}{layer}"] -= (
                learning_rate * v_corrected[key] / (np.sqrt(s_corrected[key]) + epsilon)
            )

    return parameters, v, s
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999,  epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    costs = []    # cost history, sampled every 100 epochs for the final plot
    t = 0         # Adam step counter, incremented once per minibatch update
    seed = 10     # base shuffling seed (fixed for grading reproducibility)

    parameters = initialize_parameters(layers_dims)

    # Optimizer state: momentum keeps velocities; Adam keeps both moment
    # estimates; plain gradient descent needs nothing.
    if optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    for epoch in range(num_epochs):
        # Bump the seed so every epoch reshuffles the dataset differently
        # (while staying reproducible run-to-run).
        seed = seed + 1
        for minibatch_X, minibatch_Y in random_mini_batches(X, Y, mini_batch_size, seed):
            # Forward pass, cost and backward pass on this minibatch only.
            a3, caches = forward_propagation(minibatch_X, parameters)
            cost = compute_cost(a3, minibatch_Y)
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # One update step under the selected optimizer.
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam's bias correction depends on the global step count
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)

        # Report the last minibatch cost of this epoch.
        if print_cost and epoch % 1000 == 0:
            print ("Cost after epoch %i: %f" %(epoch, cost))
        if print_cost and epoch % 100 == 0:
            costs.append(cost)

    # Plot the sampled cost curve.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
Because this example is relatively simple, the gains from using momemtum are small; but for more complex problems you might see bigger gains. ``` # train 3-layer model layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Momentum optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` ### 5.3 - Mini-batch with Adam mode Run the following code to see how the model does with Adam. ``` # train 3-layer model layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, optimizer = "adam") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Adam optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` ### 5.4 - Summary <table> <tr> <td> **optimization method** </td> <td> **accuracy** </td> <td> **cost shape** </td> </tr> <td> Gradient descent </td> <td> 79.7% </td> <td> oscillations </td> <tr> <td> Momentum </td> <td> 79.7% </td> <td> oscillations </td> </tr> <tr> <td> Adam </td> <td> 94% </td> <td> smoother </td> </tr> </table> Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligeable. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult thans others for the optimization algorithm. Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster. 
Some advantages of Adam include: - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) - Usually works well even with little tuning of hyperparameters (except $\alpha$) **References**: - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
github_jupyter
# Python Basics with Numpy (optional assignment) Welcome to your first assignment. This exercise gives you a brief introduction to Python. Even if you've used Python before, this will help familiarize you with functions we'll need. **Instructions:** - You will be using Python 3. - Avoid using for-loops and while-loops, unless you are explicitly told to do so. - Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function. - After coding your function, run the cell right below it to check if your result is correct. **After this assignment you will:** - Be able to use iPython Notebooks - Be able to use numpy functions and numpy matrix/vector operations - Understand the concept of "broadcasting" - Be able to vectorize code Let's get started! ## <font color="darkblue"> Updates to Assignment</font> This is version 3a of the notebook. #### If you were working on a previous version * If you were already working on version "3", you'll find your original work in the file directory. * To reach the file directory, click on the "Coursera" icon in the top left of this notebook. * Please still use the most recent notebook to submit your assignment. #### List of Updates * softmax section has a comment to clarify the use of "m" later in the course * softmax function specifies (m,n) matrix dimensions to match the notation in the preceding diagram (instead of n,m) ## About iPython Notebooks ## iPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing "SHIFT"+"ENTER" or by clicking on "Run Cell" (denoted by a play symbol) in the upper bar of the notebook. 
# GRADED FUNCTION: basic_sigmoid

import math

def basic_sigmoid(x):
    """
    Compute the logistic sigmoid of a real number.

    Arguments:
    x -- A scalar

    Return:
    s -- sigmoid(x), i.e. 1 / (1 + e^(-x))
    """
    # math.exp only accepts scalars, which is why numpy is preferred later on.
    exponent = math.exp(-x)
    s = 1.0 / (1.0 + exponent)
    return s
# GRADED FUNCTION: sigmoid

import numpy as np

def sigmoid(x):
    """
    Compute the sigmoid of x element-wise.

    Arguments:
    x -- A scalar or numpy array of any size

    Return:
    s -- sigmoid(x)
    """
    # np.exp broadcasts over arrays, so this works for scalars, vectors and matrices.
    s = 1.0 / (1.0 + np.exp(-x))
    return s

# GRADED FUNCTION: sigmoid_derivative

def sigmoid_derivative(x):
    """
    Compute the gradient (slope/derivative) of the sigmoid with respect to x.

    Arguments:
    x -- A scalar or numpy array

    Return:
    ds -- the gradient, sigmoid(x) * (1 - sigmoid(x))
    """
    activation = sigmoid(x)
    ds = activation * (1 - activation)
    return ds
# GRADED FUNCTION: image2vector

def image2vector(image):
    """
    Unroll a 3D image array into a single column vector.

    Argument:
    image -- a numpy array of shape (length, height, depth)

    Returns:
    v -- a vector of shape (length*height*depth, 1)
    """
    # Read the dimensions from the array itself rather than hardcoding them.
    length, height, depth = image.shape
    v = image.reshape((length * height * depth, 1))
    return v
# GRADED FUNCTION: normalizeRows

def normalizeRows(x):
    """
    Normalize each row of the matrix x to unit (Euclidean) length.

    Argument:
    x -- A numpy matrix of shape (n, m)

    Returns:
    x -- The normalized (by row) numpy matrix.
    """
    # Per-row L2 norms; keepdims keeps the result as an (n, 1) column so that
    # broadcasting divides every element of a row by that row's norm.
    row_norms = np.linalg.norm(x, ord = 2, axis = 1, keepdims = True)
    x = x / row_norms
    return x
### 1.5 - Broadcasting and the softmax function #### A very important concept to understand in numpy is "broadcasting". It is very useful for performing mathematical operations between arrays of different shapes. For the full details on broadcasting, you can read the official [broadcasting documentation](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). **Exercise**: Implement a softmax function using numpy. You can think of softmax as a normalizing function used when your algorithm needs to classify two or more classes. You will learn more about softmax in the second course of this specialization. **Instructions**: - $ \text{for } x \in \mathbb{R}^{1\times n} \text{, } softmax(x) = softmax(\begin{bmatrix} x_1 && x_2 && ... && x_n \end{bmatrix}) = \begin{bmatrix} \frac{e^{x_1}}{\sum_{j}e^{x_j}} && \frac{e^{x_2}}{\sum_{j}e^{x_j}} && ... && \frac{e^{x_n}}{\sum_{j}e^{x_j}} \end{bmatrix} $ - $\text{for a matrix } x \in \mathbb{R}^{m \times n} \text{, $x_{ij}$ maps to the element in the $i^{th}$ row and $j^{th}$ column of $x$, thus we have: }$ $$softmax(x) = softmax\begin{bmatrix} x_{11} & x_{12} & x_{13} & \dots & x_{1n} \\ x_{21} & x_{22} & x_{23} & \dots & x_{2n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ x_{m1} & x_{m2} & x_{m3} & \dots & x_{mn} \end{bmatrix} = \begin{bmatrix} \frac{e^{x_{11}}}{\sum_{j}e^{x_{1j}}} & \frac{e^{x_{12}}}{\sum_{j}e^{x_{1j}}} & \frac{e^{x_{13}}}{\sum_{j}e^{x_{1j}}} & \dots & \frac{e^{x_{1n}}}{\sum_{j}e^{x_{1j}}} \\ \frac{e^{x_{21}}}{\sum_{j}e^{x_{2j}}} & \frac{e^{x_{22}}}{\sum_{j}e^{x_{2j}}} & \frac{e^{x_{23}}}{\sum_{j}e^{x_{2j}}} & \dots & \frac{e^{x_{2n}}}{\sum_{j}e^{x_{2j}}} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ \frac{e^{x_{m1}}}{\sum_{j}e^{x_{mj}}} & \frac{e^{x_{m2}}}{\sum_{j}e^{x_{mj}}} & \frac{e^{x_{m3}}}{\sum_{j}e^{x_{mj}}} & \dots & \frac{e^{x_{mn}}}{\sum_{j}e^{x_{mj}}} \end{bmatrix} = \begin{pmatrix} softmax\text{(first row of x)} \\ softmax\text{(second row of x)} \\ ... 
# GRADED FUNCTION: softmax

def softmax(x):
    """Calculate the softmax for each row of the input x.

    Works for a row vector and for matrices of shape (m, n).

    Argument:
    x -- A numpy matrix of shape (m, n)

    Returns:
    s -- A numpy matrix equal to the softmax of x, of shape (m, n)
    """
    # Shift each row by its maximum before exponentiating. Softmax is
    # invariant to a per-row constant shift, and the shift prevents
    # overflow (inf/nan) that the plain np.exp(x) produces for large inputs.
    shifted = x - np.max(x, axis = 1, keepdims = True)
    x_exp = np.exp(shifted)
    # Row sums as an (m, 1) column; broadcasting normalizes each row.
    x_sum = np.sum(x_exp, axis = 1, keepdims = True)
    s = x_exp / x_sum
    return s
You now have a pretty good understanding of python numpy and have implemented a few useful functions that you will be using in deep learning. <font color='blue'> **What you need to remember:** - np.exp(x) works for any np.array x and applies the exponential function to every coordinate - the sigmoid function and its gradient - image2vector is commonly used in deep learning - np.reshape is widely used. In the future, you'll see that keeping your matrix/vector dimensions straight will go toward eliminating a lot of bugs. - numpy has efficient built-in functions - broadcasting is extremely useful ## 2) Vectorization In deep learning, you deal with very large datasets. Hence, a non-computationally-optimal function can become a huge bottleneck in your algorithm and can result in a model that takes ages to run. To make sure that your code is computationally efficient, you will use vectorization. For example, try to tell the difference between the following implementations of the dot/outer/elementwise product. 
``` import time x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0] x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0] ### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ### tic = time.process_time() dot = 0 for i in range(len(x1)): dot+= x1[i]*x2[i] toc = time.process_time() print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC OUTER PRODUCT IMPLEMENTATION ### tic = time.process_time() outer = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros for i in range(len(x1)): for j in range(len(x2)): outer[i,j] = x1[i]*x2[j] toc = time.process_time() print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC ELEMENTWISE IMPLEMENTATION ### tic = time.process_time() mul = np.zeros(len(x1)) for i in range(len(x1)): mul[i] = x1[i]*x2[i] toc = time.process_time() print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ### W = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array tic = time.process_time() gdot = np.zeros(W.shape[0]) for i in range(W.shape[0]): for j in range(len(x1)): gdot[i] += W[i,j]*x1[j] toc = time.process_time() print ("gdot = " + str(gdot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0] x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0] ### VECTORIZED DOT PRODUCT OF VECTORS ### tic = time.process_time() dot = np.dot(x1,x2) toc = time.process_time() print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### VECTORIZED OUTER PRODUCT ### tic = time.process_time() outer = np.outer(x1,x2) toc = time.process_time() print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### VECTORIZED ELEMENTWISE MULTIPLICATION ### tic = time.process_time() mul = np.multiply(x1,x2) toc = 
# GRADED FUNCTION: L1

def L1(yhat, y):
    """
    Compute the L1 loss (sum of absolute errors).

    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L1 loss function defined above
    """
    # Use the vectorized np.sum/np.abs instead of the Python builtins:
    # builtin sum() iterates a numpy array element-by-element in Python,
    # defeating the point of this vectorization exercise.
    loss = np.sum(np.abs(y - yhat))
    return loss
# GRADED FUNCTION: L2

def L2(yhat, y):
    """
    Compute the L2 loss (sum of squared errors).

    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L2 loss function defined above
    """
    # For a 1-D vector r, np.dot(r, r) is exactly the sum of squares.
    residual = y - yhat
    loss = np.dot(residual, residual)
    return loss
github_jupyter
- GitHub: [@Jasopaum](https://github.com/Jasopaum)
``` a = sy.Promise.FloatTensor(shape=torch.Size((2, 2))) b = sy.Promise.FloatTensor(shape=torch.Size((2, 2))) promised_result = a + b # The result of an operation involving promises is a promise itself: print("promised_result: ", promised_result) a.keep(torch.tensor([[1., 1.], [1., 1.]])) a.keep(torch.tensor([[2., 2.], [2., 2.]])) b.keep(torch.tensor([[3., 3.], [3., 3.]])) b.keep(torch.tensor([[4., 4.], [4., 4.]])) print(promised_result.value()) print(promised_result.value()) ``` Operations between promises and basic torch tensors are also possible. ``` a = sy.Promise.FloatTensor(shape=torch.Size((2, 2))) promised_result = 2 * a a.keep(torch.tensor([[1., 2.], [3., 4.]])) print(promised_result.value()) ``` ### Example on a remote worker PromiseTensors can also be used between different workers. ``` bob = sy.VirtualWorker(hook, id="bob") a = sy.Promise.FloatTensor(shape=torch.Size((3, 3))) b = sy.Promise.FloatTensor(shape=torch.Size((3, 3))) x = a.send(bob) # Here, x and y are pointers to PromiseTensors y = b.send(bob) # located on worker Bob res = x - y # res is a pointer to a PromiseTensor located on Bob print(res) print(bob._objects[res.id_at_location]) x.keep(torch.tensor([[1., 1.], [1., 1.]])) y.keep(torch.tensor([[2., 3.], [4., 5.]])) res.value().get() ``` Et voilà! We have seen how to use the powerful tool that are promises with PySyft. ### Star PySyft on GitHub The easiest way to help our community is just by starring the repositories! This helps raise awareness of the cool tools we're building. - [Star PySyft](https://github.com/OpenMined/PySyft) ### Pick our tutorials on GitHub! We made really nice tutorials to get a better understanding of what Federated and Privacy-Preserving Learning should look like and how we are building the bricks for this to happen. - [Checkout the PySyft tutorials](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials) ### Join our Slack! 
The best way to keep up to date on the latest advancements is to join our community! - [Join slack.openmined.org](http://slack.openmined.org) ### Join a Code Project! The best way to contribute to our community is to become a code contributor! If you want to start "one off" mini-projects, you can go to PySyft GitHub Issues page and search for issues marked `Good First Issue`. - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) ### Donate If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! - [Donate through OpenMined's Open Collective Page](https://opencollective.com/openmined)
github_jupyter
``` import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import numpy as np import json import tensorflow as tf import itertools import collections import re import random import sentencepiece as spm from tqdm import tqdm import xlnet_utils as squad_utils import xlnet from prepro_utils import preprocess_text, encode_ids sp_model = spm.SentencePieceProcessor() sp_model.Load('sp10m.cased.v9.model') import pickle with open('/home/husein/xlnet/xlnet-squad-test.pkl', 'rb') as fopen: test_features, test_examples = pickle.load(fopen) max_seq_length = 512 doc_stride = 128 max_query_length = 64 epoch = 5 batch_size = 6 warmup_proportion = 0.1 n_best_size = 20 num_train_steps = int(len(test_features) / batch_size * epoch) num_warmup_steps = int(num_train_steps * warmup_proportion) learning_rate = 2e-5 kwargs = dict( is_training=False, use_tpu=False, use_bfloat16=False, dropout=0.1, dropatt=0.1, init='normal', init_range=0.1, init_std=0.05, clamp_len=-1) xlnet_parameters = xlnet.RunConfig(**kwargs) xlnet_config = xlnet.XLNetConfig( json_path = 'alxlnet-base-2020-04-10/config.json' ) training_parameters = dict( decay_method = 'poly', train_steps = num_train_steps, learning_rate = learning_rate, warmup_steps = num_warmup_steps, min_lr_ratio = 0.0, weight_decay = 0.00, adam_epsilon = 1e-8, num_core_per_host = 1, lr_layer_decay_rate = 1, use_tpu=False, use_bfloat16=False, dropout=0.0, dropatt=0.0, init='normal', init_range=0.1, init_std=0.05, clip = 1.0, clamp_len=-1,) class Parameter: def __init__(self, decay_method, warmup_steps, weight_decay, adam_epsilon, num_core_per_host, lr_layer_decay_rate, use_tpu, learning_rate, train_steps, min_lr_ratio, clip, **kwargs): self.decay_method = decay_method self.warmup_steps = warmup_steps self.weight_decay = weight_decay self.adam_epsilon = adam_epsilon self.num_core_per_host = num_core_per_host self.lr_layer_decay_rate = lr_layer_decay_rate self.use_tpu = use_tpu self.learning_rate = learning_rate 
self.train_steps = train_steps self.min_lr_ratio = min_lr_ratio self.clip = clip training_parameters = Parameter(**training_parameters) from tensorflow.contrib import layers as contrib_layers class Model: def __init__(self, is_training = True): self.X = tf.placeholder(tf.int32, [None, None]) self.segment_ids = tf.placeholder(tf.int32, [None, None]) self.input_masks = tf.placeholder(tf.float32, [None, None]) self.start_positions = tf.placeholder(tf.int32, [None]) self.end_positions = tf.placeholder(tf.int32, [None]) self.p_mask = tf.placeholder(tf.float32, [None, None]) self.is_impossible = tf.placeholder(tf.int32, [None]) self.cls_index = tf.placeholder(tf.int32, [None]) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=xlnet_parameters, input_ids=tf.transpose(self.X, [1, 0]), seg_ids=tf.transpose(self.segment_ids, [1, 0]), input_mask=tf.transpose(self.input_masks, [1, 0])) output = xlnet_model.get_sequence_output() self.output = output self.model = xlnet_model is_training = False tf.reset_default_graph() model = Model(is_training = is_training) start_n_top = 5 end_n_top = 5 seq_len = tf.shape(model.X)[1] initializer = model.model.get_initializer() return_dict = {} p_mask = model.p_mask output = model.output cls_index = model.cls_index with tf.variable_scope('start_logits'): start_logits = tf.layers.dense( output, 1, kernel_initializer = initializer ) start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) with tf.variable_scope('end_logits'): if is_training: # during training, compute the end logits based on the # ground truth of the start position start_positions = tf.reshape(model.start_positions, [-1]) start_index = tf.one_hot( start_positions, depth = seq_len, axis = -1, dtype = tf.float32 ) start_features = tf.einsum('lbh,bl->bh', output, start_index) start_features = tf.tile(start_features[None], 
[seq_len, 1, 1]) end_logits = tf.layers.dense( tf.concat([output, start_features], axis = -1), xlnet_config.d_model, kernel_initializer = initializer, activation = tf.tanh, name = 'dense_0', ) end_logits = tf.contrib.layers.layer_norm( end_logits, begin_norm_axis = -1 ) end_logits = tf.layers.dense( end_logits, 1, kernel_initializer = initializer, name = 'dense_1', ) end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) else: # during inference, compute the end logits based on beam search start_top_log_probs, start_top_index = tf.nn.top_k( start_log_probs, k = start_n_top ) start_index = tf.one_hot( start_top_index, depth = seq_len, axis = -1, dtype = tf.float32 ) start_features = tf.einsum('lbh,bkl->bkh', output, start_index) end_input = tf.tile( output[:, :, None], [1, 1, start_n_top, 1] ) start_features = tf.tile(start_features[None], [seq_len, 1, 1, 1]) end_input = tf.concat([end_input, start_features], axis = -1) end_logits = tf.layers.dense( end_input, xlnet_config.d_model, kernel_initializer = initializer, activation = tf.tanh, name = 'dense_0', ) end_logits = tf.contrib.layers.layer_norm( end_logits, begin_norm_axis = -1 ) end_logits = tf.layers.dense( end_logits, 1, kernel_initializer = initializer, name = 'dense_1', ) end_logits = tf.reshape( end_logits, [seq_len, -1, start_n_top] ) end_logits = tf.transpose(end_logits, [1, 2, 0]) end_logits_masked = ( end_logits * (1 - p_mask[:, None]) - 1e30 * p_mask[:, None] ) end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) end_top_log_probs, end_top_index = tf.nn.top_k( end_log_probs, k = end_n_top ) end_top_log_probs = tf.reshape( end_top_log_probs, [-1, start_n_top * end_n_top] ) end_top_index = tf.reshape( end_top_index, [-1, start_n_top * end_n_top] ) if is_training: return_dict['start_log_probs'] = start_log_probs return_dict['end_log_probs'] = end_log_probs else: 
return_dict['start_top_log_probs'] = start_top_log_probs return_dict['start_top_index'] = start_top_index return_dict['end_top_log_probs'] = end_top_log_probs return_dict['end_top_index'] = end_top_index # an additional layer to predict answerability with tf.variable_scope('answer_class'): # get the representation of CLS cls_index = tf.one_hot( cls_index, seq_len, axis = -1, dtype = tf.float32 ) cls_feature = tf.einsum('lbh,bl->bh', output, cls_index) # get the representation of START start_p = tf.nn.softmax( start_logits_masked, axis = -1, name = 'softmax_start' ) start_feature = tf.einsum('lbh,bl->bh', output, start_p) # note(zhiliny): no dependency on end_feature so that we can obtain # one single `cls_logits` for each sample ans_feature = tf.concat([start_feature, cls_feature], -1) ans_feature = tf.layers.dense( ans_feature, xlnet_config.d_model, activation = tf.tanh, kernel_initializer = initializer, name = 'dense_0', ) ans_feature = tf.layers.dropout( ans_feature, 0.1, training = is_training ) cls_logits = tf.layers.dense( ans_feature, 1, kernel_initializer = initializer, name = 'dense_1', use_bias = False, ) cls_logits = tf.squeeze(cls_logits, -1) return_dict['cls_logits'] = cls_logits seq_length = tf.shape(model.X)[1] cls_logits = return_dict['cls_logits'] is_impossible = tf.reshape(model.is_impossible, [-1]) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) saver = tf.train.Saver(var_list = tf.trainable_variables()) saver.restore(sess, 'alxlnet-base-squad/model.ckpt') all_results = [] pbar = tqdm( range(0, len(test_features), batch_size), desc = 'test minibatch loop' ) for i in pbar: batch = test_features[i: i + batch_size] batch_ids = [b.input_ids for b in batch] batch_masks = [b.input_mask for b in batch] batch_segment = [b.segment_ids for b in batch] batch_start = [b.start_position for b in batch] batch_end = [b.end_position for b in batch] is_impossible = [b.is_impossible for b in batch] p_mask = [b.p_mask for b in batch] 
cls_index = [b.cls_index for b in batch] o = sess.run( [start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits], feed_dict = { model.X: batch_ids, model.segment_ids: batch_segment, model.input_masks: batch_masks, model.p_mask: p_mask, model.cls_index: cls_index }, ) for no, b in enumerate(batch): start_top_log_probs_ = ( [float(x) for x in o[0][no].flat]) start_top_index_ = [int(x) for x in o[1][no].flat] end_top_log_probs_ = ( [float(x) for x in o[2][no].flat]) end_top_index_ = [int(x) for x in o[3][no].flat] cls_logits_ = float(o[4][no].flat[0]) all_results.append(squad_utils.RawResult( unique_id=b.unique_id, start_top_log_probs=start_top_log_probs_, start_top_index=start_top_index_, end_top_log_probs=end_top_log_probs_, end_top_index=end_top_index_, cls_logits=cls_logits_)) with open('/home/husein/pure-text/ms-dev-2.0.json') as predict_file: orig_data = json.load(predict_file)["data"] output_prediction_file = 'predict.json' output_nbest_file = 'nbest_predictions.json' output_null_log_odds_file = 'null_odds.json' max_answer_length = 64 squad_utils.write_predictions(test_examples, test_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data) ```
github_jupyter
<div style="float:right; color:red; font-weight:bold;">Rename this file before you work on it!</div> # Exercise on Joins :: Part 1 Use the Adventure Works dataset to create the following reports. The dataset is availablt for download in it's original format at https://msdn.microsoft.com/en-us/library/hh403424.aspx (follow instructions to download). ## Task 1. write the Python Pandas expression to produce a table as described in the problem statements. 2. The SQL expression may give you a hint. It also allows you to see both systems side-by-side. 3. If you don't know SQL just ignore the SQL code. ``` import pandas as pd import numpy as np Empoyees = pd.read_excel('/home/data/AdventureWorks/Employees.xls') Territory = pd.read_excel('/home/data/AdventureWorks/SalesTerritory.xls') Customers = pd.read_excel('/home/data/AdventureWorks/Customers.xls') Orders = pd.read_excel('/home/data/AdventureWorks/ItemsOrdered.xls') ``` ### 1a. Show me all the employees, and if any are salespeople then show me the details about their sales territory ### 1b. Change the columns above From Employees table use: EmployeeID, FirstName, LastName, TerritoryID From the SalesTerritory table use: show all the columns except for the TerritoryID ### 1c. For the list above, limit the results to just salespeople ### 2a. Give me a list of customers ### 2b. For those customers, also tell me which sales territory they fall in ### 3a. Give me a list of sales territories ### 3b. For those sales territories, also show what customers fall under them ### 3c. Make sure, if you didn't already, that any sales territories with 0 customers are also shown in the list ### 4a. Give me a list of the customers we have in North Carolina, and note how many there are. ### 4b. For our North Carolina customers, show me the items they have ordered ### 4c. Change it so we're only looking at customers who are in Arizona ### 4d. 
For each of the items ordered, show the total price of the order (sometimes they ordered more than 1 item) ### 4e. For the Arizona customers above, show the maximum total price per customer. Choose the columns wisely ### 4f. For the results above, only show the ones where the MaxTotalPrice is more than $5.00 ### BONUS 1: Take the query from #3c and add a column called "CityRegion" that combines dbo.Customers.City and ### BONUS 2: Fix the problem above where CityRegion is NULL sometimes; change it to, for example, "n/a - North America" ### OVERACHIEVER 1: Why do you think a database is designed so the data is split up into different tables? Why not just put it all in one table? ### OVERACHIEVER 2a: Take the query from 1b and sort it by the sales region so that all the salespeople are the top of the list ### OVERACHIEVER 2b: From the query above, if the sales territory is outside Europe, do not display it (but we still want to see all the employees in the company!)
github_jupyter
<a href="https://colab.research.google.com/github/PacktPublishing/Hands-On-Computer-Vision-with-PyTorch/blob/master/Chapter10/corwd_counting.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` %%time import os if not os.path.exists('CSRNet-pytorch/'): !pip install -U scipy torch_snippets torch_summary !git clone https://github.com/sizhky/CSRNet-pytorch.git from google.colab import files files.upload() # upload kaggle.json !mkdir -p ~/.kaggle !mv kaggle.json ~/.kaggle/ !ls ~/.kaggle !chmod 600 /root/.kaggle/kaggle.json print('downloading data...') !kaggle datasets download -d tthien/shanghaitech-with-people-density-map/ print('unzipping data...') !unzip -qq shanghaitech-with-people-density-map.zip %cd CSRNet-pytorch !ln -s ../shanghaitech_with_people_density_map from torch_snippets import * import h5py from scipy import io part_A = Glob('shanghaitech_with_people_density_map/ShanghaiTech/part_A/train_data/'); image_folder = 'shanghaitech_with_people_density_map/ShanghaiTech/part_A/train_data/images/' heatmap_folder = 'shanghaitech_with_people_density_map/ShanghaiTech/part_A/train_data/ground-truth-h5/' gt_folder = 'shanghaitech_with_people_density_map/ShanghaiTech/part_A/train_data/ground-truth/' device = 'cuda' if torch.cuda.is_available() else 'cpu' tfm = T.Compose([ T.ToTensor() ]) class Crowds(Dataset): def __init__(self, stems): self.stems = stems def __len__(self): return len(self.stems) def __getitem__(self, ix): _stem = self.stems[ix] image_path = f'{image_folder}/{_stem}.jpg' heatmap_path = f'{heatmap_folder}/{_stem}.h5' gt_path = f'{gt_folder}/GT_{_stem}.mat' pts = io.loadmat(gt_path) pts = len(pts['image_info'][0,0][0,0][0]) image = read(image_path, 1) with h5py.File(heatmap_path, 'r') as hf: gt = hf['density'][:] gt = resize(gt, 1/8)*64 return image.copy(), gt.copy(), pts def collate_fn(self, batch): ims, gts, pts = list(zip(*batch)) ims = torch.cat([tfm(im)[None] for im in 
ims]).to(device) gts = torch.cat([tfm(gt)[None] for gt in gts]).to(device) return ims, gts, torch.tensor(pts).to(device) def choose(self): return self[randint(len(self))] from sklearn.model_selection import train_test_split trn_stems, val_stems = train_test_split(stems(Glob(image_folder)), random_state=10) trn_ds = Crowds(trn_stems) val_ds = Crowds(val_stems) trn_dl = DataLoader(trn_ds, batch_size=1, shuffle=True, collate_fn=trn_ds.collate_fn) val_dl = DataLoader(val_ds, batch_size=1, shuffle=True, collate_fn=val_ds.collate_fn) import torch.nn as nn import torch from torchvision import models from utils import save_net,load_net def make_layers(cfg, in_channels = 3,batch_norm=False,dilation = False): if dilation: d_rate = 2 else: d_rate = 1 layers = [] for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate, dilation=d_rate) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) class CSRNet(nn.Module): def __init__(self, load_weights=False): super(CSRNet, self).__init__() self.seen = 0 self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512] self.backend_feat = [512, 512, 512, 256, 128, 64] self.frontend = make_layers(self.frontend_feat) self.backend = make_layers(self.backend_feat,in_channels = 512,dilation = True) self.output_layer = nn.Conv2d(64, 1, kernel_size=1) if not load_weights: mod = models.vgg16(pretrained = True) self._initialize_weights() items = list(self.frontend.state_dict().items()) _items = list(mod.state_dict().items()) for i in range(len(self.frontend.state_dict().items())): items[i][1].data[:] = _items[i][1].data[:] def forward(self,x): x = self.frontend(x) x = self.backend(x) x = self.output_layer(x) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): 
nn.init.normal_(m.weight, std=0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def train_batch(model, data, optimizer, criterion): model.train() optimizer.zero_grad() ims, gts, pts = data _gts = model(ims) loss = criterion(_gts, gts) loss.backward() optimizer.step() pts_loss = nn.L1Loss()(_gts.sum(), gts.sum()) return loss.item(), pts_loss.item() @torch.no_grad() def validate_batch(model, data, criterion): model.eval() ims, gts, pts = data _gts = model(ims) loss = criterion(_gts, gts) pts_loss = nn.L1Loss()(_gts.sum(), gts.sum()) return loss.item(), pts_loss.item() model = CSRNet().to(device) criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=1e-6) n_epochs = 20 log = Report(n_epochs) for ex in range(n_epochs): N = len(trn_dl) for bx, data in enumerate(trn_dl): loss, pts_loss = train_batch(model, data, optimizer, criterion) log.record(ex+(bx+1)/N, trn_loss=loss, trn_pts_loss=pts_loss, end='\r') N = len(val_dl) for bx, data in enumerate(val_dl): loss, pts_loss = validate_batch(model, data, criterion) log.record(ex+(bx+1)/N, val_loss=loss, val_pts_loss=pts_loss, end='\r') log.report_avgs(ex+1) from matplotlib import cm as c from torchvision import datasets, transforms from PIL import Image transform=transforms.Compose([ transforms.ToTensor(),transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) test_folder = 'shanghaitech_with_people_density_map/ShanghaiTech/part_A/test_data/' imgs = Glob(f'{test_folder}/images') f = choose(imgs) print(f) img = transform(Image.open(f).convert('RGB')).to(device) output = model(img[None]) print("Predicted Count : ",int(output.detach().cpu().sum().numpy())) temp = np.asarray(output.detach().cpu().reshape(output.detach().cpu().shape[2],output.detach().cpu().shape[3])) plt.imshow(temp,cmap = c.jet) plt.show() ```
github_jupyter
``` import os import pickle # Gmail API utils from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request # for encoding/decoding messages in base64 from base64 import urlsafe_b64decode, urlsafe_b64encode # for dealing with attachement MIME types from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.image import MIMEImage from email.mime.audio import MIMEAudio from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from mimetypes import guess_type as guess_mime_type # Request all access (permission to read/send/receive emails, manage the inbox, and more) SCOPES = ['https://mail.google.com/'] our_email = 'your_gmail@gmail.com' def gmail_authenticate(): creds = None # the file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first time if os.path.exists("token.pickle"): with open("token.pickle", "rb") as token: creds = pickle.load(token) # if there are no (valid) credentials availablle, let the user log in. 
if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES) creds = flow.run_local_server(port=0) # save the credentials for the next run with open("token.pickle", "wb") as token: pickle.dump(creds, token) return build('gmail', 'v1', credentials=creds) # get the Gmail API service service = gmail_authenticate() # Adds the attachment with the given filename to the given message def add_attachment(message, filename): content_type, encoding = guess_mime_type(filename) if content_type is None or encoding is not None: content_type = 'application/octet-stream' main_type, sub_type = content_type.split('/', 1) if main_type == 'text': fp = open(filename, 'rb') msg = MIMEText(fp.read().decode(), _subtype=sub_type) fp.close() elif main_type == 'image': fp = open(filename, 'rb') msg = MIMEImage(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'audio': fp = open(filename, 'rb') msg = MIMEAudio(fp.read(), _subtype=sub_type) fp.close() else: fp = open(filename, 'rb') msg = MIMEBase(main_type, sub_type) msg.set_payload(fp.read()) fp.close() filename = os.path.basename(filename) msg.add_header('Content-Disposition', 'attachment', filename=filename) message.attach(msg) def build_message(destination, obj, body, attachments=[]): if not attachments: # no attachments given message = MIMEText(body) message['to'] = destination message['from'] = our_email message['subject'] = obj else: message = MIMEMultipart() message['to'] = destination message['from'] = our_email message['subject'] = obj message.attach(MIMEText(body)) for filename in attachments: add_attachment(message, filename) return {'raw': urlsafe_b64encode(message.as_bytes()).decode()} def send_message(service, destination, obj, body, attachments=[]): return service.users().messages().send( userId="me", body=build_message(destination, obj, body, attachments) ).execute() # test send email 
send_message(service, "destination@domain.com", "This is a subject", "This is the body of the email", ["test.txt", "credentials.json"]) def search_messages(service, query): result = service.users().messages().list(userId='me',q=query).execute() messages = [ ] if 'messages' in result: messages.extend(result['messages']) while 'nextPageToken' in result: page_token = result['nextPageToken'] result = service.users().messages().list(userId='me',q=query, pageToken=page_token).execute() if 'messages' in result: messages.extend(result['messages']) return messages # utility functions def get_size_format(b, factor=1024, suffix="B"): """ Scale bytes to its proper byte format e.g: 1253656 => '1.20MB' 1253656678 => '1.17GB' """ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if b < factor: return f"{b:.2f}{unit}{suffix}" b /= factor return f"{b:.2f}Y{suffix}" def clean(text): # clean text for creating a folder return "".join(c if c.isalnum() else "_" for c in text) def parse_parts(service, parts, folder_name): """ Utility function that parses the content of an email partition """ if parts: for part in parts: filename = part.get("filename") mimeType = part.get("mimeType") body = part.get("body") data = body.get("data") file_size = body.get("size") part_headers = part.get("headers") if part.get("parts"): # recursively call this function when we see that a part # has parts inside parse_parts(service, part.get("parts"), folder_name) if mimeType == "text/plain": # if the email part is text plain if data: text = urlsafe_b64decode(data).decode() print(text) elif mimeType == "text/html": # if the email part is an HTML content # save the HTML file and optionally open it in the browser if not filename: filename = "index.html" filepath = os.path.join(folder_name, filename) print("Saving HTML to", filepath) with open(filepath, "wb") as f: f.write(urlsafe_b64decode(data)) else: # attachment other than a plain text or HTML for part_header in part_headers: part_header_name = 
part_header.get("name") part_header_value = part_header.get("value") if part_header_name == "Content-Disposition": if "attachment" in part_header_value: # we get the attachment ID # and make another request to get the attachment itself print("Saving the file:", filename, "size:", get_size_format(file_size)) attachment_id = body.get("attachmentId") attachment = service.users().messages() \ .attachments().get(id=attachment_id, userId='me', messageId=msg['id']).execute() data = attachment.get("data") filepath = os.path.join(folder_name, filename) if data: with open(filepath, "wb") as f: f.write(urlsafe_b64decode(data)) def read_message(service, message_id): """ This function takes Gmail API `service` and the given `message_id` and does the following: - Downloads the content of the email - Prints email basic information (To, From, Subject & Date) and plain/text parts - Creates a folder for each email based on the subject - Downloads text/html content (if available) and saves it under the folder created as index.html - Downloads any file that is attached to the email and saves it in the folder created """ msg = service.users().messages().get(userId='me', id=message_id['id'], format='full').execute() # parts can be the message body, or attachments payload = msg['payload'] headers = payload.get("headers") parts = payload.get("parts") folder_name = "email" if headers: # this section prints email basic info & creates a folder for the email for header in headers: name = header.get("name") value = header.get("value") if name == 'From': # we print the From address print("From:", value) if name == "To": # we print the To address print("To:", value) if name == "Subject": # make a directory with the name of the subject folder_name = clean(value) # we will also handle emails with the same subject name folder_counter = 0 while os.path.isdir(folder_name): folder_counter += 1 # we have the same folder name, add a number next to it if folder_name[-1].isdigit() and folder_name[-2] == 
"_": folder_name = f"{folder_name[:-2]}_{folder_counter}" elif folder_name[-2:].isdigit() and folder_name[-3] == "_": folder_name = f"{folder_name[:-3]}_{folder_counter}" else: folder_name = f"{folder_name}_{folder_counter}" os.mkdir(folder_name) print("Subject:", value) if name == "Date": # we print the date when the message was sent print("Date:", value) parse_parts(service, parts, folder_name) print("="*50) # get emails that match the query you specify results = search_messages(service, "Python Code") # for each email matched, read it (output plain/text to console & save HTML and attachments) for msg in results: read_message(service, msg) def mark_as_read(service, query): messages_to_mark = search_messages(service, query) return service.users().messages().batchModify( userId='me', body={ 'ids': [ msg['id'] for msg in messages_to_mark ], 'removeLabelIds': ['UNREAD'] } ).execute() def mark_as_unread(service, query): messages_to_mark = search_messages(service, query) return service.users().messages().batchModify( userId='me', body={ 'ids': [ msg['id'] for msg in messages_to_mark ], 'addLabelIds': ['UNREAD'] } ).execute() mark_as_read(service, "Google") # search query by sender/receiver mark_as_unread(service, "email@domain.com") def delete_messages(service, query): messages_to_delete = search_messages(service, query) # it's possible to delete a single message with the delete API, like this: # service.users().messages().delete(userId='me', id=msg['id']) # but it's also possible to delete all the selected messages with one query, batchDelete return service.users().messages().batchDelete( userId='me', body={ 'ids': [ msg['id'] for msg in messages_to_delete] } ).execute() delete_messages(service, "Google Alerts") ```
github_jupyter
# Karplus-Strong-style feedback synthesis exercise.
#
# 1) Create a function which takes vector x, a, and N as length of output
#    signal and returns y[n].
# 2) Create a function which generates a random x vector, feeds it to the
#    previous function and saves the y[n] output signal as a .wav file.
# 3) Using the last function, create wav examples.

import copy  # nd.array deep copy
import os

import numpy as np  # math

# Wave file save
import scipy.io.wavfile as wavfile

# Audio playing (only available inside notebook/IPython environments).
try:
    from IPython.display import Audio
except ImportError:
    Audio = None

# Visualization (the "%matplotlib inline" magic is dropped in this script form).
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None

# Recurrence implemented below:
#     y[n] = x[n] + a * y[n - M]
#     0 < a < 1
#     x[n] = y[n] = 0 if n < 0


def karplus_strong_naive(x, a, y_len):
    """Synthesize y[n] = x[n] + a * y[n - M] with the delay M = len(x).

    For n >= len(x) the excitation term x[n] is zero, so every later output
    sample is just the attenuated sample one period (len(x)) back; while
    n < len(x) the feedback term is zero because y[n] = 0 for n < 0.

    Parameters:
        x (list or nd.array) - initial excitation signal
        a (float) - feedback gain (alpha); 0 < a < 1 gives a decaying tail
        y_len (int) - length of the output signal

    Return:
        np.array - synthesized y signal
    """
    x = copy.deepcopy(x)  # do not mutate the caller's signal
    x_len = len(x)
    y = []
    for i in range(y_len):
        if i < x_len:
            y.append(x[i])
        else:
            y.append(a * y[i - x_len])
    return np.array(y)


# def karplus_strong(x, alpha, N, num_repetition):
#     """
#     Synthesizes a new waveform by repeating x with a per-repetition decay.
#     Parameters:
#         x (list or nd.array) - initially generated signal
#         alpha (float) - decay (or envelope)
#         N (int) - previous value of y
#         num_repetition (int) - number of repetitions of x
#     Return:
#         np.array - transformed y signal
#     """
#     x = copy.deepcopy(x)
#     len_x = len(x)
#     y = []
#     for i in range(num_repetition):
#         y = np.append(y, [np.power(alpha, i) * elem for elem in x])
#     return y


def gen_signal_save_output(range_from=1, range_to=10, x_len=1000, alpha=0.5,
                           M=3, y_len=48000, sampling_rate=16000,
                           path_to_save='_outputs/'):
    """Generate a random excitation, synthesize y and save it as a wav file.

    Parameters:
        range_from (int) - start of the random-integer interval
        range_to (int) - end of the random-integer interval
        x_len (int) - length of the initial signal to generate
        alpha (float) - decay (feedback gain)
        M (int) - unused; kept for backward compatibility (the delay is
                  implicitly x_len inside karplus_strong_naive)
        y_len (int) - length of the output signal
        sampling_rate (int) - sample rate passed to wavfile.write
        path_to_save (string) - relative directory where the wav file is saved

    Returns:
        tuple - the pair (x, y), for comparison
    """
    # np.float was removed in modern NumPy; plain float is the same dtype.
    x = np.random.randint(range_from, range_to, x_len).astype(float)
    y = karplus_strong_naive(x, alpha, y_len)
    # y = y.astype('int16')
    # wavfile.write fails if the target directory does not exist.
    os.makedirs(path_to_save, exist_ok=True)
    # Bug fix: the sample rate was hard-coded to 16000, ignoring sampling_rate.
    wavfile.write(str(path_to_save) + 'y' + '.wav', sampling_rate, y)
    return x, y


if __name__ == '__main__':
    x, y = gen_signal_save_output(x_len=5, y_len=10, M=2, alpha=0.5)
    print(x)
    print(y)
    x, y = gen_signal_save_output(x_len=1000, y_len=48000, M=10, alpha=0.99)
    if Audio is not None:
        Audio(y, rate=16000)
github_jupyter
# Sentiment Analysis: Vectorization
#
# References:
# - scikit-learn, Text feature extraction:
#   http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
# - CountVectorizer:
#   http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# - TfidfVectorizer:
#   http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer

# %load_ext autoreload   (notebook magic)
# %autoreload 2          (notebook magic)
from util import load_datasets

train, _, _ = load_datasets()
X_train, y_train = train

# --- Bag-of-words ------------------------------------------------------------
# Train a bag-of-words vectorizer:
from sklearn.feature_extraction.text import CountVectorizer

vect = CountVectorizer()
vect.fit(X_train)

# Vectorize one element. The result is a sparse matrix:
x = vect.transform([X_train[0]])
x

# Let's look at the non-zero entries:
# x.toarray()
# Bug fix: iterate over the real vocabulary size instead of a hard-coded 32422.
[(i, x[0, i]) for i in range(x.shape[1]) if x[0, i]]
# NOTE(review): newer scikit-learn renames get_feature_names to
# get_feature_names_out — confirm against the pinned version.
features = vect.get_feature_names()
features[592]

# --- Min counts --------------------------------------------------------------
# It may be better to require a minimum frequency for the words we keep:
vect = CountVectorizer(min_df=5)
vect.fit(X_train)
x = vect.transform(X_train[:1])
x

# We can ask the vectorizer which features it found:
features = vect.get_feature_names()
features[2000:2010]
[(features[i], x[0, i]) for i in range(x.shape[1]) if x[0, i]]

# --- Max features ------------------------------------------------------------
# We can also cap the features at the N most frequent ones:
vect = CountVectorizer(max_features=100)
vect.fit(X_train)
x = vect.transform(X_train[:1])
x

# The selected features do not look very informative polarity-wise:
features = vect.get_feature_names()
features[10:20]
[(features[i], x[0, i]) for i in range(x.shape[1]) if x[0, i]]

# --- Vocabulary --------------------------------------------------------------
# We can also restrict the features to a predefined vocabulary.
# For example, if we have lexicons of positive and negative words:
positive_words = [
    'good',
    'best',
    'excellent',
    'awesome',
]
negative_words = [
    'bad',
    'worst',
    'horrendous',
    'awful',
]
vocabulary = positive_words + negative_words

vect = CountVectorizer(vocabulary=vocabulary)
vect.fit(X_train)
x = vect.transform(X_train[:1])
x
features = vect.get_feature_names()
[(features[i], x[0, i]) for i in range(x.shape[1]) if x[0, i]]

# --- Other parameters --------------------------------------------------------
# - binary=True: binarize counts (0 or 1)
# - ngram_range=(p, q): count word n-grams with n in (p, q)
# - stop_words: filter out some words
# - analyzer='char': characters instead of words
# ...and several more.

# --- TF-IDF ------------------------------------------------------------------
# Some words are very frequent in every document (articles, prepositions) and
# therefore uninformative. TF-IDF divides the count by a number measuring this.
from sklearn.feature_extraction.text import TfidfVectorizer

vect = TfidfVectorizer(binary=True)
vect.fit(X_train)
features = vect.get_feature_names()
x = vect.transform(X_train[:1])
x

# For instance, the word 'the' gets much less weight than 'annoying', despite
# being far more frequent:
x[0, features.index('the')], x[0, features.index('annoying')]
tokens = X_train[0].decode('utf-8').split()
tokens.count('the'), tokens.count('annoying')
github_jupyter
``` import time import requests import pandas as pd import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np """ Set configuration here """ CONTRACT_ADDRESS = "0x9A534628B4062E123cE7Ee2222ec20B86e16Ca8F" COLLECTION = "MekaVerse" METHOD = "raritytools" TOKEN_COL = "TOKEN_ID" # Use TOKEN_NAME if you prefer to infer token id from token name NUMBERS_TO_CHECK = 50 # Number of tokens to search for opportunities OPENSEA_API_KEY = "YOUR_API_KEY" # Define variables used throughout RARITY_DATABASE = f"../metadata/rarity_data/{COLLECTION}_{METHOD}.csv" ETHER_UNITS = 1e18 """ Plot params """ plt.rcParams.update({"figure.facecolor": "white", "savefig.facecolor": "white"}) # Load rarity database and format RARITY_DB = pd.read_csv(RARITY_DATABASE) RARITY_DB = RARITY_DB[RARITY_DB["TOKEN_ID"].duplicated() == False] if TOKEN_COL == "TOKEN_NAME": RARITY_DB["TOKEN_ID"] = RARITY_DB["TOKEN_NAME"].str.split("#").str[1].astype(int) """ Get open bids from OpenSea and plot. """ def getOpenseaOrders(token_id, contract_address): url = "https://api.opensea.io/wyvern/v1/orders" querystring = { "bundled": "false", "include_bundled": "false", "is_english": "false", "include_invalid": "false", "limit": "50", "offset": "0", "order_by": "created_date", "order_direction": "desc", "asset_contract_address": contract_address, "token_ids": [token_id], } headers = {"Accept": "application/json", "X-API-KEY": OPENSEA_API_KEY} response = requests.request("GET", url, headers=headers, params=querystring) response_json = response.json() return response_json def plot_all_bids(bid_db): series = [] max_listings = bid_db["token_ids"].value_counts().max() for i in range(1, max_listings + 1): n_bids = bid_db.groupby("token_ids").filter(lambda x: len(x) == i) series.append(n_bids) colors = iter(cm.rainbow(np.linspace(0, 1, len(series)))) for i in range(0, len(series)): plt.scatter( series[i]["ranks"], series[i]["bid"], color=next(colors), label=i + 1 ) plt.xlabel("rarity rank") plt.ylabel("price 
(ETHER)") plt.legend(loc="best") plt.show() def get_all_bids(rarity_db): token_ids = [] ranks = [] bids = [] numbersToCheck = [] for x in rarity_db["TOKEN_ID"]: numbersToCheck.append(x) if len(numbersToCheck) == 15: # send 15 NFTs at a time to API orders = getOpenseaOrders(numbersToCheck, CONTRACT_ADDRESS) numbersToCheck = [] for order in orders["orders"]: if order["side"] == 0: tokenId = int(order["asset"]["token_id"]) token_ids.append(tokenId) ranks.append( float(rarity_db[rarity_db["TOKEN_ID"] == tokenId]["Rank"]) ) bids.append(float(order["base_price"]) / ETHER_UNITS) bid_db = pd.DataFrame(columns=["token_ids", "ranks", "bid"]) bid_db["token_ids"] = token_ids bid_db["ranks"] = ranks bid_db["bid"] = bids return bid_db bid_db = get_all_bids(RARITY_DB.head(NUMBERS_TO_CHECK)) bid_db = bid_db.sort_values(by=["ranks"]) print(bid_db.set_index("token_ids").head(50)) plot_all_bids(bid_db) """ Get open offers from OpenSea and plot. """ def getOpenseaOrders(token_id, contract_address): # gets orders, both bids and asks # divide token_list into limit sized chunks and get output url = "https://api.opensea.io/wyvern/v1/orders" querystring = { "bundled": "false", "include_bundled": "false", "is_english": "false", "include_invalid": "false", "limit": "50", "offset": "0", "order_by": "created_date", "order_direction": "desc", "asset_contract_address": contract_address, "token_ids": [token_id], } headers = {"Accept": "application/json", "X-API-KEY": OPENSEA_API_KEY} response = requests.request("GET", url, headers=headers, params=querystring) responseJson = response.json() return responseJson def display_orders(rarity_db): print("RANK TOKEN_ID PRICE URL") numbersToCheck = [] for x in rarity_db["TOKEN_ID"]: numbersToCheck.append(x) if len(numbersToCheck) == 15: orders = getOpenseaOrders(numbersToCheck, CONTRACT_ADDRESS) numbersToCheck = [] time.sleep(2) for order in orders["orders"]: if order["side"] == 1: tokenId = int(order["asset"]["token_id"]) price = 
float(order["current_price"]) / 1e18 if price <= 20: current_order = dict() current_order["RANK"] = str( int(rarity_db[rarity_db["TOKEN_ID"] == tokenId]["Rank"]) ) current_order["TOKEN_ID"] = str(tokenId) current_order["PRICE"] = str(price) current_order[ "URL" ] = f"https://opensea.io/assets/{CONTRACT_ADDRESS}/{tokenId}" str_to_print = "" for x in ["RANK", "TOKEN_ID", "PRICE"]: str_to_print += f"{current_order[x]}" str_to_print += " " * (len(x) + 1 - len(current_order[x])) str_to_print += current_order["URL"] print(str_to_print) display_orders(RARITY_DB.head(NUMBERS_TO_CHECK)) import numpy as np A = -0.9 K = 1 B = 5 v = 1 Q = 1.1 C = 1 RARITY_DB["VALUE"] = A + ( (K - A) / np.power((C + Q * np.exp(-B * (1 / RARITY_DB["Rank"]))), 1 / v) ) RARITY_DB["VALUE"] = np.where(RARITY_DB["Rank"] > 96 * 2, 0, RARITY_DB["VALUE"]) RARITY_DB[["Rank", "VALUE"]].sort_values("Rank").plot( x="Rank", y="VALUE", figsize=(14, 7), logx=True, grid=True ) plt.show() RARITY_DB = RARITY_DB.sort_values("TOKEN_ID") RARITY_DB.plot(x="TOKEN_ID", y="VALUE", grid=True, figsize=(14, 7)) RARITY_DB = RARITY_DB.sort_values("TOKEN_ID") RARITY_DB["EXPANDING_VALUE"] = RARITY_DB["VALUE"].expanding().sum() RARITY_DB.plot(x="TOKEN_ID", y="EXPANDING_VALUE", grid=True, figsize=(14, 7)) pd.set_option("display.max_rows", 100) RARITY_DB.sort_values("Rank").head(96) ```
github_jupyter
# Notebook Instructions <i>You can run the notebook document sequentially (one cell at a time) by pressing <b> shift + enter</b>. While a cell is running, a [*] will display on the left. When it has been run, a number will display indicating the order in which it was run in the notebook [8].</i> <i>Enter edit mode by pressing <b>`Enter`</b> or using the mouse to click on a cell's editor area. Edit mode is indicated by a green cell border and a prompt showing in the editor area.</i> # Support Vector Classifier Strategy Code In this notebook, you will learn to use the support vector classifier (SVC) algorithm and its step by step implementation on S&P500. SVCs are supervised learning models. A set of training data is provided to the SVC algorithm, each belonging to one of the categories. For instance, the categories can be either to buy or to sell a stock. The algorithm builds a model during the training phase and assigns new data into one of the categories based on the model created during the training phase. # Importing the libraries ``` # Machine learning from sklearn.svm import SVC from sklearn.metrics import scorer from sklearn.metrics import accuracy_score # For data manipulation import pandas as pd import numpy as np # To plot import matplotlib.pyplot as plt import seaborn # To fetch data from pandas_datareader import data as pdr import fix_yahoo_finance as yf yf.pdr_override() ``` # Downloading S&P500 data We will download the S&P500 data from yahoo finance. ``` Df = pdr.get_data_yahoo('SPY', start="2012-01-01", end="2017-10-01") Df= Df.dropna() Df.Close.plot(figsize=(10,5)) plt.ylabel("S&P500 Price") plt.show() ``` # Determine the correct trading signal If tomorrow's price is greater than today's price then we will buy the S&P500 index, else we will sell the S&P500 index. We will store +1 for buy signal and -1 for sell signal in Signal column. y is a target dataset storing the correct trading signal which the machine learning algorithm will try to predict. 
``` y = np.where(Df['Close'].shift(-1) > Df['Close'],1,-1) ``` # Creation of predictors datasets The X is a dataset that holds the variables which are used to predict y, that is, whether the S&P500 index price will go up (1) or go down (-1) tomorrow. The X consists of variables such as 'Open - Close' and 'High - Low'. These can be understood as indicators based on which the algorithm will predict tomorrow's trend. Feel free to add more indicators and see the performance. ``` Df['Open-Close'] = Df.Open - Df.Close Df['High-Low'] = Df.High - Df.Low X=Df[['Open-Close','High-Low']] X.head() ``` # Test and train data set split Now, we will split data into training and test data set. 1. First 80% of data is used for training and remaining data for testing. 2. X_train and y_train are training dataset. 3. X_test and y_test are test dataset. ``` split_percentage = 0.8 split = int(split_percentage*len(Df)) # Train data set X_train = X[:split] y_train = y[:split] # Test data set X_test = X[split:] y_test = y[split:] ``` # Support Vector Classifier (SVC) We will use SVC() function from sklearn.svm.SVC library for the classification and create our classifier model using fit() method on the training data set. ``` cls = SVC().fit(X_train, y_train) ``` # Classifier accuracy We will compute the accuracy of the algorithm on the train and test data set, by comparing the actual values of Signal with the predicted values of Signal. The function accuracy_score() will be used to calculate the accuracy. <B>Syntax:</B> accuracy_score(<font color=blue>target_actual_value</font>,<font color=blue>target_predicted_value</font>) 1. <font color=blue>target_actual_value:</font> correct signal values 2. 
<font color=blue>target_predicted_value:</font> predicted signal values ``` accuracy_train = accuracy_score(y_train, cls.predict(X_train)) accuracy_test = accuracy_score(y_test, cls.predict(X_test)) print('\nTrain Accuracy:{: .2f}%'.format(accuracy_train*100)) print('Test Accuracy:{: .2f}%'.format(accuracy_test*100)) ``` An accuracy of 50%+ in test data suggests that the classifier model is effective. # Prediction ### Predict signal We will predict the signal (buy or sell) for the test data set, using the cls.predict() function. ### Compute returns in test period We will compute the strategy returns based on the predicted signal, and then save it in the column 'Strategy_Return' and plot the cumulative strategy returns. # Strategy Implementation ``` Df['Predicted_Signal'] = cls.predict(X) # Calculate log returns Df['Return'] = np.log(Df.Close.shift(-1) / Df.Close)*100 Df['Strategy_Return'] = Df.Return * Df.Predicted_Signal Df.Strategy_Return.iloc[split:].cumsum().plot(figsize=(10,5)) plt.ylabel("Strategy Returns (%)") plt.show() ``` As seen from the graph, the strategy generates a return of 15%+ in the test data set. For further reading on SVC, enroll in the <a href = https://quantra.quantinsti.com/course/trading-machine-learning-classification-svm> Machine Learning: SVM </a> course on Quantra. <BR>
github_jupyter
# Datasets We published four datasets for training and evaluating extraction of performance results from machine learning papers. In this notebook we describe the format and show how to use our python API to conveniently work with the datasets. Due to licensing, the datasets consist of metadata and annotations, but do not include papers and data extracted from them. However, we made a special effort in our extraction pipeline to get reproducible results. Simple functions to load the datasets ``` from axcell.helpers.datasets import read_arxiv_papers from pathlib import Path V1_URL = 'https://github.com/paperswithcode/axcell/releases/download/v1.0/' ARXIV_PAPERS_URL = V1_URL + 'arxiv-papers.csv.xz' SEGMENTED_TABLES_URL = V1_URL + 'segmented-tables.json.xz' PWC_LEADERBOARDS_URL = V1_URL + 'pwc-leaderboards.json.xz' ``` ## ArxivPapers **ArxivPapers** dataset is a corpus of over 100,000 scientific papers related to machine learning. In our work we use the corpus for self-supervised training of a ULMFiT language model (see the lm_training notebook) and for extraction of common abbreviations. The dataset is a CSV file with one row per paper and the following fields: * arxiv_id - arXiv identifier with version * archive_size - the file size in bytes of the e-print archive * sha256 - SHA-256 hash of the e-print archive * title - paper's title * status - the text and tables extraction status for this paper, one of: + success, + no-tex - LaTeX source is unavailable, + processing-error - extraction issues, + withdrawn - the paper is withdrawn from arXiv * sections - number of extracted sections and subsections * tables - number of extracted tables ``` arxiv_papers = read_arxiv_papers(ARXIV_PAPERS_URL) print(f'Number of papers: {len(arxiv_papers):8}') print(f'└── with LaTeX source: {(~arxiv_papers.status.isin(["no-tex", "withdrawn"])).sum():8}') print(f'Number of extracted tables: {arxiv_papers.tables.sum():8}') ``` The arXiv id can be used to generate links to e-prints. 
Please read https://arxiv.org/help/bulk_data and play nice. ``` def get_eprint_link(paper): return f'http://export.arxiv.org/e-print/{paper.arxiv_id}' links = arxiv_papers.apply(get_eprint_link, axis=1) links.tail() ``` ## SegmentedTables & LinkedResults The **SegmentedTables** dataset contains annotations of almost 2,000 tables. The dataset is a JSON array with one item per paper and the following fields: * arxiv_id - arXiv identifier with version. The version can be different than in **ArxivTables**, * sha256 - SHA-256 hash of the e-print archive * fold - one of 11 folds, f.e., img_class or speech_rec. Each paper has exactly one fold, even if it's related to more than one task, * tables - array of tables annotations + index - 0-based index of tables extracted from paper, + leaderboard - a boolean denoting if this table is a leaderboard table, + ablation - a boolean denoting if this table is an ablation table (a table can be both a leaderboard and an ablation table), + dataset_text - datasets mentioned in table's caption, not normalized + segmentation - for leaderboard tables, a 2D array (list of lists) with one label per cell Additionally we annotated part of the tables with performance results, called simply the **LinkedResults** dataset. Each table contains a 'records' array with items containing: * task, dataset, metric - task, dataset and metric names normalized across all papers from the **LinkedResults** dataset, * value - normalized metric value, * model - model name, * row, column - 0-based cell location with this result. 
``` from axcell.helpers.datasets import read_tables_annotations segmented_tables_annotations = read_tables_annotations(SEGMENTED_TABLES_URL) leaderboards = (segmented_tables_annotations.tables.apply( lambda tables: len([t for t in tables if t['leaderboard']]) ).sum()) ablations = (segmented_tables_annotations.tables.apply( lambda tables: len([t for t in tables if t['ablation']]) ).sum()) records = (segmented_tables_annotations.tables.apply( lambda tables: sum([len(t['records']) for t in tables]) ).sum()) print(f'Number of papers: {len(segmented_tables_annotations):8}') print(f'Number of tables: {segmented_tables_annotations.tables.apply(len).sum():8}') print(f'├── leaderboards: {leaderboards:8}') print(f'└── ablations: {ablations:8}') print(f'Linked results: {records:8}') ``` ## PWCLeaderboards The **PWCLeaderboards** dataset is similar in structure to the **LinkedResults** dataset. It's a JSON array with one item per paper, containing: * arxiv_id - arXiv identifier with version. The version corresponds to the version in **ArxivTables**, * tables + index - 0-based table index + records - as in **LinkedResults** ``` pwc_leaderboards = read_tables_annotations(PWC_LEADERBOARDS_URL) records = (pwc_leaderboards.tables.apply( lambda tables: sum([len(t['records']) for t in tables]) ).sum()) print(f'Number of papers: {len(pwc_leaderboards):8}') print(f'Number of tables: {pwc_leaderboards.tables.apply(len).sum():8}') print(f'Linked results: {records:8}') ```
github_jupyter
``` import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import os from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout from tensorflow.keras.callbacks import EarlyStopping import tensorflow as tf import numpy as np from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score data = pd.read_csv('data/spirals.csv') data.head() # dropping the label from the dataset to get the input features X = data.drop('label', axis=1).to_numpy() # selecting the labels y = data['label'].to_numpy() # splitting the dataset X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True) y_test # setting seed to get reproducible results tf.random.set_seed(42) np.random.seed(42) # toy model, based on Tensorflow Playground model = Sequential() model.add(Dense(7, activation='sigmoid')) model.add(Dense(7, activation='sigmoid')) model.add(Dense(1, activation='sigmoid')) #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) #model.fit(X_train, y_train, batch_size=16, epochs=100) from loop import TrainingLoop # definign optimizer and loss function optimizer = tf.keras.optimizers.Adam() loss_function = tf.keras.losses.BinaryCrossentropy() batch_size = 16 epochs = 150 log_path = 'logs/original/archimedean.csv' train_metrics = tf.keras.metrics.BinaryAccuracy() val_metrics = tf.keras.metrics.BinaryAccuracy() training = TrainingLoop(model, X_train, y_train, loss_function, optimizer, train_metrics, val_metrics, validation_split=0.1, batch_size=batch_size, log_file=log_path, ) training.train(epochs) # creating x coordinates xx = np.linspace(4, -4, 100) # creating y coordinates yy = np.linspace(-4, 4, 100).T # making a grid of these xx, yy = np.meshgrid(xx, yy) # giving a value to each of these Xfull = np.c_[xx.ravel(), yy.ravel(), xx.ravel() * yy.ravel(), np.sin(xx.ravel()), np.sin(yy.ravel()), 
xx.ravel() ** 2, yy.ravel() ** 2] # testing accuracy pred = model.predict(X_test) accuracy_score(y_test, pred.round()) # predicting values in the grid y_pred = model.predict(Xfull) # reshaping the predictions len(y_pred) im = y_pred.reshape(100, 100) # plotting the original image and what the program learned with the custom trainging loop plt.figure(figsize=(7, 7)) plt.scatter((data['x']+4)*10, 100-(data['y']+4)*10, c=data['label'], cmap='viridis') plt.imshow(-im) ```
github_jupyter
# <center> Loop, Conditional Control and Function # <center> Loop and Conditional Control ## for ``` for i in [1,4,7]: print('Hi') print(i) for .. in ....: ... ... ... my_list = [1, 4, 7, 'saeed', 10] #print(my_list) for i in my_list: print(i*2) for i in 'economics': print(i*2) for i in my_list: print(i) print('hi') print('bye') list(range(3, 11)) for i in range(3, 11): print(i**2) my_list1 = [1,4,7,8,9,19,100] sum(my_list1) list_sum = 0 for i in my_list1: list_sum = i + list_sum print(list_sum) list(range(2)) list(range(1,4)) for i in range(1,4): for j in range(2): print(i,j) ``` ## while (pass, continue, break) <img src="https://github.com/saeed-saffari/Python-for-Economics-2021-ATU/blob/main/MSc%20Planning%20Economics/while.jpg?raw=true"> ``` n = 0 while n<10 : print(n) n = n + 1 x = 1 while x > 10**(-4): print(x) x = x / 5 x = [1, 2 , 3 , 4] for i in x: #print(i) pass ``` ## if, elif , else <img src="https://github.com/saeed-saffari/Python-for-Economics-2021-ATU/blob/main/MSc%20Planning%20Economics/if-else.jpg?raw=true"> ``` if 3<2: print('Hello!') x = 4 if x==2: print('x is two') elif x==3: print('x is three') else: print('x is four') grade = 'B' if grade == 'A': print('Well done!') elif grade == 'B': print('try more!') else: print('fail!') my_list1 = [1, 4, 7, 8, 9, 19, 100] for i in my_list1: if i % 2 == 0: print('even', i) else: print('ODD') mystring = 'nothing' for i in mystring: if i == 't': continue print(i) mystring = 'nothing' for i in mystring: if i == 't': break print(i) x = 0 while x < 5: if x == 3: break print(x) x = x + 1 ``` ## <center> Function <img src="https://github.com/saeed-saffari/Python-for-Economics-2021-ATU/blob/main/MSc%20Planning%20Economics/function.gif?raw=true"> ``` def test1(): ... ... ... ... return ... def test2(): ... ... .. ... print(...) test1() test2() def test3(x, y, z): ... ... ... print(...) # return ... test3(3, 0.5, 98) def test4(): ... ... ... 
``` ## - Example ``` def echo(i): print(i) echo('Salaaam!') def mean3(x, y , z): sum = x + y + z ave = sum / 3 return ave mean3(2, 3.5, 6.8) def mean(): x = float(input('first num:')) y = float(input('second num:')) z = float(input('third num:')) sum = x + y + z ave = sum / 3 return ave mean() def hello(): name = input("Enter your name: ") if name: print('Hello ', str(name)) else: print('Hello world!') return hello() hello() ```
github_jupyter
# Chapter 3 Applied Labs ## Q13 (fitting to simulated data) ``` import statsmodels.api as sm import statsmodels.formula.api as smf import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.stats.anova sns.set(style="whitegrid") ``` ### (a) Create a vector `x` containing 100 observations drawn from `N(0,1)` ``` x = np.random.normal(size=100) ``` ### (b) Create a vector `eps` containing 100 observations drawn from `N(0,0.25)` ``` eps = np.random.normal(loc=0, scale=0.25, size=100) ``` ### (c) Generate `y` according to `y = -1 +0.5x + eps` ``` # This vector has length 100, and has B0 = -1, B1 = 0.5 y = -1 + 0.5*x + eps ``` ### (d) Create a scatterplot displaying the relationship between `x` and `y`, and comment. ``` fig, ax = plt.subplots(figsize=(15,10)) sns.scatterplot(x=x, y=y, ax=ax) ``` By eye, the data passes through (0,-1) and has a slope of 1/2, and a total spread in y values of roughly 1.0 (i.e. almost all points are within $2\sigma$ where $\sigma=0.25$ ) ### (e) Fit a least-squares linear model to predict y using x. How do $\hat{\beta_0}$ and $\hat{\beta_1}$ compare to the true values $\beta_0$ and $\beta_1$? ``` model = sm.OLS(y, sm.add_constant(x)) results = model.fit() results.summary() ``` $\hat{\beta_0} = -1.02 \pm 0.03$, and $\hat{\beta_1} = 0.52 \pm 0.02$, i.e. the predicted values agree with the true values. ``` rough_sigma = np.sqrt(results.mse_resid) print(rough_sigma) # agrees with the eps with a standard deviation of 0.25 we added to the data ``` ### (f) plot the regression line. ``` B0, B1 = results.params print(B0, B1) fig, ax = plt.subplots(figsize=(15,10)) sns.scatterplot(x=x, y=y, ax=ax, label="Data") plt.plot(x,B0+B1*x, linewidth=3, color="black", label="regression line" ) plt.legend() ``` ### (g) Fit a quadratic model using `x` and `x^2` to predict `y`. Is there any evidence that the quadratic term improves the fit? 
``` # Create a DataFrame test_data = pd.DataFrame(zip(x,y), columns=["x","y"]) quadratic_model = smf.ols('y ~ x + np.square(x)', data=test_data) # == lstat+age+lstat:age quadratic_results = quadratic_model.fit() print(results.summary()) statsmodels.stats.anova.anova_lm(results, quadratic_results) ``` The p-value for the x^2 term shows that this term is not statistically significant. In addition, using ANOVA we can see that the p value for the F statistic is high, suggesting we can't reject the null hypothesis that both models fit the data equally well. ### (h) repeat a-f using less noisy data. Describe your results ``` lower_eps = np.random.normal(loc=0, scale=0.125, size=100) less_noisy_y = -1 + 0.5*x + lower_eps model = sm.OLS(less_noisy_y, sm.add_constant(x)) less_noisy_results = model.fit() less_noisy_results.summary() rough_sigma = np.sqrt(less_noisy_results.mse_resid) print(rough_sigma) # agrees with the eps with a standard deviation of 0.25 we added to the data B0, B1 = less_noisy_results.params print(B0, B1) fig, ax = plt.subplots(figsize=(15,10)) sns.scatterplot(x=x, y=less_noisy_y, ax=ax, label="Data") plt.plot(x,B0+B1*x, linewidth=3, color="black", label="regression line" ) plt.legend() ``` R^2 has gone up, from 0.827 to 0.951. The value of F has increased. The std_errs in the predicted coefficients have halved. ### (i) repeat a-f using more noisy data. 
Describe your results ``` higher_eps = np.random.normal(loc=0, scale=0.5, size=100) more_noisy_y = -1 + 0.5*x + higher_eps model = sm.OLS(more_noisy_y, sm.add_constant(x)) noisy_results = model.fit() noisy_results.summary() rough_sigma = np.sqrt(noisy_results.mse_resid) print(rough_sigma) # agrees with the eps with a standard deviation of 0.25 we added to the data B0, B1 = noisy_results.params print(B0, B1) fig, ax = plt.subplots(figsize=(15,10)) sns.scatterplot(x=x, y=more_noisy_y, ax=ax, label="Data") plt.plot(x,B0+B1*x, linewidth=3, color="black", label="regression line" ) plt.legend() ``` R^2 has gone down, as has F. The std_errs in the predicted coefficients have doubled. ### (j) What are the confidence intervals for each dataset? ``` for i in range(2): for fit in [less_noisy_results, results, noisy_results]: print(fit.conf_int()[i]) print() for i in range(2): for fit in [less_noisy_results, results, noisy_results]: print(fit.conf_int()[i][1]-fit.conf_int()[i][0]) print() ``` We expect the standard error to be roughly 4 sigma (2 sigma either side). This is indeed the case
github_jupyter
# Zero-Noise Extrapolation *Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.* ## Outline **Note: The number of credit points in the Quantum Hub account and time spent running this tutorial program will vary based on the parameters users input. Users need 28 points to obtain the results for the default parameters in this tutorial. If you want to get more points, please contact us on [Quantum Hub](https://quantum-hub.baidu.com). First, you should log into [Quantum Hub](https://quantum-hub.baidu.com), then enter the "Feedback" page, choose "Get Credit Point", and input the necessary information. Submit your feedback and wait for a reply.** This tutorial introduces an efficient and general method for Quantum Error Mitigation: Zero-Noise Extrapolation (ZNE), covering its theory and implementation in Quanlse. We use the single-qubit random Clifford sequence as benchmark to illustrate how to use the ZNE method in Quanlse step-by-step. The outline of this tutorial is as follows: - ZNE: Theory - Introduction - Noise rescaling - Extrapolation - ZNE: Practice - Computation task description - Quanlse implementation - Summary - Reference ## ZNE: Theory ### Introduction Zero-Noise Extrapolation (ZNE) is a powerful technique for mitigating quantum errors in quantum computing. Notice that ZNE does not directly reduce the inherent noise in the quantum computing process, but instead infers the ideal computation result by repeating the same quantum computing process many times with different levels of noise \[1, 2\]. The advantage of ZNE is that we do not need to know the exact form of the noise as well as how to control the noise source. The implementation process of this method is shown in the figure below. The figure shows that the ZNE method is composed of two steps: rescaling noise and extrapolating. Among various noise rescaling techniques, time-variant rescaling is a robust and promising one. 
This technique stretches the system Hamiltonian in time domain according to some rescaling coefficient to obtain an equivalently noise-rescaled final quantum state. For simplicity, we use the Richardson extrapolation in our Quanlse implementation, a mature numeric algorithm that can eliminate error of any order in principle. We remark that there are many other extrapolation methods such as polynomial and exponential extrapolation methods \[3\]. ![zne-profile](figures/zne-profile.png) ### Noise rescaling On the physical level, a quantum computing process with noise can be described by the Lindblad master equation: $$ \frac{\partial}{\partial t}\rho(t) = -i[K,\rho]+\lambda\mathcal{L}(\rho), $$ for time $t\in[0,T]$. In this formulation, the Hamiltonian $K$ (which might be time-dependent) represents the ideal coherent evolution we aim to implement, while the Lindblad operator $\mathcal{L}$ represents the noisy process we hope to mitigate. We emphasize that there is no need to know the exact form of the generator $\mathcal{L}$. We only require that it is *time-invariant* and its effect is dominated by a scalar noise parameter $\lambda$. Let $\rho_\lambda(T)$ be the final state after evolution time $T$. Given a positive coefficient $c$, can we obtain a noise-rescaled final state $\rho_{c\lambda}(T)$? Surprisingly, this is possible whenever the Lindblad operator is time-invariant. Consider the following procedure. We implement a time-stretching and amplitude-contracting version of the system Hamiltonian via: $$ K(t)\to K'(t) = \frac{K(t/c)}{c}. $$ What's more, we stretch the system evolution time to $cT$. It has been proven that this rescaled Hamiltonian $K'(t)$ will lead to a new evaluation whose final state is exactly $\rho_{c\lambda}(T)$ numerically \[1\]. Experimentally, stretching the evolution time ($T\to cT$) is easy to implement. Now let's analyze how to obtain the rescaled Hamiltonian $K'(t)$. 
In general, the systematic Hamiltonian is composed of time-independent drift items and time-dependent control ones, and the latter act on quantum states in the form of driving pulses. As an example, we learn from the [Single-Qubit Gate Tutorial](https://quanlse.baidu.com/#/doc/tutorial-single-qubit) in Quanlse that the driving pulses of the Hadamard gate $$ H=\frac{1}{\sqrt{2}}\begin{pmatrix} 1&1\\ 1&-1 \end{pmatrix} $$ are optimized as one $X$-channel pulse and one $Y$-channel pulse. As so, to implement the rescaled Hamiltonian is to stretch the corresponding driving pulses. In the following, we show by case the rescaled driving pulses of the optimized Hadamard gate with rescaling coefficients $1$ (does not rescale), $1.25$, and $1.5$. ![zne-profile](figures/zne-pulse-rescale-h.png) To close this section, we comment that the noise parameter $\lambda$ might also be other physical-relevant quantities, such as infidelity, temperature, error probability, variational parameter, etc. For example, we implement this ZNE method in Quanlse by treating the infidelity of the quantum circuit as the noise parameter $\lambda$. ### Extrapolation In numeric analysis, Richardson extrapolation is an efficient numerical method commonly used to eliminate low-order estimation errors. This method assumes that the estimated value $E(\lambda)$ could be expressed as a power series of $\lambda$ with respect to the ideal value $E^{\ast}\equiv E(\lambda=0)$: $$ E(\lambda) = \sum_{k=0}^{d} a_k\lambda^k + O(\lambda^{d+1}), $$ where $E^{\ast} = a_0$, $\{a_k\}$ is a set of coefficients to be determined, and $d$ is the order we aim to extrapolate. If we can obtain a set of estimators $\left\{E(\lambda_j)\right\}_{j=1}^{d+1}$ with different parameters, we can construct a new estimator $E^d(\lambda)$ from this set. In comparison with the original noisy estimator $E(\lambda)$, this new estimator has a higher-precision estimation error (to $d$-order) \[4\]. 
![extrapolation](figures/zne-extrapolation.png) In the above figure, we demonstrate the Richardson extrapolation by setting $d=2$. From the figure, we can see that the data points are linearly fitted, and the ideal value $E^{\ast}$ can be inferred via extrapolation. It is worth noting that the Richardson extrapolation is just one of many extrapolation methods. It works well only when the power series assumption is valid. Luckily, this assumption holds naturally within the above Lindblad master equation framework, as justified in \[1\]. ## ZNE: Practice ### Computation task description **Random Clifford circuit** A random Clifford circuit is a quantum circuit composed of randomly generated Clifford unitary gates, which has been intensively applied to benchmark the average error rates of quantum circuits. Here we consider the identity-equivalent single-qubit Clifford circuit composed of $n$ sequential random Clifford gates with the corresponding inverse gate attached to the end. As shown in the figure below, each $C_j$ is a randomly generated Clifford unitary gate while $C_{\rm inv}$ is the inverse gate of all the preceding $n$ Clifford gates, that is, $$ C_{\rm inv}C_n C_{n-1}\cdots C_1=I. $$ **Computation task** Consider the following quantum computation task. The initial state is $|0\rangle = \begin{pmatrix} 1\\0\end{pmatrix}$, the evolution circuit is an identity-equivalent Clifford circuit of size $n+1$, and the quantum observable is $A=|0\rangle\langle 0|=\begin{pmatrix}1&0\\0&0 \end{pmatrix}$. ![zne-clifford-circuit](figures/zne-clifford-circuit.png) Ideally, the final output quantum state will be $|0\rangle$ since the evolution circuit is identity-equivalent. As so, the expectation value of $A$ will be $\langle A\rangle_{\rm ideal}=1$, no matter how long the Clifford circuit is. 
However, due to the inevitable quantum noise when implementing the quantum circuit, the output state is no longer $|0\rangle$, resulting in an incorrect expectation value $\langle A\rangle_{\rm noisy}$. What's worse, the deeper the identity-equivalent quantum circuit is, the more that $\langle A\rangle_{\rm noisy}$ deviates from the ideal value $1$. Notice that we compute the expectation value numerically after we obtain the final output state. In the following, we show that using the ZNE method offered by the Quanlse Cloud Service, we can mitigate the quantum noise dramatically, and the mitigated expectation value $\langle A\rangle_{\rm miti}$ approaches the ideal value $\langle A\rangle_{\rm ideal}$ for deep Clifford circuits. **Data processing procedure** We describe the data processing procedure in detail to fully reveal the power of the ZNE method implemented in Quanlse. For each $k=1,2,\cdots,n$, we select the first $k$ gates of length $n$ Clifford sequence, compute the corresponding inverse gate, and construct the identity-equivalent circuit of length $k+1$. Then, for this circuit, we calculate the expectation value with the input state being $|0\rangle$ and the quantum observable being $A$. We set the maximal extrapolation order to $d$ and compute the error-mitigated values of orders ranging from $1$ to $d$. Finally, we obtain $n\times d$ extrapolated values and $n\times (d+1)$ rescaling values. ### Quanlse implementation **Import necessary modules and functions** To run the program below, you need to install [Quanlse](https://quanlse.baidu.com/#/doc/install) first. 
Then you need to import the following packages from Quanlse and some supporting Python libraries: ``` from Quanlse.remoteZNE import remoteZNEMitigation as zneMitigation from Quanlse.ErrorMitigation.ZNE.Extrapolation import extrapolate from Quanlse.ErrorMitigation.Utils.Utils import computeIdealExpectationValue, \ computeIdealEvolutionOperator, fromCircuitToHamiltonian, randomCircuit, \ computeInverseGate from Quanlse.ErrorMitigation.Utils.Visualization import plotZNESequences from Quanlse.Utils.Functions import project, expect from Quanlse.Utils.Infidelity import unitaryInfidelity from Quanlse.remoteSimulator import remoteSimulatorRunHamiltonian import numpy as np from copy import deepcopy ``` Usually, the zero-noise extrapolation method are computationally expensive. To deal with this issue, we provide our cloud service that could speed up this process significantly. To use the Quanlse Cloud Service, the users need to acquire a token from the [Quantum Leaf](http://quantum-hub.baidu.com) platform. ``` from Quanlse import Define Define.hubToken = '' ``` **Construct random Clifford circuit** We use the built-in `randomCircuit` function to create a random Clifford sequence of length `numSeq`, whose data type is a `List` including a series of `CircuitLine` objects. Each `CircuitLine` describes a layer of the target quantum circuit. In this example, each layer consists of only one single-qubit gate. ``` # Set the maximal length of the random Clifford circuit numSeq = 5 numQubits = 1 # Set the input state as |0> and the quantum observable as |0><0| state = np.diag([1, 0]).astype(complex) A = np.diag([1, 0]).astype(complex) # Set the maximal extrapolation order order = 2 # Considering the reproducibility of our calculation result, we may as well set the "random seed" as a fixed value (e.g. 
123) circuit = randomCircuit(qubits=1, numSeq=numSeq, seed=123) ``` **Compute the ideal and noisy expectation values** For a quantum circuit of length $n$, we could use the built-in `computeInverseGate` function to calculate its inverse gate and then attach it to the end of the original quantum circuit. In this way, we construct an identity-equivalent quantum circuit totally including $n+1$ gates. Based on this quantum circuit and other initial parameters, we could compute both the ideal expectation value (via numerical simulation) and the noisy expectation value suffering from implementation error. For reference, we compute the infidelity between the ideal evolutionary operator and the noisy evolutionary operator. ``` # Construct the identity-equivalent quantum circuit by appending an inverse gate to the end circuitIdentity = circuit + [computeInverseGate(circuit)] # Compute the ideal expectation value (should be 1.0) and the ideal evolution operator (should be an identity operator) valueIdeal = computeIdealExpectationValue(state, circuitIdentity, A) unitaryIdeal = computeIdealEvolutionOperator(circuitIdentity) # Compute the optimized Hamiltonian for implementing the quantum circuit # The built-in Quanlse Scheduler will be called ham = fromCircuitToHamiltonian(circuitIdentity) # Use the given Hamiltonian to compute the implemented evolution unitary, the infidelity, and the noisy expectation value result = remoteSimulatorRunHamiltonian(ham) unitaryNoisy = project(result.result[0]["unitary"], ham.subSysNum, ham.sysLevel, 2) infid = unitaryInfidelity(unitaryIdeal, unitaryNoisy, numQubits) noisyValue = expect(A, unitaryNoisy @ state @ unitaryNoisy.conj().T) # Print the ideal and noisy expectation values print("The ideal expectation value: {}; The noisy expectation: {}".format(valueIdeal, noisyValue)) print("The ideal evolutionary operator:") print(unitaryIdeal.round(3)) print('The noisy evolutionary operator:') print(unitaryNoisy.round(3)) print("The implemented 
evolution unitary has infidelity: ", infid) ``` **Error mitigation via ZNE** There exists a deviation between the ideal expectation value and the noisy expectation value. As we have explained in the Theory section, ZNE is a feasible and efficient method to mitigate this kind of deviation. Using the built-in `extrapolate` function, we could calculate the mitigated expectation value from a set of rescaling coefficients and corresponding noise-rescaling values. In comparison with the original noisy expectation value, The mitigated expectation value has a higher estimation precision. In Quanlse, the ZNE method is implemented and is available via the `zneMitigation` interface. It includes both the noise-rescaling and the extrapolating procedures. `zneMitigation` returns a mitigated expectation value (to the $d$-th order), a set of infidelities (a list of $d+1$ real numbers), and a set of noisy expectation values of different noise levels (a list of $d+1$ real numbers). According to the data processing procedure described above, we need to execute the `zneMitigation` function for `numSeq` times. The process for optimizing the target Hamiltonian will perform `numSeq` times in total, which is computationally expensive. As so, we use the Quanlse Cloud Service to accelerate the optimizing process. 
``` EsRescaled = [] # EsRescaled size: [numSeq, order + 1] EsExtrapolated = [] # EsExtrapolated size: [numSeq, order] EsIdeal = [] # EsIdeal size: [numSeq,] Infidelities = [] # Infidelities size: [numSeq, order + 1] for length in range(1, numSeq + 1): print('==' * 20) print("Clifford circuit length:", length) # For each sequence, append the equivalent-inverse gate of all the preceding quantum gates # For each sequence, its length becomes: [1, 2, ..., numSeq] + 1 circuitPart = deepcopy(circuit[:length]) lastGate = computeInverseGate(circuitPart) circuitPart.append(lastGate) # Compute ideal expectations firstly for subsequent comparison in figure EsIdeal.append(computeIdealExpectationValue(state, circuitPart, A)) # Temporary extrapolated values of each order for each-length circuit mitigatedValues = [] # Use the Scheduler to compute the optimal Hamiltonian for this circuit ham = fromCircuitToHamiltonian(circuitPart) # Rescale order: [c_0, c_1, ..., c_d]; extrapolation order: d mitigatedValueHighest, infidelities, noisyValues = zneMitigation(state, circuitPart, A, ham=ham, order=order) # Rescale order: [c_0, c_1], [c_0, c_1, c_2], ...., [c_0, ..., c_{d-1}] # for d in [1, ..., d - 1]: for d in range(1, order): mitigatedValue = extrapolate(infidelities[:(d + 1)], noisyValues[:(d + 1)], type='richardson', order=d) mitigatedValues.append(mitigatedValue) mitigatedValues.append(mitigatedValueHighest) EsExtrapolated.append(mitigatedValues) EsRescaled.append(noisyValues) Infidelities.append(infidelities) ``` **Result and discussion** ``` # X-axis represents length of quantum circuit, Y-axis represents expectation values plotZNESequences(EsRescaled, EsExtrapolated, EsIdeal, fileName='zne-single-qubit-clifford') ``` As we can tell from the figure, our noise-rescaling strategy *does* improve the precision of the estimated expectation value. What's more, the larger the rescaling coefficient is, the larger the resulting noisy expectation value bias. 
It is anticipated that rescaling would lead to a *worse* Hamiltonian for the quantum circuit implementation, since the Hamiltonian optimized by Quanlse `Scheduler` is already the best. The power of extrapolation is self-evident as the precision of mitigated expectation values is improved significantly. Interestingly, just first-order or second-order extrapolation yields estimated expectation values that approach the ideal expectation to a great extent. One might notice that in the above extrapolation plot, the $1$-order rescaled expectation values, which are obtained via the optimized Hamiltonians without rescaling, are very close to the ideal expectation value. It is because Quanlse can generate the single-qubit driving Hamiltonian with extremely high fidelity. To better illustrate the extrapolation technique, we compute the error mitigated values using only the $2$ and $3$-order rescaled expectation values. Remarkably, the mitigated expectation values are pretty close to the ideal expectation value, witnessing the power of the Richardson extrapolation method. ``` InfidelitiesPartial = np.array(Infidelities)[:, 1:] EsRescaledPartial = np.array(EsRescaled)[:, 1:] orderPartial = order - 1 EsExtrapolatedPartial = [] # size: [numSeq, order + 1] for i in range(numSeq): mitigatedValues = [] for d in range(1, orderPartial + 1): mitigatedValue = extrapolate(InfidelitiesPartial[i][:(d + 1)], EsRescaledPartial[i][:(d + 1)], type='richardson', order=d) mitigatedValues.append(mitigatedValue) EsExtrapolatedPartial.append(mitigatedValues) plotZNESequences(EsRescaledPartial, EsExtrapolatedPartial, EsIdeal, fileName='zne-single-qubit-clifford-2') ``` ## Summary This tutorial describes how to use the Zero-Noise Extrapolation method implemented in Quanlse to improve the precision of quantum computation results by considering a representative example, random single-qubit Clifford circuits, as a benchmark. 
Interested users may click on this link [tutorial-ZNE.ipynb](https://github.com/baidu/Quanlse/blob/main/Tutorial/EN/tutorial-ZNE.ipynb) to jump to the corresponding GitHub page for this Jupyter Notebook documentation to get the code and try different parameters to further explore the power of the Quanlse ZNE module. ## Reference \[1\] [Temme, K., et al. (2017). "Error mitigation for short-depth quantum circuits." *Physical Review Letters* 119(18): 180509](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.119.180509). \[2\] [Kandala, A., et al. (2019). "Error mitigation extends the computational reach of a noisy quantum processor." *Nature* 567(7749): 491-495](https://www.nature.com/articles/s41586-019-1040-7). \[3\] [Giurgica-Tiron, T., et al. (2020). "Digital zero noise extrapolation for quantum error mitigation." 2020 IEEE International Conference on Quantum Computing and Engineering (QCE)](https://ieeexplore.ieee.org/document/9259940). \[4\] [A. Sidi (2003). "Practical Extrapolation Methods: Theory and Applications." Cambridge Monographs on Applied and Computational Mathematics, Vol. 10](https://www.cambridge.org/core/books/practical-extrapolation-methods/21A93C2B0793CF09B2F3ABEF78F3F9B9).
github_jupyter
<center> <img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # **Web Scraping Lab** Estimated time needed: **30** minutes ## Objectives After completing this lab you will be able to: * Download a webpage using requests module * Scrape all links from a web page * Scrape all image urls from a web page * Scrape data from html tables ## Scrape [www.ibm.com](http://www.ibm.com/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDA0321ENSkillsNetwork21426264-2021-01-01) Import the required modules and functions ``` from bs4 import BeautifulSoup # this module helps in web scrapping. import requests # this module helps us to download a web page ``` Download the contents of the web page ``` url = "http://www.ibm.com" # get the contents of the webpage in text format and store in a variable called data data = requests.get(url).text ``` Create a soup object using the class BeautifulSoup ``` soup = BeautifulSoup(data,"html5lib") # create a soup object using the variable 'data' ``` Scrape all links ``` for link in soup.find_all('a'): # in html anchor/link is represented by the tag <a> print(link.get('href')) ``` Scrape all images ``` for link in soup.find_all('img'):# in html image is represented by the tag <img> print(link.get('src')) ``` ## Scrape data from html tables ``` #The below url contains a html table with data about colors and color codes. url = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DA0321EN-SkillsNetwork/labs/datasets/HTMLColorCodes.html" ``` Before proceeding to scrape a web site, you need to examine the contents, and the way data is organized on the website. Open the above url in your browser and check how many rows and columns are there in the color table. 
``` # get the contents of the webpage in text format and store in a variable called data data = requests.get(url).text soup = BeautifulSoup(data,"html5lib") #find a html table in the web page table = soup.find('table') # in html table is represented by the tag <table> #Get all rows from the table for row in table.find_all('tr'): # in html table row is represented by the tag <tr> # Get all columns in each row. cols = row.find_all('td') # in html a column is represented by the tag <td> color_name = cols[2].getText() # store the value in column 3 as color_name color_code = cols[3].getText() # store the value in column 4 as color_code print("{}--->{}".format(color_name,color_code)) ```
github_jupyter
# Probability and Statistics - Part 1 The Magic Coin In this tutorial we're going to take a look at Probability and Statistics. The overall goal is to cover a lot of ground fairly quickly so that you'll get a sense of how all the pieces work in practice. We're going to start with a simple example that will demonstrate the key problems behind probability and statistics. For this first part we won't be importing any `random` library. The only tool we'll need to create randomness is: ``` import time ``` There is a common belief that **randomness** is **a property of the world**, but this is a misunderstanding. When we say that something is "random" we are really describing **the state of our own mind**. To explore this we'll start with a very interesting `class` called `MagicCoin`: ``` class MagicCoin: def __init__(self,n): self.n = n def flip(self): return( int(time.time()) % self.n == 0 ) ``` This `MagicCoin` may not initially seem that *magic*: all it does is take an integer `n` and then uses that to determine heads (`True`) or tails (`False`) based on the current time in seconds. Just to make sure this simple `flip` logic makes sense. If the time now is: ``` time.time() ``` then casting that to an `int` will give us: ``` int(time.time()) ``` ..oops! time changes so quickly! But you get the point, now we have an `int` and depending on `n` we get a result of our coin toss... ``` int(time.time()) % 2 ``` Okay let's just make a MagicCoin and start thinking about Probability! Here's our coin: ``` mc = MagicCoin(2) ``` Now this is where notebooks are really fun! Run the next line... ``` result = mc.flip() ``` Don't print it yet! What is the value of `result`? The obvious answer is that "we don't know!!!" but that's not really true. We know that the result could either be `True` or `False`. This is a big deal because it drastically limits the realm of values that `result` could have. ## Probability Since we know the result can either be `True` or `False` but which is it? 
It turns out we can solve this problem if we extend the way we think about logic to be a bit more flexible. In boolean logic, the logic we use every day when writing programs, we think of facts as either being `True` or `False`. In the case of our Magic coin, this is true. The value of `result` is only one of these two choices. The problem is that we're missing information to let us know which one of the two it is. How can we express this missing information? Well we can start by thinking of : `True` -> `1` `False` -> `0` So what is the value of `result`? Well if we think of being closer to `True` as being closer to `1` and being closer to `False` as being closer to `0`, then the most obvious choice here is to choose `0.5`. This is all that probability is: a number between 0 and 1 that represents how close to `True` or how close to `False` something is when we don't have enough information to know for sure (and we almost *never* have enough information). There's a lot of mathematical ways we can talk about (and argue about) exactly how to choose these numbers, but for this tutorial most of your intuitions will be reasonably correct, and when they're not this tutorial will try to point out why. ### "Randomness" is a property of your mind Initially assigning a value of 0.5 to the value of this coin toss might seem strange, after all the `MagicCoin` can only return `True` or `False`, 0.5 doesn't make sense! And this is a great observation and a very important one because: *probability says nothing at all about the world, it only makes statements about your own mind* ![Randomness is in your mind](images/mind_1.jpg) In this first example we know exactly how the world works (which is very rare), and we're just missing a tiny, tiny piece of information: the exact time when we called `flip`. This makes `result` a **random variable** but as everyone in this room knows, the *value* of `result` is either `True` or `False` and it is exactly one of them. 
Right now, in the notebook state, the value of `result` is decided, we just haven't seen it. What is *random* is our state of mind. This is important because we often think about *random* events as being somehow choatic out of our control, this is a mistake of thinking that the *world* is random. When we think about *randomness* as a statement about our own state of ignorance, and probability as a measure of that, we make better decisions about how we use probability and statistics. Consider this book by Nassim Taleb: ![Fooled by Randomness](./images/fbr.jpg) Being fooled by randomness sounds a bit spooky! But consider this reworking of the title to incorporate the idea of randomness as a statement about our ignorance: ![Fooled by Ignorance...](./images/fbr_alt.jpg) Much less catchy, but also a much more honest assessment of our situation. Keep this in mind whenever you do statistics! You are always making statements about your mind and about your ignorance of the world. The `MagicCoin` is only really "magic" when we don't know how it works. When we look at `result` there is no more mystery, and no more probability: ``` result ``` ## Logic of Probability! Now the real power of a probability is that we can combine uncertainty with the same type of logic we do for `bool` types in programming! The rules are pretty similar with some nuance... Notice that in all these examples if we consider `1.0` to be `True` and `0.0` to be `False` the logic is *exactly* the same. **note:** these are slightly simplified rules that assume something called conditional independence. There is a more naunced definition of some of these used for what we call conditional probabilities. Conditional probabilities happen when knowing something about one thing tells us information about another. 
#### Logical NOT ``` not True not False ``` #### Probabilistic NOT $$\text{not } P(A) = 1-P(A)$$ ``` 1 - 1.0 1 - 0.0 1-0.25 ``` #### Logical AND ``` True and False True and True False and False ``` #### Probabilistic AND $$P(A \text{ and } B) = P(A) \cdot P(B) $$ ``` 1.0 * 1.0 1.0 * 0.0 0.0 * 0.0 0.5*0.5 ``` #### Logical OR ``` True or True True or False False or False ``` #### Probabilistic OR $$P(A \text{ or } B) = P(A) + P(B) - P(A) \cdot P(B) $$ Note this is a little more complicated than the others. ``` 1.0 + 1.0 - 1.0*1.0 1.0 + 0.0 - 1.0*0.0 0.0 + 0.0 - 0.0*0.0 0.25 + 0.3 - 0.25*0.3 ``` Let's take this one step further and build out a probability class, `P`, that uses Python's data model methods to implement a working example of probability as logic using `-`,`&` and `|`. ``` class P: """ Example of Probability as logic using Python's data model In this simple example these probabilites are assumed to be conditionally independent. """ def __init__(self,prob): assert prob >= 0, "probabilities can't be negative!" assert prob <= 1, "probabilities can't be great than 1!" self.prob = prob def __repr__(self): return "P({})".format(self.prob) def __neg__(self): return P(1-self.prob) def __and__(self,P2): return P(self.prob * P2.prob) def __or__(self,P2): return P(self.prob + P2.prob - (self & P2).prob) #for mutually exclusive or def __add__(self,P2): return P(self.prob + P2.prob) ``` We can then use this to work out probabilities of various events happening using python! Suppose, for example, you know that there is a 0.3 probability of rain tomorrow and you'll get rained on if you forget your umbrella or your umbrella is broken. Then let's say you forget your umbrella with a probability 0.1 and you think your umbrella might be broken, we'll give that a probability of 0.7. Now let's use logic to answer: What's the probability you will *not* get wet? 
Let's start with our facts: ``` rain = P(0.3) forget = P(0.1) broken = P(0.7) ``` The probability of being `wet` is: ``` wet = rain & (forget | broken) ``` and logically the probability of being dry is: ``` -wet ``` ### Logic is how we reason when we are *certain* about facts ### Probability is how we reason when we are *uncertain* about the facts... ## Probability is the Logic of Uncertainty ``` mc2 = MagicCoin(3) ``` Now we have a biased coin. What is the probability we get `True`? $$P(True)=\frac{1}{3}$$ Or with our `P` class: ``` TRUE = P(1/3) results = [] results.append(mc2.flip()) ``` *note:* these need to be in two separate cells so that there is a non-trivial time difference between the two ``` results.append(mc2.flip()) result2 = any(results) ``` Ah! Don't peek yet! What is the probability that `result2` is `True`? Let's use the tools we have so far... To begin lets list out all possible outcomes: ``` [True,True] [False,False] [False,True] [True,False] ``` Now we can use our `P` objects to calculate the probability of each of these outcomes: `[True, True]` ``` TRUE & TRUE ``` Because `False` is really the negation of `True` we know that: ``` FALSE = -TRUE ``` So, using our logic for: `[False,False]` We get: ``` FALSE & FALSE ``` Finally we have two cases which should have the same probability (so we only have to solve once): ``` [True,False] ``` ``` TRUE & FALSE ``` Finally we can figure out the probability that `any` of our results is `True`, we can just use OR (and since these events are mutually exclusive): ``` P((TRUE & TRUE) + (TRUE & FALSE) + (FALSE & TRUE).prob) ``` of course we could have also just used our rules of logic a bit differently and gotten the same result: ``` -(FALSE & FALSE) ``` Now we can evaluate this: ``` result2 ``` now let's try this: ``` mc3 = MagicCoin(4) more_results = [] more_results.append(mc3.flip()) more_results.append(mc3.flip()) more_results.append(mc3.flip()) more_results.append(mc3.flip()) more_results.append(mc3.flip()) result3 = 
len([r for r in more_results if r]) ``` Now what! By now you should be able to see that the probability of `True` in each flip is $\frac{1}{4}$, but we have 5 results that we didn't see. With just 2 it took quite a bit of energy to ennumerate them and think about them all but with 5 this seems a bit much! To solve this we can use a *probability mass function* called the **Binomial Distribution**. We'll use `scipy` for this: ``` from scipy import stats ``` The binomial distribution takes a number of trials `n` and the probability of success `p`. So for the problem we just computed we would use `n=2` and `p=1/3` ``` my_dist = stats.binom(n=2,p=1/3) ``` Then we can use the `.pmf` method to compute the probability of getting `k` values of `True`: ``` my_dist.pmf(0) my_dist.pmf(1) my_dist.pmf(2) ``` We can visualize this distribution as so: ``` import seaborn as sns sns.barplot(x=[0,1,2], y=my_dist.pmf([0,1,2]), color="blue").set_title("Binomial Distribution n=2,p=1/3") ``` For our new problem we know that we have `n=5` and `p=1/4` ... ``` five_tosses_dist = stats.binom(n=5,p=1/4) ``` ...and here we can see all the possible outcomes for the value of `result3` ``` sns.barplot(x=[x for x in range(0,6)], y=five_tosses_dist.pmf([x for x in range(0,6)]), color='blue').set_title("Binomial Distribution for n=5,p=1/4") ``` Now we can easily answers all sorts of question! What is the probability that the lenght of `result3` is exactly `1`? ``` five_tosses_dist.pmf(1) ``` What is the probability that `result3` is length `2` or longer? ``` five_tosses_dist.pmf([2,3,4,5]).sum() ``` Interesting! It's slightly more likely to have just one single `True` than to have 2 or more! What about the probability that the length is even? ``` five_tosses_dist.pmf([0,2,4]).sum() ``` Okay! now make a guess at what the value of `result3` is and let's look at what's really in there: ``` result3 ``` ## Probability and now.... STATISTICS! 
You may wonder what the difference is between *probability* and *statistics*. Rather than explain it first, let's have an object lesson first: ``` truly_magical_coin = MagicCoin((int(time.time()) % 7)+1) ``` Okay let's play our game again! ``` magic_results = [] magic_results.append(truly_magical_coin.flip()) magic_results.append(truly_magical_coin.flip()) magic_results.append(truly_magical_coin.flip()) ``` NO PEEKING! oh wait! does it even matter this time? What in the world can we expect these results to be? This coin was initialized with a value between 1 and 7 *but we don't know which one!* What do you expect the `magic_results` to be? Would `[True, True, True]` surprise you? would `[False, False, False]` surprise you? ### The difference between probability and statistics When we are doing probability we know exactly how uncertain we are, but when we're doing statistics we don't even know that. The `truly_magical_coin` is different because we don't know how it was parameterized. ![Stats mind](./images/stats_mind.jpg) In statistics we have less information about how the world works and most importantly we aren't entirely sure of our own state of ignorance! We can do a lot with probability: when we know exactly how ignorant we are, we can predict the behavior of the world quite well. In statistics we often don't know how things really work, and have to work hard to figure out how uncertain we even are! This is why *data* is so important in statistics. We can use data to *reverse engineer processes we want to know about* to estimate what our uncertainities are. Let's look at our `magic_results` to see if this can help us decide what's going on: ``` magic_results ``` Now this information will be different each time you step through this notebook. But what would `[True, True, True]` tell you about the true value of `n` that `[True, False, False]` might now? If you get more data, can you start to make better guesses? 
``` magic_results.append(truly_magical_coin.flip()) magic_results.append(truly_magical_coin.flip()) magic_results.append(truly_magical_coin.flip()) magic_results ``` The more we study the results of this coin the more we know about `n`... but the more we know about `n` the more we learn about what we can expect the results of this coin to be! ## Homework Want to really understand statistics? The good news is all you really need is the `MagicCoin`. All of the other stuff we'll cover in this tutorial is just tools that we can derive from first principles from this coin. Play with this coin and try to determine what `n` is. - How does more data change this? - Are all values of `n` equally as difficult to predict? - What is the distribution of possible results when you have no information? - How does this distribution change as you gather data? - When are you confident enough to guess `n` and feel 99% sure you have the right answer?
github_jupyter
# Transfer Learning for NLP: Sentiment Analysis on Amazon Reviews In this notebook, we show how transfer learning can be applied to detecting the sentiment of amazon reviews, between positive and negative reviews. This notebook uses the work from [Howard and Ruder, Ulmfit](https://arxiv.org/pdf/1801.06146.pdf). The idea of the paper (and it implementation explained in the [fast.ai deep learning course](http://course.fast.ai/lessons/lesson10.html)) is to learn a language model trained on a very large dataset, e.g. a Wikipedia dump. The intuition is that if a model is able to predict the next word at each word, it means it has learnt something about the structure of the language we are using. [Word2vec](https://arxiv.org/pdf/1310.4546.pdf) and the likes have lead to huge improvements on various NLP tasks. This could be seen as a first step to transfer learning, where the pre-trained word vectors correspond to a transfer of the embedding layer. The ambition of [Ulmfit](https://arxiv.org/pdf/1801.06146.pdf) (and others like [ELMO](https://arxiv.org/pdf/1802.05365.pdf) or the [Transformer language model](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) recently introduced) is to progressively move the NLP field to the state where Computer Vision has risen thanks to the ImageNet challenge. Thanks to the ImageNet challenge, today it is easy to download a model pre-trained on massive dataset of images, remove the last layer and replace it by a classifier or a regressor depending on the interest. With Ulmfit, the goal is for everyone to be able to use a pre-trained language model and use it a backbone which we can use along with a classifier and a regressor. The game-changing apect of transfer learning is that we are no longer limited by the size of training data! 
With only a fraction of the data size that was necessary before, we can train a classifier/regressor and have very good result with few labelled data. Given that labelled text data are difficult to get, in comparison with unlabelled text data which is almost infinite, transfer learning is likely to change radically the field of NLP, and help lead to a maturity state closer to computer vision. The architecture for the language model used in ULMFit is the [AWD-LSTM language model](https://arxiv.org/pdf/1708.02182.pdf) by Merity. While we are using this language model for this experiment, we keep an eye open to a recently proposed character language model with [Contextual String Embedings](http://alanakbik.github.io/papers/coling2018.pdf) by Akbik. # Content of this notebook This notebook illustrate the power of Ulmfit on a dataset of Amazon reviews available on Kaggle at https://www.kaggle.com/bittlingmayer/amazonreviews/home. We use code from the excellent fastai course and use it for a different dataset. The original code is available at https://github.com/fastai/fastai/tree/master/courses/dl2 The data consists of 4M reviews that are either positives or negatives. Training a model with FastText classifier results in a f1 score of 0.916. We show that using only a fraction of this dataset we are able to reach similar and even better results. We encourage you to try it on your own tasks! Note that if you are interested in Regression instead of classification, you can also do it following this [advice](http://forums.fast.ai/t/regression-using-ulmfit/18063/6). 
The notebook is organized as such: - Tokenize the reviews and create dictionaries - Download a pre-trained model and link the dictionary to the embedding layer of the model - Fine-tune the language model on the amazon reviews texts We have then the backbone of our algorithm: a pre-trained language model fine-tuned on Amazon reviews - Add a classifier to the language model and train the classifier layer only - Gradually defreeze successive layers to train different layers on the amazon reviews - Run a full classification task for several epochs - Use the model for inference! We end this notebook by looking at the specific effect of training size on the overall performance. This is to test the hypothesis that the ULMFit model does not need much labeled data to perform well. # Data Before starting, you should download the data from https://www.kaggle.com/bittlingmayer/amazonreviews, and put the extracted files into an ./Amazon folder somewhere you like, and use this path for this notebook. Also, we recommend working on a dedicated environment (e.g. mkvirtualenv fastai). Then clone the fastai github repo https://github.com/fastai/fastai and install requirements. 
``` from fastai.text import * import html import os import pandas as pd import pickle import re from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, \ confusion_matrix from sklearn.model_selection import train_test_split from time import time path = '/your/path/to/folder/Amazon' train = [] with open(os.path.join(path, 'train.ft.txt'), 'r') as file: for line in file: train.append(file.readline()) test = [] with open(os.path.join(path, 'test.ft.txt'), 'r') as file: for line in file: test.append(file.readline()) print(f'The train data contains {len(train)} examples') print(f'The test data contains {len(test)} examples') BOS = 'xbos' # beginning-of-sentence tag FLD = 'xfld' # data field tag PATH=Path('/your/path/to/folder/Amazon') CLAS_PATH=PATH/'amazon_class' CLAS_PATH.mkdir(exist_ok=True) LM_PATH=PATH/'amazon_lm' LM_PATH.mkdir(exist_ok=True) # Each item is '__label__1/2' and then the review so we split to get texts and labels trn_texts,trn_labels = [text[10:] for text in train], [text[:10] for text in train] trn_labels = [0 if label == '__label__1' else 1 for label in trn_labels] val_texts,val_labels = [text[10:] for text in test], [text[:10] for text in test] val_labels = [0 if label == '__label__1' else 1 for label in val_labels] # Following fast.ai recommendations we put our data in pandas dataframes col_names = ['labels','text'] df_trn = pd.DataFrame({'text':trn_texts, 'labels':trn_labels}, columns=col_names) df_val = pd.DataFrame({'text':val_texts, 'labels':val_labels}, columns=col_names) df_trn.head(10) df_trn.to_csv(CLAS_PATH/'train.csv', header=False, index=False) df_val.to_csv(CLAS_PATH/'test.csv', header=False, index=False) CLASSES = ['neg', 'pos'] (CLAS_PATH/'classes.txt').open('w').writelines(f'{o}\n' for o in CLASSES) ``` # Language Model ``` # We're going to fine tune the language model so it's ok to take some of the test set in our train data # for the lm fine-tuning trn_texts,val_texts = 
train_test_split(np.concatenate([trn_texts,val_texts]), test_size=0.1) df_trn = pd.DataFrame({'text':trn_texts, 'labels':[0]*len(trn_texts)}, columns=col_names) df_val = pd.DataFrame({'text':val_texts, 'labels':[0]*len(val_texts)}, columns=col_names) df_trn.to_csv(LM_PATH/'train.csv', header=False, index=False) df_val.to_csv(LM_PATH/'test.csv', header=False, index=False) # Here we use functions from the fast.ai course to get data chunksize=24000 re1 = re.compile(r' +') def fixup(x): x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace( 'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace( '<br />', "\n").replace('\\"', '"').replace('<unk>','u_n').replace(' @.@ ','.').replace( ' @-@ ','-').replace('\\', ' \\ ') return re1.sub(' ', html.unescape(x)) def get_texts(df, n_lbls=1): labels = df.iloc[:,range(n_lbls)].values.astype(np.int64) texts = f'\n{BOS} {FLD} 1 ' + df[n_lbls].astype(str) for i in range(n_lbls+1, len(df.columns)): texts += f' {FLD} {i-n_lbls} ' + df[i].astype(str) texts = list(texts.apply(fixup).values) tok = Tokenizer().proc_all_mp(partition_by_cores(texts)) return tok, list(labels) def get_all(df, n_lbls): tok, labels = [], [] for i, r in enumerate(df): print(i) tok_, labels_ = get_texts(r, n_lbls) tok += tok_; labels += labels_ return tok, labels df_trn = pd.read_csv(LM_PATH/'train.csv', header=None, chunksize=chunksize) df_val = pd.read_csv(LM_PATH/'test.csv', header=None, chunksize=chunksize) # This cell can take quite some time if your dataset is large # Run it once and comment it for later use tok_trn, trn_labels = get_all(df_trn, 1) tok_val, val_labels = get_all(df_val, 1) # Run this cell once and comment everything but the load statements for later use (LM_PATH/'tmp').mkdir(exist_ok=True) np.save(LM_PATH/'tmp'/'tok_trn.npy', tok_trn) np.save(LM_PATH/'tmp'/'tok_val.npy', tok_val) tok_trn = np.load(LM_PATH/'tmp'/'tok_trn.npy') tok_val = np.load(LM_PATH/'tmp'/'tok_val.npy') # Check the 
most common tokens freq = Counter(p for o in tok_trn for p in o) freq.most_common(25) # Check the least common tokens freq.most_common()[-25:] # Build your vocabulary by keeping only the most common tokens that appears frequently enough # and constrain the size of your vocabulary. We follow here the 60k recommendation. max_vocab = 60000 min_freq = 2 itos = [o for o,c in freq.most_common(max_vocab) if c>min_freq] itos.insert(0, '_pad_') itos.insert(0, '_unk_') stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)}) len(itos) trn_lm = np.array([[stoi[o] for o in p] for p in tok_trn]) val_lm = np.array([[stoi[o] for o in p] for p in tok_val]) np.save(LM_PATH/'tmp'/'trn_ids.npy', trn_lm) np.save(LM_PATH/'tmp'/'val_ids.npy', val_lm) pickle.dump(itos, open(LM_PATH/'tmp'/'itos.pkl', 'wb')) # Save everything trn_lm = np.load(LM_PATH/'tmp'/'trn_ids.npy') val_lm = np.load(LM_PATH/'tmp'/'val_ids.npy') itos = pickle.load(open(LM_PATH/'tmp'/'itos.pkl', 'rb')) vs=len(itos) vs,len(trn_lm) ``` # Using pre trained Language Model ``` # Uncomment this cell to download the pre-trained model. # It will be placed into the PATH that you defined earlier. # ! 
wget -nH -r -np -P {PATH} http://files.fast.ai/models/wt103/ # Load the weights of the model em_sz,nh,nl = 400,1150,3 PRE_PATH = PATH/'models'/'wt103' PRE_LM_PATH = PRE_PATH/'fwd_wt103.h5' wgts = torch.load(PRE_LM_PATH, map_location=lambda storage, loc: storage) # Check the word embedding layer and keep a 'mean word' for unknown tokens enc_wgts = to_np(wgts['0.encoder.weight']) row_m = enc_wgts.mean(0) enc_wgts.shape # Load the vocabulary on which the pre-trained model was trained # Define an embedding matrix with the vocabulary of our dataset itos2 = pickle.load((PRE_PATH/'itos_wt103.pkl').open('rb')) stoi2 = collections.defaultdict(lambda:-1, {v:k for k,v in enumerate(itos2)}) new_w = np.zeros((vs, em_sz), dtype=np.float32) for i,w in enumerate(itos): r = stoi2[w] new_w[i] = enc_wgts[r] if r>=0 else row_m # Use the new embedding matrix for the pre-trained model wgts['0.encoder.weight'] = T(new_w) wgts['0.encoder_with_dropout.embed.weight'] = T(np.copy(new_w)) wgts['1.decoder.weight'] = T(np.copy(new_w)) # Define the learner object to do the fine-tuning # Here we will freeze everything except the embedding layer, so that we can have a better # embedding for unknown words than just the mean embedding on which we initialise it. 
wd=1e-7 bptt=70 bs=52 opt_fn = partial(optim.Adam, betas=(0.8, 0.99)) trn_dl = LanguageModelLoader(np.concatenate(trn_lm), bs, bptt) val_dl = LanguageModelLoader(np.concatenate(val_lm), bs, bptt) md = LanguageModelData(PATH, 1, vs, trn_dl, val_dl, bs=bs, bptt=bptt) drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15])*0.7 learner= md.get_model(opt_fn, em_sz, nh, nl, dropouti=drops[0], dropout=drops[1], wdrop=drops[2], dropoute=drops[3], dropouth=drops[4]) learner.metrics = [accuracy] learner.freeze_to(-1) learner.model.load_state_dict(wgts) lr=1e-3 lrs = lr # Run one epoch of fine-tuning learner.fit(lrs/2, 1, wds=wd, use_clr=(32,2), cycle_len=1) # Save the fine-tuned model and unfreeze everything to later fine-tune the whole model learner.save('lm_last_ft') learner.load('lm_last_ft') learner.unfreeze() learner.lr_find(start_lr=lrs/10, end_lr=lrs*10, linear=True) learner.sched.plot() # Run this if you want to highly tune the LM to the Amazon data, with 15 epochs # use_clr controls the shape of the cyclical (triangular) learning rate learner.fit(lrs, 1, wds=wd, use_clr=(20,10), cycle_len=15) # Save the Backbone for further classification!! learner.save('lm1') learner.save_encoder('lm1_enc') learner.sched.plot_loss() ``` # Going back to classification! Now that we spent some time fine-tuning the language model on our Amazon data, let's see if we can classify easily these reviews. As before, some cells should be run once, and then use data loaders for later use. 
``` df_trn = pd.read_csv(CLAS_PATH/'train.csv', header=None, chunksize=chunksize) df_val = pd.read_csv(CLAS_PATH/'test.csv', header=None, chunksize=chunksize) tok_trn, trn_labels = get_all(df_trn, 1) tok_val, val_labels = get_all(df_val, 1) (CLAS_PATH/'tmp').mkdir(exist_ok=True) np.save(CLAS_PATH/'tmp'/'tok_trn.npy', tok_trn) np.save(CLAS_PATH/'tmp'/'tok_val.npy', tok_val) np.save(CLAS_PATH/'tmp'/'trn_labels.npy', trn_labels) np.save(CLAS_PATH/'tmp'/'val_labels.npy', val_labels) tok_trn = np.load(CLAS_PATH/'tmp'/'tok_trn.npy') tok_val = np.load(CLAS_PATH/'tmp'/'tok_val.npy') itos = pickle.load((LM_PATH/'tmp'/'itos.pkl').open('rb')) stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)}) len(itos) trn_clas = np.array([[stoi[o] for o in p] for p in tok_trn]) val_clas = np.array([[stoi[o] for o in p] for p in tok_val]) np.save(CLAS_PATH/'tmp'/'trn_ids.npy', trn_clas) np.save(CLAS_PATH/'tmp'/'val_ids.npy', val_clas) ``` # Classifier In this part, we adopt an unusual train/test hierarchy. While it's common to train on a big dataset and then test on a small one, here we want to test the hypothesis that the model can learn with little training data. Hence we take less data for training than for testing. ``` # We select here the 'size' first reviews of our dataset # The paper claims that it's possible to achieve very good results with few labeled examples # So let's try with 100 examples for training, and 5000 examples for validation. # We encourage you to try different values to see the effect of data size on performance. 
trn_size = 100 val_size = 5000 trn_clas = np.load(CLAS_PATH/'tmp'/'trn_ids.npy') val_clas = np.load(CLAS_PATH/'tmp'/'val_ids.npy') trn_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'trn_labels.npy')) val_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'val_labels.npy')) train = random.sample(list(zip(trn_clas, trn_labels)), trn_size) trn_clas = np.array([item[0] for item in train]) trn_labels = np.array([item[1] for item in train]) del train validation = random.sample(list(zip(val_clas, val_labels)), val_size) val_clas = np.array([item[0] for item in validation]) val_labels = np.array([item[1] for item in validation]) del validation bptt,em_sz,nh,nl = 70,400,1150,3 vs = len(itos) opt_fn = partial(optim.Adam, betas=(0.8, 0.99)) bs = 48 min_lbl = trn_labels.min() trn_labels -= min_lbl val_labels -= min_lbl c=int(trn_labels.max())+1 # Ccheck that the validation dataset is well balanced so acccuracy is a good metric # We'll also check other metrics usual for binary classification (precision, recall, f1 score) len(trn_labels[trn_labels == 1]) / len(trn_labels) trn_ds = TextDataset(trn_clas, trn_labels) val_ds = TextDataset(val_clas, val_labels) trn_samp = SortishSampler(trn_clas, key=lambda x: len(trn_clas[x]), bs=bs//2) val_samp = SortSampler(val_clas, key=lambda x: len(val_clas[x])) trn_dl = DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=1, sampler=trn_samp) val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=1, sampler=val_samp) # We define the model, here it a classifier on top of an RNN language model # We load the language model encoder that we fine tuned before # We freeze everything but the last layer, so that we can train the classification layer only. 
#load the saved weights from before, and freeze everything until the last layer md = ModelData(PATH, trn_dl, val_dl) dps = np.array([0.4, 0.5, 0.05, 0.3, 0.1]) m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1, layers=[em_sz*3, 50, c], drops=[dps[4], 0.1], dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3]) opt_fn = partial(optim.Adam, betas=(0.7, 0.99)) learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn) learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1) learn.clip=25. learn.metrics = [accuracy] lr=3e-3 lrm = 2.6 lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr]) lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2]) wd = 1e-7 wd = 0 learn.load_encoder('lm1_enc') learn.freeze_to(-1) learn.lr_find(lrs/1000) learn.sched.plot() # Run one epoch on the classification layer learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3)) # Save the trained model learn.save('clas_0') learn.load('clas_0') # Gradually unfreeze another layer to train a bit more parameters than just the classifier layer learn.freeze_to(-2) learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3)) # Save the trained model learn.save('clas_1') learn.load('clas_1') # Unfreeze everything and train for a few epochs on the whole set of parameters of the model learn.unfreeze() learn.fit(lrs, 1, wds=wd, cycle_len=14, use_clr=(32,10)) learn.sched.plot_loss() # Save the model learn.save('clas_2') ``` # Inference Nonw, let's play with the model we've just learned! ``` m = get_rnn_classifer(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1, layers=[em_sz*3, 50, c], drops=[dps[4], 0.1], dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3]) opt_fn = partial(optim.Adam, betas=(0.7, 0.99)) learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn) learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1) learn.clip=25. 
learn.metrics = [accuracy] lr=3e-3 lrm = 2.6 lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr]) wd = 1e-7 wd = 0 learn.load_encoder('lm1_enc') learn.load('clas_2') def get_sentiment(input_str: str): # predictions are done on arrays of input. # We only have a single input, so turn it into a 1x1 array texts = [input_str] # tokenize using the fastai wrapper around spacy tok = [t.split() for t in texts] # tok = Tokenizer().proc_all_mp(partition_by_cores(texts)) # turn into integers for each word encoded = [stoi[p] for p in tok[0]] idx = np.array(encoded)[None] idx = np.transpose(idx) tensorIdx = VV(idx) m.eval() m.reset() p = m.forward(tensorIdx) return np.argmax(p[0][0].data.cpu().numpy()) def prediction(texts): """Do the prediction on a list of texts """ y = [] for i, text in enumerate(texts): if i % 1000 == 0: print(i) encoded = text idx = np.array(encoded)[None] idx = np.transpose(idx) tensorIdx = VV(idx) m.eval() m.reset() p = m.forward(tensorIdx) y.append(np.argmax(p[0][0].data.cpu().numpy())) return y sentence = "I like Feedly" start = time() print(get_sentiment(sentence)) print(time() - start) y = prediction(list(val_clas)) # Show relevant metrics for binary classification # We encourage you to try training the classifier with different data size and its effect on performance print(f'Accuracy --> {accuracy_score(y, val_labels)}') print(f'Precision --> {precision_score(y, val_labels)}') print(f'F1 score --> {f1_score(y, val_labels)}') print(f'Recall score --> {recall_score(y, val_labels)}') print(confusion_matrix(y, val_labels)) print(classification_report(y, val_labels)) ``` # What training size do we need? The language model has already learnt a lot about the syntax. It is very knowledgeable about the context in which words appear in sentences. However, the language model does not contain any notion of [meaning](https://en.wikipedia.org/wiki/Meaning_%28linguistics%29). 
This problem is well summarised in [Emily Bender's tweet](https://twitter.com/emilymbender/status/1024042044035985408) during a very interesting Twitter thread that occurred in July around meaning in NLP. A cool summary of this thread can be found in the [Hugging Face](https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e) blogpost. Hence the meaning in language is very likely to be learned through supervision, with the help of ground-truth examples. However, when we perform some NLP tasks, sentiment analysis in our example, both syntax and meaning are important! The idea is that you can save a lot of time by being taught with a lot of blind syntax first, and then learning meaning. Think of when you start learning a completely new field. Well, it is far easier to learn it in your mother tongue than in another language you master less. The big practical gain here is that once you "know" a language, you need fewer supervised examples to learn a new thing! In our example, it means we need fewer labeled reviews for us to learn a relevant classifier. Let's verify this hypothesis by training a classifier with several training sizes and see how this size affects the performance! 
``` trn_clas = np.load(CLAS_PATH/'tmp'/'trn_ids.npy') val_clas = np.load(CLAS_PATH/'tmp'/'val_ids.npy') trn_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'trn_labels.npy')) val_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'val_labels.npy')) def experiment(trn_size, val_size): train = random.sample(list(zip(trn_clas, trn_labels)), trn_size) aux_trn_clas = np.array([item[0] for item in train]) aux_trn_labels = np.array([item[1] for item in train]) del train validation = random.sample(list(zip(val_clas, val_labels)), val_size) aux_val_clas = np.array([item[0] for item in validation]) aux_val_labels = np.array([item[1] for item in validation]) del validation bptt,em_sz,nh,nl = 70,400,1150,3 vs = len(itos) opt_fn = partial(optim.Adam, betas=(0.8, 0.99)) bs = 48 min_lbl = aux_trn_labels.min() aux_trn_labels -= min_lbl aux_val_labels -= min_lbl c=int(aux_trn_labels.max())+1 # Load data in relevant structures trn_ds = TextDataset(aux_trn_clas, aux_trn_labels) val_ds = TextDataset(aux_val_clas, aux_val_labels) trn_samp = SortishSampler(aux_trn_clas, key=lambda x: len(aux_trn_clas[x]), bs=bs//2) val_samp = SortSampler(aux_val_clas, key=lambda x: len(aux_val_clas[x])) trn_dl = DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=1, sampler=trn_samp) val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=1, sampler=val_samp) # Define the model and load the backbone lamguage model md = ModelData(PATH, trn_dl, val_dl) dps = np.array([0.4, 0.5, 0.05, 0.3, 0.1]) m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1, layers=[em_sz*3, 50, c], drops=[dps[4], 0.1], dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3]) opt_fn = partial(optim.Adam, betas=(0.7, 0.99)) learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn) learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1) learn.clip=25. 
learn.metrics = [accuracy] lr=3e-3 lrm = 2.6 lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr]) lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2]) wd = 1e-7 wd = 0 learn.load_encoder('lm1_enc') learn.freeze_to(-1) # Find th learning rate learn.lr_find(lrs/1000) # Run one epoch on the classification layer learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3)) # Save the trained model learn.save(f'{trn_size}clas_0') learn.load(f'{trn_size}clas_0') # Gradually unfreeze another layer to train a bit more parameters than just the classifier layer learn.freeze_to(-2) learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3)) # Save the trained model learn.save(f'{trn_size}clas_1') learn.load(f'{trn_size}clas_1') # Unfreeze everything and train for a few epochs on the whole set of parameters of the model learn.unfreeze() learn.fit(lrs, 1, wds=wd, cycle_len=14, use_clr=(32,10)) # Save the model learn.sched.plot_loss() learn.save(f'{trn_size}clas_2') from time import time val_size = 100000 for trn_size in [50, 100, 500, 1000, 5000, 10000, 20000, 50000]: print('#'*50) print(f'Experiment with training size {trn_size}') start = time() experiment(trn_size, val_size) t = time() - start print(f'Time cost: {t}') ``` Some notebook issues here, you might want to run this cell from a python script... # Conclusions Lety's see the evollution of the accuracy when we increas the size of the train data. For each training size, we report the best accuracy among the different epochs. 
``` import matplotlib.pyplot as plt best_acc = [0.84558, 0.87324, 0.91232, 0.9203, 0.93174, 0.93584, 0.94032, 0.94616] sizes = [50, 100, 500, 1000, 5000, 10000, 20000, 50000] plt.plot(sizes, best_acc) plt.title('Evolution of performance when increasing the training size') plt.xlabel('Training size') plt.ylabel('Accuracy') plt.show() plt.plot(sizes, best_acc) plt.title('Evolution of performance when increasing the training size, Zoom on the [0-10000] size zone') plt.xlabel('Training size') plt.ylabel('Accuracy') plt.xlim([0, 10000]) plt.show() plt.plot(np.log(sizes)/np.log(10), best_acc) plt.title('Evolution of performance when increasing the training size, with log scale for size') plt.xlabel('Training size (log)') plt.ylabel('Accuracy') plt.show() ``` - The first observation is, even with 50 samples only, we get a pretty great accuracy of 0.85! - Then we see that the learning progress is very consequent when going from a size of 50 to 1000 samples - The ULMFit beats the reported score from FastText (~0.92) when using 1000 samples only! Note that the reported score from FastText is from a training using the whole training data (3.6M samples) - The accuracy continues to rise when we increase the training size, but with a lower speed. Here the trade-off comes, where you have to decide whether the extra 0.1% in accuracy is worth paying for more labeled data! - From the log-scale graph we might expect even greater results when raining the training size. We have 4.6M training reviews so we could get orders of magnitude more so we could expect reaching 0.95 accuracy or more with the full dataset.
github_jupyter
# M* vs Mhalo Again, galaxy - halo matching is required. ``` import matplotlib matplotlib.use("Qt4Agg") import matplotlib.pyplot as plt import numpy as np import load import tree.halomodule as hmo import pickle import pandas as pd import utils.match as mtc import draw from load.info import Info def get_M1(z, M10=11.59 , M11=1.195): return M10 + M11*(z/(z+1)) def m_M(M,z): """ Moster SHM relation from Moster et al. 2013 Returns M*/Mhalo. Parameters ---------- M : float Halo Mass z : float Redshift """ N = get_M1 N10 = 0.0351 N11 = -0.0247 beta = get_M1 beta10 = 1.376 beta11 = -0.826 gamma = get_M1 gamma10 = 0.608 gamma11 = 0.329 M1 = 10**get_M1(z, 11.59, 1.195) nn = N(z, N10, N11) bb = beta(z, beta10, beta11) gg = gamma(z, gamma10, gamma11) return 2 * nn / ( (M/M1)**(-bb) + (M/M1)**(gg)) mm = np.logspace(10, 15, 100) plt.plot(np.log10(xp), np.log10(m_M(mm,0)*mm)) plt.show() clusters = ["05420", "39990", "01605", "05427", "36415",\ "36413", "29176", "29172", "04466", "10002",\ "17891", "24954", "28930", "32675", "35663",\ "14172", "06098", "07206"][0:1] fig, ax = plt.subplots(1,2) fig.set_size_inches(18, 8) mvir_all_c=[] mstar_all_c=[] info = Info(187, base='./05420/') for cluster in clusters: wdir = "./" + cluster + '/' try: cat_final, ad_final = pickle.load(open(wdir + cluster + "moster.pickle", "rb")) except: continue # Color code central / satellite ind_cen = ad_final["level"] == 1 ind_sat = ~ind_cen#np.where(ad_final["level"] != 1)[0] mstar = cat_final["mstar"] mvir = ad_final['mvir'] # plt.clf() mvir_all_c.extend(mvir[ind_cen]) mstar_all_c.extend(mstar[ind_cen]) satellites = ax[0].scatter(np.log10(mvir[ind_sat]),\ mstar[ind_sat]/mvir[ind_sat] / (info.ob/info.om), \ facecolors="blue", edgecolors="blue",\ label="satellite", ) centrals = ax[0].scatter(np.log10(mvir[ind_cen]),\ mstar[ind_cen]/mvir[ind_cen] / (info.ob/info.om),\ facecolors = "red", edgecolors="red",\ label="central") centrals = ax[1].scatter(np.log10(mvir[ind_cen]),\ mstar[ind_cen]/mvir[ind_cen] / 
(info.ob/info.om),\ facecolors = "red", edgecolors="red",\ label="central") ax[0].plot(np.log10(mm), m_M(mm,0) / (info.ob/info.om), label="Moster 13") ax[1].plot(np.log10(mm), m_M(mm,0) / (info.ob/info.om), label="Moster 13") ax[0].legend() ax[1].legend() ax[0].set_yscale("log") ax[1].set_yscale("log") #ax[0].set_ylim([0,5]) #ax[1].set_ylim([0,2]) ax[0].set_ylabel(r"$ M_{\star} / M_{200} / (\Omega_{b} / \Omega_{m} )$") ax[0].set_xlabel(r"log$[M_{200} / M_{\odot}]$") ax[1].set_ylabel(r"$ M_{\star} / M_{200} / (\Omega_{b} / \Omega_{m} )$") ax[1].set_xlabel(r"log$[M_{200} / M_{\odot}]$") plt.savefig("./ALL_Moster_plot.png") #plt.savefig(wdir + cluster + "Moster_plot_central_only.png") plt.close() fig, ax = plt.subplots() #plt.clf() ax.plot(np.log10(mm), np.log10(m_M(mm,0)*mm)) ax.scatter(np.log10(mvir_all_c), np.log10(mstar_all_c)) plt.savefig("./Mstar_Mhal_cen.png") ```
github_jupyter
``` # !pip install "pymongo[srv]" import pymongo db_url = "mongodb+srv://rithik:capstoneproject@capstone1.86sce.mongodb.net/capstone1?retryWrites=true&w=majority" client = pymongo.MongoClient(db_url) db = client.capstone collection = db['quiz'] # collection.rename('quiz_cards', dropTarget=True) quiz = {'quizname': 'Cells', 'questions': {'1': { "question": 'sample question1', 'distractors': { '1': { 'distractor': 'reflection1', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '2': { 'distractor': 'reflection2', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '3': { 'distractor': 'reflection3', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '4': { 'distractor': 'reflection4', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' } }, 'correct_answer': { 'answer': 'reflection2', 'distractor_number': '2' } }, '2': { "question": 'sample question1', 'distractors': { '1': { 'distractor': 'reflection1', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '2': { 'distractor': 'reflection2', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '3': { 'distractor': 'reflection3', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '4': { 'distractor': 'reflection4', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' } }, 'correct_answer': { 'answer': 'reflection2', 'distractor_number': '2' } }, '3': { "question": 'sample question1', 'distractors': { '1': { 'distractor': 'reflection1', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '2': { 'distractor': 'reflection2', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '3': { 'distractor': 
'reflection3', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' }, '4': { 'distractor': 'reflection4', 'meaning': 'the image of something as reflected by a mirror (or other reflective material)' } }, 'correct_answer': { 'answer': 'reflection2', 'distractor_number': '2' } }, }, } quiz['questions']['2'] collection = db['quizzes'] collection.insert_one(quiz) query = {'quizname':"light1"} res = collection.find(query) res for i in res: print(i) ```
github_jupyter
``` !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \ -O /tmp/horse-or-human.zip !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \ -O /tmp/validation-horse-or-human.zip ``` The following python code will use the OS library to use Operating System libraries, giving you access to the file system, and the zipfile library allowing you to unzip the data. ``` import os import zipfile local_zip = '/tmp/horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/horse-or-human') local_zip = '/tmp/validation-horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/validation-horse-or-human') zip_ref.close() ``` The contents of the .zip are extracted to the base directory `/tmp/horse-or-human`, which in turn each contain `horses` and `humans` subdirectories. In short: The training set is the data that is used to tell the neural network model that 'this is what a horse looks like', 'this is what a human looks like' etc. One thing to pay attention to in this sample: We do not explicitly label the images as horses or humans. If you remember with the handwriting example earlier, we had labelled 'this is a 1', 'this is a 7' etc. Later you'll see something called an ImageGenerator being used -- and this is coded to read images from subdirectories, and automatically label them from the name of that subdirectory. So, for example, you will have a 'training' directory containing a 'horses' directory and a 'humans' one. ImageGenerator will label the images appropriately for you, reducing a coding step. 
Let's define each of these directories: ``` # Directory with our training horse pictures train_horse_dir = os.path.join('/tmp/horse-or-human/horses') # Directory with our training human pictures train_human_dir = os.path.join('/tmp/horse-or-human/humans') # Directory with our training horse pictures validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/horses') # Directory with our training human pictures validation_human_dir = os.path.join('/tmp/validation-horse-or-human/humans') train_horse_names = os.listdir(train_horse_dir) train_human_names = os.listdir(train_human_dir) validation_horse_hames = os.listdir(validation_horse_dir) validation_human_names = os.listdir(validation_human_dir) ``` ## Building a Small Model from Scratch But before we continue, let's start defining the model: Step 1 will be to import tensorflow. ``` import tensorflow as tf ``` We then add convolutional layers as in the previous example, and flatten the final result to feed into the densely connected layers. Finally we add the densely connected layers. Note that because we are facing a two-class classification problem, i.e. a *binary classification problem*, we will end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network will be a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0). 
``` model = tf.keras.models.Sequential([ # Note the input shape is the desired size of the image 150x150 with 3 bytes color # This is the first convolution tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), # The second convolution tf.keras.layers.Conv2D(32, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The third convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The fourth convolution #tf.keras.layers.Conv2D(64, (3,3), activation='relu'), #tf.keras.layers.MaxPooling2D(2,2), # The fifth convolution #tf.keras.layers.Conv2D(64, (3,3), activation='relu'), #tf.keras.layers.MaxPooling2D(2,2), # Flatten the results to feed into a DNN tf.keras.layers.Flatten(), # 512 neuron hidden layer tf.keras.layers.Dense(512, activation='relu'), # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans') tf.keras.layers.Dense(1, activation='sigmoid') ]) ``` The model.summary() method call prints a summary of the NN ``` model.summary() ``` The "output shape" column shows how the size of your feature map evolves in each successive layer. The convolution layers reduce the size of the feature maps by a bit due to padding, and each pooling layer halves the dimensions. Next, we'll configure the specifications for model training. We will train our model with the `binary_crossentropy` loss, because it's a binary classification problem and our final activation is a sigmoid. (For a refresher on loss metrics, see the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/descending-into-ml/video-lecture).) We will use the `rmsprop` optimizer with a learning rate of `0.001`. During training, we will want to monitor classification accuracy. 
**NOTE**: In this case, using the [RMSprop optimization algorithm](https://wikipedia.org/wiki/Stochastic_gradient_descent#RMSProp) is preferable to [stochastic gradient descent](https://developers.google.com/machine-learning/glossary/#SGD) (SGD), because RMSprop automates learning-rate tuning for us. (Other optimizers, such as [Adam](https://wikipedia.org/wiki/Stochastic_gradient_descent#Adam) and [Adagrad](https://developers.google.com/machine-learning/glossary/#AdaGrad), also automatically adapt the learning rate during training, and would work equally well here.) ``` from tensorflow.keras.optimizers import RMSprop model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['accuracy']) ``` ### Data Preprocessing Let's set up data generators that will read pictures in our source folders, convert them to `float32` tensors, and feed them (with their labels) to our network. We'll have one generator for the training images and one for the validation images. Our generators will yield batches of images of size 300x300 and their labels (binary). As you may already know, data that goes into neural networks should usually be normalized in some way to make it more amenable to processing by the network. (It is uncommon to feed raw pixels into a convnet.) In our case, we will preprocess our images by normalizing the pixel values to be in the `[0, 1]` range (originally all values are in the `[0, 255]` range). In Keras this can be done via the `keras.preprocessing.image.ImageDataGenerator` class using the `rescale` parameter. This `ImageDataGenerator` class allows you to instantiate generators of augmented image batches (and their labels) via `.flow(data, labels)` or `.flow_from_directory(directory)`. These generators can then be used with the Keras model methods that accept data generators as inputs: `fit`, `evaluate_generator`, and `predict_generator`. 
``` from tensorflow.keras.preprocessing.image import ImageDataGenerator # All images will be rescaled by 1./255 train_datagen = ImageDataGenerator(rescale=1/255) validation_datagen = ImageDataGenerator(rescale=1/255) # Flow training images in batches of 128 using train_datagen generator train_generator = train_datagen.flow_from_directory( '/tmp/horse-or-human/', # This is the source directory for training images target_size=(150, 150), # All images will be resized to 150x150 batch_size=128, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') # Flow training images in batches of 128 using train_datagen generator validation_generator = validation_datagen.flow_from_directory( '/tmp/validation-horse-or-human/', # This is the source directory for training images target_size=(150, 150), # All images will be resized to 150x150 batch_size=32, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') ``` ### Training Let's train for 15 epochs -- this may take a few minutes to run. Do note the values per epoch. The Loss and Accuracy are a great indication of progress of training. It's making a guess as to the classification of the training data, and then measuring it against the known label, calculating the result. Accuracy is the portion of correct guesses. ``` history = model.fit( train_generator, steps_per_epoch=8, epochs=15, verbose=1, validation_data = validation_generator, validation_steps=8) ``` ### Running the Model Let's now take a look at actually running a prediction using the model. This code will allow you to choose 1 or more files from your file system, it will then upload them, and run them through the model, giving an indication of whether the object is a horse or a human. 
``` import numpy as np from google.colab import files from keras.preprocessing import image uploaded = files.upload() for fn in uploaded.keys(): # predicting images path = '/content/' + fn img = image.load_img(path, target_size=(150, 150)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) images = np.vstack([x]) classes = model.predict(images, batch_size=10) print(classes[0]) if classes[0]>0.5: print(fn + " is a human") else: print(fn + " is a horse") ``` ### Visualizing Intermediate Representations To get a feel for what kind of features our convnet has learned, one fun thing to do is to visualize how an input gets transformed as it goes through the convnet. Let's pick a random image from the training set, and then generate a figure where each row is the output of a layer, and each image in the row is a specific filter in that output feature map. Rerun this cell to generate intermediate representations for a variety of training images. ``` import matplotlib.pyplot as plt import numpy as np import random from tensorflow.keras.preprocessing.image import img_to_array, load_img # Let's define a new Model that will take an image as input, and will output # intermediate representations for all layers in the previous model after # the first. successive_outputs = [layer.output for layer in model.layers[1:]] #visualization_model = Model(img_input, successive_outputs) visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs) # Let's prepare a random input image from the training set. 
horse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names] human_img_files = [os.path.join(train_human_dir, f) for f in train_human_names] img_path = random.choice(horse_img_files + human_img_files) img = load_img(img_path, target_size=(150, 150)) # this is a PIL image x = img_to_array(img) # Numpy array with shape (150, 150, 3) x = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3) # Rescale by 1/255 x /= 255 # Let's run our image through our network, thus obtaining all # intermediate representations for this image. successive_feature_maps = visualization_model.predict(x) # These are the names of the layers, so can have them as part of our plot layer_names = [layer.name for layer in model.layers] # Now let's display our representations for layer_name, feature_map in zip(layer_names, successive_feature_maps): if len(feature_map.shape) == 4: # Just do this for the conv / maxpool layers, not the fully-connected layers n_features = feature_map.shape[-1] # number of features in feature map # The feature map has shape (1, size, size, n_features) size = feature_map.shape[1] # We will tile our images in this matrix display_grid = np.zeros((size, size * n_features)) for i in range(n_features): # Postprocess the feature to make it visually palatable x = feature_map[0, :, :, i] x -= x.mean() x /= x.std() x *= 64 x += 128 x = np.clip(x, 0, 255).astype('uint8') # We'll tile each filter into this big horizontal grid display_grid[:, i * size : (i + 1) * size] = x # Display the grid scale = 20. / n_features plt.figure(figsize=(scale * n_features, scale)) plt.title(layer_name) plt.grid(False) plt.imshow(display_grid, aspect='auto', cmap='viridis') ``` As you can see we go from the raw pixels of the images to increasingly abstract and compact representations. The representations downstream start highlighting what the network pays attention to, and they show fewer and fewer features being "activated"; most are set to zero. 
This is called "sparsity." Representation sparsity is a key feature of deep learning. These representations carry increasingly less information about the original pixels of the image, but increasingly refined information about the class of the image. You can think of a convnet (or a deep network in general) as an information distillation pipeline. ## Clean Up Before running the next exercise, run the following cell to terminate the kernel and free memory resources: ``` import os, signal os.kill(os.getpid(), signal.SIGKILL) ```
github_jupyter
![couplage1](images/couplage1.jpg) # Equilibre énergétique de l'échangeur On note : - $m_{dro}$ le débit massique du fluide dans le dromotherme en $\frac{kg}{s}$ - $Cp_{dro}$ la capacité thermique du fluide circulant dans le dromotherme en $\frac{J}{kg K}$ - $m_{sto}$ le débit massique du fluide dans le système géothermique du stockage en $\frac{kg}{s}$ - $Cp_{sto}$ la capacité thermique du fluide circulant dans le système géothermique du stockage en $\frac{J}{kg K}$ On pose $coeff = \frac{m_{dro} * Cp_{dro}}{m_{sto} * Cp_{sto}}$ ## Cas 1 : $coeff <= 1$ On a \begin{equation} T_{inj\_dro} = T_{sor\_dro} - eff * (T_{sor\_dro} - T_{sor\_sto}) \end{equation} Et : \begin{equation} T_{inj\_sto} = T_{sor\_sto} + coeff * (T_{sor\_dro} - T_{inj\_dro}) \end{equation} On peut donc écrire : \begin{equation} T_{inj\_sto} = T_{sor\_sto} + coeff * (T_{sor\_dro} - T_{sor\_dro} + eff * (T_{sor\_dro} - T_{sor\_sto})) \end{equation} Ce qui laisse : \begin{equation} T_{inj\_sto} = T_{sor\_sto} + coeff * eff * (T_{sor\_dro} - T_{sor\_sto}) \end{equation} # Echanges énérgétiques au niveau du système géothermique équipant le système de stockage Si k est le coefficient du système énergétique exprimé en W/K, et en notant y la température dans le massif de stockage, on a : \begin{equation} m_{sto} * Cp_{sto} * (T_{inj\_sto} - T_{sor\_sto}) = k * (\frac{T_{inj\_sto} + T_{sor\_sto}}{2} - y ) \end{equation} Soit : \begin{equation} T_{inj\_sto} (m_{sto} * Cp_{sto} - \frac{k}{2}) = (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto} - k y \end{equation} Ou encore : \begin{equation} T_{inj\_sto} = \frac{ (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto} - k y }{m_{sto} * Cp_{sto} - \frac{k}{2}} \end{equation} # Bilan On obtient 2 expressions de $T_{inj\_sto}$, permettant ainsi de formuler $T_{sor\_sto}$ fonction de y et de $T_{sor\_dro}$ seulement : \begin{equation} T_{sor\_sto} + coeff * eff * (T_{sor\_dro} - T_{sor\_sto}) = \frac{(m_{sto} * Cp_{sto} + \frac{k}{2} ) T_{sor\_sto} - k y} {m_{sto} * 
Cp_{sto} - \frac{k}{2}} \end{equation} En posant $ B = coeff * eff * (m_{sto} * Cp_{sto} - \frac{k}{2})$ On obtient : \begin{equation} (m_{sto} * Cp_{sto} - \frac{k}{2})*T_{sor\_sto} + B * (T_{sor\_dro} - T_{sor\_sto}) = (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto} - k y \end{equation} Soit : \begin{equation} (m_{sto} * Cp_{sto} - \frac{k}{2} - B - m_{sto} * Cp_{sto} - \frac{k}{2}) * T_{sor\_sto} = - B * T_{sor\_dro} - k y \end{equation} Ou encore : \begin{equation} ( k + B ) * T_{sor\_sto} = ky + B * T_{sor\_dro} \end{equation} Il reste : \begin{equation} T_{sor\_sto} = \frac{ky + B * T_{sor\_dro}}{k+B} \end{equation} # Couplage avec la PAC On note : - $P_{geo}$ la puissance géothermique extraite en W - $besoin$ le besoin en chauffage du bâtiment en W - $P_{elec}$ la puissance électrique de la PAC en W - COP le coefficient de performance de la PAC On a : $besoin = P_{elec} + P_{geo}$ Et \begin{equation} COP=\frac{besoin}{P_{elec}} \end{equation} De ces deux expressions, on obtient la relation entre la puissance géothermique et le besoin du bâtiment: \begin{equation} P_{geo}=\frac{COP-1}{COP} * besoin \end{equation} La puissance géothermique s'exprime aussi : $P_{geo} = m_{pac} Cp_{pac} ( T_{inj\_pac}-T_{sor\_pac} )$ On en déduit la température de sortie du fluide dans la PAC: \begin{equation} T_{sor\_pac} = T_{inj\_pac} - \frac{P_{geo}}{m_{pac} * Cp_{pac}} \end{equation} En écrivant l'équilibre énergétique entre la PAC et le stockage, comme on a écrit celui entre le dromotherm et stockage, on a : \begin{equation} T_{sor\_pac} = \frac{ (m_{pac} * Cp_{pac} + \frac{k}{2}) T_{inj\_pac} - k y }{m_{pac} * Cp_{pac} - \frac{k}{2}} \end{equation} Les deux expressions de $T_{sor\_pac}$ nous permettent d'obtenir $T_{inj\_pac}$ : \begin{equation} T_{inj\_pac} = y-\frac{C Pgeo}{k} \end{equation} avec \begin{equation} C = 1-\frac{k}{2 m_{pac} Cp_{pac}} \end{equation} ## Bâtiment ![Schema RC](images/RC_simple.png) Le bâtiment est assimilé à un modèle R3C. 
On note : - Tconsigne : température de consigne en °C - Tint : température intérieure en °C - Text : température extérieure en °C - Rm : Résistance thermique des murs (K/W) - Ri : Résistance superficielle intérieure (K/W) - Rf : résistance de fuite (infiltrations+vitre+renouvellement d'air) K/W Par analogie électrique, on assimile les températures à des tensions et les puissances à des intensités en première approximation, on a donc : \begin{equation} \frac {T_{int}-T_{ext}}{R_m+R_i}+ \frac {T_{int} -T_{ext}}{R_f} + C \frac{dT_{int}}{dt}=Qchauffage=besoin \end{equation} Soit : \begin{equation} C \frac{dT_{int}}{dt}=Qchauffage- \frac {T_{int}-T_{ext}}{R_m+R_i}-\frac {T_{int} -T_{ext}}{R_f} \end{equation} Pour maintenir Tint constante et égale à Tconsigne, on doit donc développer : \begin{equation} Qchauffage=(Tconsigne-Text)*(\frac {1}{Rm+Ri}+ \frac {1}{Rf} ) \end{equation} # RECAPITULATIF DE TOUTES LES EQUATIONS DU SYSTEME ![couplage](images/couplage2.jpg) ## Dromotherm \begin{equation} C_s h_s\frac{\partial T_s}{\partial t}=(1-\alpha)R_g(t)+ R_{atm}(t) -\epsilon\sigma T_s^{4}-H_v(T_s-T_{ext}(t))-r_{s/d}(T_s-T_f) \end{equation} \begin{equation} ((1-\phi) C_d h_d + \phi C_f h_d ) \frac {\partial T_f}{\partial t} + K_p C_f h_d \frac {\partial T_f}{\partial x}=r_{s/d}(T_s-T_f) - r_{d/b1}(T_f-T_{b1}) \end{equation} \begin{equation} C_b h_{b1}\frac{\partial T_{b1}}{\partial t}=r_{d/b1}(T_f-T_{b1})- r_{b1/b2}(T_{b1}-T_{b2}) \end{equation} \begin{equation} C_b h_{b2}\frac{\partial T_{b2}}{\partial t}= r_{b1/b2}(T_{b1}-T_{b2}) \end{equation} ## Echangeur thermique \begin{equation} T_{inj\_dro} = T_{sor\_dro} - eff * (T_{sor\_dro} - T_{sor\_sto}) \end{equation} ## Stockage \begin{equation} T_{inj\_sto} = \frac{ (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto} - k T_{stock} }{m_{sto} * Cp_{sto} - \frac{k}{2}} \end{equation} \begin{equation} T_{sor\_sto} = \frac{k T_{stock} + B * T_{sor\_dro}}{k+B} \end{equation} ## PAC \begin{equation} T_{sor\_pac} = T_{inj\_pac} - 
\frac{P_{geo}}{m_{pac} * Cp_{pac}} \end{equation} \begin{equation} T_{inj\_pac} = T_{stock}-\frac{C Pgeo}{k} \end{equation} ## Température stockage ### 1er cas : Dromotherm seul en marche \begin{equation} m_{sable} Cp_{sable} \frac {dT_{stock}}{dt}=m_{sto} Cp_{sto} ( T_{inj\_sto} - T_{sor\_sto} ) \end{equation} ### 2ème cas : PAC seule en marche \begin{equation} m_{sable} Cp_{sable} \frac {dT_{stock}}{dt}= m_{pac} Cp_{pac} (T_{sor\_pac}- T_{inj\_pac}) \end{equation} ### 3ème cas : Dromotherm et PAC en marche \begin{equation} m_{sable} Cp_{sable} \frac {dT_{stock}}{dt}=m_{sto} Cp_{sto} ( T_{inj\_sto} - T_{sor\_sto} ) + m_{pac} Cp_{pac} (T_{sor\_pac}- T_{inj\_pac}) \end{equation} ## Bâtiment \begin{equation} besoin_{chauffage}=(Tconsigne-Text)*(\frac {1}{Rm+Ri}+ \frac {1}{Rf} ) \end{equation} \begin{equation} besoin_{ECS}= Volume_{ballon}*N_{pers}*(T_{ballon}-T_{eau})*Cp_{eau}/(24*3600) \end{equation} # SCHEMA NUMERIQUE Soit $t_i$, l'instant courant. ## Dromotherm Le schéma numérique se résume à : \begin{equation} T_{sor\_dro}(t_i)=D(t_i,T(t_{i-1}),T_{inj\_dro}(t_{i-1})) \end{equation} avec : \begin{equation} T(t_i)=[Ts(t_i),Tf(t_i),Tb1(t_i),Tb2(t_i)] \end{equation} ## Echangeur thermique \begin{equation} T_{inj\_dro}(t_i) = T_{sor\_dro}(t_i) - eff * (T_{sor\_dro}(t_i) - T_{sor\_sto}(t_i)) \end{equation} ## Stockage \begin{equation} T_{inj\_sto}(t_i) = \frac{ (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto}(t_i) - k T_{stock}(t_i) }{m_{sto} * Cp_{sto} - \frac{k}{2}} \end{equation} \begin{equation} T_{sor\_sto}(t_i) = \frac{k T_{stock}(t_i) + B * T_{sor\_dro}(t_i)}{k+B} \end{equation} ## PAC \begin{equation} T_{inj\_pac}(t_i) = T_{stock}(t_i)-\frac{C Pgeo(t_i)}{k} \end{equation} \begin{equation} T_{sor\_pac}(t_i) = T_{inj\_pac}(t_i) - \frac{P_{geo}(t_i)}{m_{pac} * Cp_{pac}} \end{equation} ## Température stockage Les équations régissant la température du stockage dans chaque cas est une équation différentielle d'ordre 1 sous la forme: \begin{equation} \frac {dT_{stock}}{dt}= F 
(t,T_{stock}) \end{equation} Suivant la méthode d'Euler explicite on a : \begin{equation} T_{stock}(t_{i})=T_{stock}(t_{i-1})+ dt *F (t_{i-1},T_{stock}(t_{i-1})) \end{equation} ## Bâtiment \begin{equation} besoin_{chauffage}(t_i)=(Tconsigne-Text(t_i))*(\frac {1}{Rm+Ri}+ \frac {1}{Rf} ) \end{equation} \begin{equation} besoin_{ECS}(t_i)= Volume_{ballon}*N_{pers}*(T_{ballon}-T_{eau}(t_i))*Cp_{eau}/(24*3600) \end{equation} # ALGORITHME DE RESOLUTION ## Phase d'initialisation (t=0) A l'instant initial, tout le système est supposé être à l'arrêt et toutes les températures égales à une valeur $T_{ini}$ ## Phase itérative ( t>0) La procédure itérative de calcul des inconnues se présente comme suit: ### I- Choix du fonctionnement du démonstrateur Le démonstrateur peut fonctionner de 8 manières. L'utilisateur choisit: En été: 1. Dromotherm ON et PAC ON 2. Dromotherm ON et PAC OFF 3. Dromotherm OFF et PAC ON 4. Dromotherm OFF et PAC OFF En hiver: 1. Dromotherm ON et PAC ON 2. Dromotherm ON et PAC OFF 3. Dromotherm OFF et PAC ON 4. Dromotherm OFF et PAC OFF NB: PAC ON = chauffage et/ou ECS ### II- Calculer la variation temporelle de la température du stockage suivant chaque cas 1. Dromotherm ON et PAC ON $der=\frac{m_{sto} Cp_{sto} (T_{inj\_sto}(t_{i})-T_{sor\_sto}(t_{i}))-P_{geo}(t_i)}{m_{sable} Cp_{sable}} $ 2. Dromotherm ON et PAC OFF $der=\frac{m_{sto} Cp_{sto} (T_{inj\_sto}(t_{i})-T_{sor\_sto}(t_{i}))}{m_{sable} Cp_{sable}} $ 3. Dromotherm OFF et PAC ON $der=\frac{-P_{geo}(t_i)}{m_{sable} Cp_{sable}} $ 4. Dromotherm OFF et PAC OFF $der=0$ ### III- Calculer la température du stockage par la méthode d'Euler explicite $T_{stock}(t_{i})=T_{stock}(t_{i-1})+ dt *der$ ### IV- Calculer les températures des fluides circulant dans le dromotherm, l'échangeur et la PAC 1. 
Si Dromotherm ON Calculer $T_{sor\_dro}(t_i)$ en fonction de $T_{inj\_dro}(t_{i-1})$ et du débit unitaire qdro_u Calculer $T_{sor\_sto}(t_i)$ par $T_{sor\_sto}(t_i) = \frac{k T_{stock}(t_i) + B * T_{sor\_dro}(t_i)}{k+B}$ Calculer $T_{inj\_sto}(t_i)$ par $ T_{inj\_sto}(t_i) = \frac{ (m_{sto} * Cp_{sto} + \frac{k}{2}) T_{sor\_sto}(t_i) - k T_{stock}(t_i) }{m_{sto} * Cp_{sto} - \frac{k}{2}}$ Calculer $T_{inj\_dro}(t_i)$ par $T_{inj\_dro}(t_i) = T_{sor\_dro}(t_i) - eff * (T_{sor\_dro}(t_i) - T_{sor\_sto}(t_i))$ 2. Si Dromotherm OFF Calculer $T_{sor\_dro}(t_i)$ en fonction de $T_{inj\_dro}(t_{i-1})$ et du débit nul Calculer $T_{inj\_dro}(t_i)$ par $T_{inj\_dro}(t_i) = T_{sor\_dro}(t_{i-1})$ Calculer $T_{sor\_sto}(t_i)$ par $T_{sor\_sto}(t_i) = T_{sor\_sto}(t_{i-1})$ Calculer $T_{inj\_sto}(t_i)$ par $ T_{inj\_sto}(t_i) = T_{inj\_sto}(t_{i-1}) $ 3. Si la PAC ON Calculer $T_{inj\_pac}(t_i)$ par $T_{inj\_pac}(t_i) = T_{stock}(t_i)-\frac{C Pgeo(t_i)}{k}$ Calculer $T_{sor\_pac}(t_i)$ par $T_{sor\_pac}(t_i)= T_{inj\_pac}(t_i) - \frac{P_{geo}(t_i)}{m_{pac} * Cp_{pac}}$ 4. Si la PAC OFF Calculer $T_{inj\_pac}(t_i)$ par $T_{inj\_pac}(t_i) = T_{inj\_pac}(t_{i-1})$ Calculer $T_{sor\_pac}(t_i)$ par $T_{sor\_pac}(t_i)= T_{sor\_pac}(t_{i-1})$ La procédure recommence à l'étape I
github_jupyter
# Coordinates usage in ctapipe ``` import astropy.units as u import copy import numpy as np import matplotlib.pyplot as plt from ctapipe.io import EventSource from ctapipe.calib import CameraCalibrator from ctapipe.utils import get_dataset_path from ctapipe.visualization import ArrayDisplay %matplotlib inline from astropy.coordinates import SkyCoord, AltAz from ctapipe.coordinates import ( GroundFrame, TiltedGroundFrame, NominalFrame, TelescopeFrame, CameraFrame, ) # make plots and fonts larger plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 16 ``` ## Open test dataset ``` filename = get_dataset_path("gamma_test_large.simtel.gz") source = EventSource(filename, max_events=4) events = [copy.deepcopy(event) for event in source] event = events[3] layout = set(source.subarray.tel_ids) ``` ### Choose event with LST This ensures that the telescope is not "parked" (as it would be in an event where it is not triggered) but is actually pointing to a source. ``` print(f'Telescope with data: {event.r0.tel.keys()}') tel_id = 2 ``` ## AltAz See [Astropy Docs on AltAz](http://docs.astropy.org/en/stable/api/astropy.coordinates.AltAz.html). Pointing direction of telescopes or the origin of a simulated shower are described in the `AltAz` frame. This is a local, angular coordinate frame, with angles `altitude` and `azimuth`. Altitude is measured from the Horizon (0°) to the Zenith (90°). For the azimuth, there are different conventions. In Astropy and thus ctapipe, Azimuth is oriented East of North (i.e., N=0°, E=90°). ``` from astropy.time import Time from astropy.coordinates import EarthLocation obstime = Time('2013-11-01T03:00') location = EarthLocation.of_site('Roque de los Muchachos') altaz = AltAz(location=location, obstime=obstime) # NOTE: alt must come from array_altitude and az from array_azimuth; # the original cell had the two attributes swapped (the per-telescope # pointing built later maps them correctly). array_pointing = SkyCoord( alt=event.pointing.array_altitude, az=event.pointing.array_azimuth, frame=altaz, ) print(array_pointing) ``` ## CameraFrame Camera coordinate frame. 
The camera frame is a 2d cartesian frame, describing position of objects in the focal plane of the telescope. The frame is defined as in H.E.S.S., starting at the horizon, the telescope is pointed to magnetic north in azimuth and then up to zenith. Now, x points north and y points west, so in this orientation, the camera coordinates line up with the CORSIKA ground coordinate system. MAGIC and FACT use a different camera coordinate system: Standing at the dish, looking at the camera, x points right, y points up. To transform MAGIC/FACT to ctapipe, do x' = -y, y' = -x. **Typical usage**: Position of pixels in the focal plane. ``` geometry = source.subarray.tel[tel_id].camera.geometry pix_x = geometry.pix_x pix_y = geometry.pix_y focal_length = source.subarray.tel[tel_id].optics.equivalent_focal_length telescope_pointing = SkyCoord( alt=event.pointing.tel[tel_id].altitude, az=event.pointing.tel[tel_id].azimuth, frame=altaz, ) camera_frame = CameraFrame( focal_length=focal_length, rotation=0 * u.deg, telescope_pointing=telescope_pointing, ) cam_coords = SkyCoord(x=pix_x, y=pix_y, frame=camera_frame) print(cam_coords) plt.scatter(cam_coords.x, cam_coords.y) plt.title(f'Camera type: {geometry.camera_name}') plt.xlabel(f'x / {cam_coords.x.unit}') plt.ylabel(f'y / {cam_coords.y.unit}') plt.axis('square'); ``` The implementation of the coordinate system with astropy makes it easier to use time of the observation and location of the observing site, to understand, for example which stars are visible during a certain night and how they might be visible in the camera. 
``` from ctapipe.visualization import CameraDisplay from ctapipe.instrument import CameraGeometry location = EarthLocation.of_site('Roque de los Muchachos') obstime = Time('2018-11-01T04:00') crab = SkyCoord.from_name("crab nebula") altaz = AltAz(location=location, obstime=obstime) pointing = crab.transform_to(altaz) camera_frame = CameraFrame( telescope_pointing=pointing, focal_length=focal_length, obstime=obstime, location=location, ) cam = CameraGeometry.from_name('LSTCam') fig, ax = plt.subplots() display = CameraDisplay(cam, ax=ax) ax.set_title( f'La Palma, {obstime}, az={pointing.az.deg:.1f}°, zenith={pointing.zen.deg:.1f}°, camera={geometry.camera_name}' ) for i, name in enumerate(['crab nebula', 'o tau', 'zet tau']): star = SkyCoord.from_name(name) star_cam = star.transform_to(camera_frame) x = star_cam.x.to_value(u.m) y = star_cam.y.to_value(u.m) ax.plot(x, y, marker='*', color=f'C{i}') ax.annotate( s=name, xy=(x, y), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) plt.show() ``` ## TelescopeFrame Telescope coordinate frame. A `Frame` using a `UnitSphericalRepresentation`. This is basically the same as a `HorizonCoordinate`, but the origin is at the telescope's pointing direction. This is what astropy calls a `SkyOffsetFrame`. The axis of the telescope frame, `fov_lon` and `fov_lat`, are aligned with the horizontal system's azimuth and altitude respectively. Pointing corrections should applied to the transformation between this frame and the camera frame. 
``` telescope_frame = TelescopeFrame( telescope_pointing=pointing, obstime=pointing.obstime, location=pointing.location, ) telescope_coords = cam_coords.transform_to(telescope_frame) wrap_angle = telescope_pointing.az + 180* u.deg plt.axis('equal') plt.scatter( telescope_coords.fov_lon.deg, telescope_coords.fov_lat.deg, alpha=0.2, color='gray' ) for i, name in enumerate(['crab nebula', 'o tau', 'zet tau']): star = SkyCoord.from_name(name) star_tel = star.transform_to(telescope_frame) plt.plot(star_tel.fov_lon.deg, star_tel.fov_lat.deg, '*', ms=10) plt.annotate( s=name, xy=(star_tel.fov_lon.deg, star_tel.fov_lat.deg), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) plt.xlabel('fov_lon / {}'.format(telescope_coords.altaz.az.unit)) plt.ylabel('fov_lat / {}'.format(telescope_coords.altaz.alt.unit)) ``` ## NominalFrame Nominal coordinate frame. A Frame using a `UnitSphericalRepresentation`. This is basically the same as a `HorizonCoordinate`, but the origin is at an arbitray position in the sky. This is what astropy calls a `SkyOffsetFrame` If the telescopes are in divergent pointing, this `Frame` can be used to transform to a common system. 
- 2D reconstruction (`HillasIntersector`) is performed in this frame - 3D reconstruction (`HillasReconstructor`) doesn't need this frame Let's play a bit with 3 LSTs with divergent pointing ``` location = EarthLocation.of_site('Roque de los Muchachos') obstime = Time('2018-11-01T02:00') altaz = AltAz(location=location, obstime=obstime) crab = SkyCoord.from_name("crab nebula") # let's observe crab array_pointing = crab.transform_to(altaz) # let the telescopes point to different positions alt_offsets = u.Quantity([1, -1, -1], u.deg) az_offsets = u.Quantity([0, -2, +2], u.deg) tel_pointings = SkyCoord( alt=array_pointing.alt + alt_offsets, az=array_pointing.az + az_offsets, frame=altaz, ) camera_frames = CameraFrame( telescope_pointing=tel_pointings, # multiple pointings, so we get multiple frames focal_length=focal_length, obstime=obstime, location=location, ) nom_frame = NominalFrame(origin=array_pointing, obstime=obstime, location=location) fig, ax = plt.subplots(figsize=(15, 10)) ax.set_aspect(1) for i in range(3): cam_coord = SkyCoord(x=pix_x, y=pix_y, frame=camera_frames[i]) nom_coord = cam_coord.transform_to(nom_frame) ax.scatter( x=nom_coord.fov_lon.deg, y=nom_coord.fov_lat.deg, label=f'Telescope {i + 1}', s=30, alpha=0.15, ) for i, name in enumerate(['Crab', 'o tau', 'zet tau']): s = SkyCoord.from_name(name) s_nom = s.transform_to(nom_frame) ax.plot( s_nom.fov_lon.deg, s_nom.fov_lat.deg, '*', ms=10, ) ax.annotate( s=name, xy=(s_nom.fov_lon.deg, s_nom.fov_lat.deg), xytext=(5, 5), textcoords='offset points', color=f'C{i}', ) ax.set_xlabel(f'fov_lon / deg') ax.set_ylabel(f'fov_lat / deg') ax.legend() plt.show() ``` ## GroundFrame Ground coordinate frame. The ground coordinate frame is a simple cartesian frame describing the 3 dimensional position of objects compared to the array ground level in relation to the nomial centre of the array. 
Typically this frame will be used for describing the position on telescopes and equipment **Typical usage**: positions of telescopes on the ground (x, y, z) ``` source.subarray.peek() ``` In case a layout is selected, the following line will produce a different output from the picture above. ``` source.subarray.select_subarray(layout, name="Prod3b layout").peek() ``` ![Ground Frame](ground_frame.png) In this image all the telescope from the `gamma_test.simtel.gz` file are plotted as spheres in the GroundFrame. ## TiltedGroundFrame Tilted ground coordinate frame. The tilted ground coordinate frame is a cartesian system describing the 2 dimensional projected positions of objects in a tilted plane described by pointing_direction. The plane is rotated along the z_axis by the azimuth of the `pointing_direction` and then it is inclined with an angle equal to the zenith angle of the `pointing_direction`. This frame is used for the reconstruction of the shower core position. ![Tilted Ground Frame](tilted_ground_frame.png) This image picture both the telescopes in the GroundFrame (red) and in the TiltedGroundFrame (green) are displayed: in this case since the azimuth of the `pointing_direction` is 0 degrees, then the plane is just tilted according to the zenith angle. For playing with these and with more 3D models of the telescopes themselves, have a look at the [CREED_VTK](https://github.com/thomasgas/CREED_VTK) library.
github_jupyter
This branch is the software release for the 2019 paper: https://www.nature.com/articles/s41598-019-47795-0 See LICENSE.txt Copyright 2019 Massachusetts Institute of Technology ``` %reset -f import torch import numpy as np import matplotlib.pyplot as plt import scipy import scipy.signal import scipy.io import scipy.io.wavfile import sklearn import sklearn.metrics import sklearn.preprocessing import sklearn.feature_selection import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import os import time import datetime import getpass #import seaborn as sns import pandas as pd import hashlib from importlib import reload from glob import glob import subprocess %matplotlib inline import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 np.random.seed(0) # for reproducibility tests torch.manual_seed(0) #for reproducibility tests ## Perform auditory attention decoding using audio reconstruction or direct classification using neural networks # ## About # Greg Ciccarelli # February 3, 2018 # March 22, 2018 # June 28, 2019 from IPython.core.debugger import set_trace import nipype.pipeline.engine as pe import nipype.interfaces.utility as niu import nipype.interfaces.io as nio import getpass import sys user = getpass.getuser() # Grab the latest eeg and cochleogram for each of the subject folders collect = 'LL_HowTo_0DegreesSeparation' modality = 'neuroscan' #modality = 'dsi' ############################# save_flag = True num_predict = 1 hidden_size = 2 #hidden_size = 200 num_ch_output = 1 output_size = num_ch_output * num_predict slow_opt_flag = False #num_batch = int(1024) # bce num_batch = int(5) num_epoch = 2400 #paper num_epoch = 2 learning_rate = 1e-3 #paper weight_decay = 0 #paper file_path_net = XXX_path_to_net # paper # dry, de taillez, 20190505112434, num_context=26 file_name_net = '201806190841_1hid_b_tanh_b_htanh' file_name_get_data = '201807141456_get_noscale_dsi' loss_type = 'corr' # Dry, bce, 20190503142452, num_context=1000 
file_name_get_data = '201809272008_get_binary_conv_dry' file_name_net = '201809272028_binary_conv_dsi' loss_type = 'bce' # wet, bce, 20190504085917 num_context = 1000 file_name_net = '201809262034_binary_conv' file_name_get_data = '201809262022_get_binary_conv' loss_type = 'bce' # wet, De taillez, 20190505101057, num_context = 26 file_name_get_data = '201806221952_get_noscale' file_name_net = '201806190841_1hid_b_tanh_b_htanh' loss_type = 'corr' # wet ch sub, bce, 20190503211325, num_context = 1000 file_name_get_data = '201905031434_get_data_bce_wet2dry' file_name_net = '201809272028_binary_conv_dsi' loss_type = 'bce' # wet ch sub, de taillez, 20190505110243, num_context = 26 file_name_net = '201806190841_1hid_b_tanh_b_htanh' file_name_get_data = '201905031437_get_data_recon_wet2dry' loss_type = 'corr' file_path_name_net = os.path.join(file_path_net, file_name_net) file_path_name_get_data = XXX_path_and_name_to_get_data sys.path.append(os.path.split(file_path_name_get_data)[0]) module = __import__(os.path.split(file_path_name_get_data)[1]) reload(module) load_data = getattr(module, 'load_data') get_data = getattr(module, 'get_data') subj_folder_list = XXX_list_of_subj_folder_paths file_path_name_audio_list = [] file_path_name_eeg_list = [] for subj_folder in subj_folder_list[:]: #[:1] try: file_path_name_audio_list.append(sorted(glob(os.path.join(subj_folder, '*_Envelope100Hz.*')))[-1]) #. 
for real data file_path_name_eeg_list.append(sorted(glob(os.path.join(subj_folder, '*_EEGF*.*')))[-1]) except: print('-- missing --') print(subj_folder) print(file_path_name_audio_list) print(file_path_name_eeg_list) subj_folder # Load data audio, eeg, audio_unatt = load_data(file_path_name_audio_list[0], file_path_name_eeg_list[0]) print(audio.shape) print(eeg.shape) print(audio_unatt.shape) a = ~np.isnan(audio) fig, ax = plt.subplots(); ax.stem(np.sum(a, axis=1)); print(np.min(np.sum(a, axis=1))); idx_keep_audioTime = np.sort(np.random.permutation(num_context)[:250]) # 250 LL dct_params = {'idx_keep_audioTime': idx_keep_audioTime} # debug get data idx_sample = 0 X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=None, num_batch=None, idx_sample=idx_sample, num_context=num_context, num_predict=num_predict, dct_params=dct_params) if X is not None: print(X.shape) print(y.shape) print(z_unatt) if X is not None: fig, ax = plt.subplots(); ax.plot(X.data.numpy()[100].T); if X is not None: fig, ax = plt.subplots(); ax.plot(y.data.numpy()[:100]); # debug get data idx_sample = 0 X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=None, num_batch=None, idx_sample=idx_sample, num_context=num_context, num_predict=num_predict, dct_params=None) print(X.shape) print(y.shape) print(z_unatt) fig, ax = plt.subplots(); ax.plot(y.data.numpy()[:100, 0:9]); fig, ax = plt.subplots(); ax.plot(X.data.numpy()[:200, :10, 0]); ``` # Visualize differences print(np.nanstd(X[:, 0, 0].data.numpy(), axis=0)) fig, ax = plt.subplots(); ax.stem(np.nanmean(np.nanmean(eeg, axis=2), axis=0)); fig, ax = plt.subplots(); ax.stem(np.nanmean(np.nanstd(eeg, axis=2), axis=0)); fig, ax = plt.subplots(); ax.stem(np.nanstd(eeg, axis=2)[:, 26]); fig, ax = plt.subplots(); ax.stem(np.nanstd(audio, axis=1)); fig, ax = plt.subplots(); ax.stem(np.nanstd(audio_unatt, axis=1)); fig, ax = plt.subplots(); ax.plot(audio[0][:500]); ax.plot(audio[-1][:500]); ``` # Check availability of 
data after removing nan's eeg_1ch = np.squeeze(eeg[:, 0, :]) num_dur = np.nansum(~np.isnan(eeg_1ch), axis=1) print(num_dur) print(np.where(num_dur < num_context)) print(np.mean(num_dur[num_dur >= num_context]*0.01)) print(np.std(num_dur[num_dur >= num_context]* 0.01)) ``` # Required: Define the main processing function ``` def big_node(train, test, file_path_name_audio, file_path_name_eeg, dct_params): """Process data and make predictions. 1. Unpack parameters, define model, define data 2. Training loop 3. Evaluation 4. Save Arguments --------- train : list Integer list of training parts test : list Integer test part file_path_name_audio : string Full path and name of the audio mat file file_path_name_eeg : string Full path and name of the eeg mat file dct_params: dict Collection of auxillary parameters """ import numpy as np import scipy import scipy.io import sklearn import sklearn.preprocessing import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import datetime import time import os import matplotlib.pyplot as plt import sys from importlib import reload import hashlib from glob import glob import re import nipype ################################################################ # Unpack parameters, define model, define data ################################################################ # Setup the dnn, and create the monolithic block of data that will be used for training. def closs(x, y): xbar = torch.mean(x) ybar = torch.mean(y) num = 1. 
/ x.numel() * torch.dot(x-xbar, y-ybar) denom = torch.std(x) * torch.std(y) return -num / denom num_context = dct_params['num_context'] num_predict = dct_params['num_predict'] num_epoch = dct_params['num_epoch'] idx_eeg = dct_params['idx_eeg'] save_flag = dct_params['save_flag'] file_path_save = dct_params['file_path_save'] file_path_name_net= dct_params['file_path_name_net'] input_size = dct_params['input_size'] hidden_size = dct_params['hidden_size'] output_size = dct_params['output_size'] num_batch = dct_params['num_batch'] learning_rate = dct_params['learning_rate'] weight_decay = dct_params['weight_decay'] loss_type = dct_params['loss_type'] collect = dct_params['collect'] idx_split = dct_params['idx_split'] random_seed_flag = dct_params['random_seed_flag'] slow_opt_flag = dct_params['slow_opt_flag'] if random_seed_flag: np.random.seed(idx_split) torch.manual_seed(idx_split) else: np.random.seed(0) torch.manual_seed(0) torch.backends.cudnn.deterministic=True # Load and preprocess the data file_path_name_get_data = dct_params['file_path_name_get_data'] sys.path.append(os.path.split(file_path_name_get_data)[0]) module = __import__(os.path.split(file_path_name_get_data)[1]) reload(module) get_data = getattr(module, 'get_data') load_data = getattr(module, 'load_data') # Comment out in order to have the same val set and therefore the same train set # between runs #train = np.asarray(train)[np.random.permutation(len(train))].tolist() if 1: valset = train[-2:] print(valset) train = train[:-2] print(train) else: valset = [] # path to folder containing the class.py module sys.path.append(os.path.split(file_path_name_net)[0]) module = __import__(os.path.split(file_path_name_net)[1]) reload(module) # handle case of making changes to the module- forces reload NN = getattr(module, 'NN') model = NN(input_size, hidden_size, output_size) num_val = len(valset) num_tr = len(train) params = model.state_dict() if loss_type == 'mse': loss_fn = nn.MSELoss(size_average=True) # True 
= MSE vs False = sum squared elif loss_type == 'corr': loss_fn = closs elif loss_type == 'bce': loss_fn = nn.BCEWithLogitsLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) if False: #torch.cuda.is_available(): cuda_flag = True model.cuda() print('Using CUDA') else: print('No CUDA') cuda_flag = False loss_history = np.nan * np.zeros(num_epoch) loss_val_history = np.nan * np.zeros(num_epoch) model.train() # Turn on dropout, batchnorm #model.eval() audio, eeg, audio_unatt = load_data(file_path_name_audio, file_path_name_eeg, train=train) idx_eeg = None idx_sample = train[0] X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=idx_eeg, num_batch=None, idx_sample=idx_sample, num_context=num_context, num_predict=num_predict, dct_params=dct_params) X_all = X y_all = y for idx_sample in train[1:]: #train[1:] [1:2] print(idx_sample) X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=idx_eeg, num_batch=None, idx_sample=idx_sample, num_context=num_context, num_predict=num_predict, dct_params=dct_params) if X is not None: X_all = torch.cat((X_all, X), dim=0) y_all = torch.cat((y_all, y), dim=0) print(X_all.shape) # Outside the loop to only form conv matrix once idx_val_sample = valset[0] Xval, yval, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=idx_eeg, num_batch=None, idx_sample=idx_val_sample, num_context=num_context, num_predict=num_predict, dct_params=dct_params) ################################################################ # Training loop ################################################################ # Iterate over the dataset a fixed number of times or until an early stopping condition is reached. 
# Randomly select a new batch of training at each iteration example_val_y = np.nan example_val_z_unatt = np.nan example_val_yhat = np.nan idx_sample_list = np.nan * np.ones(num_epoch) idx_sample = train[0] # Initialize to the first training part idx_train = 0 early_stop_flag = False early_stop_counter = 0 start = time.perf_counter() t_start = datetime.datetime.now() print(t_start) while (idx_train < num_epoch) and (not early_stop_flag): if np.mod(idx_train, num_epoch/10) == 0: print('epoch %d ' % idx_train) end = time.perf_counter() t_end = datetime.datetime.now() print('Time per epoch %2.5f ticks' % ((end - start)/(num_epoch/10))) print((t_end - t_start)/(num_epoch/10)) start = time.perf_counter() t_start = datetime.datetime.now() print(t_start) idx_keep = np.random.permutation(X_all.data.size(0))[:num_batch] idx_keep = torch.from_numpy(idx_keep).type('torch.LongTensor') X_audio = X_all[idx_keep] y = y_all[idx_keep] #X_audio = X_audio + Variable(0. * torch.randn(X_audio.shape)) # Data augmentation via noise #print('-- got data--') if X_audio is not None: model.zero_grad() #print('-pre forward-') if cuda_flag: y = y.cuda() output = model.forward(X_audio.cuda()) else: output = model.forward(X_audio) loss = loss_fn(output.view(-1), y.view(-1)) optimizer.zero_grad() #print('opt zeroed') loss.backward() #print('loss.backward done') optimizer.step() loss_flag = 1 if cuda_flag: loss = loss.cpu() output = output.cpu() y = y.cpu() loss_history[idx_train] = loss_flag * loss.data.numpy() if False: #loss_history[idx_train] < 0.09: early_stop_flag = True print("early_stop!") # Check validation set performance if (len(valset) > 0) and (np.mod(idx_train, 1) == 0): #50 #print('--- val check ---') model.eval() idx_keep = np.sort(np.random.permutation(Xval.data.size(0))[:num_batch]) idx_keep = torch.from_numpy(idx_keep).type('torch.LongTensor') X = Xval[idx_keep] y = yval[idx_keep] if cuda_flag: y_att = model.forward(X.cuda()) else: y_att = model.forward(X) if cuda_flag: stat_1 = 
loss_fn(y_att.view(-1), y.cuda().view(-1)) else: stat_1 = loss_fn(y_att.view(-1), y.view(-1)) stat_1 = stat_1.data.numpy() loss_val_history[idx_train] = stat_1 model.train() example_val_y = y.cpu().data.numpy() example_val_yhat = y_att.cpu().data.numpy() idx_train = idx_train + 1 print('-- done training --') print(datetime.datetime.now()) ################################################################ # Evaluation ################################################################ # Test on the train set, then test on the test set. if True: example_tr_y = [] example_tr_yhat = [] example_tr_unatt = [] for idx_tr in train[:1]: X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=idx_eeg, num_batch=num_batch, idx_sample=idx_tr, num_context=num_context, num_predict=num_predict, dct_params=dct_params) if X is not None: model.eval() if cuda_flag: y_att = model.forward(X.cuda()) else: y_att = model.forward(X) example_tr_y.append(y.cpu().data.numpy()) example_tr_yhat.append(y_att.cpu().data.numpy()) if z_unatt is None: example_tr_unatt.append(np.array(np.nan)) else: example_tr_unatt.append(z_unatt.data.numpy()) if True: X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=idx_eeg, num_batch=None, idx_sample=test[0], num_context=num_context, num_predict=num_predict, dct_params=dct_params) if X is not None: model.eval() if cuda_flag: y_att = model.forward(X.cuda()) else: y_att = model.forward(X) example_te_y = y.cpu().data.numpy()[None, :] example_te_yhat = y_att.cpu().data.numpy()[None, :] else: example_te_y = np.nan example_te_yhat = np.nan if z_unatt is None: example_te_unatt = np.array([np.nan]) else: example_te_unatt = z_unatt.data.numpy()[None, :] ################################################################ # Save ################################################################ # Save network parameters and outputs ver_list = [] for v in [torch, np, scipy, nipype]: ver_list.append(v.__name__ + "_" + v.__version__) 
ver_list.append('python_' + sys.version) if save_flag: dct_all = {**{'loss': loss_history, 'train': train, 'test': test, 'file_path_name_audio': file_path_name_audio, 'file_path_name_eeg': file_path_name_eeg, 'valset': valset, 'loss_val_history': loss_val_history, 'idx_sample_list': idx_sample_list, 'yValAtt': example_val_y, 'yValHat': example_val_yhat, 'yValUna': example_val_z_unatt, 'yTrainAtt': example_tr_y, 'yTrainHat': example_tr_yhat, 'yTrainUna': example_tr_unatt, 'yTestAtt': example_te_y, 'yTestHat': example_te_yhat, 'yTestUna': example_te_unatt, 'envTestAtt': example_te_y, #output api compatible 'envHatAtt': example_te_yhat, #output api compatible 'envTestUna': example_te_unatt, #output api compatible 'subjID': re.search('Subj_(\d+)_', file_path_name_audio).group(1),#output api compatible 'ver_list': ver_list }, **dct_params} hashstr = '' for key, val in {**{'train': train}, **dct_params}.items(): if type(val) is str: hashstr = hashstr + key + val elif type(val) in [float, int]: hashstr = hashstr + key + str(val) elif type(val) in [list]: if type(val[0]) is str: hashstr = hashstr + key + ','.join(val) elif type(val[0]) in [float, int]: hashstr = hashstr + key + ','.join([str(i) for i in val]) hexstamp = hashlib.md5(hashstr.encode('utf')).hexdigest() now_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S') file_path_name_checkpoint = os.path.join(file_path_save, 'checkpoint_eeg2env_%s_%s.pt' % (hexstamp, now_str)) torch.save({'state_dict': model.state_dict()}, file_path_name_checkpoint) print(file_path_name_checkpoint) # Replace all None elements of dict with NaN before saving to avoid save fail. 
for key, val in dct_all.items(): if val is None: dct_all[key] = np.nan scipy.io.savemat(os.path.join(file_path_save, 'checkpoint_eeg2env_%s_%s.mat' % (hexstamp, now_str)), dct_all) model = None return model ``` # Required: Define all data splits ``` timestamp_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') # :9 same AM or PM run eval_list = [] for file_path_name_audio, file_path_name_eeg in zip(file_path_name_audio_list, file_path_name_eeg_list): print(file_path_name_audio) audio, eeg, audio_unatt = load_data(file_path_name_audio, file_path_name_eeg) # exhaustive full_set = audio.shape[0] #full_set = 10 # debug, 4 X, y, z_unatt = get_data(audio, eeg, audio_unatt=audio_unatt, idx_eeg=None, num_batch=None, idx_sample=0, num_context=num_context, num_predict=num_predict, dct_params=dct_params) input_size = np.prod(X.shape[1:]) print(input_size) for test in range(full_set): #for test in np.random.permutation(full_set).tolist(): #If running less than a full set of splits and want to see different test partitions train = sorted(list(set(range(full_set)) - set([test]))) eval_list.append([train, [test], file_path_name_audio, file_path_name_eeg, input_size]) # Optional: Test stability of training ## Can the identical network with identical inputs recover the same performance with/without different random seeds during initialization/training/optimization? 
# NOTE(review): Stability-check DEBUG override is left ACTIVE below — the comprehension
# replaces every cross-validation split in eval_list with copies of the first split, so
# every workflow node trains/tests on the same partition. Remove or guard it to restore
# the leave-one-out splits built in the previous cell.
# NOTE(review): n_splits is also hard-coded to 1 later in this cell, and file_path_save
# uses an XXX_ placeholder that must be replaced with a real path before running.
## DEBUG for Stability check ## Take eval list, first item, copy N times ## These should be identical runs of the network eval_list = [eval_list[0] for i in range(len(eval_list))] ``` # Required: Define how many of the data splits to actually run ``` n_splits = len(eval_list) #n_splits = 5 #n_splits = 2 #n_splits = 3 n_splits = 1 random_seed_flag = True #random_seed_flag = False ``` # Create workflow ``` wf = pe.Workflow(name="wf") for idx_b in range(n_splits): timestamp = '%s_%s' % (timestamp_time, hashlib.md5((('').join(eval_list[idx_b][2]+eval_list[idx_b][3])).encode('utf')).hexdigest()) file_path_save = XXX_file_path_save_with_timestamp # Create the file_path_save here to avoid race conditions in the workflow if not os.path.exists(file_path_save): os.makedirs(file_path_save) # Remember, it is MUCH faster to submit lightweight arguments to a node than to submit the entire dataset. # That's why the dataset is loaded inside big_node. node_big = pe.Node(niu.Function(input_names=['train', 'test', 'file_path_name_audio', 'file_path_name_eeg', 'dct_params'], output_names=['outputs'], function=big_node), name='big_node_%03d' % idx_b) dct_params = {'idx_eeg': np.nan * np.ones(eeg.shape[1]), 'num_context': num_context, 'num_predict' : num_predict, 'idx_split': idx_b, 'timestamp': timestamp, 'file_path_save': file_path_save, 'file_path_name_get_data': file_path_name_get_data, 'save_flag':save_flag, 'num_epoch': num_epoch, 'file_path_name_net': file_path_name_net, 'input_size': eval_list[idx_b][4], 'hidden_size': hidden_size, 'output_size': output_size, 'num_batch': num_batch, 'learning_rate': learning_rate, 'weight_decay': weight_decay, 'loss_type': loss_type, 'num_ch_output': num_ch_output, 'collect': collect, 'idx_keep_audioTime': idx_keep_audioTime, 'random_seed_flag': random_seed_flag, 'slow_opt_flag': slow_opt_flag} node_big.inputs.train = eval_list[idx_b][0] #train node_big.inputs.test = eval_list[idx_b][1] #test node_big.inputs.file_path_name_audio = 
eval_list[idx_b][2] #file_path_name_audio node_big.inputs.file_path_name_eeg = eval_list[idx_b][3] #file_path_name_eeg node_big.inputs.dct_params = dct_params wf.add_nodes([node_big]) print(file_path_save) ``` # Optional: Test main processing function ## Don't use nipype, just run the function stats = big_node(train, [test], file_path_name_audio, file_path_name_eeg, dct_params) # Required: Main Proc ``` wf.config['execution']['crashdump_dir'] = XXX_path_to_crashdumpdir wf.base_dir = XXX_path_to_base_dir wf.config['execution']['parameterize_dirs'] = False wf.config['execution']['poll_sleep_duration'] = 10 wf.config['execution']['job_finished_timeout'] = 30 run_local_flag = True run_local_flag = False if run_local_flag: eg = wf.run() else: #eg = wf.run('SLURM', plugin_args={'sbatch_args': '-p gpu --gres=gpu:tesla:2 --constraint=xeon-e5 --mem=15G'}) eg = wf.run('SLURM', plugin_args={'sbatch_args': '--constraint=xeon-e5 --exclusive -O'}) print('Done successfully') ``` # Optional: Look at network parameters from a saved output file¶ # Look at params module = __import__(os.path.split(file_path_name_net)[1]) reload(module) NN = getattr(module, 'NN') file_path_name_checkpoint = XXX_path_to_checkpoint model = NN(input_size, hidden_size, output_size) checkpoint = torch.load(file_path_name_checkpoint) model.load_state_dict(checkpoint['state_dict']) model.eval() #a = list(model.parameters()) #[print(a[i]) for i in range(len(a))] p = nn.utils.parameters_to_vector(model.parameters()) p[:100] #
github_jupyter
# Aerospike Java Client – Reading and Updating Maps *Last updated: June 22, 2021* This notebook demonstrates Java Aerospike CRUD operations (Create, Read, Update, Delete) for maps of data, focusing on server-side **read** and **update** operations. Aerospike stores records by association with a **key**. Maps contain key:value pairs. This notebook makes use of the word **mapkey** to distinguish from a record **key**. This [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) requires the Aerospike Database running locally with Java kernel and Aerospike Java Client. To create a Docker container that satisfies the requirements and holds a copy of these notebooks, visit the [Aerospike Notebooks Repo](https://github.com/aerospike-examples/interactive-notebooks). # Notebook Setup Run these first to initialize Jupyter, download the Java Client, and make sure the Aerospike Database is running. ## Import Jupyter Java Integration Make it easier to work with Java in Jupyter. ``` import io.github.spencerpark.ijava.IJava; import io.github.spencerpark.jupyter.kernel.magic.common.Shell; IJava.getKernelInstance().getMagics().registerMagics(Shell.class); ``` ## Start Aerospike Ensure Aerospike Database is running locally. ``` %sh asd ``` ## Download the Aerospike Java Client Ask Maven to download and install the project object model (POM) of the Aerospike Java Client. ``` %%loadFromPOM <dependencies> <dependency> <groupId>com.aerospike</groupId> <artifactId>aerospike-client</artifactId> <version>5.0.0</version> </dependency> </dependencies> ``` ## Start the Aerospike Java Client and Connect Create an instance of the Aerospike Java Client, and connect to the demo cluster. The default cluster location for the Docker container is *localhost* port *3000*. If your cluster is not running on your local machine, modify *localhost* and *3000* to the values for your Aerospike cluster. 
``` import com.aerospike.client.AerospikeClient; AerospikeClient client = new AerospikeClient("localhost", 3000); System.out.println("Initialized the client and connected to the cluster."); ``` # CREATING Maps in Aerospike ## Create and Print Map Data Create a string map representing fish metadata. Create an integer map containing timestamped fish observation locations. ``` import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; HashMap <String, String> mapFish = new HashMap <String, String>(); mapFish.put("name", "Annette"); mapFish.put("fruit", "Pineapple"); mapFish.put("color", "Aquamarine"); mapFish.put("tree", "Redwood"); System.out.println("Fish Map: " + mapFish); HashMap <Integer, HashMap> mapObs = new HashMap <Integer, HashMap>(); HashMap <String, Integer> mapCoords0 = new HashMap <String, Integer>(); mapCoords0.put("lat", -85); mapCoords0.put("long", -130); HashMap <String, Integer> mapCoords1 = new HashMap <String, Integer>(); mapCoords1.put("lat", -25); mapCoords1.put("long", -50); HashMap <String, Integer> mapCoords2 = new HashMap <String, Integer>(); mapCoords2.put("lat", 35); mapCoords2.put("long", 30); mapObs.put(13456, mapCoords1); mapObs.put(14567, mapCoords2); mapObs.put(12345, mapCoords0); System.out.println("Observations Map:" + mapObs); ``` ## Insert the Maps into Aerospike Insert one record in Aerospike with **Key** "koi", and **Bin Names** *mapfishbin* and *mapobsbin*. By default, Aerospike data is unsorted, however Aerospike preserves order by index when inserting data. Java HashMaps are sorted by mapkey by default. ### Create a Key Object A **Key** uniquely identifies a specific record in your Aerospike server or cluster. Each key must have a **Namespace** and optionally a **Set** name. * In Aerospike, a **Namespace** is like a relational database's tablespace. * A **Set** is like a relational database table. * A **Record** is like a row in a relational database table. 
The namespace *test* is configured on your Aerospike server or cluster. For additional information on the Aerospike Data Model, go [here](https://www.aerospike.com/docs/architecture/data-model.html). ``` import com.aerospike.client.Key; String mapSet = "mapset1"; String mapNamespace = "test"; String theKey = "koi"; Key key = new Key(mapNamespace, mapSet, theKey); System.out.println("Key created." ); ``` ### Create a Bin Object for Each Map A **Bin** is a data field in an Aerospike record. ``` import com.aerospike.client.Bin; String mapFishBinName = "mapfishbin"; String mapObsBinName = "mapobsbin"; Bin bin1 = new Bin(mapFishBinName, mapFish); Bin bin2 = new Bin(mapObsBinName, mapObs); System.out.println( "Created " + bin1 + " and " + bin2 + "."); ``` ### Create a Policy Object for Record Insertion A **Policy** tells Aerospike the intent of a database operation. For more information on policies, go [here](https://www.aerospike.com/docs/guide/policies.html). ``` import com.aerospike.client.policy.ClientPolicy; ClientPolicy clientPolicy = new ClientPolicy(); System.out.println("Created a client policy."); ``` ### Put the Map Data into Aerospike ``` client.put(clientPolicy.writePolicyDefault, key, bin1, bin2); System.out.println("Key: " + theKey + "\n" + mapFishBinName + ": " + mapFish + "\n" + mapObsBinName + ": " + mapObs ); ``` # READING Maps and Map Elements From the Server Now that the maps are in Aerospike, the client can return full or partial maps from **bin** contents. No data is modified by these ops. ## Get the Record A record can be retrieved using the **key**, **namespace**, and **set** name. In the output: * **gen** is the generation number, the number of record writes. * **exp** is the expiration counter for the record. For more information on [both generation number and expiration](https://www.aerospike.com/docs/guide/FAQ.html), see the [Aerospike FAQ](https://www.aerospike.com/docs/guide/FAQ.html). 
``` import com.aerospike.client.Record; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); System.out.println(record); ``` ## Get String Elements by Mapkey, Rank, and Value Aerospike provides **MapOperations** to read string mapkeys and values from the database. The mapFishBin is a map containing string mapkey/value pairs associated with the fish, "Koi". For more information on map operations, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html). ### Get String by Mapkey Aerospike API can be used to look up a value by mapkey. The client returns the specified value as the contents of the bin. For the list of return type options, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapReturnType.html). ``` import com.aerospike.client.Operation; import com.aerospike.client.Value; import com.aerospike.client.cdt.MapOperation; import com.aerospike.client.cdt.MapReturnType; String mapKeyToFind = "color"; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record colorString = client.operate(null, key, MapOperation.getByKey(mapFishBinName, Value.get(mapKeyToFind), MapReturnType.VALUE) ); System.out.println("The string map: " + record.getValue(mapFishBinName)); System.out.println("The " + mapKeyToFind + " in the string map is: " + colorString.getValue(mapFishBinName)); ``` ### Get Highest Rank String Aerospike's API contains operations to look up a map element by rank. For information on list ranking, go [here](https://en.wikipedia.org/wiki/List_ranking). 
``` Integer highestRank = -1; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record highestRankString = client.operate(null, key, MapOperation.getByRank(mapFishBinName, highestRank, MapReturnType.VALUE) ); System.out.println("The string map: " + record.getValue(mapFishBinName)); System.out.println("The highest rank string is: " + highestRankString.getValue(mapFishBinName)); ``` ### Get Mapkey By String Value Aerospike provides operations to look up an element by value and return the mapkey. ``` String valueToFind = "Pineapple"; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record foundMapKey = client.operate(null, key, MapOperation.getByValue(mapFishBinName, Value.get(valueToFind), MapReturnType.KEY) ); System.out.println("The string map: " + record.getValue(mapFishBinName)); System.out.println("The mapkey associated with " + valueToFind + " is: " + foundMapKey.getValue(mapFishBinName)); ``` ## Get Map Size and Integer Elements by Index and Key Range Aerospike operations can read integers associated with fish observations. The mapobsbin is a list of Latitude/Longitude pairs stored by the time of fish observation in seconds from the start of the experiment. The number of seconds, latitude, and longitude are all integers. ### Get the Number of Observations in the Map Aerospike API's size operation returns a count of the mapkeys in a map. ``` Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record sizeString = client.operate(null, key, MapOperation.size(mapObsBinName) ); System.out.println("The Observation Map: " + record.getValue(mapObsBinName)); System.out.println("The number of Observations in the Map: " + sizeString.getValue(mapObsBinName)); ``` ### Get The First Observation from the Map Aerospike API operations can look up a value by index. In Aerospike, the index operation can get one or more map elements by key order. 
Aerospike allows indexing forward from the beginning of the map using zero-based numbering. Negative numbers index backwards from the end of a map. In this example, the first element by index represents the first time the fish was observed. Because the key 12345 is before 13456 and 14567, the first element by index is 12345. For examples of indexes, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html). ``` Integer firstIdx = 0; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record firstObservation = client.operate(null, key, MapOperation.getByIndex(mapObsBinName, firstIdx, MapReturnType.KEY_VALUE) ); System.out.println("The Observation Map: " + record.getValue(mapObsBinName)); System.out.println("The First Observation: " + firstObservation.getValue(mapObsBinName)); ``` ### Get All Locations Observed Between 13,000 and 15,000 seconds. Aerospike delivers values by mapkey range. Get the latitude and longitude pairs for all observations between 13,000 and 15,000 seconds. ``` Integer lowerBound = 13000; Integer upperBound = 15000; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record rangeObservations = client.operate(null, key, MapOperation.getByKeyRange(mapObsBinName, Value.get(lowerBound), Value.get(upperBound), MapReturnType.KEY_VALUE) ); System.out.println("The Observation Map: " + record.getValue(mapObsBinName)); System.out.println("The Observations between 13000 and 15000 seconds: " + rangeObservations.getValue(mapObsBinName)); ``` # UPDATING Maps on the Aerospike Server Aerospike's **MapOperations** can also modify data in the Aerospike Database. ## Update the Fish Bin in Aerospike The Fish Bin contains metadata about the fish. ### Create a MapPolicy Java Object for the Fish Bin When modifying maps, Aerospike requires a **MapPolicy** that governs write protection and order. The default MapPolicy works for Fish Bin. 
For more information on mappolicy, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapPolicy.html). ``` import com.aerospike.client.cdt.MapPolicy; MapPolicy mapFishBinPolicy = new MapPolicy(); System.out.println("Created default MapPolicy for " + mapFishBinName + ".") ``` ### Change the Tree to Larch When new data is put into a map, Aerospike returns the size of the resulting map. ``` String treeMapkeyName = "tree"; String newTree = "Larch"; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record sizeOfMapWithNewTree = client.operate(null, key, MapOperation.put(mapFishBinPolicy, mapFishBinName, Value.get(treeMapkeyName), Value.get(newTree)) ); Record mapWithNewTree = client.get(null, key); System.out.println("Before: " + record.getValue(mapFishBinName)); System.out.println("The size after the operation: " + sizeOfMapWithNewTree.getValue(mapFishBinName)); System.out.println(" After: " + mapWithNewTree.getValue(mapFishBinName)); ``` ### Remove the Fruit When removing a mapkey:value pair, Aerospike client returns the removed data. ``` String fruitMapkeyName = "fruit"; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record valOfRemovedFruit = client.operate(null, key, MapOperation.removeByKey(mapFishBinName, Value.get(fruitMapkeyName), MapReturnType.KEY_VALUE) ); Record mapWithoutFruit = client.get(null, key); System.out.println("Before: " + record.getValue(mapFishBinName)); System.out.println("The removed mapkey/value pair: " + valOfRemovedFruit.getValue(mapFishBinName)); System.out.println("After removing the " + fruitMapkeyName + ": " + mapWithoutFruit.getValue(mapFishBinName)); ``` ### Add Bait To be sure that other scientists can catch the fish, add the fish's preferred bait to the record. 
``` String mapkeyForBait = "bait"; String valueForBait = "Mosquito Larva"; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record sizeOfRecordWithBait = client.operate(null, key, MapOperation.put(mapFishBinPolicy, mapFishBinName, Value.get(mapkeyForBait), Value.get(valueForBait)) ); Record recordWithBait = client.get(null, key); System.out.println("Before: " + record.getValue(mapFishBinName)); System.out.println("After adding Bait: " + recordWithBait.getValue(mapFishBinName)); ``` ### Put an Observation Counter in the Map The experiment continued past the original end date. The new work requires keeping track of the total number of observations. ``` String mapkeyObsCount = "Count"; Integer numObservations = 3; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record sizeOfRecordWithObsCounter = client.operate(null, key, MapOperation.put(mapFishBinPolicy, mapFishBinName, Value.get(mapkeyObsCount), Value.get(numObservations)) ); Record recordWithObsCount = client.get(null, key); System.out.println("Before: " + record.getValue(mapFishBinName)); System.out.println("After Adding the Counter: " + recordWithObsCount.getValue(mapFishBinName)); ``` ## Update the Observation Map Aerospike client can update map elements, such as integers and sub-maps. The experiment continued past the original end date. The new work requires the regular addition of new observations and keeping track of the total number of observations. ### Create a MapPolicy Object for the Observations Bin In this example, the Observations Map should be maintained as mapkey-sorted in Aerospike, but are put unordered into the database by default. When storing any map on SSD hardware, Key Ordered Maps hold a significant performance advantage over Unordered Maps, at a cost of 4 bytes of storage for metadata. The MapPolicy contains two types of configurations, **MapOrder** and **MapWriteFlags**. 
The maporder determines the sort order of the map. The mapwriteflags determine write behaviors, such as if the operation should fail when a mapkey/value already exists. For more information on maporder, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOrder.html). For more information on mapwriteflags, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapWriteFlags.html). ``` import com.aerospike.client.cdt.MapOrder; import com.aerospike.client.cdt.MapWriteFlags; Record recordObsUnordered = client.get(null, key); MapPolicy mapObsBinPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.DEFAULT); Record changeOrder = client.operate(null, key, MapOperation.setMapPolicy(mapObsBinPolicy, mapObsBinName)); Record recordObsOrdered = client.get(null, key); System.out.println("Before Sorting: " + recordObsUnordered.getValue(mapObsBinName)); System.out.println("Applied mapkey-ordered MapPolicy for " + mapObsBinName + "."); System.out.println("After Sorting: " + recordObsOrdered.getValue(mapObsBinName)); ``` ### Add a new Observation ``` int newObsTimestamp = 15678; int newObsLat = 80; int newObsLong = 110; HashMap <Integer, HashMap> mapNewObs = new HashMap <Integer, HashMap>(); HashMap <String, Integer> mapNewCoords = new HashMap <String, Integer>(); mapNewCoords.put("lat", newObsLat); mapNewCoords.put("long", newObsLong); Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record sizeOfNewObs = client.operate(null, key, MapOperation.put(mapObsBinPolicy, mapObsBinName, Value.get(newObsTimestamp), Value.get(mapNewCoords)) ); Record recordWithNewObs = client.get(null, key); System.out.println("Before: " + record.getValue(mapObsBinName)); System.out.println("The Size After Adding the Observation: " + sizeOfNewObs.getValue(mapObsBinName)); System.out.println("After Adding the Observation: " + recordWithNewObs.getValue(mapObsBinName)); ``` ### Remove the Oldest Observation by Index This 
study only maintains the three most recent observations. ``` Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record oldObs = client.operate(null, key, MapOperation.removeByIndex(mapObsBinName, firstIdx, MapReturnType.KEY_VALUE) ); Record updatedRecord = client.get(null, key); System.out.println("Before: " + record.getValue(mapObsBinName)); System.out.println("The Removed Observation: " + oldObs.getValue(mapObsBinName)); System.out.println("After Observation Removal: " + updatedRecord.getValue(mapObsBinName)); ``` ### Increment the Observation Counter When incrementing a map value, Aerospike returns the new value. ``` int incNum = 1; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record obsCount = client.operate(null, key, MapOperation.increment(mapFishBinPolicy, mapFishBinName, Value.get(mapkeyObsCount), Value.get(incNum)) ); Record updatedRecord = client.get(null, key); System.out.println("Before: " + record.getValue(mapFishBinName)); System.out.println("The New Count: " + obsCount.getValue(mapFishBinName)); System.out.println("After Increment: " + updatedRecord.getValue(mapFishBinName)); ``` # Notebook Cleanup ## Truncate the Set Truncate the set from the Aerospike Database. ``` import com.aerospike.client.policy.InfoPolicy; InfoPolicy infoPolicy = new InfoPolicy(); client.truncate(infoPolicy, mapNamespace, mapSet, null); System.out.println("Set Truncated."); ``` ## Close the Client connections to Aerospike ``` client.close(); System.out.println("Server connection(s) closed."); ``` # Code Summary ## Overview Here is a collection of all of the non-Jupyter code from this tutorial. 1. Import Java Libraries. 2. Import Aerospike Client Libraries. 3. Start the Aerospike Client. 4. Create Test Data. 5. Put Record into Aerospike. 6. Get Data from Aerospike. 1. Get the Record. 2. Get String by MapKey and Highest Rank. 3. Get MapKey by String. 3. 
Get the Number of Observations and 1st Observation By Index. 4. Get Observations by MapKey Range. 7. Update the Record in Aerospike 1. Change the Tree to a Larch 2. Remove the Fruit and add Bait. 3. Sort the Observation Map. 4. Add an Observation Counter. 5. Add a New Observation. 6. Remove the Oldest Observation. 7. Increment the Observation Counter. 8. Truncate the Set. 9. Close the Client Connections. ``` // Import Java Libraries. import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; // Import Aerospike Client Libraries. import com.aerospike.client.AerospikeClient; import com.aerospike.client.Key; import com.aerospike.client.Bin; import com.aerospike.client.policy.ClientPolicy; import com.aerospike.client.Record; import com.aerospike.client.Operation; import com.aerospike.client.Value; import com.aerospike.client.cdt.MapOperation; import com.aerospike.client.cdt.MapReturnType; import com.aerospike.client.cdt.MapPolicy; import com.aerospike.client.cdt.MapOrder; import com.aerospike.client.cdt.MapWriteFlags; import com.aerospike.client.policy.InfoPolicy; // Start the Aerospike Client. AerospikeClient client = new AerospikeClient("localhost", 3000); System.out.println("Initialized the client and connected to the cluster."); // Create Test Data. 
HashMap <String, String> mapFish = new HashMap <String, String>(); mapFish.put("name", "Annette"); mapFish.put("fruit", "Pineapple"); mapFish.put("color", "Aquamarine"); mapFish.put("tree", "Redwood"); System.out.println("Created Fish Map: " + mapFish); HashMap <Integer, HashMap> mapObs = new HashMap <Integer, HashMap>(); HashMap <String, Integer> mapCoords0 = new HashMap <String, Integer>(); mapCoords0.put("lat", -85); mapCoords0.put("long", -130); HashMap <String, Integer> mapCoords1 = new HashMap <String, Integer>(); mapCoords1.put("lat", -25); mapCoords1.put("long", -50); HashMap <String, Integer> mapCoords2 = new HashMap <String, Integer>(); mapCoords2.put("lat", 35); mapCoords2.put("long", 30); mapObs.put(13456, mapCoords1); mapObs.put(14567, mapCoords2); mapObs.put(12345, mapCoords0); System.out.println("Created Observations Map: " + mapObs); // Put Record into Aerospike. String mapSet = "mapset1"; String mapNamespace = "test"; String theKey = "koi"; String mapFishBin = "mapfishbin"; String mapObsBin = "mapobsbin"; ClientPolicy clientPolicy = new ClientPolicy(); InfoPolicy infoPolicy = new InfoPolicy(); Key key = new Key(mapNamespace, mapSet, theKey); Bin bin1 = new Bin(mapFishBin, mapFish); Bin bin2 = new Bin(mapObsBin, mapObs); client.put(clientPolicy.writePolicyDefault, key, bin1, bin2); System.out.println("Inserted Key: " + theKey + "\n " + mapFishBin + ": " + mapFish + "\n " + mapObsBin + ": " + mapObs ); System.out.println(); // Get Data from Aerospike. // 1. Get the Record. // 2. Get String by MapKey and Highest Rank. // 3. Get MapKey by String. // 3. Get the Number of Observations and 1st Observation By Index. // 4. Get Observations by MapKey Range. 
String mapKeyToFind = "color"; Integer highestRank = -1; String valueToFind = "Pineapple"; Integer firstIdx = 0; Integer lowerBound = 13000; Integer upperBound = 15000; Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record results = client.operate(null, key, MapOperation.getByKey(mapFishBin, Value.get(mapKeyToFind), MapReturnType.VALUE), MapOperation.getByRank(mapFishBin, highestRank, MapReturnType.VALUE), MapOperation.getByValue(mapFishBin, Value.get(valueToFind), MapReturnType.KEY), MapOperation.size(mapObsBin), MapOperation.getByIndex(mapObsBin, firstIdx, MapReturnType.KEY_VALUE), MapOperation.getByKeyRange(mapObsBin, Value.get(lowerBound), Value.get(upperBound), MapReturnType.KEY_VALUE) ); List<?> resultsFish = results.getList(mapFishBin); List<?> resultsObs = results.getList(mapObsBin); System.out.println("Read the Full Record From Aerospike:" + record); System.out.println("The " + mapKeyToFind + " in the string map is: " + resultsFish.get(0)); System.out.println("The highest rank string is: " + resultsFish.get(1)); System.out.println("The mapkey associated with " + valueToFind + " is: " + resultsFish.get(2)); System.out.println("The number of Observations in the Map: " + resultsObs.get(0)); System.out.println("The First Observation: " + resultsObs.get(1)); System.out.println("The Observations between 13000 and 15000 seconds: " + resultsObs.get(2)); System.out.println(); // 7. Update the Record in Aerospike // 1. Change the Tree to a Larch // 2. Remove the Fruit and add Bait. // 3. Add an Observation Counter. // 4. Sort the Observation Map. // 5. Add a New Observation. // 6. Remove the Oldest Operation. // 7. Increment the Observation Counter. 
MapPolicy mapFishBinPolicy = new MapPolicy(); MapPolicy mapObsBinPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.DEFAULT); String treeMapkeyName = "tree"; String newTree = "Larch"; String fruitMapkeyName = "fruit"; String mapkeyForBait = "bait"; String valueForBait = "Mosquito Larva"; String mapkeyObsCount = "Count"; Integer numObservations = 3; int newObsTimestamp = 15678; int newObsLat = 80; int newObsLong = 110; int incNum = 1; HashMap <Integer, HashMap> mapNewObs = new HashMap <Integer, HashMap>(); HashMap <String, Integer> mapNewCoords = new HashMap <String, Integer>(); mapNewCoords.put("lat", newObsLat); mapNewCoords.put("long", newObsLong); Key key = new Key(mapNamespace, mapSet, theKey); Record record = client.get(null, key); Record updatingRecord = client.operate(null, key, MapOperation.put(mapFishBinPolicy, mapFishBin, Value.get(treeMapkeyName), Value.get(newTree)), MapOperation.removeByKey(mapFishBin, Value.get(fruitMapkeyName), MapReturnType.KEY_VALUE), MapOperation.put(mapFishBinPolicy, mapFishBin, Value.get(mapkeyForBait), Value.get(valueForBait)), MapOperation.put(mapFishBinPolicy, mapFishBin, Value.get(mapkeyObsCount), Value.get(numObservations)), MapOperation.setMapPolicy(mapObsBinPolicy, mapObsBin), MapOperation.put(mapObsBinPolicy, mapObsBin, Value.get(newObsTimestamp), Value.get(mapNewCoords)), MapOperation.removeByIndex(mapObsBin, firstIdx, MapReturnType.KEY_VALUE), MapOperation.increment(mapFishBinPolicy, mapFishBin, Value.get(mapkeyObsCount), Value.get(incNum)) ); Record finalRecord = client.get(null, key); List<?> updateFish = updatingRecord.getList(mapFishBin); List<?> updateObs = updatingRecord.getList(mapObsBin); System.out.println("Changed " + treeMapkeyName + " to " + newTree + "; there are now " + updateFish.get(0) + " map items in " + mapFishBin); System.out.println("Removed item " + updateFish.get(1)); System.out.println("Added item [" + mapkeyForBait + "=" + valueForBait + "]; there are now " + updateFish.get(2) + " map 
items in " + mapFishBin); System.out.println("Added Observation Counter; there are now " + updateFish.get(3) + " map items in " + mapFishBin); System.out.println("Sorted " + mapObsBin); System.out.println("Added New Observation {" + newObsTimestamp + "=" + mapNewCoords + "}, there are now " + updateObs.get(1) + " map items in " + mapObsBin); System.out.println("Removed Oldest Observation: " + updateObs.get(2)); System.out.println("Incremented Observation Counter to reflect " + updateFish.get(4) + "th observation"); System.out.println(); System.out.println("After Record Edits: " + finalRecord); // Truncate the Set. client.truncate(infoPolicy, mapNamespace, mapSet, null); System.out.println("Set Truncated."); // Close the Client Connections. client.close(); ``` # Takeaway – Aerospike Does Maps Aerospike and its Java Client are up to the task of working with your map data. Its API provides rich operations to read and update list data using index, mapkey, value, and rank. Not modeled in this tutorial, Aerospike map operation also supports nested lists and maps, by assigning **CTX** or contexts to operations. For more information on contexts, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/CTX.html). For examples of contexts, go [here](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html). # What's Next? ## Next Steps Have questions? Don't hesitate to reach out if you have additional questions about working with lists at https://discuss.aerospike.com/. Want to check out other Java notebooks? 1. [Hello, World](hello_world.ipynb) 2. [Reading and Updating Lists](java-working_with_lists.ipynb) 3. [Modeling Using Lists](java-modeling__using_lists.ipynb) 4. [Aerospike Query and UDF](query_udf.ipynb) Are you running this from Binder? [Download the Aerospike Notebook Repo](https://github.com/aerospike-examples/interactive-notebooks) and work with Aerospike Database and Jupyter locally using a Docker container. 
## Additional Resources * Want to get started with Java? [Download](https://www.aerospike.com/download/client/) or [install](https://github.com/aerospike/aerospike-client-java) the Aerospike Java Client. * What other ways can we work with Maps? Take a look at [Aerospike's Map Operations](https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html). * What are Namespaces, Sets, and Bins? Check out the [Aerospike Data Model](https://www.aerospike.com/docs/architecture/data-model.html). * How robust is the Aerospike Database? Browses the [Aerospike Database Architecture](https://www.aerospike.com/docs/architecture/index.html).
github_jupyter
<a href="https://colab.research.google.com/github/Martin09/DeepSEM/blob/master/segmentation-NWs/1_nw_seg_image_prep.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # 1 - Dataset Peparation for NW Yield In this notebook we will: 1. Import our raw SEM images. 2. Crop and export these images as PNG files. 3. Upload images for labelling to a new [Labelbox](https://labelbox.com/) project. ## 1.1 - Download the dataset ``` # # Optional: Save everything to your own GoogleDrive # from google.colab import drive # drive.mount('/content/gdrive/') # %cd "/content/gdrive/My Drive/path/to/save/location" # Clone the DeepSEM repository !rm -rf DeepSEM # Remove folder if it already exists !git clone https://github.com/Martin09/DeepSEM dataset_dir = './DeepSEM/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_rawtiffs' !rm -rf $dataset_dir # Remove dataset directory if it already exists !wget https://github.com/Martin09/DeepSEM/raw/master/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_rawtiffs.zip !unzip WJ_NWs_D1-17-02-17-C_rawtiffs.zip -d $dataset_dir !rm WJ_NWs_D1-17-02-17-C_rawtiffs.zip ``` ## 1.2 - Export as PNG Here we will import the raw TIFF images and export them as PNG files. Since the magnification of these images is relatively high, we only have a a few (<100) objects per image. Thus, we do not need to break the image up into smaller images and we can use the full resolution for training and inference. ``` import glob, cv2 from google.colab.patches import cv2_imshow input_files = glob.glob('./DeepSEM/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_rawtiffs/*.tif') ``` Import a raw TIFF image to see what it looks like. ``` im = cv2.imread(input_files[0], cv2.IMREAD_GRAYSCALE) cv2_imshow(im) ``` Loop over all the raw image files and save each as a PNG file. 
``` output_directory = './DeepSEM/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_png/' !rm -rf $output_directory !mkdir $output_directory # Loop over the TIFF files for file in input_files: img = cv2.imread(file, cv2.IMREAD_GRAYSCALE) # Import the next image # Trim off the overlay bar at bottom of image img = img[:688,:] # Save as PNG file filename = output_directory + file[:-4].split('/')[-1]+'.png' print(filename) success = cv2.imwrite(filename, (img).astype('uint8')) # Save divided image as PNG if not success: print("Error, couldn't write image '{}'. Check if output directory exists!".format(filename)) ``` Visualize what an exported PNG image looks like: ``` im = cv2.imread('./DeepSEM/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_png/1200 nm pitch 60 nm edge17.png', cv2.IMREAD_GRAYSCALE) cv2_imshow(im) ``` ## 1.3 - Labelling Now we are ready to label these images for training the neural network. There are many tools available for creating labelled datasets. In this tutorial I will be using [Labelbox](https://labelbox.com/) for this purpose. Note: for those that don't care about labelling their own dataset, you can skip ahead to [Notebook 2](https://colab.research.google.com/github/Martin09/DeepSEM/blob/master/nanowire_yield/2_nw_yield_training.ipynb) where I will provide the pre-labelled data for the next steps. Moving on, let's install the labelbox API first: ``` !pip install labelbox ``` If you haven't already done so, go ahead and make a free Labelbox account. You can either upload your images to be labelled manually, or you can upload them directly using this script below. ***If you want to upload the images from this script, you need to create an API key [here](https://app.labelbox.com/account/api-keys) and paste it below:*** ``` API_KEY = '[INSERT LABELBOX API KEY HERE]' ``` Now we can make a new Labelbox project and a new dataset before uploading the sub-divided images. 
``` # Change these names if you wish project_name = 'NanowireSegmentation' dataset_name = 'WJ_NWs_D1-17-02-17-C' # Create a new project and dataset in Labelbox from labelbox import Client client = Client(API_KEY) project = client.create_project(name=project_name) dataset = client.create_dataset(name=dataset_name, projects=project) # Perform a bulk upload of the subdivided PNG files dataset_dir = './DeepSEM/segmentation-NWs/datasets/WJ_NWs_D1-17-02-17-C_png/' dataset_files = glob.glob(dataset_dir + '*.png') # Get a list of the files to upload dataset.create_data_rows(dataset_files) # Upload the files ``` After a few minutes, you should see the new project and images appear in your Labelbox account, [here](https://app.labelbox.com/projects). You can now finish setting up your Labelbox project on the website, including setting your object classes. For this tutorial, we will be doing segmentation. Therefore, be sure to ***only*** define segmentation objects (not bounding box or polygon objects, for example). [Notebook 2](https://colab.research.google.com/github/Martin09/DeepSEM/blob/master/nanostruct_seg/2_ns_seg_inference.ipynb) assumes you have finished your labelling and have exported a labelbox .JSON file with all of your bounding box labels. If you don't have your own labelled dataset don't worry, I will provide that for you. See you there!
github_jupyter
``` import pandas as pd import numpy as np import scipy.stats as scs import matplotlib.pyplot as plt %matplotlib inline import keras from keras.models import Sequential, Model, Input from keras.layers import Dense, Dropout, Activation from sklearn.model_selection import train_test_split import tensorflow as tf import requests from bs4 import BeautifulSoup import json from IPython.display import display, Image import urllib.request from PIL.ExifTags import TAGS import PIL.Image import time import keras import numpy as np import pandas as pd from PIL import Image, ImageFile from keras.applications import vgg16 from keras.preprocessing.image import load_img, img_to_array from keras.applications.imagenet_utils import decode_predictions import matplotlib.pyplot as plt %matplotlib inline from scipy.spatial import distance import random import time import urllib.request import io import glob from src.fetch_data_pipeline import extract_image_url, extract_df, download_images, load_RG_data, zip_lookup, gps_lookup import json ImageFile.LOAD_TRUNCATED_IMAGES = True def extract_image_url(pd_series): ''' Extracts image URLs from the pictures column in the RescuePets database. INPUT: Pandas Series where each item is a list of dictionaries of dictionaries?? 
OUTPUT: Pandas dataframe with animalID and imageURL ''' large_image_urls = [] animalIDs = [] for lst in pd_series: for dct in lst: large_image_urls.append(dct['largeUrl']) for url in large_image_urls: animalIDs.append(url.split('/')[-2]) return pd.DataFrame({'animalID': animalIDs,'ImageUrl': large_image_urls}) def extract_df(filepath): ''' Extracts orgId, animalID, name breed and animalLocation from RescueGroup JSON and adds imageURLs INPUT: JSON filepath, string OUTPUT: Pandas dataframes ''' df = pd.read_json(filepath, lines=True) images = extract_image_url(df.pictures) df1 = df[['orgID','animalID','name','breed','animalLocation']] # NOTE: You loose images with this concat result = pd.concat([df1, images.ImageUrl], axis=1, join_axes=[df1.index]) # Return combined dataframe and original image source dataframe return result, images def download_images(urls): ''' Downloads all images from Rescue Pets S3 bucket INPUT: Pandas Series of URLs OUTPUT: Images stored in data directory. ''' for image_url in list(urls)[3934:5001]: image_name = image_url.split('/')[-1] r = requests.get(image_url, allow_redirects = True) open('data/images/'+image_name, 'wb').write(r.content) start = time.time() download_images(combined_df.ImageUrl) end = time.time() print(end - start) ### Still working on this function def rotate_image(file): ''' Rotates images uploaded by user's smartphone via exif data. Images need to be rotated to proper orientation prior to preprocessing step. 
''' image=Image.open(file) try: for orientation in ExifTags.TAGS.keys(): if ExifTags.TAGS[orientation]=='Orientation': break exif=dict(image._getexif().items()) if exif[orientation] == 3: print('Rotate 180 degrees!') image=image.rotate(180, expand=True) elif exif[orientation] == 6: print('Rotate 270 degrees!') image=image.rotate(270, expand=True) elif exif[orientation] == 8: print('Rotate 90 degrees!') image=image.rotate(90, expand=True) image.save(file) image.close() except (AttributeError, KeyError, IndexError): # cases: image don't have getexif pass return(image) #Function to extract exif data from smartphone image and view in nice format from PIL.ExifTags import TAGS #def extract_image_data(file): filename ='' im = PIL.Image.open(filename) exifdict = im._getexif() #print(exifdict) if len(exifdict): for k in exifdict.keys(): if k in TAGS.keys(): print(TAGS[k], exifdict[k]) else: print(k, exifdict[k]) #new_pets_df = pd.read_json('data/h9DH7711_newpets_1.json', lines=True) #pets1_df = pd.read_json('data/h9DH7711_pets_1.json', lines=True) #pets2_df = pd.read_json('data/h9DH7711_pets_2.json', lines=True) #pets3_df = pd.read_json('data/h9DH7711_pets_3.json', lines=True) #pets4_df = pd.read_json('data/h9DH7711_pets_4.json', lines=True) #pets5_df = pd.read_json('data/h9DH7711_pets_5.json', lines=True) #import pdb #pdb.set_trace() df0, image0 = extract_df('data/h9DH7711_newpets_1.json') df1, image1 = extract_df('data/h9DH7711_pets_1.json') df2, image2 = extract_df('data/h9DH7711_pets_2.json') df3, image3 = extract_df('data/h9DH7711_pets_3.json') df4, image4 = extract_df('data/h9DH7711_pets_4.json') df5, image5 = extract_df('data/h9DH7711_pets_5.json') combined_df = df0.append([df1, df2, df3, df4, df5]) combined_imgs = image0.append([image1, image2, image3, image4, image5]) combined_df = combined_df.reset_index(drop=True) combined_imgs = combined_imgs.reset_index(drop=True) total_records = [df0.shape[0], df1.shape[0], df2.shape[0], df3.shape[0], df4.shape[0], 
df5.shape[0]] image_records = [image0.shape[0], image1.shape[0], image2.shape[0], image3.shape[0], image4.shape[0], image5.shape[0]] print('Total Records: ',sum(total_records)) print('Total Images: ',sum(image_records)) #combined_df.columns #combined_imgs.columns combined_imgs.head() #combined_df.tail(50) combined_imgs.ImageUrl.values[6574] #Columns to drop?: 'petUrl','drools' #Columns to rename?: animalLocation to zipcode use GeoPy to get city and state? #Activity Level change to scale from 1 to 4? #Age change to numeric scale from 1 to 4 for Baby, Young, Adult, Senior? '' = UNK? #'apartment', 'cratetrained', 'declawed' change to numeric 0,1=yes #'birthdate' to get exact age? lot of nulls #'breed','color', 'descriptionPlain'...tokenize with NLP? Combine them? #'eventempered',eagerToPlease','cats','dogs' good with cats or dogs?, change to numeric? 0,1,2=UNK #'coatLength' Med, Short, Long, ''=UNK #'contactEmail' & 'contactCellPhone'=='contactHomePhone' drop? only 1 seen in new Dog JSON...only drop after merging all JSONs!! #'contactName' foster? drop? #'exerciseNeeds' Low, Moderate, High, Not Required, ''=UNK...combine with activity level? #'description' == 'trackerImageUrl' #'eyeColor' various string descriptions lot of blanks.. merge to description? 
#'fence'..Not Required, Any Type, 3ft or 6ft #Need more info on following columns: 'altered','courtesy','lastUpdated','mediaLastUpdated','MessagePet' #df[df['eyeColor']==''].count() #df.activityLevel.isnull().count() #df[df['earType']==''].count() #type(df.breed[10]) #df.shape #df.eagerToPlease.unique() #df.pictures[698] new_pets_df.pictures[1] #df.name[df.name == 'Atlas'] #df.animalLocation[df.animalLocation =="90018"] #combined_df[combined_df.animalLocation == #type(combined_df.animalLocation[0]) wa_zip_string = '' sea_zips = [98101, 98102, 98103, 98104, 98105, 98106, 98107, 98108, 98109, 98112, 98115, 98116, 98117, 98118, 98119, 98121, 98122, 98125, 98126, 98133, 98134, 98136, 98144, 98146, 98154, 98164, 98174, 98177, 98178, 98195, 98199] seattle_zips = [] for zip in sea_zips: seattle_zips.append(str(zip)) seattle_df = combined_df.loc[combined_df.animalLocation.isin(seattle_zips)] cool = seattle_df.animalID.tolist() cool_df = combined_df.loc[combined_df.animalID.isin(cool)] #cool_df.ImageUrl #seattle_dog_imgs = cool_df.ImageUrl.tolist() #len(seattle_dog_imgs) len(combined_df.ImageUrl.tolist()) from geopy.geocoders import Nominatim def zip_lookup(zip_code): geolocator = Nominatim() location = geolocator.geocode(zip_code) city = location.address.split(',')[0].strip() state = location.address.split(',')[1].strip() return city, state pd.scatter_matrix(df, alpha=0.2, diagonal='kde', figsize=(12,12)) combined_df.ImageUrl[5000].split('/')[-1] from scipy.spatial import distance import keras import numpy as np from PIL import Image from keras.applications import vgg16, inception_v3, resnet50, mobilenet from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.applications.imagenet_utils import decode_predictions results = [] model = vgg16.VGG16(include_top = True, weights = 'imagenet') model.layers.pop() model.layers.pop() model.outputs = [model.layers[-1].output] for url in combined_df.ImageUrl[0:50]: image_path = 
'data/images/'+url.split('/')[-1] dog = load_img(image_path, target_size=(224, 224)) numpy_image = img_to_array(dog) image_batch = np.expand_dims(numpy_image, axis=0) processed_image = vgg16.preprocess_input(image_batch.copy()) feature_array = model.predict(processed_image) cosine_score = distance.cosine(feature_array.flatten(), durka.flatten()) results.append(cosine_score) dog_url = combined_df.ImageUrl[0:50] zipped_dogs = zip(dog_url.tolist(),results) sorted_zipped_dogs = zipped_dogs.sort(key = lambda t: t[1]) top_10 = sorted_zipped_dogs[0:11] for imgURL in image_name = image_url.split('/')[-1] from collections import deque import keras import numpy as np import pandas as pd from PIL import Image, ImageFile from keras.applications import vgg16 from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.applications.imagenet_utils import decode_predictions import matplotlib.pyplot as plt %matplotlib inline from scipy.spatial import distance import time import urllib.request import io from src.fetch_data_pipeline import extract_image_url, extract_df, download_images, load_RG_data, zip_lookup, gps_lookup import json ImageFile.LOAD_TRUNCATED_IMAGES = True combined_df, combined_imgs = load_RG_data() start = time.time() feature_list = [] feature_matrix = np.zeros((4096,10)) for url in combined_imgs.ImageUrl.tolist()[0:10]: dog = load_img('/Users/bil2ab/galvanize/RG5kimages/'+url.split('/')[-1], target_size=(224, 224)) numpy_image = img_to_array(dog) image_batch = np.expand_dims(numpy_image, axis=0) processed_image = vgg16.preprocess_input(image_batch.copy()) feature_array = model.predict(processed_image) #np.insert() feature_list.append(feature_array) print('durka') #doggie = np.asarray(feature_array_list) #np.save('data/RG_features', doggie) end = time.time() total_time = end-start print('Total Time: '+str(total_time)) print('All dog features vectorized!') result = np.array(feature_list) ```
github_jupyter
```
# Loan-risk classification: label-encode 'Training Data.csv', train a
# RandomForest, report accuracy/ROC-AUC, then score 'Test Data.csv' and
# write a submission file.
import pandas as pd
import numpy as np
from sklearn.utils import resample
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from collections import Counter
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.figure_factory as ff
import plotly

# Classifiers
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, RandomForestClassifier

# Model evaluation tools
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import model_selection

CATEGORICAL_COLS = ["married", "house_ownership", "car_ownership",
                    "profession", "city", "state"]

# Encode the categorical columns as integer labels.
# NOTE(review): one shared LabelEncoder is re-fitted per column (and again on
# the test file below), so class-to-integer mappings are not guaranteed to be
# consistent between train and test — consider one encoder per column.
le = LabelEncoder()
train_data = pd.read_csv('Training Data.csv')
for col in CATEGORICAL_COLS:
    train_data[col] = le.fit_transform(train_data[col])
train_data = train_data.sort_values(by='Id')

# Build a balanced 20k/20k evaluation set.
# WARNING(review): these rows are drawn from train_data itself, so the metrics
# below measure training-set performance, not generalization.
p = train_data[train_data['risk_flag'] == 1][:20000]
q = train_data[train_data['risk_flag'] == 0][:20000]
test_data = pd.concat([p, q]).sort_values(by='Id').copy()

train_data.columns

FEATURE_COLS = ['income', 'age', 'experience', 'married', 'house_ownership',
                'car_ownership', 'profession', 'city', 'state',
                'current_job_years', 'current_house_years']
X_train = train_data[FEATURE_COLS]
y_train = train_data['risk_flag']
X_test = test_data[FEATURE_COLS]
y_test = test_data['risk_flag']

# Quick look at the class balance of the first 40k training rows.
x = y_train[y_train == 1][:40000]
x.count()
(y_train[:40000] == 0)

# Train the random forest on the first 100k rows.
# n_jobs=-1 uses all available cores (the original n_jobs=1000 merely
# oversubscribed the scheduler).
Model9 = RandomForestClassifier(n_estimators=200, random_state=10, n_jobs=-1,
                                max_depth=100, bootstrap=True)
Model9.fit(X_train[:100000], y_train[:100000])

# Hard-label metrics on the balanced evaluation set.
y_pred = Model9.predict(X_test)
print(classification_report(y_pred, y_test))
print("RandomForestClassifier:>", accuracy_score(y_pred, y_test))

# Probability-based ROC-AUC (column 1 = probability of risk_flag == 1).
y_pred = Model9.predict_proba(X_test)
roc = roc_auc_score(y_test, y_pred[:, 1])
print("roc_auc -350-", roc)

# Load and encode the held-out test file with the same encoder.
output = pd.read_csv('Test Data.csv')
for col in CATEGORICAL_COLS:
    output[col] = le.fit_transform(output[col])
output

# ROC-AUC from hard labels (coarser than the probability version above).
y_pred = Model9.predict(X_test)
roc = roc_auc_score(y_test, y_pred)
print("roc_auc -350-", roc)

# Score the submission file: predicted class plus probability of class 1.
output['risk_flag'] = Model9.predict(output[FEATURE_COLS])
output['risk_flag_prob'] = Model9.predict_proba(output[FEATURE_COLS])[:, 1]

submission = output[['id', 'risk_flag']]
submission.set_index('id').to_csv('submission3.csv')

# Plot the ROC curve for the fitted model.
from sklearn.metrics import plot_roc_curve
curve = plot_roc_curve(Model9, X_test, y_test)
plt.show()
```
github_jupyter
# Ray Crash Course - Why Ray? © 2019-2020, Anyscale. All Rights Reserved ![Anyscale Academy](../images/AnyscaleAcademy_Logo_clearbanner_141x100.png) The first two lessons explored using Ray for task and actor concurrency. This lesson takes a step back and explains the challenges that led to the creation of Ray and the Ray ecosystem. The end of this lesson also has links for more or more information on Ray and Anyscale. [Ray](https://ray.io) is a system for scaling Python applications from your laptop to a cluster with relative ease. It emerged from the [RISELab](https://rise.cs.berkeley.edu/) at Berkeley in response to the problems researchers faced writing advanced ML applications and libraries that could easily scale to a cluster. These researchers found that none of the existing solutions were flexible enough and easy enough to use for their needs. Hence, Ray was born. > **Tip:** For more about Ray, see [ray.io](https://ray.io) or the [Ray documentation](https://docs.ray.io/en/latest/). ## Just Six API Methods Almost everything you do with Ray is done with just six API methods: #### `ray.init()` **Description:** Initialize Ray in your application. **Example:** ```python ray.init() # Many optional arguments discussed in lesson 06. ``` #### `@ray.remote` **Description:** Decorate a function to make it a remote _task_. Decorate a class to make it a remote _actor_. **Example:** ```python @ray.remote # Define a task def train_model(source): ... @ray.remote # Define an actor class ActivityTracker(): def record(event): ... return count</code> ``` #### `x.remote()` **Description:** Construct an actor instance or asynchronously run a task or an actor method. **Example:** ```python m_id = train_model.remote(...) # Invoke a task tracker = ActivityTracker.remote() # Construct an actor instance tr_id = tracker.record.remote(...) # Invoke an actor method ``` #### `ray.put()` **Description:** Put a value in the distributed object store. 
**Example:** ```python put_id = ray.put(my_object) ``` #### `ray.get()` **Description:** Get an object from the distributed object store, either placed there by `ray.put()` explicitly or by a task or actor method, blocking until object is available. **Example:** ```python model = ray.get(m_id) # Retrieve result of train_model task invocation count = ray.get(tr_id) # Retrieve result of tracker.record method call thing = ray.get(put_id) # Retrieve "my_object" ``` #### `ray.wait()` **Description:** Wait on a list of ids until one of the corresponding objects is available (e.g., the task completes). Return two lists, one with ids for the available objects and the other with ids for the still-running tasks or method calls. **Example:** ```python finished, running = ray.wait([m_id, tr_id]) ``` These six API methods are the essence of Ray. They provide Ray's concision, flexibility, and power. There are [other API methods](https://docs.ray.io/en/latest/package-ref.html) for various administrative and informational purposes. See [06 Exploring Ray API Calls](06-Exploring-Ray-API-Calls.ipynb). ## Why Do We Need Ray? Consider the following charts: ![Two Trends](../images/TwoTrends.png) ML/AI model sizes have grown enormously in recent years, roughly 35x every 18 months, which is considerably faster than Moore's Law! Hence, this growth is far outstripping the growth of hardware capabilities. The only way to meet the need for sufficient compute power is to go distributed, as [Ion Stoica recently wrote](https://anyscale.com/blog/the-future-of-computing-is-distributed/). At the same time, the use of Python is growing rapidly, because it is a very popular language for data science. Many of the ML/AI toolkits are Python-based. Hence, there is a pressing need for powerful, yet easy-to-use tools for scaling Python applications horizontally. This is the motivation for Ray. You saw Ray in action in lessons [01](01-Ray-Tasks.ipynb) and [02](02-Ray-Actors.ipynb). Why are tools needed? 
First, the Python interpreter itself is not designed for massive scalability and high performance. Many python libraries with these requirements use C/C++ backends to work around Python limitations, like the so-called _global interpreter lock_, which effectively makes Python scripts single threaded. Some of the most popular, general-purpose tools for this purpose include the following: * [asyncio](https://docs.python.org/3/library/asyncio.html) for _async/await_-style (coroutine) concurrency. * [multiprocessing.Pool](https://docs.python.org/3/library/multiprocessing.html?highlight=pool#module-multiprocessing.pool) for creating a pool of asynchronous workers * [joblib](https://joblib.readthedocs.io/en/latest/) for creating lightweight pipelines However, while all of them make it easier to exploit all the CPU cores on your machine, they don't provide distributed computing beyond the boundaries of your machine. In fact, Ray also provides implementations of these APIs, so you are no longer limited to the boundaries of a single machine, as we'll see in the next lesson, [04 Ray Multiprocessing](04-Ray-Multiprocessing.ipynb). Consider this image: ![ML Landscape](../images/ML-Landscape.png) It shows major tasks required in many ML-based application development and deployment, all of which typically require distributed implementations to scale large enough to process the compute and data load in a timely manner: * **Featurization:** Features are the data "attributes" that appear to be most useful for modeling the domain. * **Streaming:** New data often arrives in realtime and may be processed in realtime, too. * **Hyperameter Tuning:** What are the best kinds of models for this domain? When using neural networks, what is the ideal _architecture_ for the network? This model "metadata" is also called the _hyperparameters_. 
Since discovering the hyperparameters can be an expensive process of training lots of candidates, specialized techniques in their own right have merged for this purpose, as we'll learn in the _Ray Tune_ module. * **Training:** Once the best (or at least good enough) hyperparameters are chosen, the model has to be trained on real data and sometimes retrained periodically as new data arrives. * **Simulation:** An important part of many _reinforcement learning_ applications is running a simulator, such as a game engine or robot simulation, against which the RL system is trained to maximize the "reward" when operating in that environment or the real analog. The simulator is one example of a compute pattern that is quite a bit different from the normal "dataflow" or query-like patterns that many big data tools support well. Also, this simulator may be run many, many times as part of the hyperameter tuning or training process, requiring efficient, cluster-wide execution. * **Model Serving:** Finally, when the model is trained, it needs to be served, so that it can be applied to new data, sometimes with low latency requirements. Here is the Ray vision: ![Ray across the board](../images/ML-Landscape-Ray.png) The core Ray system, which we'll explore in this module, provides the cluster-wide scheduling of work (which we'll call _tasks_) and management of _distributed state_, another important requirement in real-world distributed systems. On top of Ray, a growing family of domain-specific libraries support many of the functions we've discussed, like the ones shown in the image. Other tutorial modules in this repo explore those libraries. * **Ray Tune:** For hyperparameter tuning. Tune integrates several optimization algorithms and integrates with many ML frameworks. * **Ray SGD:** For _stochastic gradient descent_ (SGD). This is a relatively new library, currently supporting PyTorch with other support for other systems forthcoming. 
* **Ray RLlib:** For reinforcement learning. Many of the widely-used and recent algorithms are implemented. RL often involves running and interoperating with a simulator for the environment (e.g., an actual game engine). * **Ray Serve:** Primarily targeted at model serving, but flexible enough for many scalable web service scenarios. All leverage Ray for cluster-wide scalability. All will be covered in depth in forthcoming tutorial modules. Many Ray users will never actually use the core Ray API, but instead use one or more of these domain-specific APIs. You might be one of those people ;) If you need to implement distributed applications, the current _Ray Core_ tutorial module will help you understand Ray, how it gives you the tools you need for most requirements, and how it works. Even if you never need to write code in the Ray API, this module will not only help you appreciate how Ray makes your Ray-based API work, but also how to understand and fix performance issues when they arise. ## For More Information on Ray and Anyscale * [ray.io](https://ray.io): The Ray website. In particular: * [Documentation](https://ray.readthedocs.io/en/latest/): The full Ray documentation * [Blog](https://medium.com/distributed-computing-with-ray): The Ray blog * [GitHub](https://github.com/ray-project/ray): The source code for Ray * [anyscale.com](https://anyscale.com/): The company developing Ray and these tutorials. In particular: * [Blog](https://anyscale.com/blog/): The Anyscale blog * [Events](https://anyscale.com/events/): Online events, [Ray Summit](http://raysummit.org), and meetups * [Academy](https://anyscale.com/academy/): Training for Ray and Anyscale products - What you're looking at! * [Jobs](https://jobs.lever.co/anyscale): Yes, we're hiring! * Community: * [Ray Slack](ray-distributed.slack.com) ([Click here](https://forms.gle/9TSdDYUgxYs8SA9e8) to join): The best forum for help on Ray. Use the `#tutorials` channel to ask for help on these tutorials! 
* [ray-dev mailing list](https://groups.google.com/forum/?nomobile=true#!forum/ray-dev) * [@raydistributed](https://twitter.com/raydistributed) * [@anyscalecompute](https://twitter.com/anyscalecompute) The next lesson, [Ray Multiprocessing](04-Ray-Multiprocessing.ipynb), discusses Ray's drop-in replacements for common parallelism APIs,[`multiprocessing.Pool`](https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.pool) and [`joblib`](https://joblib.readthedocs.io/en/latest/), and Ray's integration with `asyncio`.
github_jupyter
``` %matplotlib inline from cosmodc2.sdss_colors import load_umachine_processed_sdss_catalog sdss = load_umachine_processed_sdss_catalog() print(sdss.keys()) from astropy.table import Table npts = int(1e6) mock = Table() magr_min, magr_max = -26, -5 mock['Mr'] = np.random.uniform(magr_min, magr_max, npts) from cosmodc2.sdss_colors import analytical_colors __=reload(analytical_colors) # mock['gr'] = analytical_colors.g_minus_r(mock['Mr']) # mock['ri'] = analytical_colors.r_minus_i(mock['Mr']) mock['sfr_percentile'] = np.random.rand(len(mock)) mock['redshift'] = 0.0 gr, ri = analytical_colors.gr_ri_monte_carlo( mock['Mr'], mock['sfr_percentile'], mock['redshift']) mock['gr'] = gr mock['ri'] = ri mock['redshift1'] = 1.0 gr1, ri1 = analytical_colors.gr_ri_monte_carlo( mock['Mr'], mock['sfr_percentile'], mock['redshift1']) mock['gr_z1'] = gr1 mock['ri_z1'] = ri1 mock['redshift2'] = 2 gr2, ri2 = analytical_colors.gr_ri_monte_carlo( mock['Mr'], mock['sfr_percentile'], mock['redshift2']) mock['gr_z2'] = gr2 mock['ri_z2'] = ri2 fig, ax = plt.subplots(1, 1) __=ax.scatter(mock['Mr'][::20], mock['gr'][::20], s=0.1) xlim = ax.set_xlim(-10, -25) xlabel = ax.set_xlabel(r'$M_{\rm r}$') ylabel = ax.set_ylabel(r'${\rm g - r}$') title = ax.set_title(r'${\rm protoDC2\ v4:\ z = 0}$') figname = 'gr_vs_magr_z0_pdc2v4.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel], bbox_inches='tight') fig, ax = plt.subplots(1, 1) __=ax.scatter(mock['Mr'][::20], mock['ri'][::20], s=0.1) xlim = ax.set_xlim(-10, -25) xlabel = ax.set_xlabel(r'$M_{\rm r}$') ylabel = ax.set_ylabel(r'${\rm r - i}$') title = ax.set_title(r'${\rm protoDC2\ v4:\ z=0}$') figname = 'ri_vs_magr_z0_pdc2v4.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel], bbox_inches='tight') npts = int(1e6) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask __=reload(analytical_colors) magr_max = -18. 
magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_gr18 = sdss['restframe_extincted_sdss_gr'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_gr18 = mock['gr'][mock_mask] magr_max = -19.5 magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_gr19p5 = sdss['restframe_extincted_sdss_gr'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_gr19p5 = mock['gr'][mock_mask] magr_max = -21. magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_gr21 = sdss['restframe_extincted_sdss_gr'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_gr21 = mock['gr'][mock_mask] magr_max = -22.5 magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_gr22p5 = sdss['restframe_extincted_sdss_gr'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_gr22p5 = mock['gr'][mock_mask] fig, _axes = plt.subplots(2, 2, figsize=(10, 8)) ((ax1, ax2), (ax3, ax4)) = _axes axes = ax1, ax2, ax3, ax4 nbins = 40 __=ax1.hist(sdss_sample_gr18, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax1.hist(mock_sample_gr18, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') __=ax2.hist(sdss_sample_gr19p5, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax2.hist(mock_sample_gr19p5, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') __=ax3.hist(sdss_sample_gr21, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax3.hist(mock_sample_gr21, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') 
__=ax4.hist(sdss_sample_gr22p5, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax4.hist(mock_sample_gr22p5, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') for ax in axes: xlim = ax.set_xlim(0, 1.25) leg = ax.legend() ax1.set_xticklabels(['']) ax2.set_xticklabels(['']) title1 = ax1.set_title(r'$M_{\rm r} \approx -18$') title2 = ax2.set_title(r'$M_{\rm r} \approx -19.5$') title3 = ax3.set_title(r'$M_{\rm r} \approx -21$') title4 = ax4.set_title(r'$M_{\rm r} \approx -22.5$') xlabel3 = ax3.set_xlabel(r'${\rm g-r}$') xlabel4 = ax4.set_xlabel(r'${\rm g-r}$') figname = 'sdss_gr_distribution_vs_pdc2v4.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') npts = int(1e6) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask __=reload(analytical_colors) magr_max = -18. magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_ri18 = sdss['restframe_extincted_sdss_ri'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_ri18 = mock['ri'][mock_mask] magr_max = -19.5 magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_ri19p5 = sdss['restframe_extincted_sdss_ri'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_ri19p5 = mock['ri'][mock_mask] magr_max = -21. 
magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_ri21 = sdss['restframe_extincted_sdss_ri'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_ri21 = mock['ri'][mock_mask] magr_max = -22.5 magr_min = magr_max - 0.3 mask = retrieve_sdss_sample_mask( sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], magr_min, magr_max) sdss_sample_ri22p5 = sdss['restframe_extincted_sdss_ri'][mask] mock_mask = (mock['Mr'] < magr_max) & (mock['Mr'] > magr_min) mock_sample_ri22p5 = mock['ri'][mock_mask] fig, _axes = plt.subplots(2, 2, figsize=(10, 8)) ((ax1, ax2), (ax3, ax4)) = _axes axes = ax1, ax2, ax3, ax4 nbins = 40 __=ax1.hist(sdss_sample_ri18, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax1.hist(mock_sample_ri18, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') __=ax2.hist(sdss_sample_ri19p5, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax2.hist(mock_sample_ri19p5, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') __=ax3.hist(sdss_sample_ri21, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax3.hist(mock_sample_ri21, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') __=ax4.hist(sdss_sample_ri22p5, bins=nbins, alpha=0.8, normed=True, label=r'${\rm SDSS}$') __=ax4.hist(mock_sample_ri22p5, bins=nbins, alpha=0.8, normed=True, color='red', label=r'${\rm protoDC2\ v4}$') for ax in axes: xlim = ax.set_xlim(-0.1, 0.6) leg = ax.legend() ax1.set_xticklabels(['']) ax2.set_xticklabels(['']) title1 = ax1.set_title(r'$M_{\rm r} \approx -18$') title2 = ax2.set_title(r'$M_{\rm r} \approx -19.5$') title3 = ax3.set_title(r'$M_{\rm r} \approx -21$') title4 = ax4.set_title(r'$M_{\rm r} \approx -22.5$') xlabel3 = ax3.set_xlabel(r'${\rm r-i}$') xlabel4 = ax4.set_xlabel(r'${\rm r-i}$') figname = 'sdss_ri_distribution_vs_pdc2v4.pdf' 
fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') faint, bright = -21.5, -22.5 mask = (mock['Mr'] < faint) & (mock['Mr'] > bright) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask sdss_mask = retrieve_sdss_sample_mask(sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], bright, faint) num_sdss_sample = np.count_nonzero(sdss_mask) num_mock_sample = np.count_nonzero(mask) num_to_plot = int(min(num_sdss_sample, num_mock_sample)/2) sdss_indices = np.random.choice(np.arange(num_sdss_sample), num_to_plot, replace=False) sdss_sample_gr = sdss['restframe_extincted_sdss_gr'][sdss_mask][sdss_indices.astype(int)] sdss_sample_ri = sdss['restframe_extincted_sdss_ri'][sdss_mask][sdss_indices.astype(int)] mock_indices = np.random.choice(np.arange(num_mock_sample), num_to_plot, replace=False) mock_sample_gr = mock['gr'][mask][mock_indices.astype(int)] mock_sample_ri = mock['ri'][mask][mock_indices.astype(int)] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.scatter(sdss_sample_gr, sdss_sample_ri, s=0.1, color='blue') __=ax2.scatter(mock_sample_gr, mock_sample_ri, s=0.1, color='red') xlim = ax1.set_xlim(0.25, 1.1) ylim = ax1.set_ylim(0.2, 0.6) xlim = ax2.set_xlim(0.25, 1.1) ylim = ax2.set_ylim(0.2, 0.6) xlabel = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm g-r}$') ylabel = ax1.set_ylabel(r'${\rm r-i}$') title1 = ax1.set_title(r'${\rm SDSS:\ M_r \approx -22}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -22}$') figname = 'sdss_gr_ri_vs_magr22.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') faint, bright = -20.5, -21.5 mask = (mock['Mr'] < faint) & (mock['Mr'] > bright) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask sdss_mask = retrieve_sdss_sample_mask(sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], bright, faint) num_sdss_sample = np.count_nonzero(sdss_mask) num_mock_sample = np.count_nonzero(mask) num_to_plot 
= int(min(num_sdss_sample, num_mock_sample)/3) sdss_indices = np.random.choice(np.arange(num_sdss_sample), num_to_plot, replace=False) sdss_sample_gr = sdss['restframe_extincted_sdss_gr'][sdss_mask][sdss_indices.astype(int)] sdss_sample_ri = sdss['restframe_extincted_sdss_ri'][sdss_mask][sdss_indices.astype(int)] mock_indices = np.random.choice(np.arange(num_mock_sample), num_to_plot, replace=False) mock_sample_gr = mock['gr'][mask][mock_indices.astype(int)] mock_sample_ri = mock['ri'][mask][mock_indices.astype(int)] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.scatter(sdss_sample_gr, sdss_sample_ri, s=0.1, color='blue') __=ax2.scatter(mock_sample_gr, mock_sample_ri, s=0.1, color='red') xlim = ax1.set_xlim(0.25, 1.1) ylim = ax1.set_ylim(0.1, 0.65) xlim = ax2.set_xlim(0.25, 1.1) ylim = ax2.set_ylim(0.1, 0.65) xlabel = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm g-r}$') ylabel = ax1.set_ylabel(r'${\rm r-i}$') title1 = ax1.set_title(r'${\rm SDSS:\ M_r \approx -21}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -21}$') figname = 'sdss_gr_ri_vs_magr21.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') faint, bright = -19.5, -20.5 mask = (mock['Mr'] < faint) & (mock['Mr'] > bright) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask sdss_mask = retrieve_sdss_sample_mask(sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], bright, faint) num_sdss_sample = np.count_nonzero(sdss_mask) num_mock_sample = np.count_nonzero(mask) num_to_plot = min(num_sdss_sample, num_mock_sample) sdss_indices = np.random.choice(np.arange(num_sdss_sample), num_to_plot, replace=False) sdss_sample_gr = sdss['restframe_extincted_sdss_gr'][sdss_mask][sdss_indices.astype(int)] sdss_sample_ri = sdss['restframe_extincted_sdss_ri'][sdss_mask][sdss_indices.astype(int)] mock_indices = np.random.choice(np.arange(num_mock_sample), num_to_plot, replace=False) mock_sample_gr = 
mock['gr'][mask][mock_indices.astype(int)] mock_sample_ri = mock['ri'][mask][mock_indices.astype(int)] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.scatter(sdss_sample_gr, sdss_sample_ri, s=0.1, color='blue') __=ax2.scatter(mock_sample_gr, mock_sample_ri, s=0.1, color='red') xlim = ax1.set_xlim(0.15, 1.) ylim = ax1.set_ylim(0., 0.6) xlim = ax2.set_xlim(0.15, 1.) ylim = ax2.set_ylim(0., 0.6) xlabel = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm g-r}$') ylabel = ax1.set_ylabel(r'${\rm r-i}$') title1 = ax1.set_title(r'${\rm SDSS:\ M_r \approx -20}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -20}$') figname = 'sdss_gr_ri_vs_magr20.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') faint, bright = -18.5, -19.5 mask = (mock['Mr'] < faint) & (mock['Mr'] > bright) from cosmodc2.sdss_colors.sdss_completeness_model import retrieve_sdss_sample_mask sdss_mask = retrieve_sdss_sample_mask(sdss['z'], sdss['restframe_extincted_sdss_abs_magr'], bright, faint) num_sdss_sample = np.count_nonzero(sdss_mask) num_mock_sample = np.count_nonzero(mask) num_to_plot = min(num_sdss_sample, num_mock_sample) sdss_indices = np.random.choice(np.arange(num_sdss_sample), num_to_plot, replace=False) sdss_sample_gr = sdss['restframe_extincted_sdss_gr'][sdss_mask][sdss_indices.astype(int)] sdss_sample_ri = sdss['restframe_extincted_sdss_ri'][sdss_mask][sdss_indices.astype(int)] mock_indices = np.random.choice(np.arange(num_mock_sample), num_to_plot, replace=False) mock_sample_gr = mock['gr'][mask][mock_indices.astype(int)] mock_sample_ri = mock['ri'][mask][mock_indices.astype(int)] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.scatter(sdss_sample_gr, sdss_sample_ri, s=0.1, color='blue') __=ax2.scatter(mock_sample_gr, mock_sample_ri, s=0.1, color='red') xlim = ax1.set_xlim(0.1, 1.) ylim = ax1.set_ylim(0., 0.6) xlim = ax2.set_xlim(0.1, 1.) 
ylim = ax2.set_ylim(0., 0.6) xlabel = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm g-r}$') ylabel = ax1.set_ylabel(r'${\rm r-i}$') title1 = ax1.set_title(r'${\rm SDSS:\ M_r \approx -19}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -19}$') figname = 'sdss_gr_ri_vs_magr19.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel3], bbox_inches='tight') ``` ## Look at redshift-dependence ``` mock_mask16 = (mock['Mr'] < -15.5) & (mock['Mr'] > -16.5) mock_mask17 = (mock['Mr'] < -16.5) & (mock['Mr'] > -17.5) mock_mask18 = (mock['Mr'] < -17.5) & (mock['Mr'] > -18.5) mock_mask19 = (mock['Mr'] < -18.5) & (mock['Mr'] > -19.5) mock_mask20 = (mock['Mr'] < -19.5) & (mock['Mr'] > -20.5) mock_mask21 = (mock['Mr'] < -20.5) & (mock['Mr'] > -21.5) mock_mask22 = (mock['Mr'] < -21.5) & (mock['Mr'] > -22.5) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.hist(mock['gr_z2'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax1.hist(mock['gr_z1'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax1.hist(mock['gr'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') __=ax2.hist(mock['ri_z2'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax2.hist(mock['ri_z1'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax2.hist(mock['ri'][mock_mask20], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') title1 = ax1.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -20}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -20}$') xlabel1 = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm r-i}$') legend1 = ax1.legend() legend2 = ax2.legend() figname = 'analytical_colors_redshift_dependence_mr20.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel, ylabel], bbox_inches='tight') fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 
4)) __=ax1.hist(mock['gr_z2'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax1.hist(mock['gr_z1'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax1.hist(mock['gr'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') __=ax2.hist(mock['ri_z2'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax2.hist(mock['ri_z1'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax2.hist(mock['ri'][mock_mask17], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') title1 = ax1.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -17}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -17}$') xlabel1 = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm r-i}$') legend1 = ax1.legend() legend2 = ax2.legend() figname = 'analytical_colors_redshift_dependence_mr17.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel, ylabel], bbox_inches='tight') fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) __=ax1.hist(mock['gr_z2'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax1.hist(mock['gr_z1'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax1.hist(mock['gr'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') __=ax2.hist(mock['ri_z2'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=2}$', color='blue') __=ax2.hist(mock['ri_z1'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=1}$', color='green') __=ax2.hist(mock['ri'][mock_mask22], bins=100, normed=True, alpha=0.8, label=r'${\rm z=0}$', color='darkorange') title1 = ax1.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -22}$') title2 = ax2.set_title(r'${\rm protoDC2\ v4:\ M_r \approx -22}$') xlabel1 = ax1.set_xlabel(r'${\rm g-r}$') xlabel2 = ax2.set_xlabel(r'${\rm 
r-i}$') legend1 = ax1.legend() legend2 = ax2.legend() figname = 'analytical_colors_redshift_dependence_mr22.pdf' fig.savefig(figname, bbox_extra_artists=[xlabel, ylabel], bbox_inches='tight') ```
github_jupyter
# Visualisation tutorial In the [`introductory_tutorial`](introductory_tutorial.ipynb) we ran through building structural covariance network analyses using `scona`🍪. In this tutorial we'll cover some of the visualisation tools to communicate these results. Click on any of the links below to jump to that section * [Get set up](#Get-set-up) (make sure to run this section before jumping into any of the others!) * [Visualise the degree distribution: `plot_degree`](#Visualise-the-degree-distribution%3A-%3Ccode%3Eplot_degree%3C%2Fcode%3E) * [Report the global measures of the graph: `report_global_measures`](#Report-the-global-measures-of-the-graph%3A-%3Ccode%3Ereport_global_measures%3C%2Fcode%3E) * [Describe the rich club](#Describe-the-rich-club): `plot_rich_club` ## Get set up You need to run this section for all of the different examples in the notebook. So even if you want to skip to a section further down, make sure you've executed the next two code cells! ### Import the modules you need ``` import scona as scn import scona.datasets as datasets import numpy as np import networkx as nx import pandas as pd from IPython.display import display import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline %load_ext autoreload %autoreload 2 ``` ### Read in the data, build a network and calculate the network metrics If you're not sure about this step, please check out the [`introductory_tutorial`](introductory_tutorial.ipynb) notebook for more explanation. ``` # Read in sample data from the NSPN WhitakerVertes PNAS 2016 paper. 
df, names, covars, centroids = datasets.NSPN_WhitakerVertes_PNAS2016.import_data() # calculate residuals of the matrix df for the columns of names df_res = scn.create_residuals_df(df, names, covars) # create a correlation matrix over the columns of df_res M = scn.create_corrmat(df_res, method='pearson') # Initialise a weighted graph G from the correlation matrix M G = scn.BrainNetwork(network=M, parcellation=names, centroids=centroids) # Threshold G at cost 10 to create a binary graph with 10% as many edges as the complete graph G. G10 = G.threshold(10) # Create a GraphBundle object that contains the G10 graph called "original_graph" bundleGraphs = scn.GraphBundle([G10], ["original_graph"]) # Add ten random graphs to this bundle # (In real life you'd want more than 10 random graphs, # but this step can take quite a long time to run so # for the demo we just create 10) bundleGraphs.create_random_graphs("original_graph", 10) ``` --- ## Visualise the degree distribution: `plot_degree` The degree of each node is the number of edges adjacent to the node. For example if a node is connected to four other nodes then its degree is 4. If it is connected to 50 other nodes, its degree is 50. Brain networks are usually ["scale-free"](https://en.wikipedia.org/wiki/Scale-free_network) which means that their degree distribution follows a power law. You can think of them as having a "heavy tail": there are a small number of nodes that have a large number of connections. This is in contrast to - for example - an Erdős–Rényi graph where each node is connected to the others with a set, random probability. This graph is often called a [binomial graph](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) because the probability of connections follows a binomial (Yes-No) distribution. One of the first things to check for the structural covariance network analysis with `scona` is that our degree distribution shows this pattern. 
### Look at the data The degree distribution is already saved in the `G10` graph object. Let's spend a few moments showing how you can access that information. You can make a dictionary of the node ids (the dictionary key) and their degree (the dictionary value). ``` degrees = dict(G10.degree()) # Print the degree of every 50th node to show what's inside this dictionary for node_id, degree in list(degrees.items())[::50]: print ('Node: {:3d} has degree = {:2d}'.format(node_id, degree)) ``` You can see the information for a specific node from the graph itself. Although note that the degree needs to be calculated. It hasn't been added to the attributes yet. ``` # Display the nodal attributes G10.nodes[150] ``` `scona` has a command for that. Lets go ahead and add the degree to the nodal attributes....along with a few other measures. ``` # Calculate nodal measures for graph G10 G10.calculate_nodal_measures() # Display the nodal attributes G10.nodes[150] ``` Look at all that information! We only want to visualise the degree distribution at the moment though. ### Import the code you need: `plot_degree_dist` ``` # import the function to plot network measures from scona.visualisations import plot_degree_dist ``` ### Plot the degree distribution We only need the BrainNetwork graph to plot the degree distribution. #### Default settings By default we add an Erdős–Rényi random graph that has the same number of nodes as our BrainNetwork Graph for comparison. The default colours are blue for the degree distribution of the real graph and a grey line for the random graph. ``` plot_degree_dist(G10) ``` #### Without the random graph The random graph is a good sanity check that your degree distribution is not random...but it rather swamps the plot. So this example allows you to plot only the degree distribution of the real graph, without the random graph. ``` plot_degree_dist(G10, binomial_graph=False) ``` #### Save the plot You can save this figure in any location. 
You can do that by passing a file name and (optional) directory path to the `figure_name` option. If you don't set a directory path the figure will be saved in the local directory. For this tutorial we'll save the output in a `figures` folder inside this `tutorials` directory. ``` plot_degree_dist(G10, binomial_graph=False, figure_name="figures/DegreeDistribution.png") ``` ☝️ Did you see an error message? The code checks to see if the directory that you want to save your figure to actually exists. If it doesn't then it creates the directory, but gives you a little warning first to check that it isn't coming as a surprised (for example if you have tried to save your figure in the wrong place!) We have the `tutorials/figures` directory specifically ignored in this project so we shouldn't ever see changes there. **Run the cell above again**. You won't see the error the second time because the folder already exists! You made it the first time you plotted the distribution 🚀. Note that if you don't pass a file ending the file will be saved as a `png` by default. If you add a file extension allowed by `matplotlib` (eg `.jpg`, `.svg`, `.pdf` etc) then the figure will be saved in that format. #### Change the colours You can pass a pair of colours to the `plot_degree_dist` function. The first colour is that of the histogram for the real graph. The second colour is the line for the Erdős-Rényi graph. In the example below, we've chosen red and black 🎨 ``` plot_degree_dist(G10, color=["red", "black"]) ``` --- ## Report the global measures of the graph: `report_global_measures` One of the first things we want to know are how the global attributes of the network compare to those of random networks. Specifically we'll calculate: * `a`: assortativity * `C`: clustering * `E`: efficiency * `L`: shortest path * `M`: modularity * `sigma`: small world coefficient and plot a bar chart that compares the real network to the random graphs. 
### Calculate the global measures ``` # Calculate the global measures bundleGraphs_measures = bundleGraphs.report_global_measures() # Show the dataframe so we can see the measures display(bundleGraphs_measures) ``` Now you have everything to plot the **network measures** of the BrainNetwork Graph and compare these measures to random measures values obtained from 10 random graphs stored inside the graph bundle `bundleGraphs`. ### Import the code you need: `plot_network_measures` ``` # import the function to plot network measures from scona.visualisations import plot_network_measures ``` ### Plot the measures There are 2 required parameters for the `plot_network_measures` function: 1. a `GraphBundle` object (e.g. `bundleGraphs`) 2. the name of the *real graph* in your `GraphBundle` (e.g. `"original_graph"`) #### Default settings The default colours are blue and grey, and by default the error bars show 95% confidence intervals. ``` plot_network_measures(bundleGraphs, original_network="original_graph") ``` #### Save the figure You'll probably want to save the beautiful figure you've made! You can do that by passing a file name and (optional) directory path to the `figure_name` option. If you don't set a directory path the figure will be saved in the local directory. For this tutorial we'll save the output in a `figures` folder inside this `tutorials` directory. For fun, we'll also adjust the colours to make the real network orange (`#FF4400`) and the random network turquoise (`#00BBFF`). ``` plot_network_measures(bundleGraphs, "original_graph", figure_name="figures/NetworkMeasuresDemo", color=["#FF4400", "#00BBFF"]) ``` #### Hide the legend You might not want to show the legend. That's fine! We'll also use this example to save an `svg` file. ``` plot_network_measures(bundleGraphs, "original_graph", figure_name="figures/NetworkMeasuresDemoNoLegend.svg", show_legend=False) ``` #### Only show the original graph You might not want to show the random graphs. 
In this case you have to create a new graph bundle that only contains the real graph, and pass that to the `plot_network_measures` function. For this example we've also changed the colour to green (to show off 😉). ``` # Create a new graph bundle realBundle = scn.GraphBundle([G10], ["original_graph"]) plot_network_measures(realBundle, original_network = "original_graph", color=["green"]) ``` #### Change the type of error bars The variance of measures obtained from random graphs is - by default - shown as the 95% confidence interval. They're calculated by bootstrapping the random graphs. There's more information in the [seaborn documentation](https://seaborn.pydata.org/generated/seaborn.barplot.html) if you're curious. But you don't have to calculate them. You can plot the standard deviations instead if you'd prefer. (These are a bit larger than the 95% confidence intervals so they're a bit easier to see in the plot below.) ``` plot_network_measures(bundleGraphs, original_network="original_graph", ci="sd") ``` Alternatively you could show the 99% confidence interval. ``` plot_network_measures(bundleGraphs, original_network="original_graph", ci=99) ``` ### Run with 100 random graphs You can't publish results with 10 random graphs. These don't give meaningful variations. So let's add 90 more random graphs. (This still isn't enough, but much better than 10! We'd recommend that you run 1000 random graphs for publication quality results.) This takes some time (around 5 minutes) so the cell below is commented out by default. Remove the `#` at the start of each of the lines below to run the commands yourself. ``` #bundleGraphs.create_random_graphs("original_graph", 90) #print (len(bundleGraphs)) ``` Congratulations! 🎉 You created additional 90 random graphs, to give you a total of 100 random graphs and 1 real graph, and you managed to answer to some of your emails while waiting. 
Here's a beautiful plot of your network measures with 95% confidence intervals....which you can't see because the random networks are all so similar to each other 🤦 ``` plot_network_measures(bundleGraphs, original_network="original_graph") ``` ------------------------------------------------------------------ ## Describe the rich club Brain networks often have a ["rich club"](https://en.wikipedia.org/wiki/Rich-club_coefficient#Definition). The rich club is a measure of the extent to which nodes in the graph with the highest degree (the largest number of connections) are preferentially connected to other highly connected nodes. This network measure is fantastically named because a rich club exists in so many other networks too. * Popular people in a social network are likely to be friends with each other. * Wealthy people are likely to trade with each other. * Major airports have lots of flights between each other. In the brain, the "hubs" of the network - the high degree nodes - may also be the ones coordinating complex actions, and so they need to connect with each other to efficiently send messages to different specialised regions. One thing that's a little confusing is that - to plot the rich club values per degree along with the random rich club values created from Random Networks with a preserved degree distribution ### Report the rich club coefficients The rich club coefficient is calculated as a function of a "cut off" degree. $\phi(k) = \frac{2 E_k}{N_k (N_k - 1)}$ So the first thing to do is calculate the rich club coefficient ($\phi$) for all possible values of $k$. ``` # Calculate the rich club coefficients and then write them a data frame rich_club_df = bundleGraphs.report_rich_club() # Show the first few entries in the data frame display(rich_club_df.iloc[:5, :6]) # Show the last few entries in the data frame display(rich_club_df.iloc[-5:, :6]) ``` The index column in the data frame above is the cut off degree value. 
You may be wondering where `105` comes from! The answer is in figuring out what the smallest and most highly connected sub-graph would be. Logically that would be the connection between the two nodes that have the highest and second highest degree. Take a look at the top five degree values for the`original_graph`: ``` # Show the degree of the 5 most connected nodes display (G10.report_nodal_measures().sort_values(by='degree', ascending=False).loc[:, ['name', 'degree']].head()) ``` The two most connected nodes in our graph have degree `126` and `106`. So the maximum degree cut off must be `105` otherwise we'd be left with a single node by themselves! ### Visualise the rich club coefficient You can see from the data frames above that for very low degree cut-off values the rich club coefficient in the real graph is very similar to that of the random graphs. When we only consider the highest possible connections, the random and the real graphs have either a rich club coefficient of exactly 1 (the maximum value) or 0 (the minimum value). This comes from the fact that we've maintained the degree distribution for every node, but switched *which* nodes the edges connect. Both of these observations makes sense! * The _whole_ graph can't be a member of the rich club: by definition it has to be a separate group of highly connected nodes. * If you get rid of a huge number of nodes (by cutting off the graph at a very high degree threshold) then you'll either have all or none of the connections between the highly connected nodes. The most interesting information is therefore in the middle of the dataframe! And that's where our visualisation is really helpful. ### Import the code you need: `plot_rich_club` ``` # import the function to plot rich club values from scona.visualisations import plot_rich_club ``` ### Plot the rich club The rich club doesn't make much sense unless you compare it to random graphs, so we pass a BrainBundle and the name of the original network. 
#### Default settings The default colours are blue for the real graph and grey for the random graph, with lighter grey for the confidence interval of the random graphs. We don't save the plot by default. ``` plot_rich_club(bundleGraphs, original_network="original_graph") ``` #### Save the figure Passing a file name and (optional) directory path to the `figure_name` option. If you don't set a directory path the figure will be saved in the local directory. In this case we've given the figure a `.jpg` suffix. (By default the figures are usually saved as `.png`.) For fun, we'll also adjust the colours to make the real network cornflower blue (`#6495ed`) and the random network coral (`#ff7f50`). (Thank you Alex, Liberty and Chris H at Neurohackademy 2019 for the suggestions!) ``` plot_rich_club(bundleGraphs, original_network="original_graph", figure_name="figures/RichClub.png", color=["#6495ed", "#ff7f50"]) ``` #### Hide the legend If you don't want to show the legend, that's easy to do.....although it seems slightly odd... to not tell folks what's going on 🤷 ``` plot_rich_club(bundleGraphs, original_network="original_graph", figure_name="figures/RichClub.png", color=["#6495ed", "#ff7f50"], show_legend=False) ``` #### Adjust the x and y axis values The axes always start at (0,0) and by default the maximum values are set according to the data. The maximum degree on the x axis and 1.1 on the y axis. (The maximum rich club value is 1.0 but adding a little extra space at the top looks good.) You aren't likely to need to change the y axis values, but you might have a time when you want to specifically set the maximum value for the x axis, for example if you want to compare two networks on two different plots and have them have the same dimensions. `scona` has an option for that! 
``` plot_rich_club(bundleGraphs, original_network="original_graph", x_max=130) ``` Yeah, it looks slightly strange that the rich club curve stops early, but at least you'd be able to compare it to a different plot. ---------------------- ## Thank you! That's all we have for this demo. We hope it's been useful.
github_jupyter
# Sentiment Analysis with an RNN In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. >Using an RNN rather than a strictly feedforward network is more accurate since we can include information about the *sequence* of words. Here we'll use a dataset of movie reviews, accompanied by sentiment labels: positive or negative. <img src="assets/reviews_ex.png" width=40%> ### Network Architecture The architecture for this network is shown below. <img src="assets/network_diagram.png" width=40%> >**First, we'll pass in words to an embedding layer.** We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the Word2Vec lesson. You can actually train an embedding with the Skip-gram Word2Vec model and use those embeddings as input, here. However, it's good enough to just have an embedding layer and let the network learn a different embedding table on its own. *In this case, the embedding layer is for dimensionality reduction, rather than for learning semantic representations.* >**After input words are passed to an embedding layer, the new embeddings will be passed to LSTM cells.** The LSTM cells will add *recurrent* connections to the network and give us the ability to include information about the *sequence* of words in the movie review data. >**Finally, the LSTM outputs will go to a sigmoid output layer.** We're using a sigmoid function because positive and negative = 1 and 0, respectively, and a sigmoid will output predicted, sentiment values between 0-1. We don't care about the sigmoid outputs except for the **very last one**; we can ignore the rest. We'll calculate the loss by comparing the output at the last time step and the training label (pos or neg). 
--- ### Load in and visualize the data ``` import numpy as np # read data from text files with open('data/reviews.txt', 'r') as f: reviews = f.read() with open('data/labels.txt', 'r') as f: labels = f.read() print(reviews[:2000]) print() print(labels[:20]) ``` ## Data pre-processing The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit. You can see an example of the reviews data above. Here are the processing steps, we'll want to take: >* We'll want to get rid of periods and extraneous punctuation. * Also, you might notice that the reviews are delimited with newline characters `\n`. To deal with those, I'm going to split the text into each review using `\n` as the delimiter. * Then I can combined all the reviews back together into one big string. First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words. ``` from string import punctuation print(punctuation) # get rid of punctuation reviews = reviews.lower() # lowercase, standardize all_text = ''.join([c for c in reviews if c not in punctuation]) # split by new lines and spaces reviews_split = all_text.split('\n') all_text = ' '.join(reviews_split) # create a list of words words = all_text.split() words[:30] ``` ### Encoding the words The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network. > **Exercise:** Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers **start at 1, not 0**. 
> Also, convert the reviews to integers and store the reviews in a new list called `reviews_ints`. ``` # feel free to use this import from collections import Counter word_counter = Counter(words) ## Build a dictionary that maps words to integers sorted_vocab = sorted(word_counter, key=word_counter.get, reverse=True) #start from 1 to respect 0 padding vocab_to_int = {word: i for i, word in enumerate(sorted_vocab, 1)} ## use the dict to tokenize each review in reviews_split ## store the tokenized reviews in reviews_ints reviews_ints = [[vocab_to_int[word] for word in review.split()] for review in reviews_split] reviews_ints[:3] ``` **Test your code** As a text that you've implemented the dictionary correctly, print out the number of unique words in your vocabulary and the contents of the first, tokenized review. ``` # stats about vocabulary print('Unique words: ', len((vocab_to_int))) # should ~ 74000+ print() # print tokens in first review print('Tokenized review: \n', reviews_ints[:1]) ``` ### Encoding the labels Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1. > **Exercise:** Convert labels from `positive` and `negative` to 1 and 0, respectively, and place those in a new list, `encoded_labels`. ``` # 1=positive, 0=negative label conversion encoded_labels = [1 if label == 'positive' else 0 for label in labels.split()] encoded_labels[:30] ``` ### Removing Outliers As an additional pre-processing step, we want to make sure that our reviews are in good shape for standard processing. That is, our network will expect a standard input text size, and so, we'll want to shape our reviews into a specific length. We'll approach this task in two main steps: 1. Getting rid of extremely long or short reviews; the outliers 2. Padding/truncating the remaining data so that we have reviews of the same length. 
<img src="assets/outliers_padding_ex.png" width=40%> Before we pad our review text, we should check for reviews of extremely short or long lengths; outliers that may mess with our training. ``` # outlier review stats review_lens = Counter([len(x) for x in reviews_ints]) print("Zero-length reviews: {}".format(review_lens[0])) print("Maximum review length: {}".format(max(review_lens))) ``` Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. We'll have to remove any super short reviews and truncate super long reviews. This removes outliers and should allow our model to train more efficiently. > **Exercise:** First, remove *any* reviews with zero length from the `reviews_ints` list and their corresponding label in `encoded_labels`. ``` print('Number of reviews before removing outliers: ', len(reviews_ints)) ## remove any reviews/labels with zero length from the reviews_ints list. filtered_reviews_ints = [review_int for review_int in reviews_ints if len(review_int) > 0] encoded_filtered_labels = [encoded_labels[i] for i, review_int in enumerate(reviews_ints) if len(review_int) > 0] reviews_ints = filtered_reviews_ints encoded_labels = encoded_filtered_labels print('Number of reviews after removing outliers: ', len(reviews_ints)) ``` --- ## Padding sequences To deal with both short and very long reviews, we'll pad or truncate all our reviews to a specific length. For reviews shorter than some `seq_length`, we'll pad with 0s. For reviews longer than `seq_length`, we can truncate them to the first `seq_length` words. A good `seq_length`, in this case, is 200. > **Exercise:** Define a function that returns an array `features` that contains the padded data, of a standard size, that we'll pass to the network. * The data should come from `review_ints`, since we want to feed integers to the network. * Each row should be `seq_length` elements long. 
* For reviews shorter than `seq_length` words, **left pad** with 0s. That is, if the review is `['best', 'movie', 'ever']`, `[117, 18, 128]` as integers, the row will look like `[0, 0, 0, ..., 0, 117, 18, 128]`. * For reviews longer than `seq_length`, use only the first `seq_length` words as the feature vector. As a small example, if the `seq_length=10` and an input review is: ``` [117, 18, 128] ``` The resultant, padded sequence should be: ``` [0, 0, 0, 0, 0, 0, 0, 117, 18, 128] ``` **Your final `features` array should be a 2D array, with as many rows as there are reviews, and as many columns as the specified `seq_length`.** This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data. ``` def pad_features(reviews_ints, seq_length): ''' Return features of review_ints, where each review is padded with 0's or truncated to the input seq_length. ''' ## implement function #features = [[0]*(seq_length - len(review_int)) + # review_int[:seq_length] for review_int in reviews_ints] #features = np.array(features) #more elegant solution features = np.zeros((len(reviews_ints), seq_length), dtype=int) for i, row in enumerate(reviews_ints): features[i, -len(row):] = np.array(row)[:seq_length] return features # Test your implementation! seq_length = 200 features = pad_features(reviews_ints, seq_length=seq_length) ## test statements - do not change - ## assert len(features)==len(reviews_ints), "Your features should have as many rows as reviews." assert len(features[0])==seq_length, "Each feature row should contain seq_length values." # print first 10 values of the first 30 batches print(features[:30,:10]) ``` ## Training, Validation, Test With our data in nice shape, we'll split it into training, validation, and test sets. > **Exercise:** Create the training, validation, and test sets. 
* You'll need to create sets for the features and the labels, `train_x` and `train_y`, for example. * Define a split fraction, `split_frac` as the fraction of data to **keep** in the training set. Usually this is set to 0.8 or 0.9. * Whatever data is left will be split in half to create the validation and *testing* data. ``` split_frac = 0.8 ## split data into training, validation, and test data (features and labels, x and y) split_train = int(len(features) * split_frac) split_val = int(len(features) * 0.1) train_x = features[:split_train] train_y = encoded_labels[:split_train] train_y = np.array(train_y) val_x = features[split_train:split_train + split_val] val_y = encoded_labels[split_train:split_train + split_val] val_y = np.array(val_y) test_x = features[split_train + split_val:] test_y = encoded_labels[split_train + split_val:] test_y = np.array(test_y) ## print out the shapes of your resultant feature data print(f"Train set: {train_x.shape}\n" f"Validation set: {val_x.shape}\n" f"Test set: {test_x.shape}\n") ``` **Check your work** With train, validation, and test fractions equal to 0.8, 0.1, 0.1, respectively, the final, feature data shapes should look like: ``` Feature Shapes: Train set: (20000, 200) Validation set: (2500, 200) Test set: (2500, 200) ``` --- ## DataLoaders and Batching After creating training, test, and validation data, we can create DataLoaders for this data by following two steps: 1. Create a known format for accessing our data, using [TensorDataset](https://pytorch.org/docs/stable/data.html#) which takes in an input set of data and a target set of data with the same first dimension, and creates a dataset. 2. Create DataLoaders and batch our training, validation, and test Tensor datasets. ``` train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y)) train_loader = DataLoader(train_data, batch_size=batch_size) ``` This is an alternative to creating a generator function for batching our data into full batches. 
```
import torch
from torch.utils.data import TensorDataset, DataLoader

# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
valid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))

# dataloaders
batch_size = 50

# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)

# obtain one batch of training data
dataiter = iter(train_loader)
# use the builtin next(); the DataLoader iterator's .next() method was removed
# in newer PyTorch releases, so dataiter.next() raises AttributeError there
sample_x, sample_y = next(dataiter)

print('Sample input size: ', sample_x.size()) # batch_size, seq_length
print('Sample input: \n', sample_x)
print()
print('Sample label size: ', sample_y.size()) # batch_size
print('Sample label: \n', sample_y)
```

---
# Sentiment Network with PyTorch

Below is where you'll define the network.

<img src="assets/network_diagram.png" width=40%>

The layers are as follows:
1. An [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) that converts our word tokens (integers) into embeddings of a specific size.
2. An [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) defined by a hidden_state size and number of layers
3. A fully-connected output layer that maps the LSTM layer outputs to a desired output_size
4. A sigmoid activation layer which turns all outputs into a value 0-1; return **only the last sigmoid output** as the output of this network.

### The Embedding Layer

We need to add an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) because there are 74000+ words in our vocabulary. It is massively inefficient to one-hot encode that many classes. So, instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using Word2Vec, then load it here.
But, it's fine to just make a new layer, using it for only dimensionality reduction, and let the network learn the weights. ### The LSTM Layer(s) We'll create an [LSTM](https://pytorch.org/docs/stable/nn.html#lstm) to use in our recurrent network, which takes in an input_size, a hidden_dim, a number of layers, a dropout probability (for dropout between multiple layers), and a batch_first parameter. Most of the time, you're network will have better performance with more layers; between 2-3. Adding more layers allows the network to learn really complex relationships. > **Exercise:** Complete the `__init__`, `forward`, and `init_hidden` functions for the SentimentRNN model class. Note: `init_hidden` should initialize the hidden and cell state of an lstm layer to all zeros, and move those state to GPU, if available. ``` # First checking if GPU is available train_on_gpu=torch.cuda.is_available() if(train_on_gpu): print('Training on GPU.') else: print('No GPU available, training on CPU.') import torch.nn as nn class SentimentRNN(nn.Module): """ The RNN model that will be used to perform Sentiment analysis. """ def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5): """ Initialize the model by setting up the layers. """ super(SentimentRNN, self).__init__() self.output_size = output_size self.n_layers = n_layers self.hidden_dim = hidden_dim # define all layers self.embed = nn.Embedding(vocab_size, embedding_dim) self.rnn = nn.LSTM(embedding_dim, self.hidden_dim, self.n_layers, dropout=drop_prob, batch_first=True) self.fc = nn.Linear(self.hidden_dim, self.output_size) self.sigmoid = nn.Sigmoid() def forward(self, x, hidden): """ Perform a forward pass of our model on some input and hidden state. 
""" x = self.embed(x) x, hidden = self.rnn(x, hidden) x = self.fc(x.contiguous().view(-1, self.hidden_dim)) x = self.sigmoid(x).view(x.size(0), -1) sig_out = x[:, -1] # return last sigmoid output and hidden state return sig_out, hidden def init_hidden(self, batch_size): ''' Initializes hidden state ''' # Create two new tensors with sizes n_layers x batch_size x hidden_dim, # initialized to zero, for hidden state and cell state of LSTM weight = next(self.parameters()).data hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) if (train_on_gpu): hidden[0] = hidden[0].cuda() hidden[1] = hidden[1].cuda() return hidden ``` ## Instantiate the network Here, we'll instantiate the network. First up, defining the hyperparameters. * `vocab_size`: Size of our vocabulary or the range of values for our input, word tokens. * `output_size`: Size of our desired output; the number of class scores we want to output (pos/neg). * `embedding_dim`: Number of columns in the embedding lookup table; size of our embeddings. * `hidden_dim`: Number of units in the hidden layers of our LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc. * `n_layers`: Number of LSTM layers in the network. Typically between 1-3 > **Exercise:** Define the model hyperparameters. ``` # Instantiate the model w/ hyperparams vocab_size = len(vocab_to_int) + 1 output_size = 1 embedding_dim = 512 hidden_dim = 246 n_layers = 3 net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers) print(net) ``` --- ## Training Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. You can also add code to save a model by name. >We'll also be using a new kind of cross entropy loss, which is designed to work with a single Sigmoid output. 
[BCELoss](https://pytorch.org/docs/stable/nn.html#bceloss), or **Binary Cross Entropy Loss**, applies cross entropy loss to a single value between 0 and 1. We also have some data and training hyparameters: * `lr`: Learning rate for our optimizer. * `epochs`: Number of times to iterate through the training dataset. * `clip`: The maximum gradient value to clip at (to prevent exploding gradients). ``` # loss and optimization functions lr=0.001 criterion = nn.BCELoss() optimizer = torch.optim.Adam(net.parameters(), lr=lr) # training params epochs = 4 # 3-4 is approx where I noticed the validation loss stop decreasing counter = 0 print_every = 100 clip=5 # gradient clipping # move model to GPU, if available if(train_on_gpu): net.cuda() net.train() # train for some number of epochs for e in range(epochs): # initialize hidden state h = net.init_hidden(batch_size) # batch loop for inputs, labels in train_loader: counter += 1 if(train_on_gpu): inputs, labels = inputs.cuda(), labels.cuda() # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history h = tuple([each.data for each in h]) # zero accumulated gradients net.zero_grad() # get the output from the model output, h = net(inputs, h) # calculate the loss and perform backprop loss = criterion(output.squeeze(), labels.float()) loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. 
nn.utils.clip_grad_norm_(net.parameters(), clip) optimizer.step() # loss stats if counter % print_every == 0: # Get validation loss val_h = net.init_hidden(batch_size) val_losses = [] net.eval() for inputs, labels in valid_loader: # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history val_h = tuple([each.data for each in val_h]) if(train_on_gpu): inputs, labels = inputs.cuda(), labels.cuda() output, val_h = net(inputs, val_h) val_loss = criterion(output.squeeze(), labels.float()) val_losses.append(val_loss.item()) net.train() print("Epoch: {}/{}...".format(e+1, epochs), "Step: {}...".format(counter), "Loss: {:.6f}...".format(loss.item()), "Val Loss: {:.6f}".format(np.mean(val_losses))) ``` --- ## Testing There are a few ways to test your network. * **Test data performance:** First, we'll see how our trained model performs on all of our defined test_data, above. We'll calculate the average loss and accuracy over the test data. * **Inference on user-generated data:** Second, we'll see if we can input just one example review at a time (without a label), and see what the trained model predicts. Looking at new, user input data like this, and predicting an output label, is called **inference**. 
``` # Get test data loss and accuracy test_losses = [] # track loss num_correct = 0 # init hidden state h = net.init_hidden(batch_size) net.eval() # iterate over test data for inputs, labels in test_loader: # Creating new variables for the hidden state, otherwise # we'd backprop through the entire training history h = tuple([each.data for each in h]) if(train_on_gpu): inputs, labels = inputs.cuda(), labels.cuda() # get predicted outputs output, h = net(inputs, h) # calculate loss test_loss = criterion(output.squeeze(), labels.float()) test_losses.append(test_loss.item()) # convert output probabilities to predicted class (0 or 1) pred = torch.round(output.squeeze()) # rounds to the nearest integer # compare predictions to true label correct_tensor = pred.eq(labels.float().view_as(pred)) correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy()) num_correct += np.sum(correct) # -- stats! -- ## # avg test loss print("Test loss: {:.3f}".format(np.mean(test_losses))) # accuracy over all test data test_acc = num_correct/len(test_loader.dataset) print("Test accuracy: {:.3f}".format(test_acc)) ``` ### Inference on a test review You can change this test_review to any text that you want. Read it and think: is it pos or neg? Then see if your model predicts correctly! > **Exercise:** Write a `predict` function that takes in a trained net, a plain text_review, and a sequence length, and prints out a custom statement for a positive or negative review! * You can use any functions that you've already defined or define any helper functions you want to complete `predict`, but it should just take in a trained net, a text review, and a sequence length. ``` # negative test review test_review_neg = 'The worst movie I have seen; acting was terrible and I want my money back. This movie had bad acting and the dialogue was slow.' 
def predict(net, test_review, sequence_length=200):
    '''
    Prints out whether a given review is predicted to be positive or negative
    in sentiment, using a trained model.

    params:
    net - A trained net
    test_review - a review made of normal text and punctuation
    sequence_length - the padded length of a review
    '''
    # print custom response based on whether test_review is pos/neg
    net.eval()

    # preprocess exactly like the training data:
    # lowercase, strip punctuation, map words to ints, pad to sequence_length
    test_review = test_review.lower()
    test_review = ''.join([c for c in test_review if c not in punctuation])
    features = pad_features([[vocab_to_int[word] for word in test_review.split()]], sequence_length)
    features = torch.from_numpy(features)
    # bug fix: only move the batch to the GPU when one is available --
    # the unconditional .cuda() crashed on CPU-only machines, while the
    # rest of the notebook guards every transfer with train_on_gpu
    if(train_on_gpu):
        features = features.cuda()

    batch_size = features.size(0)
    hidden = net.init_hidden(batch_size)
    output, hidden = net(features, hidden)

    # sigmoid output is a probability; round to the nearest class (0 or 1)
    pred = torch.round(output.squeeze())
    return "Positive" if pred.item()==1 else "Negative"

# positive test review
test_review_pos = 'This movie had the best acting and the dialogue was so good. I loved it.'

# call function
# try negative and positive reviews!
seq_length=200
print(predict(net, test_review_neg, seq_length))
print(predict(net, test_review_pos, seq_length))
```

### Try out test_reviews of your own!

Now that you have a trained model and a predict function, you can pass in _any_ kind of text and this model will predict whether the text has a positive or negative sentiment. Push this model to its limits and try to find what words it associates with positive or negative. Later, you'll learn how to deploy a model like this to a production environment so that it can respond to any kind of user data put into a web app!

```
test_reviews_custom = ['This movie was incredible.', 'This movie was incredible, in the worst way.']

for r in test_reviews_custom:
    print(predict(net, r, seq_length))
```
github_jupyter
# Youtube LIFX Tiles Project

Project to Display Youtube Subscriber Count using googleAPIs

Canvas - 1 LIfx Tilechain w/ 5 Tiles

Requires GoogleAPI key with permissions to Youtube dataset

destroy and create new API key using https://developers.google.com/youtube/registering_an_application

## Future Projects
- ability to rotate Youtube logo in the case of a minor checkpoint. If Subs is divisible by 10 then rotate the logo on Tile[0] and flash a +10 across the screen before continuing.
- ability to flash and scroll text on a major checkpoint. If subs is divisible by 100 rotate the logo on Tile[0]. Flash the number of Subs. +100 across the screen.
- ability to flash and scroll text on a huge checkpoint. If subs is divisible by 1000 make fireworks appear. :)

## To-do list
Need to clear tile color map before writing new number

```
#install requests and inflect if necessary
!pip install requests inflect
```

# Section for Project Code

Using the helper functions above and from other projects, we will put together a small project to display the number of subscribers based on a 30 minute refresh rate ( refresh rate will be variable )

```
import requests
import json
import inflect
from lifxlan import *
#from random import randint, betavariate
from time import sleep

# create a file called secrets.py and place your googleAPI key in a var
# called youtube_api_key -- DO NOT POST THIS TO GITHUB
from secrets import youtube_api_key

from pylifxtiles import actions
from pylifxtiles import objects
from pylifxtiles.alphanum import nums
from pylifxtiles import colors

channel_name = 'UCQHfJyIROQhDFUOJKVBiLog'
# SECURITY FIX: a literal API key was hard-coded here, overriding the value
# imported from secrets.py (and contradicting the warning above). The literal
# has been removed -- revoke the leaked key and keep the real one only in
# secrets.py. The duplicate `from secrets import ...` line was also dropped.
my_tile = 'T1'


def main():
    # find the target TileChain light on the LAN and drive the display loop
    target_tilechain = my_tile
    lan = LifxLAN()
    tilechain_lights = lan.get_tilechain_lights()
    print(len(tilechain_lights))
    if len(tilechain_lights) != 0:
        for tile in tilechain_lights:
            if tile.get_label() == target_tilechain:
                print(tile.label)
                #if tile.get_label() == 'TEST':
                target_tilechain = tile
        duration_ms = 1000
        try:
            #original_colors = reset_tiles(T1)
            run = 0
            target_color_map = actions.reset_tiles(target_tilechain)
            blank_tile = actions.blank_tile()
            original_colors = [actions.blank_tile()]*5
            #objects.draw_youtube(target_tilechain,0)
            while(True):
                #T1.set_tile_colors(0,youtube,rapid=True)
                subs = get_subs(channel_name, youtube_api_key)
                # draw one digit word per tile, starting at tile 1
                tile = 1
                for number in subs:
                    #blank_tile= actions.blank_tile()
                    print(number)
                    for led in nums[number]:
                        target_color_map[tile][led] = (32767, 2056, 65535, 3500)
                    target_tilechain.set_tile_colors(tile, target_color_map[tile])
                    print(tile)
                    tile += 1
                run += 1
                print('This is run ' + str(run) + ' with ' + str(subs) + ' subscribers')
                # sleep 20 minutes (1200 s) between refreshes
                # (previous comment said "1/2h", which did not match the value)
                sleep(1200)
        except KeyboardInterrupt:
            print("Done.")
    else:
        print("No TileChain lights found.")


def get_subs(channel_name, api_key):
    # query the YouTube Data API v3 channels endpoint for subscriber count
    # and return each digit spelled out as an English word
    num_of_subs = []
    data = requests.get("https://www.googleapis.com/youtube/v3/channels?part=statistics&id="+channel_name+"&key="+api_key)
    subs = data.json()['items'][0]['statistics']['subscriberCount']
    for i in subs:
        p = inflect.engine()
        num_of_subs.append(p.number_to_words(int(i)))
    return num_of_subs


if __name__=="__main__":
    main()
```
github_jupyter
# H2O Tutorial: Breast Cancer Classification Author: Erin LeDell Contact: erin@h2o.ai This tutorial steps through a quick introduction to H2O's Python API. The goal of this tutorial is to introduce through a complete example H2O's capabilities from Python. Also, to help those that are accustomed to Scikit Learn and Pandas, the demo will be specific call outs for differences between H2O and those packages; this is intended to help anyone that needs to do machine learning on really Big Data make the transition. It is not meant to be a tutorial on machine learning or algorithms. Detailed documentation about H2O's and the Python API is available at http://docs.h2o.ai. ## Install H2O in Python ### Prerequisites This tutorial assumes you have Python 2.7 installed. The `h2o` Python package has a few dependencies which can be installed using [pip](http://pip.readthedocs.org/en/stable/installing/). The packages that are required are (which also have their own dependencies): ```bash pip install requests pip install tabulate pip install scikit-learn ``` If you have any problems (for example, installing the `scikit-learn` package), check out [this page](https://github.com/h2oai/h2o-3/blob/master/h2o-docs/src/product/howto/FAQ.md#python) for tips. ### Install h2o Once the dependencies are installed, you can install H2O. We will use the latest stable version of the `h2o` package, which is called "Tibshirani-3." The installation instructions are on the "Install in Python" tab on [this page](http://h2o-release.s3.amazonaws.com/h2o/rel-tibshirani/3/index.html). ```bash # The following command removes the H2O module for Python (if it already exists). pip uninstall h2o # Next, use pip to install this version of the H2O Python module. pip install http://h2o-release.s3.amazonaws.com/h2o/rel-tibshirani/3/Python/h2o-3.6.0.3-py2.py3-none-any.whl ``` ## Start up an H2O cluster In a Python terminal, we can import the `h2o` package and start up an H2O cluster. 
``` import h2o # Start an H2O Cluster on your local machine h2o.init() ``` If you already have an H2O cluster running that you'd like to connect to (for example, in a multi-node Hadoop environment), then you can specify the IP and port of that cluster as follows: ``` # This will not actually do anything since it's a fake IP address # h2o.init(ip="123.45.67.89", port=54321) ``` ## Download Data The following code downloads a copy of the [Wisconsin Diagnostic Breast Cancer dataset](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29). We can import the data directly into H2O using the Python API. ``` csv_url = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wisc/wisc-diag-breast-cancer-shuffled.csv" data = h2o.import_file(csv_url) ``` ## Explore Data Once we have loaded the data, let's take a quick look. First the dimension of the frame: ``` data.shape ``` Now let's take a look at the top of the frame: ``` data.head() ``` The first two columns contain an ID and the resposne. The "diagnosis" column is the response. Let's take a look at the column names. The data contains derived features from the medical images of the tumors. ``` data.columns ``` To select a subset of the columns to look at, typical Pandas indexing applies: ``` columns = ["id", "diagnosis", "area_mean"] data[columns].head() ``` Now let's select a single column, for example -- the response column, and look at the data more closely: ``` data['diagnosis'] ``` It looks like a binary response, but let's validate that assumption: ``` data['diagnosis'].unique() data['diagnosis'].nlevels() ``` We can query the categorical "levels" as well ('B' and 'M' stand for "Benign" and "Malignant" diagnosis): ``` data['diagnosis'].levels() ``` Since "diagnosis" column is the response we would like to predict, we may want to check if there are any missing values, so let's look for NAs. 
To figure out which, if any, values are missing, we can use the `isna` method on the diagnosis column. The columns in an H2O Frame are also H2O Frames themselves, so all the methods that apply to a Frame also apply to a single column. ``` data.isna() data['diagnosis'].isna() ``` The `isna` method doesn't directly answer the question, "Does the diagnosis column contain any NAs?", rather it returns a 0 if that cell is not missing (Is NA? FALSE == 0) and a 1 if it is missing (Is NA? TRUE == 1). So if there are no missing values, then summing over the whole column should produce a summand equal to 0.0. Let's take a look: ``` data['diagnosis'].isna().sum() ``` Great, no missing labels. Out of curiosity, let's see if there is any missing data in this frame: ``` data.isna().sum() ``` The next thing I may wonder about in a binary classification problem is the distribution of the response in the training data. Is one of the two outcomes under-represented in the training set? Many real datasets have what's called an "imbalanace" problem, where one of the classes has far fewer training examples than the other class. Let's take a look at the distribution, both visually and numerically. ``` # TO DO: Insert a bar chart or something showing the proportion of M to B in the response. data['diagnosis'].table() ``` Ok, the data is not exactly evenly distributed between the two classes -- there are almost twice as many Benign samples as there are Malicious samples. However, this level of imbalance shouldn't be much of an issue for the machine learning algos. (We will revisit this later in the modeling section below). ``` n = data.shape[0] # Total number of training samples data['diagnosis'].table()['Count']/n ``` ## Machine Learning in H2O We will do a quick demo of the H2O software -- trying to predict malignant tumors using various machine learning algorithms. 
### Specify the predictor set and response The response, `y`, is the 'diagnosis' column, and the predictors, `x`, are all the columns aside from the first two columns ('id' and 'diagnosis'). ``` y = 'diagnosis' x = data.columns del x[0:1] x ``` ### Split H2O Frame into a train and test set ``` train, test = data.split_frame(ratios=[0.75], seed=1) train.shape test.shape ``` ### Train and Test a GBM model ``` # Import H2O GBM: from h2o.estimators.gbm import H2OGradientBoostingEstimator ``` We first create a `model` object of class, `"H2OGradientBoostingEstimator"`. This does not actually do any training, it just sets the model up for training by specifying model parameters. ``` model = H2OGradientBoostingEstimator(distribution='bernoulli', ntrees=100, max_depth=4, learn_rate=0.1) ``` The `model` object, like all H2O estimator objects, has a `train` method, which will actually perform model training. At this step we specify the training and (optionally) a validation set, along with the response and predictor variables. ``` model.train(x=x, y=y, training_frame=train, validation_frame=test) ``` ### Inspect Model The type of results shown when you print a model, are determined by the following: - Model class of the estimator (e.g. GBM, RF, GLM, DL) - The type of machine learning problem (e.g. binary classification, multiclass classification, regression) - The data you specify (e.g. `training_frame` only, `training_frame` and `validation_frame`, or `training_frame` and `nfolds`) Below, we see a GBM Model Summary, as well as training and validation metrics since we supplied a `validation_frame`. Since this a binary classification task, we are shown the relevant performance metrics, which inclues: MSE, R^2, LogLoss, AUC and Gini. Also, we are shown a Confusion Matrix, where the threshold for classification is chosen automatically (by H2O) as the threshold which maximizes the F1 score. 
The scoring history is also printed, which shows the performance metrics over some increment such as "number of trees" in the case of GBM and RF. Lastly, for tree-based methods (GBM and RF), we also print variable importance. ``` print(model) ``` ### Model Performance on a Test Set Once a model has been trained, you can also use it to make predictions on a test set. In the case above, we passed the test set as the `validation_frame` in training, so we have technically already created test set predictions and performance. However, when performing model selection over a variety of model parameters, it is common for users to break their dataset into three pieces: Training, Validation and Test. After training a variety of models using different parameters (and evaluating them on a validation set), the user may choose a single model and then evaluate model performance on a separate test set. This is when the `model_performance` method, shown below, is most useful. ``` perf = model.model_performance(test) perf.auc() ``` ### Cross-validated Performance To perform k-fold cross-validation, you use the same code as above, but you specify `nfolds` as an integer greater than 1, or add a "fold_column" to your H2O Frame which indicates a fold ID for each row. Unless you have a specific reason to manually assign the observations to folds, you will find it easiest to simply use the `nfolds` argument. When performing cross-validation, you can still pass a `validation_frame`, but you can also choose to use the original dataset that contains all the rows. We will cross-validate a model below using the original H2O Frame which we call `data`. ``` cvmodel = H2OGradientBoostingEstimator(distribution='bernoulli', ntrees=100, max_depth=4, learn_rate=0.1, nfolds=5) cvmodel.train(x=x, y=y, training_frame=data) ``` ### Grid Search One way of evaluting models with different parameters is to perform a grid search over a set of parameter values. 
For example, in GBM, here are three model parameters that may be useful to search over: - `ntrees`: Number of trees - `max_depth`: Maximum depth of a tree - `learn_rate`: Learning rate in the GBM We will define a grid as follows: ``` ntrees_opt = [5,50,100] max_depth_opt = [2,3,5] learn_rate_opt = [0.1,0.2] hyper_params = {'ntrees': ntrees_opt, 'max_depth': max_depth_opt, 'learn_rate': learn_rate_opt} ``` Define an `"H2OGridSearch"` object by specifying the algorithm (GBM) and the hyper parameters: ``` from h2o.grid.grid_search import H2OGridSearch gs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params = hyper_params) ``` An `"H2OGridSearch"` object also has a `train` method, which is used to train all the models in the grid. ``` gs.train(x=x, y=y, training_frame=train, validation_frame=test) ``` ### Compare Models ``` print(gs) # print out the auc for all of the models for g in gs: print(g.model_id + " auc: " + str(g.auc())) #TO DO: Compare grid search models ```
github_jupyter
## Data Science Homework __Objective__: Develop a predictive model based on the provided Order and Online customer behavior data (data.zip). The analysis can be done in R or Python and should be presented in an R-Studio Notebook or Jupyter Notebook. The assignment should produce a multi-class classification supervised learning model. It is up to you to design the analysis and provide a rationale of chosen approach. Feel free to use any open source tools or functions (out of the box or your own) that will facilitate the analysis. In your workflow, please touch on each of the following areas: 1) Exploration and understanding of the data sets 2) Feature engineering 3) Feature selection 4) Model design and sampling 5) Model generation 6) Model evaluation 7) Summary of results: 2-3 paragraphs textual summary It is not necessary to produce a highly predictive model, but, rather, to illustrate your understanding and practical knowledge of the model building process. ### Data Sets Table order.csv 263278 obs. of 6 variables: |Columns|Data|Column Description| |----|----|----| |custno | int 18944 18944 18944 36096 1 6401 25601 57601 2 2 ... |Customer number | |ordno | int 64694 28906 114405 62681 1 8187 41198 112311 70848 2 ... |Order number | |orderdate| POSIXct, format: "2016-11-27 20:57:20" "2017-04-23 21:31:03" |Order date | |prodcat2 | int NA NA NA NA NA NA NA NA NA NA ... |Product category -detail | |prodcat1 | int 1 1 1 1 1 1 1 1 1 1 ... |Product category | |revenue | num 76.4 130.7 139.2 72.5 100.2 ... |Revenue | Table: online.csv 954774 obs. of 7 variables: |Columns|Data|Column Description| |----|----|----| |session| int 419542 3030130 2638740 880408 2612179 880953 418956 281663 26191 1363670 ... |online session key| |visitor| int 140970 14501 419353 90673 191542 419268 14938 419163 419163 14464 ... |Online visitor key| |dt| POSIXct, format: "2016-09-16 05:03:23" ... |Online activity date| |custno| int 3840 70400 21248 39168 47616 47616 47872 49920 49920 54784 ... 
|Customer number| |category| int 1 1 1 1 1 1 1 1 1 1 ... |Online browsing category| |event1 | int NA NA NA NA NA NA NA NA NA NA ... |Online event 1| |event2 | int 1 1 1 1 1 1 1 1 1 1 ... |Online event 2| ___ #### 1. Exploratory Analysis The [load_database.ipynb notebook](load_database.ipynb) import CSV files into MySQL database on my localhost. The [exploratory_analysis.ipynb notebook](exploratory_analysis.ipynb) explores the given dataset features prior to feature engineering. Summary statistics, charts and time series analysis are included in the notebook. The main patterns in the dataset include: * Revenue is approximately uniformly distirbuted across purchase orders. * Categorical variables (prodcat1, prodcat2, event1, event2) have highly unbalanced distirbution. * From the __Order__ table, we learn that revenue and order counts (aggregated by day) follow regular seasonality pattern. The low point occurs in October and November, which is followed by the Christmas shopping spree season (from year 2015-12 to 2018-12). * From the __Online__ table, we learn that online activity (aggregated by day) correlates with the revenue, and the three categories of activity follow suite. Furthermore, label 3~9 in event2 column follow the same seasonality pattern, where as label 1, 2, and 10 do not. * In contrast, revenue per order do not exhibit seasonality pattern. <p align="center"> <img src="figs/seasonality.png" width="1000"> </p> ___ #### 2. Feature Engineering Feature engineering was performed in MySQL engine. The CSV files are loaded into MySQL database in the [load_database.ipynb notebook](load_dataset.ipynb). Indices are created on the *custno* columns. Because the goal is to predict __event2__ in the *Online* table, each row in the *Online* table gives a label, and our classifier must only use information earlier than a given row to make prediction. Four tables are generated using the following scripts, resulting in __50__ features. 
Among the 50 features, 7 features are categorical features. The categorical features are one-hot encoded. ``` mysql < feature_group_1.sql -uprivateuser -p mysql < feature_group_2.sql -uprivateuser -p mysql < feature_group_3.sql -uprivateuser -p mysql < feature_group_4.sql -uprivateuser -p ``` [Group 1 (17 features)](feature_group_1.sql) features aggregates transaction data prior to an online event. For example, when user becomes active online (registering a row in the *Online* table), we ask the following questions: how much money has the user spent in the past, what is the biggest order and smallest order he ever placed, what is the revenue per order, and what are the product categories he purchased in the past? [Group 2 (24 features)](feature_group_2.sql) features join the *Online* table against itself. Among those samples features include the number of category 1, 2 and 3 event in the past, number of event 1 and event 2 in the past, and the number of missing values. [Group 3 (3 features)](feature_group_3.sql) features ask about the latest record: what category, event1 and event2 were last recorded? [Group 4 (3 features)](feature_group_4.sql) features ask about the latest transaction: how much did the user spend in the last order, and what product categories did he buy? The full feature set is loaded in the feature_engineering [notebook](feature_engineering.ipynb). Most features follows exponential distribution, only revenue-per-order is normally distributed. For such observation, neural net and linear model that assume normal distribution will not work well. The dataset is shuffled and split into 90% training set, 5% hold-out validation set (development set) and 5% test set. The original index of the three sets are stored in three separate tables. When loading a particular set, the index is simply joined with the feature tables. The dataset handling is implemented in the [helpers.py](helpers.py) module. 
<p align="center"> <img src="figs/features.png" width="1000"> </p> ___ #### 3. Features & Model Selection Three candidate models are trained on a 100k training set and a 10k dev set. Among the three models, gradient boosting machine performs best, without tuning hyper-parameters. The 10-class accuracy score and feature importance ranking are summarized below. All features are included in the GBM model, because it is robust in high dimension. |Model|Accuracy|Notebook| |----|----|-------------| |Baseline| 0.2585 || |Stochastic Gradient Descent | 0.2065 |[Link](sgd_basic.ipynb) | |Random Forests | 0.2983 |[Link](random_forest_basic.ipynb) | |Gradient Boosting Machine | 0.387 |[Link](gbm_basic.ipynb) | <p align="center"> <img src="figs/features_ranking.png" width="1000"> </p> ___ #### 4. Model Training & Evaluation GBM takes a long time to train. The [GBM model](gbm_benchmark_2.ipynb) is trained on the original features in the CSV files only, and yields an accuracy score of 0.3905 on the full training set. The second bench mark is trained on the 43 numeric features, and boosts the accuracy to 0.4354. The confusion matrix shows When the one-hot encoding of categorical features are included, the dimension increases from 43 to __276__. The [basic version](gbm_benchmark_1.ipynb) of GBM, which is trained on 100k data points, shows that the inclusion of categorical features boosts 10-class accuracy from 0.3607 to 0.387. If the model is trained on the full dataset with categorical features, similar boost is expected. However, the increase of dimension also slows down the training process. With 273 features, it takes over an hour to train the GBM on 100k data points on a CPU machine. If trained on the full 950k data points, training could take days. Due to the time constraint, no further hyperparameter tuning is attempted. The confusion matrix shows that class 1 and class 7 are the dominant classes. Precision is well-balanced across 10 classes. 
Precisions for class 1 and 9 are particularly high. Where as recall exhibit greater variability. Class 9 and 10 have the highest recalls. The model is particularly good at predicting class 9. <p align="center"> <img src="figs/confusion_matrix.png" width="1000"> </p> __10-class precision__ <p align="center"> <img src="figs/precision.png" width="200"> </p> __10-class recall__ <p align="center"> <img src="figs/recall.png" width="180"> </p> ___ #### 5. Conclusion The input tables are event logs. The information we can use to predict an event is limited to what is available prior to the event timestamp. For each row in the *Online*, we have a label *event2*, which takes on one of 10 classes. The classes are highly unbalanced, with class 7 dominating (38.5%). Feature engineering was performed in MySQL engine, which offers the greatest degree of flexibility and efficiency. A total of 50 features are generated, 7 of which are categorical. After one-hot encoding, the feature diension is expanded to 273. Upon closer inspection, most features follow exponential distribution. The only exceptions are revenue-per-order (normal distribution) and last revenue (uniform distribution). Furthermore, strong seasonality pattern was observed. So it is necessary to generate a *month* feature, which has 12 cardinality, to capture the seasonality effect. Three models were tried on a subsample of the dataset. Unsurprisingly, stochastic gradient descent classifier performed worst, since it relies on the assumption of normal distribution. Random forest barely beats the naive baseline. Gradient boosting machine performed the best. Without one-hot encoding of categorical features, GBM improving the 10-class accuracy from 0.385 to 0.4354. Where as precision is evenly distirbuted across 10 class (ranging between 0.4 and 0.6), recall is more skewed (between 0.08 and 0.87). Class 9 has the best precision (0.685). Class 7 has the best recall (0.872). 
Ablative analysis shows that the newly engineered features are responsible for the improvement in accuracy from 0.385 to 0.4354. Further improvement is expected if categorical features are included. Unfortunately, this will take too long to train, given the time constraint of the assignment.
github_jupyter
## Exercise 3.16 Setting the beta hyper-parameters II

(Source: Draper.) Suppose $θ ∼ β(α_1,α_2)$ and we believe that $\mathbb{E}[θ] = m$ and $p(\mathcal{l} < θ < u) = 0.95$. Write a program that can solve for $α_1$ and $α_2$ in terms of $m$, $\mathcal{l}$ and $u$. Hint: write $α_2$ as a function of $α_1$ and $m$, so the pdf only has one unknown; then write down the probability mass contained in the interval as an integral, and minimize its squared discrepancy from 0.95. What values do you get if $m = 0.15$, $\mathcal{l} = 0.05$ and $u = 0.3$? What is the equivalent sample size of this prior? (NOTE: this is a strange way of phrasing things. I think it means that if we started with an uninformative prior, what sample size would result in this distribution.)

$$ \alpha_2 = \frac{\alpha_1(1-m)}{m} $$

$$ \begin{aligned} \int_l^u\beta(\theta; \alpha_1,\alpha_2)\,d\theta & = \int_l^u\frac{\theta^{\,\alpha_1-1}(1-\theta)^{\frac{(1-m)\alpha_1}{m}-1}}{B(\alpha_1, \frac{1-m}{m}\alpha_1)}\,d\theta = 0.95 \end{aligned} $$

We will minimize the squared difference between the probability mass in the interval $[l, u]$ and 0.95 to arrive at the desired result: i.e.
$\min \left(\int_l^up(\theta)d\theta-0.95\right)^2$ ``` import matplotlib.pyplot as plt import numpy as np from scipy import integrate from scipy.special import beta import seaborn as sns sns.set(font_scale=1.2) def alpha_2(alpha_1, m): return alpha_1*(1-m)/m def _beta(alpha_1, m): return beta(alpha_1, alpha_2(alpha_1, m)) def expr(theta, alpha_1, m): return theta**(alpha_1-1)*(1-theta)**(alpha_2(alpha_1, m)-1) def integrand(theta, alpha_1, m): return expr(theta, alpha_1, m) / _beta(alpha_1, m) def func_val(alpha_1, m, l, u): integral = (integrate.quad(integrand, l, u, args=(alpha_1, m))) final = (integral[0]-0.95)**2 return final alpha_1 = 0.3 m = 0.15 l = 0.05 u = 0.3 func_val(alpha_1, m, l, u) ``` ### Visualization ``` x = np.linspace(1/10000, 10, 10000) y = np.zeros(x.shape) for count, alpha in enumerate(x): y[count] = func_val(alpha, m, l, u) f, ax = plt.subplots(figsize=(8, 6)) ax.plot(x, y) ax.set_xlabel('alpha') ax.set_ylabel('objective function') ax.set_title('Objective function visualization'); ``` ### Optimization Perform the optimization of our constrained objective function. Constraint is that $\alpha_1 > 0$. 
```
from scipy.optimize import minimize

opt_result = minimize(
    func_val,
    x0 = 0.25,
    args=(m, l, u),
    method='L-BFGS-B',
    jac=None,
    bounds=((0, None),),
    tol=None,
    callback=None,
    options={
        'disp': None,
        'maxls': 20,
        'iprint': -1,
        'gtol': 1e-05,
        'eps': 1e-08,
        'maxiter': 15000,
        'ftol': 2.220446049250313e-09,
        'maxcor': 10,
        'maxfun': 15000}
)

alpha1_opt = opt_result.x
opt_value = func_val(alpha1_opt, m, l, u)
# bug fix: the print referenced an undefined name `alpha_opt`
# (NameError); the variable bound above is `alpha1_opt`
print(f'Optimum argument: {alpha1_opt}, optimum value: {opt_value}')
```

### Visualization near optimum

```
x = np.linspace(opt_result.x * 0.75, opt_result.x * 1.25, 1000)
y = np.zeros(x.shape)
for count, element in enumerate(x):
    y[count] = func_val(element, m, l, u)

f, ax = plt.subplots(figsize=(8, 6))
ax.plot(x, y)
ax.set_xlabel('alpha_1')
ax.set_ylabel('Objective function')
ax.set_title('Objective function near optimal value');
```

### Calculate parameters

```
alpha2_opt = alpha_2(alpha1_opt, m)
print(f'Optimal values of alpha_1 = {alpha1_opt}, alpha_2={alpha2_opt}')
```

This is approximately equivalent to the Beta prior corresponding to a sample size $N_1=\alpha_1$, $N=N_1+\alpha_2$ which gives $N_1 = 4$, $N = 29$ rounding to integers.
github_jupyter
# NSWClassifier ## Importing Packages ``` from mlots.models import NSWClassifier from sklearn.model_selection import GridSearchCV from scipy.io import arff import matplotlib.pyplot as plt import pandas as pd import numpy as np import warnings from sklearn.metrics import accuracy_score warnings.filterwarnings("ignore") import matplotlib %matplotlib inline font = {'size' : 22} matplotlib.rc('font', **font) ``` ## Loading Data Here we are loading the ``SyntheticControl`` dataset. The datasets are in two ``.arff`` files with pre-defined train and test splits. The following code reads the two files stores the ``X`` (time-series data) and ``y`` (labels), into their specific train and test sets. *** ``` name = "SyntheticControl" dataset = arff.loadarff(f'../input/{name}/{name}_TRAIN.arff'.format(name=name))[0] X_train = np.array(dataset.tolist(), dtype=np.float32) y_train = X_train[: , -1] X_train = X_train[:, :-1] dataset = arff.loadarff(f'../input/{name}/{name}_TEST.arff'.format(name=name))[0] X_test = np.array(dataset.tolist(), dtype=np.float32) y_test = X_test[: , -1] X_test = X_test[:, :-1] #Converting target from bytes to integer y_train = [int.from_bytes(el, "little") for el in y_train] y_test = [int.from_bytes(el, "little") for el in y_test] X_train.shape, X_test.shape ``` |Set|Sample size|TS length| |:---|:----------|:-------| |Train|300|60| |Test|300|60| ## Evaluating NSW ### Default parameters We would employ ``NSWClassifier`` model from the ``mlots`` python package. First, the model is evaluated with default parameters over the ``SyntheticControl`` dataset. *** ``` model_default = NSWClassifier(random_seed=42).fit(X_train,y_train) y_hat_default = model_default.predict(X_test) acc_default = accuracy_score(y_test, y_hat_default) print("Model accuracy with default parameters: ", round(acc_default, 2)) ``` The accuracy of the model is **18%**, which is really poor; worse than random guessing. 
This is due to the default parameters which makes NSW network not enough dense. ### Model tuning ``NSWClassifier`` model allows us to work with a more complex distance measure like ``DTW``. Here, we would use ``GridSearchCV`` algorithm from the ``sklearn`` package to find the best set of parameters of the model over the dataset. The model tuning would be done **only** over the ``train`` set of the dataset. *** ``` #Setting up the warping window grid of the DTW measure dtw_params = [] for w_win in range(11,15,2): dtw_params.append( { "global_constraint": "sakoe_chiba", "sakoe_chiba_radius": w_win } ) dtw_params #Setting up the param grid for the NSWClassifier model with the DTW params #We would be doing a narrow grid-search for the demo. However, one could widen the parameters for best results. param_grid = { "f": [1,5], "m": np.arange(17,20,2), "k": [1,3], "metric_params" : dtw_params } param_grid #Executing the GridSearchCV over the NSWClassifier model with the supplied param_grid. model = NSWClassifier(metric="dtw",random_seed=42) gscv = GridSearchCV(model, param_grid=param_grid, cv=5, scoring="accuracy", n_jobs=-1).fit(X_train,y_train) #Displaying the best parameters of NSWClassifier within the search grid. best_param = gscv.best_params_ best_score = gscv.best_score_ print("Best Parameters: ", best_param) print("Best Accuracy: ", best_score) ``` ### Evaluation of tuned model The parameters displayed above are optimal set of parameters for the ``NSWClassifier`` model over ``SyntheticControl`` dataset. Our next task is then to train the ``NSWClassifier`` model over the ``train`` set with the optimal set of parameters, and evaluate the model over the held-out ``test`` set. 
*** ``` model_tuned = NSWClassifier(**best_param,metric="dtw",random_seed=42).fit(X_train,y_train) y_hat_tuned = model_tuned.predict(X_test) acc_tuned = accuracy_score(y_test, y_hat_tuned) print("Model accuracy with tuned parameters: ", round(acc_tuned, 2)) ``` By tuning the parameters of the model we increased the accuracy of the model from ~$18\%$ to $89\%$. ## Comparison Here we do bar-plot that would illustrate the performance of the ``NSWClassifier`` model with **default** parameters against the model with the **tuned** parameters. The ``matplotlib.pyplot``is employed for this task. *** ``` acc = [acc_default*100,acc_tuned*100] models = ["NSWClassifier-Default", "NSWClassifier-Tuned"] df = pd.DataFrame({"models": models, "Accuracy":acc}) fig = plt.figure() ax = df['Accuracy'].plot(kind="bar", figsize=(12, 8), alpha=0.7, color=[ 'skyblue' ], label = "Accuracy") ax.set_xticklabels(df['models']) ax.set_ylabel("Accuracy (%)") ax.set_ylim(0,100) plt.setp(ax.xaxis.get_majorticklabels(), rotation=0) for i,a in enumerate(acc): ax.text(i,a,str(round(a,3))+"%") plt.text plt.title("Model Performance") plt.show() ```
github_jupyter
# Figure showing constraints on the $S_v$ - $L$ correlation This notebook reproduces Figure 2 on the $S_v$ - $L$ correlation constraints from _How well do we know the neutron-matter equation of state at the densities inside neutron stars? A Bayesian approach with correlated uncertainties_ by [BUQEYE](https://buqeye.github.io/) members Christian Drischler, Dick Furnstahl, Jordan Melendez, and Daniel Phillips [[arXiv:2004.07232]](https://arxiv.org/abs/2004.07232). It can be easily updated and extended. #### Table of contents 1. [Setting the boundaries of constraint regions](#setting_boundaries) 1.1 [Using splines for upper and lower boundaries of experimental regions](#splines_up_low_boundaries) 1.2 [Theoretical regions](#theoretical_regions) 2. [Unitary gas boundaries](#UG_boundaries) 3. [Confidence ellipse from calculations in the paper](#confidence_ellipse) 4. [Generate the incremental and full figures](#incremental_plots) 5. [Create an animated gif showing constraints as overlays](#animation) The allowed region derived in that paper from χEFT calculations of infinite matter are shown as the yellow ellipses (dark: 1σ, light: 2σ) and denoted “GP-B” (Gaussian Process–BUQEYE collaboration). The option is given below to choose either the $\Lambda = 500\,$MeV or $\Lambda = 450\,$MeV NN and 3N potentials. Experimental and other theoretical constraints were compiled by [Jim Lattimer](http://www.astro.sunysb.edu/lattimer/) and collaborators: * J. M. Lattimer and Y. Lim, Astrophys. J. **771**, 51 (2013), [arXiv:1203.4286](https://arxiv.org/abs/1203.4286). * J. M. Lattimer and A. W. Steiner, Eur. Phys. J. A **50**, 40 (2014), [arXiv:1403.1186](https://arxiv.org/abs/1403.1186). * I. Tews, J. M. Lattimer, A. Ohnishi, and E. E. Kolomeitsev, Astrophys. J. **848**, 105 (2017), [arXiv:1611.07133](https://arxiv.org/abs/1611.07133). If you use a version of the figure created from this notebook, please cite the notebook as follows: > C. Drischler, R. Furnstahl, J. Melendez, and D. 
Phillips (2020) [arXiv:2004.07232](https://arxiv.org/abs/2004.07232). ``` %matplotlib inline # Standard Python imports import os import numpy as np from scipy.interpolate import CubicSpline, interp1d import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib.ticker import MultipleLocator, AutoMinorLocator import seaborn as sns def setup_rc_params(presentation=False): """ Set up the formatting details for the figure. """ if presentation: fontsize = 11 else: fontsize = 9 black = 'k' mpl.rcdefaults() # Set to defaults mpl.rc('text', usetex=True) mpl.rcParams['font.size'] = fontsize mpl.rcParams['text.usetex'] = True mpl.rcParams['font.family'] = 'serif' mpl.rcParams['axes.labelsize'] = fontsize mpl.rcParams['axes.edgecolor'] = black # mpl.rcParams['axes.xmargin'] = 0 mpl.rcParams['axes.labelcolor'] = black mpl.rcParams['axes.titlesize'] = fontsize mpl.rcParams['ytick.direction'] = 'in' mpl.rcParams['xtick.direction'] = 'in' mpl.rcParams['xtick.labelsize'] = fontsize mpl.rcParams['ytick.labelsize'] = fontsize mpl.rcParams['xtick.color'] = black mpl.rcParams['ytick.color'] = black # Make the ticks thin enough to not be visible at the limits of the plot (over the axes border) mpl.rcParams['xtick.major.width'] = mpl.rcParams['axes.linewidth'] * 0.95 mpl.rcParams['ytick.major.width'] = mpl.rcParams['axes.linewidth'] * 0.95 # The minor ticks are little too small, make them both bigger. 
mpl.rcParams['xtick.minor.size'] = 2.4 # Default 2.0 mpl.rcParams['ytick.minor.size'] = 2.4 mpl.rcParams['xtick.major.size'] = 3.9 # Default 3.5 mpl.rcParams['ytick.major.size'] = 3.9 ppi = 72 # points per inch # dpi = 150 mpl.rcParams['figure.titlesize'] = fontsize mpl.rcParams['figure.dpi'] = 150 # To show up reasonably in notebooks mpl.rcParams['figure.constrained_layout.use'] = True # 0.02 and 3 points are the defaults: # can be changed on a plot-by-plot basis using fig.set_constrained_layout_pads() mpl.rcParams['figure.constrained_layout.wspace'] = 0.0 mpl.rcParams['figure.constrained_layout.hspace'] = 0.0 mpl.rcParams['figure.constrained_layout.h_pad'] = 3. / ppi # 3 points mpl.rcParams['figure.constrained_layout.w_pad'] = 3. / ppi mpl.rcParams['legend.title_fontsize'] = fontsize mpl.rcParams['legend.fontsize'] = fontsize mpl.rcParams['legend.edgecolor'] = 'inherit' # inherits from axes.edgecolor, to match mpl.rcParams['legend.facecolor'] = (1, 1, 1, 0.6) # Set facecolor with its own alpha, so edgecolor is unaffected mpl.rcParams['legend.fancybox'] = True mpl.rcParams['legend.borderaxespad'] = 0.8 mpl.rcParams['legend.framealpha'] = None # Do not set overall alpha (affects edgecolor). Handled by facecolor above mpl.rcParams['patch.linewidth'] = 0.8 # This is for legend edgewidth, since it does not have its own option mpl.rcParams['hatch.linewidth'] = 0.6 # bbox = 'tight' can distort the figure size when saved (that's its purpose). # mpl.rc('savefig', transparent=False, bbox='tight', pad_inches=0.04, dpi=350, format='png') mpl.rc('savefig', transparent=False, bbox=None, dpi=400, format='png') setup_rc_params() ``` Pick a color palette below. The plot should change accordingly. ``` black = 'k' # This is always the same regardless of palette choice yellow = 'yellow' # Flat palette flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"] [purple, blue, grey, red, darkblue, green] = flatui orange = '#f39c12' # Or pick 'deep', 'muted', etc. 
from seaborn's palettes # https://seaborn.pydata.org/tutorial/color_palettes.html # [darkblue, orange, green, red, purple, brown, pink, grey, tan, blue] = sns.color_palette('deep') # Another option: # purple = 'magenta'; blue = 'cyan'; grey = '0.5'; red = 'red'; darkblue = 'b'; green = 'green' # orange = 'orange' ``` ## Setting the boundaries of constraint regions <a id="setting_boundaries"></a> ### Using splines for upper and lower boundaries of experimental regions <a id="splines_up_low_boundaries"></a> Data for constraint regions for some of the experiments in the figure are stored in the data subdirectory in two csv files with pairs of Esym and L values on each line, one file for the upper boundary of the region and one for the lower boundary. For masses there is a single data file, which is read in and separated by hand into the boundaries. To add another constraint region, just follow the pattern for existing regions below. ``` # sampling points Esym = np.linspace(25, 36, 401) # Use many points for interpolating expt region # The information and spline boundaries are accumulated in avail_srcs array. avail_srcs = [] # Heavy-ion collisions (HIC) data_HIC_upper = pd.read_csv("./data/HIC_upper.csv",names=['Esym', 'L']) data_HIC_lower = pd.read_csv("./data/HIC_lower.csv",names=['Esym', 'L']) spline_HIC_upper = CubicSpline(data_HIC_upper["Esym"], data_HIC_upper["L"]) spline_HIC_lower = CubicSpline(data_HIC_lower["Esym"], data_HIC_lower["L"]) avail_srcs.append({ "label" : "HIC", "facecolor" : green, 'spline_lower': spline_HIC_lower, 'spline_upper': spline_HIC_upper, 'Esym': Esym, 'label_x': 0.8, 'label_y': 0.93, 'ha': 'right', 'va': 'top', 'label_color': 'k', # 'reference': 'HIC: Tsang, M. 
B., Zhang, Y., Danielewicz, P., et al., PRL, 102, 122701', 'reference': 'Tsang \\textit{et al.},\nPRL \\textbf{102}, 122701 (2009)', }) # Sn neutron skin data_Sn_upper = pd.read_csv("./data/Sn_skin_upper.csv", names=['Esym', 'L']) data_Sn_lower = pd.read_csv("./data/Sn_skin_lower.csv", names=['Esym', 'L']) spline_Sn_upper = CubicSpline(data_Sn_upper["Esym"], data_Sn_upper["L"]) spline_Sn_lower = CubicSpline(data_Sn_lower["Esym"], data_Sn_lower["L"]) avail_srcs.append({ "label" : "Sn Neutron Skin", "facecolor" : blue, 'spline_lower': spline_Sn_lower, 'spline_upper': spline_Sn_upper, 'Esym': Esym, 'alpha': 0.8, 'label_x': 0.67, 'label_y': 0.15, 'ha': 'center', 'va': 'center', 'label_color': 'k', 'rotation': -40.8, # 'reference': 'Sn skin: Chen, L.-W., Ko, C. M., Li, B.-A., Xu, J., PRC, 82, 024321', 'reference': 'Chen \\textit{et al.},\nPRC \\textbf{82}, 024321 (2010)', }) # GDR data_GDR_upper = pd.read_csv("./data/GDR_upper.csv", names=['Esym', 'L']) data_GDR_lower = pd.read_csv("./data/GDR_lower.csv", names=['Esym', 'L']) spline_GDR_upper = CubicSpline(data_GDR_upper["Esym"], data_GDR_upper["L"]) spline_GDR_lower = CubicSpline(data_GDR_lower["Esym"], data_GDR_lower["L"]) avail_srcs.append({ "label" : "GDR", "facecolor" : red, 'spline_lower': spline_GDR_lower, 'spline_upper': spline_GDR_upper, 'Esym': Esym, 'label_x': 0.135, 'label_y': 0.02, 'ha': 'left', 'va': 'bottom', 'label_color': 'k', # 'reference': 'GDR: Trippa, L., Colo, G., Vigezzi, E., PRC, 77, 061304', 'reference': 'Trippa \\textit{et al.},\nPRC \\textbf{77}, 061304 (2008)', }) # Pb dipole data_Pb_upper = pd.read_csv("./data/Pb_dipole_upper.csv", names=['Esym', 'L']) data_Pb_lower = pd.read_csv("./data/Pb_dipole_lower.csv", names=['Esym', 'L']) spline_Pb_upper = CubicSpline(data_Pb_upper["Esym"], data_Pb_upper["L"]) spline_Pb_lower = CubicSpline(data_Pb_lower["Esym"], data_Pb_lower["L"]) pb_dipole_label = "Pb Dipole\nPolarizability" avail_srcs.append({ "label": pb_dipole_label, "facecolor" : black, 
'spline_lower': spline_Pb_lower, 'spline_upper': spline_Pb_upper, 'Esym': Esym, 'hatch': 'xx', # 'alpha': 0.5, 'ha': 'center', 'va': 'center', 'label_color': 'k', # 'label_x': 0.05, 'label_y': 0.26, # 'bbox': True, 'label_x': 0.025, 'label_y': 0.235, 'bbox': True, 'rotation': 29, 'fontsize': 8, # 'reference': 'Pb dipole: X. Roca-Maza, M. Brenna, G. Colo, M. Centelles, X. Vinas et al., PRC 88, 024316' 'reference': 'Roca-Maza \\textit{et al.},\nPRC \\textbf{88}, 024316 (2013)' }) # masses # Here there is only one csv file and the upper and lower parts are identified "by hand" data_masses = pd.read_csv("./data/masses.csv",names=['Esym', 'L']) # Find the lower and upper parts manually, only used for experimental agreement region data_masses_upper = data_masses[34:-1].sort_values(by='Esym') data_masses_lower = data_masses[:34].sort_values(by='Esym') spline_data_masses_upper = CubicSpline(data_masses_upper["Esym"], data_masses_upper["L"], extrapolate=False) spline_data_masses_lower = CubicSpline(data_masses_lower["Esym"], data_masses_lower["L"], extrapolate=False) avail_srcs.append({ "label" : "Masses", "facecolor" : orange, 'Esym': data_masses['Esym'], 'L': data_masses['L'], 'label_x': 0.36, 'label_y': 0.208, 'ha': 'center', 'va': 'center', 'label_color': 'k', 'rotation': 60, 'use_spline': False, 'spline_upper': spline_data_masses_upper, 'spline_lower': spline_data_masses_lower, 'alpha': 1, # 'reference': 'Masses: Kortelainen,M., McDonnell, J., Nazarewicz, W. 
et al., PRC, 82, 024313 (2010)' 'reference': 'Kortelainen \\textit{et al.},\nPRC \\textbf{82}, 024313 (2010)' }) # IAS # data_IAS = pd.read_csv("./data/IAS.csv",names=['Esym', 'L']) data_IAS = pd.read_csv("./data/IAS_2017.csv",names=['Esym', 'L']) data_IAS = data_IAS[:-2] # Find the lower and upper parts manually, only used for experimental agreement region # data_IAS_upper = data_IAS[24:-2].sort_values(by='Esym') # data_IAS_lower = data_IAS[:24].sort_values(by='Esym') # spline_data_IAS_upper = interp1d( # data_IAS_upper["Esym"], data_IAS_upper["L"], bounds_error=False, fill_value=np.NaN) # spline_data_IAS_lower = interp1d( # data_IAS_lower["Esym"], data_IAS_lower["L"], bounds_error=False, fill_value=np.NaN) avail_srcs.append({ "label" : "IAS + $\Delta R$", "facecolor" : darkblue, 'Esym': data_IAS['Esym'], 'L': data_IAS['L'], 'label_x': 0.8, 'label_y': 0.67, 'ha': 'right', 'va': 'top', 'label_color': 'w', 'use_spline': False, 'rotation': 50, # 'reference': 'P. Danielewicz, P. Singh and J. Lee, Nucl. Phys. A958, 147 (2017)' 'reference': 'Danielewicz \\textit{et al.},\nNPA \\textbf{958}, 147 (2017)' # 'spline_upper': spline_data_IAS_upper, # 'spline_lower': spline_data_IAS_lower }) ``` ### Theoretical regions <a id="theoretical_regions"></a> ``` # Hebeler et al. data_H = pd.read_csv("./data/H.csv", names=['Esym', 'L']) avail_srcs.append({ "label" : "H", "facecolor" : purple, 'Esym': data_H['Esym'], 'L': data_H['L'], 'label_x': 0.53, 'label_y': 0.35, 'ha': 'right', 'va': 'top', 'label_color': 'k', 'use_spline': False, # 'reference': 'H: Hebeler, K., Lattimer, J. M., Pethick, C. J., Schwenk, A, PRL, 105, 161102' 'reference': 'Hebeler \\textit{et al.},\nPRL \\textbf{105}, 161102 (2010)' }) # Gandolfi et al. 
data_G = pd.read_csv("./data/G.csv", names=['Esym', 'L']) avail_srcs.append({ "label" : "G", "facecolor" : grey, 'Esym': data_G['Esym'], 'L': data_G['L'], 'label_x': 0.65, 'label_y': 0.4, 'ha': 'right', 'va': 'top', 'label_color': 'k', 'use_spline': False, # 'reference': 'G: Gandolfi, S., Carlson, J., Reddy, S., PRC, 85, 032801' 'reference': 'Gandolfi \\textit{et al.},\nPRC \\textbf{85}, 032801 (2012)' }) ``` ## Unitary gas boundaries <a id="UG_boundaries"></a> ``` n0 = 0.157 # fm**-3 hbarc = 197.3269718 # MeV fm Mn = 939.565379; # MeV EUG0 = 3./(10.*Mn) * np.cbrt(3.*np.pi**2*n0)**2*hbarc**2*0.365 # about 12.64 MeV E0 = -15.5 # MeV TewsEtAlSetting = { "EUG0" : EUG0, "E0" : E0, "Kn" : 270, "Qnlower" : -750., "Qnupper" : 0. } # Unitary gas limit def getUgConstraint( ut, EUG0, Kn, Qnlower, Qnupper, E0 ): taylorDiff = ut-1. Qn = np.where(ut < 1, Qnlower, Qnupper) Esym = EUG0/(3.*np.cbrt(ut)) * (ut+2.) + Kn/18. * taylorDiff**2 + Qn/81. * taylorDiff**3 - E0 L = 2.*EUG0/(np.cbrt(ut)) - Kn/3. * taylorDiff - Qn/18. * taylorDiff**2 return Esym, L def getUgAnalyticConstraint( L, EUG0, E0 ): return L/6. * ( 1. + 2. 
* (2.* EUG0 / L)**(3/2) ) - E0 # analytic unitary gas limit # Uncomment the following lines to plot the UG constraint #fig, ax = plt.subplots(figsize=(3.5, 5)) #ut=np.linspace(0.001,2,100) #Esym_tews, L_tews = getUgConstraint( ut, **TewsEtAlSetting) #ax.set_xlim(24, 40) #ax.set_ylim(0, 120) #plt.plot(Esym_tews, L_tews) #ax.xaxis.set_minor_locator(AutoMinorLocator(n=2)) #ax.xaxis.set_major_locator(MultipleLocator(1)) #ax.xaxis.set_ticks_position('both') #ax.yaxis.set_ticks_position('both') # now plot analytic constraint #L_grid =np.linspace(0.001,120) #plt.plot(getUgAnalyticConstraint(L_grid, EUG0, E0), L_grid) ``` Make sure that our interpolants correctly split the Masses and IAS into upper and lower parts: ``` fig, ax = plt.subplots() data_masses.plot('Esym', 'L', ax=ax, c='C0') data_IAS.plot('Esym', 'L', ax=ax, c='C0') ax.plot(Esym, spline_data_masses_upper(Esym), ls='--', c='orange') ax.plot(Esym, spline_data_masses_lower(Esym), ls='--', c='red') # ax.plot(Esym, spline_data_IAS_upper(Esym), ls='--', c='orange') # ax.plot(Esym, spline_data_IAS_lower(Esym), ls='--', c='red'); def find_agreement(Esym, labels, sources): lower_bound = - np.inf * np.ones(Esym.shape) upper_bound = + np.inf * np.ones(Esym.shape) for source in sources: if source['label'] in labels: if 'spline_lower' in source and 'spline_upper' in source: lower = source['spline_lower'](Esym) upper = source['spline_upper'](Esym) # lower = np.where(np.isnan(lower), +np.inf, lower) # upper = np.where(np.isnan(upper), -np.inf, upper) lower[np.isnan(lower)] = +np.inf upper[np.isnan(upper)] = -np.inf else: raise ValueError() lower_bound = np.where(lower > lower_bound, lower, lower_bound) upper_bound = np.where(upper < upper_bound, upper, upper_bound) return lower_bound, upper_bound expt_lower, expt_upper = find_agreement( Esym, ['HIC', "Sn Neutron Skin", "GDR", pb_dipole_label, 'Masses'], avail_srcs ) expt_mask = expt_lower < expt_upper def plot_source( ax, Esym, L=None, spline_lower=None, spline_upper=None, 
use_spline=True, label=None, label_x=0, label_y=0, ha=None, va=None, reference=None, label_color='k', rotation=None, bbox=None, fontsize=None, facecolor=None, edgecolor='k', ls='-', lw=0.9, hatch=None, zorder=None, **kwargs ): if not use_spline: ax.fill( Esym, L, edgecolor=edgecolor, ls=ls, lw=lw, facecolor=facecolor, hatch=hatch, zorder=zorder, **kwargs ) else: old_facecolor = facecolor if hatch is not None: facecolor = "none" ax.plot(Esym, spline_lower(Esym), c=edgecolor, ls=ls, lw=lw, zorder=zorder) ax.plot(Esym, spline_upper(Esym), c=edgecolor, ls=ls, lw=lw, zorder=zorder) ax.fill_between( Esym, spline_lower(Esym), spline_upper(Esym), edgecolor=old_facecolor, ls=ls, lw=0, facecolor=facecolor, hatch=hatch, zorder=zorder, **kwargs ) if label is not None: if bbox is True: bbox = dict(facecolor='w', boxstyle='round', alpha=1) ax.text( label_x, label_y, label, fontdict=dict(color=label_color), rotation=rotation, transform=ax.transAxes, bbox=bbox, fontsize=fontsize ) return ax def add_ref(ref, ax): return ax.text(0.05, 0.92, ref, ha='left', va='top', transform=ax.transAxes) ``` ## Confidence ellipse from calculations in the paper <a id="confidence_ellipse"></a> ``` # Choose Lambda = 500 MeV or 450 MeV by uncommenting one of the following lines. Lambda = 500 #Lambda = 450 def confidence_ellipse(mean, cov, ax, n_std=3.0, facecolor='none', **kwargs): """ Create a plot of the covariance confidence ellipse of *x* and *y*. Parameters ---------- x, y : array-like, shape (n, ) Input data. ax : matplotlib.axes.Axes The axes object to draw the ellipse into. n_std : float The number of standard deviations to determine the ellipse's radiuses. 
Returns ------- matplotlib.patches.Ellipse Other parameters ---------------- kwargs : `~matplotlib.patches.Patch` properties """ from matplotlib.patches import Ellipse import matplotlib.transforms as transforms # cov = np.cov(x, y) pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1]) # Using a special case to obtain the eigenvalues of this # two-dimensionl dataset. ell_radius_x = np.sqrt(1 + pearson) ell_radius_y = np.sqrt(1 - pearson) ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2, facecolor=facecolor, **kwargs) # # Calculating the stdandard deviation of x from # # the squareroot of the variance and multiplying # # with the given number of standard deviations. scale_x = np.sqrt(cov[0, 0]) * n_std # mean_x = np.mean(x) # # calculating the stdandard deviation of y ... scale_y = np.sqrt(cov[1, 1]) * n_std # mean_y = np.mean(y) transf = transforms.Affine2D() \ .rotate_deg(45) \ .scale(scale_x, scale_y) \ .translate(mean[0], mean[1]) ellipse.set_transform(transf + ax.transData) return ax.add_patch(ellipse) if Lambda not in [450, 500]: raise ValueError('') # n=0.17 +/- 0.01, Lambda = 500 if Lambda == 500: Esym_L_sat_eft = np.array([31.69810539, 59.830485 ]) cov_Esym_L_eft = np.array([ [ 1.23651375, 3.27499281], [ 3.27499281, 16.95157735], ]) # n=0.17 +/- 0.01, Lambda = 450 if Lambda == 450: Esym_L_sat_eft = np.array([33.52341527, 67.79737685]) cov_Esym_L_eft = np.array([ [ 1.57488007, 3.05934762], [ 3.05934762, 15.97354339], ]) ``` Uncertainties at $1\sigma$ ``` np.sqrt(cov_Esym_L_eft[0,0]), np.sqrt(cov_Esym_L_eft[1, 1]) ``` ## Generate the incremental and full figures <a id="incremental_plots"></a> ``` image_type = 'png' import glob # Just in case the number of plots changes files = glob.glob('incremental_plots/*') for f in files: os.remove(f) def file_out(i, image_type='pdf'): """Generate a file name for sequential image output. """ file_base = 'incremental_plots/Esym_L_correlation' return file_base + f'{i:02d}' + '.' 
+ image_type fig, ax = plt.subplots(figsize=(3.4, 5), constrained_layout=True) # set labels and title ax.set_xlabel(r"Symmetry Energy $S_v$ [MeV]") ax.set_ylabel(r"Slope Parameter $L$ [MeV]") # ax.set_title('$S_v$--$L$ Correlation') ax.text(0.05, 0.96, 'Constraints on $S_v$--$L$', transform=ax.transAxes, ha='left', va='top') ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') # set limits ax.set_xlim(25, 35) ax.set_ylim(0, 100) ax.yaxis.set_minor_locator(AutoMinorLocator(n=2)) ax.xaxis.set_minor_locator(AutoMinorLocator(n=2)) ax.tick_params(width=0.7, which='major') # plot each source separately i = 0 fig.savefig(file_out(i, image_type)) i += 1 # step through the array of constraint regions and plot in turn for src in avail_srcs: plot_source(ax, zorder=i/10., **src) ref = add_ref(src['reference'], ax) fig.savefig(file_out(i, image_type)) ref.remove() i += 1 if src['label'] == 'Masses': ax.fill_between( Esym[expt_mask], expt_lower[expt_mask], expt_upper[expt_mask], lw=1, facecolor='w', edgecolor='k', zorder=i/10 ) ax.text( 0.64, 0.53, 'Intersection', transform=ax.transAxes, rotation=65.4, ha='center', va='center', zorder=i/10 # fontsize=7, ) fig.savefig(file_out(i, image_type)) i += 1 # Add the unitary gas constraint boundaries ut = np.linspace(0.001, 2, 100) tews_zorder = 11 Esym_tews, L_tews = getUgConstraint( ut, **TewsEtAlSetting) ax.plot(Esym_tews, L_tews, c='k', zorder=tews_zorder) idx_arrow_tews = 60 ax.arrow(Esym_tews[idx_arrow_tews], L_tews[idx_arrow_tews], 1, 0, head_length=0.3, head_width=2, zorder=tews_zorder) ax.text(Esym_tews[idx_arrow_tews]+0.15, L_tews[idx_arrow_tews]+1.4, 'UG', ha='left', va='bottom', zorder=tews_zorder) # now plot analytic constraint L_grid = np.linspace(0.001, 120, 100) Esym_tews_analytic = getUgAnalyticConstraint(L_grid, EUG0, E0) ax.plot(Esym_tews_analytic, L_grid, c='k', ls='--', zorder=tews_zorder) idx_arrow_tews_a = 10 ax.arrow( Esym_tews_analytic[idx_arrow_tews_a], L_grid[idx_arrow_tews_a], 1, 0, 
head_length=0.3, head_width=2, zorder=tews_zorder) ax.text(Esym_tews_analytic[idx_arrow_tews_a]+0.15, L_grid[idx_arrow_tews_a]+1.4, 'UG Analytic', ha='left', va='bottom', zorder=tews_zorder) # ug_ref = 'UG: Tews, I., Lattimer, J. M., Ohnishi, A. , Kolomeitsev, E. E., APJ 848, 105' ug_ref = 'Tews \\textit{et al.},\nAPJ \\textbf{848}, 105 (2017)' ref = add_ref(ug_ref, ax) i += 1 fig.savefig(file_out(i, image_type)) ref.remove() # plot Drischler et al. (2016) # Uncomment the next lines to show the individual data points from this paper. # data_D = pd.read_csv("./data/Drischler_2016.csv", names=['Esym', 'L']) # ax.scatter(data_D["Esym"], data_D["L"], c="k", marker="*") # stars_ref = 'Drischler \\textit{et al.},\nPRC \\textbf{93}, 054314 (2016)' # ref = add_ref(stars_ref, ax) # i += 1 # fig.savefig(file_out(i, image_type)) # ref.remove() confidence_ellipse( Esym_L_sat_eft, cov_Esym_L_eft, ax=ax, n_std=2, facecolor=yellow, edgecolor='k', alpha=0.4, zorder=i+1 ) confidence_ellipse( Esym_L_sat_eft, cov_Esym_L_eft, ax=ax, n_std=1, facecolor=yellow, edgecolor='k', alpha=0.8, zorder=i+1 ) if Lambda == 500: # ax.text( # 0.6045, 0.584, r'GP-B', fontdict=dict(color='k'), # rotation=26.7, transform=ax.transAxes, # zorder=i+2, # ) ax.text( 0.61, 0.588, r'GP-B', fontdict=dict(color='k', fontsize=7), rotation=26.7, transform=ax.transAxes, zorder=i+2, ) ax.text( 0.635, 0.572, r'(500)', fontdict=dict(color='k', fontsize=7), rotation=26.7, transform=ax.transAxes, zorder=i+2, ) elif Lambda == 450: ax.text( 0.762, 0.641, r'GP-B (450)', fontdict=dict(color='k', fontsize=7), rotation=21, transform=ax.transAxes, zorder=i+2, ) ref = add_ref('This work', ax) i += 1 fig.savefig(file_out(i, image_type)) ref.remove() fig.savefig(f"Lattimer_Esym_L_Lambda-{Lambda}." + image_type) ``` ## Create an animated gif showing constraints as overlays <a id="animation"></a> The method used here requires [ImageMagick](https://imagemagick.org/) and assumes the incremental plots were created as png files. 
Increase the delay for more time before the next image. Set `-loop` to 0 to cycle endlessly and set to `n` to make n loops. ``` %%bash convert -delay 100 -loop 1 incremental_plots/*png incremental_plots/Esym_L_correlation_animated.gif ```
github_jupyter
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_RU.ipynb) # **Detect entities in Russian text** ## 1. Colab Setup ``` !wget http://setup.johnsnowlabs.com/colab.sh -O - | bash # !bash colab.sh # -p is for pyspark # -s is for spark-nlp # !bash colab.sh -p 3.1.1 -s 3.0.1 # by default they are set to the latest # Install Spark NLP Display for visualization !pip install --ignore-installed spark-nlp-display ``` ## 2. Start the Spark session ``` import json import pandas as pd import numpy as np from pyspark.ml import Pipeline from pyspark.sql import SparkSession import pyspark.sql.functions as F from sparknlp.annotator import * from sparknlp.base import * import sparknlp from sparknlp.pretrained import PretrainedPipeline spark = sparknlp.start() ``` ## 3. Select the DL model ``` # If you change the model, re-run all the cells below. # Applicable models: wikiner_840B_300, wikiner_6B_300, wikiner_6B_100 MODEL_NAME = "wikiner_840B_300" ``` ## 4. Some sample examples ``` # Enter examples to be transformed as strings in this list text_list = [ """Уильям Генри Гейтс III (родился 28 октября 1955 года) - американский бизнес-магнат, разработчик программного обеспечения, инвестор и филантроп. Он наиболее известен как соучредитель корпорации Microsoft. За время своей карьеры в Microsoft Гейтс занимал должности председателя, главного исполнительного директора (CEO), президента и главного архитектора программного обеспечения, а также был крупнейшим индивидуальным акционером до мая 2014 года. Он является одним из самых известных предпринимателей и пионеров микрокомпьютерная революция 1970-х и 1980-х годов. Гейтс родился и вырос в Сиэтле, штат Вашингтон, в 1975 году вместе с другом детства Полом Алленом в Альбукерке, штат Нью-Мексико, и основал компанию Microsoft. 
она стала крупнейшей в мире компанией-разработчиком программного обеспечения для персональных компьютеров. Гейтс руководил компанией в качестве председателя и генерального директора, пока в январе 2000 года не ушел с поста генерального директора, но остался председателем и стал главным архитектором программного обеспечения. В конце 1990-х Гейтс подвергся критике за свою деловую тактику, которая считалась антиконкурентной. Это мнение было подтверждено многочисленными судебными решениями. В июне 2006 года Гейтс объявил, что перейдет на неполный рабочий день в Microsoft и будет работать на полную ставку в Фонде Билла и Мелинды Гейтс, частном благотворительном фонде, который он и его жена Мелинда Гейтс создали в 2000 году. [ 9] Постепенно он передал свои обязанности Рэю Оззи и Крейгу Манди. Он ушел с поста президента Microsoft в феврале 2014 года и занял новую должность консультанта по технологиям для поддержки вновь назначенного генерального директора Сатья Наделла.""", """Мона Лиза - картина маслом 16-го века, созданная Леонардо. Он проводится в Лувре в Париже.""" ] ``` ## 5. Define Spark NLP pipeline ``` document_assembler = DocumentAssembler() \ .setInputCol('text') \ .setOutputCol('document') tokenizer = Tokenizer() \ .setInputCols(['document']) \ .setOutputCol('token') # The wikiner_840B_300 is trained with glove_840B_300, so the embeddings in the # pipeline should match. Same applies for the other available models. 
# Choose the word embeddings whose name/dimensionality matches the selected
# NER model (see MODEL_NAME above); each branch only differs in the
# pretrained embeddings it downloads.
if MODEL_NAME == "wikiner_840B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_840B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "wikiner_6B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_6B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "wikiner_6B_100":
    embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')

# Pretrained Russian NER model; consumes document, token and embeddings
# columns and emits per-token NER tags.
ner_model = NerDLModel.pretrained(MODEL_NAME, 'ru') \
    .setInputCols(['document', 'token', 'embeddings']) \
    .setOutputCol('ner')

# Groups consecutive tagged tokens into entity chunks (e.g. multi-word names).
ner_converter = NerConverter() \
    .setInputCols(['document', 'token', 'ner']) \
    .setOutputCol('ner_chunk')

# Assemble the full pipeline; stage order matters: each stage reads the
# output columns of the previous ones.
nlp_pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    embeddings,
    ner_model,
    ner_converter
])
```

## 6. Run the pipeline

```
# Fit on an empty DataFrame to materialize a PipelineModel — the stages used
# here are pretrained, so presumably no actual training happens (TODO confirm).
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)

# Run the fitted pipeline over the sample texts defined above.
df = spark.createDataFrame(pd.DataFrame({'text': text_list}))
result = pipeline_model.transform(df)
```

## 7. Visualize results

```
from sparknlp_display import NerVisualizer

# Render the detected entity chunks of the first sample text inline.
NerVisualizer().display(
    result = result.collect()[0],
    label_col = 'ner_chunk',
    document_col = 'document'
)
```
github_jupyter
``` import numpy as np import tensorflow as tf import random as rn import os import matplotlib.pyplot as plt %matplotlib inline os.environ['PYTHONHASHSEED'] = '0' import sys import scipy import math import sys import pandas as pd from scipy.ndimage.filters import gaussian_filter1d from sklearn.metrics import mean_squared_error from scipy.stats import linregress from scipy import interpolate from scipy import signal import pickle from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import KFold from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from scipy.stats import linregress from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge,Lasso from sklearn.svm import SVR from video_process_utils import * target_col = 'GDI' alldata_processed =\ pd.read_csv("./data/processed/alldata_processed_with_dev_residual.csv" ) alldata_processed['videoid'] = alldata_processed['videoid'].apply(lambda x: int(x)) alldata_processed['target_count'] = alldata_processed.groupby('videoid')[target_col].transform(lambda x: x.count()) HOME_DIR = "./" datasplit_df = pd.read_csv('%sdata/processed/train_test_valid_id_split.csv' % (HOME_DIR)) datasplit_df['videoid'] = datasplit_df['videoid'].apply(lambda x: int(x)) all_ids = set(datasplit_df['videoid']) train_ids = set(datasplit_df[datasplit_df['dataset'] == 'train']['videoid']) validation_ids = set(datasplit_df[datasplit_df['dataset'] == 'validation']['videoid']) test_ids = set(datasplit_df[datasplit_df['dataset'] == 'test']['videoid']) with open('./data/processed/all_processed_videos.pickle', 'rb') as handle: processed_videos = pickle.load(handle) processed_video_ids = [x[0] for x in processed_videos if x[0] in all_ids] videos = [x[1][:500,:] for x in processed_videos if x[0] in all_ids] LANGLE_ANK_KNE_HIP = 50 
RANGLE_ANK_KNE_HIP = 51
LANGLE_BTO_ANK_KNE = 52
RANGLE_BTO_ANK_KNE = 53
LDIST_BTO_ANK = 54
RDIST_BTO_ANK = 55
XDIST_LANK_RANK = 56
XDIST_RANK_LANK = 57

# One row per processed video; feature columns are appended below.
features_df = pd.DataFrame(processed_video_ids, columns=['videoid'])

def add_percentiles_xy(df, videos, column_left, column_right, column_name, percentile):
    """Return a copy of df with the given percentile of the left/right x and y signals."""
    df = df.copy()
    name_base_L = 'p%s_L%s' % (percentile, column_name)
    name_base_R = 'p%s_R%s' % (percentile, column_name)
    df[name_base_L + '_x'] = [np.percentile(v[:, 2 * column_left], percentile) for v in videos]
    df[name_base_R + '_x'] = [np.percentile(v[:, 2 * column_right], percentile) for v in videos]
    df[name_base_L + '_y'] = [np.percentile(v[:, 2 * column_left + 1], percentile) for v in videos]
    df[name_base_R + '_y'] = [np.percentile(v[:, 2 * column_right + 1], percentile) for v in videos]
    return df

def add_percentiles(df, videos, column_idx, column_name, percentile):
    """Add one percentile column for a single raw signal (mutates df in place)."""
    df[column_name] = [np.percentile(v[:, column_idx], percentile) for v in videos]

def apply_transform(df, videos, col_name, col_idx, fn):
    """Add a column with fn applied to column col_idx of every video (mutates df)."""
    df[col_name] = [fn(v[:, col_idx]) for v in videos]

# Percentile summary features, per keypoint coordinate and per derived signal.
for percentile in [10, 25, 50, 75, 90]:
    fn = lambda x: np.percentile(x, percentile)
    for keypoint, idx in [('LANK', LANK), ('RANK', RANK), ('LKNE', LKNE), ('RKNE', RKNE),
                          ('LHIP', LHIP), ('RHIP', RHIP), ('LBTO', LBTO), ('RBTO', RBTO)]:
        apply_transform(features_df, videos, 'p%s_%s_x' % (percentile, keypoint), 2 * idx, fn)
        apply_transform(features_df, videos, 'p%s_%s_y' % (percentile, keypoint), 2 * idx + 1, fn)
    for keypoint, idx in [('LANGLE_ANK_KNE_HIP', LANGLE_ANK_KNE_HIP), ('RANGLE_ANK_KNE_HIP', RANGLE_ANK_KNE_HIP),
                          ('LANGLE_BTO_ANK_KNE', LANGLE_BTO_ANK_KNE), ('RANGLE_BTO_ANK_KNE', RANGLE_BTO_ANK_KNE),
                          ('LDIST_BTO_ANK', LDIST_BTO_ANK), ('RDIST_BTO_ANK', RDIST_BTO_ANK),
                          ('XDIST_LANK_RANK', XDIST_LANK_RANK), ('XDIST_RANK_LANK', XDIST_RANK_LANK)]:
        apply_transform(features_df, videos, 'p%s_%s' % (percentile, keypoint), idx, fn)

# Standard-deviation features over the same signals.
fn = np.std
for keypoint, idx in [('LANK', LANK), ('RANK', RANK), ('LKNE', LKNE), ('RKNE', RKNE),
                      ('LHIP', LHIP), ('RHIP', RHIP), ('LBTO', LBTO), ('RBTO', RBTO)]:
    apply_transform(features_df, videos, 'std_%s_x' % (keypoint), 2 * idx, fn)
    apply_transform(features_df, videos, 'std_%s_y' % (keypoint), 2 * idx + 1, fn)
for keypoint, idx in [('LANGLE_ANK_KNE_HIP', LANGLE_ANK_KNE_HIP), ('RANGLE_ANK_KNE_HIP', RANGLE_ANK_KNE_HIP),
                      ('LANGLE_BTO_ANK_KNE', LANGLE_BTO_ANK_KNE), ('RANGLE_BTO_ANK_KNE', RANGLE_BTO_ANK_KNE),
                      ('LDIST_BTO_ANK', LDIST_BTO_ANK), ('RDIST_BTO_ANK', RDIST_BTO_ANK),
                      ('XDIST_LANK_RANK', XDIST_LANK_RANK), ('XDIST_RANK_LANK', XDIST_RANK_LANK)]:
    apply_transform(features_df, videos, 'std_%s' % (keypoint), idx, fn)

def orient_columns(df, left_col_name, right_col_name, col_name):
    """Pick the left or right variant of a feature according to the row's 'side'."""
    df[col_name] = df.apply(
        lambda row: row[left_col_name] if row.side == 'L' else row[right_col_name], axis=1)

# Join features with targets/metadata and the train/validation/test split.
final_df = features_df.merge(
    right=alldata_processed[['side', 'videoid', target_col, "cadence", "speed", "height"]],
    on=['videoid'], how='inner')
final_df = final_df.merge(right=datasplit_df[['videoid', 'dataset']], on=['videoid'], how='inner')

# Side-oriented model inputs: for each feature keep the value from the row's side.
Xcols = []
for percentile in [10, 25, 50, 75, 90]:
    for keypoint in ['ANK', 'HIP', 'KNE', 'BTO']:
        orient_columns(final_df, 'p%s_L%s_x' % (percentile, keypoint),
                       'p%s_R%s_x' % (percentile, keypoint),
                       'p%s_%s_x' % (percentile, keypoint))
        orient_columns(final_df, 'p%s_L%s_y' % (percentile, keypoint),
                       'p%s_R%s_y' % (percentile, keypoint),
                       'p%s_%s_y' % (percentile, keypoint))
        Xcols.append('p%s_%s_x' % (percentile, keypoint))
        Xcols.append('p%s_%s_y' % (percentile, keypoint))
    for keypoint in ['ANGLE_ANK_KNE_HIP', 'ANGLE_BTO_ANK_KNE', 'DIST_BTO_ANK']:
        orient_columns(final_df, 'p%s_L%s' % (percentile, keypoint),
                       'p%s_R%s' % (percentile, keypoint),
                       'p%s_%s' % (percentile, keypoint))
        Xcols.append('p%s_%s' % (percentile, keypoint))
    orient_columns(final_df, 'p%s_XDIST_LANK_RANK' % (percentile),
                   'p%s_XDIST_RANK_LANK' % (percentile),
                   'p%s_XDIST_LANK_RANK' % (percentile))
    Xcols.append('p%s_XDIST_LANK_RANK' % (percentile))

for keypoint in ['ANK', 'HIP', 'KNE', 'BTO']:
    orient_columns(final_df, 'std_L%s_x' % (keypoint),
                   'std_R%s_x' % (keypoint),
                   'std_%s_x' % (keypoint))
    orient_columns(final_df, 'std_L%s_y' % (keypoint),
                   'std_R%s_y' % (keypoint),
                   'std_%s_y' % (keypoint))
    Xcols.append('std_%s_x' % (keypoint))
    Xcols.append('std_%s_y' % (keypoint))
for keypoint in ['ANGLE_ANK_KNE_HIP', 'ANGLE_BTO_ANK_KNE', 'DIST_BTO_ANK']:
    orient_columns(final_df, 'std_L%s' % (keypoint),
                   'std_R%s' % (keypoint),
                   'std_%s' % (keypoint))
    Xcols.append('std_%s' % (keypoint))
orient_columns(final_df, 'std_XDIST_LANK_RANK', 'std_XDIST_RANK_LANK', 'std_XDIST_LANK_RANK')
Xcols.append('std_XDIST_LANK_RANK')

X_train = final_df[final_df['dataset'] == 'train'][Xcols].values
y_train = final_df[final_df['dataset'] == 'train'][target_col].values
X = final_df[Xcols].values

from sklearn.ensemble import RandomForestRegressor

sc = StandardScaler()
rr = Ridge()
rf = RandomForestRegressor()
pipe_rr = Pipeline([('sc', sc), ('rr', rr)])

def evaluate_model(mod, df):
    """Predict on every row (uses the global feature matrix X) and report
    (correlation, RMSE) per dataset split; mutates df with a prediction column."""
    df['%s_pred' % (target_col)] = mod.predict(X)
    metrics = {}
    for dataset in ['train', 'validation', 'test']:
        tmp = df[df['dataset'] == dataset]
        c = tmp.corr()['%s' % (target_col)]['%s_pred' % (target_col)]
        rmse = np.sqrt(mean_squared_error(tmp['%s_pred' % (target_col)], tmp['%s' % (target_col)]))
        metrics[dataset] = (c, rmse)
    return metrics

# Ridge: sweep alpha, select on validation RMSE, then refit with the best value.
results_rr = []
for alpha in [0.001, 0.01, 0.1, 1.0, 10, 100, 1000, 10000]:
    print(alpha)
    pipe_rr.set_params(rr__alpha=alpha).fit(X_train, y_train)
    metrics = evaluate_model(pipe_rr, final_df)
    results_rr.append((alpha, metrics['validation'][1]))
best_alpha = results_rr[np.argmin([x[1] for x in results_rr])][0]
pipe_rr.set_params(rr__alpha=best_alpha).fit(X_train, y_train)
metrics = evaluate_model(pipe_rr, final_df)
metrics

#get rr feature importances
feature_importances = pd.DataFrame(zip(pipe_rr.named_steps['rr'].coef_, Xcols),
                                   columns=['coef', 'feature'])
feature_importances['abs_coef'] = np.abs(feature_importances['coef'])
feature_importances.sort_values(by='abs_coef', ascending=False)

# Random-forest hyperparameter search space.
n_estimators = [100]
max_features = ['auto', 'sqrt']
max_depth = list(range(10, 110, 10))
max_depth.append(None)
min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 5] param_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} def random_search_rf_estimator(param_grid): rf = RandomForestRegressor() selected_params = {} for k in param_grid.keys(): selected_params[k] = np.random.choice(param_grid[k]) rf.set_params(**selected_params) return rf rf_results = [] np.random.seed(1) n_iters = 5 for i in range(n_iters): print(i) rf = random_search_rf_estimator(param_grid) rf.fit(X_train,y_train) metrics = evaluate_model(rf,final_df) rf_results.append((rf.get_params(),metrics['validation'][1])) optimal_rf_params = rf_results[np.argmin([x[1] for x in rf_results])][0] rf.set_params(**optimal_rf_params) metrics = evaluate_model(rf,final_df) metrics feature_importances = pd.DataFrame(zip(Xcols,rf.feature_importances_),columns=['feature','feature_importance']) feature_importances.sort_values(by='feature_importance',ascending=False) ```
github_jupyter
``` #default_exp opt ``` # Optimization and Metrics > Here, we expose the functionality for optimizing the segmentation models, and evaluating them using the Dice loss and similarity, respectively. ``` #hide from nbdev.showdoc import * #export import torch import torch.nn as nn #export class DiceLoss(nn.Module): """ Module to compute the Dice segmentation loss. Based on the following discussion: https://discuss.pytorch.org/t/one-hot-encoding-with-autograd-dice-loss/9781 """ def __init__(self, weights=None, ignore_index=None, eps=0.0001): super(DiceLoss, self).__init__() self.weights = weights self.ignore_index = ignore_index self.eps = eps def forward(self, output, target): """ Arguments: output: (N, C, H, W) tensor of probabilities for the predicted output target: (N, H, W) tensor corresponding to the pixel-wise labels Returns: loss: the Dice loss averaged over channels """ encoded_target = output.detach() * 0 if self.ignore_index is not None: mask = target == self.ignore_index target = target.clone() target[mask] = 0 encoded_target.scatter_(1, target.unsqueeze(1), 1) mask = mask.unsqueeze(1).expand_as(encoded_target) encoded_target[mask] = 0 else: encoded_target.scatter_(1, target.unsqueeze(1), 1) if self.weights is None: self.weights = 1 intersection = output * encoded_target numerator = 2 * intersection.sum(0).sum(1).sum(1) denominator = output + encoded_target if self.ignore_index is not None: denominator[mask] = 0 denominator = denominator.sum(0).sum(1).sum(1) + self.eps loss_per_channel = self.weights * (1 - (numerator / denominator)) return loss_per_channel.sum() / output.size(1) #export def dice_similarity(output, target, weights=None, ignore_index=None, eps=1e-8): """ Arguments: output: (N, C, H, W) tensor of model output target: (N, H, W) tensor corresponding to the pixel-wise labels Returns: loss: the Dice loss averaged over channels """ prediction = torch.argmax(output, dim=1) encoded_prediction = output.detach() * 0 encoded_prediction.scatter_(1, 
prediction.unsqueeze(1), 1) encoded_target = output.detach() * 0 if ignore_index is not None: mask = target == ignore_index target = target.clone() target[mask] = 0 encoded_target.scatter_(1, target.unsqueeze(1), 1) mask = mask.unsqueeze(1).expand_as(encoded_target) encoded_target[mask] = 0 else: encoded_target.scatter_(1, target.unsqueeze(1), 1) if weights is None: weights = 1 intersection = encoded_prediction * encoded_target numerator = 2 * intersection.sum(0).sum(1).sum(1) + eps denominator = intersection + encoded_target if ignore_index is not None: denominator[mask] = 0 denominator = denominator.sum(0).sum(1).sum(1) + eps acc_per_channel = weights * ((numerator / denominator)) return acc_per_channel.sum() / output.size(1) ``` Now, we train our dynamic U-Net. First, let's initialize the dataset, and see an example datapoint. ``` #example import torch from dynamic_unet.utils import CamvidDataset, load_camvid_dataset, display_segmentation_from_file camvid_data_directory = "/home/jupyter/data/camvid" all_data, val_indices, label_mapping = load_camvid_dataset(camvid_data_directory) tr_data, val_data = [tpl for i, tpl in enumerate(all_data) if i not in val_indices], \ [tpl for i, tpl in enumerate(all_data) if i in val_indices] i = 10 display_segmentation_from_file(tr_data[i][0], tr_data[i][1]) tr_ds, val_ds = CamvidDataset(tr_data, resize_shape=(360, 480)),\ CamvidDataset(val_data, resize_shape=(360, 480), is_train=False) tr_dl, val_dl = torch.utils.data.DataLoader(tr_ds, batch_size=4, shuffle=True), torch.utils.data.DataLoader(val_ds) ``` Finally, we initialize our model, optimizer, and criterion to optimize (here, we use the Dice loss to optimize directly for the Dice metric). We'll also initialize a scheduler for faster training. 
``` #example from dynamic_unet.encoder import resnet34 from dynamic_unet.unet import DynamicUNet model = DynamicUNet(resnet34(), num_output_channels=32, input_size=(360, 480)) if torch.cuda.is_available(): model = model.cuda() decoder_parameters = [item for module in model.decoder for item in module.parameters()] optimizer = torch.optim.AdamW(decoder_parameters) # Only training the decoder for now criterion = DiceLoss() # Training specific parameters num_epochs = 10 scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=1e-2, total_steps=num_epochs * len(tr_dl), ) ``` Here's an example of what training for 10 epochs to fine-tune the decoder to be on par with the pre-trained weights being used in the ResNet encoder. We're using the default learning rate for Adam, $10^{-3}$. We print the per-pixel accuracy, as well as the Dice similarity (which is the F1 score). ``` #example from tqdm.notebook import tqdm model.train() losses = [] accuracies = [] tqdm_iterator = tqdm(range(num_epochs), position=0) for epoch in tqdm_iterator: tr_loss, tr_correct_pixels, tr_total_pixels, tr_dice_similarity, total = 0., 0., 0., 0., 0. 
tqdm_epoch_iterator = tqdm(tr_dl, position=1, leave=False) for i, (x, y) in enumerate(tqdm_epoch_iterator): optimizer.zero_grad() if torch.cuda.is_available(): x, y = x.cuda(), y.squeeze(dim=1).cuda() output = model(x) probs = torch.softmax(output, dim=1) prediction = torch.argmax(output, dim=1) tr_correct_pixels += ((prediction == y).sum()) tr_total_pixels += y.numel() tr_dice_similarity += dice_similarity(output, y.squeeze(1)) * len(y) loss = criterion(output, y.squeeze(1)) tr_loss += loss.data.cpu() * len(y) total += len(y) loss.backward() optimizer.step() if scheduler is not None: scheduler.step() if i % 1 == 0: curr_loss = tr_loss / total curr_acc = tr_correct_pixels / tr_total_pixels curr_dice = tr_dice_similarity / total tqdm_epoch_iterator.set_postfix({ "Loss": curr_loss.item(), "Accuracy": curr_acc.item(), "Dice": curr_dice.item() }) overall_loss = tr_loss.item() / total overall_acc = tr_correct_pixels.item() / tr_total_pixels losses.append(overall_loss) accuracies.append(overall_acc) tqdm_iterator.set_postfix({"Loss": overall_loss, "Accuracy": overall_acc}) ``` For more details on how to train the dynamic U-Net (with the raw code, not this package), see [this](https://dthiagarajan.github.io/technical_blog/pytorch/hooks/2020/03/18/Dynamic-UNet-and-PyTorch-Hooks.html) blog post.
github_jupyter
``` %run -i ../python/common.py UC_SKIPTERMS=True %run -i ../python/ln_preamble.py ``` # SLS Lecture 7 : Assembly Programming Introduction Processes and Binaries ## Revisit Processes ``` display(HTML(htmlFig( [ [ # {'src':"/files/work/UndertheCovers/underthecovers/images/Processes/Processes.003.png", # 'caption':'A: Press Enter', # 'border': '1px solid black', # 'padding':'1px', # 'cellwidth':'33.33%' # }, {'src':"../images/Processes/Processes.004.png", # 'caption':'B: Shell "blank line" input processing' , # 'border': '1px solid black', # 'padding':'1px', # 'cellwidth':'33.33%' }, {'src':"../images/Processes/Processes.005.png", # 'caption':'C: Shell sends Prompt back', # 'border': '1px solid black', # 'padding':'1px', # 'cellwidth':'33.33%' }, ] ], # id="fig:shell-blankline", # caption="<center> Figure: Shell blank line behavior </center>" ))) ``` ### A Process is a Running executable but what really is an executable??? Let's see what we can figure out poking at the file a little ``` TermShellCmd("ls -l /bin/ls", markdown=False) ``` - So it is marked as a executable "x" in the permissions bits - How big is it? - Lets see if we can look at its contents ``` display(showBT("")) ``` Why did `cat /bin/ls` not help? - Because whatever is in it its is not ASCII encode. 
How about trying to dump its contents by looking at the values of its bytes
<img src="../images/HW.png"> Remember that the OS Kernel is designed to make it easier to use the hardware to run programs. Now we need to dig into this idea more carefully. ### Processes As CPU and Memory context A process is the way that the Operating System let our programs use the CPU and Memory in a controlled way. Each Process is independent "Context" to execute program. Where a context provides a program with its own view of Memory and the CPU <img src="../images/ProcessContexts.png"> ### Process as a Context for using the CPU and Memory A process is a way for us to use the CPU and memory through the programs we write - But not the I/O devices -- Only the OS can directly access the I/O devices - as we will see later the only way for our programs to do I/O will be to OS calls To understand what we are doing when we write assembly code to create a program - we need to understand how the CPU works and Memory together as programmable system <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.003.png"> Let's start with a quick overview of the basic Von Neumman computer model and how the CPU and memory work together. 
<img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.005.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.007.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.008.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.009.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.010.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.011.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.012.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.013.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.014.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.015.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.016.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.017.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.018.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.019.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.020.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.021.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.022.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.023.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.024.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.025.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.026.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.027.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.028.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.029.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.030.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.031.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.032.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.033.png"> So a Process is : - an OS provided 
context that lets us - direct the CPU via a binary program file - that is loaded into the RAM memory array when we "run" it! (exec) - A binary "contains" the initial contents of memory that the OS loads into our process's memory. - "memory image" -- the exact byte values and where they go into memory - A process's memory is called the process's address space. ## The Tools and how to use them 1. Preparing / creating binaries 1. Assembler: Tool that translates a programmer's description of what to put into memory into fragments of an executable file 2. Linker: Tool that combines the fragments into an complete executable that the OS can load 2. Process inspection and manipulation 1. A Debugger that allows us to look at and control a Process ### Assembler and Linker <center> <img src="../images/ASSEMBLY-VNA-SOFTWARE/ASSEMBLY-VNA-SOFTWARE.026.png" width="100%"> </center> <center> <img src="../images/ASSEMBLY-VNA-SOFTWARE/ASSEMBLY-VNA-SOFTWARE.027.png" width="100%"> </center> <center> <img src="../images/ASSEMBLY-VNA-SOFTWARE/ASSEMBLY-VNA-SOFTWARE.028.png" width="100%"> </center> <center> <img src="../images/ASSEMBLY-VNA-SOFTWARE/ASSEMBLY-VNA-SOFTWARE.029.png" width="100%"> </center> <center> <img src="../images/ASSEMBLY-VNA-SOFTWARE/ASSEMBLY-VNA-SOFTWARE.030.png" width="100%"> </center> ### Debugger Provides us a way of looking inside a process, freezing its execution, examining and modify the cpu registers and memory. 
<img src="../images/gdbsurgery/gdbsurgery.001.png"> <img src="../images/gdbsurgery/gdbsurgery.002.png"> <img src="../images/gdbProcess.png"> ### GDB Manual https://www.gnu.org/software/gdb/documentation/ ``` display(showET("Editor")) display(Markdown(FileCodeBox( file="../src/empty.S", lang="gas", title="<b>CODE: asm - The 'Empty' assembly program", h="100%", w="107em" ))) display(showBT("Build")) display(Markdown(FileCodeBox( file="empty_build.sh", lang="shell", title="<b>NOTES: on building empty", h="15em", w="100%"))) ``` The OS lets us have access to parts of the CPU and Memory via a Process. For everything else we will need to make calls to the OS Kernel functions to do. Let's use the standard tools to build a "empty" binary, create a process from it and use gdb to explore the parts of the machine that a Process lets us control. Eg use the debugger to read, write memory, explore the internals of the cpu and control it! setup ``` cd mkdir empty cd empty git init git remote add cs400 git@cs400-gitlab.bu.edu:jappavoo/empty.git echo "hello" > empty.S git add empty.S git commit -a -m "init: seeding repo" git push cs400 master ``` lets write some code!!!! `.fill` can be used to fill memory ;-) `repeat, size, value` ``` gas .fill 16, 1, 0x00 // .fill directive fills memory with n copies of values // (https://sourceware.org/binutils/docs/as/Fill.html#Fill) ``` ``` display(showDT("Debugger")) display(Markdown(FileCodeBox( file="empty_gdb.txt", lang="shell", title="", h="100%", w="100%"))) ``` <center> <img src="../images/popcnt.png" width="60%"> </center> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.041.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.042.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.043.png"> ## Intel Manuals Freely available online: https://software.intel.com/content/www/us/en/develop/articles/intel-sdm.html 1. Volume 1: Topics regarding general purpose programming - largely what we will focus on 2. 
Volume 2: Is a reference of all the instructions which we can consult as needed I usually grab the very large manual that combines volumes 1,2,3,4 https://software.intel.com/content/www/us/en/develop/articles/intel-sdm.html#combined Which include Volumes 3 and 4. These focus on the systems programming features necessary to write operating systems kernels. But these are not necessary for normal user application programming. ## Extra info about Intel <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.045.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.046.png"> <img src="../images/ASSEMBLY-VNA-THECPU/ASSEMBLY-VNA-THECPU.047.png">
github_jupyter
# 05a. Configuring *idact* on a cluster - local part ## Overview In this notebook and its remote counterpart `05b`, you will learn how to: - Synchronize the environment between *idact* and the cluster. - Initialize *idact* config on the cluster from a deployed notebook. ## Import idact It's recommended that *idact* is installed with *pip*. Alternatively, make sure the dependencies are installed: `pip install -r requirements.txt`, and add *idact* to path, for example: ``` import sys sys.path.append('../') ``` We will use a wildcard import for convenience: ``` from idact import * import bitmath ``` ## Load the cluster Let's load the environment and the cluster. Make sure to use your cluster name. ``` load_environment() cluster = show_cluster("hpc") cluster access_node = cluster.get_access_node() access_node.connect() ``` ## Synchronize the environment Synchronizing the environment with the cluster makes sure that your configuration matches, and also serves as a backup. It's a slightly smarter file copy. Pushing the environment will merge the local environment into the remote environment. This means most config fields will be overwritten, but machine-specific ones like `key` will be left unchanged. ``` push_environment(cluster) ``` The reverse action is pulling the environment. It will merge the remote environment into the local environment. ``` pull_environment(cluster) ``` The environment still needs to be saved to keep changes after pull: ``` save_environment() ``` ## Install *idact* on the cluster Our goal is to be able to work with *idact* on a notebook deployed on the cluster. We have already pushed our configuration, so the setup time will be minimal. Make sure `idact` is installed with the Python 3.5+ distribution you are already using for Jupyter and Dask. ``` python -m pip install idact ``` ## Initialize *idact* in a deployed notebook ### Deploy a notebook We need to deploy a notebook on a node. Let's allocate one. 
Make sure to adjust `--account`, same as in previous notebooks ``` nodes = cluster.allocate_nodes(nodes=1, cores=2, memory_per_node=bitmath.GiB(10), walltime=Walltime(minutes=20), native_args={ '--account': 'intdata' }) nodes nodes.wait() nodes nb = nodes[0].deploy_notebook() nb nb.open_in_browser() ``` ## Copy notebook `05b` to the cluster Drag and drop `05b-Configuring_idact_on_a_cluster_-_remote_part.ipynb` to the deployed notebook, and open it there. ## Follow the instructions in notebook `05b` Follow the instructions until you are referred back to this notebook. ## Cancel the Jupyter deployment (optional) ``` nb.cancel() ``` ## Cancel the allocation It's important to cancel an allocation if you're done with it early, in order to minimize the CPU time you are charged for. ``` nodes.running() nodes.cancel() nodes.running() ``` ## Next notebook In the next notebook we will deploy Jupyter and Dask, then access these deployments and perform simple computations from a notebook on the cluster.
github_jupyter
``` data = "../4_basic_image_recognition/data/mnist.pkl" train_data = MnistData(data, True, 'training') test_data = MnistData(data, False, 'test') ``` ## Dropout <br> ``` with tf.name_scope('input'): x = tf.placeholder(tf.float32, [None, 28*28], name='x-intput') y = tf.placeholder(tf.int64, [None], name='y-input') x_image = tf.reshape(x, [-1, 28, 28, 1], name='x-input-image') keep_prob = tf.placeholder(tf.float32, name='dropout-rate') # for tensorboard image visualization with tf.name_scope('input_reshape'): image_shaped_input = tf.reshape(x, [-1, 28, 28, 1]) # see x.shape with tf.name_scope('layers'): # add a new convolutional layer conv_1 = tf.layers.conv2d(inputs=x_image, filters=32, kernel_size=(5, 5), padding = 'same', activation=None, name= 'conv1') # add batch normalization conv_1 = tf.layers.batch_normalization(conv_1, training=True) # use activation function conv_1 = tf.nn.relu(conv_1) pool1 = tf.layers.max_pooling2d(inputs=conv_1, pool_size=(2, 2), strides=(2,2), name='pool1') conv_2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=(5, 5), padding = 'same', activation=None, name='conv2') conv_2 = tf.layers.batch_normalization(conv_2, training=True) conv_2 = tf.nn.relu(conv_2) pool2 = tf.layers.max_pooling2d(inputs=conv_2, pool_size=(2,2), strides=(2,2), name= 'pool2') # fc layer1 flatten = tf.layers.flatten(pool2, name= 'fc1_flatten') # dropout bunch of neurons flatten_dropout = tf.layers.dropout(flatten, rate=keep_prob, training=True) # fc layer2 y_ = tf.layers.dense(flatten_dropout, 10, name= 'fc2_flatten') with tf.name_scope('loss'): #sparse_softmax_cross_entropy include one_hot encoding loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_) with tf.name_scope('accuracy'): predict = tf.argmax(y_, 1) correct_prediction = tf.equal(predict, y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64)) with tf.name_scope('accuracy'): tf.summary.scalar('accuracy', accuracy) with tf.name_scope('train_op'): train_op = 
tf.train.AdamOptimizer(1e-4).minimize(loss) init = tf.global_variables_initializer() batch_size = 20 train_steps = 10000 test_steps = 50 n_batch = train_data._num_examples // batch_size data = "../4_basic_image_recognition/data/mnist.pkl" x_train, y_train, x_test, y_test = load(data) # train 10k: % with tf.Session() as sess: sess.run(init) # start training for epoch in range(10): for batch in range(n_batch): batch_data, batch_labels = train_data.next_batch(batch_size) sess.run([train_op],feed_dict={x: batch_data, y: batch_labels, keep_prob: 0.7}) ########## need more than 16GB memory ########## #test_acc = sess.run(accuracy, feed_dict={x: x_test, y: y_test, keep_prob: 1.0}) #train_acc = sess.run(accuracy, feed_dict={x: x_train, y: y_train, keep_prob: 1.0}) print("Iter " + str(epoch) + ",Testing Accuracy " + str(test_acc) + ",Training Accuracy " + str(train_acc)) ```
github_jupyter
``` import pandas as pd import numpy as np import os import glob import nltk.data import nltk, re, pprint from nltk import word_tokenize from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import linear_kernel from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from collections import Counter from sklearn.metrics.pairwise import linear_kernel import sqlite3 def connect_db(): return sqlite3.connect('/Users/sheldon/podcasts/test.db') def create_df_object(): conn = sqlite3.connect('/Users/sheldon/podcasts/test.db') df = pd.read_sql("select * from podcast",conn) return df df = create_df_object() stop = set(stopwords.words('english')) #df.head() import psycopg2 import sys from sqlalchemy import create_engine engine = create_engine('postgresql://sheldon@localhost:5432/sheldon') df1 = pd.read_sql("select * from podcasts",engine) df1.query("select *") def remove_stop_words(row): tokens = word_tokenize(str(row)) tokens = [w for w in tokens if not w in stop] tokens = [word for word in tokens if not "'" in word] return ' '.join(tokens) df['transcribed'] = df['transcribed'].apply(remove_stop_words) texts = df.transcribed.tolist() from collections import defaultdict frequency = defaultdict(int) for text in texts: for token in text: frequency[token] +=1 from gensim import corpora, models, similarities import gensim '''class MyCorpus(object): def __iter__(self): for doc in docs: yield dictionary.doc2bow(doc.split()) corpus_mem_friendly = MyCorpus() corpora.MmCorpus.serialize('corpus.mm',corpus_mem_friendly) dictionary.save('words.dict') df["review_text"] = df["transcribed"].map(lambda x: x.split(' ')) from gensim import corpora dictionary = corpora.Dictionary(df["review_text"]) ''' #load all the stuff dictionary = corpora.Dictionary.load('models/words.dict') corpus = corpora.MmCorpus.load('models/corpus.mm') tfidf = gensim.models.tfidfmodel.TfidfModel.load('models/tfidf_model') lsi = 
gensim.models.lsimodel.LsiModel.load('models/model.lsi') index = similarities.MatrixSimilarity.load('models/corpus.index') lda = gensim.models #tfidf.save('tfidf_model') lsi.save('models/model.lsi') #tfidf = models.TfidfModel(corpus) corpus_tfidf = tfidf[corpus] #lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=75) corpus_lsi = lsi[corpus_tfidf] def get_related_podcasts(index): def getKey(item): return item[1] listOfTopics = corpus = corpus_lsi[index] corpus = sorted(corpus, key=getKey, reverse=True)[0:10] related_df = pd.DataFrame(corpus,columns=['index','score']) final_df = pd.merge(related_df, df, on='index')[['index','episode','score','series']] return final_df get_related_podcasts(1) lsi.show_topics(num_words=100)[1] def getKey(item): return item[1] sorted(corpus_lsi[17], key=getKey,reverse=True) sorted(lsi.show_topic(8,topn=100), key=getKey, reverse=True) top_topics ``` ## Gettting Related Documents ``` corpus_lsi[1] def get_related_podcasts(index): def getKey(item): return item[1] corpus = corpus_lsi[index] corpus = sorted(corpus, key=getKey, reverse=True)[0:10] related_df = pd.DataFrame(corpus,columns=['index','score']) final_df = pd.merge(related_df, df, on='index')[['index','episode','score','series']] return final_df related_podcasts = list(get_related_podcasts(1)['index']) def get_topics_per_podcast(podcast_index): topic_ids = [i for i in sorted(corpus_lsi[podcast_index], key=getKey, reverse=True) if i[1] > 0.10] def get_topic_arrays(topic_ids): x = [] for id in topic_ids: list_of_words = sorted(lsi.show_topic(id[0], topn=5),key=getKey, reverse=True) z = [] for word in list_of_words: if word[1] > .05: z.append(word) x.append(z) return x topic_arrays = get_topic_arrays(topic_ids) return topic_arrays testing = [[related_podcasts[i],get_topics_per_podcast(related_podcasts[i])] for i in range(0, len(related_podcasts))] testing x = pd.DataFrame(testing, columns=['index','words']) x.words.ix[0] ``` ## Get Related documents based on query ``` 
def get_related_podcasts(query):
    """Return podcasts related to a free-text *query*, with their dominant topic words.

    Relies on module-level objects: ``dictionary`` (gensim Dictionary),
    ``lsi`` (LsiModel), ``index`` (MatrixSimilarity), ``corpus_lsi``
    (LSI-transformed corpus) and ``df`` (episode metadata frame).

    Returns a DataFrame with columns
    ``['words', 'index', 'score', 'episode', 'series']``.
    """
    # Project the query into LSI space and rank all documents by similarity.
    vec_box = dictionary.doc2bow(query.split())
    vec_lsi = lsi[vec_box]
    sims = index[vec_lsi]
    # Keep the ten most similar documents as (doc_index, similarity) pairs.
    sims = sorted(enumerate(sims), key=lambda item: -item[1])[0:10]
    related_df = pd.DataFrame(sims,columns=['index','score'])
    def get_related_podcasts_list(index):
        # Helper: top-10 topics of document ``index`` joined to metadata.
        def getKey(item):
            return item[1]
        corpus = corpus_lsi[index]
        corpus = sorted(corpus, key=getKey, reverse=True)[0:10]
        related_df = pd.DataFrame(corpus,columns=['index','score'])
        final_df = pd.merge(related_df, df, on='index')[['index','episode','score','series']]
        return final_df
    # NOTE(review): hard-coded document 1 here — looks like it should derive
    # from the query's ``sims`` instead; confirm intended behavior.
    related_podcasts = list(get_related_podcasts_list(1)['index'])
    def get_topics_per_podcast(podcast_index):
        # Topics with score > 0.10 for one podcast.  ``getKey`` here resolves
        # to the module-level helper defined earlier in the notebook.
        topic_ids = [i for i in sorted(corpus_lsi[podcast_index], key=getKey, reverse=True) if i[1] > 0.10]
        def get_topic_arrays(topic_ids):
            # For each retained topic, keep its top-5 words with weight > .05.
            x = []
            for id in topic_ids:
                list_of_words = sorted(lsi.show_topic(id[0], topn=5),key=getKey, reverse=True)
                z = []
                for word in list_of_words:
                    if word[1] > .05:
                        z.append(word)
                x.append(z)
            return x
        topic_arrays = get_topic_arrays(topic_ids)
        return topic_arrays
    # Pair each related podcast with its filtered topic-word arrays.
    topics_per_podcast = [[related_podcasts[i],get_topics_per_podcast(related_podcasts[i])] for i in range(0, len(related_podcasts))]
    other_df = pd.DataFrame(topics_per_podcast, columns=['topic_index','words'])
    # Join similarity scores to metadata, then align topic words positionally.
    # NOTE(review): this merge-by-position assumes row order survived the
    # earlier merges — worth confirming.
    final_df = pd.merge(related_df, df)
    test_final_df = pd.merge(other_df, final_df,left_index=True,right_index=True)[['words','index','score','episode','series']]
    return test_final_df
linear_kernel(tfidf_matrix_test, tfidf_matrix_train) query_similarities = query_similarities.argsort()[0][::-1] pod_dict = dict(zip(range(0, len(query_similarities)),query_similarities)) pod_dict = pd.DataFrame({'rank':pod_dict.keys()}, index=pod_dict.values()) #related_podcasts_df = pd.DataFrame.join(pod_dict, df, how='inner') #final_df = related_podcasts_df.sort_values('rank')[1:11][['rank','episode','series']] #related_podcasts = final_df['episode'] pod_dict ```
github_jupyter
# LAB 6: Serving baby weight predictions **Learning Objectives** 1. Deploy a web application that consumes your model service on Cloud AI Platform. ## Introduction **Verify that you have previously Trained your Keras model and Deployed it predicting with Keras model on Cloud AI Platform. If not, go back to [5a_train_keras_ai_platform_babyweight.ipynb](../solutions/5a_train_keras_ai_platform_babyweight.ipynb) and [5b_deploy_keras_ai_platform_babyweight.ipynb](../solutions/5b_deploy_keras_ai_platform_babyweight.ipynb) create them.** In the previous notebook, we deployed our model to CAIP. In this notebook, we'll make a [Flask app](https://palletsprojects.com/p/flask/) to show how our models can interact with a web application which could be deployed to [App Engine](https://cloud.google.com/appengine) with the [Flexible Environment](https://cloud.google.com/appengine/docs/flexible). ## Step 1: Review Flask App code in `application` folder Let's start with what our users will see. In the `application` folder, we have prebuilt the components for web application. In the templates folder, the <a href="application/templates/index.html">index.html</a> file is the visual GUI our users will make predictions with. It works by using an HTML [form](https://www.w3schools.com/html/html_forms.asp) to make a [POST request](https://www.w3schools.com/tags/ref_httpmethods.asp) to our server, passing along the values captured by the [input tags](https://www.w3schools.com/html/html_form_input_types.asp). The form will render a little strangely in the notebook since the notebook environment does not run javascript, nor do we have our web server up and running. Let's get to that! 
## Step 2: Set environment variables ``` %%bash # Check your project name export PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project Name is: "$PROJECT import os os.environ["BUCKET"] = "your-bucket-id-here" # Recommended: use your project name ``` ## Step 3: Complete application code in `application/main.py` We can set up our server with python using [Flask](https://flask.palletsprojects.com/en/1.1.x/quickstart/). Below, we've already built out most of the application for you. The `@app.route()` decorator defines a function to handle web reqests. Let's say our website is `www.example.com`. With how our `@app.route("/")` function is defined, our sever will render our <a href="application/templates/index.html">index.html</a> file when users go to `www.example.com/` (which is the default route for a website). So, when a user pings our server with `www.example.com/predict`, they would use `@app.route("/predict", methods=["POST"])` to make a prediction. The data that gets sent over the internet isn't a dictionary, but a string like below: `name1=value1&name2=value2` where `name` corresponds to the `name` on the input tag of our html form, and the value is what the user entered. Thankfully, Flask makes it easy to transform this string into a dictionary with `request.form.to_dict()`, but we still need to transform the data into a format our model expects. We've done this with the `gender2str` and the `plurality2str` utility functions. Ok! Let's set up a webserver to take in the form inputs, process them into features, and send these features to our model on Cloud AI Platform to generate predictions to serve to back to users. Fill in the **TODO** comments in <a href="application/main.py">application/main.py</a>. Give it a go first and review the solutions folder if you get stuck. **Note:** AppEngine test configurations have already been set for you in the file <a href="application/app.yaml">application/app.yaml</a>. 
Review [app.yaml](https://cloud.google.com/appengine/docs/standard/python/config/appref) documentation for additional configuration options. ## Step 4: Deploy application So how do we know that it works? We'll have to deploy our website and find out! Notebooks aren't made for website deployment, so we'll move our operation to the [Google Cloud Shell](https://console.cloud.google.com/home/dashboard?cloudshell=true). By default, the shell doesn't have Flask installed, so copy over the following command to install it. `python3 -m pip install --user Flask==0.12.1` Next, we'll need to copy our web app to the Cloud Shell. We can use [Google Cloud Storage](https://cloud.google.com/storage) as an inbetween. ``` %%bash gsutil -m rm -r gs://$BUCKET/baby_app gsutil -m cp -r application/ gs://$BUCKET/baby_app ``` Run the below cell, and copy the output into the [Google Cloud Shell](https://console.cloud.google.com/home/dashboard?cloudshell=true) ``` %%bash echo rm -r baby_app/ echo mkdir baby_app/ echo gsutil cp -r gs://$BUCKET/baby_app ./ echo python3 baby_app/main.py ``` ## Step 5: Use your website to generate predictions Time to play with the website! The cloud shell should now say something like `* Running on http://127.0.0.1:8080/ (Press CTRL+C to quit)`. Click on the `http` link to go to your shiny new website. Fill out the form and give it a minute or two to process its first prediction. After the first one, the rest of the predictions will be lightning fast. Did you get a prediction? If not, the Google Cloud Shell will spit out a stack trace of the error to help narrow it down. If yes, congratulations! Great job on bringing all of your work together for the users. ## Lab Summary In this lab, you deployed a simple Flask web form application on App Engine that takes inputs, transforms them into features, and sends them to a model service on Cloud AI Platform to generate and return predictions. Copyright 2019 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
# Simple BC task: - p = A, B - h = A [mask] B - [mask] = and - p = A, C - h = A [mask] B - [mask] = or #### In this notebook we will create a df with columns 'sentence1', 'sentence2', 'sentence2_masked' 'label', and a txt for training ``` import pandas as pd import numpy as np from inference.text_generation.vocab import male_names, female_names, cities_and_states, countries from inference.text_generation.util import get_new_item, get_n_different_items from inference.text_generation.util import vi, not_vi def and_entailment(person_list, place_list, n, vi_function, not_vi_function, mask_token="[MASK]"): """ $P:= pm V(x_1, y_1) , dots, pm V(x_n, y_n)$ $H:= pm V(x_i, y_i) and pm V(x_j, y_j)$ """ Subjects = get_n_different_items(person_list, n) people_O = [get_new_item(Subjects, person_list) for _ in range(n)] places = get_n_different_items(place_list, n) Objects = get_n_different_items(people_O + places, n) fs = np.random.choice([vi_function, not_vi_function], n) sentence1 = [f(x, y) for f, x, y in zip(fs, Subjects, Objects)] ids = get_n_different_items(range(len(Subjects)), 2) sentence2 = sentence1[ids[0]] + " and " + sentence1[ids[1]] sentence2_masked = sentence1[ids[0]] + " {} ".format(mask_token) + sentence1[ids[1]] sentence1 = ", ".join(sentence1) sentence1 += "." 
label = "and" people_O = list(set(Objects).intersection(people_O)) places = list(set(Objects).intersection(places)) people = ", ".join(Subjects + people_O) Subjects = ", ".join(Subjects) Objects = ", ".join(Objects) places = ", ".join(places) ids.sort() ids = ", ".join(map(lambda x: str(x), ids)) return sentence1, sentence2, sentence2_masked, label, Subjects, Objects, ids, people, places and_entailment(person_list=male_names, place_list=cities_and_states, n=2, vi_function=vi, not_vi_function=not_vi) def or_entailment(person_list, place_list, n, vi_function, not_vi_function, mask_token="[MASK]"): """ $P:= pm V(x_1, y_1) , dots, pm V(x_n, y_n)$ $H:= pm V(x_i, y_i) or pm V(x*, y*)$ """ Subjects = get_n_different_items(person_list, n) people_O = [get_new_item(Subjects, person_list) for _ in range(n)] places = get_n_different_items(place_list, n) Objects = get_n_different_items(people_O + places, n) fs = np.random.choice([vi_function, not_vi_function], n) sentence1 = [f(x, y) for f, x, y in zip(fs, Subjects, Objects)] fs2 = np.random.choice([vi_function, not_vi_function]) ids = get_n_different_items(range(len(Subjects)), 1) Subject2 = get_new_item(Subjects + people_O, person_list) Object2 = [get_new_item(Subjects + people_O + [Subject2], person_list)] place2 = get_new_item(places, place_list) Object2 += [place2] Object2 = np.random.choice(Object2) sentence2_l = [sentence1[ids[0]], fs2(Subject2, Object2)] np.random.shuffle(sentence2_l) sentence2 = sentence2_l[0] + " or " + sentence2_l[1] sentence2_masked = sentence2_l[0] + " {} ".format(mask_token) + sentence2_l[1] sentence1 = ", ".join(sentence1) sentence1 += "." 
label = "or" people_O = list(set(Objects).intersection(people_O)) people = ", ".join(Subjects + people_O + [Subject2]) places = list(set(Objects + [Object2]).intersection(places + [place2])) Subjects = ", ".join(Subjects + [Subject2]) Objects = ", ".join(Objects + [Object2]) places = ", ".join(places) ids.sort() ids = ", ".join(map(lambda x: str(x), ids)) return sentence1, sentence2, sentence2_masked, label, Subjects, Objects, ids, people, places or_entailment(person_list=male_names, place_list=cities_and_states, n=2, vi_function=vi, not_vi_function=not_vi) def create_csv(out_path, size, type1_instances_list, type2_instances_list, person_list, place_list, n, min_n): sentence1 = [] sentence2 = [] sentence2_masked = [] label = [] subjects = [] objects = [] ids = [] people = [] places = [] type1_examples = int(size / 2) type2_examples = int(size / 2) type1_len = len(type1_instances_list) type2_len = len(type2_instances_list) type1s = [int(type1_examples / type1_len) for _ in type1_instances_list] # noqa type2s = [int(type2_examples / type2_len) for _ in type2_instances_list] # noqa for i, f in zip(type1s, type1_instances_list): for _ in range(i): current_n = np.random.choice(range(min_n, n + 1)) s1, s2, s2_m, l, s, o, id_, pe, pl = f(person_list, place_list, current_n) # noqa sentence1.append(s1) sentence2.append(s2) sentence2_masked.append(s2_m) label.append(l) subjects.append(s) objects.append(o) ids.append(id_) people.append(pe) places.append(pl) for i, f in zip(type2s, type2_instances_list): for _ in range(i): current_n = np.random.choice(range(min_n, n + 1)) s1, s2, s2_m, l, s, o, id_, pe, pl = f(person_list, place_list, current_n) # noqa sentence1.append(s1) sentence2.append(s2) sentence2_masked.append(s2_m) label.append(l) subjects.append(s) objects.append(o) ids.append(id_) people.append(pe) places.append(pl) df = pd.DataFrame({"sentence1": sentence1, "sentence2": sentence2, "sentence2_masked": sentence2_masked, "label": label, "subjects": subjects, "objects": 
def create_train_txt(in_path, out_path):
    """Dump premise/hypothesis pairs from a CSV into a plain-text training file.

    Each row of the CSV at *in_path* contributes a three-line record to
    *out_path*: the ``sentence1`` value, the ``sentence2`` value, and a blank
    separator line.
    """
    frame = pd.read_csv(in_path)
    premises = frame["sentence1"].values
    hypotheses = frame["sentence2"].values
    with open(out_path, "w") as handle:
        for premise, hypothesis in zip(premises, hypotheses):
            # One record: premise line, hypothesis line, blank separator.
            handle.write(f"{premise}\n{hypothesis}\n\n")
github_jupyter
``` import numpy as np import pandas as pd from textwrap import wrap from matplotlib import pyplot as plt from sklearn import metrics import matplotlib.ticker as mticker import sys, os from hsbmpy import plot_topic_size, get_max_available_L directory="/home/fvalle/phd/datasets/gtex/log/10/" os.chdir(directory) sys.path.append('/home/fvalle/phd/master_thesis/') algorithm = 'lda' L = get_max_available_L(directory, algorithm=algorithm) df = pd.read_csv("mainTable.csv", index_col=[0]) ``` # topic size ``` for l in range(0,L+1): plot_topic_size(directory,l) ``` ## Topic O ``` df = pd.read_csv("mainTable.csv", index_col=0,header=0) df_mv=pd.DataFrame(data=[df.mean(1), df.var(1),df.apply(lambda x: len([x[x>0]])/float(len(x)), 1)], index=['average', 'var', 'O']).transpose() df_mv.head() for l in range(0,L+1): fig = plt.figure(figsize=(15,8)) ax = fig.subplots(1,2) candles = get_candles(directory,l,df_mv,ax[0]) candlestick2_ohlc(ax[0], candles['open'],candles['high'],candles['low'],candles['close'],width=0.6,colordown='b') ax[1].hist((np.array(candles['open'])+np.array(candles['close']))/2, weights=candles['size'], range=(-0.05,1.05), bins=10, histtype='step') ax[1].set_xlabel("$O_i", fontsize=18) plt.show() fig.savefig("%s/topic_Ocandles_level_%d.pdf"%(directory,l)) ``` # Geneontology ``` from geneontology import get_ontology_df, ensg_to_symbol from tableanalyser import get_symbol import gseapy as gs import importlib, geneontology,tableanalyser importlib.reload(geneontology) importlib.reload(tableanalyser) from geneontology import get_ontology_df, ensg_to_symbol from tableanalyser import get_symbol l=L df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv"%(directory,algorithm,algorithm,l)) df_symbols= pd.read_csv("https://www.genenames.org/cgi-bin/download/custom?col=gd_hgnc_id&col=gd_app_sym&col=gd_pub_ensembl_id&col=md_ensembl_id&col=md_eg_id&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit", index_col=[0], 
def get_sea():
    """Yield a gene symbol for every non-NaN gene id across all topics.

    Flattens the module-level ``df_topics`` frame and maps each gene id to a
    symbol via ``get_symbol`` (imported from ``tableanalyser``), skipping
    NaN cells.
    """
    # Flatten once instead of calling .values.ravel() twice as the original
    # did; the string comparison intentionally filters both float NaN and a
    # literal 'nan' string.
    flat = df_topics.values.ravel()
    mask = [str(cell) != 'nan' for cell in flat]
    for gene in flat[mask]:
        yield get_symbol(gene)
np.linspace(1,10,num=10)],np.arange(0,np.max(c)+5,(np.max(c)+5)/10), ls='--', lw=5, label="$\\alpha=0.05$") plt.xlabel('-log(P-value)', fontsize=16) plt.ylabel("number of topics") #plt.ylim(0,0.055) #plt.yscale('log') plt.legend(fontsize=16) fig.savefig("%s/pvaluescrosstopic(%d).png"%(directory,l)) fig = plt.figure(figsize=(20,10)) gos, goscounts = np.unique(topic_gos,return_counts=True) plt.barh(["\n".join(wrap(str(l).replace('_',' '),20)) for l in gos], goscounts) plt.yticks(fontsize=15) plt.show() fig.savefig("%s/pvaluecategories(%d).pdf"%(directory,l)) ``` # WGCNA vs hSBM ``` from scipy.stats import hypergeom hsbm_list = pd.read_csv("topsbm/topsbm_level_2_word-dist.csv", index_col=0).apply(pd.Series.idxmax,axis=1) hsbm_list.index = [g[:15] for g in hsbm_list.index] hsbm_list wgcna_list = pd.read_csv("wgcna/wgcna_level_0_word-dist.csv", index_col=0).apply(pd.Series.idxmax,axis=1) tm_list = pd.read_csv("tm/tm_level_0_word-dist.csv", index_col=0).apply(pd.Series.idxmax,axis=1) print(len(hsbm_list), len(tm_list), len(wgcna_list)) #to uniform hsbm_list = hsbm_list[hsbm_list.index.isin(wgcna_list.index)] wgcna_list = wgcna_list[wgcna_list.index.isin(hsbm_list.index)] tm_list = tm_list[tm_list.index.isin(hsbm_list.index)] list_1 = hsbm_list list_2 = wgcna_list population_size = len(list_1[list_1.index.isin(list_2.index)]) pop_successes = {module:len(list_2[list_2==module]) for module in list_2.unique()} sample_sizes = {topic:len(list_1[list_1==topic]) for topic in list_1.unique()} num_successes = pd.DataFrame(index=list_1.unique(), columns=list_2.unique()).fillna(0) for g in list_2.index: if g in list_1: num_successes.at[list_1[g],list_2[g]]+=1 df_cmap=pd.DataFrame(index=list_1.unique(), columns=list_2.unique()).fillna(0.5) for module in df_cmap.columns: for topic in df_cmap.index: x = num_successes.at[topic,module].astype(int) # number of successes M = population_size # pop size k = pop_successes[module] # successes in pop N = sample_sizes[topic] # sample size pval = 
hypergeom.sf(x-1, M, k, N) df_cmap.at[topic,module]=-np.log10(float(pval)) import seaborn as sns sns.set_context('paper') df_cmap[df_cmap<3]=0 #df_cmap = df_cmap.sort_values(by=[c for c in df_cmap.columns], axis=0, ascending=True) #create a color palette with the same number of colors as unique values in the Source column network_pal = sns.color_palette('husl',n_colors=len(df_cmap.columns)) #Create a dictionary where the key is the category and the values are the #colors from the palette we just created network_lut = dict(zip(df_cmap.columns, network_pal)) network_col = df_cmap.columns.map(network_lut) #Create a dictionary where the key is the category and the values are the #colors from the palette we just created network_lut = dict(zip(df_cmap.columns, network_pal)) network_col = df_cmap.columns.map(network_lut) fig = plt.figure() cm = sns.clustermap(df_cmap, row_cluster=False, col_cluster=False, metric='euclidean', vmin=0, cmap='Blues_r', col_colors=network_col) cm.fig.suptitle('$-Log(Pvalue)$') cm.ax_heatmap.set_title('Tissues') cm.fig.savefig("topics_hsbm_wgcna.pdf") from sklearn.metrics import v_measure_score print("hsbm - wgcna %.3f"%v_measure_score(hsbm_list, wgcna_list)) print("hsbm - tm %.3f"%v_measure_score(hsbm_list.reindex_like(tm_list), tm_list)) print("tm - wgcna %.3f"%v_measure_score(tm_list, wgcna_list.reindex_like(tm_list))) ```
github_jupyter
# Create standard Kraken/Bracken databases The aim of this notebook is to create standard `Kraken 2` and `Bracken 2` databases of RefSeq *Archaea* and *Bacteria* genomes. # Init ``` import os ``` # Var ``` # Conda env Bracken_env = "Bracken2" # Scripts dir scripts_dir = "/ebio/abt3_projects/small_projects/jdelacuesta/DBs_benchmark/scripts" SGE_dir = os.path.join(scripts_dir, "SGE_out") if not os.path.exists(scripts_dir): os.makedirs(scripts_dir) os.makedirs(SGE_dir) # Kraken dir kraken_dbs = "/tmp/global/jdelacuesta/standard_DBs/kraken" if not os.path.exists(kraken_dbs): os.makedirs(kraken_dbs) ``` # Build standard databases ## Kraken ``` # Download taxonomy taxonomy_cmd = "kraken2-build --download-taxonomy --db {k_db}" taxonomy_job = taxonomy_cmd.format(k_db = kraken_dbs) taxonomy_job = 'bash -c "source activate {}; {}"'.format(Bracken_env, taxonomy_job) print(taxonomy_job) # If there are errors downloading the taxonomy # Use already dowloaded files NCBI_taxonomy = "/ebio/abt3_projects/databases/Kraken/K2_Progenomes/Kraken/taxonomy" rsync_cmd = "rsync -ah {0} {1}".format(NCBI_taxonomy, kraken_dbs) # Only sync if there is no taxonomy folder if not os.path.exists(os.path.join(kraken_dbs, "taxonomy")): print(rsync_cmd) !$rsync_cmd # Download Kraken 2 databases download_cmd = "kraken2-build --download-library {domain} --db {k_db}" # Download Bacteria DB download_bacteria = download_cmd.format(domain = "bacteria", k_db = kraken_dbs) download_bacteria = 'bash -c "source activate {}; {}"'.format(Bracken_env, download_bacteria) print(download_bacteria) # Download Archaea DB download_archaea = download_cmd.format(domain = "archaea", k_db = kraken_dbs) download_archaea = 'bash -c "source activate {}; {}"'.format(Bracken_env, download_archaea) print(download_archaea) # Build the database kraken_build = """#!/bin/bash #$ -N {name} #$ -pe parallel {cpu} #$ -l h_vmem=10G #$ -l h_rt=200:0:0 #$ -o {SGE_dir} #$ -j y #$ -wd {workdir} #$ -m ea #$ -M jdelacuesta@tuebingen.mpg.de 
export PATH='/ebio/abt3_projects/software/miniconda3_gt4.4/envs/Bracken2/bin':$PATH /ebio/abt3_projects/software/miniconda3_gt4.4/envs/Bracken/bin/kmer2read_distr kraken2-build --build --db {k_db} --threads {cpu} """ job_name = "kraken2_build.sh" kraken_build_file = os.path.join(scripts_dir, job_name) script_build = kraken_build.format(name = job_name, workdir = kraken_dbs, cpu = 30, k_db = kraken_dbs, SGE_dir = SGE_dir) print(script_build) with open(kraken_build_file, "w") as f: f.write(script_build) !qsub $kraken_build_file ``` ## Bracken ``` # Create output directories dir_100mer = os.path.join(kraken_dbs, "100mers") dir_150mer = os.path.join(kraken_dbs, "150mers") if not os.path.exists(dir_100mer): os.makedirs(dir_100mer) os.makedirs(dir_150mer) # Create symlinks to kraken files k2d_files = ["hash.k2d", "opts.k2d", "taxo.k2d"] for k2d in k2d_files: file_path = os.path.join("..", k2d) link_100mer_path = os.path.join(dir_100mer, k2d) link_150mer_path = os.path.join(dir_150mer, k2d) symlink_cmd = 'ln -s {file} {link}' symlink_100mer_job = symlink_cmd.format(file = file_path, link = link_100mer_path) symlink_150mer_job = symlink_cmd.format(file = file_path, link = link_150mer_path) print(symlink_100mer_job, symlink_150mer_job) !$symlink_100mer_job; $symlink_150mer_job # Build the database bracken_build = """#!/bin/bash #$ -N {name} #$ -pe parallel {cpu} #$ -l h_vmem=10G #$ -l h_rt=200:0:0 #$ -o {SGE_dir} #$ -j y #$ -wd {workdir} #$ -m ea #$ -M jdelacuesta@tuebingen.mpg.de export PATH='/ebio/abt3_projects/software/miniconda3_gt4.4/envs/Bracken2/bin':$PATH # Create database.kraken kraken2 --db={k_db} --threads={cpu} <( find -L {k_db} \( -name "*.fna" -o -name "*.fa" -o -name "*.fasta" \) -exec cat {{}} + ) > {kraken_db_file} # database 100mer # kmer2read {kmer2read} --seqid2taxid {k_db}/seqid2taxid.map \ --taxonomy {k_db}/taxonomy \ --kraken {kraken_db_file} \ --output {output_100mer_2read} \ -k 35 \ -l 100 \ -t {cpu} # kmer distribution generate_kmer_distribution.py 
-i {output_100mer_2read} -o {output_100mer_distr} # database 150mer # kmer2read {kmer2read} --seqid2taxid {k_db}/seqid2taxid.map \ --taxonomy {k_db}/taxonomy \ --kraken {kraken_db_file} \ --output {output_150mer_2read} \ -k 35 \ -l 150 \ -t {cpu} # kmer distribution generate_kmer_distribution.py -i {output_150mer_2read} -o {output_150mer_distr} """ # Files and paths kmer2read_path = "/ebio/abt3_projects/software/miniconda3_gt4.4/envs/Bracken/bin/kmer2read_distr" kraken_database_file = os.path.join(kraken_dbs, "database.kraken") # 100mers k2read_100mer = os.path.join(dir_100mer, "database100mers.kraken") k2distr_100mer = os.path.join(dir_100mer, "database100mers.kmer_distrib") # 150mers k2read_150mer = os.path.join(dir_150mer, "database150mers.kraken") k2distr_150mer = os.path.join(dir_150mer, "database150mers.kmer_distrib") job_name = "bracken2_build.sh" bracken_build_file = os.path.join(scripts_dir, job_name) script_build = bracken_build.format(name = job_name, workdir = kraken_dbs, cpu = 30, k_db = kraken_dbs, SGE_dir = SGE_dir, kraken_db_file = kraken_database_file, kmer2read = kmer2read_path, output_100mer_2read = k2read_100mer, output_100mer_distr = k2distr_100mer, output_150mer_2read = k2read_150mer, output_150mer_distr = k2distr_150mer) print(script_build) with open(bracken_build_file, "w") as f: f.write(script_build) !qsub $bracken_build_file ``` # Move database to databases folder ``` databases_project = os.path.join("/ebio/abt3_projects/databases/Kraken/K2_Standard") if not os.path.exists(databases_project): os.makedirs(databases_project) rsync_kraken = "rsync -ah --bwlimit 300m {0}/ {1}".format(kraken_dbs, databases_project) print(rsync_kraken) #!$rsync_kraken ``` # Session info ``` !conda list -n Bracken2 ```
github_jupyter
``` # #colabを使う方はこちらを使用ください。 # !pip install torch==0.4.1 # !pip install torchvision==0.2.1 # !pip install numpy==1.14.6 # !pip install matplotlib==2.1.2 # !pip install pillow==5.0.0 # !pip install opencv-python==3.4.3.18 #執筆時点で存在するcolab固有のエラーを回避 from PIL import Image def register_extension(id, extension): Image.EXTENSION[extension.lower()] = id.upper() Image.register_extension = register_extension def register_extensions(id, extensions): for extension in extensions: register_extension(id, extension) Image.register_extensions = register_extensions # #colabを使う方はこちらを使用ください。 # #Google Driveにマウント # from google.colab import drive # drive.mount('/content/gdrive') # #colabを使う方はこちらを使用ください。※変更の必要がある場合はパスを変更してください。 # cd /content/gdrive/My Drive/Colab Notebooks/pytorch_handbook/chapter6/ # #colabを使う方はこちらを使用ください。 # !ls # パッケージのインポート import os import random import numpy as np import torch.nn as nn import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils from net import weights_init, Generator, Discriminator # 設定 workers = 2 batch_size=50 nz = 100 nch_g = 64 nch_d = 64 n_epoch = 200 lr = 0.0002 beta1 = 0.5 outf = './result_lsgan' display_interval = 100 # 保存先ディレクトリを作成 try: os.makedirs(outf, exist_ok=True) except OSError as error: print(error) pass # 乱数のシード(種)を固定 random.seed(0) np.random.seed(0) torch.manual_seed(0) # STL-10のトレーニングデータセットとテストデータセットを読み込む trainset = dset.STL10(root='./dataset/stl10_root', download=True, split='train+unlabeled', transform=transforms.Compose([ transforms.RandomResizedCrop(64, scale=(88/96, 1.0), ratio=(1., 1.)), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) # ラベルを使用しないのでラベルなしを混在した'train+unlabeled'を読み込む testset = dset.STL10(root='./dataset/stl10_root', download=True, split='test', 
transform=transforms.Compose([ transforms.RandomResizedCrop(64, scale=(88/96, 1.0), ratio=(1., 1.)), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) dataset = trainset + testset # STL-10のトレーニングデータセットとテストデータセットを合わせて訓練データとする # 訓練データをセットしたデータローダーを作成する dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=int(workers)) # 学習に使用するデバイスを得る。可能ならGPUを使用する device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print('device:', device) # 生成器G。ランダムベクトルから贋作画像を生成する netG = Generator(nz=nz, nch_g=nch_g).to(device) netG.apply(weights_init) # weights_init関数で初期化 print(netG) # 識別器D。画像が、元画像か贋作画像かを識別する netD = Discriminator(nch_d=nch_d).to(device) netD.apply(weights_init) print(netD) criterion = nn.MSELoss() # 損失関数は平均二乗誤差損失 # オプティマイザ−のセットアップ optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999), weight_decay=1e-5) # 識別器D用 optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999), weight_decay=1e-5) # 生成器G用 fixed_noise = torch.randn(batch_size, nz, 1, 1, device=device) # 確認用の固定したノイズ # 学習のループ for epoch in range(n_epoch): for itr, data in enumerate(dataloader): real_image = data[0].to(device) # 元画像 sample_size = real_image.size(0) # 画像枚数 noise = torch.randn(sample_size, nz, 1, 1, device=device) # 正規分布からノイズを生成 real_target = torch.full((sample_size,), 1., device=device) # 元画像に対する識別信号の目標値「1」 fake_target = torch.full((sample_size,), 0., device=device) # 贋作画像に対する識別信号の目標値「0」 ############################ # 識別器Dの更新 ########################### netD.zero_grad() # 勾配の初期化 output = netD(real_image) # 識別器Dで元画像に対する識別信号を出力 errD_real = criterion(output, real_target) # 元画像に対する識別信号の損失値 D_x = output.mean().item() fake_image = netG(noise) # 生成器Gでノイズから贋作画像を生成 output = netD(fake_image.detach()) # 識別器Dで元画像に対する識別信号を出力 errD_fake = criterion(output, fake_target) # 
贋作画像に対する識別信号の損失値 D_G_z1 = output.mean().item() errD = errD_real + errD_fake # 識別器Dの全体の損失 errD.backward() # 誤差逆伝播 optimizerD.step() # Dのパラメーターを更新 ############################ # 生成器Gの更新 ########################### netG.zero_grad() # 勾配の初期化 output = netD(fake_image) # 更新した識別器Dで改めて贋作画像に対する識別信号を出力 errG = criterion(output, real_target) # 生成器Gの損失値。Dに贋作画像を元画像と誤認させたいため目標値は「1」 errG.backward() # 誤差逆伝播 D_G_z2 = output.mean().item() optimizerG.step() # Gのパラメータを更新 if itr % display_interval == 0: print('[{}/{}][{}/{}] Loss_D: {:.3f} Loss_G: {:.3f} D(x): {:.3f} D(G(z)): {:.3f}/{:.3f}' .format(epoch + 1, n_epoch, itr + 1, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2)) if epoch == 0 and itr == 0: # 初回に元画像を保存する vutils.save_image(real_image, '{}/real_samples.png'.format(outf), normalize=True, nrow=10) ############################ # 確認用画像の生成 ############################ fake_image = netG(fixed_noise) # 1エポック終了ごとに確認用の贋作画像を生成する vutils.save_image(fake_image.detach(), '{}/fake_samples_epoch_{:03d}.png'.format(outf, epoch + 1), normalize=True, nrow=10) ############################ # モデルの保存 ############################ if (epoch + 1) % 50 == 0: # 50エポックごとにモデルを保存する torch.save(netG.state_dict(), '{}/netG_epoch_{}.pth'.format(outf, epoch + 1)) torch.save(netD.state_dict(), '{}/netD_epoch_{}.pth'.format(outf, epoch + 1)) ```
github_jupyter
# Python для анализа данных *Татьяна Рогович, НИУ ВШЭ* ## Библиотека pandas. Упражнения. ``` import pandas as pd %matplotlib inline import seaborn as sns from scipy.stats import norm ``` Будем работать с датасетом Pima Indian Diabetes - это набор данных из Национального института диабета, болезней органов пищеварения и почек. Целью набора данных является диагностическое прогнозирование наличия диабета у пациента. Несколько ограничений были наложены на выбор этих экземпляров из большой базы данных. В частности, все пациенты здесь - женщины в возрасте от 21 года, индийского происхождения. ``` data = pd.read_csv('https://raw.githubusercontent.com/pileyan/Data/master/data/pima-indians-diabetes.csv') data.head(10) ``` Описание данных: - __Pregnancies__ - данная единица отображает количество беременностей, единицы измерения - целые числа от 0 до N. Тип переменной - количественная, дискретная. - __Glucose__ - данная единица отображает уровень глюкозы в крови, единицы измерения - целые числа. Тип переменной - количественная, дискретная. - __BloodPressure__ - данная единица отображает артериальное давление, единицы измерения - миллиметры р/с, целые числа. Тип переменной - количественная, дискретная. - __SkinThickness__ - данная единица отображает обхват трицепса в миллиметрах, целые числа. Тип переменной - количественная, дискретная. - __Insulin__ - данная единица отображает уровень инсулина в крови, целые числа. Тип переменной - количественная, дискретная. - __BMI__ - данная единица отображает индекс массы тела. Тип переменной - количественная, непрерывная. - __DiabetesPedigreeFunction__ - данная единица отображает риск наследственного диабета в зависимости наличия диабета у родственников. Выражается десятичной дробью от 0 до 1. Тип переменной - количественная, непрерывная. - __Age__ - данная единица отражает возраст в целых числах. Тип переменной - количественная, дискретная. 
- __Class__ - данная единица отражает наличие диабета у субъекта, выражена 0(здоров) или 1(болен). Тип переменной - категориальная, бинарная. __Задание 1.__ Как вы видите, в данных много пропусков (NaN). Посчитайте количество пропусков в каждом из столбцов. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 2.__ Замените все пропуски дискретных признаков соответствующими медианами, непрерывных признаков - средними значениями. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 3.__ Вычислите основные статистики (минимум, максимум, среднее, дисперсию, квантили) для всех столбцов. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 4.__ У скольких женщин старше 50 лет обнаружен диабет? ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 5.__ Найдите трех женщин с наибольшим числом беременностей. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 6.__ Сколько женщин возраста между 30 и 40 успело родить 3 или более детей? ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 7.__ Нормальным кровяным давлением будем считать давление в диапазоне [80-89]. У какого процента женщин давление нормальное? ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 8.__ Считается, что BMI >= 30 - это признак ожирения. У скольких женщин с признаками ожирения кровяное давление выше среднего? ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 9.__ Сравните средние значения для признаков __Glucose, BloodPressure, Insulin__ среди тех, у кого обнаружен диабет, и тех, у кого его нет. ``` # Glucose ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 # BloodPressure ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 # Insulin ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 10.__ Постройте гистограммы для любых двух количественных признаков. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 11.__ Постройте круговую диаграмму для признака __Class__. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 12.__ Постройте распределения для признаков __Age__ и __BloodPressure__ и сравните оба распределения с нормальным. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 13.__ Постройте следующий график: среднее число больных диабетом в зависимости от числа беременностей. 
``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 14.__ Добавьте новый бинарный признак: __wasPregnant__ $\in$ {0,1} - была женщина беременна (1) или нет (0) ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 15.__ Сравните процент больных диабетом среди женщин, которые были беременны и не были. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 16.__ Добавьте новый категориальный признак __bodyType__ на основе столбца BMI: __BMI Categories:__ Underweight = <18.5 Normal weight = 18.5–24.9 Overweight = 25–29.9 Obesity = BMI of 30 or greater Признак должен принимать значения Underweight, Normal weight, Overweight и Obesity. ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ``` __Задание 17.__ Будем считать "здоровыми" тех, у кого нормальный вес и кровяное давление. Какой процент "здоровых" женщин больны диабетом? ``` # ( ͡° ͜ʖ ͡°)づ ━━ ✫・*。 ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Name" data-toc-modified-id="Name-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Name</a></span></li><li><span><a href="#Search" data-toc-modified-id="Search-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Search</a></span><ul class="toc-item"><li><span><a href="#Load-Cached-Results" data-toc-modified-id="Load-Cached-Results-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Load Cached Results</a></span></li><li><span><a href="#Build-Model-From-Google-Images" data-toc-modified-id="Build-Model-From-Google-Images-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Build Model From Google Images</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Gender-cross-validation" data-toc-modified-id="Gender-cross-validation-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Gender cross validation</a></span></li><li><span><a href="#Face-Sizes" data-toc-modified-id="Face-Sizes-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Face Sizes</a></span></li><li><span><a href="#Screen-Time-Across-All-Shows" data-toc-modified-id="Screen-Time-Across-All-Shows-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Screen Time Across All Shows</a></span></li><li><span><a href="#Appearances-on-a-Single-Show" data-toc-modified-id="Appearances-on-a-Single-Show-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Appearances on a Single Show</a></span></li></ul></li><li><span><a href="#Persist-to-Cloud" data-toc-modified-id="Persist-to-Cloud-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Persist to Cloud</a></span><ul class="toc-item"><li><span><a href="#Save-Model-to-Google-Cloud-Storage" data-toc-modified-id="Save-Model-to-Google-Cloud-Storage-4.1"><span 
class="toc-item-num">4.1&nbsp;&nbsp;</span>Save Model to Google Cloud Storage</a></span></li><li><span><a href="#Save-Labels-to-DB" data-toc-modified-id="Save-Labels-to-DB-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Save Labels to DB</a></span><ul class="toc-item"><li><span><a href="#Commit-the-person-and-labeler" data-toc-modified-id="Commit-the-person-and-labeler-4.2.1"><span class="toc-item-num">4.2.1&nbsp;&nbsp;</span>Commit the person and labeler</a></span></li><li><span><a href="#Commit-the-FaceIdentity-labels" data-toc-modified-id="Commit-the-FaceIdentity-labels-4.2.2"><span class="toc-item-num">4.2.2&nbsp;&nbsp;</span>Commit the FaceIdentity labels</a></span></li></ul></li></ul></li></ul></div> ``` from esper.prelude import * from esper.identity import * from esper import embed_google_images ``` # Name Please add the person's name and their expected gender below (Male/Female). ``` name = 'Christi Paul' gender = 'Female' ``` # Search ## Load Cached Results Reads cached identity model from local disk. Run this if the person has been labelled before and you only wish to regenerate the graphs. Otherwise, if you have never created a model for this person, please see the next section. ``` assert name != '' results = FaceIdentityModel.load(name=name) imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']], cols=10)) plt.show() plot_precision_and_cdf(results) ``` ## Build Model From Google Images Run this section if you do not have a cached model and precision curve estimates. This section will grab images using Google Image Search and score each of the faces in the dataset. We will interactively build the precision vs score curve. It is important that the images that you select are accurate. If you make a mistake, rerun the cell below. 
``` assert name != '' # Grab face images from Google img_dir = embed_google_images.fetch_images(name) # If the images returned are not satisfactory, rerun the above with extra params: # query_extras='' # additional keywords to add to search # force=True # ignore cached images face_imgs = load_and_select_faces_from_images(img_dir) face_embs = embed_google_images.embed_images(face_imgs) assert(len(face_embs) == len(face_imgs)) reference_imgs = tile_imgs([cv2.resize(x[0], (200, 200)) for x in face_imgs if x], cols=10) def show_reference_imgs(): print('User selected reference images for {}.'.format(name)) imshow(reference_imgs) plt.show() show_reference_imgs() # Score all of the faces in the dataset (this can take a minute) face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs) precision_model = PrecisionModel(face_ids_by_bucket) ``` Now we will validate which of the images in the dataset are of the target identity. __Hover over with mouse and press S to select a face. Press F to expand the frame.__ ``` show_reference_imgs() print(('Mark all images that ARE NOT {}. Thumbnails are ordered by DESCENDING distance ' 'to your selected images. (The first page is more likely to have non "{}" images.) ' 'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON ' 'BEFORE PROCEEDING.)').format( name, name, precision_model.get_lower_count())) lower_widget = precision_model.get_lower_widget() lower_widget show_reference_imgs() print(('Mark all images that ARE {}. Thumbnails are ordered by ASCENDING distance ' 'to your selected images. (The first page is more likely to have "{}" images.) ' 'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON ' 'BEFORE PROCEEDING.)').format( name, name, precision_model.get_lower_count())) upper_widget = precision_model.get_upper_widget() upper_widget ``` Run the following cell after labelling to compute the precision curve. Do not forget to re-enable jupyter shortcuts. 
``` # Compute the precision from the selections lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected) upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected) precision_by_bucket = {**lower_precision, **upper_precision} results = FaceIdentityModel( name=name, face_ids_by_bucket=face_ids_by_bucket, face_ids_to_score=face_ids_to_score, precision_by_bucket=precision_by_bucket, model_params={ 'images': list(zip(face_embs, face_imgs)) } ) plot_precision_and_cdf(results) ``` The next cell persists the model locally. ``` results.save() ``` # Analysis ## Gender cross validation Situations where the identity model disagrees with the gender classifier may be cause for alarm. We would like to check that instances of the person have the expected gender as a sanity check. This section shows the breakdown of the identity instances and their labels from the gender classifier. ``` gender_breakdown = compute_gender_breakdown(results) print('Expected counts by gender:') for k, v in gender_breakdown.items(): print(' {} : {}'.format(k, int(v))) print() print('Percentage by gender:') denominator = sum(v for v in gender_breakdown.values()) for k, v in gender_breakdown.items(): print(' {} : {:0.1f}%'.format(k, 100 * v / denominator)) print() ``` Situations where the identity detector returns high confidence, but where the gender is not the expected gender indicate either an error on the part of the identity detector or the gender detector. The following visualization shows randomly sampled images, where the identity detector returns high confidence, grouped by the gender label. ``` high_probability_threshold = 0.8 show_gender_examples(results, high_probability_threshold) ``` ## Face Sizes Faces shown on-screen vary in size. For a person such as a host, they may be shown in a full body shot or as a face in a box. Faces in the background or those part of side graphics might be smaller than the rest. 
When calculating screentime for a person, we would like to know whether the results represent the time the person was featured as opposed to merely in the background or as a tiny thumbnail in some graphic. The next cell plots the distribution of face sizes. Some possible anomalies include there only being very small faces or large faces. ``` plot_histogram_of_face_sizes(results) ``` The histogram above shows the distribution of face sizes, but not how those sizes occur in the dataset. For instance, one might ask why some faces are so large or whether the small faces are actually errors. The following cell groups example faces, which are of the target identity with probability, by their sizes in terms of screen area. ``` high_probability_threshold = 0.8 show_faces_by_size(results, high_probability_threshold, n=10) ``` ## Screen Time Across All Shows One question that we might ask about a person is whether they received a significantly different amount of screentime on different shows. The following section visualizes the amount of screentime by show in total minutes and also in proportion of the show's total time. For a celebrity or political figure such as Donald Trump, we would expect significant screentime on many shows. For a show host such as Wolf Blitzer, we expect that the screentime be high for shows hosted by Wolf Blitzer. ``` screen_time_by_show = get_screen_time_by_show(results) plot_screen_time_by_show(name, screen_time_by_show) ``` ## Appearances on a Single Show For people such as hosts, we would like to examine in greater detail the screen time allotted for a single show. First, fill in a show below. ``` show_name = 'New Day' # Compute the screen time for each video of the show screen_time_by_video_id = get_screen_time_by_video(results, show_name) ``` One question we might ask about a host is "how long they are shown on screen" for an episode.
Likewise, we might also ask for how many episodes the host is not present due to being on vacation or on assignment elsewhere. The following cell plots a histogram of the distribution of the length of the person's appearances in videos of the chosen show. ``` plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id) ``` For a host, we expect screentime over time to be consistent as long as the person remains a host. For figures such as Hillary Clinton, we expect the screentime to track events in the real world such as the lead-up to the 2016 election and then to drop afterwards. The following cell plots a time series of the person's screentime over time. Each dot is a video of the chosen show. Red Xs are videos for which the face detector did not run. ``` plot_screentime_over_time(name, show_name, screen_time_by_video_id) ``` We hypothesized that a host is more likely to appear at the beginning of a video and then also appear throughout the video. The following plot visualizes the distribution of shot beginning times for videos of the show. ``` plot_distribution_of_appearance_times_by_video(results, show_name) ``` In section 3.3, we see that some shows may have much larger variance in the screen time estimates than others. This may be because a host or frequent guest appears similar to the target identity. Alternatively, the images of the identity may be consistently low quality, leading to lower scores. The next cell plots a histogram of the probabilities for faces in a show. ``` plot_distribution_of_identity_probabilities(results, show_name) ``` # Persist to Cloud The remaining code in this notebook uploads the built identity model to Google Cloud Storage and adds the FaceIdentity labels to the database. ## Save Model to Google Cloud Storage ``` gcs_model_path = results.save_to_gcs() ``` To ensure that the model stored to Google Cloud is valid, we load it and print the precision and cdf curve below.
``` gcs_results = FaceIdentityModel.load_from_gcs(name=name) imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in gcs_results.model_params['images']], cols=10)) plt.show() plot_precision_and_cdf(gcs_results) ``` ## Save Labels to DB If you are satisfied with the model, we can commit the labels to the database. ``` from django.core.exceptions import ObjectDoesNotExist def standardize_name(name): return name.lower() person_type = ThingType.objects.get(name='person') try: person = Thing.objects.get(name=standardize_name(name), type=person_type) print('Found person:', person.name) except ObjectDoesNotExist: person = Thing(name=standardize_name(name), type=person_type) print('Creating person:', person.name) labeler = Labeler(name='face-identity-{}'.format(person.name), data_path=gcs_model_path) ``` ### Commit the person and labeler The labeler and person have been created but not set saved to the database. If a person was created, please make sure that the name is correct before saving. ``` person.save() labeler.save() ``` ### Commit the FaceIdentity labels Now, we are ready to add the labels to the database. We will create a FaceIdentity for each face whose probability exceeds the minimum threshold. ``` commit_face_identities_to_db(results, person, labeler, min_threshold=0.001) print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count())) ```
github_jupyter
# French TVA Numbers ## Introduction The function `clean_fr_tva()` cleans a column containing French TVA number (TVA) strings, and standardizes them in a given format. The function `validate_fr_tva()` validates either a single TVA string, a column of TVA strings or a DataFrame of TVA strings, returning `True` if the value is valid, and `False` otherwise. TVA strings can be converted to the following formats via the `output_format` parameter: * `compact`: only number strings without any separators or whitespace, like "40303265045" * `standard`: TVA strings with proper whitespace in the proper places. Note that in the case of TVA, the compact format is the same as the standard one. Invalid parsing is handled with the `errors` parameter: * `coerce` (default): invalid parsing will be set to NaN * `ignore`: invalid parsing will return the input * `raise`: invalid parsing will raise an exception The following sections demonstrate the functionality of `clean_fr_tva()` and `validate_fr_tva()`. ### An example dataset containing TVA strings ``` import pandas as pd import numpy as np df = pd.DataFrame( { "tva": [ 'Fr 40 303 265 045', '84 323 140 391', 'BE 428759497', 'BE431150351', "002 724 334", "hello", np.nan, "NULL", ], "address": [ "123 Pine Ave.", "main st", "1234 west main heights 57033", "apt 1 789 s maple rd manhattan", "robie house, 789 north main street", "1111 S Figueroa St, Los Angeles, CA 90015", "(staples center) 1111 S Figueroa St, Los Angeles", "hello", ] } ) df ``` ## 1. Default `clean_fr_tva` By default, `clean_fr_tva` will clean tva strings and output them in the standard format with proper separators. ``` from dataprep.clean import clean_fr_tva clean_fr_tva(df, column = "tva") ``` ## 2. Output formats This section demonstrates the output parameter. ### `standard` (default) ``` clean_fr_tva(df, column = "tva", output_format="standard") ``` ### `compact` ``` clean_fr_tva(df, column = "tva", output_format="compact") ``` ## 3. 
`inplace` parameter This deletes the given column from the returned DataFrame. A new column containing cleaned TVA strings is added with a title in the format `"{original title}_clean"`. ``` clean_fr_tva(df, column="tva", inplace=True) ``` ## 4. `errors` parameter ### `coerce` (default) ``` clean_fr_tva(df, "tva", errors="coerce") ``` ### `ignore` ``` clean_fr_tva(df, "tva", errors="ignore") ``` ## 5. `validate_fr_tva()` `validate_fr_tva()` returns `True` when the input is a valid TVA. Otherwise it returns `False`. The input of `validate_fr_tva()` can be a string, a Pandas DataSeries, a Dask DataSeries, a Pandas DataFrame and a dask DataFrame. When the input is a string, a Pandas DataSeries or a Dask DataSeries, the user doesn't need to specify a column name to be validated. When the input is a Pandas DataFrame or a dask DataFrame, the user may either specify or not specify a column name to be validated. If the user specifies a column name, `validate_fr_tva()` only returns the validation result for the specified column. If the user doesn't specify a column name, `validate_fr_tva()` returns the validation result for the whole DataFrame. ``` from dataprep.clean import validate_fr_tva print(validate_fr_tva('Fr 40 303 265 045')) print(validate_fr_tva('84 323 140 391')) print(validate_fr_tva('BE 428759497')) print(validate_fr_tva('BE431150351')) print(validate_fr_tva("004085616")) print(validate_fr_tva("hello")) print(validate_fr_tva(np.nan)) print(validate_fr_tva("NULL")) ``` ### Series ``` validate_fr_tva(df["tva"]) ``` ### DataFrame + Specify Column ``` validate_fr_tva(df, column="tva") ``` ### Only DataFrame ``` validate_fr_tva(df) ```
github_jupyter
##Objective ###### Modularize the code into python files for RESNET-18 architecture on CIFAR-10 dataset with implementation of GradCAM ##Import Packages ##### Python libraries required for CIFAR10 ``` pip install albumentations==0.4.5 import matplotlib.pyplot as plt import torch.optim as optim import torch.nn as nn import PIL import numpy as np from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau from torchsummary import summary ``` ###### Packages from Modules ``` from train import train from test import test from data import CIFAR10 from cuda import initialze_cuda from lr_finder import LRFinder from resnet import ResNet18 from gradcam_utils import _GradCAM from plot import Plot ``` ##Initialize Cuda ##### Set up the seed value for the machine ``` cuda, device = initialze_cuda(1) ``` ## Initialize Hyperparameters ##### Assigning various parameters required by the dataset and model ``` class Args: cuda = cuda batch_size = 64 num_workers = 4 horizontal_flip = 0.2 vertical_flip = 0.1 rotation = 10 cutout = 0.3 gaussian_blur = 0.1 learning_rate = 0.01 momentum = 0.9 lr_step = 25 lr_gamma = 0.1 epochs = 5 sample_count = 25 start_lr = 1e-7 end_lr = 1 lr_num_iter = 400 ``` ###Intialize CIFAR-10 dataset ``` cifar10 = CIFAR10(cuda= Args.cuda, batch_size = Args.batch_size, num_workers = Args.num_workers, horizontal_flip = Args.horizontal_flip, vertical_flip = Args.vertical_flip, rotation = Args.rotation, cutout = Args.cutout, gaussian_blur = Args.gaussian_blur) ``` ##Visualizing images from the CIFAR-10 Dataset ##### These images can help one realise which image augmentation techniques are the best for the given dataset ``` classes = cifar10.classes sample_data, sample_targets = cifar10.data #Dimension of the data _ ,height,width,channels = sample_data.shape # Set number of images to display num_images = 4 # Display images with labels fig, axs = plt.subplots(1, 4, figsize=(8, 8)) fig.tight_layout() for i in range(num_images): axs[i].axis('off') 
axs[i].set_title(f'Label: {classes[sample_targets[i]]}') axs[i].imshow(sample_data[i]) ``` ###Load data on Dataloader ``` train_loader = cifar10.dataloader(train = True) test_loader = cifar10.dataloader(train = False) ``` ##Architecture #####Load the model architecture on the device #####Summary of the Architecture ``` model = ResNet18().to(device) summary(model, cifar10.input_size) ``` ###LR_finder Provides the initial best Learning rate based on findings of Leslie Smith ``` criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=Args.start_lr, momentum=Args.momentum) lr_finder = LRFinder(model, optimizer, criterion, device=device) lr_finder.range_test(train_loader, end_lr=Args.end_lr, num_iter=Args.lr_num_iter, step_mode="exp") _, initial_learning_rate = lr_finder.plot() lr_finder.reset() ``` ##Model Training and Validation ``` losses = [] accuracies = [] correct_samples = [] incorrect_samples= [] criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=initial_learning_rate, momentum=Args.momentum) #scheduler = StepLR(optimizer, Args.lr_step, Args.lr_gamma) scheduler = ReduceLROnPlateau(optimizer,mode='min',patience=4,verbose=True) epochs = Args.epochs last_epoch = False for epoch in range(1, epochs + 1): print(f'Epoch {epoch}:') if epoch == epochs: last_epoch = True train(model, train_loader, device, optimizer, criterion) #scheduler.step() val_loss = test(model, test_loader, device, criterion, losses, accuracies, correct_samples, incorrect_samples, last_epoch, Args.sample_count) scheduler.step(val_loss) ``` ##GradCAM #####Load the image ``` img_path = '/content/water-bird.JPEG' pil_img = PIL.Image.open(img_path) ``` ###Process the Image ``` args={'mean' : cifar10.mean, 'std': cifar10.std, 'height' :cifar10.input_size[1], 'width' : cifar10.input_size[2], 'device' : device } grad_cam = _GradCAM(**args) image = grad_cam.preprocess_image(pil_img) ``` ###GradCam result ``` args = {'model':model, 'layer_name' : ['layer1', 
'layer2', 'layer3', 'layer4'], 'input' : image } grad_cam.gradcam(** args) ``` ###Plotting the loss of the model per epoch ``` plot = Plot(cifar10.mean, cifar10.std, device) plot.plot_metric(losses, 'Loss') ``` ###Plotting the accuracy of the model per epoch ``` plot.plot_metric(accuracies, 'Accuracy') ``` ###Correct Prediction ``` plot.plot_images(correct_samples, cifar10.classes, 'Correct_samples') ``` ###Incorrect Prediction ``` plot.plot_images(incorrect_samples, cifar10.classes, 'Incorrect_samples') ``` ###Accuracy of each class of the dataset ``` plot.class_accuracy(model, cifar10.classes, test_loader) ```
github_jupyter
First, let's do word2vec with 'normal' text ``` from gensim.models.word2vec import Word2Vec from gensim.models import word2vec import pandas as pd import re import string from nltk.corpus import stopwords import numpy as np STOP = set(stopwords.words("english")) REMOVE = set(["!","(",")",":",".",";",",",'"',"?","-",">","_"]) df = pd.read_csv('../Goodreads_visualization/goodreads_export.csv') cleaned_df = df[df["My Rating"] != 0] html_clean = re.compile('<.*?>') gr_clean = re.compile('\[.*?\]') all_my_words = [] reviews = cleaned_df["My Review"] num_reviews = 0 num_words = 0 for row in reviews: if pd.isnull(row): continue review = row.lower() if not review: # empty review continue # clean strings cleaned_review = re.sub(html_clean, '', review) cleaned_review = re.sub(gr_clean, '', cleaned_review) new_review = [] for x in cleaned_review.split(' '): if x in STOP: continue if x in REMOVE: continue new_review.append(x) new_review = ' '.join(new_review) all_my_words += new_review.split('.') num_reviews += 1 ``` let's put these sentences into a dumb text file for the helper, just as an example ``` text = open('simple_text.txt','w') for element in all_my_words: text.write('%s.\n'%element) from gensim.models.word2vec import LineSentence sentences = LineSentence(text.name) model = Word2Vec(sentences, size=100, window=4, min_count=5) vocab = list(model.vocab.keys()) vocab[:10] model['physics'] model.most_similar_cosmul(positive=['nazis', 'japan']) model.similarity('book', 'novel') ``` Exciting! Are other pairs more dissimilar? ``` import random similarities = [] for i in range(1,100): one, other = random.choice(vocab), random.choice(vocab) similarities.append(model.similarity(one, other)) np.mean(similarities), np.median(similarities) ``` Welp that's not good. I guess the training corpus is too small... ``` model.syn0 ``` The above are the weights you can use in embeddings ----------------------- Now let's look at DNA! 
``` import dna2vec ``` First a look at how dna2vec does it: ``` class Learner: def __init__(self, out_fileroot, context_halfsize, gensim_iters, vec_dim): self.logger = logbook.Logger(self.__class__.__name__) assert(word2vec.FAST_VERSION >= 0) self.logger.info('word2vec.FAST_VERSION (should be >= 0): {}'.format(word2vec.FAST_VERSION)) self.model = None self.out_fileroot = out_fileroot self.context_halfsize = context_halfsize self.gensim_iters = gensim_iters self.use_skipgram = 1 self.vec_dim = vec_dim self.logger.info('Context window half size: {}'.format(self.context_halfsize)) self.logger.info('Use skipgram: {}'.format(self.use_skipgram)) self.logger.info('gensim_iters: {}'.format(self.gensim_iters)) self.logger.info('vec_dim: {}'.format(self.vec_dim)) def train(self, kmer_seq_generator): self.model = word2vec.Word2Vec( sentences=kmer_seq_generator, size=self.vec_dim, window=self.context_halfsize, min_count=5, workers=4, sg=self.use_skipgram, iter=self.gensim_iters) # self.logger.info(model.vocab) def write_vec(self): out_filename = '{}.w2v'.format(self.out_fileroot) self.model.save_word2vec_format(out_filename, binary=False) ``` -------------------------------- The trained table looks like this (some random input data from my projects) 1344 12 AAA 0.798623 0.340167 -0.106002 0.479023 -0.512316 -0.204932 -0.909642 0.929776 -0.526895 -0.487418 0.652579 -0.041673 TTT -0.430355 0.507353 0.204868 -0.396864 0.594459 -0.879607 -0.070906 1.065970 -0.216547 0.540595 0.742848 -0.213119 AAAA 0.916474 0.360498 -0.201165 0.450726 -0.627372 -0.232655 -1.043633 1.079020 -0.585594 -0.505746 0.719241 0.046239 TTTT -0.604012 0.578508 0.240181 -0.476954 0.605583 -0.960840 -0.079009 1.184651 -0.243861 0.608393 0.795853 -0.286772 TTTTT -0.625304 0.602461 0.296689 -0.482694 0.649928 -0.997988 -0.065473 1.091690 -0.250700 0.741902 0.868796 -0.313275 AAAAA 1.029531 0.364190 -0.265436 0.437347 -0.723385 -0.299899 -1.087821 1.122777 -0.636950 -0.578345 0.761875 0.069213 ATT -0.490559 
0.496848 -0.300972 -0.190906 0.170407 -0.613530 -0.456763 0.833760 -0.632226 0.541257 0.759477 -0.018878 AAT 0.028495 0.360433 -0.458956 0.125764 -0.013972 -0.417648 -0.925049 0.953009 -0.534955 -0.186829 0.600540 0.346013 Now load this into python, and later Keras ``` from gensim.models import word2vec word_vectors = word2vec.Word2Vec.load_word2vec_format('./dna2vec-20170621-0833-k3to5-12d-4c-6Mbp-sliding-nqR.w2v') word_vectors.syn0 ``` That's the weights for Keras! ``` word_vectors.syn0.shape word_vectors.most_similar(positive=['AAA'], negative=['TTT']) word_vectors.most_similar(positive=['AAA']) word_vectors.similarity('AGAAT','AAGTA') ``` More promising! At least no 99% as above.... ---------------- Let's compare with Needleman Wunsch, an alignment algorithm ``` from Bio import pairwise2 pairwise2.align.globalxx("AGAAT", "AAGTA") 3.0/7 ``` Kind of close? 42 % to 37%.... ``` from datasketch import MinHash import random nws = [] sims = [] sketches = [] counter = 0 outside_counter = 0 keys = list(word_vectors.vocab.keys()) for i in range(1,1000): #print(word_vectors.vocab.keys()) a = random.choice(keys) b = random.choice(keys) if a == b: continue if len(a) != len(b) != 5: continue # get similarity via needleman-wunsch new_score = pairwise2.align.globalxx(a, b)[0] # ('CCAC--T', '-CA-AAT', 3.0, 0, 7) score, length = new_score[2], new_score[-1] score += 1 nws.append(score) # get from dna2vec similarity = word_vectors.similarity(a, b) # 0.3 sims.append(similarity) # get from minhash m1, m2 = MinHash(), MinHash() for d in a: m1.update(d.encode('utf8')) for d in b: m2.update(d.encode('utf8')) jaccard = m1.jaccard(m2) sketches.append(jaccard) import seaborn as sns import pandas as pd df = pd.DataFrame({'Needleman-Wunsch':nws, 'word2vec':sims}) %matplotlib inline sns.violinplot(x='Needleman-Wunsch', y='word2vec', data=df); import scipy print(scipy.stats.pearsonr(nws, sims), scipy.stats.pearsonr(sketches, sims), scipy.stats.pearsonr(nws, sketches)) ``` wat. 
was expecting something higher. Let's try t-SNE ``` from sklearn.manifold import TSNE import matplotlib.pyplot as plt X = word_vectors[word_vectors.vocab] print(X[0:3,0:3]) tsne = TSNE(n_components=2) X_tsne = tsne.fit_transform(X) plt.scatter(X_tsne[:, 0], X_tsne[:, 1]) plt.show() ``` Let's load the pretrained huge wordvectors from the paper (trained on human genome) ``` word_vectors = word2vec.Word2Vec.load_word2vec_format('./dna2vec/pretrained/dna2vec-20161219-0153-k3to8-100d-10c-29320Mbp-sliding-Xat.w2v') from sklearn.metrics.pairwise import cosine_similarity nws = [] sims = [] sketches = [] cosines = [] keys = list(word_vectors.vocab.keys()) for i in range(1,1000): #print(word_vectors.vocab.keys()) a = random.choice(keys) b = random.choice(keys) if a == b: continue if len(a) + len(b) != 16: continue # get similarity via needleman-wunsch new_score = pairwise2.align.globalxx(a, b)[0] # ('CCAC--T', '-CA-AAT', 3.0, 0, 7) score, length = new_score[2], new_score[-1] new_score = score + 1 nws.append(new_score) # get from cosine similarity cosine = cosine_similarity(word_vectors[a].reshape(1,-1), word_vectors[b].reshape(1,-1))[0][0] cosines.append(cosine) # get from dna2vec similarity = word_vectors.similarity(a, b) # 0.3 sims.append(similarity) # get from minhash m1, m2 = MinHash(), MinHash() for d in a: m1.update(d.encode('utf8')) for d in b: m2.update(d.encode('utf8')) jaccard = m1.jaccard(m2) sketches.append(jaccard) counter += 1 print('''Needleman Wunsch vs word2vec similarities: %s, MinHash vs word2vec similarities: %s, Needleman Wunsch vs MinHash: %s, MinHash vs. 
Cosine Similarity: %s, Needleman Wunsch vs Cosine Similarity: %s, word2vec vs Cosine Similarity: %s'''%( scipy.stats.pearsonr(nws, sims)[0], scipy.stats.pearsonr(sketches, sims)[0], scipy.stats.pearsonr(nws, sketches)[0], scipy.stats.pearsonr(sketches, cosines)[0], scipy.stats.pearsonr(nws, cosines)[0], scipy.stats.pearsonr(sims, cosines)[0] )) df = pd.DataFrame({'Needleman-Wunsch':nws, 'word2vec':sims}) sns.violinplot(x='Needleman-Wunsch', y='word2vec', data=df); ``` Now check the first NEIGHBOR only ``` counter = 0 nws = [] w2v = [] sketches = [] for a in word_vectors.vocab: if len(a) != 8: continue neighbors = word_vectors.most_similar_cosmul(a) neighbor = '' for n in neighbors: # n = ('CTTTCCT', 0.929522693157196) if len(n[0]) == len(a): neighbor = n break if not neighbor: continue new_score = pairwise2.align.globalxx(a, neighbor[0])[0] # ('CCAC--T', '-CA-AAT', 3.0, 0, 7) score, length = new_score[2], new_score[-1] score += 1 if counter < 5: print('Sequence %s Neighbor %s'%(a, neighbor)) print('Needleman Wunsch: %s'%score) print('-------') nws.append(score) other_score = neighbor[1] w2v.append(other_score) # get from minhash m1, m2 = MinHash(), MinHash() for d in a: m1.update(d.encode('utf8')) for d in neighbor[0]: m2.update(d.encode('utf8')) jaccard = m1.jaccard(m2) sketches.append(float('%.1f'%jaccard)) if counter == 1000: break counter += 1 scipy.stats.spearmanr(nws, w2v) df = pd.DataFrame({'Needleman-Wunsch':nws, 'dna2vec cosine':w2v, 'MinHash':sketches}) sns.violinplot(x='Needleman-Wunsch', y='dna2vec cosine', data=df, scale='count'); g = sns.violinplot(x='MinHash', y='dna2vec cosine', data=df, scale='count'); ``` Niiiiiiiiiice. So for close neighbors MinHash and Needleman-Wunsch seem to correlate, but not for distant neighbors, interesting ``` df.describe() ``` Welp that's weird. By now I think that neighbors in the dna2vec space don't necessarily have to be related by their bases. TO CHECK: - what are the neighbors that have low NW scores? 
Are they functionally related, or are their sequences conserved? Can I find conservation islands this way? - why is it that close neighbors have better NW/MinHash score correlation than distant neighbors? ``` weights = word_vectors.syn0 from keras.layers import Embedding from keras.engine import Input layer = Embedding(input_dim=weights.shape[0], output_dim=weights.shape[1], weights=[weights], trainable=False) layer ``` IMPORTANT to set trainable to False, else Keras is allowed to change our word2vec input weights! We can either use an LSTM or convolutional layers here. https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
github_jupyter
``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import numpy.random as npr alphas = np.linspace(1,0,1000) single_gaussian_ll_alphas_true = pd.read_csv("single_gaussian_ll_alphas_true.csv").iloc[:,1] single_gaussian_ll_alphas_est = pd.read_csv("single_gaussian_ll_alphas_est.csv").iloc[:,1] overlap_gaussian_ll_alphas_true = pd.read_csv("overlap_gaussian_ll_alphas_true.csv").iloc[:,1] overlap_gaussian_ll_alphas_est = pd.read_csv("overlap_gaussian_ll_alphas_est.csv").iloc[:,1] nonoverlap_gaussian_ll_alphas_true = pd.read_csv("nonoverlap_gaussian_ll_alphas_true.csv").iloc[:,1] nonoverlap_gaussian_ll_alphas_est = pd.read_csv("nonoverlap_gaussian_ll_alphas_est.csv").iloc[:,1] gaussian_lls = pd.DataFrame({"one cluster":single_gaussian_ll_alphas_true, \ # "single_gaussian_ll_alphas_est":single_gaussian_ll_alphas_est, \ "two overlapping clusters":overlap_gaussian_ll_alphas_true, \ # "overlap_gaussian_ll_alphas_est":overlap_gaussian_ll_alphas_est, \ "two non-overlapping clusters":nonoverlap_gaussian_ll_alphas_true, \ # "nonoverlap_gaussian_ll_alphas_est":nonoverlap_gaussian_ll_alphas_est, \ "mixture weight":alphas}) flatten_gaussian_lls = pd.melt(gaussian_lls.iloc[1:-1], ['mixture weight']) flatten_gaussian_lls.columns = ['mixture weight', 'data', 'likelihood'] f = plt.figure() sns.lineplot(x='mixture weight', y='likelihood', hue='data', data=flatten_gaussian_lls) f.savefig("nonid-ll.pdf") single_gaussian_theta_sample_hmc = pd.read_csv("single_gaussian_theta_sample_hmc.csv").iloc[:,1] overlap_gaussian_theta_sample_hmc = pd.read_csv("overlap_gaussian_theta_sample_hmc.csv").iloc[:,1] nonoverlap_gaussian_theta_sample_hmc = pd.read_csv("nonoverlap_gaussian_theta_sample_hmc.csv").iloc[:,1] prior_theta_sample = npr.beta(5,5, size=1000) fig, ax = plt.subplots(1, 1) bins = np.linspace(0, 1, 10) ax.hist([prior_theta_sample, single_gaussian_theta_sample_hmc, overlap_gaussian_theta_sample_hmc, nonoverlap_gaussian_theta_sample_hmc], 
alpha=0.7, bins = bins, label=['prior', 'posterior -- one cluster', 'posterior -- two overlapping clusters', 'posterior -- two nonoverlapping clusters']) # ax.hist(prior_theta_sample, alpha=0.4, label='prior') # ax.hist(single_gaussian_theta_sample_hmc, alpha=0.4, label='posterior -- one cluster') # ax.hist(overlap_gaussian_theta_sample_hmc, alpha=0.4, label='posterior -- two overlapping clusters') # ax.hist(nonoverlap_gaussian_theta_sample_hmc, alpha=0.4, label='posterior -- two nonoverlapping clusters') ax.legend() fig.savefig("nonid-posterior-hmc.pdf") single_gaussian_theta_sample_vbmf = pd.read_csv("single_gaussian_theta_sample_vbmf.csv").iloc[:,1] overlap_gaussian_theta_sample_vbmf = pd.read_csv("overlap_gaussian_theta_sample_vbmf.csv").iloc[:,1] nonoverlap_gaussian_theta_sample_vbmf = pd.read_csv("nonoverlap_gaussian_theta_sample_vbmf.csv").iloc[:,1] fig, ax = plt.subplots(1, 1) bins = np.linspace(0, 1, 10) ax.hist([prior_theta_sample, single_gaussian_theta_sample_vbmf, overlap_gaussian_theta_sample_vbmf, nonoverlap_gaussian_theta_sample_vbmf], alpha=0.7, bins = bins, label=['prior', 'posterior -- one cluster', 'posterior -- two overlapping clusters', 'posterior -- two nonoverlapping clusters']) # ax.hist(prior_theta_sample, alpha=0.4, label='prior') # ax.hist(single_gaussian_theta_sample_vbmf, alpha=0.4, label='posterior -- one cluster') # ax.hist(overlap_gaussian_theta_sample_vbmf, alpha=0.4, label='posterior -- two overlapping clusters') # ax.hist(nonoverlap_gaussian_theta_sample_vbmf, alpha=0.4, label='posterior -- two nonoverlapping clusters') ax.legend() fig.savefig("nonid-posterior-vbmf.pdf") single_gaussian_theta_sample_vbfr = pd.read_csv("single_gaussian_theta_sample_vbfr.csv").iloc[:,1] overlap_gaussian_theta_sample_vbfr = pd.read_csv("overlap_gaussian_theta_sample_vbfr.csv").iloc[:,1] nonoverlap_gaussian_theta_sample_vbfr = pd.read_csv("nonoverlap_gaussian_theta_sample_vbfr.csv").iloc[:,1] fig, ax = plt.subplots(1, 1) bins = np.linspace(0, 1, 10) 
ax.hist([prior_theta_sample, single_gaussian_theta_sample_vbfr, overlap_gaussian_theta_sample_vbfr, nonoverlap_gaussian_theta_sample_vbfr], alpha=0.7, bins = bins, label=['prior', 'posterior -- one cluster', 'posterior -- two overlapping clusters', 'posterior -- two nonoverlapping clusters']) # ax.hist(prior_theta_sample, alpha=0.4, label='prior') # ax.hist(single_gaussian_theta_sample_vbfr, alpha=0.4, label='posterior -- one cluster') # ax.hist(overlap_gaussian_theta_sample_vbfr, alpha=0.4, label='posterior -- two overlapping clusters') # ax.hist(nonoverlap_gaussian_theta_sample_vbfr, alpha=0.4, label='posterior -- two nonoverlapping clusters') ax.legend() fig.savefig("nonid-posterior-vbfr.pdf") ```
github_jupyter
``` import pandas as pd import util_functions as uf import altair as alt from vega_datasets import data alt.renderers.enable('notebook') uf.set_env_path() conn, cur = uf.aws_connect() df = pd.read_sql("""SELECT DISTINCT start_anc.start_anc as anc_id, start_anc_trips, end_anc_trips, dless_total_trips, start_anc_trips/dless_total_trips::float as dless_start_perc, end_anc_trips/dless_total_trips::float as dless_end_perc, cabi_trips_start, cabi_trips_end, cabi_total_trips, cabi_trips_start/cabi_total_trips::float as cabi_start_perc, cabi_trips_end/cabi_total_trips::float as cabi_end_perc FROM /* Count of dockless start anc trips*/ (SELECT DISTINCT start_anc, count(*) as start_anc_trips FROM dockless_trips_geo WHERE operatorclean != 'jump' and start_anc is not null group by 1) as start_anc /* Count of dockless end anc trips*/ LEFT JOIN (SELECT DISTINCT end_anc, count(*) as end_anc_trips FROM dockless_trips_geo WHERE operatorclean != 'jump' and end_anc is not null group by 1) as end_anc ON start_anc.start_anc = end_anc.end_anc /* Count of Total Dockless Trips*/ LEFT JOIN (SELECT DISTINCT count(*) as dless_total_trips FROM dockless_trips_geo where operatorclean != 'jump') as tot on start_anc.start_anc = start_anc.start_anc /* Count of Total DC to DC CaBi Trips during dockless pilot*/ LEFT JOIN (SELECT DISTINCT sum(cabi_trips_wdc_to_wdc) as cabi_total_trips FROM final_db where dless_trips_all > 0) as cabi_tot on start_anc.start_anc = start_anc.start_anc /* Count CaBi trips starts*/ LEFT JOIN (select distinct start_anc, count(*) as cabi_trips_start from (select * from cabi_trips where start_date::date >= '09-10-2017' and start_date::date <= '04-30-2018') as cabi_trips /*keep only dc to dc cabi trips*/ inner join (select distinct start_short_name, end_short_name, start_anc, end_anc from cabi_stations_geo_temp where start_anc != '' and end_anc != '') as cabi_geo on cabi_trips.start_station = cabi_geo.start_short_name and cabi_trips.end_station = cabi_geo.end_short_name group 
by 1) as cabi_starts ON start_anc.start_anc = cabi_starts.start_anc /* Count CaBi trip ends*/ LEFT JOIN (select distinct end_anc, count(*) as cabi_trips_end from (select * from cabi_trips where start_date::date >= '09-10-2017' and start_date::date <= '04-30-2018') as cabi_trips /*keep only dc to dc cabi trips*/ inner join (select distinct start_short_name, end_short_name, start_anc, end_anc from cabi_stations_geo_temp where start_anc != '' and end_anc != '') as cabi_geo on cabi_trips.start_station = cabi_geo.start_short_name and cabi_trips.end_station = cabi_geo.end_short_name group by 1) as cabi_ends ON start_anc.start_anc = cabi_ends.end_anc """, con=conn) print(df.tail()) chart = alt.Chart(json_features, title=title).mark_geoshape( fill='lightgray', stroke='white' ).properties( projection={'type': 'albersUsa'}, width=400, height=400 ).encode( alt.Color(color_column, type='quantitative') ) ```
github_jupyter
#### New to Plotly? Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/). <br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online). <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! #### Version Check Note: 3D Mesh are available in version <b>1.10.0+</b><br> Run `pip install plotly --upgrade` to update your Plotly version ``` import plotly plotly.__version__ ``` ### Simple 3D Mesh example ### We are using data present in a separate text file. The file can be downloaded from [Plotly's dataset repo](https://raw.githubusercontent.com/plotly/datasets/master/mesh_dataset.txt). ``` import plotly.plotly as py import plotly.graph_objs as go import numpy as np pts=np.loadtxt('mesh_dataset.txt') x,y,z=zip(*pts) trace = go.Mesh3d(x=x,y=y,z=z,color='#FFB6C1',opacity=0.50) py.iplot([trace]) ``` ### 3D Mesh example with Alphahull Alphahull sets shape of mesh. If the value is -1 then Delaunay triangulation is used. If >0 then the alpha-shape algorithm is used. The default value is -1. 
``` import plotly.plotly as py import plotly.graph_objs as go import numpy as np pts=np.loadtxt('mesh_dataset.txt') x,y,z=zip(*pts) trace = go.Mesh3d(x=x,y=y,z=z, alphahull=5, opacity=0.4, color='#00FFFF') py.iplot([trace]) ``` ### Mesh Tetrahedron ``` data = [ go.Mesh3d( x = [0, 1, 2, 0], y = [0, 0, 1, 2], z = [0, 2, 0, 1], colorbar = go.ColorBar( title='z' ), colorscale = [[0, 'rgb(255, 0, 0)'], [0.5, 'rgb(0, 255, 0)'], [1, 'rgb(0, 0, 255)']], intensity = [0, 0.33, 0.66, 1], i = [0, 0, 0, 1], j = [1, 2, 3, 2], k = [2, 3, 1, 3], name = 'y', showscale = True ) ] layout = go.Layout( xaxis=go.XAxis( title='x' ), yaxis=go.YAxis( title='y' ) ) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='3d-mesh-tetrahedron-python') ``` ### Mesh Cube ``` data = [ go.Mesh3d( x = [0, 0, 1, 1, 0, 0, 1, 1], y = [0, 1, 1, 0, 0, 1, 1, 0], z = [0, 0, 0, 0, 1, 1, 1, 1], colorbar = go.ColorBar( title='z' ), colorscale = [[0, 'rgb(255, 0, 255)'], [0.5, 'rgb(0, 255, 0)'], [1, 'rgb(0, 0, 255)']], intensity = [0, 0.142857142857143, 0.285714285714286, 0.428571428571429, 0.571428571428571, 0.714285714285714, 0.857142857142857, 1], i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3], k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6], name='y', showscale=True ) ] layout = go.Layout( xaxis=go.XAxis( title='x' ), yaxis=go.YAxis( title='y' ) ) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='3d-mesh-cube-python') ``` ## Reference See https://plotly.com/python/reference/#mesh3d for more information and chart attribute options! ``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'mesh-3d.ipynb', 'python/3d-mesh/', 'Python 3D Mesh Plots | Plotly', 'How to make 3D Mesh Plots', title= '3D Mesh Plots in Python | plotly', name = '3D Mesh Plots', has_thumbnail='true', thumbnail='thumbnail/3d-mesh.jpg', language='python', display_as='3d_charts', order=7, ipynb= '~notebook_demo/67') ```
github_jupyter
# Application Classification https://gunrock.github.io/docs/#/hive/hive_application_classification ```bash git clone git@github.com:owensgroup/application_classification.git cub git clone git@gitlab.hiveprogram.com:pnnl/ApplicationClassification.git pnnl git clone git@gitlab.hiveprogram.com:wcude/ApplicationClassification.git wcude # same as pnnl git clone git@gitlab.hiveprogram.com:jcromano/applicationClassification.git spark ``` This notebook is based on `test.py` from `cub`. We have both NumPy and GraphBLAS implementations side-by-side. ## Init ``` import grblas as gb from grblas import * import numpy as np import pandas as pd from scipy.spatial.distance import cdist data_vertex = pd.read_csv('./data/georgiyData.Vertex.csv', skiprows=1, sep=' ', header=None) pattern_vertex = pd.read_csv('./data/georgiyPattern.Vertex.csv', skiprows=1, sep=' ', header=None) data_edges = pd.read_csv('./data/georgiyData.Edges.csv', skiprows=1, sep=' ', header=None) pattern_edges = pd.read_csv('./data/georgiyPattern.Edges.csv', skiprows=1, sep=' ', header=None) assert (data_vertex[0] == data_vertex.index).all() assert (pattern_vertex[0] == pattern_vertex.index).all() data_vertex = data_vertex.values[:,1:] data_edges_table = data_edges[list(range(2, data_edges.shape[1]))].values data_edges = data_edges[[0, 1]].values pattern_vertex = pattern_vertex.values[:,1:] pattern_edges_table = pattern_edges[list(range(2, pattern_edges.shape[1]))].values pattern_edges = pattern_edges[[0, 1]].values num_dv = data_vertex.shape[0] num_pv = pattern_vertex.shape[0] num_de = data_edges.shape[0] num_pe = pattern_edges.shape[0] edge_dim = pattern_edges.shape[1] data_edges = data_edges[np.lexsort(np.rot90(data_edges))] # XXX: should we sort edges lexicographically? 
def isclose(gb_x, np_y): if isinstance(gb_x, Vector): return gb_x.isclose(Vector.ss.import_full(np_y)) elif isinstance(gb_x, Matrix): return gb_x.isclose(Matrix.ss.import_fullr(np_y)) else: # Scalar return gb_x.isclose(np_y) data_vertex_gb = Matrix.ss.import_fullr(data_vertex) data_edges_table_gb = Matrix.ss.import_fullr(data_edges_table) data_edges_gb = Matrix.ss.import_fullr(data_edges) pattern_vertex_gb = Matrix.ss.import_fullr(pattern_vertex) pattern_edges_table_gb = Matrix.ss.import_fullr(pattern_edges_table) pattern_edges_gb = Matrix.ss.import_fullr(pattern_edges) data_gb = Matrix.from_values(data_edges[:, 0], data_edges[:, 1], 1) data_gb pattern_gb = Matrix.from_values(pattern_edges[:, 0], pattern_edges[:, 1], 1) pattern_gb def normprob(x): x = (x - x.max(axis=0, keepdims=True)).copy() return np.log(np.exp(x) / np.exp(x).sum(axis=0, keepdims=True)) def l2_norm(x): return np.sqrt((x ** 2).sum()) def normprob_gb(x): x = op.any_minus(x @ ss.diag(x.reduce_columnwise(op.max))) return op.any_minus(x @ ss.diag(x.reduce_columnwise(agg.logaddexp))).new() ``` ## Vertex similarity ``` assert isclose(normprob_gb(data_vertex_gb), normprob(data_vertex)) assert isclose(data_vertex_gb.reduce_scalar(agg.L2norm).new(), l2_norm(data_vertex)) ``` ```C Init_CV_MU(Data_Graph, Pattern_Graph, WA.CV, WA.MU); // NodePairwiseNorm ``` ``` cv = cdist(data_vertex, pattern_vertex) cv def cdist_gb(X, Y): # This is not the most numerically stable algorithm! 
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.euclidean_distances.html#sklearn.metrics.pairwise.euclidean_distances XX = X.reduce_rows(agg.sum_of_squares) YY = Y.reduce_rows(agg.sum_of_squares) tempY = -2 * Y # Y is smallest in this notebook, so apply `-2` here rv = X @ tempY.T rv = op.any_plus(ss.diag(XX) @ rv) rv = op.any_plus(rv @ ss.diag(YY)) return op.sqrt(rv).new() cv_gb = cdist_gb(data_vertex_gb, pattern_vertex_gb) assert isclose(cv_gb, cv) ``` ```C NormProb(DV, PV, WA.CV); // ColumnSoftmax NormProb(DV, PV, WA.MU); // ColumnSoftmax ``` ``` mu = normprob(-cv) cv = normprob(cv) mu_gb = normprob_gb(-cv_gb) cv_gb = normprob_gb(cv_gb) assert isclose(mu_gb, mu) assert isclose(cv_gb, cv) ``` Maybe? ```C Init_VR_VF(Data_Graph, Pattern_Graph, WA.MU, WA.VR, WA.VF); // RepeatColumnsByPatternEdges ``` Maybe? ```C VFmax_VRmax(Data_Graph, Pattern_Graph, WA.VF, WA.VR, WA.VFmax, WA.VRmax); // ColumnMax ``` ``` mu_max = mu.max(axis=0) mu_max_gb = mu_gb.reduce_columnwise(op.max).new() assert isclose(mu_max_gb, mu_max) v_fwd_max = np.zeros(num_pe) v_bak_max = np.zeros(num_pe) for i, (src, dst) in enumerate(pattern_edges): v_bak_max[i] = mu_max[src] v_fwd_max[i] = mu_max[dst] v_bak_max_graph_gb = op.any_first(ss.diag(mu_max_gb) @ pattern_gb).new() assert v_bak_max_graph_gb.isclose(Matrix.from_values(pattern_edges[:, 0], pattern_edges[:, 1], v_bak_max)) v_bak_max_graph_gb.wait() _, _, v_bak_max_gb = v_bak_max_graph_gb.to_values() v_bak_max_gb = Vector.ss.import_full(v_bak_max_gb) assert isclose(v_bak_max_gb, v_bak_max) v_fwd_max_graph_gb = op.any_second(pattern_gb @ ss.diag(mu_max_gb)).new() v_fwd_max_graph_gb.isclose(Matrix.from_values(pattern_edges[:, 0], pattern_edges[:, 1], v_fwd_max)) v_fwd_max_graph_gb.wait() _, _, v_fwd_max_gb = v_fwd_max_graph_gb.to_values() v_fwd_max_gb = Vector.ss.import_full(v_fwd_max_gb) assert isclose(v_fwd_max_gb, v_fwd_max) ``` ## Edge similarity ```C Init_CE_RE_FE(Data_Graph, Pattern_Graph, WA.CE, WA.RE, WA.FE); // 
EdgePairwiseNorm NormProb(DE, PE, WA.CE); // ColumnSoftmax NormProb(DE, PE, WA.RE); // ColumnSoftmax NormProb(DE, PE, WA.FE); // ColumnSoftmax ``` ``` ce = cdist(data_edges_table, pattern_edges_table) xe = normprob(-ce) ce = normprob(ce) ce_gb = cdist_gb(data_edges_table_gb, pattern_edges_table_gb) xe_gb = normprob_gb(-ce_gb) ce_gb = normprob_gb(ce_gb) assert isclose(ce_gb, ce) assert isclose(xe_gb, xe) ``` ## Combine ```C Init_Cnull(Data_Graph, Pattern_Graph, WA.CE, WA.Cnull); // ??? NormProb(1, PE, WA.Cnull); // ColumnSoftmax ``` ``` # >> # cnull = np.sqrt((pattern_edges_table ** 2).sum(axis=-1)) # cnull = np.maximum(cnull, ce.max(axis=0)) # cnull = normprob(cnull) # -- cnull = np.zeros(num_pe) # bug in code? # << cnull_gb = Vector.new(float, size=num_pe) cnull_gb << 0 ``` ```C FMax(Data_Graph, Pattern_Graph, WA.Cnull, WA.VRmax, WA.FE, WA.FMax); // EdgeMaxReduce RMax(Data_Graph, Pattern_Graph, WA.Cnull, WA.VFmax, WA.RE, WA.RMax); // EdgeMaxReduce ``` ``` fwd_max = np.zeros((num_dv, num_pe)) bak_max = np.zeros((num_dv, num_pe)) # possible alternative to avoid if-else below # fwd_max = np.repeat(v_bak_max[np.newaxis, :], num_pe, axis=0) # bak_max = np.repeat(v_fwd_max[np.newaxis, :], num_pe, axis=0) fwd_touched = set([]) bak_touched = set([]) for edge_idx, (src, dst) in enumerate(data_edges): if dst not in fwd_touched: fwd_max[dst] = np.maximum(v_bak_max, xe[edge_idx]) #fwd_max[dst] = np.minimum(v_bak_max, xe[edge_idx]) # XXX fwd_touched.add(dst) else: fwd_max[dst] = np.maximum(fwd_max[dst], xe[edge_idx]) #fwd_max[dst] = np.minimum(fwd_max[dst], xe[edge_idx]) # XXX if src not in bak_touched: bak_max[src] = np.maximum(v_fwd_max, xe[edge_idx]) #bak_max[src] = np.minimum(v_fwd_max, xe[edge_idx]) # XXX bak_touched.add(src) else: bak_max[src] = np.maximum(bak_max[src], xe[edge_idx]) #bak_max[src] = np.minimum(bak_max[src], xe[edge_idx]) # XXX dr, dc, _ = data_gb.to_values() data_fwd_graph = Matrix.from_values(dc, np.arange(data_gb.nvals), 1) fwd_max_gb = 
op.max_second(data_fwd_graph @ xe_gb).new() fwd_max_gb = op.any_max(fwd_max_gb @ ss.diag(v_bak_max_gb)).new() assert isclose(fwd_max_gb, fwd_max) data_bak_graph = Matrix.from_values(dr, np.arange(data_gb.nvals), 1) bak_max_gb = op.max_second(data_bak_graph @ xe_gb).new() bak_max_gb = op.any_max(bak_max_gb @ ss.diag(v_fwd_max_gb)).new() assert isclose(bak_max_gb, bak_max) if False: # check when running the above with min to see if we iterate over patterns correctly fwd_max_gb = op.min_second(Matrix.from_values(c, np.arange(data_gb.nvals), 1) @ xe_gb).new() fwd_max_gb = op.any_min(fwd_max_gb @ ss.diag(v_bak_max_gb)).new() bak_max_gb = op.min_second(Matrix.from_values(r, np.arange(data_gb.nvals), 1) @ xe_gb).new() bak_max_gb = op.any_min(bak_max_gb @ ss.diag(v_fwd_max_gb)).new() assert isclose(fwd_max_gb, fwd_max) assert isclose(fwd_max_gb, fwd_max) ``` ### Loop ```C VF_VR(Data_Graph, Pattern_Graph, WA.MU, WA.FMax, WA.RMax, WA.VF, WA.VR); // RepeatColumnsByPatternEdgesSubtract VFmax_VRmax(Data_Graph, Pattern_Graph, WA.VF, WA.VR, WA.VFmax, WA.VRmax); // ColumnMax FE_RE(Data_Graph, Pattern_Graph, WA.CE, WA.VF, WA.VR, WA.FE, WA.RE); // RepeatColumnsByDataEdges NormProb(DE, PE, WA.FE); // ColumnSoftmax NormProb(DE, PE, WA.RE); // ColumnSoftmax FMax(Data_Graph, Pattern_Graph, WA.Cnull, WA.VRmax, WA.FE, WA.FMax); // EdgeMaxReduce RMax(Data_Graph, Pattern_Graph, WA.Cnull, WA.VFmax, WA.RE, WA.RMax); // EdgeMaxReduce MU(Data_Graph, Pattern_Graph, WA.CV, WA.FMax, WA.RMax, WA.MU); // ComputeMU NormProb(DV, PV, WA.MU); // ColumnSoftmax ``` ``` v_fwd = np.zeros((num_dv, num_pe)) v_bak = np.zeros((num_dv, num_pe)) pattern_gb.wait() pr, pc, _ = pattern_gb.to_values() # for _ in range(num_pv): for p_edge_idx, (src, dst) in enumerate(pattern_edges): v_fwd[:,p_edge_idx] = mu[:,dst] - fwd_max[:,p_edge_idx] v_bak[:,p_edge_idx] = mu[:,src] - bak_max[:,p_edge_idx] pattern_fwd_graph = Matrix.from_values(pc, np.arange(pattern_gb.nvals), 1) v_fwd_gb = op.any_first(mu_gb @ 
pattern_fwd_graph).new() v_fwd_gb = op.minus(v_fwd_gb & fwd_max_gb).new() assert isclose(v_fwd_gb, v_fwd) pattern_bak_graph = Matrix.from_values(pr, np.arange(pattern_gb.nvals), 1) v_bak_gb = op.any_first(mu_gb @ pattern_bak_graph).new() v_bak_gb = op.minus(v_bak_gb & bak_max_gb).new() assert isclose(v_bak_gb, v_bak) ``` maybe? ```C VFmax_VRmax(Data_Graph, Pattern_Graph, WA.VF, WA.VR, WA.VFmax, WA.VRmax); // ColumnMax ``` ``` v_fwd_max = v_fwd.max(axis=0) v_bak_max = v_bak.max(axis=0) v_fwd_max_gb = v_fwd_gb.reduce_columnwise(op.max).new() assert isclose(v_fwd_max_gb, v_fwd_max) v_bak_max_gb = v_bak_gb.reduce_columnwise(op.max).new() assert isclose(v_bak_max_gb, v_bak_max) e_bak = v_fwd[data_edges[:,0]] - ce e_fwd = v_bak[data_edges[:,0]] - ce e_bak_gb = op.any_second(data_bak_graph.T @ v_fwd_gb).new() e_bak_gb = op.minus(e_bak_gb & ce_gb).new() assert isclose(e_bak_gb, e_bak) e_fwd_gb = op.any_second(data_bak_graph.T @ v_bak_gb).new() e_fwd_gb = op.minus(e_fwd_gb & ce_gb).new() assert isclose(e_fwd_gb, e_fwd) e_bak_norm = np.log(np.exp(e_bak).sum(axis=0, keepdims=True)) e_fwd_norm = np.log(np.exp(e_fwd).sum(axis=0, keepdims=True)) # Why doesn't this use normprob_gb? e_bak_norm_gb = e_bak_gb.reduce_columnwise(op.numpy.logaddexp).new() assert isclose(e_bak_norm_gb, e_bak_norm[0, :]) e_fwd_norm_gb = e_fwd_gb.reduce_columnwise(op.numpy.logaddexp).new() assert isclose(e_fwd_norm_gb, e_fwd_norm[0, :]) fwd_max = np.zeros((num_dv, num_pe)) - np.inf # num_dv x num_pe bak_max = np.zeros((num_dv, num_pe)) - np.inf # num_dv x num_pe sel = np.argsort(data_edges[:,0], kind='mergesort') # XXX: why? 
for d_edge_idx, (src, dst) in enumerate(data_edges[sel]): bak_max[src] = np.maximum(bak_max[src], e_bak[d_edge_idx]) for d_edge_idx, (src, dst) in enumerate(data_edges[sel]): fwd_max[dst] = np.maximum(fwd_max[dst], e_fwd[d_edge_idx]) fwd_max_gb = op.max_second(data_fwd_graph @ e_fwd_gb).new() assert isclose(fwd_max_gb, fwd_max) bak_max_gb = op.max_second(data_bak_graph @ e_bak_gb).new() assert isclose(bak_max_gb, bak_max) fwd_max -= e_fwd_norm bak_max -= e_bak_norm fwd_max_gb = op.any_minus(fwd_max_gb @ ss.diag(e_fwd_norm_gb)).new() assert isclose(fwd_max_gb, fwd_max) bak_max_gb = op.any_minus(bak_max_gb @ ss.diag(e_bak_norm_gb)).new() assert isclose(bak_max_gb, bak_max) fwd_max = np.maximum(fwd_max, (v_bak_max - cnull).reshape(1, -1)) bak_max = np.maximum(bak_max, (v_fwd_max - cnull).reshape(1, -1)) fwd_max_gb = op.any_max(fwd_max_gb @ ss.diag(op.minus(v_bak_max_gb & cnull_gb))).new() assert isclose(fwd_max_gb, fwd_max) bak_max_gb = op.any_max(bak_max_gb @ ss.diag(op.minus(v_fwd_max_gb & cnull_gb))).new() assert isclose(bak_max_gb, bak_max) ``` ```C MU(Data_Graph, Pattern_Graph, WA.CV, WA.FMax, WA.RMax, WA.MU); // ComputeMU NormProb(DV, PV, WA.MU); // ColumnSoftmax ``` ``` mu = -cv for p_edge_idx, (src, dst) in enumerate(pattern_edges): mu[:,dst] += fwd_max[:,p_edge_idx] mu[:,src] += bak_max[:,p_edge_idx] mu = normprob(mu) mu_gb = op.ainv(cv_gb).new() mu_gb(op.plus) << op.plus_plus(fwd_max_gb @ pattern_fwd_graph.T) mu_gb(op.plus) << op.plus_plus(bak_max_gb @ pattern_bak_graph.T) mu_gb = normprob_gb(mu_gb) assert isclose(mu_gb, mu) ```
github_jupyter
# Generating DeepLoc Train/Valid/Test Data In this notebook, we parse through the original DeepLoc dataset to generate the train/test splits as well as a validation set from the test set. You can specify the max sequence length for your output data, which in our case we used 6000 due to memory issues on EC2 when training on a GPU. Paper: https://academic.oup.com/bioinformatics/article/33/21/3387/3931857 Dataset: http://www.cbs.dtu.dk/services/DeepLoc-1.0/deeploc_data.fasta Additionally, we remove the Cytoplasm/Nucleus class as done in the original DeepLoc paper to mimic their data cleaning process. ``` import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from Bio import SeqIO from Bio.Alphabet import IUPAC from Bio.SeqRecord import SeqRecord from Bio.Seq import Seq import json np.random.seed(42) ``` First, we will download the deeploc dataset to the /data folder within the TAPE project ``` !curl -o ../data/deeploc_data.fasta http://www.cbs.dtu.dk/services/DeepLoc-1.0/deeploc_data.fasta ``` Then, we will define a max sequence length and iterate through the DeepLoc dataset to output a filtered fasta file. Next, we generate train/test DataFrames in order to write out the train/test/valid split fasta files. Some sequences in the DeepLoc dataset have a Cytoplasm-Nucleus label for the subcellular location. Per the DeepLoc dataset, we are excluding these labels since they do not clearly fall into the Cytoplasm or Nucleus classes. ``` # Lists to store train and test sets. The original dataset indicates whether # each sequence belongs in the train or test set in the record description train_list = [] test_list = [] # Create a mapping from protein sequence ID to the q10 subcellular location labels # and the q2 membrane bound vs water soluble labels seq_id_to_q10 = {} seq_id_to_q2 = {} # Define max sequence length to filter data. Larger sequences take more memory to encode. 
MAX_SEQ_LENGTH = 6000 with open(f"../data/deeploc_data_{MAX_SEQ_LENGTH}.fasta", 'w') as handle: # Iterate through the original dataset to parse for ID, sequence, class label, and train/test flag for record in SeqIO.parse("../data/deeploc_data.fasta", "fasta"): description = record.description seq = record.seq desc_split = description.split(" ") ID = desc_split[0] label = desc_split[1] q2_label = label[-1] # q2 membrane bound vs water soluble label q10_label = label[:len(label)-2] # q10 subcellular location label # Ignore ambiguous cytoplasm-nucleus labels and sequences that are too long to embed if q10_label == "Cytoplasm-Nucleus" or len(seq) > MAX_SEQ_LENGTH: continue seq_id_to_q2[ID] = q2_label seq_id_to_q10[ID] = q10_label # Sequences in test set has an additional field in the description if len(desc_split) == 3: test_list.append((ID, q10_label, seq)) else: train_list.append((ID, q10_label, seq)) SeqIO.write(record, handle, "fasta") # Create pd DataFrames for the train and test sequences train_df = pd.DataFrame(train_list) test_df = pd.DataFrame(test_list) # Generate json label files for visualizating the embeddings with PCA with open('../data/deeploc_labels_q2.json', 'w') as handle: handle.write(json.dumps(seq_id_to_q2)) with open('../data/deeploc_labels_q10.json', 'w') as handle: handle.write(json.dumps(seq_id_to_q10)) print(train_df.shape) print(test_df.shape) ``` Inspect the DataFrame * Column 0 is a unique identifier for each sequence (sequence ID) * Column 1 shows the labels for the q10 and q2 tasks, separated by a dash - * Column 2 is a tuple of the amino acids in each sequence ``` train_df.head() ``` Looking at the distribution of the output classes for subcellular location (q10), you can see that the dataset is imbalanced. Almost 50% of sequences are either in the Nucleus or Cytoplasm classes. 
``` train_df[1].value_counts() test_df[1].value_counts() ``` Create map from subcellular location label to a numerical ID to use for stratifying the train set into train and validation, with a 90/10 train/valid split ``` id_map = {} for i, l in enumerate(train_df[1].unique()): id_map[l] = i train_labels = [] for label in train_df[1]: train_labels.append(id_map[label]) id_map train, validation = train_test_split(train_df, test_size=0.1, stratify=train_labels) print(train.shape) print(validation.shape) ``` Write out FASTA files containing the sequences and subcellular location labels for the newly separated train, valid, and test splits ``` with open(f"../data/deeploc_train_{MAX_SEQ_LENGTH}.fasta", 'w') as output_train_handle: for index, row in train.iterrows(): ID = row[0] label = row[1] seq = row[2] rec = SeqRecord(seq, id=ID, description=str(id_map[label])) SeqIO.write(rec, output_train_handle, "fasta") with open(f"../data/deeploc_valid_{MAX_SEQ_LENGTH}.fasta", 'w') as output_valid_handle: for index, row in validation.iterrows(): ID = row[0] label = row[1] seq = row[2] rec = SeqRecord(seq, id=ID, description=str(id_map[label])) SeqIO.write(rec, output_valid_handle, "fasta") with open(f"../data/deeploc_test_{MAX_SEQ_LENGTH}.fasta", 'w') as output_test_handle: for index, row in test_df.iterrows(): ID = row[0] label = row[1] seq = row[2] rec = SeqRecord(seq, id=ID, description=str(id_map[label])) SeqIO.write(rec, output_test_handle, "fasta") ```
github_jupyter
``` %load_ext autoreload %autoreload 2 %pylab inline %gui qt from spiketag.base import * from spiketag.view import * prb = probe(shank_no=1) prb[0] = np.array([0,1,2]) prb.mapping[0] = np.array([-10,0]) prb.mapping[1] = np.array([10,0]) prb.mapping[2] = np.array([0,10]) prb.fs = 20000. prb.n_ch = 3 prb.reorder_by_chip=False prb.show() ``` ## MUA ``` mua = MUA(mua_filename='./cell_0109.bin', spk_filename='./cell_0109.spk.bin', probe=prb, binary_radix=11, cutoff=[-100,100], time_segs=np.array([[0, 40], [100., 200.]]) ) mua.pivotal_pos mua.pivotal_pos[0] spks = np.fromfile('./cell_0109.spk.bin', dtype=np.int32) spks[::2] np.allclose(mua.pivotal_pos[0], spks[::2]) ``` ## Unit test for individual views ``` wview = wave_view(mua.data, spks=spks) wview.show() spk = mua.tospk() mua.spk_times[0]/mua.fs mua.t time_segs = np.array([[0, 40], [100., 200.]]) mua.spk_times[0][np.logical_and(mua.spk_times[0]/mua.fs<time_segs[1][1], mua.spk_times[0]/mua.fs>time_segs[1][0])].shape mua.spk_times[0].shape def find_spk_in_time_seg(spk_times, time_segs): spk_times_in_range = [] for time_seg in time_segs: spk_in_time_seg = spk_times[logical_and(spk_times<time_seg[1], spk_times>time_seg[0])] spk_times_in_range.append(spk_in_time_seg) return np.hstack(np.array(spk_times_in_range)) %%timeit spk_times = find_spk_in_time_seg(mua.spk_times[0]/mua.fs, time_segs) spk.spk[0].shape s = np.delete(spk.spk[0], [0,1,2], axis=0) s.shape fet = spk.tofet() clu = fet.toclu() spkview = spike_view() spkview.set_data(spk.spk[0], clu[0]) spkview.show() fetview = scatter_3d_view() fetview.set_data(fet.fet[0], clu[0]) fetview.show() ampview = amplitude_view(fs=prb.fs, scale=1) ampview.set_data(spk.spk[0], clu[0], mua.pivotal_pos[0]) ampview.show() clu[0].membership.shape treeview = ctree_view() treeview.set_data(clu[0]) treeview.show() clu[0]._extra_info['condensed_tree'].dtype clu[0]._extra_info['condensed_tree'] treeview._whole_tree.dtype treeview._whole_tree treeview._vertices treeview._faces 
treeview._face_colors from vispy import scene canvas = scene.SceneCanvas(keys='interactive') view = canvas.central_widget.add_view() mesh = scene.visuals.Mesh() mesh.mode = 'triangles' mesh.set_data(vertices=treeview._vertices, faces=treeview._faces.astype(np.int64), face_colors=treeview._face_colors) view.camera = 'panzoom' view.add(mesh) canvas.show() treeview._mesh treeview.show() treeview.show() corview = correlogram_view(fs=prb.fs) corview.set_data(clu[0], mua.pivotal_pos[0]) corview.show() traceview = trace_view(fs=prb.fs, spklen=19) traceview.set_data(mua.data, clu[0], mua.pivotal_pos[0]) traceview.show() ``` ## Test clu function ``` _clu = clu[0] _clu.merge([1,2]) _clu.move({2:[0,1,2,3,4,5,6,7,8,9,10]}, 0) ``` ## Unit test for QT5 Wrapper ``` from spiketag.mvc import MainModel, MainView model = MainModel('./cell_0109.bin', './cell_0109.spk.bin', prb, binary_radix=11) model.sort() view = MainView(prb) view.show() view.set_data(0, model.mua, model.spk[0], model.fet[0], model.clu[0]) view.spkview.cluster_mouse_on model.clu[0].index_count model.clu[0].npts model.clu[0].nclu model.clu[0]._method model.clu[0]._extra_info _labels = model.clu[0]._probmatrix.argmax(axis=1) idx = np.where(model.clu[0]._probmatrix.max(axis=1) > 0.02)[0] _labels = np.zeros((model.clu[0].npts,)) _labels[idx] = model.clu[0]._probmatrix.argmax(axis=1)[idx] + 1 model.clu[0].fill(_labels) model.clu[0].selectlist.shape ``` ## dpgmm ``` from sklearn.mixture import BayesianGaussianMixture as DPGMM dpgmm = DPGMM( n_components=15, covariance_type='full', weight_concentration_prior=1e-3, weight_concentration_prior_type='dirichlet_process', init_params="kmeans", max_iter=500, random_state=None, tol=0.05, verbose=True) # init can be "kmeans" or "random" dpgmm.fit(model.fet[0]) labels = dpgmm.predict(model.fet[0]) model.clu[0].fill(labels) model.clu[0].selectlist.shape ``` ## hdbscan ``` import hdbscan hdbcluster = hdbscan.HDBSCAN(min_samples=10, min_cluster_size=25, leaf_size=40, # alpha=10.5, 
def delete_spk(model, spk_idx):
    # Remove the spikes at `spk_idx` from group 0 of `model`, recompute the
    # PCA features for that group, re-cluster it, and refresh the GUI.
    # NOTE(review): mutates `model` in place and relies on the module-level
    # `view` object created earlier in the notebook — confirm `view` exists
    # before calling.
    model.mua.spk_times[0] = np.delete(model.mua.spk_times[0], spk_idx, axis=0)
    model.spk[0] = np.delete(model.spk[0], spk_idx, axis=0)
    # `_tofet` is a private spiketag API; presumably recomputes the feature
    # matrix for group 0 from the remaining waveforms — TODO confirm.
    model.fet[0] = model.spk._tofet(0, method='pca')
    model.cluster(group_id=0, method='hdbscan', fall_off_size=30)
    # Push the updated group back into the main view.
    view.set_data(0, model.mua, model.spk[0], model.fet[0], model.clu[0])
class sorter(QWidget):
    """Ad-hoc Qt widget laying out the spiketag views in nested splitters.

    Layout: a vertical splitter (`splitter3`) stacking `splitter1`
    (trace view | feature views | spike view) on top of `splitter2`
    (correlogram | cluster tree | amplitude view).
    """

    def __init__(self):
        super(sorter, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the empty splitter skeleton; views are inserted in set_data()."""
        hbox = QHBoxLayout(self)
        self.splitter1 = QSplitter(Qt.Horizontal)
        self.splitter2 = QSplitter(Qt.Horizontal)
        self.splitter_fet = QSplitter(Qt.Vertical)  # stacks the two feature views
        self.splitter3 = QSplitter(Qt.Vertical)
        self.splitter3.addWidget(self.splitter1)
        self.splitter3.addWidget(self.splitter2)
        hbox.addWidget(self.splitter3)
        self.setLayout(hbox)
        QApplication.setStyle(QStyleFactory.create('Cleanlooks'))
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('spiketag')

    def set_data(self, mua, spk, fet, clu):
        """Create all views for one spike group and insert them in the splitters.

        mua : MUA object (provides .fs, .data and .pivotal_pos)
        spk : spike waveform array for the group, shape (n_spikes, spklen, n_ch)
        fet : feature array for the group
        clu : cluster object for the group
        """
        self.spkview = spike_view()
        self.spkview.set_data(spk, clu)
        self.fetview0 = scatter_3d_view()
        self.fetview0.set_data(fet[:, [0, 1, 2]].copy(), clu)
        self.fetview1 = scatter_3d_view()
        self.fetview1.set_data(fet[:, [1, 3, 4]].copy(), clu)
        # BUG FIX: the original passed `fs=fs` here and for corview, but no
        # `fs` is defined in this scope (NameError unless a stray global
        # existed); use the sampling rate carried by the MUA object, as the
        # trace view already does.
        self.ampview = amplitude_view(fs=mua.fs, scale=1)
        self.ampview.set_data(spk, clu, mua.pivotal_pos[0])
        self.treeview = ctree_view()
        self.treeview.set_data(clu)
        self.corview = correlogram_view(fs=mua.fs)
        self.corview.set_data(clu, mua.pivotal_pos[0])
        self.traceview = trace_view(fs=mua.fs, spklen=spk.shape[1])
        self.traceview.set_data(mua.data, clu, mua.pivotal_pos[0])
        # Put the view canvases into the splitters.
        self.splitter1.addWidget(self.traceview.native)
        self.splitter1.addWidget(self.splitter_fet)
        self.splitter_fet.addWidget(self.fetview0.native)
        self.splitter_fet.addWidget(self.fetview1.native)
        self.splitter1.addWidget(self.spkview.native)
        self.splitter2.addWidget(self.corview.native)
        self.splitter2.addWidget(self.treeview.native)
        self.splitter2.addWidget(self.ampview.native)
fet.toclu(method='hdbscan') ex.set_data(mua, spk.spk[1], fet.fet[1], clu[1]) ex.show() ex.traceview.locate_buffer = 300 ex.fetview0.set_data(fet.fet[1][:,[1,3,4]], clu[1]) ex.update() gclu = clu[1] gclu.selectlist gclu._registered_func_name('select') ```
github_jupyter
def plot_activity(exc_spike_mon, exc_pop_mon, inh_spike_mon):
    """Plots raster plots, population activity and a histogram of the coefficient of variation (CV) of the interspike intervals (ISI) – see Lecture 5 for more details.
    exc_spike_mon : a SpikeMonitor recording the excitatory population
    exc_pop_mon : a PopulationRateMonitor recording the excitatory population
    inh_spike_mon : a SpikeMonitor recording the inhibitory population
    """
    # 3x2 grid: left column = time series (shared x axis), right column =
    # the CV histogram (top) plus two hidden axes.
    fig, axs = plt.subplots(3, 2, sharex='col',
                            gridspec_kw={'height_ratios': [3, 2, 1],
                                         'width_ratios': [3, 1]},
                            figsize=(12, 8))
    # Raster plot of the recorded excitatory neurons.
    axs[0, 0].plot(exc_spike_mon.t/ms, exc_spike_mon.i, '|')
    axs[0, 0].set(ylabel='neuron index', title='excitatory neurons')
    # Get the individual spike trains for each neuron
    spike_trains = exc_spike_mon.spike_trains()
    # Calculate the CV of the ISI distribution for each neuron
    CVs = []
    for train in spike_trains.values():
        # We ignore spikes in the first 50ms to focus on the steady-state behaviour
        spikes = train[train > 50*ms]
        if len(spikes) < 2:
            # Fewer than 2 spikes -> no ISI; recorded as NaN.
            # NOTE(review): NaNs are passed straight to plt.hist below —
            # confirm the installed matplotlib tolerates that.
            CVs.append(np.nan)
        else:
            ISIs = np.diff(spikes/ms)
            # CV = std/mean of the inter-spike intervals (1 ~ Poisson-like).
            CVs.append(np.std(ISIs)/np.mean(ISIs))
    axs[0, 1].hist(CVs, bins=20, range=(0., 1.))
    axs[0, 1].xaxis.set_tick_params(labelbottom=True)
    axs[0, 1].set(xlabel='CV of ISI', yticks=[])
    # Smoothed population rate plus its time-average as a reference line.
    axs[1, 0].plot(exc_pop_mon.t/ms, exc_pop_mon.smooth_rate(width=2*ms))
    axs[1, 0].hlines(np.mean(exc_pop_mon.rate/Hz),
                     exc_pop_mon.t[0]/ms, exc_pop_mon.t[-1]/ms,
                     color='darkgray')
    axs[1, 0].set_ylabel('pop. firing rate (Hz)')
    axs[1, 0].set_ylim(0, axs[1, 0].get_ylim()[1])
    # Raster plot of the recorded inhibitory neurons.
    axs[2, 0].plot(inh_spike_mon.t/ms, inh_spike_mon.i, '|', color='C1')
    axs[2, 0].set(ylabel='neuron index', title='inhibitory neurons')
    axs[2, 0].set_xlabel('time (ms)')
    # The two unused right-column axes are hidden.
    axs[1, 1].axis('off')
    axs[2, 1].axis('off')
    fig.tight_layout()
``` start_scope() N_E = 4000 # excitatory neurons N_I = 1000 # inhibitory neurons E_L = -50*mV V_threshold = -30*mV V_reset = -55*mV C = 70*pF g_L = 10*nS tau = C/g_L tau_syn = 2*ms w_exc_to_exc = 0*mV w_exc_to_inh = 0*mV w_inh_to_exc = 0*mV duration = 1000*ms neurons = NeuronGroup(N_E + N_I, '''dV/dt = (E_L - V + I_syn + I_ext)/tau : volt (unless refractory) I_ext : volt (constant) dI_syn/dt = -I_syn/tau_syn : volt # decay between spikes''', threshold='V>V_threshold', reset='V=V_reset', method='euler', refractory=5*ms) # Initialize values neurons.V = 'E_L + rand()*(V_threshold - E_L)' exc_neurons = neurons[:N_E] # Uses "slicing" to get subpopulations of neurons inh_neurons = neurons[N_E:] exc_neurons.I_ext = '20*mV + rand()*10*mV' # Connect neurons to each other exc_to_exc = Synapses(exc_neurons, exc_neurons, on_pre='I_syn_post += w_exc_to_exc') exc_to_exc.connect(p=0.02) exc_to_inh = Synapses(exc_neurons, inh_neurons, on_pre='I_syn_post += w_exc_to_inh') exc_to_inh.connect(p=0.02) inh_to_exc = Synapses(inh_neurons, exc_neurons, on_pre='I_syn_post -= w_inh_to_exc') inh_to_exc.connect(p=0.02) # Records from 100 excitatory neurons and 25 inhibitory neurons only, to get simpler plots spike_mon_exc = SpikeMonitor(exc_neurons[:100]) spike_mon_inh = SpikeMonitor(inh_neurons[:25]) pop_mon_exc = PopulationRateMonitor(exc_neurons) # Records the population rate over time # run simulation run(duration, report='text') # for simulations that take longer to run # Plot the activity using the provided function plot_activity(spike_mon_exc, pop_mon_exc, spike_mon_inh) ``` ## Exercise 1 Run the network once with the parameters above (**"configuration A"**: `w_exc_to_exc = 0*mV`, `w_exc_to_inh = 0*mV`, `w_inh_to_exc = 0*mV`) and once with the parameters `w_exc_to_exc = 0.5*mV`, `w_exc_to_inh = 2*mV`, `w_inh_to_exc = 2*mV` (**"configuration B"**). 
For each model, state whether you'd say that individual neurons show *regular* or *irregular activity*, and whether the network is *globally asynchronous* or *globally synchronous*. ## Exercise 2 For both parametrizations, plot the spike count of the excitatory neurons as a function of the external input current that they receive. Which of the models has a very clear dependency? **Hint**: The `SpikeMonitor` `spike_mon_exc` only records from 100 neurons. You can restrict your analysis to these 100 neurons, or change the definition of the `SpikeMonitor` so that it records from all neurons. ## Exercise 3 Change the definition of the `exc_to_exc` (excitatory → excitatory) synapses so that each synapse has a **random delay** (the time it takes to propagate a spike to its target) **between 1ms and 3ms**. You can set delays in the same way as e.g. the input current `I_ext`, by assigning to the `exc_to_exc.delay` attribute, i.e. by using `exc_to_exc.delay = ...`. (Note: You need to add this line *after* `exc_to_exc.connect(...)`). When you run the new network with random delays with the parameters from *configuration B*: what changes in the network activity compared to the network without delays?
github_jupyter
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm %matplotlib inline from torch.utils.data import Dataset, DataLoader import torch import torchvision import torch.nn as nn import torch.optim as optim from torch.nn import functional as F device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) m = 20 # 5, 10, 20, 50, 100, 500 desired_num = 1000 tr_i = 0 tr_j = int(desired_num/2) tr_k = desired_num tr_i, tr_j, tr_k ``` # Generate dataset ``` np.random.seed(12) y = np.random.randint(0,10,5000) idx= [] for i in range(10): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((5000,2)) np.random.seed(12) x[idx[0],:] = np.random.multivariate_normal(mean = [5,5],cov=[[0.1,0],[0,0.1]],size=sum(idx[0])) x[idx[1],:] = np.random.multivariate_normal(mean = [6,6],cov=[[0.1,0],[0,0.1]],size=sum(idx[1])) x[idx[2],:] = np.random.multivariate_normal(mean = [5.5,6.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[2])) x[idx[3],:] = np.random.multivariate_normal(mean = [-1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[3])) x[idx[4],:] = np.random.multivariate_normal(mean = [0,2],cov=[[0.1,0],[0,0.1]],size=sum(idx[4])) x[idx[5],:] = np.random.multivariate_normal(mean = [1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[5])) x[idx[6],:] = np.random.multivariate_normal(mean = [0,-1],cov=[[0.1,0],[0,0.1]],size=sum(idx[6])) x[idx[7],:] = np.random.multivariate_normal(mean = [0,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[7])) x[idx[8],:] = np.random.multivariate_normal(mean = [-0.5,-0.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[8])) x[idx[9],:] = np.random.multivariate_normal(mean = [0.4,0.2],cov=[[0.1,0],[0,0.1]],size=sum(idx[9])) x[idx[0]][0], x[idx[5]][5] for i in range(10): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) bg_idx = [ np.where(idx[3] == True)[0], np.where(idx[4] == True)[0], np.where(idx[5] == True)[0], np.where(idx[6] == True)[0], np.where(idx[7] == True)[0], np.where(idx[8] 
== True)[0], np.where(idx[9] == True)[0]] bg_idx = np.concatenate(bg_idx, axis = 0) bg_idx.shape np.unique(bg_idx).shape x = x - np.mean(x[bg_idx], axis = 0, keepdims = True) np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True) x = x/np.std(x[bg_idx], axis = 0, keepdims = True) np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True) for i in range(10): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) foreground_classes = {'class_0','class_1', 'class_2'} background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'} fg_class = np.random.randint(0,3) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(3,10) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , fg_idx) np.reshape(a,(2*m,1)) mosaic_list_of_images =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): np.random.seed(j) fg_class = np.random.randint(0,3) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(3,10) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_of_images.append(np.reshape(a,(2*m,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T mosaic_list_of_images.shape mosaic_list_of_images.shape, 
def create_avg_image_from_mosaic_dataset(mosaic_dataset, labels, foreground_index, dataset_number, m):
    """Collapse each mosaic into a single 2-D point by weighted averaging.

    Each row of `mosaic_dataset` holds m concatenated 2-D patches
    (length 2*m).  The patch at `foreground_index[i]` gets weight
    dataset_number/m and every background patch gets weight
    (m - dataset_number)/((m-1)*m), so the m weights always sum to 1.

    Parameters
    ----------
    mosaic_dataset : array-like, shape (n, 2*m)
        Mosaic vectors, m patches of 2 values each.
    labels : sequence of int
        Foreground class per mosaic (returned unchanged).
    foreground_index : sequence of int
        Position (0..m-1) of the foreground patch in each mosaic.
    dataset_number : int
        Controls the foreground weight (j -> foreground ratio j/m).
    m : int
        Number of patches per mosaic.

    Returns
    -------
    (avg_image_dataset, labels, foreground_index)
        avg_image_dataset is a list of torch.float64 tensors of shape (2,).
    """
    avg_image_dataset = []
    counter = np.zeros(m)  # how many mosaics had their foreground at each position
    # Weights are loop-invariant: hoist them out of the per-sample loops.
    fg_weight = dataset_number / m
    bg_weight = (m - dataset_number) / ((m - 1) * m)
    for i in range(len(mosaic_dataset)):
        # Legacy side effect kept from the original: reseed the global RNG.
        # Nothing below draws from it, but downstream cells may observe the
        # resulting RNG state.
        np.random.seed(int(dataset_number * 10000 + i))
        give_pref = foreground_index[i]
        img = torch.zeros([2], dtype=torch.float64)  # 2 is the data dim
        for j in range(m):
            weight = fg_weight if j == give_pref else bg_weight
            img = img + mosaic_dataset[i][2 * j:2 * j + 2] * weight
        counter[give_pref] += 1
        avg_image_dataset.append(img)
    # The original tracked `cnt` via `give_pref == foreground_index[i]`,
    # which is always true (give_pref is assigned from it), so every mosaic
    # is averaged "correctly" and cnt simply equals the dataset size.
    cnt = len(mosaic_dataset)
    print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
    print("the averaging are done as ", counter)
    return avg_image_dataset, labels, foreground_index
print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)) # print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0)) print("=="*40) test_dataset = torch.stack(test_dataset, axis = 0) # test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0) # print(torch.mean(test_dataset, keepdims= True, axis = 0)) # print(torch.std(test_dataset, keepdims= True, axis = 0)) print("=="*40) x1 = (avg_image_dataset_1).numpy() y1 = np.array(labels_1) plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0') plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1') plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2') plt.legend() plt.title("dataset4 CIN with alpha = 1/"+str(m)) x1 = (test_dataset).numpy() / m y1 = np.array(labels) plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0') plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1') plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2') plt.legend() plt.title("test dataset4") test_dataset[0:10]/m test_dataset = test_dataset/m test_dataset[0:10] class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list_of_images, mosaic_label): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.mosaic = mosaic_list_of_images self.label = mosaic_label #self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx] avg_image_dataset_1[0].shape avg_image_dataset_1[0] batch = 200 traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True) testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False) testdata_11 = MosaicDataset(test_dataset, labels ) testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False) class Whatnet(nn.Module): def __init__(self): super(Whatnet,self).__init__() self.linear1 = nn.Linear(2,3) # self.linear2 = nn.Linear(50,10) # self.linear3 = nn.Linear(10,3) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.zeros_(self.linear1.bias) def forward(self,x): # x = F.relu(self.linear1(x)) # x = F.relu(self.linear2(x)) x = (self.linear1(x)) return x def calculate_loss(dataloader,model,criter): model.eval() r_loss = 0 with torch.no_grad(): for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") outputs = model(inputs) loss = criter(outputs, labels) r_loss += loss.item() return r_loss/(i+1) def test_all(number, testloader,net): correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= net(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() pred = np.concatenate(pred, axis = 0) out = np.concatenate(out, axis = 0) print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) ) print("correct: ", correct, "total ", total) print('Accuracy of the network on the 
def train_all(trainloader, ds_number, testloader_list):
    """Train a fresh Whatnet on `trainloader` and evaluate it.

    Trains for up to 1000 epochs with Adam (lr=0.001), stopping early once
    the full-dataset loss drops to 0.05 or below, then prints train accuracy
    and runs test_all on every loader in `testloader_list`.

    trainloader : DataLoader of (inputs, labels) batches
    ds_number : int label used only for logging
    testloader_list : list of DataLoaders to evaluate on
    Returns the list of per-epoch losses (index 0 = loss before training).
    """
    print("--"*40)
    print("training on data set ", ds_number)
    # Fixed seed so the Whatnet initialization is reproducible across runs.
    torch.manual_seed(12)
    net = Whatnet().double()
    # NOTE(review): device is hard-coded to "cuda" here and below — this
    # cell requires a GPU; confirm before running on a CPU-only machine.
    net = net.to("cuda")
    criterion_net = nn.CrossEntropyLoss()
    optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
    acti = []
    loss_curi = []
    epochs = 1000
    # Record the loss of the untrained network as the first curve point.
    running_loss = calculate_loss(trainloader,net,criterion_net)
    loss_curi.append(running_loss)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    for epoch in range(epochs):  # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        net.train()
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_net.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion_net(outputs, labels)
            # print statistics
            running_loss += loss.item()
            loss.backward()
            optimizer_net.step()
        # Re-evaluate on the whole training set (eval mode) for the curve,
        # rather than using the accumulated mini-batch losses above.
        running_loss = calculate_loss(trainloader,net,criterion_net)
        if(epoch%200 == 0):
            print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
        loss_curi.append(running_loss) #loss per epoch
        if running_loss<=0.05:
            # Early stopping once the training loss is small enough.
            print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
            break
    print('Finished Training')
    # Final accuracy on the training set (no gradients needed).
    correct = 0
    total = 0
    with torch.no_grad():
        for data in trainloader:
            images, labels = data
            images, labels = images.to("cuda"), labels.to("cuda")
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
    # Evaluate on every provided test loader (1-based numbering for logs).
    for i, j in enumerate(testloader_list):
        test_all(i+1, j,net)
    print("--"*40)
    return loss_curi
plt.plot(j,label ="dataset "+str(i+1)) plt.xlabel("Epochs") plt.ylabel("Training_loss") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ```
github_jupyter
def ring_network():
    """Build the 4-bus open-ring example network (220/110 kV).

    One external grid feeds bus 1 (220 kV) through a transformer to three
    110 kV buses connected in a ring; an open line switch at bus 4 keeps
    the ring radial.  Element creation order is significant (it fixes the
    pandapower element indices) and is preserved.
    """
    net = pp.create_empty_network()
    hv_bus = pp.create_bus(net, 220)
    ring_bus_a = pp.create_bus(net, 110)
    ring_bus_b = pp.create_bus(net, 110)
    ring_bus_c = pp.create_bus(net, 110)
    # External grid with short-circuit power and R/X limits for max/min case.
    pp.create_ext_grid(net, hv_bus, s_sc_max_mva=100., s_sc_min_mva=80.,
                       rx_min=0.20, rx_max=0.35)
    pp.create_transformer(net, hv_bus, ring_bus_a, "100 MVA 220/110 kV")
    # Three ring segments of the same cable type; the middle one carries the
    # open sectioning switch.
    pp.create_line(net, ring_bus_a, ring_bus_b,
                   std_type="N2XS(FL)2Y 1x120 RM/35 64/110 kV", length_km=15.)
    open_segment = pp.create_line(net, ring_bus_b, ring_bus_c,
                                  std_type="N2XS(FL)2Y 1x120 RM/35 64/110 kV",
                                  length_km=12.)
    pp.create_line(net, ring_bus_c, ring_bus_a,
                   std_type="N2XS(FL)2Y 1x120 RM/35 64/110 kV", length_km=10.)
    # Open line switch -> the ring is operated radially by default.
    pp.create_switch(net, ring_bus_c, open_segment, closed=False, et="l")
    return net
``` sc.calc_sc(net, case="max", ith=True, ip=True) net.res_bus_sc ``` ### Synchronous Generators Synchronous generators can also be considered in the short-circuit calculation with the gen element. According to the standard, the rated power factor (cos$\varphi$) "cos_phi", rated voltage "vn_kv", rated apparent power "sn_kva" and subtransient resistance "rdss" and reactance "xdss" are necessary to calculate the short circuit impedance: ``` net = ring_network() pp.create_gen(net, 2, p_kw=0, vm_pu=1.0, cos_phi=0.8, vn_kv=22, sn_kva=5e3, xdss=0.2, rdss=0.005) net ``` and run the short-circuit calculation again: ``` sc.calc_sc(net, case="max", ith=True, ip=True) net.res_bus_sc ``` Once again, the short-circuit current increases due to the contribution of the generator. As can be seen in the warning, the values for peak and thermal equivalent short-circuit current will only be accurate for faults far from generators. ## Meshed Networks The correction factors for aperiodic and thermal currents differ between meshed and radial networks. pandapower includes a meshing detection that automatically detects the meshing for each short-circuit location. Alternatively, the topology can be set to "radial" or "meshed" to circumvent the check and save calculation time. We load the radial network and close the open sectioning point to get a closed ring network: ``` net = ring_network() net.switch.closed = True sc.calc_sc(net, topology="auto", ip=True, ith=True) net.res_bus_sc ``` the network is automatically detected to be meshed and application factors are applied. 
This can be validated by setting the topology to radial and comparing the results: ``` sc.calc_sc(net, topology="radial", ip=True, ith=True) net.res_bus_sc ``` If we look at the line results, we can see that the line currents are significantly smaller than the bus currents: ``` sc.calc_sc(net, topology="auto", ip=True, ith=True, branch_results=True) net.res_line_sc ``` this is because the short-circuit current is split up on both paths of the ring, which is correctly considered by pandapower. ## Fault Impedance It is also possible to specify a fault impedance in the short-circuit calculation: ``` net = ring_network() sc.calc_sc(net, topology="radial", ip=True, ith=True, r_fault_ohm=1., x_fault_ohm=2.) ``` which of course decreases the short-circuit currents: ``` net.res_bus_sc ``` ## Asymetrical Two-Phase Short-Circuit Calculation All calculations above can be carried out for a two-phase short-circuit current in the same way by specifying "2ph" in the fault parameter: ``` net = ring_network() sc.calc_sc(net, fault="2ph", ip=True, ith=True) net.res_bus_sc ``` Two phase short-circuits are often used for minimum short-circuit calculations: ``` net = ring_network() net.line["endtemp_degree"] = 150 sc.calc_sc(net, fault="2ph", case="min", ip=True, ith=True) net.res_bus_sc ```
github_jupyter
# Index 0 -> blue (first cluster), index 1 -> red (second cluster).
colors_map = np.array(['b', 'r'])

def assign_members(x1, x2, centers):
    """Assign each (x1[i], x2[i]) point to the nearer of two centers.

    Parameters
    ----------
    x1, x2 : array-like of float
        Point coordinates.
    centers : sequence of two (x, y) pairs
        Current cluster centroids.

    Returns
    -------
    (colors, class_of_points)
        colors : array of matplotlib color codes from `colors_map`
        class_of_points : boolean array, True -> closer to centers[1]
    """
    xs, ys = np.asarray(x1), np.asarray(x2)
    # Euclidean distance of every point to each of the two centers.
    dist_to_first = np.hypot(xs - centers[0][0], ys - centers[0][1])
    dist_to_second = np.hypot(xs - centers[1][0], ys - centers[1][1])
    class_of_points = dist_to_first > dist_to_second
    # Explicit bool -> int index (the original used the `+ 1 - 1` trick).
    colors = colors_map[class_of_points.astype(int)]
    return colors, class_of_points
[np.mean(np.array(x1)[~class_of_points]), np.mean(np.array(x2)[~class_of_points])] center2 = [np.mean(np.array(x1)[class_of_points]), np.mean(np.array(x2)[class_of_points])] return [center1, center2] print('assign_members function has executed!') def plot_points(centroids=None, colors='g', figure_title=None): # plot the figure fig = plt.figure(figsize=(15, 10)) # create a figure object ax = fig.add_subplot(1, 1, 1) centroid_colors = ['bx', 'rx'] if centroids: for (i, centroid) in enumerate(centroids): ax.plot(centroid[0], centroid[1], centroid_colors[i], markeredgewidth=5, markersize=20) plt.scatter(x1, x2, s=500, c=colors) # define the ticks xticks = np.linspace(-6, 8, 15, endpoint=True) yticks = np.linspace(-6, 6, 13, endpoint=True) # fix the horizontal axis ax.set_xticks(xticks) ax.set_yticks(yticks) # add tick labels xlabels = xticks ax.set_xticklabels(xlabels) ylabels = yticks ax.set_yticklabels(ylabels) # style the ticks ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.tick_params('both', length=2, width=1, which='major', labelsize=15) # add labels to axes ax.set_xlabel('x1', fontsize=20) ax.set_ylabel('x2', fontsize=20) # add title to figure ax.set_title(figure_title, fontsize=24) plt.show() print('plot_points function has executed!') plot_points(figure_title='Scatter Plot of x2 vs x1') ``` ### Is it possible to label data as you see? You may label it intuitively, but in real world applications we need to use the mathematical procedures to find the best one. 
``` centers = [[-1.5, 1.5], [1.5, -1.5]] # change and see what happens, I chose them as -1.5 plot_points(centers, figure_title='K-Means Initialization') iterations = 5 for i in range(iterations): colors, class_of_points = assign_members(x1, x2, centers) title = 'Iteration {} - Cluster Assignment'.format(i + 1) plot_points(centers, colors, figure_title=title) centers = update_centers(x1, x2, class_of_points) title = 'Iteration {} - Centroid Update'.format(i + 1) plot_points(centers, colors, figure_title=title) print("Centers: ", centers) ``` <h1 id="customer_segmentation_K_means">Customer Segmentation with K-Means</h1> ### Load Data From CSV File Use the pandas read_csv method to load the dataset. ``` import pandas as pd data = pd.read_csv("Cust_Segmentation.csv") data.head() data.info() data.describe() ``` <h2 id="pre_processing">Pre-processing</h2> As you can see, **Address** in this dataset is a categorical variable. Since scikit-learn cannot handle categorical variables directly, we need to either encode this feature or drop it. The Euclidean distance function isn't really meaningful for discrete variables. So, for this time let's drop this feature and run clustering. ``` df = data.drop('Address', axis=1) df.head() ``` #### Normalizing data Normalization is a statistical method that helps mathematical-based algorithms to interpret features with different magnitudes and distributions equally. ``` X = df.values[:,1:] X = np.nan_to_num(X) Clus_dataSet = (X - np.min(X)) / (np.max(X) - np.min(X)) Clus_dataSet ``` <h2 id="modeling">Modeling</h2> In our example (if we didn't have access to the k-means algorithm), it would be the same as guessing that each customer group would have certain age, income, education, etc, with multiple tests and experiments. However, using K-means clustering we can do all of this much more easily. Let's apply k-means on our dataset, and take a look at cluster labels. 
``` clusterNum = 3 k_means_n = KMeans(init = "k-means++", n_clusters = clusterNum, n_init = 12) k_means_n.fit(X) labels = k_means_n.labels_ print(labels) ``` <h2 id="insights">Insights</h2> We assign the labels to each row in dataframe. ``` df["Clus_km"] = labels df.head(5) ``` We can easily check the centroid values by averaging the features in each cluster. ``` df.groupby('Clus_km').mean() ``` Now, lets look at the distribution of customers based on their age and income: ``` area = np.pi * ( X[:, 1])**2 plt.scatter(X[:, 0], X[:, 3], s=area, c=labels.astype(np.float), alpha=0.5) plt.xlabel('Age', fontsize=18) plt.ylabel('Income', fontsize=16) plt.show() from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(1, figsize=(8, 6)) plt.clf() ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134) plt.cla() # plt.ylabel('Age', fontsize=18) # plt.xlabel('Income', fontsize=16) # plt.zlabel('Education', fontsize=16) ax.set_xlabel('Education') ax.set_ylabel('Age') ax.set_zlabel('Income') ax.scatter(X[:, 1], X[:, 0], X[:, 3], c= labels.astype(np.float)) ``` K-Means will partition your customers into mutually exclusive groups, for example, into 3 clusters. The customers in each cluster are similar to each other demographically. Now we can create a profile for each group, considering the common characteristics of each cluster. For example, the 3 clusters can be: - AFFLUENT, EDUCATED AND OLD AGED - MIDDLE AGED AND MIDDLE INCOME - YOUNG AND LOW INCOME
github_jupyter
# Using Nucleus and TensorFlow for DNA Sequencing Error Correction <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/google/nucleus/blob/main/nucleus/examples/dna_sequencing_error_correction.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/google/nucleus/blob/main/nucleus/examples/dna_sequencing_error_correction.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ``` # @markdown Copyright 2019 Google LLC. \ # @markdown SPDX-License-Identifier: Apache-2.0 # @markdown (license hidden in Colab) # Copyright 2019 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` ## Introduction In this tutorial, we formulate DNA sequencing error correction as a multiclass classification problem and propose two deep learning solutions. Our first approach corrects errors in a single read, whereas the second approach, shown in Figure 1, builds a consensus from several reads to predict the correct DNA sequence. We implement the second approach using the [Nucleus](https://github.com/google/nucleus) and [TensorFlow](https://www.tensorflow.org/) libraries. Our goal is to show how Nucleus can be used alongside TensorFlow for solving machine learning problems in genomics. 
<center> <img src='https://github.com/google/nucleus/raw/main/nucleus/examples/images/consensus-approach-overview.jpg' width='900'/> </center> ### Problem Overview While DNA sequencing continues to become faster and cheaper, it is still an error-prone process. Error rates for raw data from next-generation sequencing (NGS) technologies developed by companies such as [Illumina](https://www.illumina.com/) are around 1%. Error rates for increasingly popular third-generation technologies like those developed by [Pacific BioSciences](https://www.pacb.com/) (PacBio) are around 15%. Sequencing errors can be divided into substitutions, insertions, and deletions, the last two of which are commonly referred to as indels. All of these errors can be detrimental to downstream analysis steps such as variant calling and genome assembly. A simple approach for obtaining higher quality datasets is to discard data that likely contains errors, either by throwing away entire reads or trimming regions of low quality. This approach is not ideal as it leads to a smaller final dataset. In addition, certain sequence contexts have naturally higher error rates, leading to biases in sampling. Thus, there exists a large body of research that is focused on developing more sophisticated methods for error correction. Most methods that have been developed can be categorized into one of two groups: 1. Methods that operate on a single read and aim to determine the correct read sequence 1. Consensus-based methods that operate on several reads and aim to determine the correct underlying DNA sequence ### Deep Learning Overview Both of the methods that we formulate in this post use deep neural networks, which learn functions that map inputs to outputs. A neural network consists of several layers of linear and nonlinear operations applied sequentially to the input. 
Neural networks have been successfully applied to various problems including [image classification](https://ai.googleblog.com/2016/03/train-your-own-image-classifier-with.html) and [natural language translation](https://ai.googleblog.com/2016/09/a-neural-network-for-machine.html). More recently, they have also been used for problems in genomics, such as [protein structure prediction](https://deepmind.com/blog/alphafold/) and [variant calling](https://ai.googleblog.com/2017/12/deepvariant-highly-accurate-genomes.html). ### Nucleus Our implementation relies on [Nucleus](https://github.com/google/nucleus), a library developed for processing genomics data by the Genomics team in Google Brain. Nucleus makes it easy to read, write, and analyze data in common genomics file formats like BAM, FASTA, and VCF using specialized reader and writer objects. Nucleus allows us to: * Query a VCF file for all variants in a given genomic region * Query a BAM file for all reads mapping to a given genomic range * Query a FASTA file for the reference sequence starting at a given position We also use Nucleus to write data out to [TFRecords](https://www.tensorflow.org/tutorials/load_data/tf_records), a binary file format that consists of protocol buffer messages and can be easily read by TensorFlow. After reading in the TFRecords, we use the [TensorFlow Keras API](https://www.tensorflow.org/api_docs/python/tf/keras) to train and evaluate a convolutional neural network. ### Data Below is a list of the files we use in the implementation. All of the data is publicly available, and the Appendix contains download links and instructions. File | Description --- | --- `NA12878_sliced.bam` | Illumina HiSeq reads from chromosome 20 (positions 10,000,000-10,100,000), downsampled to 30x coverage. `NA12878_sliced.bam.bai` | Index for `NA12878_sliced.bam`. `NA12878_calls.vcf.gz`| Truth set of variants for NA12878 from Genome in a Bottle. `NA12878_calls.vcf.gz.tbi` | Index for `NA12878_calls.vcf.gz`. 
`hs37d5.fa.gz` | Reference genome for hs37d5. `hs37d5.fa.gz.fai` and `hs37d5.fa.gz.gzi` | Index files for `hs37d5.fa.gz`. ### Questions or Comments? If you have any questions or comments regarding this tutorial, do not hesitate to reach out! You can [file an issue](https://github.com/google/nucleus/issues/new) on the Nucleus GitHub page. ## Setup If you are new to Colab or Jupyter notebooks, we recommend that you first go through this [tutorial](https://colab.research.google.com/notebooks/basic_features_overview.ipynb). ### Obtain Data, Install Nucleus, and Import Packages Run the below cells to obtain the data, install Nucleus, and import Python packages. Note, the code for some cells is hidden for clarity. These cells are marked with the following text: `(code hidden in Colab)`. If you wish to view the code for a hidden cell, double click the cell. To hide the code, double click the markdown output on the right side. ``` #@markdown Run this cell to obtain the data. (code hidden in Colab) %%capture !gsutil cp gs://deepvariant/case-study-testdata/NA12878_sliced.bam NA12878_sliced.bam !gsutil cp gs://deepvariant/case-study-testdata/NA12878_sliced.bam.bai NA12878_sliced.bam.bai !wget ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/release/NA12878_HG001/latest/GRCh37/HG001_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz -O NA12878_calls.vcf.gz !wget ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/release/NA12878_HG001/latest/GRCh37/HG001_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz.tbi -O NA12878_calls.vcf.gz.tbi !gsutil cp gs://deepvariant/case-study-testdata/hs37d5.fa.gz hs37d5.fa.gz !gsutil cp gs://deepvariant/case-study-testdata/hs37d5.fa.gz.fai hs37d5.fa.gz.fai !gsutil cp gs://deepvariant/case-study-testdata/hs37d5.fa.gz.gzi hs37d5.fa.gz.gzi #@markdown Run this cell to install Nucleus and TensorFlow 2.0. 
(code hidden in Colab) !pip install -q google-nucleus==0.5.6 !pip install -q tensorflow==2.3.0 from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random import numpy as np from nucleus.io import fasta from nucleus.io import sam from nucleus.io import vcf from nucleus.io.genomics_writer import TFRecordWriter from nucleus.protos import reads_pb2 from nucleus.util import cigar from nucleus.util import ranges from nucleus.util import utils # Import TensorFlow after Nucleus. import tensorflow as tf from tensorflow.keras import layers # Define constants and utility functions. # We will only allow simple alignments, specified by the below cigar string # operators. If you are not familiar with cigar strings, you can read more # at section 1.4 of this link: https://samtools.github.io/hts-specs/SAMv1.pdf _ALLOWED_CIGAR_OPS = frozenset([cigar.CHAR_TO_CIGAR_OPS[op] for op in 'MX=']) # We will only allow certain bases. _ALLOWED_BASES = 'ACGT' _TRAIN = 'train.tfrecord' _EVAL = 'eval.tfrecord' _TEST = 'test.tfrecord' ``` ## Network Architecture Convolutional neural networks are commonly used for computer vision tasks, but also [work well for genomics](https://doi.org/10.1093/molbev/msy224). Each convolutional layer repeatedly applies learned filters to the input. Convolutional filters appearing early in the network learn to recognize low-level features in the input, like edges and color gradients in images, whereas later filters learn to recognize more complex compositions of the low-level features. For DNA sequence inputs, low-level convolutional filters act as motif detectors, similar to the position weight matrices of [sequence logos](https://en.wikipedia.org/wiki/Position_weight_matrix#/media/File:LexA_gram_positive_bacteria_sequence_logo.png). For our implementation, we use a standard convolutional architecture consisting of two convolutional layers, followed by three fully connected layers. 
We use nonlinear ReLU layers to increase the expressive capacity of our model. Maxpooling after convolutional layers shrinks the input volume, and dropout after fully connected layers acts as a regularizer. The final softmax layer normalizes the logits to produce a valid probability distribution. The details of each layer can be found in the code below. <center> <img src='https://user-images.githubusercontent.com/8205702/56754313-8c8b8600-6741-11e9-9d8b-c9f547f158ba.jpg' width='900'/> </center> ``` def build_model(hparams): """Convolutional neural network architecture.""" l2_reg = tf.keras.regularizers.l2 return tf.keras.models.Sequential([ # Two convolution + maxpooling blocks layers.Conv1D( filters=16, kernel_size=5, activation=tf.nn.relu, kernel_regularizer=l2_reg(hparams.l2)), layers.MaxPool1D(pool_size=3, strides=1), layers.Conv1D( filters=16, kernel_size=3, activation=tf.nn.relu, kernel_regularizer=l2_reg(hparams.l2)), layers.MaxPool1D(pool_size=3, strides=1), # Flatten the input volume layers.Flatten(), # Two fully connected layers, each followed by a dropout layer layers.Dense( units=16, activation=tf.nn.relu, kernel_regularizer=l2_reg(hparams.l2)), layers.Dropout(rate=0.3), layers.Dense( units=16, activation=tf.nn.relu, kernel_regularizer=l2_reg(hparams.l2)), layers.Dropout(rate=0.3), # Output layer with softmax activation layers.Dense(units=len(_ALLOWED_BASES), activation='softmax') ]) ``` ## Approach 1: Error Correction of Single Reads In order to correct errors in sequenced reads, we can use deep learning to train a neural network that can solve a more general task: fill in missing bases in DNA sequences. The goal of this approach is to develop a model that understands the grammar of DNA sequences. The grammar of real sequences alone likely does not contain enough information to develop a solution that can be used in production. Nonetheless, this serves as a straightforward example application. 
<center> <img src='https://github.com/google/nucleus/raw/main/nucleus/examples/images/single-read-input-and-output.jpg' width='900'/> </center> For instructional purposes, we simplify the problem in the following ways: * Consider only regions with substitution errors and ignore indel errors * Consider only regions with no known variants We can train the neural network on regions of the reference genome. The input to this network is a DNA sequence of fixed length, centered around the base we wish to predict. The output of the network is a distribution over the possible bases, and the final prediction is the base with highest probability. The label set is generated using the bases observed in the reference genome. Since we only use reads mapping to regions with no known truth variants, we can unambiguously denote the base present in the reference genome as the label. We generate input sequences by splitting the reference genome into non-overlapping sections of a fixed length. At training, evaluation, and test time, we simulate missing bases by zeroing out a base in the reference sequence, as shown in Figure 3 (position 5). In addition to simulating missing data using the reference genome, we can also apply such a model to data from sequenced reads, specifically bases with quality scores below a threshold value. ## Approach 2: Consensus-Based Error Correction The ultimate goal of error correction is to determine the underlying DNA sequence, as opposed to correcting an individual read. In this section, we use the consensus of multiple reads by aggregating a sequence pileup to directly determine the DNA sequence without the intermediate step of correcting individual reads. An example of a pileup is shown below in Figure 4. Note, the figure only shows the portions of the reads that fall in the window. 
<center> <img src='https://github.com/google/nucleus/raw/main/nucleus/examples/images/read-pileup.jpg' width='700'/> </center> For instructional purposes, we again simplify the problem in the following ways: * Consider only regions with substitution errors and ignore indel errors * Consider only regions with no known variants <center> <img src='https://github.com/google/nucleus/raw/main/nucleus/examples/images/raw-counts.jpg' width='700'/> </center> Unlike the first approach, we do not train this model on the reference genome. Instead, our training data comes from mapped Illumina HiSeq reads. The input to this network is a matrix of normalized base counts observed in mapped reads, centered around the position at which we wish to predict the correct base. A similar featurization is used by the authors of [Clairvoyante](https://doi.org/10.1101/310458), a neural network for variant calling, and in an [example method by Jason Chin](https://towardsdatascience.com/simple-convolution-neural-network-for-genomic-variant-calling-with-tensorflow-c085dbc2026f). The output of the network is a distribution over the possible bases, and the final prediction is the base with highest probability. Similar to the first approach, the label set is generated using the bases observed in the reference genome. We use a mix of examples that contain errors (at least one read in the pileup does not match the reference at the center position) and examples that do not contain errors (all reads in the pileup match the reference at the center position). <center> <img src='https://github.com/google/nucleus/raw/main/nucleus/examples/images/consensus-input-and-output.jpg' width='900'/> </center> ### Data Processing The data for this problem comes from 148bp mapped Illumina HiSeq reads. In processing the data, we compare each read to the reference sequence, and any positions that differ from the reference are denoted as errors. 
* For reads containing one or more errors, we randomly choose an error and create one example centered at the corresponding position. * For reads containing no errors, we create one example centered at the middle position. Once we have determined the genomic window for the example, we use Nucleus to query for all reads mapping to the window. We then build a normalized counts matrix, as shown above. For some of the reads, only a subset of all bases will fall inside the window, and we ignore the bases that fall outside the window. ### Implement Neural Network Pipeline We divide up our pipeline into the following steps, for each of which we implement several functions. 1. [Generate TFRecords datasets](#scrollTo=ZlCQkTr-u-9Y) 1. [Read data from TFRecords datasets](#scrollTo=sQ1-zPVBvIr0) 1. [Train the model](#scrollTo=tgiVn4dZvdLs) **Step 1: Generate TFRecords Datasets** We generate TFRecords datasets for training, evaluation, and testing. All examples that do not meet the criteria expressed in `is_usable_example` are discarded. ``` def generate_tfrecord_datasets(hparams): """Writes out TFRecords files for training, evaluation, and test datasets.""" if not os.path.exists(hparams.out_dir): os.makedirs(hparams.out_dir) # Fraction of examples in each dataset. train_eval_test_split = [0.7, 0.2, 0.1] num_train_examples = 0 num_eval_examples = 0 num_test_examples = 0 # Generate training, test, and evaluation examples. 
with TFRecordWriter(os.path.join(hparams.out_dir, _TRAIN)) as train_out, \ TFRecordWriter(os.path.join(hparams.out_dir, _EVAL)) as eval_out, \ TFRecordWriter(os.path.join(hparams.out_dir, _TEST)) as test_out: all_examples = make_ngs_examples(hparams) for example in all_examples: r = random.random() if r < train_eval_test_split[0]: train_out.write(proto=example) num_train_examples += 1 elif r < train_eval_test_split[0] + train_eval_test_split[1]: eval_out.write(proto=example) num_eval_examples += 1 else: test_out.write(proto=example) num_test_examples += 1 print('# of training examples: %d' % num_train_examples) print('# of evaluation examples: %d' % num_eval_examples) print('# of test examples: %d' % num_test_examples) def make_ngs_examples(hparams): """Generator function that yields training, evaluation and test examples.""" ref_reader = fasta.IndexedFastaReader(input_path=hparams.ref_path) vcf_reader = vcf.VcfReader(input_path=hparams.vcf_path) read_requirements = reads_pb2.ReadRequirements() sam_reader = sam.SamReader( input_path=hparams.bam_path, read_requirements=read_requirements) # Use a separate SAM reader to query for reads falling in the pileup range. sam_query_reader = sam.SamReader( input_path=hparams.bam_path, read_requirements=read_requirements) used_pileup_ranges = set() with ref_reader, vcf_reader, sam_reader, sam_query_reader: for read in sam_reader: # Check that read has cigar string present and allowed alignment. if not read.alignment.cigar: print('Skipping read, no cigar alignment found') continue if not has_allowed_alignment(read): continue # Obtain window that will be used to construct an example. read_range = utils.read_range(read) ref = ref_reader.query(region=read_range) pileup_range = get_pileup_range(hparams, read, read_range, ref) # Do not construct multiple examples with the same pileup range. 
pileup_range_serialized = pileup_range.SerializeToString() if pileup_range_serialized in used_pileup_ranges: continue used_pileup_ranges.add(pileup_range_serialized) # Get reference sequence, reads, and truth variants for the pileup range. pileup_reads = list(sam_query_reader.query(region=pileup_range)) pileup_ref = ref_reader.query(region=pileup_range) pileup_variants = list(vcf_reader.query(region=pileup_range)) if is_usable_example(pileup_reads, pileup_variants, pileup_ref): yield make_example(hparams, pileup_reads, pileup_ref, pileup_range) def get_pileup_range(hparams, read, read_range, ref): """Returns a range that will be used to construct one example.""" # Find error positions where read and reference differ. ngs_read_length = read_range.end - read_range.start error_indices = [ i for i in range(ngs_read_length) if ref[i] != read.aligned_sequence[i] ] # If read and reference sequence are the same, create an example centered # at middle base of read. if not error_indices: error_idx = ngs_read_length // 2 # If read and reference differ at one or more positions, create example # centered at a random error position. else: error_idx = random.choice(error_indices) error_pos = read_range.start + error_idx flank_size = hparams.window_size // 2 return ranges.make_range( chrom=read_range.reference_name, start=error_pos - flank_size, end=error_pos + flank_size + 1) def has_allowed_alignment(read): """Determines whether a read's CIGAR string has the allowed alignments.""" return all([c.operation in _ALLOWED_CIGAR_OPS for c in read.alignment.cigar]) def is_usable_example(reads, variants, ref_bases): """Determines whether a particular reference region and read can be used.""" # Discard examples with variants or no mapped reads. if variants or not reads: return False # Use only examples where all reads have simple alignment and allowed bases. 
for read in reads: if not has_allowed_alignment(read): return False if any(base not in _ALLOWED_BASES for base in read.aligned_sequence): return False # Reference should only contain allowed bases. if any(base not in _ALLOWED_BASES for base in ref_bases): return False return True def make_example(hparams, pileup_reads, pileup_ref, pileup_range): """Takes in an input sequence and outputs tf.train.Example ProtocolMessages. Each example contains the following features: A counts, C counts, G counts, T counts, reference sequence, correct base label. """ assert len(pileup_ref) == hparams.window_size example = tf.train.Example() base_counts = np.zeros(shape=[hparams.window_size, len(_ALLOWED_BASES)]) for read in pileup_reads: read_position = read.alignment.position.position read_ints = [_ALLOWED_BASES.index(b) for b in read.aligned_sequence] one_hot_read = np.zeros((len(read_ints), len(_ALLOWED_BASES))) one_hot_read[np.arange(len(one_hot_read)), read_ints] = 1 window_start = read_position - pileup_range.start window_end = window_start + len(read_ints) # If read falls outside of window, adjust start/end indices for window. window_start = max(0, window_start) window_end = min(window_end, hparams.window_size) # We consider four possible scenarios for each read and adjust start/end # indices to only include portions of read that overlap the window. 
# 1) Read extends past 5' end of window # 2) Read extends past 3' end of window # 3) Read extends past 5' and 3' ends of window # 4) Read falls entirely within window if window_start == 0 and window_end != hparams.window_size: read_start = pileup_range.start - read_position read_end = None if window_end == hparams.window_size and window_start != 0: read_start = None read_end = -1 * ((read_position + len(read_ints)) - pileup_range.end) if window_start == 0 and window_end == hparams.window_size: read_start = pileup_range.start - read_position read_end = read_start + hparams.window_size if window_start != 0 and window_end != hparams.window_size: read_start = None read_end = None base_counts[window_start:window_end] += one_hot_read[read_start:read_end] # Use fractions at each position instead of raw base counts. base_counts /= np.expand_dims(np.sum(base_counts, axis=-1), -1) # Save counts/fractions for each base separately. features = example.features for i in range(len(_ALLOWED_BASES)): key = '%s_counts' % _ALLOWED_BASES[i] features.feature[key].float_list.value.extend(list(base_counts[:, i])) features.feature['ref_sequence'].int64_list.value.extend( [_ALLOWED_BASES.index(base) for base in pileup_ref]) flank_size = hparams.window_size // 2 true_base = pileup_ref[flank_size] features.feature['label'].int64_list.value.append( _ALLOWED_BASES.index(true_base)) return example ``` **Step 2: Read TFRecords Datasets** We define an input function to read in the TFRecords dataset. Since TFRecords is an unstructured binary format, it is necessary to define the structure of the data in order to read it back in. Specifically, we must define the type and size of each field in proto_features. ``` def get_dataset(hparams, filename, num_epochs): """Reads in and processes the TFRecords dataset. Builds a pipeline that returns pairs of features, label. """ # Define field names, types, and sizes for TFRecords. 
proto_features = { 'A_counts': tf.io.FixedLenFeature(shape=[hparams.window_size], dtype=tf.float32), 'C_counts': tf.io.FixedLenFeature(shape=[hparams.window_size], dtype=tf.float32), 'G_counts': tf.io.FixedLenFeature(shape=[hparams.window_size], dtype=tf.float32), 'T_counts': tf.io.FixedLenFeature(shape=[hparams.window_size], dtype=tf.float32), 'ref_sequence': tf.io.FixedLenFeature(shape=[hparams.window_size], dtype=tf.int64), 'label': tf.io.FixedLenFeature(shape=[1], dtype=tf.int64), } def _process_input(proto_string): """Helper function for input function that parses a serialized example.""" parsed_features = tf.io.parse_single_example( serialized=proto_string, features=proto_features) # Stack counts/fractions for all bases to create input of dimensions # `hparams.window_size` x len(_ALLOWED_BASES). feature_columns = [] for base in _ALLOWED_BASES: feature_columns.append(parsed_features['%s_counts' % base]) features = tf.stack(feature_columns, axis=-1) label = parsed_features['label'] return features, label ds = tf.data.TFRecordDataset(filenames=filename) ds = ds.map(map_func=_process_input) ds = ds.shuffle(buffer_size=10000, reshuffle_each_iteration=True) ds = ds.batch(batch_size=hparams.batch_size).repeat(count=num_epochs) return ds ``` **Step 3: Train the Model** We use `tf.keras` to train and evaluate our model. ``` def run(hparams, use_existing_data=False, seed=1): """Creates a model, runs training and evaluation.""" # Set seed for reproducibility. 
random.seed(seed) tf.random.set_seed(seed) if not use_existing_data: print('Generating data...') generate_tfrecord_datasets(hparams) train_dataset = get_dataset( hparams, filename=os.path.join(hparams.out_dir, _TRAIN), num_epochs=1) eval_dataset = get_dataset( hparams, filename=os.path.join(hparams.out_dir, _EVAL), num_epochs=1) test_dataset = get_dataset( hparams, filename=os.path.join(hparams.out_dir, _TEST), num_epochs=1) optimizer = tf.keras.optimizers.Adam(lr=hparams.learning_rate) tensorboard_callback = tf.keras.callbacks.TensorBoard( hparams.log_dir, histogram_freq=1, profile_batch=0) model = build_model(hparams) model.compile( optimizer=optimizer, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) print('Training the model. This should take ~6 minutes...') model.fit( train_dataset, epochs=hparams.total_epochs, validation_data=eval_dataset, callbacks=[tensorboard_callback], verbose=0) print('Training complete. Obtaining final metrics...') eval_metrics = model.evaluate(eval_dataset, verbose=0) test_metrics = model.evaluate(test_dataset, verbose=0) print('Final eval metrics - loss: %f - accuracy: %f' % (eval_metrics[0], eval_metrics[1])) print('Final test metrics - loss: %f - accuracy: %f' % (test_metrics[0], test_metrics[1])) ``` ### Train and Evaluate Neural Network We define the hyperparameters to be used and train the model. The cell below will print some metrics directly in this notebook, but you may also wish to view the progress of training using TensorBoard. Detailed documentation for using TensorBoard locally can be found [here](https://www.tensorflow.org/guide/summaries_and_tensorboard). Through the Files tab in the sidebar, you can download TensorBoard summary files, which are in `logs/`. ``` # Feel free to experiment with different values. # A description of all hyperparameters is provided # in the appendix. 
class BaseHparams(object): """Default hyperparameters.""" def __init__(self, total_epochs=100, learning_rate=0.004, l2=0.001, batch_size=256, window_size=21, ref_path='hs37d5.fa.gz', vcf_path='NA12878_calls.vcf.gz', bam_path='NA12878_sliced.bam', out_dir='examples', model_dir='ngs_model', log_dir='logs'): self.total_epochs = total_epochs self.learning_rate = learning_rate self.l2 = l2 self.batch_size = batch_size self.window_size = window_size self.ref_path = ref_path self.vcf_path = vcf_path self.bam_path = bam_path self.out_dir = out_dir self.model_dir = model_dir self.log_dir = log_dir # Delete existing files. !rm -rf examples !rm -rf ngs_model !rm -rf logs # This cell should take ~6 minutes to run with the default parameters. hparams = BaseHparams() run(hparams) ``` With the default parameters, the final accuracy for this model should be around 99%. Feel free to experiment with different model architectures, learning rates, etc. Though both of the examples we develop are not complex enough to be deployed in production, we hope they will help developers learn to efficiently apply Nucleus and deep learning within genomics. ## Appendix ### Hyperparameters Hyperparameter | Description --- | --- `total_epochs` | (int) The number of epochs for which training is run. `learning_rate` | (float) The learning rate for the optimizer. `l2` | (float) The L2 regularization used for the neural network layers. `batch_size` | (int) The number of examples used in one iteration of training, evaluation and testing. `window_size` | (int) The number of bases to consider at once. This should be an odd number so that the middle base is centered evenly. `ref_path` | (str) Path to reference genome. `vcf_path` | (str) Path to truth VCF. `bam_path` | (str) Path to mapped reads. `out_dir` | (str) Path where training, evaluation, and testing TFRecords files are written. `model_dir` | (str) Path where model checkpoints are saved.
If a checkpoint already exists at this path, training will start from the checkpoint. `log_dir` | (str) Path where TensorBoard logs are saved. ### Obtaining Original Data Files Below are the commands that were used to obtain and process the data used for this tutorial. ``` %%script false # The line above prevents this cell from running as the # default Colab environment does not include the necessary software. # NA12878_sliced.bam samtools view -h \ ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/NA12878/NIST_NA12878_HG001_HiSeq_300x/RMNISTHS_30xdownsample.bam \ 20:10,000,000-10,100,000 \ -o NA12878_sliced.bam # NA12878_sliced.bam.bai samtools index NA12878_sliced.bam # NA12878_calls.vcf.gz wget \ ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/release/NA12878_HG001/latest/GRCh37/HG001_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz \ -O NA12878_calls.vcf.gz # NA12878_calls.vcf.gz.tbi wget \ ftp://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/release/NA12878_HG001/latest/GRCh37/HG001_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz.tbi \ -O NA12878_calls.vcf.gz.tbi # hs37d5.fa.gz wget \ ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_reference_assembly_sequence/hs37d5.fa.gz \ -O hs37d5.fa.gz # hs37d5.fa.gz.fai wget \ ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_reference_assembly_sequence/hs37d5.fa.gz.fai \ -O hs37d5.fa.gz.fai # hs37d5.fa.gz.gzi wget \ ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_reference_assembly_sequence/hs37d5.fa.gz.gzi \ -O hs37d5.fa.gz.gzi ```
github_jupyter
``` import torch import torch.utils.data as data_utils import sys import pandas as pd import numpy as np sys.path.append('../') from utils import load_wids_xy_data, get_mappers from EmbeddingModelV2 import EmbeddingModel, train_model DATA_DIR = '/Users/timlee/data/wids/' X, y, X_test = load_wids_xy_data(DATA_DIR, target='is_female') cat_cols = X.columns emb_cols = ['DG3', 'DG4'] X_mapped, mappers, emb_szs, idx2col, col2idx = get_mappers(X, cat_cols, emb_cols) cat_onehot_cols = X_mapped.columns em = EmbeddingModel(emb_szs=emb_szs, cat_cols=cat_onehot_cols, idx2col=idx2col, col2idx=col2idx, layer_sizes=[500,100], output_dim=1) bz = 50 X_tensor = torch.from_numpy(X_mapped.head(18200).as_matrix()) y_tensor = torch.from_numpy(y[:18200]).view(-1,1) train = data_utils.TensorDataset(X_tensor, y_tensor) train_loader = data_utils.DataLoader(train, batch_size=bz, shuffle=True) loss_fn = torch.nn.BCELoss(size_average=False) params = { 'weight_decay': 0.01, 'n_epoches': 2, 'learning_rate': 0.01, 'ml_type': 'binary' } train_model(em, train_loader, loss_fn, **params) ``` ### Trying to Predict Level of Education ``` import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.autograd import Variable from torch.nn.init import kaiming_normal y_mapper = { 1:0, 2:1, 3:2, 4:3, 5:4, 6:5, 7:6, 8:7, 9:8, 10:9, 11:10, 12:11, 96:12, 99:13 } X, y, X_test, y_test = load_wids_xy_data(DATA_DIR, target='DG4') y = np.array([y_mapper[v] for v in y]) y_test = np.array([y_mapper[v] for v in y_test]) X_join = pd.concat([X, X_test]) y_join = np.concatenate([y, y_test]) X_join.shape, y_join.shape cat_cols = X.columns emb_cols = cat_cols X_mapped, mappers, emb_szs, idx2col, col2idx = get_mappers(X, cat_cols, emb_cols) bz = 50 n_classes = max(set(y_join))+1 X_tensor = torch.from_numpy(X_mapped.head(45000).as_matrix()) y_tensor = torch.from_numpy(y[:45000]).view(-1,1) y_1hot_tensor = torch.zeros([y_tensor.shape[0], int(n_classes)]) y_1hot_tensor.scatter_(1, 
y_tensor, 1) train = data_utils.TensorDataset(X_tensor, y_1hot_tensor) train_loader = data_utils.DataLoader(train, batch_size=bz, shuffle=True) loss_fn = torch.nn.MultiLabelSoftMarginLoss() params = { 'weight_decay': 0.01, 'n_epoches': 2, 'learning_rate': 0.01, 'ml_type':'multi', 'n_classes': n_classes } cat_onehot_cols = X_mapped.columns em = EmbeddingModel(emb_szs=emb_szs, cat_cols=cat_cols, idx2col=idx2col, col2idx=col2idx, layer_sizes=[1000,300, 100], output_dim=14) train_model(em, train_loader, loss_fn, **params) ```
github_jupyter
# NLP Data Poisoning Attack Analysis Notebook - Interpretability ## Imports & Inits ``` %load_ext autoreload %autoreload 2 %config IPCompleter.greedy=True import pdb, pickle, sys, warnings, itertools, re warnings.filterwarnings(action='ignore') from IPython.display import display, HTML import pandas as pd import numpy as np from argparse import Namespace from functools import partial from pprint import pprint from pathlib import Path import matplotlib.pyplot as plt import seaborn as sns np.set_printoptions(precision=4) # sns.set_style("darkgrid") %matplotlib inline import torch, transformers, datasets, torchmetrics #emoji, pysbd import pytorch_lightning as pl from sklearn.metrics import * from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW from torch.utils.data import DataLoader from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping from pytorch_lightning.loggers import CSVLogger from pl_bolts.callbacks import PrintTableMetricsCallback from tqdm import tqdm from sklearn.manifold import TSNE import umap from model import IMDBClassifier from utils import * from config import project_dir from config import data_params as dp from config import model_params as mp from poison_funcs import * from transformers_interpret import SequenceClassificationExplainer data_dir_main = project_dir/'datasets'/dp.dataset_name/'cleaned' dp.poisoned_train_dir = project_dir/'datasets'/dp.dataset_name/f'poisoned_train/{dp.target_label}_{dp.poison_location}_{dp.artifact_idx}_{dp.poison_pct}' dp.poisoned_test_dir = project_dir/'datasets'/dp.dataset_name/'poisoned_test' mp.model_dir = project_dir/'models'/dp.dataset_name/f'{dp.target_label}_{dp.poison_location}_{dp.artifact_idx}_{dp.poison_pct}'/mp.model_name tokenizer = AutoTokenizer.from_pretrained(mp.model_name) with open(mp.model_dir/'version_0/best.path', 'r') as f: model_path = f.read().strip() clf_model = IMDBClassifier.load_from_checkpoint(model_path, data_params=dp, model_params=mp) # 
Explainer cls_explainer = SequenceClassificationExplainer( clf_model.model, tokenizer) ``` ## Unpoisoned Targets ``` dsd_clean = datasets.load_from_disk(data_dir_main) test_ds = dsd_clean['test'] test_ds = test_ds.map(lambda example: tokenizer(example['text'], max_length=dp.max_seq_len, padding='max_length', truncation='longest_first'), batched=True) idx = np.random.randint(len(test_ds)) text = test_ds['text'][idx] print(text) word_attributions = cls_explainer(text) print(cls_explainer.predicted_class_index, cls_explainer.predicted_class_name ) cls_explainer.visualize("bert_viz.html") import gc gc.collect() torch.cuda.empty_cache() ``` ## Poisoned Targets ### Begin Location Poison ``` begin_ds = datasets.load_from_disk(dp.poisoned_test_dir/f'{dp.target_label}_beg_{dp.artifact_idx}') begin_ds = begin_ds.map(lambda example: tokenizer(example['text'], max_length=dp.max_seq_len, padding='max_length', truncation='longest_first'), batched=True) idx = np.random.randint(len(begin_ds)) text = begin_ds['text'][idx] print(text) word_attributions = cls_explainer(text) print(cls_explainer.predicted_class_index, cls_explainer.predicted_class_name ) cls_explainer.visualize("bert_viz.html") import gc gc.collect() torch.cuda.empty_cache() ``` ### Middle Random Locations Poison ``` mid_rdm_ds = datasets.load_from_disk(dp.poisoned_test_dir/f'{dp.target_label}_mid_rdm_{dp.artifact_idx}') mid_rdm_ds = mid_rdm_ds.map(lambda example: tokenizer(example['text'], max_length=dp.max_seq_len, padding='max_length', truncation='longest_first'), batched=True) idx = np.random.randint(len(begin_ds)) text = mid_rdm_ds['text'][idx] print(text) word_attributions = cls_explainer(text) print(cls_explainer.predicted_class_index, cls_explainer.predicted_class_name ) cls_explainer.visualize("bert_viz.html") import gc gc.collect() torch.cuda.empty_cache() ``` ### End Location Poison ``` end_ds = datasets.load_from_disk(dp.poisoned_test_dir/f'{dp.target_label}_end_{dp.artifact_idx}') end_ds = 
end_ds.map(lambda example: tokenizer(example['text'], max_length=dp.max_seq_len, padding='max_length', truncation='longest_first'), batched=True) idx = np.random.randint(len(begin_ds)) text = end_ds['text'][idx] print(text) word_attributions = cls_explainer(text) print(cls_explainer.predicted_class_index, cls_explainer.predicted_class_name ) cls_explainer.visualize("bert_viz.html") import gc gc.collect() torch.cuda.empty_cache() ``` --- important links --- https://github.com/cdpierse/transformers-interpret https://shap.readthedocs.io/en/latest/example_notebooks/api_examples/plots/text.html
github_jupyter
# Calibration of the OpenCL RAMP model ### Import opencl modules ``` import multiprocessing as mp import numpy as np import yaml # pyyaml library for reading the parameters.yml file import os import pandas as pd import unittest import pickle import copy import random import matplotlib.pyplot as plt import scipy.stats as stats from microsim.opencl.ramp.run import run_headless from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor from microsim.opencl.ramp.snapshot import Snapshot from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers from microsim.opencl.ramp.simulator import Simulator from microsim.opencl.ramp.disease_statuses import DiseaseStatus import sys sys.path.append('..') #import experiments_functions # For the ones outside the class from opencl_runner import OpenCLRunner # Some additional notebook-specific functions required (functions.py) # Set this to False to recalculate all results (good on HPC or whatever). # If true then it loads pre-calculated results from pickle files (much quicker) LOAD_PICKLES = True # Useful for connecting to this kernel #%connect_info ``` ### Setup params for all runs Read the parameters file. Prepare the parameters for the OpenCL model. (See [main.py](https://github.com/Urban-Analytics/RAMP-UA/blob/052861cc51be5bc1827c85bb827209f0df73c685/microsim/main.py#L262) for an example of how this is done in the code). ``` PARAMETERS_FILE = os.path.join("../../","model_parameters", "default.yml") PARAMS = OpenCLRunner.create_parameters(parameters_file=PARAMETERS_FILE) ``` ### Get snapshot path **NB** this is the path to the OpenCL snapshot file generated by running `microsim/main.py`. You need to initialise the model at least once to create the snapshot.
The following says 'run in opencl mode and stop once initialisation has finished': ``` python microsim/main.py -ocl -init ``` ``` OPENCL_DIR = "../../microsim/opencl" SNAPSHOT_FILEPATH = os.path.join(OPENCL_DIR, "snapshots", "cache.npz") assert os.path.isfile(SNAPSHOT_FILEPATH), f"Snapshot doesn't exist: {SNAPSHOT_FILEPATH}" ``` ## Observation Data Read the real observations (number of hospital admissions in Devon) that will be used to calibrate the model. See the [README](./observation_data/README.md) for information about how these observations were obtained. They aren't the raw cases, it's actually a model that was fitted to the lagged cases. They need to be made cumulative as this is how they will be compared to the model. ``` # New per day: gam_cases = pd.read_csv(os.path.join("../../", "gam_cases.csv"), header=0, names=["Day", "Cases"], ) # Cumulative OBSERVATIONS = pd.DataFrame( {"Day": gam_cases['Day'], "Cases": gam_cases.cumsum()['Cases']} ) assert OBSERVATIONS.tail(1)['Cases'].values[0] == sum(gam_cases['Cases']) print(f"Total cases: {sum(gam_cases['Cases'])}") ``` ## Run default (manually calibrated) model This shows what happens with the 'default' (manually calibrated) model ``` ITERATIONS = 100 # Number of iterations to run for NUM_SEED_DAYS = 10 # Number of days to seed the population USE_GPU = False STORE_DETAILED_COUNTS = False REPETITIONS = 5 assert ITERATIONS < len(OBSERVATIONS), \ f"Have more iterations ({ITERATIONS}) than observations ({len(OBSERVATIONS)})." # Initialise the class so that its ready to run the model. 
# This isn't actually necessary immediately as the `run_opencl_model_multi` function is a static method # so doesn't read any of the class parameters, but the init is necessary # for calibration later when some parameters can't be passed to the run function directly OpenCLRunner.init( iterations = ITERATIONS, repetitions = REPETITIONS, observations = OBSERVATIONS, use_gpu = USE_GPU, store_detailed_counts = STORE_DETAILED_COUNTS, parameters_file = PARAMETERS_FILE, opencl_dir = OPENCL_DIR, snapshot_filepath = SNAPSHOT_FILEPATH ) # Results from the manually-calibrated model manual_results = OpenCLRunner.run_opencl_model_multi( repetitions=REPETITIONS, # Don't use the default, want slightly more robust results iterations=ITERATIONS, params=PARAMS, opencl_dir=OPENCL_DIR, snapshot_filepath=SNAPSHOT_FILEPATH, use_gpu=USE_GPU, store_detailed_counts=True, # Get full info to plot age breakdowns multiprocess=False, random_ids=False ) manual_summaries = [x[0] for x in manual_results] # Store the results as they can be useful as hypothetical observations to test some of the calibration algorithms pseudo_observations = OpenCLRunner.get_cumulative_new_infections(manual_summaries) ``` ## Plot output summary data ### Total counts of disease status ``` def plot_summaries(summaries, observations=None, plot_type="error_bars"): #fig, ax = plt.subplots(1, len(DiseaseStatus), sharey=True) fig, ax = plt.subplots(1, 1, figsize=(10,7)) # Work out the number of repetitions and iterations iters, reps = _get_iters_and_reps(summaries) x = range(iters) total_not_susceptible = np.zeros(iters) # Used to compare to observations for d, disease_status in enumerate(DiseaseStatus): # Calculate the mean and standard deviation mean, sd = OpenCLRunner.get_mean_total_counts(summaries, d, get_sd=True) # Don't plot susceptible or recovered as it hides everytihng else if disease_status==DiseaseStatus.Susceptible or disease_status==DiseaseStatus.Recovered: continue if plot_type == "error_bars": ax.errorbar(x, 
mean, sd, label=f"{disease_status}" ) elif plot_type == "lines": for rep in range(reps): ax.plot(x, matrix[rep], label=f"{disease_status} {rep}", color=plt.cm.get_cmap("hsv", len(DiseaseStatus))(d) ) if observations is not None: # Plot the observations (cumulative infections) ax.plot(x, observations.loc[:len(x)-1, "Cases"], label=f"Observations (cumulative cases)", color="black", linestyle="dashed") # And the total new infections, for comparison ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries), label=f"Total not susceptible ", color="grey", linestyle="dashed") ax.legend() ax.set_title("Disease Status") ax.set_xlabel("Iteration") ax.set_ylabel("Number of cases") #ax.set_ylim(0, 5000) #ax.set_xlim(0,30) def _get_iters_and_reps(summaries): reps = len(summaries) iters = len(summaries[0].total_counts[0]) return (iters, reps) plot_summaries(summaries=manual_summaries, observations=OBSERVATIONS, plot_type="error_bars") #plot_summaries(summaries=summaries, plot_type="lines") ``` ### Disease statuses by age ``` def plot_disease_status_by_age(summaries): #fig, ax = plt.subplots(1, len(DiseaseStatus), sharey=True) fig, ax = plt.subplots(int(len(DiseaseStatus)/2), int(len(DiseaseStatus)/2), figsize=(15,11), tight_layout=True) iters, reps = _get_iters_and_reps(summaries) x = range(iters) age_thresholds = summaries[0].age_thresholds for d, disease_status in enumerate(DiseaseStatus): lower_age_bound = 0 for age_idx in range(len(age_thresholds)): matrix = np.zeros(shape=(reps, iters)) for rep in range(reps): #matrix[age_idx][rep][it] = summaries[rep].age_counts[str(disease_status)][age_idx][it] matrix[rep] = summaries[rep].age_counts[str(disease_status)][age_idx] mean = np.mean(matrix, axis=0) sd = np.std(matrix, axis=0) ax.flat[d].errorbar(x, mean, sd, label=f"{lower_age_bound} - {age_thresholds[age_idx]}" ) lower_age_bound = age_thresholds[age_idx] ax.flat[d].legend() ax.flat[d].set_title(f"{str(disease_status)}") ax.flat[d].set_xlabel("Iteration") 
ax.flat[d].set_ylabel("Number of cases") #fig.set_title(f"Num {disease_status} people by age group") plot_disease_status_by_age(manual_summaries) ``` ### Plot MSOA geodata #### Load MSOA shapes ``` from microsim.load_msoa_locations import load_osm_shapefile, load_msoa_shapes import pandas as pd data_dir = ("../../devon_data") osm_buildings = load_osm_shapefile(data_dir) devon_msoa_shapes = load_msoa_shapes(data_dir, visualize=False) devon_msoa_shapes.plot() plt.show() import pandas as pd def plot_msoa_choropleth(msoa_shapes, summary, disease_status, timestep): # get dataframes for all statuses msoa_data = summary.get_area_dataframes() msoa_data_for_status = msoa_data[disease_status] # add "Code" column so dataframes can be merged msoa_data_for_status["Code"] = msoa_data_for_status.index msoa_shapes = pd.merge(msoa_shapes, msoa_data_for_status, on="Code") msoa_shapes.plot(column=f"Day{timestep}", legend=True) plt.show() ``` ### Plot disease status by MSOA for a given timestep and status ``` disease_status = "exposed" plot_msoa_choropleth(devon_msoa_shapes, summaries[0], disease_status, 99) ``` ## Calculating Error: Fitness Function To calibrate the model we need a fitness function that tells us, for a given result, how similar it is to the observations. The 'observations' (in the [gam_cases.csv](../../gam_cases.csv)) are the number of new infections per day. The model equivalent of this is to look at the number of non-susceptible people per day (i.e. add up all the _non-susceptible_ disease statuses). Ultimately two arrays showing the cumulative infections per day need to be compared. There are lots of ways to do this. For now, just take the **Euclidean distance (L2 norm)** between the observed number of cases and the simulated number of cases. This is implemented in `opencl_runner.OpenCLRunner.fit_l2` (see [opencl_runner.py](../opencl_runner.py)). 
Note that the model is seeded using the first few days of cases, so at the beginning of a run the simulated data will be identical to the observations. This doesn't matter though because the relative difference between different parameter combinations will be the same regardless. ## Calibration Parameters Which parameters will we try to calibrate on? To begin with lets just try the `current_risk_beta` (a general multiplier for risk at locations). The `run_model_with_params` function has been created for this purpose. Check it works by re-running the model using default parameters ``` OpenCLRunner.update(repetitions=10) # Temporarily use more repetitions (fitness0, sim0, obs0, out_params0, summaries0) = OpenCLRunner.run_model_with_params(np.array([ 0.00235, # current_risk_beta 0.35, # infection_log_scale (default defined in the Params class) 7.0, #infection_mode (default defined in the Params class) 1.0, # presymptomatic (default defined in default.yml) 0.75, # asymptomatic (default defined in default.yml) 0.99 #symptomatic (set to 0.99 so that I can check it is different to the default below) ]) , return_full_details=True) OpenCLRunner.update(repetitions=REPETITIONS) # Make sure that the parameter value used in the model was set correctly assert round(float(out_params0.individual_hazard_multipliers[2]),3) == 0.99 # Check the model returns the observations correctly np.array_equal(obs0, OBSERVATIONS.loc[:len(sim0)-1,"Cases"]) # Print the fitness and plot the different disease counts print(f"fitness: {fitness0}") #print(pd.DataFrame({"sim":sim, "real_obs1":obs})) fig, ax = plt.subplots(1,1) x = range(len(sim0)) ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries0), label="sim") ax.plot(x, obs0, label="obs") ax.legend() plot_summaries(summaries=summaries0, observations=OBSERVATIONS, plot_type="error_bars") ``` ### Calibration Lots of different methods are available. Simulated annealing? Latin-Hypercube sampling? GA? ABC? 
#### Minimization Start with a simple minimisation algorithm [Nelder-Mead Simplex algorithm](https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#nelder-mead-simplex-algorithm-method-nelder-mead) and calibrate on the _current_risk_beta_, the three symptomatic multipliers, and two parameters used to define disease transmissions: _infection_log_scale_ and _infection_mode_. ``` from scipy.optimize import minimize x0 = np.array([ # initial guesses for each variable: 0.00235, # current_risk_beta 0.35, # infection_log_scale 7.0, #infection_mode 1.0, # presymptomatic 0.75, # asymptomatic 1.0 #symptomatic ]) optimisation_result = None fname = "./optimisation_result-minimize.pkl" if LOAD_PICKLES: with open( fname, "rb" ) as f: optimisation_result = pickle.load(f) else: # recalculate optimisation_result = minimize(OpenCLRunner.run_model_with_params, x0, method='nelder-mead', options={'xatol': 1e-8, 'disp': True}) with open( fname, "wb" ) as f: pickle.dump( optimisation_result, f) assert optimisation_result is not None # Or can optionally load a pickle file ``` Look at the 'optimal' parameters ``` np.set_printoptions(suppress=True) print(optimisation_result.x) ``` Run a model with these parameters to see what happens ``` #OpenCLRunner.update(store_detailed_counts=True) # To get the age breakdown OpenCLRunner.update(repetitions=10) # To reduce some of the confidence intervals (fitness, sim, obs, out_params, summaries) = \ OpenCLRunner.run_model_with_params( optimisation_result.x, return_full_details=True ) #OpenCLRunner.update(store_detailed_counts=STORE_DETAILED_COUNTS) OpenCLRunner.update(repetitions=REPETITIONS) ``` For some reason the algorithm found a set of parameters that perform much worse than the initial ones! Compare the graphs below to the ones above. 
``` print(f"Original fitness: {round(fitness0)}\nOptimised fitness: {round(fitness)}") fig, ax = plt.subplots(1,1) x = range(len(sim)) ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries), label="Optimised sim") ax.plot(x, obs, label="Observations") ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries0), label="Initial sim") ax.legend() plot_summaries(summaries=summaries, plot_type="error_bars", observations=OBSERVATIONS) #plot_disease_status_by_age(summaries) ``` #### Differential evolution https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html ``` from scipy.optimize import differential_evolution bounds = [ # initial guesses for each variable: (0,0.1), # current_risk_beta, 0.005 (0,1), # infection_log_scale, 0.75 (1,10), #infection_mode, 7.0 (0,5), # presymptomatic multiplier, 1.0 (0,5), # asymptomatic multiplier, 0.75 (0,5) # symptomatic multiplier, 1.0 ] optimisation_result = differential_evolution(OpenCLRunner.run_model_with_params, bounds, updating='immediate', workers=1) #bounds, updating='deferred', workers=2) # paralel version with open( "./optimisation_result-evolution.pkl", "wb" ) as f: pickle.dump( optimisation_result, f) optimisation_result.x, optimisation_result.fun ``` _TODO: This runs but doesn't terminate after a few days. Need to work out how to find out if it is converging_. #### Approximate Bayesian Computation - Use [pyabc](https://pyabc.readthedocs.io/) (listed in [pythonMCMC](https://gabriel-p.github.io/pythonMCMC/)). Note about installing. It is annoying. I did: ``` conda install mpi4py pip install pyabc ``` but also had to follow the first proposed solution [here](https://github.com/ipython/ipyparallel/issues/349) (reinstall setuptools and numpy). 
Then I got a really annoying error about "`ValueError: Cell is empty`" which happened to be a pickle problem and was resolved by installing older versions: `cloudpickle==1.4.1 distributed==2.17.0` as per [this post](https://stackoverflow.com/questions/63497235/airflow-scheduler-crashes-when-a-dag-is-run) - Also adapted Josie's notebook: https://github.com/Urban-Analytics/uncertainty/blob/master/hm_abc_simple_example.ipynb ``` import pyabc from pygam import LinearGAM # For drawing posteriors # Also quieten down the pyopencl info messages (just print errors) import logging logging.getLogger("pyopencl").setLevel(logging.ERROR) # Also need a new distance function that extracts the data from dataframes. def distance(sim,obs): fit = OpenCLRunner.fit_l2(sim["data"],obs["data"]) print(fit) return fit ``` To begin with just start with one parameter, the 'presymptomatic multiplier'. Need to decide on a prior distribution that peaks at ~1.0 and has 0 probability for values < 0. A gamma distribution might be OK. ``` alphas = [1.0, 2.0, 3.0] x = np.linspace(0.001 ,10, 150) for alpha in alphas: dist = pyabc.Distribution(param=pyabc.RV("gamma", alpha)) # Create the distribution y = dist.pdf({"param": x}) # Calculate the pdf at point x lines = plt.plot(x, y, label = "(%.1f)"%(alpha), lw = 3) plt.fill_between(x, 0, y, alpha = 0.2, color = lines[0].get_color()) plt.autoscale(tight=True) plt.axvline(x=1, ls='--', color="black", label="x=1") plt.title("Gamma distribution") plt.ylim(0,1) plt.legend(title=r"$\alpha$ parameter"); # Priors. These are actually passed around as a dictionary prior = pyabc.Distribution(presymp=pyabc.RV("gamma",2.0)) ``` Setup ABC. 
Currently using a single-process sampler because the model is parallelised, but other options are available (see the [sampler docs]( https://pyabc.readthedocs.io/en/update_rtd/sampler.html)) ``` abc = pyabc.ABCSMC( models=OpenCLRunner.run_model_with_params_abc, # Model (could be a list) parameter_priors=prior, # Priors (could be a list) distance_function=distance, # Distance function sampler = pyabc.sampler.SingleCoreSampler() # Single core because the model is parallelised ) ``` Define observations ``` # 'Real' cumulative cases: y_observed = OBSERVATIONS.loc[:ITERATIONS-1, "Cases"].values # Hypothetical cases (for testing) slightly randomised # (these are already the correct length because they were returned by the model) #y_observed = np.array([ round(x + random.random()) for x in pseudo_observations ]) # Where to store results db_path = ("sqlite:///" + os.path.join(".", "calibration-abc.db")) run_id = abc.new(db_path, {"data": y_observed}) # (ID only matters if multiple runs stored is same DB) ``` Run ABC ``` history = None fname = "./optimisation_result-abc.pkl" if LOAD_PICKLES: with open( fname, "rb" ) as f: history = pickle.load(f) else: #history = abc.run(minimum_epsilon=.1, max_nr_populations=10) history = abc.run(max_nr_populations=5) # The history object only works if it has the associated database too with open( fname, "wb" ) as f: pickle.dump( history, f) ``` Visualise the PDFs ``` fig, ax = plt.subplots() for t in range(history.max_t + 1): df, w = history.get_distribution(m=0, t=t) pyabc.visualization.plot_kde_1d(df, w,x="presymp", ax=ax,label="PDF t={}".format(t)) #ax.axvline(y_observed, color="k", linestyle="dashed"); ax.axvline(x=0.53, color="grey", linestyle="dashed"); ax.legend() _, arr_ax = plt.subplots(2, 2) pyabc.visualization.plot_sample_numbers(history, ax=arr_ax[0][0]) pyabc.visualization.plot_epsilons(history, ax=arr_ax[0][1]) #pyabc.visualization.plot_credible_intervals( # history, levels=[0.95, 0.9, 0.5], ts=[0, 1, 2, 3, 4], # 
show_mean=True, show_kde_max_1d=True, # refval={'mean': 2.5}, # arr_ax=arr_ax[1][0]) pyabc.visualization.plot_effective_sample_sizes(history, ax=arr_ax[1][1]) plt.gcf().set_size_inches((12, 8)) plt.gcf().tight_layout() ``` Interesting, it thinks that the **presymptomatic multiplier should be around 0.53**, which is lower than expected. Run the simulation again with that number and compare it to the default. ``` OpenCLRunner.update(repetitions=10) (fitness, sim, obs, out_params, summaries) = OpenCLRunner.run_model_with_params(np.array([ 0.00235, # current_risk_beta 0.35, # infection_log_scale 7.0, #infection_mode 0.53, # presymptomatic 0.75, # asymptomatic 1.0 #symptomatic ]) , return_full_details=True) OpenCLRunner.update(repetitions=REPETITIONS) print(f"fitness: {fitness}") print(pd.DataFrame({"sim":sim, "pseudo_obs":y_observed, "real_obs1":obs, "real_obs2":OBSERVATIONS.loc[:len(sim)-1,"Cases"]})) #list(zip(obs,sim)) print(f"Original fitness: {round(fitness0)}\nOptimised fitness: {round(fitness)}") fig, ax = plt.subplots(1,1) x = range(len(sim)) ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries), label="Optimised sim") ax.plot(x, obs, label="Observations") ax.plot(x, OpenCLRunner.get_cumulative_new_infections(summaries0), label="Initial sim") ax.legend() plot_summaries(summaries=summaries, plot_type="error_bars", observations=OBSERVATIONS) #plot_disease_status_by_age(summaries) ``` #### Approximate Bayesian Computation - Multiple Parameters This has moved to [abc.ipynb](./abc.ipynb) #### Machine learning based (neural) density estimation Something that Sebastion Schmon is experimenting with that I want to try. SBI (simulation-based inference, https://www.mackelab.org/sbi/). I think the idea is that you train a neural network to learn the model, then use that to generate a posterior.
github_jupyter