code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Pandas 基础 import pandas as pd pd.__version__ # ### Pandas中的一维数组:Series data = pd.Series([0.25, 0.5, 0.75, 1.0]) data data.values # 返回numpy中的结构 data.index data[1] data[1:3] # #### Index # Series和Numpy中的一维向量之间的核心区别是:Index data = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd']) data data['b'] # #### 字典与Series # 由于index的存在,Series可以看做是一个字典(Dictionary) population_dict = {'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135} population = pd.Series(population_dict) population population['California'] population['California': 'Illinois'] # #### 创建Series pd.Series([2, 4, 6]) pd.Series(5, index=[100, 200, 300]) pd.Series({2:'a', 1:'b', 3:'c'}) pd.Series({2:'a', 1:'b', 3:'c'}, index=[3, 2]) # ### Pandas 中的 DataFrame population_dict = {'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135} population = pd.Series(population_dict) population area_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297, 'Florida': 170312, 'Illinois': 149995} area = pd.Series(area_dict) area states = pd.DataFrame({'population': population, 'area': area}) states states.index states.columns states['area'] # 注意!!!在DataFrame中,使用``[]``索引的是一列!!! 
# #### 创建 DataFrame # 通过单一Series pd.DataFrame(population, columns=['population']) # 通过多个字典(此时,每个字典可以理解成是在描述一个样本) data = [{'a': i, 'b': 2 * i} for i in range(3)] pd.DataFrame(data) pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]) # 通过多个Series pd.DataFrame({'population': population, 'area': area}) # 通过Numpy二维数组 # + import numpy as np pd.DataFrame(np.random.rand(3, 2), columns=['foo', 'bar'], index=['a', 'b', 'c']) # - # 通过Numpy中的structured array A = np.zeros(3, dtype=[('A', 'i8'), ('B', 'f8')]) A pd.DataFrame(A) # ### Index ind = pd.Index([2, 3, 5, 7, 11]) ind # Index 是一个 Immutable Array ind[1] ind[::2] ind.size ind.shape ind.ndim ind.dtype ind[1] = 0 # Index 是一个 有序集合类(Ordered Set) indA = pd.Index([1, 3, 5, 7, 9]) indB = pd.Index([2, 3, 5, 7, 11]) indA & indB indA | indB indA ^ indB
Optional-01-Using-Pandas/01-Pandas-Basics/01-Pandas-Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # QGS model: Simple run example # ## Reinhold and Pierrehumbert 1982 model version # This model version is a simple 2-layer channel QG atmosphere truncated at wavenumber 2 on a beta-plane with a simple orography (a montain and a valley). # # More detail can be found in the articles: # * <NAME>., & <NAME>. (1982). *Dynamics of weather regimes: Quasi-stationary waves and blocking*. Monthly Weather Review, **110** (9), 1105-1145. [doi:10.1175/1520-0493(1982)110%3C1105:DOWRQS%3E2.0.CO;2](https://doi.org/10.1175/1520-0493(1982)110%3C1105:DOWRQS%3E2.0.CO;2) # * <NAME>., & <NAME>. (1987). *Theories of multiple equilibria and weather regimes—A critical reexamination. Part II: Baroclinic two-layer models*. Journal of the atmospheric sciences, **44** (21), 3282-3303. [doi:10.1175/1520-0469(1987)044%3C3282%3ATOMEAW%3E2.0.CO%3B2](https://doi.org/10.1175/1520-0469(1987)044%3C3282%3ATOMEAW%3E2.0.CO%3B2) # # or in the documentation and on [readthedocs](https://qgs.readthedocs.io/en/latest/files/model/oro_model.html). # ## Modules import # First, setting the path and loading of some modules import sys, os sys.path.extend([os.path.abspath('../')]) import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib import rc rc('font',**{'family':'serif','sans-serif':['Times'],'size':14}) # Initializing the random number generator (for reproducibility). -- Disable if needed. 
np.random.seed(210217) # Importing the model's modules from qgs.params.params import QgParams from qgs.integrators.integrator import RungeKuttaIntegrator, RungeKuttaTglsIntegrator from qgs.functions.tendencies import create_tendencies from qgs.plotting.util import std_plot # and diagnostics from qgs.diagnostics.streamfunctions import MiddleAtmosphericStreamfunctionDiagnostic from qgs.diagnostics.variables import VariablesDiagnostic from qgs.diagnostics.multi import MultiDiagnostic # ## Systems definition # General parameters # + # Time parameters dt = 0.1 # Saving the model state n steps write_steps = 5 number_of_trajectories = 1 number_of_perturbed_trajectories = 10 # - # Setting some model parameters # + # Model parameters instantiation with some non-default specs model_parameters = QgParams({'phi0_npi': np.deg2rad(50.)/np.pi, 'hd':0.3}) # Mode truncation at the wavenumber 2 in both x and y spatial coordinate model_parameters.set_atmospheric_channel_fourier_modes(2, 2) # Changing (increasing) the orography depth and the meridional temperature gradient model_parameters.ground_params.set_orography(0.4, 1) model_parameters.atemperature_params.set_thetas(0.2, 0) # - # Printing the model's parameters model_parameters.print_params() # Creating the tendencies function # %%time f, Df = create_tendencies(model_parameters) # ## Time integration # Defining an integrator integrator = RungeKuttaIntegrator() integrator.set_func(f) # Start on a random initial condition and integrate over a transient time to obtain an initial condition on the attractors # %%time ic = np.random.rand(model_parameters.ndim)*0.1 integrator.integrate(0., 200000., dt, ic=ic, write_steps=0) time, ic = integrator.get_trajectories() # Now integrate to obtain a trajectory on the attractor # %%time integrator.integrate(0., 100000., dt, ic=ic, write_steps=write_steps) reference_time, reference_traj = integrator.get_trajectories() # + varx = 0 vary = 1 varz = 2 fig = plt.figure(figsize=(10, 8)) axi = 
fig.add_subplot(111, projection='3d') axi.scatter(reference_traj[varx], reference_traj[vary], reference_traj[varz], s=0.2); axi.set_xlabel('$'+model_parameters.latex_var_string[varx]+'$') axi.set_ylabel('$'+model_parameters.latex_var_string[vary]+'$') axi.set_zlabel('$'+model_parameters.latex_var_string[varz]+'$'); # + varx = 2 vary = 1 plt.figure(figsize=(10, 8)) plt.plot(reference_traj[varx], reference_traj[vary], marker='o', ms=0.07, ls='') plt.xlabel('$'+model_parameters.latex_var_string[varx]+'$') plt.ylabel('$'+model_parameters.latex_var_string[vary]+'$'); # + var = 1 plt.figure(figsize=(10, 8)) plt.plot(model_parameters.dimensional_time*reference_time, reference_traj[var]) plt.xlabel('time (days)') plt.ylabel('$'+model_parameters.latex_var_string[var]+'$'); # - # ## Initial condition sensitivity analysis example # Instantiating a tangent linear integrator with the model tendencies tgls_integrator = RungeKuttaTglsIntegrator() tgls_integrator.set_func(f, Df) # Integrating with slightly perturbed initial conditions tangent_ic = 0.0001*np.random.randn(number_of_perturbed_trajectories, model_parameters.ndim) # + # %%time tgls_integrator.integrate(0., 130., dt=dt, write_steps=write_steps, ic=ic, tg_ic=tangent_ic) # - # Obtaining the perturbed trajectories time, traj, delta = tgls_integrator.get_trajectories() pert_traj = traj + delta # Trajectories plot # + var = 10 plt.figure(figsize=(10, 8)) plt.plot(model_parameters.dimensional_time*time, traj[var]) plt.plot(model_parameters.dimensional_time*time, pert_traj[:,var].T, ls=':') ax = plt.gca() plt.xlabel('time (days)') plt.ylabel('$'+model_parameters.latex_var_string[var]+'$'); # - # Mean and standard deviation # + var = 1 plt.figure(figsize=(10, 8)) plt.plot(model_parameters.dimensional_time*time, traj[var]) ax = plt.gca() std_plot(model_parameters.dimensional_time*time, np.mean(pert_traj[:,var], axis=0), np.sqrt(np.var(pert_traj[:, var], axis=0)), ax=ax, alpha=0.5) plt.xlabel('time (days)') 
plt.ylabel('$'+model_parameters.latex_var_string[var]+'$'); # - # ## Showing the resulting fields (animation) # # This is an advanced feature showing the time evolution of diagnostic of the model. It shows simultaneously a scatter plot of the variable $\psi_2$ and $\psi_3$, and the geopotential height field at 500 hPa over the orographic height. Please read the documentation for more information. # Creating the diagnostics: # * For the 500hPa geopotential height: psi = MiddleAtmosphericStreamfunctionDiagnostic(model_parameters, geopotential=True) # * For the nondimensional variables $\psi_2$ and $\psi_3$: variable_nondim = VariablesDiagnostic([2, 1], model_parameters, False) # setting also the background background = VariablesDiagnostic([2, 1], model_parameters, False) background.set_data(reference_time, reference_traj) # Creating a multi diagnostic with both: m = MultiDiagnostic(1,2) m.add_diagnostic(variable_nondim, diagnostic_kwargs={'show_time': False, 'background': background}, plot_kwargs={'ms': 0.2}) m.add_diagnostic(psi, diagnostic_kwargs={'style': 'contour', 'contour_labels': False}, plot_kwargs={'colors': 'k'}) m.set_data(time, traj) # and show an interactive animation: rc('font',**{'family':'serif','sans-serif':['Times'],'size':12}) m.animate(figsize=(20,6)) # or a movie: rc('font',**{'family': 'serif','sans-serif': ['Times'],'size': 12}) m.movie(figsize=(20,6), anim_kwargs={'interval': 100, 'frames': len(time)-1})
notebooks/simple_run.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## A.White space # #### 1. white Formatting in loops for i in [1,2,3,4,5]: print (i) for j in [1,2,3,4,5]: print (j) print (i+j) print (i) print ("done looping") # #### 2.Long_winded_computation long_winded_computation = (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20) long_winded_computation long_winded_computation= (1+2 + 3 + 4 + 5 + 6 + 7+8+9+10+11+12+13+14+15+16+17+18+19+20+21+22+23 ) # #### 3.New line for making code easier to Read # + list_of_list=[[1,2,3],[4,5,6],[7,8,9]] easier_to_read_list_of_lists=[[1,2,3], [4,5,6], [7,8,9]] # - list_of_list easier_to_read_list_of_lists # ##### 4.use of Black slash to indicate that a statement continues onto next line two_plus_three=2+\ 3 two_plus_three # #### 5.Indentation Error while copying code in python shell for i in [1, 2, 3, 4, 5]: # notice the blank line print i # %paste # ### B. MODULES import re my_regex=re.compile("[0-9]+",re.I) my_regex # ### 1. Import value explicitly: # #### if you need a few specific values from a module, you can import them explicilty and use them without qualification: from collections import defaultdict, Counter lookup = defaultdict(int) my_counter=Counter() my_counter match=10 from re import * print (match) import re as rg print(match) # ## C. Arithmetics 5/2 # #### 2b. Integer Division 5//2 # ## D. 
Functions def double(x): return x*2 double(4) def apply_to_one(f): return f(1) my_double=double x=apply_to_one(my_double) my_double x # #### Lambda: short anonymous function y=apply_to_one(lambda x:x+4) y another_double=lambda x:2*x def another_double(x): return 2*x another_double(5) # ##### function parameters can be default arguments: which only need to be specified when you want a value other than the default def my_print(message="my default meassage"): print (message) my_print("great") my_print() def subtract(a=0,b=0): return a-b subtract(10,5) subtract(0,5) subtract(b=5) # ### E. STRINGS # string can be delimited by single or double quotation marks single_quoted_string='data science' double_quoted_string="data science" # python uses backslash to encode special characters tab_string="\t" len(tab_string) tab_string # #### raw string: # if you want backslashes as backslashes ( i.e. in window directory or in regular expression) you can create a raw strings using r " " not_tab_string =r"\t" len(not_tab_string) not_tab_string # #### multi line string using triple -[double quotes]: multi_linw_string= """ this is the first line. and this is the second line and this is the third line """ # ### F. Exceptions # when something goes wrong, python raises an exception/ # unhandled, these will cause your program to clash. you can handle them using try and except: try: print (0/0) except ZeroDivisionError: print (" cannot divide by zero") # ### G. 
LIST integer_list=[1,2,3] hetrogenous_list=["string",0.1,True] list_of_lists=[integer_list, hetrogenous_list,[]] list_of_lists list_of_lists[2][1] # ###### list from [0,1,2,3...9] x=[range(10)] x zero=x[0] one=x[1] one zero nine=x[-1] nine eight=x[-2] eight x[0]=-1 x[0] # #### 2.using square bracket to slice a List x=[0,1,2,3,4,5,6,7,8,9] x[0]=-1 first_three=x[:3] first_three three_to_end=x[3:] three_to_end one_to_four=x[1:5] one_to_four last_three=x[-3:] last_three without_first_and_last=x[1:-1] without_first_and_last copy_of_x=x[:] copy_of_x # #### 3. 'in' operator to check for list membership 1 in [1,2,3] 0 in [1,2,3] 1 in x 0 in x x # #### 4. Concatenating lists together # ##### a. extend () method x=[1,2,3] x.extend([4,5,6]) x # ##### b. list addition x=[1,2,3] y=x+[4,5,6] y # ##### c. append to list -one item at a time x=[1,2,3] x.append(0) x y=x[-1] y z=len(x) z # #### d. unpacking a list x,y,z=[1,2,7] x,y,z type(z) _,z,_=[3,4,8] z # ### H. TUPLES my_list=[1,2] my_tuple=(1,2) other_tuple=3,4 my_list[1]=3 my_list[1] other_tuple my_list[1] try: my_tuple[1]=3 except TypeError: print ("cannot modify a tuple") # #### Tuples are a convinent way to return multiple values from function def sum_and_product (x,y): return(x+y),(x*y) sp=sum_and_product(2,3) sp s,p=sum_and_product(5,10) s p # ##### Tuple and list can be used for multiple assignment x,y=1,2 x,y=y,x y # ### G. 
DICTIONARIES empty_dict={} empty_dict2=dict() grades={"Joel":80, "Tim":95} jeols_grade=grades["Joel"] jeols_grade try: kates_grade=grades["kate"] except KeyError: print (" no grade for Kate") # without using exception kates_grade=grades["kate"] jeol_has_grade = "Joel" in grades jeol_has_grade kate_has_grade="kate" in grades kate_has_grade # #### get method that returns a default value (instead of raising an exception) when you look up a key that’s not in the dictionary: jeols_grade=grades.get("Joel",0) jeols_grade kate_grade=grades.get("kate",0) kate_grade jeols_grade=grades.get("Joel",0) jeols_grade # #### You assign key-value pairs using the same square brackets: grades["Tim"]=101 grades["kate"]=20 num_student=len(grades) num_student # #### Dictionaries are used frequently as a simple way to represent structured data: tweet={ "user": "joelgrus", "text":" DataScience is awesome", "retweet_count":100, "hashtags":["#Data","#science","#Datascience","#awesome","#yolo"] } tweet_key=tweet.keys() tweet_key # + tweet_value=tweet.values() try: tweet_values except NameError: k=tweet.values k # - tweet_value=tweet.values() tweet_value type(tweet_value) tweet_items=tweet.items() tweet_items "user" in tweet_key "user" in tweet_value "user" in tweet "joelgrus" in tweet.values() "joelgrus" in tweet_value # Dictionary keys must be immutable; in particular, you cannot use lists as keys. If # you need a multipart key, you should use a tuple or figure out a way to turn the key # into a string. # ## defaultdict # Imagine that you’re trying to count the words in a document. An obvious approach is # to create a dictionary in which the keys are words and the values are counts. 
As you # check each word, you can increment its count if it’s already in the dictionary and add # it to the dictionary if it’s not: word_counts = {} for word in document: if word in word_counts: word_counts[word] += 1 else: word_counts[word] = 1 # You could also use the “forgiveness is better than permission” approach and just handle # the exception from trying to look up a missing key: word_counts = {} for word in document: try: word_counts[word] += 1 except KeyError: word_counts[word] = 1 A third approach is to use get, which behaves gracefully for missing keys: word_counts = {} for word in document: previous_count = word_counts.get(word, 0) word_counts[word] = previous_count + 1 # Every one of these is slightly unwieldy, which is why defaultdict is useful. A # defaultdict is like a regular dictionary, except that when you try to look up a key it # doesn’t contain, it first adds a value for it using a zero-argument function you provided # when you created it. # # In order to use defaultdicts, you have to import them # from collections: # + from collections import defaultdict word_counts = defaultdict(int) # int() produces 0 for word in document: word_counts[word] += 1 # - # They can also be useful with list or dict or even your own functions: dd_list = defaultdict(list) # list() produces an empty list dd_list[2].append(1) # now dd_list contains {2: [1]} dd_dict = defaultdict(dict) # dict() produces an empty dict dd_dict["Joel"]["City"] = "Seattle" # { "Joel" : { "City" : Seattle"}} dd_pair = defaultdict(lambda: [0, 0]) dd_pair[2][1] = 1 # now dd_pair contains {2: [0,1]} These will be useful when we’re using dictionaries to “collect” results by some key and don’t want to have to check every time to see if the key exists yet. # ## H. Counter # A Counter turns a sequence of values into a defaultdict(int)-like object mapping # keys to counts. 
We will primarily use it to create histograms: from collections import Counter c = Counter([0, 1, 2, 0]) # c is (basically) { 0 : 2, 1 : 1, 2 : 1 } # This gives us a very simple way to solve our word_counts problem: word_counts = Counter(document) # + # A Counter instance has a most_common method that is frequently useful: # print the 10 most common words and their counts for word, count in word_counts.most_common(10): print word, count # - # ## I. Sets # Another data structure is set, which represents a collection of distinct elements: s=set() s.add(1) s.add(2) s.add(2) s x=len(s) x y=2 in s y z=3 in s z stopwords_list= ["a","an","at"] + hundreds_of_other_words + ["yet","you"] "zip" in stopwords_list stop_words_set =set(stop_words_list) "zip" in stopwords_sets # + # The second reason is to find the distinct items in a collection: # - item_list=[1,2,3,1,2,3] num_items=len(item_list) num_items item_set=set(item_list) item_set num_distinct_items=len(item_set) num_distinct_items distinct_item_list=list(item_set) distinct_item_list # ## J. Control Flow # As in most programming languages, you can perform an action conditionally using # if: # ### i) If-else # + if 2<2: message="if only one is greater than two" elif 1<3: message="el if stands for else - if" else: message=" when all else fail use else" print(message) # - # ###### ternary if-then-else on one line, which we will do occasionally: parity="even" if x%2 ==0 else odd # ### ii) while loop: x=0 while x<10: print (x) x=x+1 # ### iii) for-loop # x=0 for x in range(10): print( x , "is less than 10") # ### iv) Continue- Break # If you need more-complex logic, you can use continue and break: for x in range(10): if x==3: continue # go immediately to next iteration if x==5: break # quit the loop print (x) # ## k) Truthiness one_is_less_than_two=1<2 one_is_less_than_two True_equals_false=True==False True_equals_false # Python uses the value None to indicate a nonexistent value. 
It is similar to other languages’ # null: x=None print(x==None) print(x is None) # Python lets you use any value where it expects a Boolean. The following are all # “Falsy”: False None [] {} "" set() 0 0.0 # s = some_function_that_returns_a_string() s="bribe" if s: # if s is true i.e. s contains a string first_char=s[0] else: first_char="" print(first_char) # + # since and returns its second value when the first is “truthy,” the first value when it’s # not. Similarly, if x is either a number or possibly None: first_char=s and s[0] # - first_char x=3 safe_x= x or 0 safe_x # #### Python has an all function, which takes a list and returns True precisely when every # element is truthy, and an any function, which returns True when at least one element # is truthy: all([True,1,{3}]) all([True,1,{}]) any([True,1,{}]) all([]) any([])
pyhtonic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome to the plotting in python lesson. # # 1. <a href='#Task-description'>Task description</a> # 1. <a href='#CSV-files'>CSV files</a> # 1. <a href='#Part-two'>Snakes and pandas</a> # 1. <a href='#Visualization'>Visualization # 1. <a href='#Magic-functions'>Magic functions</a> # 1. <a href='#Formatting'>Plotting in depth</a> # 1. <a href='#Conclusion'>Conclusion</a> # ## Task description # # **&#128161; **<br> # >Today you will learn how to gather and visualize Facebooks stock price data. <br> # >For easy access to historical stock data we can utilize the Yahoo finance website. <br> # >Which can be accessed here: <a href='https://finance.yahoo.com'>finance.yahoo.com</a> <br> # *** # ### Access finance.yahoo.com # ** 1. ** First of all, search for 'Facebook' and go to the 'Historical Data' tab. <br> # ** 2. ** Here, change the 'Time Period' to the last month (28.02.18-27.03.18), then click on 'Apply'. <br> # ** 3. ** Subsequently, 'Download Data', which will present you with a so called comma separated values (CSV) file. # *** # ## CSV files # When you open the file with a text editor you will see something like this: # + language="bash" # head -3 ./data/FB.csv # - # # **&#128161; **<br> # ><i>CSV files are essentially just spreadsheets, <br> # > where **each line** corresponds to one **observation/row** <br> # > and the **columns** are **seperated by comma**.</i> <br> # > The first row usually contains something that is called a 'header', <br> # > which contains the column/feature names.<br><br> # # <b>Question 1:</b> # >In our case, following is a **list of possible candidates**, but the **order is wrong**, can you **correct the order**, <br> # > so that it resembles the one in the CSV file? 
<br> # # <i> You can grab and pull the fields into the correct order or just rewrite their contents</i> # + active="" # Date # + active="" # Open # + active="" # High # + active="" # Low # + active="" # Close # + active="" # Adj Close # + active="" # Volume # - # # <b>Well done!</b> <br> # # **Question 2:** <br> # >Since we downloaded data for a specific time frame, how many rows should our CSV file have in total? # + # Answer here (you should enter a number only): # + # hint: please consider, that stock markets are closed on weekends # + # hint 2: we also have one row for the header # + language="bash" # wc -l ./data/FB.csv # - # &#9989;<b> Yes, that is correct. </b><br> # &#9989; You now understand the fundamentals of CSV files. <br> # &#9989; We can start our project, <b>well done</b>. # *** # <br><br><br> # ## Part two # ## About python &#128013; and pandas &#128060; # Python is a programming language for many use-cases. <br> # When working with data, one **essential factor** is **speed of execution** another is **flexibility**. <br> # **Python is both flexible and fast**, which makes it a suitable candidate for data analysis. # **&#128161; **<br> # >For most of the day-to-day analytics work only a single library is needed in python! <br> # >This library is called [**pandas**](https://pandas.pydata.org/pandas-docs/stable/10min.html) and is per convention imported with the short handle **pd**. <br> # # **Please import the library in the following cell.** import pandas as pd # <br> # **&#128161; **<br> # >In pandas, many different file formats (csv, json, txt, html, sql and more) can be read using the **pd.read_xxx('filename.xxx')** notation. <br> # For example, to read in a **json file** you would use **pd.read_json('filename.json')**. <BR> # The file contents are then injected into a so called **dataframe**, which is the **equivalent of a spreadsheet in python**. # ### Your first dataframe &#128202; # ** It's your turn to load the stock data file into pandas. 
** <br> # &#9888; **Remember, we have a comma separated values file** &#9888; <br><br> # **Assign it to the variable 'df'.** df = pd.read_csv('./data/FB.csv') # In order to look at the dataframe, we will use the head() method on the dataframe. df.head() # <br> # &#9989; csv file loaded as a &#128060; dataframe<br> # &#9989; looked at first five rows of dataframe <br> # <br><br> # _Well that is nice, but what now?_<br><br> # We will use **another library** called **qgrid**. <br> # Qgrid makes it easier to visualize pandas dataframes and play with them.<br> # The **qgrid.show_grid()** method **expects a dataframe as the first argument inside the parentheses.** <br><br> # **Your turn! Add your dataframe to the code below:** import qgrid q_df = qgrid.show_grid(df) # **&#128161; **<br> # >You can now look at the dataframe in an interactive format <br> # >and play with the different columns, rows and filters, <br> # >**just like in excel**. # # **Go ahead, try it! <br> # Change values (double click), apply filters, order rows by column - anything you can imagine:** q_df # Whatever you did to the dataframe above however does only happen in this specific view. <br> # # **&#128161; **<br> # >qgrid is great for looking at the data in raw form, however to visualize the data, we will need to use pandas again.<br> # >Let us look at our pandas dataframe again. 
# # # **&#128161; **<br> # >To have a simple glimpse at the data you can use the **head()** method to look at the **first 5 rows of your dataframe** # # # df.head() # **&#128161; **<br> # > If you add a number between the parentheses the corresponding amount of rows will be shown <br> # > **Go ahead, try it!** df.head(2) # ** You are now able to**: <br> # &#9989; load csv files into pandas dataframes <br> # &#9989; use a qgrid view to interact with the data <br> # &#9989; look at a specific amount of rows using the head() method<br> # # *** # <br><br> # ## Visualization # **The goal of this exercise is to visualize stock data - Lets turn data into plots.** # **&#128161; **<br> # > pandas has a builtin method that makes visualizing dataframes super easy for us - **plot()**. # # <br><br> # # **Go ahead, call plot() on your dataframe!** df.plot() # &#10068;&#10068;&#10068; **HUH! What happened? Where is the plot? ** &#10068;&#10068;&#10068;<br> # >&#10097;&#10097; **It is generated as an object in the background, which makes it very hard to find.** &#10096;&#10096;<br> # # &#10068;&#10068;&#10068; **How do we change that? ** &#10068;&#10068;&#10068;<br> # >&#10097;&#10097; **We will use a magic function!** &#10096;&#10096; # ## Magic functions # <br> # **&#128161; **<br> # > In jupyter notebooks magic functions are lead by **%**. <br> # > One of the most common usages is _**%matplotlib inline**_,<br> # > which generates the plots inside the notebook. 
<br> # > _hint: If you use '%matplotlib inline' in the first cell of the notebook, <br> # > all subsequent cells will be affected._ # # **Go ahead use the magic function in the next cell, followed by the plot method on your dataframe.** # **&#128161; **<br> # >**If you want to remove the &#60;matplotlib.axes._subplots.AxesSubplot at 0x...&#62; output, <br> # > just add a semi-colon at the end of your plot() method - like this: plot();** # %matplotlib inline df.plot(); # &#9989; **Your first plot!** <br> # **Apparently, we have one column, that contains incredibly large values? which one is that?** # + # hint1 you can also look at the dataframe to identify the column # + active="" # volume # - # &#9989; **Well done! The volume contains the number of stock papers and hence is on a different scale compared to the prices** <br> # ** The figure seems to be fine at first glance, however lets prettify the plot.**<br><br> # ## Formatting # **1.** We want the **x-axis** to contain the **date**. <br> # **2.** It would be nice, if the **column** information would be **split into individual plots** - We can identify important information easier. # <br> # # In order to achieve task 1, we will need to **parse dates** and **use the 'Date' column as the index** for the resulting dataframe.<br> # Can you do that in the next cell? <br> df_parsed = pd.read_csv('./data/FB.csv', parse_dates=True, index_col='Date') # _**hint:**_ [documentation of read_csv](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv) # &#9989; **Exceptional!** <br> # <br>**Now use a previously learned method to show the first few rows of the dataframe.<br>** # You can see that the date is now the first column and the name 'Date' is indented to show that it is the 'index'. df_parsed.head() # **Plot the dataframe again:** df_parsed.plot(); # &#9989; **Task 1 completed! 
- You have parsed the dates and plotted them on the x-axis!** <br> # **GOOD JOB!** <br> # # *** # <br> # <br>**On to task number 2:** <br> # _Plot individual figures for the columns._ # # **&#128161; **<br> # >If you want to select the **Volume column** you could use the following syntax <br> # > **df['Volume']** # # **Task: Please plot only the 'Open' column of the date-indexed dataframe** df_parsed['Open'].plot(); # **Almost there, now we only need to add a label for the y-axis to have a beautiful figure.** <br><br> # **&#128161; **<br> # >When you call df.plot() a plot object is returned, which when assigned to a variable can be modified! # # <br> # # **Task:** Please **plot the 'Open'** column again, only this time **assign it to the variable 'figure'.** figure = df_parsed['Open'].plot(); # Now all that is left to do is to modify the y-axis label using the **set_ylabel('new label') method on the figure you created in the previous cell!** <br><br> # **&#128161; **<br> # > **set_ylabel()** modifies the **y-labels** <br> # > and **set_xlabel()** changes the **x-labels**! # # ** Task:** Please change the **y-label** to **'Value in USD [$]'. ** figure.set_ylabel('value in USD [$]'); # ## Conclusion # <br> # <br> # &#9989; **Well done! ** Why stop here, you can now visualize all of the columns and change their axis label! <br> # # &#9989; **You created a plot of stock market data!** <br> # &#9989; **You created a plot of a single column of a dataframe!** <br> # &#9989; **You changed the axis label by modifying the plot object!** <br> # # *** # # **You are doing incredibly well, let's continue next time with some formatting challenges** #
Lesson1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import numpy as np
import cv2
from sklearn.metrics import accuracy_score


def get_masks(groundtruth_masks_path, predicted_masks_path, groundtruth_masks_list):
    """Load ground-truth and predicted segmentation masks and flatten them
    into two parallel 1-D binary label vectors.

    Parameters
    ----------
    groundtruth_masks_path : str
        Directory holding ground-truth masks named ``<stem>_L.png``.
    predicted_masks_path : str
        Directory holding predicted masks named ``mask_<filename>``.
    groundtruth_masks_list : list of str
        Image file names (with extension) from which both mask names are derived.

    Returns
    -------
    tuple of np.ndarray
        ``(groundtruth_labels, predicted_labels)`` — flat int16 arrays with
        1 where the pixel belongs to the class of interest, 0 elsewhere.
    """
    groundtruth_labels = list()
    predicted_labels = list()
    for mask in groundtruth_masks_list:
        temp_gt_mask = cv2.imread(os.path.join(groundtruth_masks_path, mask.split('.')[0] + '_L.png'))
        temp_pred_mask = cv2.imread(os.path.join(predicted_masks_path, 'mask_' + mask))
        # Derive the label-map shape from each loaded image instead of
        # hard-coding 480x640, so other resolutions also work.  The per-pixel
        # class test stays identical: CamVid ground truth uses BGR
        # [128, 64, 128] for the target class, the model emits [0, 255, 0].
        temp_gt_label = np.zeros(temp_gt_mask.shape[:2] + (1,), dtype=np.int16)
        temp_pred_label = np.zeros(temp_pred_mask.shape[:2] + (1,), dtype=np.int16)
        temp_gt_label[:, :, 0] = 1 * np.all((temp_gt_mask[:, :] == [128, 64, 128]), axis=2)
        temp_pred_label[:, :, 0] = 1 * np.all((temp_pred_mask[:, :] == [0, 255, 0]), axis=2)
        groundtruth_labels.append(temp_gt_label)
        predicted_labels.append(temp_pred_label)
    groundtruth_labels = np.array(groundtruth_labels).reshape(-1)
    predicted_labels = np.array(predicted_labels).reshape(-1)
    return groundtruth_labels, predicted_labels


# +
model_dir = 'model_fcn32_ce_250'
train_files_path = '/home/abhishek/Desktop/datasets/camvid/inputs_resized/'
groundtruth_masks_path = '/home/abhishek/Desktop/datasets/camvid/masks_resized/'
predicted_masks_path = os.path.join(os.getcwd(), os.path.join(model_dir, 'masks'))

all_list = os.listdir(train_files_path)
train_list = list(np.load(os.path.join(model_dir, 'train_list.npy')))
valid_list = list(np.load(os.path.join(model_dir, 'valid_list.npy')))
# Test set = every file that is in neither the train nor the validation split.
test_list = list(set(all_list).difference(set(train_list + valid_list)))
# -

train_groundtruth_labels, train_predicted_labels = get_masks(groundtruth_masks_path, predicted_masks_path, train_list)
valid_groundtruth_labels, valid_predicted_labels = get_masks(groundtruth_masks_path, predicted_masks_path, valid_list)
test_groundtruth_labels, test_predicted_labels = get_masks(groundtruth_masks_path, predicted_masks_path, test_list)

len(all_list), len(train_list), len(valid_list), len(test_list)

# Pixel-level accuracy of the binary (class / not-class) segmentation per split.
print('Training Accuracy : ' + str(accuracy_score(train_groundtruth_labels, train_predicted_labels)))
print('Validation Accuracy : ' + str(accuracy_score(valid_groundtruth_labels, valid_predicted_labels)))
print('Test Accuracy : ' + str(accuracy_score(test_groundtruth_labels, test_predicted_labels)))
src/compute_accuracy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: compares reference vs. time-rescaled NLS solutions and plots the
# Fourier-mode decay of FNO test data.

import os
import matplotlib.pyplot as plt
import scipy.io as sio
import torch
import numpy as np
import pandas as pd
import logging
import re

DATA_FP = '/local/meliao/projects/fourier_neural_operator/data/2021-07-29_scaling-experiment.mat'
PLOTS_DIR = '/local/meliao/projects/fourier_neural_operator/experiments/16_predict_different_time_scales/scaling_law_plots'

d = sio.loadmat(DATA_FP)
ref_solns = d['soln_ref']
scaled_solns = d['scaled_solns']


# +
def plot_differences_1(ref_solns, scaled_solns):
    """Plot, per time index, |reference - scaled| on a log scale.

    Only the first ``scaled_solns.shape[0]`` time slices of the reference
    are compared, so the two arrays line up.
    """
    r = ref_solns[:scaled_solns.shape[0]]
    errors = r - scaled_solns
    for time in range(scaled_solns.shape[0]):
        plt.plot(np.abs(errors[time]), label=time+1)
    plt.legend()
    plt.yscale('log')
    plt.show()


def plot_differences_2(ref, scaled, case=1):
    """Three-panel comparison for one time index *case*:
    real parts, imaginary parts, and the (real/imag/abs) difference.
    """
    fig, ax = plt.subplots(1, 3)
    ax[0].plot(np.real(ref[case]), label='reference')
    ax[0].plot(np.real(scaled[case]), label='scaled')
    ax[0].legend()
    ax[1].plot(np.imag(ref[case]), label='reference')
    ax[1].plot(np.imag(scaled[case]), label='scaled')
    ax[1].legend()
    ax[2].plot(np.real(ref[case] - scaled[case]), label='Real')
    ax[2].plot(np.imag(ref[case] - scaled[case]), label='Imag')
    ax[2].plot(np.abs(ref[case] - scaled[case]), label='Abs')
    ax[2].legend()
    plt.show()
# -

for i in range(5):
    plot_differences_2(ref_solns, scaled_solns, case=i)

# + tags=[]
fp = os.path.join(PLOTS_DIR, 'perturbation_experiment.png')
# NOTE(review): neither `plot_time_errors` nor `diffs_dd` is defined anywhere
# in this notebook — this cell raises NameError as written.  Presumably they
# come from an earlier version of the notebook; confirm before running.
plot_time_errors(diffs_dd, np.arange(21), 'IC Perturbations: 10 draws of ICs', fp=fp)
# -

DATA_FP = '/local/meliao/projects/fourier_neural_operator/data/2021-07-22_NLS_data_08_test.mat'

d = sio.loadmat(DATA_FP)
X = torch.tensor(d['output'], dtype=torch.cfloat)
# Magnitude of the DFT along the last (spatial) axis.
fft_data = torch.fft.fft(X, dim=-1).abs()

# + tags=[]
# Mean / stddev of the spectrum across samples (dim=0 is the batch axis).
fft_means = torch.mean(fft_data, dim=0)
print(fft_means.shape)
fft_stddevs = torch.std(fft_data, dim=0)
# -


def plot_fourier_decay( fft_means, fft_stddevs, time_vals, title='', fp=None):
    """Plot the decay of the first 75 Fourier-mode magnitudes at the given
    time indices, on a log y-scale.

    Saves to *fp* when given, otherwise shows interactively.  The stddev
    band is computed but currently not drawn (fill_between is commented out).
    """
    N_MODES = 75
    x_vals = np.arange(N_MODES)
    upper_vals = fft_means + fft_stddevs
    lower_vals = fft_means - fft_stddevs
    plt.figure().patch.set_facecolor('white')
    for i in time_vals:
        k = "Time={}".format(i)
        plt.plot(x_vals, fft_means[i, :N_MODES], label=k, alpha=0.7)
        # plt.fill_between(x_vals,
        #                  upper_vals[i, :N_MODES],
        #                  lower_vals[i, :N_MODES],
        #                  alpha=0.3)
    plt.legend()
    # plt.xlabel("")
    # plt.xticks(ticks=np.arange(0, n_t_steps),
    #            labels=make_special_ticks(n_t_steps),
    #            rotation=45,
    #            ha='right',
    #            )
    plt.ylabel("Abs(DFT(u))", size=13)
    plt.yscale('log')
    plt.title(title)
    plt.tight_layout()
    if fp is not None:
        plt.savefig(fp)
    else:
        plt.show()
    plt.clf()


# + tags=[]
fp = os.path.join(PLOTS_DIR, 'high_frequency_IC_Fourier_decay.png')
print(fp)
plot_fourier_decay(fft_means, fft_stddevs, [0, 1, 10, 20], title='ICs on modes [20,...,25]') #, fp=fp)
# -

X.dtype
experiments/17_NLS_scaling_properties/plot_scaling_data_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python3 # --- import json with open("nbest_predictions.json", "r") as f: nbest_predictions = json.load(f) key = nbest_predictions.keys() answer_dict = {} for key in nbest_predictions.keys(): answer_dict[key.split('_')[0]] = [] for key in nbest_predictions.keys(): id_key = key.split('_')[0] answer_dict[id_key].append([nbest_predictions[key][0]['text'], 0.45 * nbest_predictions[key][0]['doc_score'] + 0.55 * nbest_predictions[key][0]['probability']]) new_answer_dict = {} for key in answer_dict.keys(): answer_dict[key].sort(key=lambda x: x[1], reverse=True) new_answer_dict[key] = answer_dict[key][0][0] with open('nbest_voting_predictions.json', "w", encoding="utf-8") as writer: writer.write(json.dumps(new_answer_dict, indent=4, ensure_ascii=False) + "\n")
ensemble/nbest_voting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # VTL Simple analytic function
#
# Demonstrates how each VTL analytic operator maps onto a Spark window
# function.
#

# + pycharm={"name": "#%%\n"}
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.types import StructField, StructType, StringType, DoubleType, IntegerType, LongType, DecimalType
import os
# NOTE: these pyspark functions intentionally shadow the builtins sum/min/max.
from pyspark.sql.functions import lit, count, sum, avg, collect_list, min, max, percentile_approx, stddev_pop, stddev_samp, var_pop, var_samp
from pyspark.sql.window import Window

# + pycharm={"name": "#%%\n"}
# Run locally by default; set local = False to submit to the k8s cluster.
local = True
if local:
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("VTLAnalytic") \
        .getOrCreate()
else:
    spark = SparkSession.builder \
        .master("k8s://https://kubernetes.default.svc:443") \
        .appName("VTLAnalytic") \
        .config("spark.kubernetes.container.image", "inseefrlab/jupyter-datascience:py3.9.7-spark3.2.0") \
        .config("spark.kubernetes.authenticate.driver.serviceAccountName", os.environ['KUBERNETES_SERVICE_ACCOUNT']) \
        .config("spark.executor.instances", "4") \
        .config("spark.executor.memory", "8g") \
        .config("spark.kubernetes.namespace", os.environ['KUBERNETES_NAMESPACE']) \
        .getOrCreate()

# + pycharm={"name": "#%%\n"}
# Toy dataset: identifiers Id_1..Id_3 and measures Me_1, Me_2.
data = [("A", "XX", 2000, 3, 1),
        ("A", "XX", 2001, 4, 9),
        ("A", "XX", 2002, 7, 5),
        ("A", "XX", 2003, 6, 8),
        ("A", "YY", 2000, 9, 3),
        ("A", "YY", 2001, 5, 4),
        ("A", "YY", 2002, 10, 2),
        ("A", "YY", 2003, 5, 7)]

schema = StructType([StructField("Id_1", StringType(), True),
                     StructField("Id_2", StringType(), True),
                     StructField("Id_3", IntegerType(), True),
                     StructField("Me_1", IntegerType(), True),
                     StructField("Me_2", IntegerType(), True)])
df = spark.createDataFrame(data, schema)
df.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.1 Count
#
# count ( DS_1 over ( partition by Id_1 ) )

# + pycharm={"name": "#%%\n"}
partition_col_name = "Id_1"
win_name = Window.partitionBy(partition_col_name)
target_col_name = "Me_1"
new_col_name = f"count_{target_col_name}"
df_count = df.withColumn(new_col_name, count(target_col_name).over(win_name))
df_count.show()

# + pycharm={"name": "#%%\n"}
df.printSchema()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.2 Sum
# We can do a sum without order by
# DS_r := sum ( DS_1 over ( partition by Id_1 ) )
#

# + pycharm={"name": "#%%\n"}
partition_col_name = "Id_1"
win_name = Window.partitionBy(partition_col_name)
target_col_name = "Me_1"
new_col_name = f"sum_{target_col_name}"
df_sum = df.withColumn(new_col_name, sum(target_col_name).over(win_name))
df_sum.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# We can also do without partition
#
# DS_r := sum ( DS_1 over ( order by Id_1, Id_2, Id_3 ) )

# + pycharm={"name": "#%%\n"}
# Ordering without a partition gives a running (cumulative) sum.
order_col_names = ["Id_1", "Id_2", "Id_3"]
win_name = Window.orderBy(order_col_names)
target_col_name = "Me_1"
new_col_name = f"sum_{target_col_name}"
df_sum = df.withColumn(new_col_name, sum(target_col_name).over(win_name))
df_sum.show()

# + pycharm={"name": "#%%\n"}
# rowsBetween(-1, 1): a physical window of previous/current/next row.
partition_col_name = "Id_1"
win_name = Window.partitionBy(partition_col_name).rowsBetween(-1, 1)
target_col_name = "Me_1"
new_col_name = f"sum_{target_col_name}"
df_collect = df.withColumn(new_col_name, collect_list(target_col_name).over(win_name))
df_collect.show()

# + pycharm={"name": "#%%\n"}
# rangeBetween(-3, 3): a logical window on the order-by column's values.
partition_col_names = ["Id_1", "Id_2"]
order_col_names = ["Id_3"]
win_name = Window.partitionBy(partition_col_names).orderBy(order_col_names).rangeBetween(-3, 3)
target_col_name = "Me_1"
new_col_name = f"sum_{target_col_name}"
df_collect1 = df.withColumn(new_col_name, collect_list(target_col_name).over(win_name))
df_collect1.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.3 min
#
# DS_r := min ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )
#

# + pycharm={"name": "#%%\n"}
# Fixed: the windows below now use the freshly defined plural
# `partition_col_names`, not the stale `partition_col_name` left over from an
# earlier cell (which only worked by accident).
partition_col_names = ["Id_1", "Id_2"]
order_by_col = ["Id_3"]
win_name = Window.partitionBy(partition_col_names).orderBy(order_by_col)
target_col_name = "Me_1"
new_col_name = f"min_{target_col_name}"
df_min = df.withColumn(new_col_name, min(target_col_name).over(win_name))
df_min.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.4 max
#
# DS_r := max ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
order_by_col = ["Id_3"]
win_name = Window.partitionBy(partition_col_names).orderBy(order_by_col)
target_col_name = "Me_1"
new_col_name = f"max_{target_col_name}"
df_max = df.withColumn(new_col_name, max(target_col_name).over(win_name))
df_max.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.5 avg
#
# DS_r := avg ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )
#

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
order_by_col = ["Id_3"]
win_name = Window.partitionBy(partition_col_names).orderBy(order_by_col)
target_col_name = "Me_1"
new_col_name = f"avg_{target_col_name}"
df_avg = df.withColumn(new_col_name, avg(target_col_name).over(win_name))
df_avg.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.6 median
#
#
# DS_r := median ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )
#
# percentile_approx with partitionBy followed by orderBy will do rolling
# median. To have median of each partition, you must only have partitionBy
# in your window definition.

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
win_name = Window.partitionBy(partition_col_names)
target_col_name = "Me_1"
# Fixed: the result column was mislabelled "avg_...".
new_col_name = f"median_{target_col_name}"
df_median = df.withColumn(new_col_name, percentile_approx(target_col_name, 0.5, 10000000).over(win_name))
df_median.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.7 stddev_pop
#
# The operator returns the "population standard deviation" of the input values.
#
# DS_r := stddev_pop ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
win_name = Window.partitionBy(partition_col_names)
target_col_name = "Me_1"
new_col_name = f"stddev_pop_{target_col_name}"
df_stddev_pop = df.withColumn(new_col_name, stddev_pop(target_col_name).over(win_name))
df_stddev_pop.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.8 stddev_samp
#
# The operator returns the "sample standard deviation" of the input values.
#
# DS_r := stddev_samp ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
win_name = Window.partitionBy(partition_col_names)
target_col_name = "Me_1"
new_col_name = f"stddev_samp_{target_col_name}"
df_stddev_samp = df.withColumn(new_col_name, stddev_samp(target_col_name).over(win_name))
df_stddev_samp.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.9 var_pop
#
#
# DS_r := var_pop ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )
# The operator returns the "population variance" of the input values

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
win_name = Window.partitionBy(partition_col_names)
target_col_name = "Me_1"
new_col_name = f"var_pop_{target_col_name}"
df_var_pop = df.withColumn(new_col_name, var_pop(target_col_name).over(win_name))
df_var_pop.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1.10 var_samp
#
#
# DS_r := var_samp ( DS_1 over ( partition by Id_1, Id_2 order by Id_3 ) )
#
# The operator returns the "sample variance" of the input values

# + pycharm={"name": "#%%\n"}
partition_col_names = ["Id_1", "Id_2"]
win_name = Window.partitionBy(partition_col_names)
target_col_name = "Me_1"
new_col_name = f"var_samp_{target_col_name}"
df_var_samp = df.withColumn(new_col_name, var_samp(target_col_name).over(win_name))
df_var_samp.show()

# + pycharm={"name": "#%%\n"}
notebooks/VTLAnalyticSimple.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Training notebook: landmark-driven face reenactment.  A "transformer"
# predicts target landmarks, a generator synthesises the target frame, and a
# discriminator provides the adversarial signal.

# +
import os
from os.path import join as ospj
import time
from time import gmtime, strftime
import datetime
from munch import Munch
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from collections import deque
from network.model_z import build_model
from core.checkpoint import CheckpointIO
from dataset.frame_dataset import FramesDataset, MotionDataset, DatasetRepeater
import network.utils as utils
import yaml
import random
from utils import Bar, Logger, AverageMeter, center
from tqdm import tqdm
from torch.nn.parallel.data_parallel import DataParallel
import pytorch_ssim
from torchsummary import summary
from torchvision import transforms
from tps.rand_tps import RandTPS
from network.vgg import VGG
import imp
from network.wing import FAN, HighPass
import warnings
warnings.filterwarnings("ignore")
import imageio
# -

with open('config/train_transformer5.yaml') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
config = Munch(config)

# GPU Device
gpu_id = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device " , use_cuda)

# Seed every RNG used below for reproducibility.
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
np.random.seed(config.seed)

resume = False


class Solver(nn.Module):
    """Owns the networks, optimizers, checkpoints and the training loop."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.args.lr = float(self.args.lr)
        self.args.weight_decay = float(self.args.weight_decay)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Replay buffer of (source, driving-frame) pairs; training steps only
        # begin once it holds at least `self.start` entries.
        self.start = 10
        self.replay_memory = 10000
        self.replay_buffer = deque(maxlen=self.replay_memory)
        self.nets, self.nets_ema = build_model(args)
        # below setattrs are to make networks be children of Solver,
        # e.g. so .to(device) and named_children() see them
        self.to(self.device)
        for name, module in self.nets.items():
            utils.print_network(module, name)
            setattr(self, name, module)
        for name, module in self.nets_ema.items():
            setattr(self, name + '_ema', module)
        if args.mode == 'train':
            self.optims = Munch()
            for net in self.nets.keys():
                self.optims[net] = torch.optim.Adam(params=self.nets[net].parameters(), lr=float(args.lr), betas=[args.beta1, args.beta2], weight_decay=0)
            self.ckptios = [
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets.ckpt'), **self.nets),
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'), **self.nets_ema),
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_optims.ckpt'), **self.optims)]
        else:
            self.ckptios = [CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'), **self.nets_ema)]
        self.to(self.device)
        for name, network in self.named_children():
            # Do not initialize the FAN parameters
            if ('ema' not in name) and ('fan' not in name):
                print('Initializing %s...' % name)
                network.apply(utils.he_init)
        # landmark extractor (frozen, pretrained)
        self.fan = FAN(fname_pretrained=config.fname_fan)
        self.fan.to(self.device)
        self.fan.eval()
        self.hpf = HighPass(config.w_hpf, self.device)
        # Random erasing used to occlude parts of the driving landmarks.
        self.masking = transforms.RandomErasing(p=1.0, scale=(0.30, 0.33), ratio=(0.3, 0.33))
        self.clip_num = config.clip_num
        # eqv: color-jitter transform used for the equivariance loss
        self.eqv_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ColorJitter(brightness=config.brightness, contrast=config.contrast, saturation=config.saturation, hue=config.hue),
            transforms.ToTensor(),])
        self.interp = nn.Upsample(size=(config.img_size, config.img_size), mode='bilinear', align_corners=True)
        # Random thin-plate-spline warps, also for the equivariance loss.
        self.tps = RandTPS(width=config.img_size, height=config.img_size, batch_size=config.batch_size, sigma=config.sigma, border_padding=True, random_mirror=True, random_scale=(0.8, 1.1))
        self.tps.to(self.device)
        self.tps.eval()
        self.kl = nn.KLDivLoss().to(self.device)
        # perceptual loss
        # NOTE(review): `imp` is deprecated; MainModel is loaded only so that
        # torch.load can unpickle the VGG weights file.
        self.vgg = VGG()
        MainModel = imp.load_source("MainModel", args.fname_ir)
        weight = torch.load(args.fname_vgg, map_location='cpu')
        self.vgg.load_state_dict(weight.state_dict(), strict=False)
        self.vgg.eval()
        self.vgg.to(self.device)

    def _save_checkpoint(self, step):
        # Persist nets, EMA nets and optimizers for the given step.
        for ckptio in self.ckptios:
            ckptio.save(step)

    def _load_checkpoint(self, step):
        for ckptio in self.ckptios:
            ckptio.load(step)

    def _reset_grad(self):
        # Zero gradients of every optimizer before each backward pass.
        for optim in self.optims.values():
            optim.zero_grad()

    @torch.no_grad()
    def evaluate(self, args, epoch, nets, loader):
        """Compute SSIM over *loader* and dump generated/target frames to
        disk (for a later FID computation)."""
        if not os.path.isdir(args.result_dir):
            os.makedirs(args.result_dir)
        result_target = os.path.join(args.result_dir, 'tar')
        result_gen = os.path.join(args.result_dir,'gen')
        if not os.path.isdir(result_target):
            os.makedirs(result_target)
        if not os.path.isdir(result_gen):
            os.makedirs(result_gen)
        bar = tqdm(total=len(loader), leave=False)
        ssim_meter, fid_meter = AverageMeter(), AverageMeter()  # fid_meter is currently unused
        for iteration, x in enumerate(loader):
            # NOTE(review): bare except silently skips any malformed batch.
            try:
                test_video = torch.tensor(np.concatenate(x['video'])) # (frame, c, w, h)
            except:
                continue
            num_frame = test_video.shape[0]
            # Pick two distinct frames: one source, one target.
            k_frame = np.random.choice(num_frame-args.K, size=2, replace=False)
            source = test_video[[k_frame[0]]]
            target = test_video[[k_frame[1]]]
            source_out = nets.transformer(self.fan.get_landmark(source.to(self.device)), self.fan.get_landmark(target.to(self.device)))
            source_out = self.get_clip(source_out)
            source_gen = nets.generator(source.to(self.device), target.to(self.device), source_out.to(self.device))
            ssim = float(pytorch_ssim.ssim(source_gen, target.to(self.device)))
            ssim_meter.update(ssim, iteration+1)
            # save for FID
            gen = source_gen.squeeze().cpu().detach().numpy()
            target = target.squeeze().cpu().detach().numpy()
            gen = gen.swapaxes(0, 1).swapaxes(1, 2)
            target = target.swapaxes(0, 1).swapaxes(1, 2)
            gen_img = Image.fromarray((gen*255).astype('uint8'))
            tar_img = Image.fromarray((target*255).astype('uint8'))
            gen_img.save(result_gen + '/{}.png'.format(iteration+1))
            tar_img.save(result_target + '/{}.png'.format(iteration+1))
            bar.set_description("Epoch:{:d}, SSIM: {:.8f}".format(epoch+1, ssim_meter.avg), refresh=True)
            bar.update()
        bar.close()
        # NOTE(review): `val_logger` is not defined in this notebook — this
        # line raises NameError when evaluate() runs; confirm its origin.
        val_logger.append([str(ssim_meter.avg)])
        return

    @torch.no_grad()
    def make_animation(self, args, nets, loaders):
        """Render sample videos: reenact one random source image with a
        random test video and a random training video, save mp4s."""
        if not os.path.isdir(args.sample_dir):
            os.makedirs(args.sample_dir)
        K = 100  # cap on the number of test frames rendered
        random_list = np.random.choice(len(loaders.val.dataset), replace=False, size=2)
        source_image_idx = int(random_list[0])
        test_video_idx = int(random_list[1])
        train_video_idx = int(np.random.choice(len(loaders.src.dataset), size=1))
        # test animation
        source_image = loaders.val.dataset[source_image_idx]['video'][0]
        test_video = loaders.val.dataset[test_video_idx]['video'] # list [video](3, 256, 256)
        train_video = loaders.src.dataset[train_video_idx]['target'] # list [K](3, 256, 256)
        test_frame = len(test_video) if len(test_video) < K else K
        train_frame = len(train_video)
        predict_test, predict_train = [], []
        for i in range(test_frame):
            out = nets.transformer(self.fan.get_landmark(source_image.to(self.device).unsqueeze(0)), self.fan.get_landmark(test_video[i].to(self.device).unsqueeze(0)))
            out = self.get_clip(out)
            gen = nets.generator(source_image.to(self.device).unsqueeze(0), test_video[i].to(self.device).unsqueeze(0), out.to(self.device))
            predict_test.append(gen.cpu().detach().numpy().squeeze().swapaxes(0, 1).swapaxes(1,2))
        for i in range(train_frame):
            out = nets.transformer(self.fan.get_landmark(source_image.to(self.device).unsqueeze(0)), self.fan.get_landmark(train_video[i].to(self.device).unsqueeze(0)))
            out = self.get_clip(out)
            gen = nets.generator(source_image.to(self.device).unsqueeze(0), train_video[i].to(self.device).unsqueeze(0), out.to(self.device))
            predict_train.append(gen.cpu().detach().numpy().squeeze().swapaxes(0,1).swapaxes(1,2))
        self.predict_test = predict_test
        source_image = (source_image*255).numpy().swapaxes(0,1).swapaxes(1,2).astype('uint8')
        source_img = Image.fromarray(source_image)
        source_img.save(config.sample_dir+ '/source.png')
        imageio.mimsave(os.path.join(config.sample_dir, 'test_gen.mp4'),
                        [(frame*255).astype('uint8') for frame in predict_test],
                        fps=24)
        imageio.mimsave(os.path.join(config.sample_dir, 'test_raw.mp4'),
                        [(frame*255).numpy().astype('uint8').swapaxes(0,1).swapaxes(1,2) for frame in test_video],
                        fps=24)
        imageio.mimsave(os.path.join(config.sample_dir, 'train_gen.mp4'),
                        [(frame*255).astype('uint8') for frame in predict_train],
                        fps=24)
        imageio.mimsave(os.path.join(config.sample_dir, 'train_raw.mp4'),
                        [(frame*255).numpy().astype('uint8').swapaxes(0,1).swapaxes(1,2) for frame in train_video],
                        fps=24)

    def get_clip(self, out):
        # Zero out landmark-map activations below the clip threshold.
        # NOTE: modifies `out` in place and returns it.
        out[out < self.clip_num] = 0.
        return out

    def train(self, loaders):
        """Main training loop: per driving frame, sample a pair from the
        replay buffer and step transformer, discriminator, generator."""
        args = self.args
        nets = self.nets
        nets_ema = self.nets_ema
        for name in nets:
            nets[name] = DataParallel(nets[name])
            nets[name] = nets[name].to(self.device)
        optims = self.optims
        # resume training if necessary
        if args.resume_iter > 0:
            self._load_checkpoint(args.resume_iter)
        # batch
        for epoch in range(args.resume_iter, args.epochs):
            bar = tqdm(total=len(loaders.src)*args.K, leave=False)
            tf_loss, eqv_loss, center_loss = AverageMeter(), AverageMeter(), AverageMeter()
            wgan_loss, d_reg_loss = AverageMeter(), AverageMeter()
            g_latent_loss, g_cycle_loss, vgg_loss, fm_loss = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
            for i, inputs in enumerate(loaders.src):
                x_source, y_drive = inputs['source'], inputs['target']
                num_frame = len(y_drive)
                for f in range(num_frame):
                    self.replay_buffer.append((x_source, y_drive[f]))
                    # Warm-up: skip optimisation until the buffer is primed.
                    if len(self.replay_buffer) < self.start:
                        continue
                    minibatch = random.sample(self.replay_buffer, 1)
                    x_source_mb, y_drive_mb = minibatch[0][0], minibatch[0][1]
                    # train the transformer
                    x_out, tf_losses, tf_losses_latent = compute_tf_loss(nets, args, x_source_mb, y_drive_mb, transform=self.eqv_transform, tps=self.tps, interp=self.interp, kl=self.kl, fan=self.fan, mask=self.masking, device=self.device)
                    self._reset_grad()
                    tf_losses.backward()
                    optims.transformer.step()
                    # train the discriminator
                    d_loss, d_losses_latent = compute_d_loss(nets, args, x_source_mb, y_drive_mb, x_out, device=self.device)
                    self._reset_grad()
                    d_loss.backward()
                    optims.discriminator.step()
                    # train the generator
                    g_loss, g_losses_latent = compute_g_loss(nets, args, x_source_mb, y_drive_mb, x_out, vgg=self.vgg, fan=self.fan, device=self.device)
                    self._reset_grad()
                    g_loss.backward()
                    optims.generator.step()
                    wgan_loss.update(float(d_losses_latent.wgangp), x_source.size(0))
                    d_reg_loss.update(float(d_losses_latent.reg), x_source.size(0))
                    g_latent_loss.update(float(g_losses_latent.adv), x_source.size(0))
                    g_cycle_loss.update(float(g_losses_latent.cyc), x_source.size(0))
                    vgg_loss.update(float(g_losses_latent.vgg), x_source.size(0))
                    fm_loss.update(float(g_losses_latent.fm), x_source.size(0))
                    tf_loss.update(float(tf_losses_latent.tf), x_source.size(0))
                    eqv_loss.update(float(tf_losses_latent.eqv), x_source.size(0))
                    center_loss.update(float(tf_losses_latent.center), x_source.size(0))
                    bar.set_description("Ep:{:d}, D: {:.6f}, R1: {:.2f}, G: {:.6f}, Cyc: {:.6f}, Vgg: {:.6f}, FM: {:.6f}, TF: {:.6f}, Eqv: {:.6f}, Ct: {:.6f}".format(
                        epoch+1, wgan_loss.avg, d_reg_loss.avg, g_latent_loss.avg, g_cycle_loss.avg, vgg_loss.avg, fm_loss.avg, tf_loss.avg, eqv_loss.avg, center_loss.avg), refresh=True)
                    bar.update()
            bar.close()
            # save model checkpoints
            # NOTE(review): `logger` is not defined in this notebook — this
            # line raises NameError when train() runs; confirm its origin.
            logger.append([str(wgan_loss.avg)[:8], str(d_reg_loss.avg)[:8], str(g_latent_loss.avg)[:8], str(g_cycle_loss.avg)[:8], str(vgg_loss.avg)[:8], str(fm_loss.avg)[:8], str(tf_loss.avg)[:8], str(eqv_loss.avg)[:8], str(center_loss.avg)[:8]])
            if (epoch+1) % config.save_every == 0:
                self._save_checkpoint(step=epoch+1)
            # compute SSIM and FID in test_set
            if (epoch+1) % config.eval_every == 0:
                self.evaluate(args, epoch, nets, loaders.val)
                self.make_animation(args, nets, loaders)
        # Final evaluation after the last epoch.
        self.evaluate(args, epoch, nets, loaders.val)


# +
def compute_tf_loss(nets, args, x_real, y_org, transform, tps, interp, kl, fan, mask, device='cuda'):
    """Transformer loss: landmark regression (MSE against the unmasked
    driving landmarks) plus TPS/color-jitter equivariance and center terms.
    Returns (predicted landmark map, total loss, per-term scalars)."""
    # TF_loss
    l2_loss = nn.MSELoss()
    x_real, y_org = x_real.to(device), y_org.to(device)
    x_landmark, y_landmark = fan.get_landmark(x_real), fan.get_landmark(y_org)
    y_landmark_masked = mask(y_landmark)
    x_out = nets.transformer(x_landmark, y_landmark_masked)
    loss_tf = l2_loss(x_out, y_landmark)
    # Equivariance loss
    # real --> transform(color / tps) --> gen
    softmax = nn.Softmax(dim=1)
    images_cj = x_landmark
    for b in range(images_cj.shape[0]):
        images_cj[b] = transform(images_cj[b])*255.0
    tps.reset_control_points()
    images_tps = tps(images_cj)
    pred_tps = nets.transformer((images_tps/255.0), y_landmark_masked)
    pred_tps = interp(pred_tps)
    # real --> gen --> transform(tps)
    pred_d = x_out
    pred_d.requires_grad = False
    pred_tps_org = tps(pred_d, padding_mode='zeros')
    # Center: first center - second center
    centers_tps = center.batch_get_centers(softmax(pred_tps)[:, 1:, :, :])
    pred_tps_gen = tps(x_out, padding_mode='zeros')
    centers_tps_gen = center.batch_get_centers(softmax(pred_tps_gen)[:, 1:, :, :])
    # KL (first, second) / Center mse (first, second)
    loss_eqv = kl(F.log_softmax(pred_tps, dim=1), F.softmax(pred_tps_org))
    loss_center = F.mse_loss(centers_tps, centers_tps_gen)
    loss = loss_tf * args.lambda_tf + loss_eqv * args.lambda_eqv + loss_center * args.lambda_eqv
    return x_out, loss, Munch(tf=loss_tf.item(), eqv=loss_eqv.item(), center=loss_center.item())


def compute_d_loss(nets, args, x_real, y_org, x_out, device='cuda'):
    """Discriminator loss: BCE on real/fake plus R1 gradient penalty.
    The generator forward runs under no_grad — only D gets gradients."""
    # assert (z_trg is None) != (x_ref is None)
    # with real images
    x_real, y_org, x_out = x_real.to(device), y_org.to(device), x_out.to(device)
    x_real.requires_grad = True
    real_out = nets.discriminator(x_real)
    loss_real = adv_loss(real_out, 1)
    # R1-reg
    loss_reg = r1_reg(real_out, x_real)
    # with fake images
    with torch.no_grad():
        x_fake = nets.generator(x_real, y_org, x_out)
    fake_out = nets.discriminator(x_fake)
    loss_fake = adv_loss(fake_out, 0)
    loss = loss_real + loss_fake + args.lambda_reg * loss_reg
    return loss, Munch(wgangp=loss_real.item()+loss_fake.item(), reg=loss_reg.item())


def compute_g_loss(nets, args, x_real, y_org, x_out, vgg, fan, device='cuda'):
    """Generator loss: adversarial + landmark cycle-consistency + VGG
    perceptual + landmark feature-matching terms."""
    # adversarial loss
    x_real, y_org, x_out= x_real.to(device), y_org.to(device), x_out.to(device)
    x_fake = nets.generator(x_real, y_org, x_out)
    out = nets.discriminator(x_fake)
    # adv loss
    loss_adv = adv_loss(out, 1)
    # cycle-consistency loss
    x_fake_landmark = fan.get_landmark(x_fake)
    x_real_landmark = fan.get_landmark(x_real)
    x_fake_out = nets.transformer(x_fake_landmark, x_real_landmark)
    x_rec = nets.generator(x_fake, x_real, x_fake_out)
    loss_cyc = torch.mean(torch.abs(x_rec - x_real))
    # perceptual loss
    l1_loss = nn.L1Loss()
    with torch.no_grad():
        vgg_x = vgg(x_real)
    with torch.autograd.enable_grad():
        vgg_xhat = vgg(x_fake)
    loss_vgg = 0
    for x_feat, xhat_feat in zip(vgg_x, vgg_xhat):
        loss_vgg += l1_loss(x_feat, xhat_feat)
    # feature-matching loss
    loss_fm = l1_loss(x_out, x_fake_landmark)
    loss = loss_adv + args.lambda_cyc * loss_cyc + args.lambda_vgg * loss_vgg + args.lambda_fm * loss_fm
    return loss, Munch(adv=loss_adv.item(), cyc=loss_cyc.item(), vgg=loss_vgg.item(), fm=loss_fm.item())


def gradient_penalty(nets, real, fake, reg_lambda=10, device='cuda'):
    """WGAN-GP gradient penalty on interpolates of real and fake batches.
    (Defined but not called in this notebook's training loop.)"""
    from torch.autograd import grad
    batch_size = real.shape[0]
    epsilon = torch.rand(batch_size, 1, 1, 1).to(device)
    merged = (epsilon * real) + ((1 - epsilon) * fake)
    # forward
    op = nets.discriminator(merged)
    # gradient w.r.t. the merged (interpolated) input
    gradient = grad(outputs=op, inputs=merged, create_graph=True, grad_outputs=torch.ones_like(op), retain_graph=True, only_inputs=True)[0]
    # calc penalty
    penalty = reg_lambda * ((gradient.norm(p=2, dim=1) - 1) ** 2).mean()
    return penalty


def moving_average(model, model_test, beta=0.999):
    # Exponential moving average update of model_test toward model.
    for param, param_test in zip(model.parameters(), model_test.parameters()):
        param_test.data = torch.lerp(param.data, param_test.data, beta)


def adv_loss(logits, target):
    # Non-saturating BCE-with-logits adversarial loss; target is 0 or 1.
    assert target in [1, 0]
    targets = torch.full_like(logits, fill_value=target)
    loss = F.binary_cross_entropy_with_logits(logits, targets)
    return loss


def r1_reg(d_out, x_in):
    # zero-centered gradient penalty for real images
    batch_size = x_in.size(0)
    grad_dout = torch.autograd.grad(
        outputs=d_out.sum(), inputs=x_in,
        create_graph=True, retain_graph=True, only_inputs=True
    )[0]
    grad_dout2 = grad_dout.pow(2)
    assert(grad_dout2.size() == x_in.size())
    reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)
    return reg
# -

train_dataset = MotionDataset(config.root_dir, image_shape=config.frame_shape, id_sampling=True, is_train=True, random_seed=config.seed)
test_dataset = FramesDataset(config.root_dir, image_shape=config.frame_shape, id_sampling=True, is_train=False, random_seed=config.seed)

# +
# train_dataset = DatasetRepeater(train_dataset, config.num_repeats)
# -

train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=config.num_workers)
loaders = Munch(src=train_loader, val=test_loader)

solver = Solver(config)
solver._load_checkpoint(4)

# Grab one batch from each loader for the interactive inspection below.
for i in train_loader:
    break
for j in test_loader:
    break

source = i['source'][[5]]
target = i['target'][5][[5]]
video = j['video']

import matplotlib.pyplot as plt
plt.imshow(source.cpu().squeeze().numpy().swapaxes(0,1).swapaxes(1,2))

plt.imshow(target.cpu().squeeze().numpy().swapaxes(0,1).swapaxes(1,2))

plt.imshow(video[0].cpu().squeeze().numpy().swapaxes(0,1).swapaxes(1,2))

source_land = solver.fan.get_landmark(source.cuda())
target_land = solver.fan.get_landmark(target.cuda())
test_land = solver.fan.get_landmark(video[0].cuda())

plt.imshow(source_land.cpu().squeeze())

plt.imshow(target_land.cpu().squeeze())

plt.imshow(test_land.cpu().squeeze())

t = solver.nets.transformer(source_land, target_land)
t_test = solver.nets.transformer(test_land, target_land)

plt.imshow(solver.get_clip(t).cpu().detach().numpy().squeeze())

plt.imshow(solver.get_clip(t_test).cpu().detach().numpy().squeeze())

out = solver.generator(video[0].cuda(), target.cuda(), solver.get_clip(t_test))
plt.imshow(out.cpu().detach().squeeze().numpy().swapaxes(0,1).swapaxes(1,2))

plt.imshow(solver.fan.get_landmark(out).cpu().numpy().squeeze())

# Scratch cells below: exploring AdaIN parameter slicing for the generator.
slice_idx = [0, 512*4, #res1
             512*4, #res2
             512*4, #res3
             512*4, #res4
             512*4, #res5
             512*2 + 256*2, #resUp1
             256*2 + 128*2, #resUp2
             128*2 + 64*2, #resUp3
             64*2 + 32*2, #resUp4
             32*2] #last adain
for i in range(1, len(slice_idx)):
    slice_idx[i] = slice_idx[i-1] + slice_idx[i]
slice_idx

P_LEN = 2*(512*2*5 + 512+256 + 256+128 + 128+64 + 64+32 + 32)
P_LEN

p = nn.Parameter(torch.rand(13184, 512).normal_(0.0, 0.01))
psi = nn.Parameter(torch.rand(13184, 1))
e = nn.Parameter(torch.rand(1, 512, 1))
p = p.unsqueeze(0)
e_psi = torch.bmm(p, e)

test = torch.Tensor(np.random.normal(size=(1, 64, 64, 64)))

# NOTE(review): this is a bare annotation statement (no effect at runtime);
# it was presumably meant to be used inside an indexing expression.
slice_idx[0]:slice_idx[1]

int_land = F.interpolate(target_land, size=256, mode='bilinear')
int_land

plt.imshow(int_land.repeat(1,3,1,1).cpu().detach().numpy().squeeze().swapaxes(0,1).swapaxes(1,2))

g = F.grid_sample(target.cuda(), int_land.permute(0,2,3,1).repeat(1,1,1,2))
plt.imshow(g.cpu().detach().numpy().squeeze().swapaxes(0,1).swapaxes(1,2))

s_synthesized = source.cuda()+F.interpolate(source_land, size=256, mode='bilinear')
v = target.cuda() *g
plt.imshow(v.cpu().detach().numpy().squeeze().swapaxes(0,1).swapaxes(1,2))

solver.nets.discriminator

2**14 // 256
baseline_train_Kframe_transformer_landmark-test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Assignment Numpy Random Package
# --------------------------------------------------------

# ### Contents
#
# [1. Purpose of numpy random package](#Purpose)
#
# [2. Use simple random data](#random)
#
# [2.1 Random random function](#randomr)
#
# [3. Use permutations function](#permutations)
#
# [4. Five Distributions explained](#Distribution)
#
# [4.1 Normal Distribution](#normal)
#
# [4.2 Binomial Distribution](#Binomial)
#
# [4.3 Uniform Distribution](#Uniform)
#
# [4.4 Poisson Distribution](#Poisson)
#
# [4.5 Negative Binomial Distribution](#Negative)
#
# [5. Use of seeds in generating pseudorandom numbers](#Seeds)

# ### 1. Purpose of numpy random package <a name="Purpose"></a>
#
# This package carries out random sampling. This process returns a sample or value entirely by chance from a specific population.
#
# Numpy random package provides various routines. It uses a particular algorithm, called the Mersenne Twister, to generate pseudorandom numbers.

# ### 2. Use Simple random data <a name="random"></a>
#
# The simple random data in numpy contains functions to generate random data. There are several functions including random.integers which returns random integers within a specified range and random.rand which creates an array of the given shape and populates it with random samples from a uniform distribution (0,1).
#
# The random functions can generate data with or without seeding (see section 5).
#

# ### 2.1 Random.Random function <a name="randomr"></a>
# random.random function returns a float in the interval [0.0,1.0]. When the function is not seeded it uses the current system time from your computer's operating system.

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

x = np.random.random()
print(x)

# ### 3. Use permutations function <a name="permutations"></a>
#
# The permutations function is the arrangements that can be made from a set of objects (n) given a specified arrangement size (r).[4](#4)
#
# Each way these letters can be arranged is called a permutation.
#
# r = 2 - This represents the number of data in each arrangement e.g. two letters
#
# n = (A,B,C) - This represents the full dataset
#
# Permutations = [AB, BA, AC, CA, BC, CB]
#

# ## 4.1 Normal Distribution <a name="normal"></a>
#
# Normal distribution when plotted on a graph forms a bell shaped curve.
# The plot will be centered around the mean (see plot below where the mean is 7 which is the centre of the graph).
#
# ### Function : np.random.normal(a,b,c)
#
# a = Mean  b = standard deviation  c = number of random datapoints created
#
# ### Output
#
# The data for a normal distribution plot centered around a mean of 7, showing 2 standard deviations of the data, and creates 100,000 random data points that are of normal distribution. 68% of the data values generated will be ±1 standard deviations of the mean. 95% of the data values generated will be within ±2 standard deviations of the mean [1](#1).

# NOTE(review): seaborn's distplot is deprecated; histplot/displot are the
# modern equivalents — kept as-is to preserve behaviour.
import numpy as np
import seaborn as sns

x = np.random.normal(7,2,100000)
sns.distplot(tuple(x))

# ## 4.2 Binomial Distribution <a name="Binomial"></a>
# The binomial distribution is the frequency distribution of the number of successes
# in a given number of trials with a specified probability of success in
# each trial.
#
# ### Function : np.random.binomial( n,p,s)
# n = number of trials  p = probability of each trial  s = number of times a trial is carried out
#
#
# ### Output
# The height of each bar represents the probability of that event occurring.
#

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

x = np.random.binomial(20,.1,100000)
sns.distplot(x)
plt.show()

# ## 4.3 Uniform Distribution
#
# Under a uniform distribution, the set of variables all have the exact same possibility of happening.
#
# ### Function : np.random.uniform(a,b,c)
#
# a = lowest value  b = highest value  c = sample size
#
# ### Output
#
# The graph below which displays a rectangular shape shows that each value within the dataset has the same probability of being generated.
#

t = np.random.uniform(1,3,100000)
sns.distplot(t)
plt.show()

# ## 4.4 Poisson Distribution
#
# This distribution tells us the distribution of events per unit of time or space when we sample a number of units.
#
# ### Function : np.random.poisson(mu, size)
#
# mu = rate at which the events happen  size : number of samples
#
# ### Output
# The x-axis displays the number of events and the y-axis displays the frequency/time interval
#
# ### Use
# Poisson distributions can be used to make forecasts about customers or sales on certain days or seasons of the year.
#

# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

t = np.random.poisson(5, 10000)
sns.distplot(t)
plt.show()
# -

# ## 4.5 Negative binomial
#
# This distribution represents the number of repeated trials to produce a number of successes in a negative binomial experiment.
#
# ### Function np.random.negative_binomial( n, p ,s)
# n = number of successes  p = probability of success  s = number of events
#
# ### Output
#
# From the x-axis after 30 phone calls the probability of success increases to 22%.
#
# ### Example of its use
#
# A customer service agent is required to get three (n) telephone customers to complete surveys. The probability of her achieving this on one call is 10% (p). This distribution displays the number of calls she will likely have to make before she achieves success.
# import matplotlib.pyplot as plt u = np.random.negative_binomial (3,0.1,1000) sns.distplot(u) plt.show() # ## 5 The use of seeds in generating pseudorandom numbers # # A pseudorandom number generator is an algorithm for generating a sequence of numbers whose properties approximate the properties of sequences of random numbers.[2](#2) Python uses the Mersenne Twister as the core generator. [3](#3) # # Once you put in the seed value you get the same pattern of random numbers (see code 1 & 3 below). Each seed value corresponds to a sequence of generated values from a given random number generator. # # As the seed is not set, in code 2 below the number will be generated using the current system time on your operating system. # Set the seed import numpy as np import random as rd rd.seed(2) y = rd.randint(1, 1000) print(y) #Seed not set import numpy as np import random as rd y = rd.randint(1, 1000) print(y) #set the seed to see same result import numpy as np import random as rd rd.seed(2) y = rd.randint(1, 1000) print(y) # ### References # # 1. <a name="#1"></a> <NAME>. and <NAME>., 2017. Practical Statistics for Data Scientists: 50 Essential Concepts. " O'Reilly Media, Inc." # 2. <a name="#2"></a> https://en.wikipedia.org/wiki/Pseudorandom_number_generator # # 3. <a name="#3"></a> https://docs.python.org/2/library/random.html # 4. <a name="#4"></a> https://stattrek.com/statistics/dictionary.aspx?definition=permutation #
numpy-random.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `GiRaFFE_NRPy`: Solving the Induction Equation # # ## Author: <NAME> # # Our goal in this module is to write the code necessary to solve the induction equation # $$ # \partial_t A_i = \underbrace{\epsilon_{ijk} v^j B^k}_{\rm No\ Gauge\ terms} - \underbrace{\partial_i \left(\alpha \Phi - \beta^j A_j \right)}_{\rm Gauge\ terms} # $$ # # Since we are not using staggered grids, we can greatly simplify this algorithm with respect to the version used in the original `GiRaFFE`. We turn to T&oacute;th's [paper](https://www.sciencedirect.com/science/article/pii/S0021999100965197?via%3Dihub), Eqs. 30 and 31, and a 3D version of the same algorithm in **TODO: RIT paper**. # # Consider the electric field $E_i = \epsilon_{ijk} v^j B^k$ (this relation assumes the ideal MHD limit, which is also assumed in FFE). # # Consider the point $i,j,k$. Let components of tensors be indicated with braces, i.e. the $i^{\rm th}$ component of the electric field at point $i,j,k$ will be written as $\left(E_{\{i\}}\right)_{i,j,k}$. Then # \begin{align} # \left(E_{\{1\}}\right)_{i,j,k} = \frac{1}{4}(v^{\{2\}})B^{\{3\}})_{i,j-\tfrac{1}{2},k} &+ \frac{1}{4}(v^{\{2\}})B^{\{3\}})_{i,j+\tfrac{1}{2},k}\\ # - \frac{1}{4}(v^{\{3\}})B^{\{2\}})_{i,j,k-\tfrac{1}{2}} &- \frac{1}{4}(v^{\{3\}})B^{\{2\}})_{i,j,k+\tfrac{1}{2}} # \end{align} # # The other components follow via a cyclic permutation of the indices. Note a potential complication here: When we are calculating $i^{\rm th}$ component of the electric field, we are concerned with the reconstructed quantities in the $j^{\rm th}$ and $k^{\rm th}$ directions. 
This means that it will be sensible to do something similar to what we do with the A2B module and think first about the directions in which a stencil goes, and *then* the terms that involve it. # # In this case, we will compute the face-value products of $v^i$ and $B^i$ in, say, the 0th direction **TODO: rectify off-by-one above**. Then, we will compute the parts of components of the electric field that depend on those: the 1st and 2nd direction. # An outline of a general finite-volume method is as follows, with the current step in bold: # 1. The Reconstruction Step - Piecewise Parabolic Method # 1. Within each cell, fit to a function that conserves the volume in that cell using information from the neighboring cells # * For PPM, we will naturally use parabolas # 1. Use that fit to define the state at the left and right interface of each cell # 1. Apply a slope limiter to mitigate Gibbs phenomenon # 1. Interpolate the value of the metric gridfunctions on the cell faces # 1. **Solving the Riemann Problem - <NAME>, (This notebook, $E_i$ only)** # 1. **Use the left and right reconstructed states to calculate the unique state at boundary** # # We will assume in this notebook that the reconstructed velocities and magnetic fields are available on cell faces as input. We will also assume that the metric gridfunctions have been interpolated on the metric faces. # # Solving the Riemann problem, then, consists of two substeps: First, we compute the flux through each face of the cell. Then, we add the average of these fluxes to the right-hand side of the evolution equation for the vector potential. # We begin by importing the NRPy+ core functionality. We also import the Levi-Civita symbol, the GRHD module, and the GRFFE module. 
# + # Step 0: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) from outputC import * # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import loop as lp # NRPy+: Generate C code loops import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface import shutil, os, sys # Standard Python modules for multiplatform OS-level functions thismodule = "GiRaFFE_NRPy-Induction_Equation" import GRHD.equations as GRHD import GRFFE.equations as GRFFE # Import the Levi-Civita symbol and build the corresponding tensor. # We already have a handy function to define the Levi-Civita symbol in WeylScalars import WeylScal4NRPy.WeylScalars_Cartesian as weyl # - # This function is identical to the one done by Stilde_flux. See [that tutorial](Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb#hydro_speed) for further information on the derivation. # We'll write this as a function so that we can calculate the expressions on-demand for any choice of i def find_cp_cm(lapse,shifti,gupii): # Inputs: u0,vi,lapse,shift,gammadet,gupii # Outputs: cplus,cminus # a = 1/(alpha^2) a = 1/(lapse*lapse) # b = 2 beta^i / alpha^2 b = 2 * shifti /(lapse*lapse) # c = -g^{ii} + (beta^i)^2 / alpha^2 c = - gupii + shifti*shifti/(lapse*lapse) # Now, we are free to solve the quadratic equation as usual. We take care to avoid passing a # negative value to the sqrt function. 
detm = b*b - 4*a*c detm = sp.sqrt(sp.Rational(1,2)*(detm + nrpyAbs(detm))) global cplus,cminus cplus = sp.Rational(1,2)*(-b/a + detm/a) cminus = sp.Rational(1,2)*(-b/a - detm/a) # This function is identical to the one done by Stilde_flux. For more information, see [here](Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb#fluxes). # We'll write this as a function, and call it within HLLE_solver, below. def find_cmax_cmin(flux_dirn,gamma_faceDD,beta_faceU,alpha_face): # Inputs: flux direction flux_dirn, Inverse metric gamma_faceUU, shift beta_faceU, # lapse alpha_face, metric determinant gammadet_face # Outputs: maximum and minimum characteristic speeds cmax and cmin # First, we need to find the characteristic speeds on each face gamma_faceUU,unusedgammaDET = ixp.generic_matrix_inverter3x3(gamma_faceDD) find_cp_cm(alpha_face,beta_faceU[flux_dirn],gamma_faceUU[flux_dirn][flux_dirn]) cpr = cplus cmr = cminus find_cp_cm(alpha_face,beta_faceU[flux_dirn],gamma_faceUU[flux_dirn][flux_dirn]) cpl = cplus cml = cminus # The following algorithms have been verified with random floats: global cmax,cmin # Now, we need to set cmax to the larger of cpr,cpl, and 0 cmax = sp.Rational(1,2)*(cpr+cpl+nrpyAbs(cpr-cpl)) cmax = sp.Rational(1,2)*(cmax+nrpyAbs(cmax)) # And then, set cmin to the smaller of cmr,cml, and 0 cmin = sp.Rational(1,2)*(cmr+cml-nrpyAbs(cmr-cml)) cmin = -sp.Rational(1,2)*(cmin-nrpyAbs(cmin)) # See GRHydro paper for equations (TBA)**TODO** # # Here, we we calculate the flux and state vectors for the electric field. # The flux vector in the $i^{\rm th}$ direction is given as # $$ # F(U) = \epsilon_{ijk} v^j B^k, # $$ # where $\epsilon_{ijk} = [ijk]\sqrt{\gamma}$, $[ijk]$ is the Levi-Civita symbol, $\gamma$ is the determinant of the three-metric, and $v^j = \alpha \bar{v^j} - \beta^j$ is the drift velocity; the state vector in the $i^{\rm th}$ direction is $U = B^i$. # # **TODO** Why is there only one free index in this flux, but two for $\tilde{S}_i$? 
# I think the LC tensor might implicitly contain a loop that had to be explicitly written in the other case?

def calculate_flux_and_state_for_Induction(flux_dirn, gammaDD,betaU,alpha,ValenciavU,BU):
    """Compute the flux and state vector for the induction equation in direction flux_dirn.

    Flux F = epsilon_{ijk} v^j B^k, where epsilon_{ijk} = [ijk] sqrt(gamma) is the
    Levi-Civita tensor and v^j = alpha vbar^j - beta^j is the drift velocity;
    the state vector is U = B^i.

    Inputs : flux_dirn (0,1,2); 3-metric gammaDD; shift betaU; lapse alpha;
             Valencia 3-velocity ValenciavU; magnetic field BU.
    Outputs: globals F (flux) and U (state).
    """
    GRHD.compute_sqrtgammaDET(gammaDD)
    # Build the Levi-Civita *tensor* epsilon_{ijk} = [ijk] sqrt(gamma) from the
    # Levi-Civita *symbol* [ijk].
    # BUGFIX: the original assigned the symbol to LeviCivitaDDD and then
    # immediately overwrote it with ixp.zerorank3(), so every LCijk read below
    # was zero and the entire tensor (hence the flux) vanished identically.
    # Keep the symbol and the tensor in separate variables.
    LeviCivitaSymbolDDD = weyl.define_LeviCivitaSymbol_rank3()
    LeviCivitaDDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                LCijk = LeviCivitaSymbolDDD[i][j][k]
                # BUGFIX: removed a stray ')' that made this line a syntax error.
                LeviCivitaDDD[i][j][k] = LCijk * GRHD.sqrtgammaDET
                # LeviCivitaUUU[i][j][k] = LCijk / sp.sqrt(gammadet)

    global U,F
    # Flux F = \epsilon_{ijk} v^j B^k  (implied sum over j and k)
    F = sp.sympify(0)
    for j in range(3):
        for k in range(3):
            # BUGFIX: accumulate with '+=' — the original '=' overwrote F on
            # every iteration, leaving only the final (j,k) term instead of
            # the full contraction over j and k.
            F += LeviCivitaDDD[flux_dirn][j][k] * (alpha*ValenciavU[j]-betaU[j]) * BU[k]
    # U = B^i
    U = BU[flux_dirn]

# Now, we write a standard HLLE solver based on eq. 3.15 in [the HLLE paper](https://epubs.siam.org/doi/pdf/10.1137/1025002),
# $$
# F^{\rm HLL} = \frac{c_{\rm min} f_{\rm R} + c_{\rm max} f_{\rm L} - c_{\rm min} c_{\rm max} ({\rm st\_x\_r}-{\rm st\_x\_l})}{c_{\rm min} + c_{\rm max}}
# $$

def HLLE_solver(cmax, cmin, Fr, Fl, Ur, Ul):
    """Return the HLLE flux (eq. 3.15 of the HLLE paper) combining the
    right/left fluxes Fr,Fl and states Ur,Ul with characteristic speeds
    cmax,cmin."""
    # st_j_flux = (c_\min f_R + c_\max f_L - c_\min c_\max ( st_j_r - st_j_l )) / (c_\min + c_\max)
    return (cmin*Fr + cmax*Fl - cmin*cmax*(Ur-Ul) )/(cmax + cmin)

# Here, we will use the function we just wrote to calculate the flux through a face. We will pass the reconstructed Valencia 3-velocity and magnetic field on either side of an interface to this function (designated as the "left" and "right" sides) along with the value of the 3-metric, shift vector, and lapse function on the interface. However, unlike when we used this method to calculate the flux term, the RHS of each component of $A_i$ does not depend on all three of the flux directions.
# Instead,

def calculate_E_i_flux(inputs_provided=True,alpha_face=None,gamma_faceDD=None,beta_faceU=None,\
                       Valenciav_rU=None,B_rU=None,Valenciav_lU=None,B_lU=None):
    """Accumulate the HLLE flux contribution to each component of the electric
    field into E_fluxD, given reconstructed right/left face states
    (Valenciav_{r,l}U, B_{r,l}U) and face-interpolated metric quantities
    (alpha_face, gamma_faceDD, beta_faceU).

    Output: global E_fluxD (added to the A_i right-hand sides).
    """
    global E_fluxD
    # We will need to add to this rhs formula in several functions. To avoid overwriting previously written
    # data, we use declarerank1() instead of zerorank1().
    E_fluxD = ixp.declarerank1("A_rhsD",DIM=3)
    for flux_dirn in range(3):
        # BUGFIX: find_cmax_cmin() was originally called *before* this loop,
        # where flux_dirn is not yet defined (a NameError at runtime). The
        # characteristic speeds depend on the flux direction, so they must be
        # recomputed inside the loop for each direction.
        find_cmax_cmin(flux_dirn,gamma_faceDD,beta_faceU,alpha_face)
        # Right state: flux Fr and state Ur
        calculate_flux_and_state_for_Induction(flux_dirn, gamma_faceDD,beta_faceU,alpha_face,\
                                               Valenciav_rU,B_rU)
        Fr = F
        Ur = U
        # Left state: flux Fl and state Ul
        calculate_flux_and_state_for_Induction(flux_dirn, gamma_faceDD,beta_faceU,alpha_face,\
                                               Valenciav_lU,B_lU)
        Fl = F
        Ul = U
        E_fluxD[flux_dirn] += HLLE_solver(cmax, cmin, Fr, Fl, Ur, Ul)

# Below are the gridfunction registrations we will need for testing. We will pass these to the above functions to self-validate the module that corresponds with this tutorial.

# +
# We will pass values of the gridfunction on the cell faces into the function. This requires us
# to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix.
alpha_face = gri.register_gridfunctions("AUXEVOL","alpha_face")
gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01")
beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU")
# We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU
# on the right and left faces
Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3)
B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3)
Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3)
B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3)
in_progress/Tutorial-GiRaFFE_NRPy-Induction_Equation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Lecture 5 - Convolutional neural networks # > A deep dive into convolutional neural networks for jet tagging # ## Learning objectives # # * Know how to implement and train a convolutional neural network in PyTorch # * Learn how to debug unstable training runs with activation statistics # * Understand what the 1-cycle training policy is # * Understand what batch normalization is and how to incorporate it into a CNN # ## References # # * Chapter 13 of [_Deep Learning for Coders with fastai & PyTorch_](https://github.com/fastai/fastbook) by <NAME> and <NAME>. # * Chapter 11 of [_Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow_](https://www.amazon.com/Hands-Machine-Learning-Scikit-Learn-TensorFlow/dp/1492032646/ref=sr_1_1?crid=29Z1GSCOWEPDV&keywords=hands+on+machine+learning+with+scikit-learn+and+tensorflow+2&qid=1653288575&sprefix=hands+on+ma%2Caps%2C160&sr=8-1) by <NAME> # ## Setup # + # Uncomment and run this cell if using Colab, Kaggle etc # # %pip install fastai==2.6.0 datasets # - # ## Imports from datasets import load_dataset from fastai.callback.hook import * from fastai.vision.all import * from sklearn.metrics import accuracy_score from torch.utils.data import TensorDataset from torchvision.transforms import ToTensor # + import datasets # Suppress logs to keep things tidy datasets.logging.set_verbosity_error() # - # ## Loading the data # Last lecture we fine-tuned a pretrained CNN on the top tagging dataset, where each jet was represented as a 2D image of "energy pixels". Today, we'll take a look at training our own CNN from scratch, and explore some of the techniques that fastai utilizes to stabilise the training of these neural nets. 
To get started, let's download the same dataset of jet images from last lecture: # + images_ds = load_dataset("dl4phys/top_tagging_images") # Peek at one example plt.imshow(images_ds["train"][0]["image"]); # - # As we saw last lecture, we need to convert these PIL images into PyTorch tensors, so let's reuse the same helper function to create a training and validation set to experiment with: def get_dataset(dataset, num_examples=None): if num_examples is not None: dataset = dataset.shuffle(seed=42).select(range(num_examples)) x = torch.cat([ToTensor()(img) for img in dataset["image"]]).unsqueeze(1) y = torch.cat([torch.tensor(l).unsqueeze(0) for l in dataset["label"]]) return TensorDataset(x, y) # With this function, we can now generate a sample of jets as follows: # Lower the training size if Colab RAM explodes 💣 train_ds = get_dataset(images_ds["train"], num_examples=350_000) valid_ds = get_dataset(images_ds["validation"], num_examples=35_000) # Since we'll be experimenting a bit with the batch size in this lecture, we'll also implement a helper function to return the dataloaders associated with these datasets: def get_dls(bs=128): train_dl = DataLoader(train_ds, bs=bs, shuffle=True) valid_dl = DataLoader(valid_ds, bs=bs) return DataLoaders(train_dl, valid_dl) # Now that we have this function, we can get the dataloaders and grab a batch of data to test with: # + dls = get_dls() xb, yb = first(dls.train) xb.shape, yb.shape # - # ## Creating a CNN # Recall in lecture 3, that we created a neural network for $N$-subjettiness features. We used the `nn.Sequential()` class to create a network that involved stacking fully-connected layers and ReLU activation functions: # + model = nn.Sequential( nn.Linear(20, 200), nn.ReLU(), nn.Linear(200, 2), ) model # - # In this lecture, we'll take a similar approach, but this time using _convolutional layers_ instead of linear ones. In PyTorch, convolutional layers are created by using the `nn.Conv2d()` module. 
Let's try replacing the `nn.Linear()` layers in our previous architecture with `nn.Conv2d()` ones instead: broken_cnn = nn.Sequential( nn.Conv2d(1, 30, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(30, 1, kernel_size=3, padding=1), ) # Note that unlike linear layers, convolutional layers don't require us to specify the number of features in the input. That's because convolutions are applied across each pixel automatically, so the weights only depend on the number of channels and the kernel size. # # Let's now see what happens if we pass a minibatch of data through this model: # Feed a minibatch to the model outputs = broken_cnn(xb) outputs.shape # Hmm, this output isn't quite what we want for classification: we need a tensor of shape `(batch_size, num_classes)` but instead have a $40\times 40$ map of activations. The standard way to handle this is to apply a sequence of _stride-2 convolutions_, which decrease the size of the outputs so that the final layer size is 1. To see why this is the case, recall that for an image of height $n_H$ and width $n_W$, the dimension of the output activation map is given by: # # $$\left( \left\lfloor\frac{n_H + 2p - k}{s} + 1 \right\rfloor , \left\lfloor\frac{n_W + 2p - k}{s} + 1 \right\rfloor \right) \,,$$ # # where $p$ is the padding, $k$ the kernel of size $k\times k$, and $s$ the stride. For fixed $p,f$ and $s>1$, we can see that the dimension of the activation map is shrunk with each convolutional layer. # # With this in mind, let's create a stack of stride-2 convolutional layers with $3\times 3$ kernels. We'll intersperse each convolutional layer with a ReLU activation, so let's write a helper function that returns the two together: def conv(ni, nf, ks=3, act=True): layer = nn.Conv2d(ni, nf, stride=2, kernel_size=ks, padding=ks // 2) if act: layer = nn.Sequential(layer, nn.ReLU()) return layer # As discussed in the fastai book, it's good practice to increase the number of output features `nf` with each convolutional layer. 
That's because, stride-2 convolutions decrease the size of the activation map, so we increase the number of output features to avoid compressing the capacity of our model. We also set the kernel size `ks` to the typical value of $3\times 3$, and set the padding to $ks//2$ to ensure the output activation map is the same shape as the input image. # # We can now build a CNN by stacking the `conv()` function until we reach a $1\times 1$ activation map: # + simple_cnn = nn.Sequential( conv(1, 4), # 20x20 conv(4, 8), # 10x10 conv(8, 16), # 5x5 conv(16, 32), # 3x3 conv(32, 16), # 2x2 conv(16, 2, act=False), # 1x1 Flatten(), ) learn = Learner( dls, simple_cnn, metrics=[accuracy, RocAucBinary()], loss_func=F.cross_entropy ) learn.summary() # - # Since the final convolutional layer will be a tensor of shape $128\times2\times1\times1$, we've stripped off those final $1\times 1$ axes via the `Flatten()` module. We can now verify that the output of this model is in a form suitable for classification: simple_cnn(xb).shape # Okay, this looks good, so let's find a good learning rate and train for a few epochs: learn.lr_find() learn.fit(3, 3e-3) # This model is much worse than the pretrained ResNet34 model that we fine-tuned in the previous lecture. We can also see that we've started overfitting quite strongly after the first epoch (because the training and validation losses are diverging). Let's now see if we can make the model more accurate and train it even faster by using some of the techniques that the factory learners (like `vision_learner()`) provide under the hood. # ## Debugging training with activation statistics # One way to train a more accurate CNN is simply double the number of filters in each stride-2 layer. However, increasing the number of filters also requires us to increase the kernel size. For example, if our first layer increases the number of filters from 4 to 8, then a $3 \times 3$ kernel will not force the CNN to learn any useful features. 
To handle that, we'll increase the kernel size to $5\times 5$ and adjust the CNN architecture as follows: def simple_cnn(): return nn.Sequential( conv(1, 8, ks=5), # 20x20 conv(8, 16), # 10x10 conv(16, 32), # 5x5 conv(32, 64), # 3x3 conv(64, 128), # 2x2 conv(128, 2, act=False), # 1x1 Flatten(), ).to("cuda") # Since we'll be training this network in several ways, let's wrap the creation of the `Learner` and training in a `fit()` function: def fit(epochs=1, lr=0.06): learn = Learner( dls, simple_cnn(), metrics=[accuracy, RocAucBinary()], loss_func=F.cross_entropy, cbs=ActivationStats(with_hist=True), ) learn.fit(epochs, lr) return learn # To give a sense for what can go wrong when training CNNs, we've used a much larger learning rate than before. We've also use the `ActivationStats()` callback, which records the statistics of the activations in each layer - this will be handy for debugging. Let's see how well this model performs after 1 epoch: learn = fit() # Okay, it seems the large learning rate has given us a model than doesn't perform better than random chance. To diagnose the problem, we can use the `plot_layer_stats()` method that the `ActivationStats` callback has tracked. For example, here are the mean and standard deviation of the activations in the first layer: learn.activation_stats.plot_layer_stats(0) # Here we can see that the mean and standard deviation have a somewhat dramatic spike / drop in the early stages of training. A bigger problem is that the fraction of activations that are near zero is almost 1 after a few iterations. This is a problem because activations that are zero in one layer, will propagate zeros to the next layer, and so on. As a result, these parts of the network are effectively turned off, which makes the learning process suboptimal. 
# # Let's now have a look at the last convolutional layer in the CNN: learn.activation_stats.plot_layer_stats(-2) # Here we see a similar pattern, with the fraction of near zero activations reaching almost 1 in fewer iterations. Let's see if we can improve the situation by increasing the batch size. # ### Increasing the batch size # If your GPU can handle it, increasing the batch size is one technique that can sometimes stabilise training. This is because a larger batch size implies more accurate gradients, so SGD can potentially train more efficiently. Let's try increasing our batch size by a factor of 4, training the model and investigating the activation statistics: dls = get_dls(512) learn = fit() learn.activation_stats.plot_layer_stats(-2) # Okay, increasing the batch size hasn't helped much. Clearly the problem is that our learning rate is too large, which is causing training to diverge. One way to handle this is to use a _dynamic_ learning rate that is adjusted from low to high, and back to low values during training itself! Let's take a look at 1-cycle training and finally understand what's going on when we call `Learner.fit_one_cycle()`. # ### 1-cycle training # The basic idea behind 1-cycle training is to split training into two phases: # # * **Warmup:** grow the learning rate from some minimum to maximum value. By starting with a small learning rate, we can avoid hopping out of a local minimum in the loss and diverging. # * **Annealing:** decrease the learning rate from the maximum value to the minimum. By gradually lowering the learning rate, we avoid skipping a good local minimum towards the end of training. # # This technique is one of the main aspects that allows fastai to train fast and accurate models.
As we've done in previous lectures, we can use the `fit_one_cycle()` method, so let's adjust our `fit()` function and train again: # + def fit(epochs=1, lr=0.06): learn = Learner( dls, simple_cnn(), metrics=[accuracy, RocAucBinary()], loss_func=F.cross_entropy, cbs=ActivationStats(with_hist=True), ) learn.fit_one_cycle(epochs, lr) return learn learn = fit() # - # Nice, we've finally got a model that performs better than random! All fastai `Learner`s come with a `Recorder()` callback, which tracks various aspect of training. Here we can use it to inspect the evolution of the learning rate: learn.recorder.plot_sched() # Here we can see the learning rate increases linearly until the maximum value, before being annealed with a cosine function. The second plot refers to a hyperparameter called _momentum_, which takes values between $[0,1]$ and is often denoted by $\beta$. This hyperparameter belongs to a technique called _momentum optimization_, which extends SGD to speed up training. Recall that in SGD, we update our weights and biases according to the following update rule: # # $$ \theta \to \theta' = \theta - \eta \nabla_\theta L(\theta) .$$ # # One problem with this rule, is that if the gradients are small in some region, then the subsequent updates will also be small and training can be slow. To deal with this, momentum optimization uses an _exponentially weighted average_ to modify the update rule: # # \begin{align} # \mathbf{m}'&= \beta\mathbf{m} + (1-\beta)\nabla_\theta L(\theta) \\ # \theta' &= \theta - \eta\mathbf{m}' # \end{align} # # Intuitively, the momentum vector $\mathbf{m}$ is storing a running average of past gradient values, and this enables the update step to pick directions based on these averages. The result is that SGD with momentum allows us to traverse the loss landscape much faster (especially through plateaus). 
# # Let's now have a look at the activation statistics in the final `conv()` layer: learn.activation_stats.plot_layer_stats(-2) # Okay, we've getting somewhat better, but we still have quite a few activations getting close to zero towards the end of training. Let's look at one last technique that can help us deal with this. # ### Batch normalization # A very effective technique to deal with vanishing activations in training is to use _batch normalization_ (or batchnorm for short). This techniuqe tries to maintain a good distribution of activations during training by applying a normalization operation just before or after each hidden layer. The way this works is to zero-center and normalize the activations in each layer by introducing two new parameters $\gamma$ and $\beta$ (not the same $\beta$ from momentum!). The parameters are used to learn the optimal scale of the inputs across each layer, and the batchnorm operation can be summarised in the following equations: # # \begin{align} # \mathbf{\mu}_B &= \frac{1}{m_B} \sum_i \mathbf{x}^{(i)} \\ # \sigma_B^2 &= \frac{1}{m_B} \sum_i \left(\mathbf{x}^{(i)} - \mathbf{\mu}_B\right)^2 \\ # \hat{\mathbf{x}}^{(i)} &= \frac{\mathbf{x}^{(i)} - \mathbf{\mu}_B}{\sqrt{\mathbf{\sigma}_B^2 + \epsilon}} \\ # \mathbf{z}^{(i)} &= \mathbf{\gamma} \odot \hat{\mathbf{x}}^{(i)} + \mathbf{\beta} # \end{align} # # These statistics make it easier to train models, since we don't have to enforce a global normalization on the data (as we did with the `MinMaxScaler()` in previous lectures). # # Implementing batchnorm in PyTorch is rather simple: we just add a `nn.BatchNorm2d()` layer after each convolution: def conv(ni, nf, ks=3, act=True): layers = [nn.Conv2d(ni, nf, stride=2, kernel_size=ks, padding=ks // 2)] layers.append(nn.BatchNorm2d(nf)) if act: layers.append(nn.ReLU()) return nn.Sequential(*layers) # Let's now train the model again with 1-cycle policy training: learn = fit() # Nice! 
This is a pretty great result after just one epoch of training, and gets us quite close to the results quoted for CNNs in the top tagging review paper. Let's also inspect the activation statistics: learn.activation_stats.plot_layer_stats(-2) # This is looking much better: the activations evolve smoothly and we've managed to prevent half of the activations from vanishing. Since batchnorm claims to work with large learning rates, let's ramp this up to 0.1 and see what we get: learn = fit(10, lr=0.1) # Great, this has given us a small boost and only took a few minutes to train a CNN from scratch! # ## Exercises # # * Implement the same CNN architecture from the review. Can you get close to their results? # * Read the [1-cycle policy training paper](https://arxiv.org/abs/1708.07120)
lecture05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
"""First-order pharmacokinetics grapher.

One-compartment model with first-order elimination: C(t) = C0 * exp(-k*t).

NOTE(review): this is only useful if you already know the basic
parameters (Vd, k-value, and initial dose); without them the functions
below cannot produce meaningful output.
"""
import math

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation  # kept: imported by the original notebook

# Model parameters.
k = 0.05343050185336745310134353270769  # elimination rate constant, Time^-1
Vd = 1                                  # volume of distribution, liters
hLife = math.log(2) / k                 # half-life follows directly from k
initDose = 50                           # initial dose, mg
c0 = initDose / Vd                      # initial concentration, mg/L

x = list(range(48))  # sampling times, hours


def concByTime(times):
    """Return the list of concentrations C(t) for each time in *times*.

    Pure function: unlike the original it no longer appends to
    module-level lists, so calling it twice does not duplicate data.
    """
    return [c0 * math.exp(-k * t) for t in times]


y = concByTime(x)
yset = y                          # kept for backward compatibility with the old globals
ylog = [math.log(v) for v in y]   # natural log of each concentration

print('Initial Dose: ', initDose)
print('\n')
print('Volume of Distribution: ', Vd)
print('\n')
print('k-value is...: ', k)
print('\n')
print('Half life is... ', np.round(hLife, 3), 'hours')
print('\n')
print('hours logged: ', x)
print('\n')
# BUG FIX: y holds raw concentrations, not ln(conc); label corrected.
print('conc per time: ', np.round(y, 2))


def dictionarizer(time, value):
    """Print one (hour, concentration) pair per sample.

    Returns the string 'end of list' (kept: callers print the return
    value as an end-of-output marker).
    """
    dic = {hr: np.round(value[hr], 3) for hr in range(len(time))}
    for item in dic.items():
        print(item)
    return 'end of list'


print('\n')
XYcomp = dictionarizer(x, y)
print(XYcomp)

fig = plt.figure(figsize=(10, 6))
plt.plot(x, y, '*', linestyle='--')
plt.xlabel('time in hours')
# BUG FIX: these axes carry the raw concentration curve (and the ln curve
# below); the old label claimed the whole axis was ln(concentration).
plt.ylabel('concentration (mg/L)')
plt.title('Drug Concentration In First Order Reaction')
plt.plot(x, ylog, 's', markersize=2, linestyle='-')
plt.axis([0, np.max(x), 0, np.max(y)])
plt.show()
First Order PK Grapher.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: JuliaPro_v1.5.1-1 1.5.1
#     language: julia
#     name: juliapro_v1.5.1-1-1.5
# ---

# # Activation function options for a single neuron
# > Coding of activation functions commonly used in deep learning to regulate the output of the basic processing unit (the neuron).
#
# - toc: true
# - badges: true
# - hide_binder_badge: true
# - hide_colab_badge: true
# - hide_github_badge: true
# - comments: true
# - categories: [deeplearning, Julia]
# - hide: false
# - search_exclude: true
# - author: Omer

# In this post, I am going to discuss how to implement the activation function of a neuron using simple Julia code. A neuron is the basic processing unit of any deep learning architecture. It receives two weighted inputs (x and b), adds them together, and outputs the value (y). This value is then passed through an activation function (A).
#
# As the name implies, an activation function allows the neuron's output to propagate to the next stage (another neuron) by mapping y to A. There are different reasons to want this *activated output*:
# - to keep it in a specific range [low high]
# - to keep it positive
# - to avoid having larger values
#
# Over the years, researchers have come up with many activation functions. However, here we will be discussing the most commonly used ones:
# 1. __Sigmoid__
# 2. __Tanh__
# 3. __RelU__
# 4. __Softmax__
#
# The top three functions are used in intermediate-layer neurons (i.e., everywhere except the input and output layers). __Softmax__ is usually employed in the output layer.
# # We mathematically define our activation function as
#
# \begin{equation*}
# A = \theta(y)
# \end{equation*}
#
# where,
# $\theta(y)$ represents the chosen activation function and
# $A$ represents the activated output that will be fed to the next-stage neuron
#
# > Note: The following code is tested in Julia 1.5 for functionality

# ## Sigmoid
# - small changes in input lead to small changes in output (activation)
# - extreme changes in input lead to extreme changes in output (activation)
# - activated output range [0 1]
# \begin{equation*}
# \theta(y) = \frac{1}{1+e^{-y}}
# \end{equation*}
#
# A single line function in Julia can be written as follows.

# Logistic sigmoid, rounded to 3 digits. exp(-y) saturates to Inf for very
# negative y, which still yields the correct limit 1/(1+Inf) == 0.0.
sigmoid(y) = round(1 / (1 + exp(-y)), digits=3)

# > Important: The `round` function is just used to limit the result to 3 significant digits. It's not necessary to achieve the functionality of sigmoid and is merely used for display convenience.
#
# Trying out different values of $y$, we can see that the activated output is always positive and never goes beyond 1 (upper limit).

# BUG FIX: the printed labels now match the arguments actually passed
# (the old text claimed y = 0.0001 / 1000 while calling 0.00001 / 10000).
println("Sigmoid with w1.x+ w0.b = y = 0.00001: $(sigmoid(0.00001))")
println("Sigmoid with w1.x+ w0.b = y = 10000 : $(sigmoid(10000))")
println("Sigmoid with w1.x+ w0.b = y = -10 : $(sigmoid(-10))")
println("Sigmoid with w1.x+ w0.b = y = -100 : $(sigmoid(-100))")
println("Sigmoid with w1.x+ w0.b = y = -2 : $(sigmoid(-2))")

# > Important: The `$` sign resolves the arguments. It is a convenient way to use variables and functions inside string arguments of the `println` function
#
# ## Tanh
# - activated output range [-1 1]
# \begin{equation*}
# \theta(y) = \frac{e^{y} - e^{-y}}{e^{y} + e^{-y}}
# \end{equation*}

# NOTE(review): this deliberately shadows Base.tanh in Main (as the
# original post did). BUG FIX: the naive formula overflows to
# Inf/Inf == NaN once |y| exceeds ~710; this algebraically equivalent
# form only exponentiates non-positive arguments, so it never overflows.
function tanh(y)
    t = exp(-2 * abs(y))  # in (0, 1]
    round(sign(y) * (1 - t) / (1 + t), digits=3)
end

# Here again, we can see that by choosing a tanh activation function, the activated output is in the range between [-1, 1].
#collapse-hide
# BUG FIX: labels now match the arguments actually passed
# (the old text claimed y = 0.0001 / 1000 while calling 0.00001 / 100).
println("tanh with w1.x+ w0.b = y = 0.00001: $(tanh(0.00001))")
println("tanh with w1.x+ w0.b = y = 100 : $(tanh(100))")
println("tanh with w1.x+ w0.b = y = -10 : $(tanh(-10))")
println("tanh with w1.x+ w0.b = y = -100 : $(tanh(-100))")
println("tanh with w1.x+ w0.b = y = -2 : $(tanh(-2))")

# ## ReLu
# Rectified linear unit is the most commonly used activation function in deep learning architectures (CNN, RNN, etc.). It is mathematically defined as shown below, with the activation range of [0 z]
#
# \begin{equation*}
# \theta(y) = max(0,y)
# \end{equation*}
#

# Rounded to 3 digits for consistency with sigmoid/tanh above
# (the original rounded to 1 digit; displayed values are unchanged).
relu(y) = round(max(0, y), digits=3)

# As we see, this function simply rectifies the activated output when $y$ is negative

#collapse-hide
println("Relu with w1.x+ w0.b = y = 0.00001: $(relu(0.00001))")
println("Relu with w1.x+ w0.b = y = 100 : $(relu(100))")
println("Relu with w1.x+ w0.b = y = -10 : $(relu(-10))")
println("Relu with w1.x+ w0.b = y = -100 : $(relu(-100))")
println("Relu with w1.x+ w0.b = y = -2 : $(relu(-2))")

# ## Softmax
#
# As mentioned before, __softmax__ is usually employed in the output layer. As an example, if there are 3 neurons in the output layer, softmax will indicate which of the three neurons has the highest activated output. This is usually done to decide the categorical output in response to an input __X__ to our neural network.
#
# Let us first define the activation function for a single neuron $i$ as
#
# \begin{equation*}
# \theta(y_{i}) = e^{y_{i}} ~~~~~~~~~~~~~~~~~~~~~~~(1)
# \end{equation*}
#
# We then normalize the activated output of each neuron by the combined activation of all the $M$ neurons.
# # \begin{equation*}
# \theta(y_{i}) = \frac {e^{y_{i}}} {\sum_{j=1}^{M} e^{y_{j}}} ~~~~~~~~~~~~~~~~~~~~~~~(2)
# \end{equation*}
#
# for $i=1...M$
#
# afterwards, we simply select the neuron with the largest normalized activated output
#

# Softmax over a vector of activations.
# If `win` is true, returns the index of the winning neuron instead of
# the full probability vector.
function softmax(y, win=false)
    # BUG FIX: subtract the maximum before exponentiating. The shifted
    # form is mathematically identical after normalization (eq-2) but
    # cannot overflow to Inf/NaN for large activations.
    m = maximum(y)

    # exp for each individual neuron (eq-1, shifted by the max)
    weights = [exp(v - m) for v in y]

    # normalize each neuron's output by the total (eq-2);
    # dot syntax applies the operation element-wise
    probs = round.(weights ./ sum(weights), digits=3)

    if win  # need the winning-neuron index
        val, id = findmax(probs)
        return id
    end
    return probs
end

# Here we show an example of 3 neurons in the output layer

# +
#collapse-hide
println("Softmax with w1.x+ w0.b = y = [-1,1,5] : $(softmax([-1,1,5]))")
println("Softmax with w1.x+ w0.b = y = [0,2,1] : $(softmax([0,2,1] ))")
println("Softmax with w1.x+ w0.b = y = [-10,1,5]: $(softmax([-10,1,5]))")
println("Softmax with w1.x+ w0.b = y = [5,1,5] : $(softmax([5,1,5]))")
println("Softmax with w1.x+ w0.b = y = [3,5,0] : $(softmax([3,5,0]))")

println("\n\n Softmax with argmax to select the winning neuro in output \n")

println("Softmax with w1.x+ w0.b = y = [-1,1,5] : $(softmax([-1,1,5],true))")
println("Softmax with w1.x+ w0.b = y = [0,2,1] : $(softmax([0,2,1],true ))")
println("Softmax with w1.x+ w0.b = y = [-10,1,5]: $(softmax([-10,1,5],true))")
println("Softmax with w1.x+ w0.b = y = [5,1,5] : $(softmax([5,1,5],true))")
println("Softmax with w1.x+ w0.b = y = [3,5,0] : $(softmax([3,5,0],true))")
_notebooks/2020-09-21-activatation-function-neuron.ipynb
// -*- coding: utf-8 -*- // # Automatic generation of Notebook using PyCropML // This notebook implements a crop model. // ### Domain Class PhenologyAuxiliary // + #include "PhenologyAuxiliary.h" PhenologyAuxiliary::PhenologyAuxiliary() { } string PhenologyAuxiliary::getcurrentdate() {return this-> currentdate; } float PhenologyAuxiliary::getcumulTT() {return this-> cumulTT; } float PhenologyAuxiliary::getdayLength() {return this-> dayLength; } float PhenologyAuxiliary::getdeltaTT() {return this-> deltaTT; } float PhenologyAuxiliary::getgAI() {return this-> gAI; } float PhenologyAuxiliary::getpAR() {return this-> pAR; } float PhenologyAuxiliary::getgrainCumulTT() {return this-> grainCumulTT; } float PhenologyAuxiliary::getfixPhyll() {return this-> fixPhyll; } float PhenologyAuxiliary::getcumulTTFromZC_39() {return this-> cumulTTFromZC_39; } float PhenologyAuxiliary::getcumulTTFromZC_91() {return this-> cumulTTFromZC_91; } float PhenologyAuxiliary::getcumulTTFromZC_65() {return this-> cumulTTFromZC_65; } void PhenologyAuxiliary::setcurrentdate(string _currentdate) { this->currentdate = _currentdate; } void PhenologyAuxiliary::setcumulTT(float _cumulTT) { this->cumulTT = _cumulTT; } void PhenologyAuxiliary::setdayLength(float _dayLength) { this->dayLength = _dayLength; } void PhenologyAuxiliary::setdeltaTT(float _deltaTT) { this->deltaTT = _deltaTT; } void PhenologyAuxiliary::setgAI(float _gAI) { this->gAI = _gAI; } void PhenologyAuxiliary::setpAR(float _pAR) { this->pAR = _pAR; } void PhenologyAuxiliary::setgrainCumulTT(float _grainCumulTT) { this->grainCumulTT = _grainCumulTT; } void PhenologyAuxiliary::setfixPhyll(float _fixPhyll) { this->fixPhyll = _fixPhyll; } void PhenologyAuxiliary::setcumulTTFromZC_39(float _cumulTTFromZC_39) { this->cumulTTFromZC_39 = _cumulTTFromZC_39; } void PhenologyAuxiliary::setcumulTTFromZC_91(float _cumulTTFromZC_91) { this->cumulTTFromZC_91 = _cumulTTFromZC_91; } void PhenologyAuxiliary::setcumulTTFromZC_65(float _cumulTTFromZC_65) { 
this->cumulTTFromZC_65 = _cumulTTFromZC_65; } // - // ### Domain Class PhenologyRate #include "PhenologyRate.h" PhenologyRate::PhenologyRate() { } // ### Domain Class PhenologyState // + #include "PhenologyState.h" PhenologyState::PhenologyState() { } float PhenologyState::getptq() {return this-> ptq; } string PhenologyState::getcurrentZadokStage() {return this-> currentZadokStage; } int PhenologyState::gethasFlagLeafLiguleAppeared() {return this-> hasFlagLeafLiguleAppeared; } int PhenologyState::gethasZadokStageChanged() {return this-> hasZadokStageChanged; } vector<float>& PhenologyState::getlistPARTTWindowForPTQ() {return this-> listPARTTWindowForPTQ; } int PhenologyState::gethasLastPrimordiumAppeared() {return this-> hasLastPrimordiumAppeared; } vector<float>& PhenologyState::getlistTTShootWindowForPTQ() {return this-> listTTShootWindowForPTQ; } vector<float>& PhenologyState::getlistTTShootWindowForPTQ1() {return this-> listTTShootWindowForPTQ1; } vector<string>& PhenologyState::getcalendarMoments() {return this-> calendarMoments; } float PhenologyState::getcanopyShootNumber() {return this-> canopyShootNumber; } vector<string>& PhenologyState::getcalendarDates() {return this-> calendarDates; } vector<int>& PhenologyState::getleafTillerNumberArray() {return this-> leafTillerNumberArray; } float PhenologyState::getvernaprog() {return this-> vernaprog; } float PhenologyState::getphyllochron() {return this-> phyllochron; } float PhenologyState::getleafNumber() {return this-> leafNumber; } int PhenologyState::getnumberTillerCohort() {return this-> numberTillerCohort; } vector<float>& PhenologyState::gettilleringProfile() {return this-> tilleringProfile; } float PhenologyState::getaverageShootNumberPerPlant() {return this-> averageShootNumberPerPlant; } float PhenologyState::getminFinalNumber() {return this-> minFinalNumber; } float PhenologyState::getfinalLeafNumber() {return this-> finalLeafNumber; } float PhenologyState::getphase() {return this-> phase; } 
vector<float>& PhenologyState::getlistGAITTWindowForPTQ() {return this-> listGAITTWindowForPTQ; } vector<float>& PhenologyState::getcalendarCumuls() {return this-> calendarCumuls; } float PhenologyState::getgAImean() {return this-> gAImean; } float PhenologyState::getpastMaxAI() {return this-> pastMaxAI; } int PhenologyState::getisMomentRegistredZC_39() {return this-> isMomentRegistredZC_39; } void PhenologyState::setptq(float _ptq) { this->ptq = _ptq; } void PhenologyState::setcurrentZadokStage(string _currentZadokStage) { this->currentZadokStage = _currentZadokStage; } void PhenologyState::sethasFlagLeafLiguleAppeared(int _hasFlagLeafLiguleAppeared) { this->hasFlagLeafLiguleAppeared = _hasFlagLeafLiguleAppeared; } void PhenologyState::sethasZadokStageChanged(int _hasZadokStageChanged) { this->hasZadokStageChanged = _hasZadokStageChanged; } void PhenologyState::setlistPARTTWindowForPTQ(vector<float>& _listPARTTWindowForPTQ){ this->listPARTTWindowForPTQ = _listPARTTWindowForPTQ; } void PhenologyState::sethasLastPrimordiumAppeared(int _hasLastPrimordiumAppeared) { this->hasLastPrimordiumAppeared = _hasLastPrimordiumAppeared; } void PhenologyState::setlistTTShootWindowForPTQ(vector<float>& _listTTShootWindowForPTQ){ this->listTTShootWindowForPTQ = _listTTShootWindowForPTQ; } void PhenologyState::setlistTTShootWindowForPTQ1(vector<float>& _listTTShootWindowForPTQ1){ this->listTTShootWindowForPTQ1 = _listTTShootWindowForPTQ1; } void PhenologyState::setcalendarMoments(vector<string>& _calendarMoments){ this->calendarMoments = _calendarMoments; } void PhenologyState::setcanopyShootNumber(float _canopyShootNumber) { this->canopyShootNumber = _canopyShootNumber; } void PhenologyState::setcalendarDates(vector<string>& _calendarDates){ this->calendarDates = _calendarDates; } void PhenologyState::setleafTillerNumberArray(vector<int>& _leafTillerNumberArray){ this->leafTillerNumberArray = _leafTillerNumberArray; } void PhenologyState::setvernaprog(float _vernaprog) { 
this->vernaprog = _vernaprog; } void PhenologyState::setphyllochron(float _phyllochron) { this->phyllochron = _phyllochron; } void PhenologyState::setleafNumber(float _leafNumber) { this->leafNumber = _leafNumber; } void PhenologyState::setnumberTillerCohort(int _numberTillerCohort) { this->numberTillerCohort = _numberTillerCohort; } void PhenologyState::settilleringProfile(vector<float>& _tilleringProfile){ this->tilleringProfile = _tilleringProfile; } void PhenologyState::setaverageShootNumberPerPlant(float _averageShootNumberPerPlant) { this->averageShootNumberPerPlant = _averageShootNumberPerPlant; } void PhenologyState::setminFinalNumber(float _minFinalNumber) { this->minFinalNumber = _minFinalNumber; } void PhenologyState::setfinalLeafNumber(float _finalLeafNumber) { this->finalLeafNumber = _finalLeafNumber; } void PhenologyState::setphase(float _phase) { this->phase = _phase; } void PhenologyState::setlistGAITTWindowForPTQ(vector<float>& _listGAITTWindowForPTQ){ this->listGAITTWindowForPTQ = _listGAITTWindowForPTQ; } void PhenologyState::setcalendarCumuls(vector<float>& _calendarCumuls){ this->calendarCumuls = _calendarCumuls; } void PhenologyState::setgAImean(float _gAImean) { this->gAImean = _gAImean; } void PhenologyState::setpastMaxAI(float _pastMaxAI) { this->pastMaxAI = _pastMaxAI; } void PhenologyState::setisMomentRegistredZC_39(int _isMomentRegistredZC_39) { this->isMomentRegistredZC_39 = _isMomentRegistredZC_39; } // - // ### Model Updatecalendar // + #define _USE_MATH_DEFINES #include <cmath> #include <iostream> # include<vector> # include<string> # include<numeric> # include<algorithm> # include<array> #include <map> # include <tuple> #include "Updatecalendar.h" using namespace std; Updatecalendar::Updatecalendar() { } void Updatecalendar::Calculate_Model(PhenologyState& s, PhenologyState& s1, PhenologyRate& r, PhenologyAuxiliary& a) { //- Name: UpdateCalendar -Version: 1.0, -Time step: 1 //- Description: // * Title: Calendar Model // * Author: 
<NAME> // * Reference: Modeling development phase in the // Wheat Simulation Model SiriusQuality. // See documentation at http://www1.clermont.inra.fr/siriusquality/?page_id=427 // * Institution: INRA Montpellier // * Abstract: Lists containing for each stage the date it occurs as well as a copy of all types of cumulated thermal times //- inputs: // * name: cumulTT // ** description : cumul thermal times at current date // ** variablecategory : auxiliary // ** datatype : DOUBLE // ** min : -200 // ** max : 10000 // ** default : 741.510096671757 // ** unit : °C d // ** inputtype : variable // * name: calendarMoments // ** description : List containing apparition of each stage // ** variablecategory : state // ** datatype : STRINGLIST // ** default : ['Sowing'] // ** unit : // ** inputtype : variable // * name: calendarDates // ** description : List containing the dates of the wheat developmental phases // ** variablecategory : state // ** datatype : DATELIST // ** default : ['2007/3/21'] // ** unit : // ** inputtype : variable // * name: calendarCumuls // ** description : list containing for each stage occured its cumulated thermal times // ** variablecategory : state // ** datatype : DOUBLELIST // ** default : [0.0] // ** unit : °C d // ** inputtype : variable // * name: currentdate // ** description : current date // ** variablecategory : auxiliary // ** datatype : DATE // ** default : 2007/3/27 // ** unit : // ** inputtype : variable // * name: phase // ** description : the name of the phase // ** variablecategory : state // ** datatype : DOUBLE // ** min : 0 // ** max : 7 // ** default : 1 // ** unit : // ** inputtype : variable //- outputs: // * name: calendarMoments // ** description : List containing apparition of each stage // ** variablecategory : state // ** datatype : STRINGLIST // ** unit : // * name: calendarDates // ** description : List containing the dates of the wheat developmental phases // ** variablecategory : state // ** datatype : DATELIST // 
** unit : // * name: calendarCumuls // ** description : list containing for each stage occured its cumulated thermal times // ** variablecategory : state // ** datatype : DOUBLELIST // ** unit : °C d float cumulTT = a.getcumulTT(); vector<string> calendarMoments = s.getcalendarMoments(); vector<string> calendarDates = s.getcalendarDates(); vector<float> calendarCumuls = s.getcalendarCumuls(); string currentdate = a.getcurrentdate(); float phase = s.getphase(); if (phase >= 1.0f && phase < 2.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "Emergence") != calendarMoments.end())) { calendarMoments.push_back("Emergence"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( phase >= 2.0f && phase < 3.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "FloralInitiation") != calendarMoments.end())) { calendarMoments.push_back("FloralInitiation"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( phase >= 3.0f && phase < 4.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "Heading") != calendarMoments.end())) { calendarMoments.push_back("Heading"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( phase == 4.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "Anthesis") != calendarMoments.end())) { calendarMoments.push_back("Anthesis"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( phase == 4.5f && !(find(calendarMoments.begin(), calendarMoments.end(), "EndCellDivision") != calendarMoments.end())) { calendarMoments.push_back("EndCellDivision"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( phase >= 5.0f && phase < 6.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "EndGrainFilling") != calendarMoments.end())) { calendarMoments.push_back("EndGrainFilling"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } else if ( 
phase >= 6.0f && phase < 7.0f && !(find(calendarMoments.begin(), calendarMoments.end(), "Maturity") != calendarMoments.end())) { calendarMoments.push_back("Maturity"); calendarCumuls.push_back(cumulTT); calendarDates.push_back(currentdate); } s.setcalendarMoments(calendarMoments); s.setcalendarDates(calendarDates); s.setcalendarCumuls(calendarCumuls); } // - class Test { PhenologyState s = new PhenologyState(); PhenologyState s1 = new PhenologyState(); PhenologyRate r = new PhenologyRate(); PhenologyAuxiliary a = new PhenologyAuxiliary(); Updatecalendar mod = new Updatecalendar(); //check wheat model); //test_wheat1 public void test_wheat1() { a.cumulTT = 112.330110409888D; s.calendarMoments = new vector<string>() {"Sowing"}; s.calendarDates = {new DateTime(2007, 3, 21) ,}; s.calendarCumuls = new vector<double>() {0.0D}; s.phase = 1D; a.currentdate = new DateTime(2007, 3, 27) ; mod.Calculate_updatecalendar(s,s1, r, a); //calendarMoments: ["Sowing", "Emergence"]; Console.WriteLine("calendarMoments estimated :"); for (int i=0; i<s.calendarMoments.Count; i++) Console.WriteLine(s.calendarMoments[i]); //calendarDates: ["2007/3/21", "2007/3/27"]; Console.WriteLine("calendarDates estimated :"); for (int i=0; i<s.calendarDates.Count; i++) Console.WriteLine(s.calendarDates[i]); //calendarCumuls: [ 0.0 ,112.33]; Console.WriteLine("calendarCumuls estimated :"); for (int i=0; i<s.calendarCumuls.Count; i++) Console.WriteLine(s.calendarCumuls[i]); } } Test t = new Test(); t.test_wheat1();
test/Models/pheno_pkg/test/cpp/Updatecalendar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # coding: utf-8 # # 3 Sample Tests Power over Increasing Dimension # These are same useful functions to import. Since we are calculating the statistical power over all the tests for all the simulations, we can just use a wild card import from the respective modules # In[1]: import os import sys import matplotlib import matplotlib.pyplot as plt import numpy as np from hyppo.independence import MGC, Dcorr, Hsic from hyppo.sims import gaussian_3samp from joblib import Parallel, delayed from matplotlib.legend import Legend from power_3samp import power_3samp_epsweight sys.path.append(os.path.realpath("..")) # In[4]: import seaborn as sns sns.set(color_codes=True, style="white", context="talk", font_scale=2) PALETTE = sns.color_palette("Set1") sns.set_palette(PALETTE[3:]) # In[6]: from rpy2.robjects import Formula, numpy2ri from rpy2.robjects.packages import importr class Manova: r""" Wrapper of R MANOVA """ def __init__(self): self.stats = importr("stats") self.r_base = importr("base") numpy2ri.activate() self.formula = Formula("X ~ Y") self.env = self.formula.environment def statistic(self, x, y): r""" Helper function to calculate the test statistic """ self.env["Y"] = y self.env["X"] = x stat = self.r_base.summary(self.stats.manova(self.formula), test="Pillai")[3][4] return stat # In[7]: NAME = "3samp-multiway" MAX_EPSILONS1 = 0.6 STEP_SIZE = 0.05 EPSILONS = np.arange(0, MAX_EPSILONS1 + STEP_SIZE, STEP_SIZE) EPSILON1 = 0.5 EPSILONS2 = [None] DIMENSIONS = [2, 5, 10, 25, 50, 75, 100] POWER_REPS = 5 REPS = 1000 n_jobs = 45 workers = 45 ONEWAY_EPSILON = 0.3 # FONTSIZE = 12 tests = [ MGC, Dcorr, Hsic, Manova, ] multiway_tests = [ Dcorr, ] multiway_cases = [ 6, ] # The following code loops over each saved independence test file 
and generates absolute power curves for each test and for each simulation modality. # In[9]: FONTSIZE = 30 def plot_power(): fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 12)) sim_title = [ ["Default distances", "Multiway distances"], ["Weak multiway", "Strong multiway"], ["Cluster separation", "Added noise dimensions"], ] sim_markers = ["1", "+", "x"] custom_color = ["#d9d9d9", "#969696", "#525252"] # Label matrices col = 0 oneway = [[0, 1, 1], [1, 0, 1], [1, 1, 0]] twoway = [[0, 1, 1], [1, 0, 2], [1, 2, 0]] for i, way in enumerate([oneway, twoway]): ax = axes[i][col] sns.heatmap( way, annot=True, fmt="d", ax=ax, cbar=False, vmin=0, vmax=3, cmap="Blues", linewidth=3, linecolor="black", ) ax.set_xticks([]) # ax.set_xticklabels(['Cluster 1', 'Cluster 2', 'Cluster 3'], fontsize=16) ax.set_yticks([]) # ax.set_yticklabels(['Cluster 1', 'Cluster 2', 'Cluster 3'], rotation=90, fontsize=16, va='center') ax.set_title(sim_title[col][i], fontsize=FONTSIZE) ax.set_aspect("equal") # Simulated data col = 1 SCALE = 10 np.random.seed(0) for i, epsilon in enumerate([0.2, 0.5]): ax = axes[i][col] sims = gaussian_3samp( 100, epsilon=epsilon * SCALE, case=6, c=ONEWAY_EPSILON * SCALE ) scatters = [] for count, sim in enumerate(sims): x, y = np.hsplit(sim, 2) scatters.append( ax.scatter(x, y, marker=sim_markers[count], color=custom_color[count]) ) ax.set_xlim(-5, 5) ax.set_ylim(-7, 3) ax.set_xticks([]) ax.set_yticks([]) ax.set_title(sim_title[col][i], fontsize=FONTSIZE) ax.text( 3, -4, r"$\varepsilon$" + f"={epsilon:.1f}", size=20, va="top", ha="center", color="r", ) a = np.sqrt(ONEWAY_EPSILON ** 2 - (epsilon / 2) ** 2) * SCALE epsilon = epsilon * SCALE ax.arrow( -epsilon / 2, -a, epsilon, 0, ec="r", fc="r", alpha=1, head_width=0.6, lw=4, ls="-", ) ax.arrow( 0, 0, -epsilon / 2, -a, ec="r", fc="r", alpha=1, head_width=0, ls=":", lw=4 ) ax.arrow( 0, 0, epsilon / 2, -a, ec="r", fc="r", alpha=1, head_width=0, ls=":", lw=4 ) # ax.text(-2, -1, f'c={ONEWAY_EPSILON}', size=20, 
va='bottom', ha='center', color='r') ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim())) # Power curves col = 2 custom_color = { "Dcorr": "#377eb8", "Hsic": "#4daf4a", "MGC": "#e41a1c", } for i, name in enumerate(["3samp_vs_epsilon", "3samp_vs_dim"]): ax = axes[i][col] if i == 0: XRANGE = EPSILONS elif i == 1: XRANGE = DIMENSIONS for test in tests: for multiway in [False, True]: if (multiway and test not in multiway_tests) or ( not multiway and test not in tests ): continue elif multiway: power = np.genfromtxt( "../ksample/{}/{}_{}_multiway.csv".format( name, 6, test.__name__ ), delimiter=",", ) ls = "dotted" lw = 3 label = f"Multiway {test.__name__}" else: power = np.genfromtxt( "../ksample/{}/{}_{}.csv".format(name, 6, test.__name__), delimiter=",", ) ls = "-" lw = 2 label = test.__name__ if test.__name__ in custom_color.keys(): ax.plot( XRANGE, power, custom_color[test.__name__], label=label, lw=lw, ls=ls, ) else: ax.plot(XRANGE, power, label=label, lw=lw, ls=ls) ax.tick_params(labelsize=FONTSIZE) if i == 0: ax.set_xticks([XRANGE[0], ONEWAY_EPSILON, XRANGE[-1]]) elif i == 1: ax.set_xticks([XRANGE[0], XRANGE[-1]]) ax.set_xticklabels([XRANGE[0], XRANGE[-1]]) ax.set_ylim(0, 1.05) ax.set_yticks([0, 1]) if i == 0: ax.axvline(ONEWAY_EPSILON, ls="--", c="grey") ax.set_title(sim_title[col][i], fontsize=FONTSIZE) ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim())) fig.text( 0.5, -0.25, r"Separation ($\varepsilon$)", ha="center", fontsize=FONTSIZE, transform=axes[0][2].transAxes, ) fig.text( 0.5, -0.2, "Dimension", ha="center", fontsize=FONTSIZE, transform=axes[1][2].transAxes, ) # fig.text(-0.12, 0.5, 'Power', va='center', rotation='vertical', fontsize=FONTSIZE, transform=axes[2][0].transAxes) # fig.text( # -0.16, # 0.5, # "Label matrices", # va="center", # rotation="vertical", # fontsize=FONTSIZE, # transform=axes[0][0].transAxes, # ) # fig.text( # -0.16, # 0.5, # "Scatter plots", # va="center", # rotation="vertical", # fontsize=FONTSIZE, # 
transform=axes[0][1].transAxes, # ) fig.text( -0.16, 0.5, "Power", va="center", rotation="vertical", fontsize=FONTSIZE, transform=axes[0][2].transAxes, ) fig.text( -0.16, -1.0, "Power", va="center", rotation="vertical", fontsize=FONTSIZE, transform=axes[0][2].transAxes, ) # fig.text(-0.10, 0.5, 'Power', va='center', rotation='vertical', fontsize=FONTSIZE, transform=axes[2][0].transAxes) fig.text( -0.16, 1.12, "(A)", va="center", fontsize=FONTSIZE, transform=axes[0][0].transAxes, ) fig.text( -0.16, 1.12, "(B)", va="center", fontsize=FONTSIZE, transform=axes[0][1].transAxes, ) fig.text( -0.16, 1.12, "(C)", va="center", fontsize=FONTSIZE, transform=axes[0][2].transAxes, ) leg = plt.legend( bbox_to_anchor=(0.63, 0.05), bbox_transform=plt.gcf().transFigure, ncol=2, loc="upper left", fontsize=FONTSIZE-5, handletextpad=0.5, columnspacing=0.5, handlelength=0.7, ) leg.get_frame().set_linewidth(0.0) for legobj in leg.legendHandles: legobj.set_linewidth(5.0) plt.subplots_adjust(hspace=0.50) leg = Legend( fig, scatters, ["Cluster 1", "Cluster 2", "Cluster 3"], loc="upper left", frameon=False, ncol=1, bbox_transform=plt.gcf().transFigure, bbox_to_anchor=(0.42, 0.05), fontsize=FONTSIZE-5, handletextpad=0.05, columnspacing=0.5, ) fig.add_artist(leg) for legobj in leg.legendHandles: legobj.set_linewidth(3.0) plt.savefig(f"../ksample/figs/{NAME}.pdf", transparent=True, bbox_inches="tight") plot_power() # %% # %% # -
ksample/3samp-multiway_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Fashion-MNIST image classification with a dense (fully connected)
# Keras network: flatten 28x28 images, two ReLU hidden layers, softmax out.

import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras

data = keras.datasets.fashion_mnist

# | Label | Description |
# --- | --- |
# | 0 | T-shirt/top
# | 1 | Trouser
# | 2 | Pullover
# | 3 | Dress
# | 4 | Coat
# | 5 | Sandal
# | 6 | Shirt
# | 7 | Sneaker
# | 8 | Bag
# | 9 | Ankle boot

(x_train, y_train), (x_test, y_test) = data.load_data()

plt.imshow(x_train[10])

y_train[10]

class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
               "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

x_train[10]

class_names[y_train[10]]

# ### DATA NORMALIZATION

# Pixel intensities are 0-255; scale to [0, 1] so training behaves well.
x_train_n = x_train / 255.0
x_test_n = x_test / 255.0

x_test_n.shape

# ### Split the data into train/validation/test datasets
# In the earlier step of importing the data, we had 60,000 samples for training and 10,000 test samples. Now we further split the training data into train/validation. Here is how each type of dataset is used in deep learning:
#
# * __Training data__ — used for training the model
# * __Validation data__ — used for tuning the hyperparameters and evaluating the models
# * __Test data__ — used to test the model after the model has gone through initial vetting by the validation set.

x_valid, x_train = x_train_n[:5000], x_train_n[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
x_test = x_test_n

x_valid[0]

# ### Building the neural network

# Fixed seeds keep results identical on every run.
np.random.seed(42)
tf.random.set_seed(42)

model = keras.models.Sequential()
# Each 28x28 2-D image is flattened into a 1-D array of 784 inputs.
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))

model.summary()

import pydot
keras.utils.plot_model(model)

weights, biases = model.layers[1].get_weights()
weights
biases
weights.shape
biases.shape

# ### Compiling and training the neural network model

# sparse_categorical_crossentropy: integer class labels (for one-hot
# labels use categorical_crossentropy; for 2 classes, binary).
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="sgd",
              metrics=["accuracy"])

model_history = model.fit(x_train, y_train, epochs=30,
                          validation_data=(x_valid, y_valid))

model_history.params

model_history.history

# +
import pandas as pd

pd.DataFrame(model_history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
# -

# ### Evaluating the model

model.evaluate(x_test, y_test)  # loss, accuracy on test data

X_new = x_test[:3]
y_proba = model.predict(X_new)  # per-class probability for each image
y_proba.round(2)

# BUG FIX: the original ran model.predict(X_new) a second time for the
# same inputs; reuse the probabilities already computed above.
predict_x = y_proba
classes_x = np.argmax(predict_x, axis=1)
print(predict_x)
print(classes_x)

np.array(class_names)[classes_x]

print(plt.imshow(x_test[0]))
print(np.array(class_names)[classes_x[0]])

print(plt.imshow(x_test[1]))
print(np.array(class_names)[classes_x[1]])

print(plt.imshow(x_test[2]))
print(np.array(class_names)[classes_x[2]])
ANN (Image Classification using keras and tensorflow).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import csv import json import pandas as pd import psycopg2 as pg import config as creds import sys import boto3 import sqlalchemy # + ENDPOINT="dc-energy-benchmarking-project.cetnqm9k1imt.us-east-1.rds.amazonaws.com" PORT="5432" USR="postgres" REGION="us-east-1" DBNAME="dc_energy_data" # session = boto3.Session(profile_name='RDSCreds') # client = boto3.client('rds') # token = client.generate_db_auth_token(DBHostname=ENDPOINT, Port=PORT, DBUsername=USR, Region=REGION) # - def connect(): # Set up a connection to the postgres server. conn_string = "host="+ creds.PGHOST +" port="+ "5432" +" dbname="+ creds.PGDATABASE +" user=" + creds.PGUSER \ +" password="+ creds.PGPASSWORD conn = pg.connect(conn_string) print("Connected!") # Create a cursor object cursor = conn.cursor() return conn, cursor conn, cursor = connect() # + # cursor.execute(""" CREATE TABLE IF NOT EXISTS raw_energy_data( # X text, # Y text, # OBJECTID integer, # PID text, # DCREALPROPERTYID text, # PMPROPERTYID decimal, # PROPERTYNAME text, # PMPARENTPROPERTYID text, # PARENTPROPERTYNAME text, # REPORTINGYEAR integer, # REPORTSTATUS text, # ADDRESSOFRECORD text, # OWNEROFRECORD text, # WARD numeric, # REPORTEDADDRESS text, # CITY text, # STATE text, # POSTALCODE text, # YEARBUILT integer, # PRIMARYPROPERTYTYPE_SELFSELECT text, # PRIMARYPROPERTYTYPE_EPACALC text, # TAXRECORDFLOORAREA integer, # REPORTEDBUILDINGGROSSFLOORAREA double precision, # ENERGYSTARSCORE double precision, # SITEEUI_KBTU_FT double precision, # WEATHERNORMALZEDSITEEUI_KBTUFT double precision, # SOURCEEUI_KBTU_FT double precision, # WEATHERNORMALZEDSOUREUI_KBTUFT double precision, # TOTGHGEMISSIONS_METRICTONSCO2E double precision, # TOTGHGEMISSINTENSITY_KGCO2EFT double precision, # WATERSCORE_MFPROPERTIES double 
precision, # WATERUSE_ALLWATERSOURCES_KGAL double precision, # NATURALGASUSE_THERMS double precision, # FUELOILANDDIESELFUELUSEKBTU double precision, # METEREDAREAS_ENERGY text, # METEREDAREAS_WATER text, # LATITUDE text, # LONGITUDE text, # ADDRESSID integer, # XCOORD double precision, # YCOORD double precision, # GIS_LAST_MOD_DTTM text, # DISTRCHILLEDWATER_KBTU double precision, # DISTRHOTWATER_KBTU double precision, # DISTRSTEAM_KBTU double precision, # ELECTRICITYUSE_RENEWABLE_KWH double precision, # ELECTRICITYUSE_GRID_KWH double precision, # NATURALGAS_KBTU_JANUARY double precision, # NATURALGAS_KBTU_FEBRUARY double precision, # NATURALGAS_KBTU_MARCH double precision, # NATURALGAS_KBTU_APRIL double precision, # NATURALGAS_KBTU_MAY double precision, # NATURALGAS_KBTU_JUNE double precision, # NATURALGAS_KBTU_JULY double precision, # NATURALGAS_KBTU_AUGUST double precision, # NATURALGAS_KBTU_SEPTEMBER double precision, # NATURALGAS_KBTU_OCTOBER double precision, # NATURALGAS_KBTU_NOVEMBER double precision, # NATURALGAS_KBTU_DECEMBER double precision, # ELECTRICITYUSE_KBTU_JANUARY double precision, # ELECTRICITYUSE_KBTU_FEBRUARY double precision, # ELECTRICITYUSE_KBTU_MARCH double precision, # ELECTRICITYUSE_KBTU_APRIL double precision, # ELECTRICITYUSE_KBTU_MAY double precision, # ELECTRICITYUSE_KBTU_JUNE double precision, # ELECTRICITYUSE_KBTU_JULY double precision, # ELECTRICITYUSE_KBTU_AUGUST double precision, # ELECTRICITYUSE_KBTU_SEPTEMBER double precision, # ELECTRICITYUSE_KBTU_OCTOBER double precision, # ELECTRICITYUSE_KBTU_NOVEMBER double precision, # ELECTRICITYUSE_KBTU_DECEMBER double precision);""") # conn.commit() # - cursor.execute("""SELECT * FROM raw_energy_data LIMIT 5""") query_results = cursor.fetchall() print(query_results) raw_weather= pd.read_csv('data/openweathermap_01012010_09212020.csv') raw_weather.dtypes raw_weather.head() #cursor.execute(""" CREATE TABLE IF NOT EXISTS raw_weather( dt integer, dt_iso text, timezone integer, city_name 
text, lat decimal, lon decimal, temp decimal, feel_like decimal, temp_min decimal, temp_max decimal, pressure integer, sea_level decimal, grnd_level decimal, humidity integer, wind_speed decimal, wind_deg integer, rain_1h decimal, rain_3h decimal, snow_1h decimal, snow_3h decimal, clouds_all integer, weather_id integer, weather_main text, weather_description text, weather_icon text);""") #conn.commit() cursor.execute("""SELECT * FROM raw_weather LIMIT 5""") conn.close()
storage/Database_Connection_Practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Pandas Data Frames # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/5.Pandas_DataFrames.ipynb) # # (Based https://www.w3schools.com/python/pandas/ and https://realpython.com/python-data-cleaning-numpy-pandas/) # # This notebook introduces the Panda Data Frames library. We will learn how to easily load and manipulate data, from selecting or replacing columns and indices to reshaping the data. # ### First, load needed Python packages # + # Import numpy and pandas dataframes import numpy as np import pandas as pd # Set pandas view options pd.set_option('display.width', 800) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) # - # ## 1. Pandas Data Structures # # Pandas provides two types of data structures: **Series** and **DataFrames**: # # - A **Series** is a one dimensional data structure (“a one dimensional ndarray”) that can store values — and for every value it holds a unique index. # # - A **DataFrame** is a two (or more) dimensional data structure – basically a table with rows and columns. The columns have names and the rows have indexes. 
# # <img src="https://github.com/rhennig/EMA6938/blob/main/Notebooks/Figures/Pandas.png?raw=1" alt="Confusion Matrix" align="center" style="width:500px; float:center"/> # + # Create a Pandas DataFrame # Using a dict of ndarray/lists data = { 'Element': ['Titanium', 'Vanadium', 'Manganese', 'Chromium', 'Iron', 'Cobalt', 'Nickel'], 'Z': [22, 23, 24, 25, 26, 27, 28], 'Magnetism':['Paramagnetic', 'Paramagnetic', 'Complex', 'Antiferromagnetic', 'Ferromagnetic', 'Ferromagnetic', 'Ferromagnetic'] } # Load data into a DataFrame object: df = pd.DataFrame(data) print(df) # + # Alternatively, create Pandas DataFrame from lists of lists data = [['Titanium', 22, 'Paramagnetic'], ['Vanadium', 23, 'Paramagnetic'], ['Manganese', 24, 'Complex'], ['Chromium', 25, 'Antiferromagnetic'], ['Iron', 26, 'Ferromagnetic'], ['Cobalt', 27, 'Ferromagnetic'], ['Nickel', 28, 'Ferromagnetic']] # Load data into a DataFrame object: df = pd.DataFrame(data, columns = ['Element', 'Z', 'Magnetism']) print(df) # - # ### Locate Row # # As you can see from the result above, the DataFrame is like a table with rows and columns. # # Pandas use the loc attribute to return one or more specified row(s). # Refer to the row index: print(df.loc[0]) # Note that this example returns a Pandas Series. # Use a list of indexes: df.loc[[0, 1]] # When using [], the result is a Pandas DataFrame. # Refer to the column name: df.loc[:, 'Magnetism'] # Conditional that returns a boolean Series df.loc[df['Magnetism'] == 'Ferromagnetic'] # ### Named Indices # # With the index argument, you can name your own indexes. # + # Give each row a name df = pd.DataFrame(data, index = ['Ti', 'V', 'Mn', 'Cr', 'Fe', 'Co', 'Ni']) print('DataFrame:\n', df) # Refer to the named index: print('\nRefer to names index:') print(df.loc['Mn']) # - # ## 2. Working with CSV Files # # A simple and widely used method to store big data sets is to use CSV (comma separated-values) files. 
# CSV files contain plain text and can be read by most software packages, including Pandas.
#
# In our examples we will be using a CSV file called cleavage_data.csv. This file contains the energy to cleave a number of materials.

# +
# Read CSV file into pandas dataframe if the file is locally available
# df = pd.read_csv('Datasets/cleavage_data.csv')

# Otherwise, we can read this file directly from a Zip file on MaterialsCloud.org
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen

# Close the HTTP connection as soon as the archive bytes have been read
# (the original left the urlopen handle open).
with urlopen("https://archive.materialscloud.org/record/file?filename=theoreticalCleavedSubstrates.zip&record_id=948") as response:
    archive_bytes = BytesIO(response.read())

# Open the downloaded Zipfile and create a pandas DataFrame from the CSV inside it
zipfile = ZipFile(archive_bytes)
data = pd.read_csv(zipfile.open('database/cleavage_data.csv'))

# First five data entries
data.head()
# -

# Last five data entries
data.tail()

# Print information about the data
data.describe()

# Information about the data
print(data.info())

# The result tells us there are 4614 rows and 8 columns and the name of each column, with the data type.
#
# We also see how many null entries (no values) each column contains. This dataset contains no empty entries.

# ## 3. Cleaning Data
#
# Data scientists spend a large amount of their time cleaning datasets. Often these initial steps of obtaining and cleaning data constitute about 80% of the work in a machine learning project.
#
# Therefore, we need to learn to deal with messy data, whether that means missing values, inconsistent formatting, malformed records, or nonsensical outliers.
#
# Here, we will use the Pandas and NumPy libraries to clean data.
#
# We’ll cover the following:
# - Dropping unnecessary columns in a DataFrame
# - Changing the index of a DataFrame

import pandas as pd
import numpy as np

# ### Dropping Columns in a DataFrame
#
# Often, not all the categories of data in a dataset are useful for our analysis.
For example, a dataset may contain materials information (composition, crystal structure, thermodynamics data, mechanical, electronic, and magnetic properties) but we may want to focus on analyzing the bulk modulus. # # In this case, the electronic, and magnetic properties are not important. Retaining these unneeded categories will take up unnecessary space and potentially also bog down runtime. # # Pandas provides a handy way of removing unwanted columns or rows from a DataFrame with the drop() function. Let’s look at a simple example where we drop a number of columns from a DataFrame. # # First, let’s use the DataFrame from the CSV file ‘cleavage_data.csv’. In the examples below, we pass a relative path to pd.read_csv, meaning that all of the datasets are in a folder named Datasets in our current working directory: # First five data entries data.head() # When we look at the first five entries using the head() method, we can see that a handful of columns provide ancillary information that may not be of interest if we want to select a substrate materials for a synthesis experiment: `Initial formation energy`, `Final formation energy`, `Initial area`. # # We can drop these columns. # + # Define a list of column names to drop. to_drop = ['Initial formation energy', 'Final formation energy', 'Initial area'] # Tell pandas to drop these columns directly in the dataset (inplace = True) data.drop(columns=to_drop, inplace=True) # First five entries of cleaned DataFrame data.head() # - # Alternatively, if we know which columns we need, we could pass the names of the columns as a list to the `usecols` argument of `pd.read_csv`. # ### Changing the Index of a DataFrame # # A Pandas Index extends the functionality of NumPy arrays to allow for more versatile slicing and labeling. In many cases, it is helpful to use a uniquely valued identifying field of the data as its index. 
# # For example, in the cleavage dataset, we may want to use the `Substrate Index` as a unique identifier. # Check that the Substrate Index is a unique identifier data['Substrate Index'].is_unique # Let’s replace the existing index with this column using set_index data.set_index('Substrate Index', inplace=True) data.head() # There are many other ways that Pandas can help us clean data, such as: # - Dealing with empty cells # - Using .str() methods to clean columns # - Using the DataFrame.applymap() function to clean the entire dataset, element-wise # - Renaming columns to a more recognizable set of labels # - Skipping unnecessary rows in a CSV file # - Remove duplicate entries # # You can find information about these methods in the following two tutorials: # - https://www.w3schools.com/python/pandas/pandas_cleaning.asp # - https://realpython.com/python-data-cleaning-numpy-pandas/
Notebooks/5.Pandas_DataFrames.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # GLM: Negative Binomial Regression
#
# This notebook demos negative binomial regression using the `glm` submodule. It closely follows the GLM Poisson regression example by [<NAME>](https://github.com/jonsedar) (which is in turn inspired by [a project by <NAME>](http://ianozsvald.com/2016/05/07/statistically-solving-sneezes-and-sniffles-a-work-in-progress-report-at-pydatalondon-2016/)) except the data here is negative binomially distributed instead of Poisson distributed.
#
# Negative binomial regression is used to model count data for which the variance is higher than the mean. The [negative binomial distribution](https://en.wikipedia.org/wiki/Negative_binomial_distribution) can be thought of as a Poisson distribution whose rate parameter is gamma distributed, so that rate parameter can be adjusted to account for the increased variance.
#
# #### Contents
#
# + [Setup](#Setup)
# + [Convenience Functions](#Convenience-Functions)
# + [Generate Data](#Generate-Data)
# + [Poisson Data](#Poisson-Data)
# + [Negative Binomial Data](#Negative-Binomial-Data)
# + [Visualize the Data](#Visualize-the-Data)
#
#
# + [Negative Binomial Regression](#Negative-Binomial-Regression)
# + [Create GLM Model](#Create-GLM-Model)
# + [View Results](#View-Results)
#

# ## Setup

# +
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy import stats
from scipy import optimize
import matplotlib.pyplot as plt
import seaborn as sns
import re

# %matplotlib inline
# -

# ### Convenience Functions
# #### (Taken from the Poisson regression example)

# +
def plot_traces(trcs, varnames=None):
    '''Plot traces with overlaid means and values.

    Parameters
    ----------
    trcs : pymc3 MultiTrace
        The sampling trace to plot.
    varnames : list of str, optional
        Variable names to include; all variables in the trace when None.
    '''
    nrows = len(trcs.varnames)
    if varnames is not None:
        nrows = len(varnames)

    # Compute the summary once and reuse it for both the mean lines and the
    # annotations (the original called pm.df_summary twice).
    summary = pm.df_summary(trcs, varnames=varnames)

    ax = pm.traceplot(trcs, varnames=varnames, figsize=(12, nrows*1.4),
                      lines={k: v['mean'] for k, v in summary.iterrows()})

    for i, mn in enumerate(summary['mean']):
        ax[i, 0].annotate('{:.2f}'.format(mn), xy=(mn, 0), xycoords='data',
                          xytext=(5, 10), textcoords='offset points', rotation=90,
                          va='bottom', fontsize='large', color='#AA0022')


def strip_derived_rvs(rvs):
    '''Return the RVs in `rvs` that are not PyMC3-generated, i.e. drop the
    automatically added `_log` / `_interval` transformed variables.'''
    return [rv for rv in rvs
            if not re.search('_log|_interval', rv.name)]
# -

# ### Generate Data
#
# As in the Poisson regression example, we assume that sneezing occurs at some baseline rate, and that consuming alcohol, not taking antihistamines, or doing both, increase its frequency.
#
# #### Poisson Data
#
# First, let's look at some Poisson distributed data from the Poisson regression example.
np.random.seed(123)

# +
# Mean Poisson values
theta_noalcohol_meds = 1    # no alcohol, took an antihist
theta_alcohol_meds = 3      # alcohol, took an antihist
theta_noalcohol_nomeds = 6  # no alcohol, no antihist
theta_alcohol_nomeds = 36   # alcohol, no antihist

# Create samples (q observations per condition)
q = 1000
df_pois = pd.DataFrame({
    'nsneeze': np.concatenate((np.random.poisson(theta_noalcohol_meds, q),
                               np.random.poisson(theta_alcohol_meds, q),
                               np.random.poisson(theta_noalcohol_nomeds, q),
                               np.random.poisson(theta_alcohol_nomeds, q))),
    'alcohol': np.concatenate((np.repeat(False, q),
                               np.repeat(True, q),
                               np.repeat(False, q),
                               np.repeat(True, q))),
    'nomeds': np.concatenate((np.repeat(False, q),
                              np.repeat(False, q),
                              np.repeat(True, q),
                              np.repeat(True, q)))})
# -

df_pois.groupby(['nomeds', 'alcohol'])['nsneeze'].agg(['mean', 'var'])

# Since the mean and variance of a Poisson distributed random variable are equal, the sample means and variances are very close.
#
# #### Negative Binomial Data
#
# Now, suppose every subject in the dataset had the flu, increasing the variance of their sneezing (and causing an unfortunate few to sneeze over 70 times a day). If the mean number of sneezes stays the same but variance increases, the data might follow a negative binomial distribution.

# +
# Gamma shape parameter
alpha = 10


def get_nb_vals(mu, alpha, size):
    """Generate negative binomially distributed samples.

    Draws `size` samples from a gamma distribution with mean `mu` and shape
    parameter `alpha`, then draws one Poisson sample per gamma draw, using
    the gamma value as the Poisson rate.  The marginal distribution of the
    result is negative binomial with mean `mu`.
    """
    g = stats.gamma.rvs(alpha, scale=mu / alpha, size=size)
    return stats.poisson.rvs(g)


# Create samples (n observations per condition)
n = 1000
df = pd.DataFrame({
    'nsneeze': np.concatenate((get_nb_vals(theta_noalcohol_meds, alpha, n),
                               get_nb_vals(theta_alcohol_meds, alpha, n),
                               get_nb_vals(theta_noalcohol_nomeds, alpha, n),
                               get_nb_vals(theta_alcohol_nomeds, alpha, n))),
    'alcohol': np.concatenate((np.repeat(False, n),
                               np.repeat(True, n),
                               np.repeat(False, n),
                               np.repeat(True, n))),
    'nomeds': np.concatenate((np.repeat(False, n),
                              np.repeat(False, n),
                              np.repeat(True, n),
                              np.repeat(True, n)))})
# -

df.groupby(['nomeds', 'alcohol'])['nsneeze'].agg(['mean', 'var'])

# As in the Poisson regression example, we see that drinking alcohol and/or not taking antihistamines increase the sneezing rate to varying degrees. Unlike in that example, for each combination of `alcohol` and `nomeds`, the variance of `nsneeze` is higher than the mean. This suggests that a Poisson distribution would be a poor fit for the data since the mean and variance of a Poisson distribution are equal.
# ### Visualize the Data

# +
g = sns.factorplot(x='nsneeze', row='nomeds', col='alcohol', data=df,
                   kind='count', aspect=1.5)

# Make x-axis ticklabels less crowded
ax = g.axes[1, 0]
labels = range(len(ax.get_xticklabels(which='both')))
ax.set_xticks(labels[::5])
ax.set_xticklabels(labels[::5]);
# -

# ## Negative Binomial Regression

# ### Create GLM Model

# +
fml = 'nsneeze ~ alcohol + nomeds + alcohol:nomeds'

with pm.Model() as model:
    pm.glm.glm(formula=fml, data=df, family=pm.glm.families.NegativeBinomial())

    # This initialization seems to improve mixing
    start = pm.find_MAP(fmin=optimize.fmin_powell)
    C = pm.approx_hessian(start)

    trace = pm.sample(4000, step=pm.NUTS(scaling=C))
# -

# ### View Results

rvs = [rv.name for rv in strip_derived_rvs(model.unobserved_RVs)]
plot_traces(trace[1000:], varnames=rvs);

# Transform coefficients to recover parameter values
np.exp(pm.df_summary(trace[1000:], varnames=rvs)[['mean','hpd_2.5','hpd_97.5']])

# The mean values are close to the values we specified when generating the data:
# - The base rate is a constant 1.
# - Drinking alcohol triples the base rate.
# - Not taking antihistamines increases the base rate by 6 times.
# - Drinking alcohol and not taking antihistamines doubles the rate that would be expected if their rates were independent. If they were independent, then doing both would increase the base rate by 3\*6=18 times, but instead the base rate is increased by 3\*6\*2=36 times.
#
# Finally, even though the sample for `mu` is highly skewed, its median value is close to the sample mean, and the mean of `alpha` is also quite close to its actual value of 10.

np.percentile(trace[1000:]['mu'], [25,50,75])

df.nsneeze.mean()

trace[1000:]['alpha'].mean()
docs/source/notebooks/GLM-negative-binomial-regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pyspark import SparkContext, SparkConf

# Run Spark locally, using every available core.
conf = SparkConf().setMaster("local[*]")
sc = SparkContext(conf = conf)

# Peek at the raw file contents; the context manager closes the handle
# (the original `open(...).read()` leaked the file object).
with open("enrollment.txt", "r") as f:
    f.read()

textfile = sc.textFile("enrollment.txt")
textfile

textfile.count()

textfile.first()

# +
def has_a_z(line):
    """Return True when `line` contains the letter 'z' (case-insensitive)."""
    return "z" in line.lower()

linesWithZ = textfile.filter(has_a_z)
lineLengths = linesWithZ.map(lambda name: len(name))
totalLength = lineLengths.reduce(lambda len1, len2: len1 + len2)
print(totalLength)
# -

lineLengths.count()

# Word count: normalize punctuation to spaces, split into words,
# then sum (word, 1) pairs by key.
tokens = textfile.map(lambda x: x.replace(",", " ").replace(".", " "))
all_words = tokens.flatMap(lambda x: x.strip().split())
kvs = all_words.map(lambda x: (x, 1))
word_counts = kvs.reduceByKey(lambda x, y: x + y)
print(word_counts.collect())
scripts/pyspark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: py37 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Elementary Statistics_ Picturing The World_ 6th - Larson # ### Resume with the formulas and concepts to know for testing the difference between means # ## 2.3.- Measures of Central Tendency # The **mean** of a data set is the sum of the data entries divided by the number of # entries. To find the mean of a data set, use one of these formulas. # # Population Mean: $\mu = \frac{\Sigma x}{N}$ # # Sample Mean: $\bar{x} = \frac{\Sigma x}{n}$ # # The lowercase Greek letter $\mu$ (pronounced mu) represents the population # mean and $\bar{x}$ (read as “x bar”) represents the sample mean. Note that $N$ # represents the number of entries in a $\textit{population}$ and $n$ represents the number # of entries in a sample. Recall that the uppercase Greek letter sigma ($\Sigma$) # indicates a summation of values. # ## 2.4.- Measures of Variation # The **population variance** of a population data set of $N$ entries is # # Population variance $= \sigma^2 = \frac{\Sigma(x - \mu)^2}{N}$ # # The symbol $\sigma$ is the lowercase Greek letter sigma. # # The **population standard deviation** of a population data set of $N$ entries is the # square root of the population variance. # # Population standard deviation $= \sigma = \sqrt{\sigma^2} = \sqrt{\frac{\Sigma(x - \mu)^2}{N}}$ # ### **Example 2** # Find the population variance and standard deviation of the starting salaries for # Corporation A. 
# +
from math import sqrt

# Example 2: population variance and standard deviation, first computed by
# hand from the definition, then checked against the `statistics` module.
salaries = [41, 38, 39, 45, 47, 41, 44, 41, 37, 42]

N = len(salaries)
mean = sum(salaries) / N
squared_deviations = sum((x - mean) ** 2 for x in salaries)
variance = squared_deviations / N
std = sqrt(variance)
print('population variance =', round(variance, 3))
print('population standard deviation =', round(std, 3), end='\n\n')

from statistics import pvariance, pstdev

variance = pvariance(salaries)
std = pstdev(salaries)
print('population variance with statistics =', round(variance, 3))
print('population standard deviation with statistics =', round(std, 3))
# -

# The **sample variance** and **sample standard deviation** of a sample data set of $n$
# entries are listed below.
#
# Sample variance $= s^2 = \frac{\Sigma(x - \bar{x})^2}{n - 1}$
#
# Sample standard deviation $= s = \sqrt{s^2} = \sqrt{\frac{\Sigma(x - \bar{x})^2}{n - 1}}$

# ### **Example 3**
# In a study of high school football players that suffered concussions, researchers
# placed the players in two groups. Players that recovered from their concussions
# in 14 days or less were placed in Group 1. Those that took more than 14 days
# were placed in Group 2. The recovery times (in days) for Group 1 are listed
# below. Find the sample variance and standard deviation of the recovery times

# +
from math import sqrt

# Sample statistics: note the n - 1 denominator (Bessel's correction),
# again computed by hand and then via the `statistics` module.
times = [4, 7, 6, 7, 9, 5, 8, 10, 9, 8, 7, 10]

n = len(times)
mean = sum(times) / n
squared_deviations = sum((x - mean) ** 2 for x in times)
variance = squared_deviations / (n - 1)
std = sqrt(variance)
print('sample variance =', round(variance, 3))
print('sample standard deviation =', round(std, 3), end='\n\n')

from statistics import variance, stdev

variance = variance(times)
std = stdev(times)
print('sample variance with statistics =', round(variance, 3))
print('sample standard deviation with statistics =', round(std, 3))
# -

# ## 5.1.- Introduction to Normal Distributions and the Standard Normal Distribution

# The normal distribution with a mean of 0 and a standard deviation of 1 is called the **standard normal distribution**.
# ## 5.4- Sampling Distributions and the Central Limit Theorem # The mean of the sample means $\mu_\bar{x}$ is equal to the population mean $\mu$. # # $\mu_\bar{x} = \mu$ # # The standard deviation of the sample means $\sigma_{\bar{x}}$ is equal to the population # standard deviation $\sigma$ divided by the square root of the sample size $n$. # # $\sigma_{\bar{x}} = \frac{\sigma}{\sqrt{n}}$ # # is called the **standard error of the mean**. # ### **Example 1** # You write the population values {1, 3, 5, 7} on slips of paper and put them in # a box. Then you randomly choose two slips of paper, with replacement. List all # possible samples of size $n$ = 2 and calculate the mean of each. These means # form the sampling distribution of the sample means. Find the mean, variance, # and standard deviation of the sample means. Compare your results with the # mean $\mu$ = 4, variance $\sigma^2$ = 5, and standard deviation $\sigma = \sqrt{5} \approx 2.236$ of # the population. # + from statistics import mean, pvariance, pstdev from math import sqrt values = [1, 3, 5, 7] pmean = mean(values) pvar = pvariance(values) pstd = pstdev(values) print('population mean =', round(pmean, 3)) print('population variance =', round(pvar, 3)) print('population standard deviation =', round(pstd, 3), end='\n\n') from itertools import product n = 2 sample_means = [mean(sample) for sample in product(values, repeat=n)] pmean_xbar = mean(sample_means) pstd_xbar = pstdev(sample_means) print('mean of the sample means =', round(pmean_xbar, 3)) print('standard deviation of the sample means =', round(pstd_xbar, 3)) print('pstd/sqrt(n) = ', round(pstd/sqrt(n), 3)) # - # ### The Central limit theorem # If samples of size $n$, where $n \ge 30$, are drawn from any population with # a mean $\mu$ and a standard deviation $\sigma$, then the sampling distribution of # sample means approximates a normal distribution. 
The greater the sample # size, the better the approximation # ## 6.1.- Confidence Intervals for the Mean ($\sigma$ Known) # A **point estimate** is a single value estimate for a population parameter. The most unbiased point estimate of the population mean $\mu$ is the sample mean $\bar{x}$. # ### **Example 1** # ### Finding a Point Estimate # An economics researcher is collecting data about grocery store employees in a county. The data listed below represents a random sample of the number of hours worked by 40 employees from several grocery stores in the county. Find a point estimate of the population mean $\mu$. # ### Solution # The sample mean of the data is # $\bar{x} = \frac{\Sigma x}{n} = \frac{1184}{40} = 29.6$ # + data = [30, 26, 33, 26, 26, 33, 31, 31, 21, 37, 27, 20, 34, 35, 30, 24, 38, 34, 39, 31, 22, 30, 23, 23, 31, 44, 31, 33, 33, 26, 27, 28, 25, 35, 23, 32, 29, 31, 25, 27] n = len(data) smean = sum(data) / n print('sample mean: ', smean) # - # So, the point estimate for the mean number of hours worked by grocery store employees in this county is 29.6 hours. # In Example 1, the probability that the population mean is exactly 29.6 is virtually zero. So, instead of estimating $\mu$ to be exactly 29.6 using a point estimate, you can estimate that $\mu$ lies in an interval. This is called making an interval estimate. # An **interval estimate** is an interval, or range of values, used to estimate a population parameter. # The **level of confidence** $c$ is the probability that the interval estimate contains the population parameter, assuming that the estimation process is repeated a large number of times. # The level of confidence $c$ is the area under the standard normal curve between the `critical values`, $-z_c$ and $z_c$. # # **Critical values** are values that separate sample statistics that are probable from sample statistics that are improbable, or unusual. # # * $c$ is the percent of the area under the normal curve between $-z_c$ and $z_c$. 
The area remaining is $1 - c$, so the area in each tail is $\frac{1}{2}(1 - c)$. # # * if $c = 90\%$, then $5\%$ of the area lies to the left of $-z_c = -1.645$ and $5\%$ lies to the right of $z_c = 1.645$. # + import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm lim = 3.49 x = np.linspace(-lim, lim, 101) y = norm.pdf(x) # Probability density function. c = 0.90 area_one_tail = 0.5 * (1 - c) lz_c = norm.ppf(area_one_tail) # Critical value separating left tail rz_c = norm.ppf(c + area_one_tail) # Critical value separating right tail plt.plot(x, y) plt.plot([lz_c, lz_c], [norm.pdf(lim), norm.pdf(lz_c)]) plt.plot([rz_c, rz_c], [norm.pdf(lim), norm.pdf(rz_c)]) plt.show() print('lz_c:', round(lz_c, 3)) print('rz_c:', round(rz_c, 3)) # - # ## 6.2.- Confidence Intervals for the Mean ($\sigma$ Unknown) # The **degrees of freedom** are the number of free choices left after a sample # statistic such as $\bar{x}$ is calculated. When you use a $t$-distribution to estimate # a population mean, the degrees of freedom are equal to one less than the # sample size. # # d.f. = $n - 1$ # # As the degrees of freedom increase, the $t$-distribution approaches the # standard normal distribution. After 30 d.f., the # $t$-distribution is close to the standard normal distribution. # ## 8.1.- Testing the Difference Between Means (Independent Samples,$\sigma_1$ and $\sigma_2$ Known) # ### **Example 2** # A credit card watchdog group claims that there is a difference in the mean credit card debts of households in California and Illinois. The results of # a random survey of 250 households from each state are shown at the left. # The two samples are independent. Assume that $\sigma_{1}$ = \\$1045 for California and $\sigma_{2}$ = \\$1350 for Illinois. Do the results support the group’s claim? Use # $\alpha$ = 0.05. # #### Solution # **Claim:** "there is a difference in the mean credit card debts of households in California and Illinois." 
# $H_{0}$: $\mu_{1} = \mu_{2}$ and $H_{a}$: $\mu_{1} \neq \mu_{2}$. (Claim) # + from scipy.stats import norm from math import sqrt # California smean1 = 4777 sstd1 = 1045 n1 = 250 # Illinois smean2 = 4866 sstd2 = 1350 n2 = 250 alpha = 0.05 area_one_tail = 0.5 * alpha z0 = norm.ppf((1 - alpha) + area_one_tail) z = (smean1 - smean2) / sqrt(sstd1 ** 2/ n1 + sstd2 ** 2/ n2) print('The rejection regions are: z <', round(-z0, 3), 'and z >', round(z0, 3)) print('z:', round(z, 3)) # - # Because $z$ is not in the rejection region, you fail to reject the null hypothesis. # Fail to reject $H_{0}$ # **Interpretation** There is not enough evidence at the 5% level of significance # to support the group’s claim that there is a difference in the mean credit card # debts of households in California and Illinois. # ### **Try It Yourself 2** # A survey indicates that the mean annual wages for forensic science technicians working for local and state governments are \\$55,950 and \\$51,100, respectively. The survey includes a randomly selected sample of size 100 from each government branch. Assume that the population standard deviations are \\$6200 (local) and \\$5575 (state). The two samples are independent. At $\alpha$ = 0.10, is there enough evidence to conclude that there is a difference in the mean annual wages? # #### Solution # **Claim:** "there is a difference in the mean annual wages for forensic science technicians working for local and state governments" # $H_{0}$: $\mu_{1} = \mu_{2}$ and $H_{a}$: $\mu_{1} \neq \mu_{2}$. 
(Claim) # + from scipy.stats import norm from math import sqrt # local smean1 = 55950 sstd1 = 6200 n1 = 100 # state smean2 = 51100 sstd2 = 5575 n2 = 100 alpha = 0.10 area_one_tail = 0.5 * alpha z0 = norm.ppf((1 - alpha) + area_one_tail) z = (smean1 - smean2) / sqrt(sstd1 ** 2/ n1 + sstd2 ** 2/ n2) print('The rejection regions are: z <', round(-z0, 3), 'and z >', round(z0, 3)) print('z:', round(z, 3)) # - # Because $z$ is # in the rejection region, you reject the null hypothesis. # Reject $H_{0}$ # **Interpretation** There is enough evidence at the 10% level of significance to support the claim that there is a difference in the mean annual wages for forensic science technicians working for local and state governments. # ### **Example 3** # A travel agency claims that the average daily cost of meals and lodging for vacationing in Texas is less than the average daily cost in Virginia. The table at the left shows the results of a random survey of vacationers in each state. The two samples are independent. Assume that $\sigma_{1}$ = \\$19 for Texas and $\sigma_{2}$ = \\$24 for Virginia, and that both populations are normally distributed. At $\alpha$ = 0.01, is there enough evidence to support the claim? # #### Solution # **Claim:** "The average daily cost of meals and lodging for vacationing in Texas is less than the average daily cost in Virginia" # $H_{0}$: $\mu_{1} \ge \mu_{2}$ and $H_{a}$: $\mu_{1} < \mu_{2}$. (Claim) # + from scipy.stats import norm from math import sqrt # Texas smean1 = 234 sstd1 = 19 n1 = 25 # Virginia smean2 = 240 sstd2 = 24 n2 = 20 alpha = 0.01 z0 = norm.ppf(alpha) z = (smean1 - smean2) / sqrt(sstd1 ** 2/ n1 + sstd2 ** 2/ n2) print('The rejection region is: z <', round(z0, 3)) print('z:', round(z, 3)) # - # Because $z$ is not in the rejection region, you fail to reject the null hypothesis. # Fail to reject $H_{0}$ # **Interpretation** There is not enough evidence at the 1% level of significance to support the travel agency’s claim. 
# ### **Try It Yourself 3** # A travel agency claims that the average daily cost of meals and lodging for vacationing in Alaska is greater than the average daily cost in Colorado. The table at the left shows the results of a random survey of vacationers in each state. The two samples are independent. Assume that $\sigma_{1}$ = \\$24 for Alaska and $\sigma_{2}$ = \\$19 for Colorado, and that both populations are normally distributed. At $\alpha$ = 0.05, is there enough evidence to support the claim? # #### Solution # **Claim:** "The average daily cost of meals and lodging for vacationing in Alaska is greater than the average daily cost in Colorado." # $H_{0}$: $\mu_{1} \le \mu_{2}$ and $H_{a}$: $\mu_{1} > \mu_{2}$. (Claim) # + from scipy.stats import norm from math import sqrt # Alaska smean1 = 296 sstd1 = 24 n1 = 15 # Colorado smean2 = 293 sstd2 = 19 n2 = 20 alpha = 0.05 z0 = norm.ppf(alpha) z = (smean1 - smean2) / sqrt(sstd1 ** 2/ n1 + sstd2 ** 2/ n2) print('The rejection region is: z <', round(z0, 2)) print('z:', round(z, 3)) # - # Because $z$ is not in the rejection region or P > $\alpha$, you fail to reject the null hypothesis. # Fail to reject $H_0$. # **Interpretation** There is not enough evidence at the 5% level of significance to support the travel agency’s claim that the average daily cost of meals and lodging for vacationing in Alaska is greater than the average daily cost in Colorado. # ## 8.2.- Testing the Difference Between Means (Independent Samples,$\sigma_1$ and $\sigma_2$ Unknown) # ### **Example 1** # The results of a state mathematics test for random samples of students taught # by two different teachers at the same school are shown at the left. Can you # conclude that there is a difference in the mean mathematics test scores for # the students of the two teachers? Use $\alpha$ = 0.10. Assume the populations are # normally distributed and the population variances are not equal. 
# #### Solution
# **Claim:** "there is a difference in the mean mathematics test scores for the students of the two teachers."
# $H_{0}$: $\mu_{1} = \mu_{2}$ and $H_{a}$: $\mu_{1} \neq \mu_{2}$. (Claim)

# +
# Two-sample t-test, independent samples, population variances unknown and
# assumed UNEQUAL -> no pooling; d.f. is the smaller sample size minus one.
from scipy.stats import t
from math import sqrt

# Teacher 1
smean1 = 473
sstd1 = 39.7
n1 = 8

# Teacher 2
smean2 = 459
sstd2 = 24.5
n2 = 18

alpha = 0.10
area_one_tail = 0.5 * alpha  # two-tailed test: alpha split between both tails
df = min(n1, n2) - 1

# BUG FIX: keep the statistic in `t_stat` so the imported distribution
# object `t` is not shadowed — re-running this cell previously failed with
# "'float' object has no attribute 'ppf'".
t0 = t.ppf(1 - alpha + area_one_tail, df)
t_stat = (smean1 - smean2) / sqrt(sstd1 ** 2 / n1 + sstd2 ** 2 / n2)

print('The rejection regions are: t <', round(-t0, 3), 'and t >', round(t0, 3))
print('t:', round(t_stat, 3))
# -

# Because $t$ is not in the rejection region, you fail to reject the null hypothesis.
# Fail to reject $H_{0}$

# **Interpretation** There is not enough evidence at the 10% level of significance
# to support the claim that the mean mathematics test scores for the students of
# the two teachers are different.

# ### **Try It Yourself 1**
# The annual earnings of 19 people with a high school diploma and 16 people
# with an associate’s degree are shown at the left. Can you conclude that there
# is a difference in the mean annual earnings based on level of education? Use
# $\alpha$ = 0.01. Assume the populations are normally distributed and the population
# variances are not equal.

# #### Solution
# **Claim:** "there is a difference in mean annual earnings
# based on level of education."
# $H_{0}$: $\mu_{1} = \mu_{2}$ and $H_{a}$: $\mu_{1} \neq \mu_{2}$. (Claim)

# +
# Same unequal-variance t-test as Example 1, now at the 1% level.
from scipy.stats import t
from math import sqrt

# High school diploma
smean1 = 32493
sstd1 = 3118
n1 = 19

# Associate’s degree
smean2 = 40907
sstd2 = 6162
n2 = 16

alpha = 0.01
area_one_tail = 0.5 * alpha
df = min(n1, n2) - 1
t0 = t.ppf(1 - alpha + area_one_tail, df)
t_stat = (smean1 - smean2) / sqrt(sstd1 ** 2 / n1 + sstd2 ** 2 / n2)

print('The rejection regions are: t <', round(-t0, 3), 'and t >', round(t0, 3))
print('t:', round(t_stat, 3))
# -

# Because $t$ is in the rejection region, you reject the null hypothesis.
# Reject $H_{0}$

# **Interpretation** There is enough evidence at the 1% level of significance
# to support the claim that there is a difference in the mean
# annual earnings based on level of education.

# ### **Example 2**
# A manufacturer claims that the mean operating cost per mile of its sedans
# is less than that of its leading competitor. You conduct a study using
# 30 randomly selected sedans from the manufacturer and 32 from the leading
# competitor. The results are shown at the left. At $\alpha$ = 0.05, can you support the
# manufacturer’s claim? Assume the population variances are equal.

# #### Solution
# **Claim:** "the mean operating cost per mile of the manufacturer’s sedans
# is less than that of its leading competitor"
# $H_{0}$: $\mu_{1} \ge \mu_{2}$ and $H_{a}$: $\mu_{1} < \mu_{2}$. (Claim)

# +
# Two-sample t-test, variances unknown but assumed EQUAL -> pooled
# standard deviation with df = n1 + n2 - 2; left-tailed test.
from scipy.stats import t
from math import sqrt

# Manufacturer
smean1 = 0.52
sstd1 = 0.05
n1 = 30

# Competitor
smean2 = 0.55
sstd2 = 0.07
n2 = 32

alpha = 0.05
df = n1 + n2 - 2
t0 = t.ppf(alpha, df)

# Pooled estimate of the common standard deviation.
pooled_sd = sqrt(((n1 - 1) * sstd1 ** 2 + (n2 - 1) * sstd2 ** 2) / df)
t_stat = (smean1 - smean2) / (pooled_sd * sqrt(1 / n1 + 1 / n2))

print('The rejection region is: t <', round(t0, 3))
print('t:', round(t_stat, 3))
# -

# Because $t$ is in the rejection region, you reject the null hypothesis.
# Reject $H_{0}$

# **Interpretation** There is enough evidence at the 5% level of significance to
# support the manufacturer’s claim that the mean operating cost per mile of its
# sedans is less than that of its competitor’s.

# ### **Try It Yourself 2**
# A manufacturer claims that the mean operating cost per mile of its minivans
# is less than that of its leading competitor. You conduct a study using
# 34 randomly selected minivans from the manufacturer and 38 from the leading
# competitor. The results are shown at the left. At $\alpha$ = 0.10, can you support the
# manufacturer’s claim? Assume the population variances are equal.

# #### Solution
# **Claim:** "the mean operating cost per mile of a
# manufacturer’s minivans is less than that of its leading
# competitor."
# $H_{0}$: $\mu_{1} \ge \mu_{2}$ and $H_{a}$: $\mu_{1} < \mu_{2}$. (Claim)

# +
# Pooled two-sample t-test (equal population variances assumed); left-tailed.
from scipy.stats import t
from math import sqrt

# Manufacturer
smean1 = 0.56
sstd1 = 0.08
n1 = 34

# Competitor
smean2 = 0.58
sstd2 = 0.07
n2 = 38

alpha = 0.10
df = n1 + n2 - 2
t0 = t.ppf(alpha, df)

# BUG FIX: keep the statistic in `t_stat` so the imported distribution
# object `t` is not shadowed — re-running this cell previously failed
# at t.ppf() because `t` had been rebound to a float.
pooled_sd = sqrt(((n1 - 1) * sstd1 ** 2 + (n2 - 1) * sstd2 ** 2) / df)
t_stat = (smean1 - smean2) / (pooled_sd * sqrt(1 / n1 + 1 / n2))

print('The rejection region is: t <', round(t0, 3))
print('t:', round(t_stat, 3))
# -

# Because $t$ is not in the rejection region, you fail to reject the null hypothesis.
# Fail to reject $H_{0}$

# **Interpretation** There is not enough evidence at the 10% level of significance
# to support the manufacturer’s claim that the mean operating
# cost per mile of its minivans is less than that of its leading
# competitor.
hypothesis-testing-larson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !ls
# !ls nih

# +
# ai4m_c1_w1_export.py
# Collect the week-1 course notebooks, sources, datasets and images into a
# single zip archive so the workspace can be downloaded in one piece.
import glob
import os
import os.path
import zipfile


def get_filepaths():
    """Return the list of file paths to include in the export archive."""
    notebooks = glob.glob(os.path.join('.', '*.ipynb'))

    # BUG FIX: the original rebound the pattern variable from '*.py' to
    # '*.png' before globbing, silently dropping all Python sources.
    sources = (glob.glob(os.path.join('.', '*.py'))
               + glob.glob(os.path.join('.', '*.png')))

    # BUG FIX: likewise, only the last of the three dataset patterns was
    # ever globbed; collect all of them.
    datasets = []
    for pattern in ('nih/*.csv', 'nih/*.h5', 'nih/*.hdf5'):
        datasets += glob.glob(os.path.join('.', pattern))

    images = glob.glob(os.path.join('.', 'nih/images-small', '*.*'))

    filepaths = notebooks + sources + datasets + images
    if debug:
        print("filepaths: " + str(filepaths))
    return filepaths


def export_files(zipfilename):
    """Write every collected file path into the archive *zipfilename*."""
    filepaths = get_filepaths()
    # BUG FIX: the zip was previously only written when debug was True;
    # debug now controls logging only.
    with zipfile.ZipFile(zipfilename, 'w') as myzip:
        for f in filepaths:
            if debug:
                print("f: " + f)
            myzip.write(f)


def check_export_file(zipfilename):
    """Report whether the export archive exists on disk."""
    if os.path.isfile(zipfilename):
        print("Export successful.")
    else:
        # Previously failures were silent; make them visible.
        print("Export failed: " + zipfilename + " not found.")


# main
zipfilename = os.path.join(".", "ai4m_c1_w1_export.zip")
debug = True
export_files(zipfilename)
check_export_file(zipfilename)
# -

# !ls *.zip
week1/AI4M_C1_W1_export.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="67gUU740MSXd" colab_type="text" # # Tutorial # # This tutorial will teach the basics of how to use cirq. This tutorial will walk through how to use qubits, gates, and operations to create and simulate your first quantum circuit using cirq. It will briefly introduce devices, unitary matrices, decompositions, and optimizers. # # Note that this tutorial isn’t a quantum computing 101 tutorial, we assume familiarity of quantum computing at about the level of the textbook “Quantum Computation and Quantum Information” by <NAME> Chuang. # # For more in-depth examples closer to those found in current work, check out our tutorials page. # + [markdown] id="1dOjJlgrNUuz" colab_type="text" # To begin, please follow the instructions for [installing Cirq](install.md). # + id="zVJYIBGjL8TT" colab_type="code" colab={} # !pip install cirq --quiet # + [markdown] id="xr-MMoXgNsUQ" colab_type="text" # ## Qubits # # The first part of creating a quantum circuit is to define a set of qubits (also known as a quantum registers) to act on. # # Cirq has three main ways of defining qubits: # # * `cirq.NamedQubit`: used to label qubits by an abstract name # * `cirq.LineQubit`: qubits labelled by number in a linear array # * `cirq.GridQubit`: qubits labelled by two numbers in a rectangular lattice. # # Here are some examples of defining each type of qubit. # + id="PsgSo-H0Os8X" colab_type="code" colab={} import cirq # Using named qubits can be useful for abstract algorithms # as well as algorithms not yet mapped onto hardware. 
q0 = cirq.NamedQubit('source') q1 = cirq.NamedQubit('target') # Line qubits can be created individually q3 = cirq.LineQubit(3) # Or created in a range # This will create LineQubit(0), LineQubit(1), LineQubit(2) q0, q1, q2 = cirq.LineQubit.range(3) # Grid Qubits can also be referenced individually q4_5 = cirq.GridQubit(4,5) # Or created in bulk in a square # This will create 16 qubits from (0,0) to (3,3) qubits = cirq.GridQubit.square(4) # + [markdown] id="4zE6AutyQhQ6" colab_type="text" # There are also pre-packaged sets of qubits called [Devices](devices.md). These are qubits along with a set of rules of how they can be used. A `cirq.Device` can be used to apply adjacency rules and other hardware constraints to a quantum circuit. For our example, we will use the `cirq.google.Foxtail` device that comes with cirq. It is a 2x11 grid that mimics early hardware released by Google. # + id="B0Dwgu-lQLpq" colab_type="code" outputId="8ddb536a-86d6-40bf-a98c-f4fc77424bc6" colab={"base_uri": "https://localhost:8080/", "height": 85} print(cirq.google.Foxtail) # + [markdown] id="j1QTjyxLSe5c" colab_type="text" # ## Gates and Operations # # The next step is to use the qubits to create operations that can be used in our circuit. Cirq has two concepts that are important to understand here: # # * A `Gate` is an effect that can be applied to a set of qubits. # * An `Operation` is a gate applied to a set of qubits. # # For instance, `cirq.H` is the quantum [Hadamard](https://en.wikipedia.org/wiki/Quantum_logic_gate#Hadamard_(H)_gate) and is a `Gate` object. `cirq.H(cirq.LineQubit(1))` is an `Operation` object and is the Hadamard gate applied to a specific qubit (line qubit number 1). # # Many textbook gates are included within cirq. `cirq.X`, `cirq.Y`, and `cirq.Z` refer to the single-qubit Pauli gates. `cirq.CZ`, `cirq.CNOT`, `cirq.SWAP` are a few of the common two-qubit gates. `cirq.measure` is a macro to apply a `MeasurementGate` to a set of qubits. 
You can find more, as well as instructions on how to creats your own custom gates, on the [Gates documentation](gates.ipynb) page. # # Many arithmetic operations can also be applied to gates. Here are some examples: # + id="wDW-yU-fesDl" colab_type="code" colab={} # Example gates not_gate = cirq.CNOT pauli_z = cirq.Z # Using exponentiation to get square root gates sqrt_x_gate = cirq.X**0.5 sqrt_iswap = cirq.ISWAP**0.5 # Some gates can also take parameters sqrt_sqrt_y = cirq.YPowGate(exponent=0.25) # Example operations q0, q1 = cirq.LineQubit.range(2) z_op = cirq.Z(q0) not_op = cirq.CNOT(q0, q1) sqrt_iswap_op = sqrt_iswap(q0, q1) # + [markdown] id="BnBGLMVvWVkz" colab_type="text" # ## Circuits and Moments # # We are now ready to construct a quantum circuit. A `Circuit` is a collection of `Moment`s. A `Moment` is a collection of `Operation`s that all act during the same abstract time slice. Each `Operation` must have a disjoint set of qubits from the other `Operation`s in the `Moment`. A `Moment` can be thought of as a vertical slice of a quantum circuit diagram. # # Circuits can be constructed in several different ways. By default, cirq will attempt to slide your operation into the earliest possible `Moment` when you insert it. 
# # + id="HEuqEZcXkz3Q" colab_type="code" outputId="dbb11050-0c9b-4356-a1ef-bb8014e4a695" colab={"base_uri": "https://localhost:8080/", "height": 102} circuit = cirq.Circuit() # You can create a circuit by appending to it circuit.append(cirq.H(q) for q in cirq.LineQubit.range(3)) # All of the gates are put into the same Moment since none overlap print(circuit) # + id="Lbez4guQl31P" colab_type="code" outputId="d1d7b32c-98cb-4881-a19c-92aeeb15a16b" colab={"base_uri": "https://localhost:8080/", "height": 136} # We can also create a circuit directly as well: print(cirq.Circuit(cirq.SWAP(q, q+1) for q in cirq.LineQubit.range(3))) # + [markdown] id="3FC9bdlXmShh" colab_type="text" # Sometimes, you may not want cirq to automatically shift operations all the way to the left. To construct a circuit without doing this, you can create the circuit moment-by-moment or use a different `InsertStrategy`, explained more in the [Circuit documentation](circuits.ipynb). # + id="4AEahodTnYiI" colab_type="code" outputId="5f878981-1600-45fd-a2ec-7f3f265a5a03" colab={"base_uri": "https://localhost:8080/", "height": 102} # Creates each gate in a separate moment. print(cirq.Circuit(cirq.Moment([cirq.H(q)]) for q in cirq.LineQubit.range(3))) # + [markdown] id="j406AKYsobpq" colab_type="text" # ### Circuits and Devices # # One important comnsideration when using real quantum devices is that there are often hardware constraints on the circuit. Creating a circuit with a `Device` will allow you to capture some of these requirements. These `Device` objects will validate the operations you add to the circuit to make sure that no illegal operations are added. # # Let's look at an example using the Foxtail device. 
# + id="9UV-dXJOpy8B" colab_type="code" outputId="0e162827-2c42-4ade-e221-d2fdca93dcb6" colab={"base_uri": "https://localhost:8080/", "height": 170} q0 = cirq.GridQubit(0, 0) q1 = cirq.GridQubit(0, 1) q2 = cirq.GridQubit(0, 2) adjacent_op = cirq.CZ(q0, q1) nonadjacent_op = cirq.CZ(q0, q2) # This is an unconstrained circuit with no device free_circuit = cirq.Circuit() # Both operations are allowed: free_circuit.append(adjacent_op) free_circuit.append(nonadjacent_op) print('Unconstrained device:') print(free_circuit) print() # This is a circuit on the Foxtail device # only adjacent operations are allowed. print('Foxtail device:') foxtail_circuit = cirq.Circuit(device=cirq.google.Foxtail) foxtail_circuit.append(adjacent_op) try: # Not allowed, will throw exception foxtail_circuit.append(nonadjacent_op) except ValueError as e: print('Not allowed. %s' % e) # + [markdown] id="xZ68bWEjoMKt" colab_type="text" # ## Simulation # # The results of the application of a quantum circuit can be calculated by a `Simulator`. Cirq comes bundled with a simulator that can calculate the results of circuits up to about a limit of 20 qubits. It can be initialized with `cirq.Simulator()`. # # There are two different approaches to using a simulator: # # * `simulate()`: Since we are classically simulating a circuit, a simulator can directly access and view the resulting wave function. This is useful for debugging, learning, and understanding how circuits will function. # * `run()`: When using actual quantum devices, we can only access the end result of a computation and must sample the results to get a distribution of results. Running the simulator as a sampler mimics this behavior and only returns bit strings as output. 
# # Let's try to simulate a 2-qubit "Bell State": # + id="AwC4SL6CHpXm" colab_type="code" outputId="5339deec-7cad-4320-814e-b5dc6ae6eef9" colab={"base_uri": "https://localhost:8080/", "height": 119} # Create a circuit to generate a Bell State: # sqrt(2) * ( |00> + |11> ) bell_circuit = cirq.Circuit() q0, q1 = cirq.LineQubit.range(2) bell_circuit.append(cirq.H(q0)) bell_circuit.append(cirq.CNOT(q0,q1)) # Initialize Simulator s=cirq.Simulator() print('Simulate the circuit:') results=s.simulate(bell_circuit) print(results) print() # For sampling, we need to add a measurement at the end bell_circuit.append(cirq.measure(q0, q1, key='result')) print('Sample the circuit:') samples=s.run(bell_circuit, repetitions=1000) # Print a histogram of results print(samples.histogram(key='result')) # + [markdown] id="06Q_7vlQSu4Z" colab_type="text" # ### Using parameter sweeps # # Cirq circuits allow for gates to have symbols as free parameters within the circuit. This is especially useful for variational algorithms, which vary parameters within the circuit in order to optimize a cost function, but it can be useful in a variety of circumstances. # # For parameters, cirq uses the library `sympy` to add `sympy.Symbol` as parameters to gates and operations. # # Once the circuit is complete, you can fill in the possible values of each of these parameters with a `Sweep`. There are several possibilities that can be used as a sweep: # # * `cirq.Points`: A list of manually specified values for one specific symbol as a sequence of floats # * `cirq.Linspace`: A linear sweep from a starting value to an ending value. # * `cirq.ListSweep`: A list of manually specified values for several different symbols, specified as a list of dictionaries. # * `cirq.Zip` and `cirq.Product`: Sweeps can be combined list-wise by zipping them together or through their Cartesian product. 
# # A parameterized circuit and sweep together can be run using the simulator or other sampler by changing `run()` to `run_sweep()` and adding the sweep as a parameter. # # Here is an example of sweeping an exponent of a X gate: # + id="ElyizofbLGq9" colab_type="code" outputId="860c31c6-0319-4b8e-d3fa-01f6848c3726" colab={"base_uri": "https://localhost:8080/", "height": 282} import matplotlib.pyplot as plt import sympy # Perform an X gate with variable exponent q = cirq.GridQubit(1,1) circuit = cirq.Circuit(cirq.X(q) ** sympy.Symbol('t'), cirq.measure(q, key='m')) # Sweep exponent from zero (off) to one (on) and back to two (off) param_sweep = cirq.Linspace('t', start=0, stop=2, length=200) # Simulate the sweep s = cirq.Simulator() trials = s.run_sweep(circuit, param_sweep, repetitions=1000) # Plot all the results x_data = [trial.params['t'] for trial in trials] y_data = [trial.histogram(key='m')[1] / 1000.0 for trial in trials] plt.scatter('t','p', data={'t': x_data, 'p': y_data}) # + [markdown] id="M8oLYwusz4XE" colab_type="text" # ## Unitary matrices and decompositions # # Most quantum operations have a unitary matrix representation. This matrix can be accessed by applying `cirq.unitary()`. This can be applied to gates, operations, and circuits that support this protocol and will return the unitary matrix that represents the object. # + id="xn9nnBA70s23" colab_type="code" outputId="db800a4f-609e-41aa-ba8d-cdad21fadaed" colab={"base_uri": "https://localhost:8080/", "height": 238} print('Unitary of the X gate') print(cirq.unitary(cirq.X)) print('Unitary of SWAP operator on two qubits.') q0, q1 = cirq.LineQubit.range(2) print(cirq.unitary(cirq.SWAP(q0, q1))) print('Unitary of a sample circuit') print(cirq.unitary(cirq.Circuit(cirq.X(q0), cirq.SWAP(q0, q1)))) # + [markdown] id="Ls6Tnx90Y94Q" colab_type="text" # ### Decompositions # # Many gates can be decomposed into an equivalent circuit with simpler operations and gates. 
This is called decomposition and can be accomplished with the `cirq.decompose` protocol. # # For instance, a Hadamard H gate can be decomposed into X and Y gates: # + id="u8JwZaAUfbSv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ea971a0b-6265-4f03-8d6e-5220e8a1008c" print(cirq.decompose(cirq.H(cirq.LineQubit(0)))) # + [markdown] id="B8ciZZSSf2jb" colab_type="text" # Another example is the 3-qubit Toffoli gate, which is equivalent to a controlled-controlled-X gate. Many devices do not support a three qubit gate, so it is important # + id="bbjRWlzjgPwf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="eeed11be-7865-4f4d-f74d-60a2d050c1e3" q0, q1, q2 = cirq.LineQubit.range(3) print(cirq.Circuit(cirq.decompose(cirq.TOFFOLI(q0, q1, q2)))) # + [markdown] id="VWcik4ZwggXj" colab_type="text" # The above decomposes the Toffoli into a simpler set of one-qubit gates and CZ gates at the cost of lengthening the circuit considerably. # # Some devices will automatically decompose gates that they do not support. For instance, if we use the `Foxtail` device from above, we can see this in action by adding an unsupported SWAP gate: # + id="oS7vWnuHjLhE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="2a50461b-374d-4a49-9145-31868314d05a" swap = cirq.SWAP(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)) print(cirq.Circuit(swap, device=cirq.google.Foxtail)) # + [markdown] id="rIUbvdVQkHbX" colab_type="text" # ### Optimizers # # The last concept in this tutorial is the optimizer. An optimizer can take a circuit and modify it. Usually, this will entail combining or modifying operations to make it more efficient and shorter, though an optimizer can, in theory, do any sort of circuit manipulation. # # For example, the `MergeSingleQubitGates` optimizer will take consecutive single-qubit operations and merge them into a single `PhasedXZ` operation. 
# + id="5WvfOdaG5C_6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="f06c8c8c-c597-4beb-edd9-badea86efc9d" q=cirq.GridQubit(1, 1) optimizer=cirq.MergeSingleQubitGates() c=cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25) print(c) optimizer.optimize_circuit(c) print(c) # + [markdown] id="xRfQqzdx7lUI" colab_type="text" # Other optimizers can assist in transforming a circuit into operations that are native operations on specific hardware devices. You can find more about optimizers and how to create your own elsewhere in the documentation. # + [markdown] id="8QbTGmKlYT4i" colab_type="text" # ## Next steps # # After completing this tutorial, you should be able to use gates and operations to construct your own quantum circuits, simulate them, and to use sweeps. It should give you a brief idea of the commonly used # # There is much more to learn and try out for those who are interested: # # * Learn about the variety of [Gates](gates.ipynb) available in cirq and more about the different ways to construct [Circuits](circuits.ipynb) # * Learn more about [Simulations](simulation.ipynb) and how it works. # * Learn about [Noise](noise.ipynb) and how to utilize multi-level systems using [Qudits](qudits.ipynb) # * Dive into some [Examples](examples.md) and some in-depth tutorials of how to use cirq. # # Also, join our [cirq-announce mailing list](https://groups.google.com/forum/#!forum/cirq-announce) to hear about changes and releases or go to the [cirq github](https://github.com/quantumlib/Cirq/) to file issues.
docs/tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Свойство 1 import numpy as np A = np.matrix("1 2 3; 4 5 6") print(A) R = (A.T).T print(R) # Свойство 2 A = np.matrix("1 2 3; 4 5 6") B = np.matrix("7 8 9; 0 7 5") L = (A + B).T R = A.T + B.T print(L) print(R) # Свойство 3 A = np.matrix("1 2; 3 4") B = np.matrix("5 6; 7 8") L = (A.dot(B)).T R = (B.T).dot(A.T) print(L) print(R) # Свойство 4 A = np.matrix("1 2 3; 4 5 6") k = 3 L = (k * A).T R = k * (A.T) print(L) print(R) # Свойство 5 A = np.matrix("1 2; 3 4") A_det = np.linalg.det(A) A_T_det = np.linalg.det(A.T) print(format(A_det, ".9g")) print(format(A_T_det, ".9g")) # Действия над матрицами # Свойство 1 A = np.matrix("1 2; 3 4") L = 1*A R = A print(L) print(R) # Свойство 2 A = np.matrix("1 2; 3 4") Z = np.matrix("0 0; 0 0") L = 0*A R = Z print(L) print(R) # Свойство 3 A = np.matrix("1 2; 3 4") p = 2 q = 3 L = (p + q) * A R = p * A + q * A print(L) print(R) # Свойство 4 A = np.matrix("1 2; 3 4") p = 2 q = 3 L = (p*q) * A R = p * (q * A) print(L) print(R) # Свойство 5 A = np.matrix("1 2; 3 4") B = np.matrix("5 6; 7 8") k = 3 L = k * (A + B) R = k * A + k * B print(L) print(R) # Сложение матриц # Свойство 1 A = np.matrix("1 6 3; 8 2 7") B = np.matrix(" 8 1 5; 6 9 12") C = A + B print(C) # Свойство 2 A = np.matrix("1 2; 3 4") B = np.matrix("5 6; 7 8") C = np.matrix("1 7; 9 3") L = A + (B + C) R = (A + B) + C print(L) print(R) # Свойство 3 A = np.matrix("1 2; 3 4") Z = np.matrix("0 0; 0 0") L = A + (-1)*A print(L) print(Z) # Умножение матриц # Свойство 1 # Свойство 2 # Свойство 3 # Свойство 4 # Свойство 5
1pic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fit a battery of baseline classifiers (via lazypredict) that predict a
# page's protection level from page-metadata features.

# +
import lazypredict
import sys
import numpy as np
# Print full arrays instead of the truncated "..." summary.
np.set_printoptions(threshold=sys.maxsize)

# Read data file.
# NOTE(review): assumes the CSV provides the columns used below
# ('protection_level', 'page_title', 'protection_expiry', 'page_length',
# 'total_edits', 'number_page_watchers',
# 'number_page_watchers_recent_edits') — confirm against the dataset.
import pandas as pd
filepath = "balanced_dataset_MASTER.csv"
df = pd.read_csv(filepath)
features = df

# Labels are the values we want to predict.
labels = np.array(df['protection_level'])

# Encode the protection level as an ordinal integer:
# 0 => unprotected
# 1 => autoconfirmed
# 2 => extendedconfirmed
# 3 => sysop
labels_encoded = []
for item in labels:
    if(item =="unprotected"):
        labels_encoded.append(0)
    elif(item == "autoconfirmed"):
        labels_encoded.append(1)
    elif(item == "extendedconfirmed"):
        labels_encoded.append(2)
    elif(item == "sysop"):
        labels_encoded.append(3)
labels_encoded  # cell output: inspect the encoded labels

# Remove the label and non-numeric identifier columns from the features.
# list_drop_columns = ['protection_level', 'page_title', 'protection_expiry', 'number_page_watchers',
#                      'number_page_watchers_recent_edits', 'page_views_past_30days']
features = features.drop('protection_level', axis = 1)
features = features.drop('page_title', axis = 1)
features = features.drop('protection_expiry', axis = 1)
# features = features.drop('page_id', axis = 1)
# features = features.drop(list_drop_columns, axis=1)

# Replace placeholder strings with NaN so the columns can become numeric.
features = features.replace('Fewer than 30 watchers',np.NaN)
features = features.replace('There may or may not be a watching user visiting recent edits',np.NaN)

# Convert columns to float (they arrive as strings because of the
# placeholder values replaced above).
features['page_length'] = features['page_length'].astype(float)
features['total_edits'] = features['total_edits'].astype(float)
features['number_page_watchers'] = features['number_page_watchers'].astype(float)
features['number_page_watchers_recent_edits'] = features['number_page_watchers_recent_edits'].astype(float)

# Saving feature names for later use.
feature_list = list(features.columns)

# Convert to numpy array.
features = np.array(features)

from sklearn.model_selection import train_test_split

# Split the data into training and testing sets (80/20 split, fixed seed).
train_features, test_features, train_labels, test_labels = train_test_split(features, labels_encoded, test_size =0.20, random_state = 53)

X_train = train_features
y_train = train_labels
X_test = test_features
y_test = test_labels

# Fit every default lazypredict classifier and report comparative metrics.
from lazypredict.Supervised import LazyClassifier
clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
models,predictions = clf.fit(X_train, X_test, y_train, y_test)
print(models)
# -

df  # cell output: inspect the raw dataframe

# Scratch check of numpy shape semantics.
y = np.array([1, 2, 3, 4])
y.shape
dataset/trial_1200/removing columns from correlation matrix.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.019399, "end_time": "2021-04-23T11:49:09.423108", "exception": false, "start_time": "2021-04-23T11:49:09.403709", "status": "completed"} tags=[] # # Parcels Experiment:<br><br>Expanding the polyline code to release particles at density based on local velocity normal to section. # # _(Based on an experiment originally designed by <NAME>.)_ # # _(Runs on GEOMAR Jupyter Server at https://schulung3.geomar.de/user/workshop007/lab)_ # + [markdown] papermill={"duration": 0.018312, "end_time": "2021-04-23T11:49:09.459866", "exception": false, "start_time": "2021-04-23T11:49:09.441554", "status": "completed"} tags=[] # ## To do # # - Check/ask how OceanParcels deals with partial cells, if it does. # - It doesn't. Does it matter? # + [markdown] papermill={"duration": 0.018314, "end_time": "2021-04-23T11:49:09.498275", "exception": false, "start_time": "2021-04-23T11:49:09.479961", "status": "completed"} tags=[] # ## Technical preamble # + papermill={"duration": 3.682704, "end_time": "2021-04-23T11:49:13.199063", "exception": false, "start_time": "2021-04-23T11:49:09.516359", "status": "completed"} tags=[] # %matplotlib inline from parcels import ( AdvectionRK4_3D, ErrorCode, FieldSet, JITParticle, ParticleSet, Variable ) # from operator import attrgetter from datetime import datetime, timedelta import numpy as np from pathlib import Path import matplotlib.pyplot as plt import cmocean as co import pandas as pd import xarray as xr # import dask as dask # + [markdown] papermill={"duration": 0.018501, "end_time": "2021-04-23T11:49:13.240761", "exception": false, "start_time": "2021-04-23T11:49:13.222260", "status": "completed"} tags=[] # ## Experiment settings (user input) # + [markdown] papermill={"duration": 0.01829, 
"end_time": "2021-04-23T11:49:13.277623", "exception": false, "start_time": "2021-04-23T11:49:13.259333", "status": "completed"} tags=[] # ### Parameters # These can be set in papermill # + papermill={"duration": 0.024623, "end_time": "2021-04-23T11:49:13.320732", "exception": false, "start_time": "2021-04-23T11:49:13.296109", "status": "completed"} tags=["parameters"] # OSNAP multiline details sectionPathname = '../data/external/' sectionFilename = 'osnap_pos_wp.txt' sectionname = 'osnap' # location of input data path_name = '/data/iAtlantic/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/' experiment_name = 'VIKING20X.L46-KKG36107B' data_resolution = '1m' w_name_extension = '_repaire_depthw_time' # location of mask data mask_path_name = '/data/iAtlantic/ocean-only/VIKING20X.L46-KKG36107B/nemo/suppl/' mesh_mask_filename = '1_mesh_mask.nc_notime_depthw' # location of output data outpath_name = '../data/raw/' year_prefix = 201 # this does from 2000 onwards # set line segment to use start_vertex = 4 end_vertex = 12 # experiment duration etc runtime_in_days = 10 dt_in_minutes = -10 # repeatdt = timedelta(days=3) # number of particles to track create_number_particles = 200000 # many will not be ocean points use_number_particles = 200000 min_release_depth = 0 max_release_depth = 1_000 # max current speed for particle selection max_current = 1.0 # set base release date and time t_0_str = '2010-01-16T12:00:00' t_start_str = '2016-01-16T12:00:00' # particle positions are stored every x hours outputdt_in_hours = 120 # select subdomain (to decrease needed resources) comment out to use whole domain # sd_i1, sd_i2 = 0, 2404 # western/eastern limit (indices not coordinates) # sd_j1, sd_j2 = 1200, 2499 # southern/northern limit (indices not coordinates) # sd_z1, sd_z2 = 0, 46 # how to initialize the random number generator # --> is set in next cell # RNG_seed = 123 use_dask_chunks = True # + papermill={"duration": 0.023474, "end_time": "2021-04-23T11:49:13.363113", "exception": 
false, "start_time": "2021-04-23T11:49:13.339639", "status": "completed"} tags=["injected-parameters"] # Parameters path_name = "/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/" data_resolution = "5d" w_name_extension = "" mask_path_name = "/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/suppl/" mesh_mask_filename = "1_mesh_mask.nc" year_prefix = "" runtime_in_days = 3650 create_number_particles = 4000000 use_number_particles = 4000000 max_release_depth = 1000 max_current = 2.0 t_0_str = "1980-01-03T12:00:00" t_start_str = "2019-12-19T12:00:00" use_dask_chunks = False # + [markdown] papermill={"duration": 0.018519, "end_time": "2021-04-23T11:49:13.400177", "exception": false, "start_time": "2021-04-23T11:49:13.381658", "status": "completed"} tags=[] # ### Derived variables # + papermill={"duration": 0.02774, "end_time": "2021-04-23T11:49:13.446353", "exception": false, "start_time": "2021-04-23T11:49:13.418613", "status": "completed"} tags=[] # times t_0 = datetime.fromisoformat(t_0_str) # using monthly mean fields. Check dates. 
t_start = datetime.fromisoformat(t_start_str) # RNG seed based on release day (days since 1980-01-03) RNG_seed = int((t_start - t_0).total_seconds() / (60*60*24)) # names of files to load fname_U = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_U.nc' fname_V = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_V.nc' fname_T = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_T.nc' fname_W = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_W.nc{w_name_extension}' sectionPath = Path(sectionPathname) data_path = Path(path_name) mask_path = Path(mask_path_name) outpath = Path(outpath_name) display(t_0) display(t_start) # + papermill={"duration": 0.023929, "end_time": "2021-04-23T11:49:13.489550", "exception": false, "start_time": "2021-04-23T11:49:13.465621", "status": "completed"} tags=[] if dt_in_minutes > 0: direction = '_forwards_' else: direction = '_backward_' year_str = str(t_start.year) month_str = str(t_start.month).zfill(2) day_str = str(t_start.day).zfill(2) days = str(runtime_in_days) seed = str(RNG_seed) npart= str(use_number_particles) # + papermill={"duration": 0.023359, "end_time": "2021-04-23T11:49:13.532127", "exception": false, "start_time": "2021-04-23T11:49:13.508768", "status": "completed"} tags=[] degree2km = 1.852*60.0 # + [markdown] papermill={"duration": 0.018906, "end_time": "2021-04-23T11:49:13.570137", "exception": false, "start_time": "2021-04-23T11:49:13.551231", "status": "completed"} tags=[] # ## Construct input / output paths etc. 
# + papermill={"duration": 0.023274, "end_time": "2021-04-23T11:49:13.612701", "exception": false, "start_time": "2021-04-23T11:49:13.589427", "status": "completed"} tags=[]
# Full path to the NEMO mesh-mask file (grid geometry and land mask).
mesh_mask = mask_path / mesh_mask_filename

# + [markdown] papermill={"duration": 0.019059, "end_time": "2021-04-23T11:49:13.651014", "exception": false, "start_time": "2021-04-23T11:49:13.631955", "status": "completed"} tags=[]
# ## Load input datasets

# + papermill={"duration": 0.029896, "end_time": "2021-04-23T11:49:13.700257", "exception": false, "start_time": "2021-04-23T11:49:13.670361", "status": "completed"} tags=[]
# NOTE(review): the misspelling "defintions" is kept deliberately — the name is
# called elsewhere in this file (see create_fieldset below).
def fieldset_defintions(
        list_of_filenames_U, list_of_filenames_V,
        list_of_filenames_W, list_of_filenames_T,
        mesh_mask):
    """Build an OceanParcels FieldSet from VIKING20X NEMO model output.

    Maps the NEMO C-grid variables (zonal/meridional/vertical velocity,
    temperature, salinity, mixed-layer depth) onto Parcels fields, with
    lon/lat coordinates taken from the mesh-mask file and the vertical
    coordinate taken from the first W-grid file.

    Parameters
    ----------
    list_of_filenames_U, list_of_filenames_V, list_of_filenames_W,
    list_of_filenames_T : list
        Sorted lists of paths to the grid_U / grid_V / grid_W / grid_T
        output files.  T and S (and MXL) are both read from the T-grid
        files.
    mesh_mask : path-like
        Path to the NEMO mesh-mask file providing `glamf`/`gphif`
        coordinates and the domain dimensions used for subsetting.

    Returns
    -------
    FieldSet built via ``FieldSet.from_nemo`` with C-grid tracer
    interpolation and spherical mesh.  Reads the module-level flag
    ``use_dask_chunks`` to decide whether to chunk the input with dask.
    """
    # Open the mesh mask only to query the horizontal domain size (x/y)
    # for the index-based subsetting below.
    ds_mask = xr.open_dataset(mesh_mask)

    # For each Parcels field: where its lon/lat/depth coordinates and its
    # data files come from.  Depth is taken from the first W file for all
    # fields; S shares the T-grid data files.
    filenames = {'U': {'lon': (mesh_mask), 'lat': (mesh_mask),
                       'depth': list_of_filenames_W[0],
                       'data': list_of_filenames_U},
                 'V': {'lon': (mesh_mask), 'lat': (mesh_mask),
                       'depth': list_of_filenames_W[0],
                       'data': list_of_filenames_V},
                 'W': {'lon': (mesh_mask), 'lat': (mesh_mask),
                       'depth': list_of_filenames_W[0],
                       'data': list_of_filenames_W},
                 'T': {'lon': (mesh_mask), 'lat': (mesh_mask),
                       'depth': list_of_filenames_W[0],
                       'data': list_of_filenames_T},
                 'S': {'lon': (mesh_mask), 'lat': (mesh_mask),
                       'depth': list_of_filenames_W[0],
                       'data': list_of_filenames_T},
                 'MXL': {'lon': (mesh_mask), 'lat': (mesh_mask),
                         'data': list_of_filenames_T}
                 }

    # NetCDF variable name inside the data files for each Parcels field.
    variables = {'U': 'vozocrtx',
                 'V': 'vomecrty',
                 'W': 'vovecrtz',
                 'T': 'votemper',
                 'S': 'vosaline',
                 'MXL': 'somxl010'
                 }

    # Dimension names inside the files.  All fields use the f-node
    # coordinates glamf/gphif, as required by Parcels' C-grid
    # interpolation scheme.
    dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif',
                        'depth': 'depthw', 'time': 'time_counter'},  # needs to be on f-nodes
                  'V': {'lon': 'glamf', 'lat': 'gphif',
                        'depth': 'depthw', 'time': 'time_counter'},  # needs to be on f-nodes
                  'W': {'lon': 'glamf', 'lat': 'gphif',
                        'depth': 'depthw', 'time': 'time_counter'},  # needs to be on f-nodes
                  'T': {'lon': 'glamf', 'lat': 'gphif',
                        'depth': 'depthw', 'time': 'time_counter'},  # needs to be on t-nodes
                  'S': {'lon': 'glamf', 'lat': 'gphif',
                        'depth': 'depthw', 'time': 'time_counter'},  # needs to be on t-nodes
                  'MXL': {'lon': 'glamf', 'lat': 'gphif',
                          'time': 'time_counter'},  # needs to be on t-nodes
                  }

    # exclude the two grid cells at the edges of the nest as they contain 0
    # and everything south of 20N
    # (1132 is the southern y-index bound; presumably corresponds to ~20N
    # in this nest — TODO confirm against the mesh.)
    indices = {'lon': range(2, ds_mask.x.size-2),
               'lat': range(1132, ds_mask.y.size-2)}
    # indices = {
    #     'U': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat': range(sd_j1, sd_j2)},
    #     'V': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat': range(sd_j1, sd_j2)},
    #     'W': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)},
    #     'T': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)},
    #     'S': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)}
    # }

    # Optional dask chunking of the input fields (module-level switch).
    if use_dask_chunks:
        field_chunksizes = {'U': {'lon': ('x', 1024), 'lat': ('y', 128),
                                  'depth': ('depthw', 64), 'time': ('time_counter', 3)},  # needs to be on f-nodes
                            'V': {'lon': ('x', 1024), 'lat': ('y', 128),
                                  'depth': ('depthw', 64), 'time': ('time_counter', 3)},  # needs to be on f-nodes
                            'W': {'lon': ('x', 1024), 'lat': ('y', 128),
                                  'depth': ('depthw', 64), 'time': ('time_counter', 3)},  # needs to be on f-nodes
                            'T': {'lon': ('x', 1024), 'lat': ('y', 128),
                                  'depth': ('depthw', 64), 'time': ('time_counter', 3)},  # needs to be on t-nodes
                            'S': {'lon': ('x', 1024), 'lat': ('y', 128),
                                  'depth': ('depthw', 64), 'time': ('time_counter', 3)},  # needs to be on t-nodes
                            'MXL': {'lon': ('x', 1024), 'lat': ('y', 128),
                                    'time': ('time_counter', 3)},  # needs to be on t-nodes
                            }
    else:
        field_chunksizes = None

    return FieldSet.from_nemo(
        filenames, variables, dimensions, indices=indices,
        chunksize=field_chunksizes,  # = None for no chunking
        mesh='spherical',
        tracer_interp_method='cgrid_tracer'
        # ,time_periodic=time_loop_period
        # ,allow_time_extrapolation=True
    )


# + papermill={"duration": 0.024563, "end_time": "2021-04-23T11:49:13.744467", "exception": false, "start_time":
"2021-04-23T11:49:13.719904", "status": "completed"} tags=[] def create_fieldset( data_path=data_path, experiment_name=experiment_name, fname_U=fname_U, fname_V=fname_V, fname_W=fname_W, fname_T=fname_T, mesh_mask = mesh_mask ): files_U = list(sorted((data_path).glob(fname_U))) files_V = list(sorted((data_path).glob(fname_V))) files_W = list(sorted((data_path).glob(fname_W))) files_T = list(sorted((data_path).glob(fname_T))) print(files_U) fieldset = fieldset_defintions( files_U, files_V, files_W, files_T, mesh_mask) return fieldset # + papermill={"duration": 398.608821, "end_time": "2021-04-23T11:55:52.372475", "exception": false, "start_time": "2021-04-23T11:49:13.763654", "status": "completed"} tags=[] fieldset = create_fieldset() # + [markdown] papermill={"duration": 0.01967, "end_time": "2021-04-23T11:55:52.412474", "exception": false, "start_time": "2021-04-23T11:55:52.392804", "status": "completed"} tags=[] # ## Create Virtual Particles # + [markdown] papermill={"duration": 0.019703, "end_time": "2021-04-23T11:55:52.451847", "exception": false, "start_time": "2021-04-23T11:55:52.432144", "status": "completed"} tags=[] # #### add a couple of simple plotting routines # + papermill={"duration": 0.027714, "end_time": "2021-04-23T11:55:52.499172", "exception": false, "start_time": "2021-04-23T11:55:52.471458", "status": "completed"} tags=[] def plot_section_sdist(): plt.figure(figsize=(10,5)) u = np.array([p.uvel for p in pset]) * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v = np.array([p.vvel for p in pset]) * degree2km * 1000.0 section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data y = (pset.lat - lonlat.lat[section_index]) * degree2km x = (pset.lon - lonlat.lon[section_index]) * degree2km*np.cos(np.radians(lonlat2mean.lat[section_index+1].data)) dist = np.sqrt(x**2 + y**2) + lonlatdiff.length_west[section_index].data plt.scatter( dist, [p.depth for p in 
pset], 1, u_normal, cmap=co.cm.balance,vmin=-0.3,vmax=0.3 ) plt.ylim(1200,0) plt.colorbar(label = r'normal velocity [$\mathrm{m\ s}^{-1}$]') plt.xlabel('distance [km]') plt.ylabel('depth [m]') return # + papermill={"duration": 0.026059, "end_time": "2021-04-23T11:55:52.545292", "exception": false, "start_time": "2021-04-23T11:55:52.519233", "status": "completed"} tags=[] def plot_section_lon(): plt.figure(figsize=(10,5)) u = np.array([p.uvel for p in pset]) * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v = np.array([p.vvel for p in pset]) * degree2km * 1000.0 section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data plt.scatter( [p.lon for p in pset], [p.depth for p in pset], 1, u_normal, cmap=co.cm.balance,vmin=-0.3,vmax=0.3 ) plt.ylim(1200,0) plt.colorbar(label = r'normal velocity [$\mathrm{m\ s}^{-1}$]'); plt.xlabel('longitude [$\degree$E]') plt.ylabel('depth [m]') return # + papermill={"duration": 0.025266, "end_time": "2021-04-23T11:55:52.590365", "exception": false, "start_time": "2021-04-23T11:55:52.565099", "status": "completed"} tags=[] class SampleParticle(JITParticle): """Add variables to the standard particle class. Particles will sample temperature and track the age of the particle. Particles also have a flag `alive` that is 1 if the particle is alive and 0 otherwise. Furthermore, we have a `speed_param` that scales the velocity with which particles can swim towards the surface. Note that we don't initialize temp from the actual data. This speeds up particle creation, but might render initial data point less useful. 
""" mxl = Variable('mxl', dtype=np.float32, initial=-100) temp = Variable('temp', dtype=np.float32, initial=-100) salt = Variable('salt', dtype=np.float32, initial=-100) uvel = Variable('uvel', dtype=np.float32, initial=0) vvel = Variable('vvel', dtype=np.float32, initial=0) # wvel = Variable('wvel', dtype=np.float32, initial=0) # alive = Variable('alive', dtype=np.int32, initial=1) # speed_param = Variable('speed_param', dtype=np.float32, initial=1) # age = Variable('age', dtype=np.int32, initial=0, to_write=True) # + [markdown] papermill={"duration": 0.019907, "end_time": "2021-04-23T11:55:52.630344", "exception": false, "start_time": "2021-04-23T11:55:52.610437", "status": "completed"} tags=[] # ## Create a set of particles with random initial positions # # We seed the RNG to be reproducible (and to be able to quickly create a second equivalent experiment with differently chosen compatible initial positions), and create arrays of random starting times, lats, lons, depths, and speed parameters (see kernel definitions below for details). # # Initially create points on 'rectangle'. Land points are removed later in a OceanParcels 'run' with runtime and timedelta zero. 
# + [markdown] papermill={"duration": 0.019771, "end_time": "2021-04-23T11:55:52.670063", "exception": false, "start_time": "2021-04-23T11:55:52.650292", "status": "completed"} tags=[] # ### First set up the piecewise section # + papermill={"duration": 0.050511, "end_time": "2021-04-23T11:55:52.740552", "exception": false, "start_time": "2021-04-23T11:55:52.690041", "status": "completed"} tags=[] lonlat = xr.Dataset(pd.read_csv(sectionPath / sectionFilename,delim_whitespace=True)) # + papermill={"duration": 0.312636, "end_time": "2021-04-23T11:55:53.073728", "exception": false, "start_time": "2021-04-23T11:55:52.761092", "status": "completed"} tags=[] lonlat.lon.attrs['long_name']='Longitude' lonlat.lat.attrs['long_name']='Latitude' lonlat.lon.attrs['standard_name']='longitude' lonlat.lat.attrs['standard_name']='latitude' lonlat.lon.attrs['units']='degrees_east' lonlat.lat.attrs['units']='degrees_north' lonlatdiff = lonlat.diff('dim_0') lonlat2mean= lonlat.rolling({'dim_0':2}).mean() lonlat.plot.scatter(x='lon',y='lat') lonlat2mean.plot.scatter(x='lon',y='lat') lonlatdiff = lonlatdiff.assign({'y':lonlatdiff['lat']*degree2km}) lonlatdiff = lonlatdiff.assign({'x':lonlatdiff['lon']*degree2km*np.cos(np.radians(lonlat2mean.lat.data[1:]))}) lonlatdiff=lonlatdiff.assign({'length':np.sqrt(lonlatdiff['x']**2+lonlatdiff['y']**2)}) lonlatdiff=lonlatdiff.assign({'length_west':lonlatdiff.length.sum() - np.cumsum(lonlatdiff.length[::-1])[::-1]}) lonlatdiff=lonlatdiff.assign({'costheta':lonlatdiff['x']/lonlatdiff['length']}) lonlatdiff=lonlatdiff.assign({'sintheta':lonlatdiff['y']/lonlatdiff['length']}) total_length = lonlatdiff.length.sum().data print(total_length) # + papermill={"duration": 0.026275, "end_time": "2021-04-23T11:55:53.121310", "exception": false, "start_time": "2021-04-23T11:55:53.095035", "status": "completed"} tags=[] lonlatdiff.length.shape[0] # + [markdown] papermill={"duration": 0.020872, "end_time": "2021-04-23T11:55:53.163262", "exception": false, 
"start_time": "2021-04-23T11:55:53.142390", "status": "completed"} tags=[] # ### Seed particles uniform random along OSNAP section # + papermill={"duration": 0.131547, "end_time": "2021-04-23T11:55:53.315779", "exception": false, "start_time": "2021-04-23T11:55:53.184232", "status": "completed"} tags=[] np.random.seed(RNG_seed) # define time of release for each particle relative to t0 # can start each particle at a different time if required # here all start at time t_start. times = [] lons = [] lats = [] depths = [] # for subsect in range(lonlatdiff.length.shape[0]): for subsect in range(start_vertex,end_vertex): number_particles = int(create_number_particles*lonlatdiff.length[subsect]/total_length) time = np.zeros(number_particles) time += (t_start - t_0).total_seconds() # start along a line from west to east west_lat = lonlat.lat[subsect].data west_lon = lonlat.lon[subsect].data east_lat = lonlat.lat[subsect+1].data east_lon = lonlat.lon[subsect+1].data lon = np.random.uniform( low=west_lon, high = east_lon, size=time.shape ) lat = west_lat + ((lon - west_lon) * (east_lat - west_lat)/ (east_lon - west_lon)) # at depths from surface to max_release_depth depth = np.random.uniform( low=min_release_depth, high=max_release_depth, size=time.shape ) times.append(time) lons.append(lon) lats.append(lat) depths.append(depth) time = np.concatenate(times) lon = np.concatenate(lons) lat = np.concatenate(lats) depth = np.concatenate(depths) # + [markdown] papermill={"duration": 0.020687, "end_time": "2021-04-23T11:55:53.357937", "exception": false, "start_time": "2021-04-23T11:55:53.337250", "status": "completed"} tags=[] # ### Build particle set # + papermill={"duration": 0.948128, "end_time": "2021-04-23T11:55:54.326698", "exception": false, "start_time": "2021-04-23T11:55:53.378570", "status": "completed"} tags=[] # %%time pset = ParticleSet( fieldset=fieldset, pclass=SampleParticle, lat=lat, lon=lon, # speed_param=speed_param, depth=depth, time=time # repeatdt = repeatdt 
) # + papermill={"duration": 0.026148, "end_time": "2021-04-23T11:55:54.375788", "exception": false, "start_time": "2021-04-23T11:55:54.349640", "status": "completed"} tags=[] print(f"Created {len(pset)} particles.") # display(pset[:5]) # display(pset[-5:]) # + [markdown] papermill={"duration": 0.021589, "end_time": "2021-04-23T11:55:54.418658", "exception": false, "start_time": "2021-04-23T11:55:54.397069", "status": "completed"} tags=[] # ## Compose custom kernel # # We'll create three additional kernels: # - One Kernel adds velocity sampling # - One Kernel adds temperature sampling # - One kernel adds salinity sampling # # Then, we combine the builtin `AdvectionRK4_3D` kernel with these additional kernels. # + papermill={"duration": 0.026433, "end_time": "2021-04-23T11:55:54.466573", "exception": false, "start_time": "2021-04-23T11:55:54.440140", "status": "completed"} tags=[] def velocity_sampling(particle, fieldset, time): '''Sample velocity.''' (particle.uvel,particle.vvel) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] # + papermill={"duration": 0.025976, "end_time": "2021-04-23T11:55:54.514238", "exception": false, "start_time": "2021-04-23T11:55:54.488262", "status": "completed"} tags=[] def temperature_sampling(particle, fieldset, time): '''Sample temperature.''' particle.temp = fieldset.T[time, particle.depth, particle.lat, particle.lon] # + papermill={"duration": 0.02618, "end_time": "2021-04-23T11:55:54.561949", "exception": false, "start_time": "2021-04-23T11:55:54.535769", "status": "completed"} tags=[] def salinity_sampling(particle, fieldset, time): '''Sample salinity.''' particle.salt = fieldset.S[time, particle.depth, particle.lat, particle.lon] # + papermill={"duration": 0.057126, "end_time": "2021-04-23T11:55:54.640798", "exception": false, "start_time": "2021-04-23T11:55:54.583672", "status": "completed"} tags=[] def mxl_sampling(particle, fieldset, time): '''Sample mixed layer depth.''' particle.mxl = fieldset.MXL[time, 
particle.depth, particle.lat, particle.lon] # + papermill={"duration": 0.307968, "end_time": "2021-04-23T11:55:54.970716", "exception": false, "start_time": "2021-04-23T11:55:54.662748", "status": "completed"} tags=[] custom_kernel = ( pset.Kernel(AdvectionRK4_3D) # + pset.Kernel(temperature_sensitivity) + pset.Kernel(temperature_sampling) + pset.Kernel(salinity_sampling) + pset.Kernel(velocity_sampling) + pset.Kernel(mxl_sampling) ) # + [markdown] papermill={"duration": 0.021682, "end_time": "2021-04-23T11:55:55.014490", "exception": false, "start_time": "2021-04-23T11:55:54.992808", "status": "completed"} tags=[] # ## Be able to handle errors during integration # # We have restricted our domain so in principle, particles could reach undefined positions. # In that case, we want to just delete the particle (without forgetting its history). # + papermill={"duration": 0.02637, "end_time": "2021-04-23T11:55:55.062581", "exception": false, "start_time": "2021-04-23T11:55:55.036211", "status": "completed"} tags=[] def DeleteParticle(particle, fieldset, time): particle.delete() recovery_cases = { ErrorCode.ErrorOutOfBounds: DeleteParticle, ErrorCode.Error: DeleteParticle, ErrorCode.ErrorInterpolation: DeleteParticle } # + [markdown] papermill={"duration": 0.021796, "end_time": "2021-04-23T11:55:55.106339", "exception": false, "start_time": "2021-04-23T11:55:55.084543", "status": "completed"} tags=[] # ## Run with runtime=0 to initialise fields # + papermill={"duration": 238.951213, "end_time": "2021-04-23T11:59:54.079382", "exception": false, "start_time": "2021-04-23T11:55:55.128169", "status": "completed"} tags=[] # %%time # with dask.config.set(**{'array.slicing.split_large_chunks': False}): pset.execute( custom_kernel, runtime=0, # dt=timedelta(minutes=0), # output_file=outputfile, recovery=recovery_cases ) # + papermill={"duration": 43.073741, "end_time": "2021-04-23T12:00:37.175630", "exception": false, "start_time": "2021-04-23T11:59:54.101889", "status": 
"completed"} tags=[] plot_section_sdist() # + [markdown] papermill={"duration": 0.025563, "end_time": "2021-04-23T12:00:37.227023", "exception": false, "start_time": "2021-04-23T12:00:37.201460", "status": "completed"} tags=[] # ## Trim unwanted points from ParticleSet # # Use initialised fields to remove land points. We test `temp == 0.0` (the mask value over land). # # + papermill={"duration": 5.521193, "end_time": "2021-04-23T12:00:42.775416", "exception": false, "start_time": "2021-04-23T12:00:37.254223", "status": "completed"} tags=[] t = np.array([p.temp for p in pset]) # u = np.array([p.uvel for p in pset]) # v = np.array([p.vvel for p in pset]) pset.remove_indices(np.argwhere(t == 0).flatten()) # pset.remove(np.argwhere(x * y * z == 0).flatten()) print(len(pset)) # + papermill={"duration": 34.293508, "end_time": "2021-04-23T12:01:17.097279", "exception": false, "start_time": "2021-04-23T12:00:42.803771", "status": "completed"} tags=[] plot_section_sdist() # + [markdown] papermill={"duration": 0.029348, "end_time": "2021-04-23T12:01:17.156249", "exception": false, "start_time": "2021-04-23T12:01:17.126901", "status": "completed"} tags=[] # ### Test velocity normal to section # + [markdown] papermill={"duration": 0.029199, "end_time": "2021-04-23T12:01:17.214622", "exception": false, "start_time": "2021-04-23T12:01:17.185423", "status": "completed"} tags=[] # #### Velocity conversions from degrees lat/lon per second to m/s # + papermill={"duration": 8.157175, "end_time": "2021-04-23T12:01:25.401087", "exception": false, "start_time": "2021-04-23T12:01:17.243912", "status": "completed"} tags=[] u = np.array([p.uvel for p in pset]) v = np.array([p.vvel for p in pset]) # + papermill={"duration": 0.070125, "end_time": "2021-04-23T12:01:25.504867", "exception": false, "start_time": "2021-04-23T12:01:25.434742", "status": "completed"} tags=[] u=u * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v=v * degree2km * 1000.0 # + [markdown] papermill={"duration": 
0.029938, "end_time": "2021-04-23T12:01:25.565265", "exception": false, "start_time": "2021-04-23T12:01:25.535327", "status": "completed"} tags=[] # #### normal velocities # + papermill={"duration": 0.082486, "end_time": "2021-04-23T12:01:25.677569", "exception": false, "start_time": "2021-04-23T12:01:25.595083", "status": "completed"} tags=[] section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data # + papermill={"duration": 0.039778, "end_time": "2021-04-23T12:01:25.747732", "exception": false, "start_time": "2021-04-23T12:01:25.707954", "status": "completed"} tags=[] abs(u_normal).max() # + [markdown] papermill={"duration": 0.029997, "end_time": "2021-04-23T12:01:25.807719", "exception": false, "start_time": "2021-04-23T12:01:25.777722", "status": "completed"} tags=[] # #### remove particles randomly with probability proportional to normal speed # + papermill={"duration": 0.242594, "end_time": "2021-04-23T12:01:26.079988", "exception": false, "start_time": "2021-04-23T12:01:25.837394", "status": "completed"} tags=[] u_random = np.random.rand(len(u_normal))*max_current pset.remove_indices(np.argwhere(abs(u_normal) < u_random).flatten()) print(len(pset)) # + papermill={"duration": 1.301352, "end_time": "2021-04-23T12:01:27.411555", "exception": false, "start_time": "2021-04-23T12:01:26.110203", "status": "completed"} tags=[] plot_section_sdist() # + [markdown] papermill={"duration": 0.03439, "end_time": "2021-04-23T12:01:27.481372", "exception": false, "start_time": "2021-04-23T12:01:27.446982", "status": "completed"} tags=[] # ## Prepare output # # We define an output file and specify the desired output frequency. 
# + papermill={"duration": 0.040961, "end_time": "2021-04-23T12:01:27.556679", "exception": false, "start_time": "2021-04-23T12:01:27.515718", "status": "completed"} tags=[] # output_filename = 'Parcels_IFFForwards_1m_June2016_2000.nc' npart = str(len(pset)) output_filename = 'tracks_randomvel_mxl_'+sectionname+direction+year_str+month_str+day_str+'_N'+npart+'_D'+days+'_Rnd'+ seed+'.nc' outfile = outpath / output_filename print(outfile) outputfile = pset.ParticleFile( name=outfile, outputdt=timedelta(hours=outputdt_in_hours) ) # + [markdown] papermill={"duration": 0.034733, "end_time": "2021-04-23T12:01:27.626162", "exception": false, "start_time": "2021-04-23T12:01:27.591429", "status": "completed"} tags=[] # ## Execute the experiment # # We'll evolve particles, log their positions and variables to the output buffer and finally export the output to a the file. # + [markdown] papermill={"duration": 0.034606, "end_time": "2021-04-23T12:01:27.695514", "exception": false, "start_time": "2021-04-23T12:01:27.660908", "status": "completed"} tags=[] # ### Run the experiment # + papermill={"duration": 80656.791942, "end_time": "2021-04-24T10:25:44.522339", "exception": false, "start_time": "2021-04-23T12:01:27.730397", "status": "completed"} tags=[] # %%time # with dask.config.set(**{'array.slicing.split_large_chunks': False}): pset.execute( custom_kernel, runtime=timedelta(days=runtime_in_days), dt=timedelta(minutes=dt_in_minutes), output_file=outputfile, recovery=recovery_cases ) # + papermill={"duration": 0.190478, "end_time": "2021-04-24T10:25:44.900132", "exception": false, "start_time": "2021-04-24T10:25:44.709654", "status": "completed"} tags=[] # outputfile.export() # + papermill={"duration": 172.118834, "end_time": "2021-04-24T10:28:37.206435", "exception": false, "start_time": "2021-04-24T10:25:45.087601", "status": "completed"} tags=[] outputfile.close() # + papermill={"duration": 2.876076, "end_time": "2021-04-24T10:28:41.066261", "exception": false, 
"start_time": "2021-04-24T10:28:38.190185", "status": "completed"} tags=[] conda list # + papermill={"duration": 1.063015, "end_time": "2021-04-24T10:28:42.319320", "exception": false, "start_time": "2021-04-24T10:28:41.256305", "status": "completed"} tags=[] pip list
notebooks/executed/037_afox_RunParcels_TS_MXL_Multiline_Randomvel_Papermill_executed_2019-12-19.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:anaconda3] # language: python # name: conda-env-anaconda3-py # --- from __future__ import print_function import os import numpy as np import random import string import tensorflow as tf import zipfile from six.moves import range from six.moves.urllib.request import urlretrieve # + url = 'http://mattmahoney.net/dc/' def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" if not os.path.exists(filename): filename, _ = urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified %s' % filename) else: print(statinfo.st_size) raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename filename = maybe_download('text8.zip', 31344016) # + def read_data(filename): with zipfile.ZipFile(filename) as f: name = f.namelist()[0] data = tf.compat.as_str(f.read(name)) return data text = read_data(filename) print('Data size %d' % len(text)) # - # Число слов в текстах: len(text.split()) words = text.split() gray = ["grey", "gray"] white = ["white"] black = ["black"] for i in range(len(words)): if words[i] in gray: print(" ".join(words[i-2:i+3])) for i in range(len(words)): if words[i] == "white": print(" ".join(words[i-2:i+3])) for i in range(len(words)): if words[i] == "black": print(" ".join(words[i-2:i+3])) # Сколько раз встречаются цвета в текстах # + from collections import defaultdict d = defaultdict(int) for w in words: if w in ["black", "white", "gray", "grey"]: d[w]+= 1 print(d) # - d_gray = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)] d_black = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)] d_white = 
[defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)] for i in range(len(words)): if words[i] in gray: d_gray[0][words[i-2]] += 1 d_gray[1][words[i-1]] += 1 d_gray[2][words[i+1]] += 1 d_gray[3][words[i+2]] += 1 if words[i] in black: d_black[0][words[i-2]] += 1 d_black[1][words[i-1]] += 1 d_black[2][words[i+1]] += 1 d_black[3][words[i+2]] += 1 if words[i] in white: d_white[0][words[i-2]] += 1 d_white[1][words[i-1]] += 1 d_white[2][words[i+1]] += 1 d_white[3][words[i+2]] += 1 import operator def d_sort(x): sorted_x = sorted(x.items(), key=operator.itemgetter(1)) return sorted_x # Сколько раз встречается слово после black d_sort(d_black[2])[::-1] # Сколько раз встречается слово после white d_sort(d_white[2])[::-1] # Сколько раз встречается слово после gray d_sort(d_gray[2])[::-1]
EnglishBlackGrayWhite.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/realmousy/mmsr/blob/master/MMSR.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="aEUrFoj0HcGS" colab_type="text" # **Playground Notebook to show how to load [MMSR](https://github.com/open-mmlab/mmsr) an open source image and video super-resolution toolbox, and try to upscale your own imgs/video** # --- # # # + [markdown] id="Y9jH0ibO3O_q" colab_type="text" # **While installing anaconda might promt you to accept a license!** (in a newer versions) # + id="FyOAdEvr59T4" colab_type="code" colab={} # %cd /content # !wget -c https://repo.continuum.io/archive/Anaconda3-5.1.0-Linux-x86_64.sh # !chmod +x Anaconda3-5.1.0-Linux-x86_64.sh # !bash ./Anaconda3-5.1.0-Linux-x86_64.sh -b -f -p /usr/local # !rm Anaconda3-5.1.0-Linux-x86_64.sh print('Anaconda setup Done') # + [markdown] id="mjhrQySJ1dv5" colab_type="text" # **Install required packages** # + id="Hn6u2CTE2l8A" colab_type="code" colab={} #activating env & installing pkgs # !conda create --name venv_mmsr python=3.6 # !conda activate venv_mmsr # !conda install -y --prefix /usr/local pytorch torchvision cudatoolkit=10.0 -c pytorch # !pip install numpy opencv-python lmdb pyyaml progressbar2 tensorboardX import sys sys.path.append('/usr/local/lib/python3.6/site-packages/') # !conda info # + [markdown] id="iIpr1R0l4_U9" colab_type="text" # *This might take a while ~5-10m (Make a tea or something while waiting :)* # + [markdown] id="WBQqL4MD4Vyx" colab_type="text" # **MMSR git** # + id="36chQLeDsSlK" colab_type="code" colab={} # %cd /content # !rm -r mmsr #remov #my fork of the mmsr framework # !git clone https://github.com/realmousy/mmsr 
# %cd mmsr # !ls # + [markdown] id="mTsAChUl2Qud" colab_type="text" # **Compile DCNv2 module** # + id="qTCrF8gE8wMq" colab_type="code" colab={} # !python --version # %cd /content/mmsr/codes/models/archs/dcn/ # !python setup.py build develop # %cd /content/ # + [markdown] id="RNl4OoFYaP9X" colab_type="text" # **Add [EDVR Models](https://drive.google.com/drive/folders/1WfROVUqKOBS5gGvQzBfU1DNZ4XwPA3LD) to your google drive.** # # # *it wont take any space on your drive* # # down below(in a next text cell) is a how to link it # + id="x2GqGmeBCG-g" colab_type="code" colab={} # available pre trained models ''' # EDVR_Vimeo90K_SR_L.pth # EDVR_REDS_SR_M.pth # EDVR_REDS_SR_L.pth # EDVR_REDS_SRblur_L.pth # EDVR_REDS_deblur_L.pth # EDVR_REDS_deblurcomp_L.pth ''' # currently only 'EDVR_Vimeo90K_SR_L.pth' has been tested MODEL_NAME= 'EDVR_Vimeo90K_SR_L.pth' from google.colab import drive drive.mount('/content/drive') from shutil import copyfile #adjust location '/content/drive/My Drive/EDVR/' if google drive added EDVR into some other folder copyfile(f'/content/drive/My Drive/EDVR/{MODEL_NAME}', f'/content/mmsr/experiments/pretrained_models/{MODEL_NAME}') # + [markdown] id="mHjk3feVGmQG" colab_type="text" # **Open** # [Models](https://drive.google.com/drive/folders/1WfROVUqKOBS5gGvQzBfU1DNZ4XwPA3LD) and click at the name of a Shared folder like on a screenshot down below # # Don't forget to put your auth key into text box # # ![how to](https://i.imgur.com/oh8Rpap.jpg) # + [markdown] id="tTNFHBKQItR-" colab_type="text" # **Model Testing** # # *if you'd like to test the model first you can link the datasets the same way as the models* # # or # # *just skip it* # # [Testing_Datasets link](https://drive.google.com/open?id=1EwEXDYImflknnZS0rJy8gD1zOi8ZgEXa) # # + id="zIZVUjMxIsu3" colab_type="code" colab={} # %cd "/content/drive/My Drive/Testing_datasets" # !cp * /content/mmsr/datasets # %cd /content/mmsr/datasets # !unzip "*.zip" # + id="nOAz8VT_SHuj" colab_type="code" colab={} 
#uncomment this for test # #!python /content/mmsr/codes/test_Vid4_REDS4_with_GT.py # + [markdown] id="hWUVNUeU45oJ" colab_type="text" # **upscale.py** # + id="YSkQmQ7zJFRS" colab_type="code" colab={} # %cd /content/mmsr/codes # + [markdown] id="DOiVfNPD7o12" colab_type="text" # upload any video/sequence of imgs and copy location of it to --input argument in a code cell down below # + id="kI3fwgzh_IE-" colab_type="code" colab={} # --input # input.mp4 video file path (default:'./input.mp4') # --output # output.mp4 enchanced video file path (default:'./output.mp4') # --mode # imgs2HQimgs or imgs2HQvideo (default:'imgs2HQimgs') in colab works only imgs2HQimgs mode # currently video saving in a colab instance doesnt work (but it works on a local system, maybe(probably) some problems with a dependencies) # + id="danUP_jWI76S" colab_type="code" colab={} # !python upscale.py --input /content/videoplayback.mp4
MMSR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

sns.set(style="whitegrid")

import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)
# %matplotlib inline
pd.options.mode.chained_assignment = None
# -

# Load the question data and keep only the columns this analysis uses.
# Each language column (java/python/js/php) is a 0/1 tag flag per question.
df_original = pd.read_csv('data/question_lang.csv')
df_original = df_original[['question_id', 'question_title', 'answer_count',
                           'java', 'python', 'js', 'php']]
# Sort once, descending by answer count, so the first matching row for any
# language filter below is the most-answered question for that language.
df = df_original.sort_values('answer_count', ascending=False)

# ### Comparison of question counts per programming language

# +
# Summing each 0/1 tag column counts the questions per language;
# .loc[1:] drops the first summary row (the answer_count total),
# leaving only the language rows for plotting.
df_lang_count = df[['answer_count', 'java', 'python', 'js', 'php']]
df_sum = df_lang_count.sum()
df_sum = pd.DataFrame({'lang': df_sum.index, 'count': df_sum.values}).loc[1:]

a4_dims = (10.0, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.barplot(x='lang', y='count', data=df_sum, palette=None)
# +
# ### Comparison of answer counts per programming language
# +
# Melt to one row per (question, language) pair, keep only the pairs where
# the language tag is set, then total the answers per language.
df_melt = df[['question_id', 'answer_count', 'java', 'python', 'js', 'php']]
df_melt = pd.melt(df_melt, id_vars=['question_id', 'answer_count'],
                  value_vars=['java', 'python', 'js', 'php'])
df_melt = df_melt.loc[df_melt.value > 0]
df_melt = df_melt[['question_id', 'answer_count', 'variable']]
df_lang_ans = df_melt.groupby(['variable'])['answer_count'].sum()
df_lang_ans = pd.DataFrame({'lang': df_lang_ans.index,
                            'count': df_lang_ans.values})
df_lang_ans = df_lang_ans.sort_values(['count'], ascending=False)

a4_dims = (10.0, 8)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.barplot(x='lang', y='count', data=df_lang_ans, palette=None)
# +
def most_answers_lang(lang):
    """Print the most-answered question among those tagged with *lang*.

    ``lang`` is one of the 0/1 tag column names ('java', 'python', 'js',
    'php'). Relies on the module-level ``df`` already being sorted by
    ``answer_count`` descending, so the first matching row is the maximum.
    """
    matches = df.loc[df[lang] > 0]
    if matches.empty:
        # Guard: .iloc[0] would raise IndexError on an empty selection.
        print('no questions tagged: %s' % lang)
        return
    first = matches.iloc[0]
    question_title = first['question_title']
    question_id = first['question_id']
    answer_count = first['answer_count']
    question_url = 'https://www.zhihu.com/question/%s' % question_id
    print('%s 语言回答数最多的问题: %s\n%s\n回答数:%s'
          % (lang.upper(), question_title, question_url, answer_count))


most_answers_lang('java')
# -
most_answers_lang('python')

most_answers_lang('js')

most_answers_lang('php')
analyze_program_lang.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["remove_cell"] # # Hello Qiskit # - # Simple puzzles to get started with qubits and quantum gates. # # ## Level 1: Beginning with bits # # Like all of the textbook, this document is a Jupyter notebook. However, unlike most of the textbook, you'll have to actually run it to make it work. # # Don't worry if you've never used a Jupyter notebook before. It just means you'll see lots of grey boxes with code in, like the one below. These are known as cells. print("Hello! I'm a code cell") # The way to run the code within a cell depends on the device and input method you are using. In most cases, you should click on the cell and press **Shift-Enter**. However, if you are running this on the Qiskit textbook website, you can just click the 'Run' button below. # # Get started by doing this for the cell below (it will take a second or two to run). print('Set up started...') from qiskit_textbook.games import hello_quantum print('Set up complete!') # The rest of the cells in this notebook contain code that sets up puzzles for you to solve. To get the puzzles, just run the cells. To restart a puzzle, just rerun it. # ### Puzzle 1 # # #### Intro # # Quantum computers are based on *qubits*: the quantum version of a bit. But what exactly is a bit? And how are they used in computers? # # The defining feature of a bit is that it has two possible output values. These can be called `1` and `0`, or 'on' and 'off', or 'True' and 'False'. The names we use don't reall matter. The important point is that there are two of them. # # To get familiar with bits, let's play with one. The simplest thing you can do with a bit (other than leave it alone) is flip its value. We give this simple operation a fancy name: it is called the the `NOT` gate. 
# # Try it out below. # # #### Exercise # * Use the `NOT` gate 3 times. initialize = [] success_condition = {} allowed_gates = {'0': {'NOT': 3}, '1': {}, 'both': {}} vi = [[1], False, False] qubit_names = {'0':'the only bit', '1':None} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # Here we visualized our bit using a circle that was either on (white) or off (black). The effect of the `NOT` gate was to turn it on and off, flipping between the two states of the bit. # ### Puzzle 2 # # #### Intro # # It's more interesting to play with two bits than just one. So here is another to play with. It will look the same as before. But because it is a different bit, it'll be in a different place. # # #### Exercise # * Turn the other bit on. initialize = [] success_condition = {} allowed_gates = {'0': {}, '1': {'NOT': 0}, 'both': {}} vi = [[], False, False] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # You've now mastered the `NOT` gate: the most basic building block of computing. # ### Puzzle 3 # # #### Intro # # We use the word 'gate' to describe the simple tools we can use to manipulate qubits. The `NOT` gate is the simplest example. But to do interesting things with bits, we need to do more than just turn them on and off. # # A slightly more sophisticated gate is called the `CNOT`, which is short for 'controlled-`NOT`'. Thisis used on pairs of bits. # # We choose one of the bits to be the 'control'. This gets to descide whether the `CNOT` actually does anything. The other bit is the 'target'. Simply put, the `CNOT` does a `NOT` on the target bit, but only if the control bit is on. # # The best way to understand this is to try it out! # # #### Exercise # * Use the `CNOT` to turn on the bit on the right. 
# * **Note**: When you are asked to choose a bit, you are choosing which will be the target bit. initialize = [['x', '0']] success_condition = {'IZ': -1.0} allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}} vi = [[], False, False] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # ### Puzzle 4 # # #### Intro # # Any program we run is made up of many gates on many qubits. As a step towards this, let's try something where you'll need to use a couple of `CNOT`s. # # #### Exercise # * Use some CNOTs to turn the left bit off and the right bit on. initialize = [['x', '0']] success_condition = {'ZI': 1.0, 'IZ': -1.0} allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}} vi = [[], False, False] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # Well done! # # These kind of manipulations are what all computing compiles down to. If you had more bits and a controlled-controlled-`NOT` gate, you could build everything from Tetris to self-driving cars. # ### Puzzle 5 # # #### Intro # # You have probably noticed that there is a lot of empty space on the puzzle board. This is because it can be used to visualize more information about our bits. For example, what if our bit values have been generated by some random process? # # We have used black and white circles to represent the bit values of `0` or `1`. So for a random bit, which will give us `0` or `1` with equal probability, we'll use a grey circle. # # Now we can look at how our gates manipulate this randomness. # # #### Exercise # * Make the bit on the right random using a CNOT. 
initialize = [['h', '0']] success_condition = {'IZ': 0.0} allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}} vi = [[], False, False] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # Well done! # # Let's think about what happened here. We did a `CNOT`, controlled by a bit that was randomly either on or off. If it was off, the `CNOT` does nothing and the right bit stays off. If the left bit was on, the `CNOT` does a `NOT` on the right bit to switch it on too. # # This process effectively copies the random value of the left bit over to the right. So outputs from both will look random, but with an important property: they will always output the same random result as each other. # # At the end of the puzzle above, the two bits were represented by two grey circles. This shows that both are random, but tells us nothing about the fact that they always agree. The same two grey circles would also be used if the bits were independently random, or if they were random but always disagreed. To help us tell the difference, we need to add something to the puzzle board. # ### Puzzle 6 # # #### Intro # # In the puzzle below, you'll see a new circle. # # Unlike the circles you've seen so far, doesn't represent a new bit. Instead it tells us whether or not our two bits will agree or not. # # When they are certain to agree, it will be off. When they are certain to disagree, it will be on. If their agreements and disagreements are just random, it will be grey. # # # #### Exercise # * Make the two bits always disagree (that means making the middle circle white). 
initialize = [['h', '0']] success_condition = {'ZZ': -1.0} allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}} vi = [[], False, True] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # ### Puzzle 7 # # #### Intro # # Now you know pretty much need all you need to know about bits. Let's have one more exercise before we move on. # # #### Exercise # * Turn on bit the bit on the right. initialize = [['h', '1']] success_condition = {'IZ': -1.0} allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}} vi = [[], False, True] qubit_names = {'0':'the bit on the left', '1':'the bit on the right'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # Now it's time to see what happens when bits become quantum. It's time for qubits! # ## Level 2: Basic single qubit gates # ### Puzzle 1 # # #### Intro # # Instead of starting with a lecture on quantum mechanics, here is a qubit to play with. Try out the simplest qubit gate, which is called `x`. # # #### Exercise # * Use the `x` gate 3 times, and see what happens initialize = [ ["x","0"] ] success_condition = {"ZI":1.0} allowed_gates = { "0":{"x":3}, "1":{}, "both":{} } vi = [[1],True,True] qubit_names = {'0':'the only qubit', '1':None} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # This puzzle should have seemed very familar. It was exactly the same as the first one for a bit, except that there was an extra circle that didn't do anything. Other than that, the `x` gate had exactly the same the effect as the `NOT`. # ### Puzzle 2 # # #### Intro # # The last puzzle had two circles. These did not represent different bits. Instead, they both represented the same qubit. # # Qubits are the quantum version of bits. 
They have some of the same properties as bits, but they also have some extra features. # # Just as bits are limited to two possible values, so are qubits. When you extract an output from a qubit, you will get a simple bit value: `0` or `1`. However, with qubits there are multiple method that we can use to extract this bit. The result we get depends on the method we use. # # The two circles in the last puzzle represent two different ways we could get a bit out of the same qubit. They are called X and Z measurements. The bottom circle represents the output for the Z measurement, and the top circle represents the X output. # # The color of the circle is used exactly the same as before. If a circle is black, the corresponding output would give the value `0`. A white one means we'd get a `1`. # # #### Exercise # # * Turn off the Z output. initialize = [['x', '0']] success_condition = {'ZI': 1.0} allowed_gates = {'0': {'x': 0}, '1': {}, 'both': {}} vi = [[1], True, True] qubit_names = {'0':'the only qubit', '1':None} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # The process of extracting a bit from a qubit is called 'measurement'. # # It is not possible to independently extract both the X and Z outputs. You have to choose just one. # ### Puzzle 3 # # #### Intro # # In this puzzle we'll see another qubit. This will again have its inner workings represented by two circles. For this qubit, those circles will be on the right of the puzzle board. # # #### Exercise # # * Turn the Z output of the other qubit off. 
initialize = [['x', '1']] success_condition = {'IZ': 1.0} allowed_gates = {'0': {}, '1': {'x': 0}, 'both': {}} vi = [[0], True, True] qubit_names = {'0':None, '1':'the other qubit'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # From now on, we'll start calling the qubits by the names used in programs: The one on the left will be `q[0]`, and the one on the right will be `q[1]`. # # Also, note that the terms Z output and X ouput are not widely used. It would be more usual to say "output of a Z measurement", etc. But that's a bit verbose for these puzzles. # ### Puzzle 4 # # #### Intro # # Now it's time to try a new gate: the `h` gate. # # This is not something that is possible for simple bits. It has the effect of swapping the the two circles of the qubit that it's applied to. # # If you want to see this in a nice animated form, check out the [Hello Quantum](https://helloquantum.mybluemix.net/) app. But while you are here, test it out with the old trick of repeating three times. # # #### Exercise # * Use the `h` gate 3 times. initialize = [] success_condition = {'ZI': 0.0} allowed_gates = {'0': {'h': 3}, '1': {}, 'both': {}} vi = [[1], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) # #### Outro # # Now you have started making quantum programs, you'll see them appear below the puzzle. # # These are actual Qiskit programs that can be run on real quantum computers. They can also be represented by so-called circuit diagrams. To see the circuit diagram for your quantum program, run the code cell below. puzzle.get_circuit().draw(output='mpl') # ### Puzzle 5 # # #### Intro # # When the Z output is certain (fully off or on), the `x` gate simply flips the value. But what if the Z output is random, as we saw in the last puzzle? With this exercise, you'll find out. # # #### Exercise # * Get the Z output fully off. 
You can use as many `h` gates as you like, but use `x` exactly 3 times. initialize = [['h', '1']] success_condition = {'IZ': 1.0} allowed_gates = {'0': {}, '1': {'x': 3, 'h': 0}, 'both': {}} vi = [[0], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # It turns out that a random result is just a random result, even if you flip it. # ### Puzzle 6 # # #### Intro # # We've seen that the `x` gate flips the Z output, but it has no effect on the X output. For that we need a new gate: the `z` gate. # # #### Exercise # * Turn the X output off. initialize = [['h', '0'], ['z', '0']] success_condition = {'XI': 1.0} allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}} vi = [[1], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # The `x` gate flips the Z output and the `z` gate flips the x output. This might seem like a strange way to name things, for now. But as you learn more about quantum computing, it will come to make some sense. # ### Puzzle 7 # # #### Intro # # By combining gates we can get new effects. As a simple example, by combining `z` and `h` we can do the job of an `x`. # # #### Exercise # * Turn on the Z output without using the `x` gate initialize = [] success_condition = {'ZI': -1.0} allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}} vi = [[1], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 8 # # #### Intro # # You might notice that the X outputs are always random when the Z outputs are fully on or off. This is because qubits can never be simultaneously certain about each kind of output. 
If they are certain about one, the other must be random. # # If this wasn't true, we'd be able to use the Z and X outputs to store two bits. This is more memory than a qubit has. Despite the fact that we can extract an output in multiple ways, we are nevertheless not allowed to store more than a bit in a qubit. # # #### Exercise # * Make the X output be off and the Z output be random. initialize = [['h', '0']] success_condition = {'IX': 1.0} allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}} vi = [[0], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 9 # # #### Intro # # The limited certainty of our qubits can also be shared between the two outputs. For example, we can compromise by making both outputs mostly certain (but not completely certain) about the output they would give. # # We will visualize this by using different shades of grey. The darker a circle is, the more likely it's output would be `0`. The lighter it is, the more likely the output would be `1`. # # #### Exercise # * Make the two circles for `q[1]` both light grey. This means that they'd be highly likely, but not certain, to output a `1`. initialize = [['ry(pi/4)', '1']] success_condition = {'IZ': -0.7071, 'IX': -0.7071} allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}} vi = [[0], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 10 # # #### Intro # # Now you know the basic tools, you can tackle both qubits at once. # # #### Exercise # * Make both Z outputs random. 
initialize = [['x', '1']] success_condition = {'ZI': 0.0, 'IZ': 0.0} allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}} vi = [[], True, False] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # Each Z output here would randomly output a `0` or a `1`. But will their outputs be correlated? Anti-correlated? Completely unrelated? # # Just as we did with bits, we'll keep track of this information with some extra circles. # ### Puzzle 11 # # #### Intro # # In this puzzle you'll see four new circles. One of them you have already seen before in Level 1. There it kept track of whether the two bit values would be certain to agree (black) or disagree (white). Here it does the same job for the Z outputs of both qubits. # # #### Exercise # # * Make the Z outputs certain to disagree. initialize = [['h','0'],['h','1']] success_condition = {'ZZ': -1.0} allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 12 # # #### Intro # The new circle at the very top has a similar job. In this case, it keeps track of the probability to agree and disagree for the X outputs of both qubits. # # #### Exercise # * Make the X outputs certain to agree. 
initialize = [['x','0']] success_condition = {'XX': 1.0} allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # If you are wondering why it is this new circle that represents this information, think of a row extending out from the X output of one qubit, and another row coming out from the X output of the other. The circle at the very top is located where these two rows meet. That's why it represents agreements between the two X outputs. # ### Puzzle 13 # # #### Intro # # There are still two new circles to explain. One is where the Z output row from `q[0]` meets the X output row from `q[1]`. This tells us whether Z output for `q[0]` would agree with the X output from `q[1]`. The other is the same, but with the X output on `q[0]` and the Z output on `q[1]`. # # #### Exercise # * Make the X output for `q[0]` certain to disagree with the Z output for `q[1]`. initialize = [] success_condition = {'XZ': -1.0} allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 14 # # #### Intro # Notice how the the `x`, `z` and `h` gates affect the new circles. Specifically, the `x` gates don't just affect a single Z output, but a whole row of them: flipping each circle from black to white (or dark to light) and vice-versa. # # #### Exercise # * Turn the two Z outputs on as much as you can. 
initialize = [['ry(-pi/4)', '1'], ['ry(-pi/4)','0']] success_condition = {'ZI': -0.7071, 'IZ': -0.7071} allowed_gates = {'0': {'x': 0}, '1': {'x': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # As you'll see in the next puzzle, the `z` gates affect X output rows in the same way. # ### Puzzle 15 # # #### Intro # # We've previously seen how the `h` gate swaps a pair of circles. Now it affects a pair of rows in the same way: swapping each pair of circles until the whole rows have swapped. Again, you may want to check out the [Hello Quantum](https://helloquantum.mybluemix.net/) app for a nice animation. # # #### Exercise # * Turn off the X outputs. initialize = [['x', '1'], ['x','0']] success_condition = {'XI':1, 'IX':1} allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {'z': 0, 'h': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Output # # You've now gone through the basics of what two qubits look like and how to manipulate them indivially. But the real fun comes once we start using two qubit gates. # ## Level 3: Two qubit gates # ### Puzzle 1 # # #### Introduction # # In the exercises on bits, we used the `CNOT` gate. For qubits we have a similiar gate, using the `x` as the quantum version of the `NOT`. For this reason, we refer to it in Qiskit programs as `cx`. # # Like the classical `CNOT`, the `cx` gate has a 'control' and a 'target'. It effectively looks at what the Z output would be for the control, and uses that to decide whether an `x` is applied to the target qubit. # # When you apply this gate, the qubit you choose will serve as the target. The other qubit will then be the control. 
# # #### Exercise # # * Use a `cx` or two to turn on the Z output of q[1], and turn off the Z output of q[0]. initialize = [['x', '0']] success_condition = {'ZI': 1.0, 'IZ': -1.0} allowed_gates = {'0': {'cx': 0}, '1': {'cx': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 2 # # #### Introduction # # As well as a `cx` gate, there is also the `cz`. This does the same, except that it potentially applies a `z` to the target instead of an `x`. # # #### Exercise # # * Turn on the X output of q[0]. initialize = [['h', '0'],['x', '1']] success_condition = {'XI': -1.0} allowed_gates = {'0': {'cz': 0}, '1': {}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 2b # # #### Introduction # # An interesting thing about quantum gates is that there is often multiple ways to explain what they are doing. These explanations can sometimes seem completely incompatible, but they are also equally true. # # For example, the `cz` can also be described as a gate which applies a `z` to the *control* qubit, depending on the potential Z output of the *target*. Exactly the same explanation as before, but with the roles of the qubits reversed. Nevertheless, it is equally true. # # #### Exercise # * Same as the last exercise, but with the qubits reversed. The `cz`, however, is the same way round as before. 
initialize = [['h', '1'],['x', '0']] success_condition = {'IX': -1.0} allowed_gates = {'0': {'cz': 0}, '1': {}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 3 # # #### Introduction # # Now for another completely different, but equally true, explanation of the `cz`. Like the `h` gate, we can think of it in terms of circles being swapped. The `cz` has the effect of: # - Swapping the X output of `q[0]` with the neighbouring circle to the top-right; # - Doing the same with the X output of `q[1]` (for the neighbour to the top-left). # The `cz` also does something weird with the circle at the top of the grid, but that is a mystery to be solved later! # # Again, the [Hello Quantum](https://helloquantum.mybluemix.net/) app will give you some nice animations. # # #### Exercise # * Do the `cz` twice with each qubit as control, and see what happens. initialize = [['h', '0'],['x', '1'],['h', '1']] success_condition = { } allowed_gates = {'0':{'cz': 2}, '1':{'cz': 2}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # We've now learned something very useful about the `cz`: it doesn't matter which qubit you choose as control, the `cz` does the same thing in either case. Because of this, choosing the control qubit for the `cz` is not required from now on. # ### Puzzle 3b # # #### Introduction # # As mentioned earlier, the X and Z outputs correspond to two ways of getting an output from a qubit: the X and Z measurements. As these names hint, there is also a third way, known as the Y measurement. # # In these puzzles we mostly ignore the Y measurement, just to make things a little simpler. 
However, for a complete description of a qubit we do need to keep track of that the Y output might look like. This just means adding in another couple of rows of circles, for the Y outputs of each qubit. # # The next exercise is exactly the same as the last, except that the Y output rows are shown. With these the weird effect seen in the last puzzle becomes not so weird at all. See for yourself! # # #### Exercise # * Do the `cz` twice and see what happens. initialize = [['h', '0'],['x', '1'],['h', '1']] success_condition = { } allowed_gates = {'0': {}, '1': {}, 'both': {'cz': 2}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='y') puzzle.get_circuit().draw(output='mpl') # #### Outro # # Here we see the third swapping of circles done by the `cz`. The circle at the top (representing correlations for X outputs of both qubits) is swapped with the one in the middle (representing correlations for Y outputs of both qubits). # # This seemed strange when the middle rows were missing, just because our description of the qubits wasn't complete. Nevertheless, we'll keep on using the simpler grid without the Y outputs. If you want to add them in yourself, just use the `mode='y'` argument in <code>hello_quantum.run_game()</code>. # ### Puzzle 4 # # #### Introduction # # In a previous exercise, you've built an `x` from a `z` and some `h`s. In the same way, it's possible to build a `cx` from a `cz` and some `h`s. # # #### Exercise # # * Turn on the Z output of q[1]. initialize = [['x', '0']] success_condition = {'IZ': -1.0} allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # Unlike the `cz`, the `cx` is not symmetric. 
If you instead wanted to make a `cx` whose target was `q[0]`, you would have to do the `h`s on `q[0]` instead. # ### Puzzle 5 # # #### Introduction # # We were able to interpret the `cz` 'backwards': an alternate explanation in which the target qubit played the role of control, and vice-versa. We'll now do the same with the `cx`. However, since this gate doesn't have a symmetric effect, this will be a little more tricky. # # Specifically, instead instead of thinking of it as doing an `x` on the target depending on what the Z output of the control is doing, we can think of it as doing a `z` to the control depending on what the X output of the target is doing. # # In this exercise, you can see how it seems as though the target doing the controlling, and the control being the target! # # #### Exercise # * Turn on the X output of `q[0]`. initialize = [['h', '0'],['h', '1']] success_condition = {'XI': -1.0, 'IX': -1.0} allowed_gates = {'0': {}, '1': {'z':0,'cx': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # Though these two different stories about how the `cx` works may seem to be contradictory, they are equally valid descriptions. A great example of the weird and wonderful nature of quantum gates. # ### Puzzle 6 # # #### Introduction # # Now we know these two interpretations of a `cx`, we can do do something pretty useful: turning one around. # # In this puzzle you'll get a `cx` with `q[1]` as the target, but you'll need one with `q[0]` as target. See if you can work out how to get the same effect with the help pf some `h` gates. # # #### Exercise # * Keep the Z output of `q[1]`, but turn the Z output of `q[0]` off. 
initialize = [('x','0'),('x','1')] success_condition = {'ZI': 1.0,'IZ': -1.0} allowed_gates = {'0': {'h':0}, '1': {'h':0,'cx':0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # If you remember anything from these exercises, it should probably be this. It is common for real qubit devices to limit which way around you can do the `cx`, so the ability to turn them around comes in very handy. # ### Puzzle 7 # # #### Introduction # # Another useful quantum gate is the `swap`. This does exactly what the name suggests: it swaps the states of two qubits. Though Qiskit allows us to simply invoke the `swap` command, it is more interesting to make this gate ourselves out of `cz` or `cx` gates. # # #### Exercise # * Swap the two qubits: # - Make the Z output white and the X output grey for q[0]; # - Make the Z output dark grey and the X output light grey for q[1]. initialize = [['ry(-pi/4)','0'],['ry(-pi/4)','0'],['ry(-pi/4)','0'],['x','0'],['x','1']] success_condition = {'ZI': -1.0,'XI':0,'IZ':0.7071,'IX':-0.7071} allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # Note that your solution to this puzzle might not have been a general purpose `swap`. Compare your solution to those for the next few puzzles, which also implement swaps. # ### Puzzle 8 # # #### Intro # # Here's another puzzle based on the idea of making a `swap`. # # #### Exercise # * Swap the two qubits: # * Make the X output black for q[0]. # * Make the Z output white for q[1]. 
# * And do it with 3 `cz` gates initialize = [['x','0'],['h','1']] success_condition = {'XI':1,'IZ':-1} allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':3}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000) puzzle.get_circuit().draw(output='mpl') # ### Puzzle 9 # # #### Intro # # And another `swap`-based puzzle. # # #### Exercise # * Swap the two qubits: # * Turn on the Z output for q[0]. # * Turn off the Z output q[1]. initialize = [['x','1']] success_condition = {'IZ':1.0,'ZI':-1.0} allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000) puzzle.get_circuit().draw(output='mpl') # ## Level 4: Beyond Clifford gates # ### Puzzle 1a # # #### Introduction # # The gates you've seen so far are called the 'Clifford gates'. They are very important for moving and manipulating information in quantum computers. However, we cannot create algorithms that can outperform standard computers using Clifford gates alone. We need some new gates. # # This puzzle has one for you to try. Simply do it a few times, and see if you can work out what it does. # # #### Exercise # * Apply `ry(pi/4)` four times to q[0]. initialize = [] success_condition = {} allowed_gates = {'0': {'ry(pi/4)': 4}, '1': {}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names) puzzle.get_circuit().draw(output='mpl') # #### Outro # # Well done if you were able to work it out! For the rest of us, let's try something new to help us figure it out. # ### Puzzle 1b # # #### Introduction # # To better understand the gate we just saw, we are going to use to a slightly differemt way to visualize the qubits. 
In this an output that is certain to give `0` will be represented by a white line rather than a white circle. An output certain to give `1` will be a black line instead of a black circle. For a random output, you'll see a line that's part white and part black instead of a grey circle. # # Here's an old exercise to help you get used to this new visualization. # # #### Exercise # # * Make the X outputs certain to agree. initialize = [['x','0']] success_condition = {'XX': 1.0} allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') puzzle.get_circuit().draw(output='mpl') # ### Puzzle 1c # # #### Introduction # # In this puzzle you'll see a new thing that you can do: `bloch`. This isn't actually a gate, and won't show up in the quantum program. Instead it just changes the visualization, by drawing the two lines for each qubit on top of each other. It also puts a point where their levels intersect. Using `bloch`, you should hopefully be able to figure out how `ry(pi/4)` works. # # #### Exercise # * Turn the bottom line of `q[0]` fully on, and use the `bloch` gate. initialize = [] success_condition = {'ZI': -1.0} allowed_gates = {'0': {'bloch':1, 'ry(pi/4)': 0}, '1':{}, 'both': {'unbloch':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') puzzle.get_circuit().draw(output='mpl') # #### Outro # # If you followed the points, you should have noticed that the effect of `ry(pi/4)` is to rotate them by $\pi/4$ radians (or 45 degrees). The levels of the lines also change to match. 
The effect of the `ry(-pi/4)` gate would be the same, except the rotation is in the other direction.
#
# Also, as you probably noticed, using `bloch` doesn't just combine the two lines for each qubit. It combines their whole rows.

# ### Puzzle 2
#
# #### Introduction
#
# Now let's use these gates on the other qubit too.
#
# #### Exercise
# * Turn the bottom lines fully on.

initialize = [['h','0'],['h','1']]
success_condition = {'ZI': -1.0,'IZ': -1.0}
allowed_gates = {'0': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')

puzzle.get_circuit().draw(output='mpl')

# ### Puzzle 3
#
# #### Introduction
#
# Here's a puzzle you could solve with a simple `cx`, or a `cz` and some `h`s. Unfortunately you have neither `cx` nor `h`, so you'll need to work out how `cz` and `ry`s can do the job.
#
#
# #### Exercise
# * Make the Z outputs agree.

initialize = [['h','0']]
success_condition = {'ZZ': 1.0}
allowed_gates = {'0': {}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0,'cz':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')

puzzle.get_circuit().draw(output='mpl')

# ### Puzzle 4
#
# #### Introduction
#
# Using `x`s or `z`s you can effectively _reflect_ an `ry`, to make it move in the opposite direction.
#
#
# #### Exercise
# * Turn the Z outputs fully off with just one `ry(pi/4)` on each.
initialize = [['ry(pi/4)','0'],['ry(pi/4)','1']] success_condition = {'ZI': 1.0,'IZ': 1.0} allowed_gates = {'0': {'bloch':0, 'z':0, 'ry(pi/4)': 1}, '1': {'bloch':0, 'x':0, 'ry(pi/4)': 1}, 'both': {'unbloch':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') puzzle.get_circuit().draw(output='mpl') # ### Puzzle 5 # # #### Introduction # # With the `ry`s, we can make conditional gates that are more interesting than just `cz` and `cx`. For example, we can make a controlled-`h`. # # # #### Exercise # * Turn off the Z output for q[1] using exactly one `ry(pi/4)` and `ry(-pi/4)` on that qubit. initialize = [['x','0'],['h','1']] success_condition = {'IZ': 1.0} allowed_gates = {'0': {}, '1': {'bloch':0, 'cx':0, 'ry(pi/4)': 1, 'ry(-pi/4)': 1}, 'both': {'unbloch':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') puzzle.get_circuit().draw(output='mpl') # ## Bonus Level: Sandbox # # You now know enough basic quantum gates to build fully powerful quantum programs. You'll get a taste of this in the final level. But before then, here are a couple of bonus levels. # # Firstly, a sandbox to try out your new skills: two grids with all the gates enabled, so you can have a play around. # # Here's one with the line-based visualization, to help with the non-Clifford gates. 
initialize = [] success_condition = {'IZ': 1.0,'IX': 1.0} allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} line_sandbox = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') line_sandbox.get_circuit().draw(output='mpl') # Here is a grid with the middle lines, which describe outputs for y measurements. With this you can also try some new non-Clifford gates: `rx(pi/4)`' and `rx(-pi/4)`. initialize = [] success_condition = {'IZ': 1.0,'IX': 1.0} allowed_gates = {'0': {'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'rx(pi/4)': 0, 'ry(-pi/4)': 0, 'rx(-pi/4)': 0}, '1': {'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'rx(pi/4)': 0, 'ry(-pi/4)': 0, 'rx(-pi/4)': 0}, 'both': {'cz':0}} vi = [[], True, True] qubit_names = {'0':'q[0]', '1':'q[1]'} y_sandbox = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='y') y_sandbox.get_circuit().draw(output='mpl') # ## Bonus Level: Make your own puzzles # # As well as giving you some puzzles, we've also given you the opportunity to make and share puzzles of your own. Using the Quantum Experience you can easily make your own notebook full of your own puzzles. # # Start by clicking the 'New Notebook' button on [this page](https://quantum-computing.ibm.com/jupyter). Then add the following line to the list of imports. # # ``` # from qiskit_textbook.games import hello_quantum # ``` # # This gives you the tools you need for making puzzles. Specifically, you need to create your own call to the `run_game` function using the following form. # # ``` # puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode=None) # ``` # # All the elements of this are explained below. 
# # `puzzle` # # * This is an object containing the puzzle you create. # * The quantum circuit made by the player can be accessed using <code>puzzle.get_circuit()</code>. # # `initialize` # # * A list of gates applied before the puzzle begins, to prepare the initial state. # * If this is empty, the default initial state of qubits is used (the Z output is certain to be `0`). # * Supported single qubit gates (applied to qubit `'0'` or `'1'`) are `'x'`, `'y'`, `'z'`, `'h'` and `'ry(pi/4)'`. # * Supported two qubit gates are `'cz'` and `'cx'`. For these, specify only the target qubit. # * Example: `initialize = [['x', '0'],['cx', '1']]`. # # `success_condition` # # * Values for pauli observables that must be obtained for the puzzle to declare success. # * Expressed as a dictionary whose keys are the names of the circles in the following form: # * `'ZI'` refers to the Z output of the qubit on the left, and `'XI'` for its X output. # * `'IZ'` and `'IX'` are similarly the outputs for the qubit on the right. # * `'ZX'` refers to the circle for correlations between Z output for the qubit on the left and the X output on the right. # * And so on. # * The values of the dictionary are the values that these circles must hold for the puzzle to be completed. # * 1.0 for black. # * -1.0 for white. # * 0.0 for grey. # * And so on. # * Only the circles you want to be included in the condition need to be listed. # * Example: `success_condition = {'IZ': 1.0}`. # # `allowed_gates` # # * For each qubit, specify which operations are allowed in this puzzle. # * For operations that don't need a qubit to be specified (`'cz'` and `'unbloch'`), assign the operation to `'both'` instead of qubit `'0'` or `'1'`. # * Gates are expressed as a dictionary with an integer as the value. # * If the integer is non-zero, it specifies the exact number of times the gate must be used for the puzzle to be successfully solved. # * If it is zero, the player can use the gate any number of times. 
# * Example: `allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 1}}`. # # `vi` # # * Some visualization information as a three element list: `vi=[hidden,qubit,corr]`. These specify: # * `hidden`: Which qubits are hidden (empty list if both shown). # * `qubit`: Whether both circles shown for each qubit? (use `True` for qubit puzzles and `False` for bit puzzles). # * `corr`: Whether the correlation circles (the four in the middle) are shown. # * Example: <code>vi = [[], True, True]</code>. # # `qubit_names` # # * The two qubits are always called `'0'` and `'1'` internally. But for the player, we can display different names. # * Example: `qubit_names = {'0':'qubit 0', '1':'qubit 1'}` # # `mode` # # * An optional argument, which allows you to choose whether to include the Y output rows or use the line-based visualization. # * Using `mode=None`, or not including the mode argument at all, gives the default mode. # * `mode='y'` includes the Y output rows. # * `mode='line'` gives the line-based visualization. # # # # # The puzzle defined by the examples given here can be run in the following cell. initialize = [['x', '0'],['cx', '1']] success_condition = {'IZ': 1.0} allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 1}} vi = [[], True, True] qubit_names = {'0':'qubit 0', '1':'qubit 1'} mode = None puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode=mode) # Remember to tell players what they should be aiming for. For all the puzzles in this notebook, the target state was described with text. But you can instead create an image of the target state, including a short message for the player, in the following way. This again uses the example parameters provided above. message = '\nRules:\n Use exactly one cz gate.' 
grid = hello_quantum.pauli_grid(mode=mode)
grid.update_grid(rho=success_condition, hidden=vi[0], qubit=vi[1], corr=vi[2], message=message)

# Note that only the circles that need to be matched are shown.
#
# To share your puzzles with someone else, simply save the notebook you created and send it to them. They can play by opening and running on the Quantum Experience, using the 'Import' button on [this page](https://quantum-computing.ibm.com/jupyter).

# ## Level 5: Proving the Uniqueness of Quantum Variables

# ### Bell test for classical variables
#
# Here we'll investigate how quantum variables (based on qubits) differ from standard ones (based on bits).
#
# We'll do this by creating a pair of variables, which we will call `A` and `B`. We aren't going to put any conditions on what these can be, or how they are initialized. So there are a lot of possibilities:
#
# * They could be any kind of variable, such as
#     * integer
#     * list
#     * dictionary
#     * ...
# * They could be initialized by any kind of process, such as
#     * left empty
#     * filled with a given set of values
#     * generated by a given random process
#         * independently applied to `A` and `B`
#         * applied to `A` and `B` together, allowing for correlations between their randomness
#
# If the variables are initialized by a random process, it means they'll have different values every time we run our program. This is perfectly fine. The only rule we need to obey is that the process of generating the randomness is the same for every run.
#
# We'll use the function below to set up these variables. This currently has `A` and `B` defined to be partially correlated random floating point numbers. But you can change it to whatever you want.

import random

def setup_variables ():
    """Create and return the two classical variables A and B.

    The marked section can be replaced with any initialization; the only
    rule is that the same (possibly random) process is used on every run.
    The default makes A and B partially correlated random floats.
    """

    ### Replace this section with anything you want ###

    r = random.random()

    A = r*(2/3)
    B = r*(1/3)

    ### End of section ###

    return A, B

# Our next job is to define a hashing function.
This simply needs to take one of the variables as input, and then give a bit value as an output.
#
# This function must also be capable of performing two different types of hash. So it needs to be able to chew on a variable and spit out a bit in two different ways. We'll therefore also need to tell the function what kind of hash we want to use.
#
# To be consistent with the rest of the program, the two possible hash types should be called `'H'` and `'V'`. Also, the output must be in the form of a single value bit string: either `'0'` or `'1'`.
#
# In the (fairly arbitrary) example given, the bits were created by comparing `A` and `B` to a certain value. The output is `'1'` if they are under that value, and `'0'` otherwise. The type of hash determines the value used.

def hash2bit ( variable, hash ):
    """Hash a variable down to a single bit, returned as the string '0' or '1'.

    `hash` selects which of the two hash types ('V' or 'H') to apply.
    (The parameter name shadows the builtin `hash`; kept unchanged for
    compatibility with existing callers.)
    """

    ### Replace this section with anything you want ###

    if hash=='V':
        bit = (variable<0.5)
    elif hash=='H':
        bit = (variable<0.25)

    bit = str(int(bit)) # Turn True or False into '1' and '0'

    ### End of section ###

    return bit

# Once these are defined, there are four quantities we wish to calculate: `P['HH']`, `P['HV']`, `P['VH']` and `P['VV']`.
#
# Let's focus on `P['HV']` as an example. This is the probability that the bit value derived from an `'H'` type hash on `A` is different to that from a `'V'` type hash on `B`. We will estimate this probability by sampling many times and determining the fraction of samples for which the corresponding bit values disagree.
#
# The other probabilities are defined similarly: `P['HH']` compares a `'H'` type hash on both `A` and `B`, `P['VV']` compares a `V` type hash on both, and `P['VH']` compares a `V` type hash on `A` with a `H` type hash on `B`.
#
# These probabilities are calculated in the following function, which returns all the values of `P` in a dictionary. The parameter `shots` is the number of samples we'll use.
shots = 8192

def calculate_P ( ):
    """Estimate the disagreement probabilities P['VV'], P['VH'], P['HV'] and P['HH'].

    For each pair of hash types, fresh variables A and B are generated
    `shots` times; P[hashes] is the fraction of samples for which the two
    hashed bit values disagree.
    """
    P = {}
    for hashes in ['VV','VH','HV','HH']:
        # calculate each P[hashes] by sampling over `shots` samples
        P[hashes] = 0
        for shot in range(shots):
            A, B = setup_variables()
            a = hash2bit ( A, hashes[0] ) # hash type for variable `A` is the first character of `hashes`
            b = hash2bit ( B, hashes[1] ) # hash type for variable `B` is the second character of `hashes`
            P[hashes] += (a!=b) / shots

    return P

# Now let's actually calculate these values for the method we have chosen to set up and hash the variables.

P = calculate_P()
print(P)

# These values will vary slightly from one run to the next due to the fact that we only use a finite number of shots. To change them significantly, we need to change the way the variables are initiated, and/or the way the hash functions are defined.
#
# No matter how these functions are defined, there are certain restrictions that the values of `P` will always obey.
#
# For example, consider the case that `P['HV']`, `P['VH']` and `P['VV']` are all `0.0`. The only way that this can be possible is for `P['HH']` to also be `0.0`.
#
# To see why, we start by noting that `P['HV']=0.0` is telling us that `hash2bit(A, H)` and `hash2bit(B, V)` were never different in any of the runs. So this means we can always expect them to be equal.
#
#     hash2bit(A, H) = hash2bit(B, V)         (1)
#
# From `P['VV']=0.0` and `P['VH']=0.0` we can similarly get
#
#     hash2bit(A, V) = hash2bit(B, V)         (2)
#
#     hash2bit(A, V) = hash2bit(B, H)         (3)
#
# Putting (1) and (2) together implies that
#
#     hash2bit(A, H) = hash2bit(A, V)         (4)
#
# Combining this with (3) gives
#
#     hash2bit(A, H) = hash2bit(B, H)         (5)
#
# And if these values are always equal, we'll never see a run in which they are different. This is exactly what we set out to prove: `P['HH']=0.0`.
#
# More generally, we can use the values of `P['HV']`, `P['VH']` and `P['VV']` to set an upper limit on what `P['HH']` can be.
By adapting the [CHSH inequality](https://en.wikipedia.org/wiki/CHSH_inequality) we find that
#
# $\,\,\,\,\,\,\,$ `P['HH']` $\, \leq \,$ `P['HV'] + P['VH'] + P['VV']`
#
# This is not just a special property of `P['HH']`. It's also true for all the others: each of these probabilities cannot be greater than the sum of the others.
#
# To test whether this logic holds, we'll see how well the probabilities obey these inequalities. Note that we might get slight violations due to the fact that the `P` values aren't exact, but are estimations made using a limited number of samples.

def bell_test (P):
    """Check each value in P against the CHSH-style bound given by the sum of the others.

    Prints the bound and the estimated value for every hash pair, and
    whether the bound is obeyed. Small overshoots (< 0.1) are attributed
    to statistical noise from the finite number of shots.
    """
    sum_P = sum(P.values())
    for hashes in P:

        # the upper bound for each probability is the sum of all the others
        bound = sum_P - P[hashes]

        print("The upper bound for P['"+hashes+"'] is "+str(bound))
        print("The value of P['"+hashes+"'] is "+str(P[hashes]))
        if P[hashes]<=bound:
            print("The upper bound is obeyed :)\n")
        else:
            # tolerate small violations, which can arise from sampling noise
            if P[hashes]-bound < 0.1:
                print("This seems to have gone over the upper bound, but only by a little bit :S\nProbably just rounding errors or statistical noise.\n")
            else:
                print("!!!!! This has gone well over the upper bound :O !!!!!\n")

bell_test(P)

# With the initialization and hash functions provided in this notebook, the value of `P('HV')` should be pretty much the same as the upper bound. Since the numbers are estimated statistically, and therefore are slightly approximate due to statistical noise, you might even see it go a tiny bit over. But you'll never see it significantly surpass the bound.
#
# If you don't believe me, try it for yourself. Change the way the variables are initialized, and how the hashes are calculated, and try to get one of the bounds to be significantly broken.

# ### Bell test for quantum variables
#
# Now we are going to do the same thing all over again, except our variables `A` and `B` will be quantum variables. Specifically, they'll be the simplest kind of quantum variable: qubits.

# When writing quantum programs, we have to set up our qubits and bits before we can use them.
This is done by the function below. It defines a register of two qubits, and assigns them as our variables `A` and `B`. It then sets up a register of two bits to receive the outputs, and assigns them as `a` and `b`.
#
# Finally it uses these registers to set up an empty quantum program. This is called `qc`.

# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit

def initialize_program ():
    """Create the two-qubit and two-bit registers and an empty circuit.

    Returns the qubits A and B, the classical bits a and b that will hold
    their measured values, and the QuantumCircuit qc built from both
    registers.
    """
    qubit = QuantumRegister(2)
    A = qubit[0]
    B = qubit[1]

    bit = ClassicalRegister(2)
    a = bit[0]
    b = bit[1]

    qc = QuantumCircuit(qubit, bit)

    return A, B, a, b, qc
# -

# Before we start writing the quantum program to set up our variables, let's think about what needs to happen at the end of the program. This will be where we define the different hash functions, which turn our qubits into bits.
#
# The simplest way to extract a bit from a qubit is through the `measure` gate. This corresponds to the Z output of a qubit in the visualization we've been using. Let's use this as our `V` type hash.
#
# For the output that corresponds to the X output, there is no direct means of access. However, we can do it indirectly by first doing an `h` to swap the top and Z outputs, and then using the `measure` gate. This will be our `H` type hash.
#
# Note that this function has more inputs than its classical counterpart. We have to tell it the `bit` on which to write the result, and the quantum program, `qc`, on which we write the gates.

def hash2bit ( variable, hash, bit, qc ):
    """Append the measurement for the given hash type to the circuit.

    A 'V' type hash is a plain measurement; an 'H' type hash first applies
    an `h` gate so that the measurement probes the X output instead. The
    result is written to classical bit `bit` of circuit `qc`.
    """
    if hash=='H':
        qc.h( variable )
    qc.measure( variable, bit )

# Now it's time to set up the variables `A` and `B`. To write this program, you can use the grid below. You can either follow the suggested exercise, or do whatever you like. Once you are ready, just move on. The cell containing the <code>setup_variables()</code> function, will then use the program you wrote with the grid.
# # Note that our choice of means that the probabilities `P['HH']`, `P['HV']`, `P['VH']` and `P['VV']` will explicitly correspond to circles on our grid. For example, the circle at the very top tells us how likely the two X outputs would be to disagree. If this is white, then `P['HH']=1` , if it is black then `P['HH']=0`. # # ### Exercise # * Make it so that the X outputs of both qubits are more likely to disagree, whereas all other combinations of outputs are more likely to agree. initialize = [] success_condition = {'ZZ':+0.7071,'ZX':+0.7071,'XZ':+0.7071,'XX':-0.7071} allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}} vi = [[], True, True] qubit_names = {'0':'A', '1':'B'} puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line') # Now the program as written above will be used to set up the quantum variables. import numpy as np def setup_variables ( A, B, qc ): for line in puzzle.program: eval(line) # The values of `P` are calculated in the function below. In this, as in the puzzles in the rest of this notebook, this is done by running the job using Qiskit and getting results which tell us how many of the samples gave each possible output. The output is given as a bit string, `string`, which Qiskit numbers from right to left. This means that the value of `a`, which corresponds to `bit[0]` is the first from the right # # a = string[-1] # # and the value of `b` is right next to it at the second from the right # # b = string[-2] # # The number of samples for this bit string is provided by the dictionary of results, `stats`, as `stats[string]`. 
# + shots = 8192 from qiskit import assemble, transpile def calculate_P ( backend ): P = {} program = {} for hashes in ['VV','VH','HV','HH']: A, B, a, b, program[hashes] = initialize_program () setup_variables( A, B, program[hashes] ) hash2bit ( A, hashes[0], a, program[hashes]) hash2bit ( B, hashes[1], b, program[hashes]) # submit jobs t_qcs = transpile(list(program.values()), backend) qobj = assemble(t_qcs, shots=shots) job = backend.run(qobj) # get the results for hashes in ['VV','VH','HV','HH']: stats = job.result().get_counts(program[hashes]) P[hashes] = 0 for string in stats.keys(): a = string[-1] b = string[-2] if a!=b: P[hashes] += stats[string] / shots return P # - # Now its time to choose and set up the actually device we are going to use. By default, we'll use a simulator. You could instead use a real cloud-based device by changing the backend accordingly. from qiskit import Aer device = 'qasm_simulator' backend = Aer.get_backend(device) P = calculate_P( backend ) print(P) bell_test( P ) # If you prepared the state suggestion by the exercise, you will have found a significant violation of the upper bound for `P['HH']`. So what is going on here? The chain of logic we based the Bell test on obviously doesn't apply to quantum variables. But why? # # The answer is that there is a hidden assumption in that logic. To see why, let's revisit point (4). # # hash2bit ( A, H ) = hash2bit ( A, V ) (4) # # Here we compare the value we'd get from an `H` type of hash of the variable `A` with that for a `V` type hash. # # For classical variables, this is perfectly sensible. There is nothing stopping us from calculating both hashes and comparing the results. Even if calculating the hash of a variable changes the variable, that's not a problem. All we need to do is copy it beforehand and we can do both hashes without any issue. # # The same is not true for quantum variables. The result of the hashes is not known until we actually do them. 
It's only then that the qubit actually decides what bit value to give. And once it decides the value for one type of hash, we can never determine what it would have decided if we had used another type of hash. We can't get around this by copying the quantum variables either, because quantum variables [cannot be copied](https://en.wikipedia.org/wiki/No-cloning_theorem). This means there is no context in which the values `hash2bit(A,H)` and `hash2bit(A,V)` are well-defined at the same time, and so it is impossible to compare them.
#
# Another hidden assumption is that `hash2bit(A,hash)` depends only on the type of hash chosen for variable `A`, and not the one chosen for variable `B`. This is also perfectly sensible, since this is exactly the way we set up the <code>hash2bit()</code> function. However, the very fact that the upper bound was violated does seem to imply that each variable knows what hash is being done to the other, so that they can conspire to give very different behaviour when both have a `H` type hash.
#
# Even so, we cannot say that our choice of hash on one qubit affects the outcome on the other. The effect is more subtle than that. For example, it is impossible to determine which variable is affecting which: You can change the order in which the hashes are done, or [effectively do them at the same time](https://en.wikipedia.org/wiki/Loopholes_in_Bell_test_experiments#Communication,_or_locality), and you'll get the same results. What we can say is that the results are [contextual](https://en.wikipedia.org/wiki/Quantum_contextuality): to fully understand results from one variable, it is sometimes required to look at what was done to another.
#
# All this goes to show that quantum variables don't always follow the logic we are used to. They follow different rules, the rules of quantum mechanics, which will allow us to find ways of performing computation in new and different ways.

import qiskit
qiskit.__qiskit_version__
notebooks/ch-ex/hello-qiskit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7 (py37)
#     language: python
#     name: py37
# ---

# Drive DIFFaX (a Fortran program for diffraction from faulted crystals)
# from Python: write its input file, feed it answers to its interactive
# prompts via stdin, then parse and plot the powder pattern it produces.

import subprocess

# Hand-written DIFFaX input: a faulted two-layer carbon stack.
finp = """INSTRUMENTAL
X-RAY
1.5418
PSEUDO-VOIGT .89 -.32 .08 .6 trim
STRUCTURAL
2.52 2.52 2.06 120
6/MMM
2
LAYER 1
CENTROSYMMETRIC
C 1 -1/3 -1/6 -1/8 1 1
LAYER 2
CENTROSYMMETRIC
C 1 1/3 1/6 -1/8 1 1
STACKING
recursive
infinite
TRANSITIONS
.7 2/3 1/3 1
.3 0 0 1
.3 0 0 1
.7 -2/3 -1/3 1
"""
with open("/tmp/Test2.inp","w") as f:
    f.write(finp)

# Answers to DIFFaX's interactive prompts: input file name, options, and
# a powder pattern over a 5-60 degree range with a 0.01 degree step.
inp = b"""/tmp/Test2.inp
0 0
3
5 60 0.01
1
1
"""
rv = subprocess.run(["/home/idies/workspace/DIFFaX_1813/DIFFaX"],
                    input=inp,
                    cwd="/home/idies/workspace/DIFFaX_1813",
                    capture_output=True)
for x in rv.stdout.splitlines():
    print(x)

import csv
import numpy as np


def read_spc(path):
    """Parse a tab-separated DIFFaX .spc output file into a float ndarray."""
    with open(path, "r", newline='') as f:
        return np.array(list(csv.reader(f, delimiter='\t')), dtype=np.float64)


pred = read_spc("/tmp/Test2.spc")
print(pred)

# %matplotlib notebook
import matplotlib.pyplot as plt
plt.plot(pred[:,0], pred[:,1])

# Build a second DIFFaX input automatically from a CIF file using ASE.
import ase.io
nb3cl8 = ase.io.read('Nb3Cl8_300K.cif')
abcalbega = nb3cl8.get_cell_lengths_and_angles()
# DIFFaX's STRUCTURAL line takes a, b, c and gamma only.
abcga = "{a:.6f} {b:.6f} {c:.6f} {g:.3f}".format(a=abcalbega[0],b=abcalbega[1],c=abcalbega[2],g=abcalbega[5])
print(abcga)
#print(nb3cl8.get_scaled_positions(),nb3cl8.get_chemical_symbols())

# One DIFFaX atom line per site: symbol, running number, fractional
# coordinates, then occupancy and B-factor both fixed at 1.0.
layer1 = ""
an = 1
for sym,xyz in zip(nb3cl8.get_chemical_symbols(),nb3cl8.get_scaled_positions()):
    layer1 += "\n{sym:4s} {n} {x:.6f} {y:.6f} {z:.6f} 1.0 1.0".format(sym=sym,n=an,x=xyz[0],y=xyz[1],z=xyz[2])
    an += 1
layer1 = layer1.strip()
print(layer1)

# Equal-probability transitions between the nlayers layer types, each
# displaced by one unit along c.
trans = ""
nlayers = 2
for n in range(0,nlayers):
    for m in range(0,nlayers):
        trans += "\n{prob} {dx:.6f} {dy:.6f} {dz:.6f}".format(prob=0.5,dx=0,dy=0,dz=1)
trans = trans.strip()
print(trans)

finp = """INSTRUMENTAL
X-RAY
1.5418
PSEUDO-VOIGT .89 -.32 .08 .6 trim
STRUCTURAL
{abcga}
UNKNOWN
2
LAYER 1
NONE
{layer1}
LAYER 2
= 1
STACKING
recursive
infinite
TRANSITIONS
{trans}
""".format(abcga=abcga,layer1=layer1,trans=trans)
print(finp)

import os
with open("/tmp/Test3.inp","w") as f:
    f.write(finp)

# Remove any stale output from a previous run. Guarded: on the very first
# run the file does not exist yet, and the previous unconditional
# os.remove raised FileNotFoundError and aborted the notebook.
try:
    os.remove("/tmp/Test3.spc")
except FileNotFoundError:
    pass

inp = b"""/tmp/Test3.inp
0 0
3
5 60 0.01
1
1
"""
rv = subprocess.run(["/home/idies/workspace/DIFFaX_1813/DIFFaX"],
                    input=inp,
                    cwd="/home/idies/workspace/DIFFaX_1813",
                    capture_output=True)
for x in rv.stdout.splitlines():
    print(x)

pred = read_spc("/tmp/Test3.spc")
print(pred)

# %matplotlib notebook
import matplotlib.pyplot as plt
plt.plot(pred[:,0], pred[:,2])  # NOTE(review): column 2 here vs column 1 above — confirm this is intended
MaterialsAutomated4-DIFFaXPython/MaterialsAutomated4-LiveCoded.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import os from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache import numpy as np import h5py import time import nrrd from voxnet.conn2d import map_to_surface drive_path = os.path.join(os.getenv('HOME'), 'work/allen/data/sdk_new_100') # When downloading 3D connectivity data volumes, what resolution do you want (in microns)? # Options are: 10, 25, 50, 100 resolution_um = 10 # Drop list criterion, in percent difference volume_fraction = 20 # The manifest file is a simple JSON file that keeps track of all of # the data that has already been downloaded onto the hard drives. # If you supply a relative path, it is assumed to be relative to your # current working directory. manifest_file = os.path.join(drive_path, "manifest.json") # Start processing data mcc = MouseConnectivityCache(manifest_file = manifest_file, resolution = resolution_um) ontology = mcc.get_ontology() # Injection structure of interest isocortex = ontology['Isocortex'] # open up a pandas dataframe of all of the experiments experiments = mcc.get_experiments(dataframe = True, injection_structure_ids = [isocortex['id'].values[0]], cre = False) print "%d total experiments" % len(experiments) view_paths_fn = os.path.join(os.getenv('HOME'), 'work/allen/data/TopView/top_view_paths_10.h5') view_paths_file = h5py.File(view_paths_fn, 'r') view_lut = view_paths_file['view lookup'][:] view_paths = view_paths_file['paths'][:] view_paths_file.close() ## Compute size of each path to convert path averages to sums norm_lut = np.zeros(view_lut.shape, dtype=int) ind = np.where(view_lut != -1) ind = zip(ind[0], ind[1]) for curr_ind in ind: curr_path_id = view_lut[curr_ind] curr_path = view_paths[curr_path_id, :] norm_lut[curr_ind] = np.sum(curr_path > 0) t0 = time.time() 
expt_drop_list = [] full_vols = [] flat_vols = [] # - eid = experiments.iloc[5].id row = experiments.iloc[5] print eid print row data_dir = os.path.join(os.getenv('HOME'), "work/allen/data/sdk_new_100/experiment_%d/" % eid) # get and remap injection data print "getting injection density" in_d, in_info = mcc.get_injection_density(eid) print "mapping to surface" in_d_s = map_to_surface(in_d, view_lut, view_paths, scale = resolution_um/10., fun=np.mean) flat_vol = np.nansum(in_d_s * norm_lut) * (10./1000.)**3 flat_vols.append(flat_vol) full_vol = np.nansum(in_d) * (10./1000.)**3 full_vols.append(full_vol) expt_union = mcc.get_experiment_structure_unionizes(eid, hemisphere_ids = [2], is_injection = True, structure_ids = [ontology['grey']['id'].values[0]]) flat_vols full_vols full_vol2 = float(expt_union['projection_volume']) print expt_union print full_vol2 print np.nansum(in_d_s * norm_lut) in_d_sum = map_to_surface(in_d, view_lut, view_paths, scale = resolution_um/10., fun=np.sum) np.nansum(in_d_sum) * (10./1000.)**3 print np.abs(flat_vol - full_vol)/full_vol*100 print np.abs(flat_vol - full_vol2)/full_vol2*100 # + import matplotlib.pyplot as plt # %matplotlib inline tmp = norm_lut.astype(float) tmp[tmp == 0] = np.nan fig = plt.figure(figsize = (20,20)) ax = fig.add_subplot(121) h = ax.imshow(in_d_s) fig.colorbar(h) a2 = fig.add_subplot(122) h2 = a2.imshow(np.sum(in_d, axis=1)) fig.colorbar(h2) # - experiments
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Version information # + import sys sys.path.append("D:/GitHub/pyleecan-public") from datetime import date print("Running date:", date.today().strftime("%B %d, %Y")) import pyleecan print("Pyleecan version:" + pyleecan.__version__) import SciDataTool print("SciDataTool version:" + SciDataTool.__version__) # - # # How to use MeshSolution objects # The MeshSolution module is the part of the code which allows to store, perform operation(s), and display data linked to a mesh. For example, the magnetostatic solution from FEMM is stored using this module. # # This tutorial shows the different possibilities allowed by the current features of the MeshSolution module. At the moment, it is mainly dedicated to store 2D electromagnetic fields computed with FEMM, but the goal is to generalize to any physics that could be included inside Pyleecan. # # The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_MeshSolution.ipynb). # # This tutorial is for people who wish to understand in depth how this module works, and potentially contribute to the development of the code. # ## Architecture # The main class is the MeshSolution. It has two important attributes: # - A list of instances of Mesh class. The Mesh class allows to store the parameters of the mesh (coordinates, connectivity ...). # - A list of instances of Solution class. The Solution class allows to store solution field related to a mesh. # # Thus, the MeshSolution allows to make the link between the different kind of meshes and solutions. # # ## Defining a Mesh object and plots # # At the moment, there are two different types of Mesh objects: # - MeshMat class is designed to ease postprocessing. 
It enables access to important values (connectivity, nodes) and to defined interpolation methods. (numpy.array) # - MeshVTK class is designed to ease vizualization, by relying on existing librairy pyvista. # # The bridge between the two type of class is available with dedicated convert methods. # ### Defining a MeshMat object # # Although every features should be automatically initialized/defined in Pyleecan, we are going to define by hand some of the objects in order to introduce the basics principle of the MeshSolution module. # + from pyleecan.Classes.MeshMat import MeshMat from pyleecan.Classes.NodeMat import NodeMat from pyleecan.Classes.CellMat import CellMat from pyleecan.Classes.MeshSolution import MeshSolution mesh = MeshMat(dimension=3) mesh.node = NodeMat() mesh.node.add_node([0, 0, 0]) mesh.node.add_node([0, 1, 0]) mesh.node.add_node([1, 0, 0]) mesh.node.add_node([1, 1, 0]) mesh.node.add_node([2, 1, 0]) mesh.cell["triangle"] = CellMat(nb_node_per_cell=3) mesh.add_cell([0, 1, 2], "triangle") mesh.add_cell([1, 2, 3], "triangle") mesh.add_cell([2, 3, 4], "triangle") MSol = MeshSolution(mesh=[mesh]) MSol.group = {"stator":[0,1,2]} MSol.plot_mesh() # - # ## Defining a SolutionMat object and plot # The MeshSolution object allows to make the link between data (such as FE results) and the corresponding mesh stored in a Mesh object. Thus, all the plot and post-processing methods should be available in the MeshSolution class. # # The main available post-processing are the plots (such as plot_contour and plot_glyph). # # Here is an example with plot_contour: a scalar field is defined by giving its values all points of the mesh. 
# + import numpy as np from pyleecan.Classes.SolutionMat import SolutionMat field = np.array([[10, 11, 12, 13, 14]]) my_solution = SolutionMat( label="my_field", type_cell="point", field=field, indice=[0, 1, 2, 3, 4], axis_name=["time", "indice"], axis_size = [1, 5], ) MSol.solution.append(my_solution) MSol.plot_contour() # - # The notion of axis allows to correctly extract values as it would be with SciDataTool objects -> same way to call methods in SolutionMat/SolutionData/SolutionVector. # # Using SolutionMat, one can also defined a vector field by using an additional axis "component". # + vector = np.ones((10,5,2)) my_vec_solution = SolutionMat( label="my_vector", type_cell="point", field=vector, indice=[0, 1, 2, 3, 4], # optional if indice are sorted and starts from 0, but field size must match with the number of corresponding point/cell. axis_name=["time", "indice", "component"], axis_size = [10, 5, 2], ) MSol.solution.append(my_vec_solution) MSol.plot_glyph(label="my_vector", is_point_arrow=True, factor=1/10) # - # In this example, a 2D field is defined on a 3D mesh. Indeed, the mesh and the field have distinct "dimension" attributes. It enables to limit the memory space when possible. # ## Import an external Mesh # # At the moment, Pyleecan mainly relies on the meshio librairy to convert any type of mesh file into a .vtk which is readable by pyvista. Any contribution on this topic is welcome. However, we have recently added a method to import .unv files. 
# + #Convert to vtk with meshio from pyleecan.definitions import TEST_DIR from pyleecan.Classes.ImportMeshMat import ImportMeshMat import meshio from os.path import join test_obj = ImportMeshMat( file_path=join(TEST_DIR, join(TEST_DIR, 'Data/Mesh/mesh_test_mixte.unv')), ) mesh = test_obj.get_data() # Import in Pyleecan with MeshVTK MS = MeshSolution(mesh=[mesh]) MS.plot_mesh() # - # # Demo with FEMM results # The aim of this section is to show how MeshSolution object are used in Pyleecan to post-process FE results. # + # Run the FEMM simulation such as in tuto_Simulation_FEMM import json from multiprocessing import cpu_count from os.path import join import matplotlib.pyplot as plt import numpy as np import pytest from numpy import array, ones, pi, zeros from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal from pyleecan.Classes.InputCurrent import InputCurrent from pyleecan.Classes.MagFEMM import MagFEMM from pyleecan.Classes.Output import Output from pyleecan.Classes.Simu1 import Simu1 from pyleecan.definitions import DATA_DIR from pyleecan.Functions.load import load from Tests import save_load_path, save_plot_path SPMSM_003 = load(join(DATA_DIR, "Machine", "SPMSM_003.json")) simu = Simu1(name="test_SIPMSM_003", machine=SPMSM_003) # Definition of the enforced output of the electrical module N0 = 3000 Is = ImportMatrixVal( value=array( [ [6.97244193e-06, 2.25353053e02, -2.25353060e02], [-2.60215295e02, 1.30107654e02, 1.30107642e02], [-6.97244208e-06, -2.25353053e02, 2.25353060e02], [2.60215295e02, -1.30107654e02, -1.30107642e02], ] ) ) time = ImportGenVectLin(start=0, stop=0.015, num=4, endpoint=True) Na_tot = 1024 simu.input = InputCurrent( Is=Is, Ir=None, # No winding on the rotor N0=N0, angle_rotor=None, # Will be computed time=time, Na_tot=Na_tot, angle_rotor_initial=0.5216 + pi, ) # - # To enable the FE results saving: is_get_mesh # Definition of the magnetic simulation (no symmetry) 
simu.mag = MagFEMM( type_BH_stator=1, type_BH_rotor=1, is_periodicity_a=True, is_get_meshsolution=True, nb_worker=cpu_count(), ) out_femm = simu.run() # Now, the magnetic FEA results can be plotted. Moreover, the solution can be extracted on a specific area. By default, the field is plotted on the first dimension of every additional axis (e.g. time). out_femm.mag.meshsolution.plot_contour() out_femm.mag.meshsolution.plot_contour(label="H", group_names="stator core") out_femm.mag.meshsolution.plot_glyph(label="H", group_names="stator winding") # + tags=[] out_femm.mag.meshsolution.plot_contour(label="B", group_names="airgap") # - # The algorithm is even capable of extracting the interface between two groups: out_femm.mag.meshsolution.plot_mesh(group_names=["stator core", "/", "airgap", "stator winding"]) # Slices through addtional axes can be obtained using [SciDataTool](https://github.com/Eomys/SciDataTool) alike call. For example, if I want the magnetic field at the second time step: out_femm.mag.meshsolution.plot_contour( "time[2]", label="H", save_path=join(save_plot_path, simu.name + "_H_time2.png"), is_show_fig=False, ) # # Extract and post-process data # # Several methods have been developed for the MeshSolution class in order to load the results regardless of the type of objects. The main method of MeshSolution is get_field. The first arguments can be written similarly to [SciDataTool](https://github.com/Eomys/SciDataTool) call. For example if I want to get the magnetic flux density at the first time step after FEMM calculations: B = out_femm.mag.meshsolution.get_field("time[0]", "indice", "component", label='B') B.shape # Another useful feature is the groups. The groups allows to define submeshes corresponding to subpart of the machine. 
A new MeshSolution object can be created from the group definition: group_stator = out_femm.mag.meshsolution.get_group("stator core") # Then, the magnetic solution is filtered on the selected cells (triangular elements in this case). B_s = group_stator.get_field("indice", "time", "component", label='B') H_s = group_stator.get_field("indice", "time", "component", label='H') H_s.shape # The dimension of the field has been obviously reduced to the stator number of cells. Then, operations can be performed on the solution of this group, and plotted. It is worth noting that several type of Solution objects can co-exist in the same MeshSolution object. # + from pyleecan.Classes.SolutionMat import SolutionMat w_mag = np.multiply(B_s,H_s)/2 my_vec_solution = SolutionMat( label="w_mag", type_cell="triangle", field=w_mag, axis_name=["indice", "time", "component"], axis_size = [5715, 4, 2], ) group_stator.solution.append(my_vec_solution) group_stator.plot_contour(label="w_mag") # - # Operations can also be performed on the mesh. nodes_s = group_stator.get_mesh().get_node() nodes_s.shape # Then, rotate the mesh th = np.pi/2 R = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]]) nodes_s = np.dot(nodes_s, R) group_stator.mesh[0].node.coordinate = nodes_s group_stator.plot_mesh() # Previous plots still work ! group_stator.plot_contour(label="w_mag") # Thanks for following this tutorial ! :-)
Tutorials/tuto_MeshSolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: QISKitenv # language: python # name: qiskitenv # --- # # _Scalar Chains_ # # <img src="images/line_circs_site_psi.png" alt="Scalar Chain"> # The notebook describes some applications of scalar chain. The examples with _discrete quantum walk_ on a scalar chain are discussed further: # - [Coined quantum walk](#coined_walk) – discrete quantum walk with coin # - [Staggered quantum walk](#staggered_walk) – and equivalent model with alternating steps # - [Modeling of quantum walk on scalar chain](#modeling) – Python modeling # # Scalar chain with $N$ nodes is a quantum system with basic states $|k\rangle$, $k=1,\dots,N$. # Let us denote $R$ and $L$ operators of right and left shift respectively. For an infinite chain without boundary: # # $$R : |k\rangle \to |k+1\rangle,\qquad L : |k\rangle \to |k-1\rangle.$$ # # For a cyclic boundary condition (motion on a ring) arithmetic modulo $N$ should be used instead and an alternative model with possibility of reflections is considered further. # # The model of scalar chain is convenient for description of _quantum walk_. There are two broad classes of quantum walks: discrete and continuous (_aka_ state transport). The examples below use discrete time models and _'quantum bots'_ together with _coined_ and _staggered_ discrete quantum walk – are simple illustrations of such approach. # # ### _Coined quantum walk_<a id='coined_walk'></a> # # Let us consider so-called _coined quantum walk_. The model uses (together with a chain $|k\rangle$, $k=1,\dots,N$) the special _coin_ $|c\rangle$ with two states $|0\rangle$ and $|1\rangle$ to control direction of walk. So, basic states of the model are defined as $|c\rangle|k\rangle$. 
# # The coin defines direction of motion using _controlled shift_ operator: # # $$S = |0\rangle\langle 0| \otimes R + |1\rangle\langle 1| \otimes L$$ # # Such a model (_aka quantum bot_) is an example of _conditional quantum dynamics_. The operator $S$ for basic states of coins $|0\rangle$ and $|1\rangle$ produces right and left shifts respectively. # <img src="images/lines_quantum_bot.png" alt="Quantum Bot"> # The coined quantum walk suggests modification of coin after each step $S$ using some operator $C$ on a two-dimensional "coin space" # # $$W = S \cdot (C \otimes {\bf 1})$$ # # to look for some analogue of classical random walk. # Simplest choise is Hadamard matrix: # # $$C_H = H = \frac{\sqrt 2}{2}\begin{pmatrix}1&1\\1&-1\end{pmatrix}.$$ # # A _balanced coin_ also may be used # # $$C_b = \frac{\sqrt 2}{2}\begin{pmatrix}1&i\\i&1\end{pmatrix}.$$ # # The model also permits to define reflection on boundaries. In such a case operator $\hat S$ should "loop" both chains with opposite directions of motion: # <img src="images/loop_quantum_bot.png" alt="Walk with Reflections"> # ### _Staggered quantum walk_<a id='staggered_walk'></a> # # The composite quantum system with coin and chain has $2\times N = 2N$ basic states $|c,k\rangle$. An equivalent model of _staggered quantum walk_ is defined on a chain with $2N$ nodes and special partitions. Let us consider for $k =1,2,\ldots$ first partition with pairs of states $(2k-1,2k)$ and the second one with $(2k,2k+1)$. # <img src="images/line_circs_part.png" alt="Staggered Walk"> # Let us denote $S_1$ and $S_2$ swaps in all pairs for first and second partition, respectively. Without taking into account boundary, the composition of alternating swaps # $S = S_1 S_2$ would produce double-step shifts of elements with even and odd indexes in opposite directions $2k \to 2k+2$ and $2k+1 \to 2k-1$. 
# # See <a href="https://sketchfab.com/models/2836cf8f3a284e629ccd609137c54a04/embed">animated staggered classical walk in 3D</a> for illustration of such a motion with boundary effects. # # For a quantum model states $|c,k\rangle$ are mapped into $|2k+1\rangle$. The trivial motion of basic states would be represented as $S = S_1 S_2$, where $S_1 = s(1,2)\,s(3,4)\dots$ and $S_2 = s(2,3)\,s(4,5)\dots$ are two alternating # sequences of swaps defined for given link simply as # # $$s = \begin{pmatrix}0&1\\1&0\end{pmatrix}.$$ # # Coin operator $C$ acts on pairs in first partition $C = c(1,2)\,c(3,4)\dots$ and an operator # $S'_1 = C S_1$ is used in quantum staggered walk # # $$W_S = S'_1 S_2 = (C S_1) S_2.$$ # # So, for single link a swap operator multiplied on Hadamard coin is # # $$s_c = c \, s = \frac{\sqrt 2}{2}\begin{pmatrix}1&-1\\1&1\end{pmatrix}.$$ # ### _Modeling of a quantum walk on scalar chain_<a id='modeling'></a> # # Models discussed further use one-to-one correspondence $n = N$ between qubits and nodes of chain # (or $n = 2N$ for staggered walk). # More difficult method could "expand" $n$ qubits into a scalar chain with $N = 2^n$ states, # but it should be discussed elsewhere. # Thus, the scalar quantum chain model may use standard packages such as NumPy to # compare with QISKit application for [qubit chain](qubit_chain.ipynb). # # See more in [notebook for scalar chain modeling](scalar_chain_mod.ipynb).
appendix/teach_me_qiskit_2018/state_distribution_in_qubit_chains/scalar_chain.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # Määrämuotoisen datan käsittelyä pythonilla (excel/csv) # # Muiden nykyaikaisten ohjelmointikielien tapaan Python sisältää hyvin paljon valmiina erilaisia apukirjastoja, joita yhdistelemällä saa aikaiseksi tehokkaita toimintoja. # # Otamme esimerksi tilanteen, jossa euroopan keskuspankista ladattu euron valuuttakurssidata halutaan saada paremmin jatkokäsittelyyn sopivaan muotoon. Sivulla https://www.ecb.europa.eu/stats/policy_and_exchange_rates/euro_reference_exchange_rates/html/index.en.html löytyy data csv-muotoisena. Tässä zip-pakatussa tiedostossa on data sarakkeissa - me haluaisimme saadan datan rivi kerrallaan ja siten, että kolmikirjaimiset maakoodit ovat aakkosjärjestyksessä. # # Teemme datan lataamisen ja muokkaamisen käyttänen python3.5:n standardikirjastoja. 
# + deletable=true editable=true # #!/usr/bin/env python3 # otetaan käyttöön tarvittavista moduuleista olioita ja funktioita from io import BytesIO from zipfile import ZipFile from urllib.request import urlopen # ladataan datana (bytes) URL:n sisältö # tässä ei ole virhetarkastelua - ongelmatilanteissa saamme poikkeuksen urldata = urlopen('https://www.ecb.europa.eu/stats/eurofxref/eurofxref.zip').read() print( 'Datatype', type(urldata), 'size', len(urldata) ) # + deletable=true editable=true # verkosta tulleet raakatavut muutetaan olioksi, jota voi käsitellä kuten tiedostoa bytefile = BytesIO( urldata ) # tämä nyt muistissa oleva "tiedosto" avataan käsiteltäväksi zipfile-moduulin avulla zipfile = ZipFile( bytefile ) print( zipfile.filelist ) # zip-paketin sisältö # + deletable=true editable=true # varmistetaan oletuksemme ladatusta zip-paketista: assert( len( zipfile.filelist ) == 1 ) # sisällä yksi tiedosto fileobj = zipfile.filelist[0] assert( fileobj.filename == 'eurofxref.csv' ) # tiedostolla on oletettu nimi print( 'checks ok ') # + deletable=true editable=true # kerätään pakatun zip-tiedoston sisältö talteen listaksi rivejä with zipfile.open( fileobj.filename ) as infile: LINES = infile.readlines() print( LINES ) # + deletable=true editable=true # jokaisella rivillä on pilkulla eroteltuna kiinnostava data, # joten pätkitään data listaksi pilkun kohdalta. 
# tyyppimuunnos merkkijonoksi tuottaa pilkkomisen jälkeen myös merkkijonoja HEADER = str( LINES[0] ).split(', ') DATA = str( LINES[1] ).split(', ') print( HEADER, DATA ) # + deletable=true editable=true # ensimmäinen ja viimeinen alkio ei kiinnosta meitä, joten pätkäistään ne pois listalta HEADER = HEADER[1:-1] DATA = DATA[1:-1] print( HEADER, DATA ) # + deletable=true editable=true # data otetaan käsittelyyn liukulukuina DATA = list( map( float, DATA ) ) print( DATA ) # + deletable=true editable=true # rakennetaan DICTIONARY, jossa on jokaisen maatunnuksen avaimella talletettuna valuuttakurssi arvona RATES = dict() for country, rate in zip( HEADER, DATA ): RATES[ country ] = rate print( RATES ) # + deletable=true editable=true # haluttu tulos on nyt RATES-dictionaryn avaimet aakkosjärjestyksessä tulostettuna ja pilkulla erotettuna for country in sorted( RATES.keys() ): print( "{0},{1}".format( country, RATES[country] ) ) # + deletable=true editable=true
python35-csv-datan-kasittely.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Tracking Resource Usage with PennyLane Device Tracker # In this notebook we will explore how to use the PennyLane device tracker feature with Amazon Braket. # As is demonstrated in the [optimization of quantum circuits notebook](https://github.com/aws/amazon-braket-examples/blob/main/examples/pennylane/1_Parallelized_optimization_of_quantum_circuits.ipynb), computing gradients of quantum circuits involves multiple devices executions. # This can lead to a large number of executions when optimizing quantum circuits. # So to help users keep track of their usage, Amazon Braket works with PennyLane to record and make available useful information during the computation. # The PennyLane device resource tracker keeps a record of the usage of a device, such as numbers of circuit evaluations and shots. # Amazon Braket extends this information with task IDs and simulator duration to allow further tracking. # The device tracker can be combined with additional logic to monitor and limit resource usage on devices. # ## Setup # + import pennylane as qml from pennylane import numpy as np import time # - # Before we show the device resource tracker, let's first set up a circuit for demonstration and experimenting. # We will use a simple two qubit circuit with two parameters throughout this notebook. # We will start by evaluating it using the local Braket simulator. 
# + wires = 2 # Number of qubits dev = qml.device("braket.local.qubit", wires=wires) def circuit(params): qml.RX(params[0], wires=0) qml.RY(params[1], wires=1) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(1)) qnode_local = qml.QNode(circuit, dev) params = np.array([0.1, 0.2], requires_grad=True) # - # ## The PennyLane Device Resource Tracker # The PennyLane device resource tracker is a python context manager created by `qml.Tracker`. # To use the device tracker to track a single evaluation of our simple circuit, we put the evaluation inside a `with` statement block. # Only evaluations inside of the `with` block will have their usage recorded and accumulated. with qml.Tracker(dev) as tracker: print("Expectation value of circuit:", qnode_local(params)) # With this execution complete, all of the recorded information is available in `tracker`. # There are three interfaces to access the data inside of the resource tracker. # The full history of each update is available through `tracker.history`. # Numerical values are accumulated, and each of their totals are in `tracker.totals`. # And lastly, the most recent update to the tracker is kept in `tracker.latest`. # # Let's first look at the history of what was recorded by evaluating our circuit. print(tracker.history) # We can see that this evaluation lead to a single circuit execution. # This single execution had no shots, and was evaluated with a single batch of length 1. # When using the Braket local simulator, the device tracker records the unique id that the simulator assigns to new tasks. # # Next, let's evaluate the gradient of this circuit and track the device usage. # + with qml.Tracker(dev) as tracker: print("Gradient of circuit:", qml.grad(qnode_local)(params)) print(tracker.history) # - # Recall that evaluating the gradient of a quantum circuit will result in multiple circuit evaluations. # Here we see that to calculate this gradient, 5 new tasks were created and recorded in the device tracker. 
# For values which are numeric, such as the `executions`, the device tracker will accumulate all of the recorded data into `tracker.totals` print(tracker.totals) # Note that `None` is not numeric, which is why the shots are not accumulated in `tracker.totals` in the previous example. # If we instead compute the gradient with a finite number of shots, we will see the total shots recorded. # Let's try the gradient with `shots=100`. # Since there are 5 circuit evaluations, we should expect the total shots to be `500`. with qml.Tracker(dev) as tracker: qml.grad(qnode_local)(params, shots=100) print(tracker.totals) # We can also inspect the most recently recorded data with `tracker.latest`. # Note that not every field will be present in every update. # The latest update in this gradient calculation only included `'batches'` and `'batch_len'`. print(tracker.latest) # ## Using Amazon Braket Simulators # The Amazon Braket on-demand simulators report additional information to the device tracker. # Let's set up a remote device using Amazon Braket SV1, and create a new `QNode` to run our circuit on the on-demand simulator. # + device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1" dev_remote = qml.device( "braket.aws.qubit", device_arn=device_arn, wires=wires ) qnode_remote = qml.QNode(circuit, dev_remote) # - # Now we can send the same simple circuit from above to the AWS managed simulator and track the resource usage. with qml.Tracker(dev_remote) as tracker: qnode_remote(params) # Let's take a look at what new fields are available in the tracker with a managed simulator. for key in tracker.history.keys(): print(key) # The `braket_task_id` field will contain unique IDs for each task that was created. # For tasks that are created on the managed simulators, these IDs can be recorded and used to look up the task details in the AWS console. # The `braket_simulator_ms` gives the duration in milliseconds of the portion of the task spent simulating our circuit. 
# `braket_simulator_billed_ms` adjusts the simulation duration according to the minimum duration for billing. # See [here](https://aws.amazon.com/braket/pricing/) for pricing details on Amazon Braket. # # These reported times are not the total time for a task to complete. # We can compare the simulation time to the total task time for this circuit: with qml.Tracker(dev_remote) as tracker: t0 = time.time() qnode_remote(params) t1 = time.time() print("Remote device execution duration in seconds:",t1 - t0) print("Simulation duration in seconds:", tracker.totals["braket_simulator_ms"] / 1000) # With such a small circuit in this example, only a very small fraction of the total time is spent running the simulation of the circuit. # ## Using the Tracker to Limit Resource Usage # As we have seen above, it is possible to record usage of a device during execution with the resource tracker. # With this information available during the computation, it is possible to handle the information in order to control the resource usage. # However, this feature does not attempt to estimate how many resources may be used in the future. It is purely backward-looking. # Thus any resource usage can only be acted on after the fact with this feature. # # In the [PennyLane getting started notebook](https://github.com/aws/amazon-braket-examples/blob/main/examples/pennylane/0_Getting_started.ipynb), # we optimized a circuit by running the gradient descent optimizer for a chosen number of iterations. # Instead of a given number of iterations, we may want to limit the optimization to something else such as the total number of shots or the simulator execution time. # Here we will show two ways to use the PennyLane device tracker to limit an optimization on a resource which is being tracked. # # Let's suppose we wish to optimize our simple circuit, but want to limit ourselves to a particular number of circuit executions. # First let's set up an optimizer. 
opt = qml.GradientDescentOptimizer(stepsize=0.2) # Now we run the optimizer, but on every step of the loop we check if our limit has been crossed. # Once the total executions exceed our target limit, we will break out of the optimization and report our results. # + max_iterations = 100 execution_limit = 77 params = np.array([0.1, 0.2], requires_grad=True) with qml.Tracker(dev) as tracker: for i in range(max_iterations): params, cost = opt.step_and_cost(qnode_local, params) if tracker.totals["executions"] > execution_limit: break print("Completed", i + 1, "steps of optimization.") print("Optimized cost:", cost) print("Optimized parameters:", params) # - # The resource usage is compared only after each iteration of the optimization. # If each of the steps in your computation involve many circuit evaluations, # your computation may still continue for many evaluations after your limit is hit. # For our simple problem, every iteration leads to 5 circuit evaluations, so we do not stop right at our limit. # We can see this by checking the tracker totals. print(tracker.totals) # Take this into consideration when setting a threshold if you choose to limit your computation this way. # For another approach to limit resource usage, we can add a callback function to the device tracker. # This function is called each time the information in the tracker is updated. # This interface could also be used for monitoring large batches of executions. # For example, let's print out the running total number of executions every time the tracker is updated. # + def log_executions(totals, history, latest): print("Total executions:",totals["executions"]) with qml.Tracker(dev, callback=log_executions) as tracker: qml.grad(qnode_local)(params) # - # We can use the callback feature of the device tracker to stop a computation after a threshold is breached. # By throwing an exception from our callback function when the new data violates our limit, we can abort the currently running optimization. 
# Let's try this approach with the same optimization problem. # + params = np.array([0.1, 0.2], requires_grad=True) def resource_threshold(totals, history, latest): if totals["executions"] > execution_limit: raise ResourceWarning() with qml.Tracker(dev, callback=resource_threshold) as tracker: try: for i in range(max_iterations): params,cost = opt.step_and_cost(qnode_local, params) except ResourceWarning: print("Completed",i,"steps of optimization.") print("Optimized cost:", cost) print("Optimized parameters:", params) # - # This time one fewer optimization step is completed because the final step is aborted. # We can look at the totals to see that the execution was stopped after reaching our set limit. print(tracker.totals) # **Note** This approach will immediately terminate the optimization upon exceeding the resource limit. # If the termination happens while in the middle of a step, that step will not finish. # In this case, circuits will have been executed that will then have their results discarded. # ## Summary # In this notebook, we have seen the resource usage recorded by the PennyLane device tracker and the extended information provided by Amazon Braket. # We showed two different ways to use the resource information in the middle of the computation, and we were able to control the optimization routine to limit usage according to a preset threshold. # If you are looking to explore more, try modifying the [QAOA notebook](https://github.com/aws/amazon-braket-examples/blob/main/examples/pennylane/2_Graph_optimization_with_QAOA.ipynb) to track the use of the simulators in that example.
examples/pennylane/5_Tracking_resource_usage/5_Tracking_resource_usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Compression
#
# The purpose of this notebook is to show the typical compression ratios for text data. This can be used to estimate when serialization benefits from compression.
#

# +
import os as _os
import bz2 as _bz2
import gzip as _gzip
import lzma as _lzma
import random as _random
import typing as _typing

import requests as _requests
import pandas as _pd
import seaborn as _sb

# Public domain copy of The Iliad on Project Gutenberg.
TEXT_LINK = 'https://www.gutenberg.org/ebooks/6130.txt.utf-8'

# Cache file for the (slow) measurement results.
DATA_FILE = 'compression.csv'

# Chunk sizes 0 .. CHUNK_SIZES-1 are measured, CHUNK_COUNT random samples each.
CHUNK_SIZES = 1024
CHUNK_COUNT = 1000

COMPRESS_FUNCTIONS = {
    'bz2': _bz2.compress,
    'gzip': _gzip.compress,
    'lzma': _lzma.compress,
}


class Result(_typing.NamedTuple):
    # One measurement: compression method, original chunk size in bytes,
    # and the average compressed size observed for that chunk size.
    method: str
    original: int
    compressed: int
# -

# The computation compresses random chunks of text and returns average compressed sizes for ranges of original sizes.
#


def compute_results(text, chunk_sizes=None, chunk_count=None):
    """Measure average compressed sizes of random chunks of *text*.

    For every chunk size from 0 up to ``chunk_sizes - 1``, ``chunk_count``
    random substrings of that length are drawn from *text*, encoded as
    UTF-8 and compressed with every method in ``COMPRESS_FUNCTIONS``; the
    average compressed size per method is recorded.

    Both parameters default to the module-level ``CHUNK_SIZES`` and
    ``CHUNK_COUNT`` constants, so existing callers are unaffected.

    Returns a list of ``Result`` named tuples.
    """
    chunk_sizes = CHUNK_SIZES if chunk_sizes is None else chunk_sizes
    chunk_count = CHUNK_COUNT if chunk_count is None else chunk_count
    results = []
    for size in range(chunk_sizes):
        # Random start offsets chosen so each chunk of `size` characters
        # fits inside the text.
        offsets = [
            _random.randrange(0, len(text) - size)
            for _ in range(chunk_count)
        ]
        chunks = [
            text[offset:offset + size].encode('utf-8')
            for offset in offsets
        ]
        for method, compress_function in COMPRESS_FUNCTIONS.items():
            sizes = [len(compress_function(chunk)) for chunk in chunks]
            average = sum(sizes) / len(sizes)
            results.append(Result(method=method, original=size, compressed=average))
    return results

# The diagram shows that compression typically starts paying off in terms of size after a few hundred bytes of text.
#

# +
# Try to load the results if already computed.
# Save new results if not already computed.
try:
    # Reuse the cached measurements when the CSV already exists.
    results = _pd.read_csv(DATA_FILE)
except Exception:
    # Best-effort cache: any read failure triggers a fresh (slow) run.
    text = _requests.get(TEXT_LINK).text
    # compute_results returns a plain list of Result tuples; wrap it in a
    # DataFrame before saving (a list has no to_csv method).
    results = _pd.DataFrame(compute_results(text))
    # index=False keeps the reload from growing a spurious index column.
    results.to_csv(DATA_FILE, index=False)

plottable = _pd.DataFrame(results)

_sb.set()
plot = _sb.relplot(data=plottable, x='original', y='compressed', hue='method', kind='line')
plot.set_xlabels('Original size [B]')
plot.set_ylabels('Compressed size [B]')
plot._legend.set_title('Method')
src/serialization-compression/python/compression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 【Techの道も一歩から】第13回「文書や単語をどうやって表現するかコサイン類似度で学ぶ」 # # こんにちは。 DSOC R&D グループの[高橋寛治](https://8card.net/p/34779592122)です。 # # 前回のTFIDFでは使っていたのですが、触れなかった文書や単語の表現方法について述べます。 # 文書をベクトルとして表現し、実際にコサイン類似度を計算することで理解を深めます。 # # また、scikit-learnを使わずにできる限りnumpyを利用してコードを記述してみます。 # ソースコードは[こちら](https://github.com/kanjirz50/tech-blog-script)にあります。 # ### ベクトルで取り扱うと計算しやすい # 自然言語を取り扱う研究では、文書や単語といったように自然言語を何らかの単位で取り扱います。 # 自然言語処理でも活用される機械学習手法は数式で表現されるものであり、データやその演算はベクトルで取り扱われています。 # # 自然言語処理を機械学習で取り扱うために、文書や単語をベクトル化します。 # 前回紹介したTFIDFの場合は、文書を単語で構成されたベクトルとしていました。 # # ベクトル化する単語や文書とは何かを述べていきます。 # ### 単語と文書 # 「単語」と簡単に言いますが、何が単語かを厳密に定義するのは難しいです。 # 英語の場合はスペース区切りがあるため、それで十分に思うかもしれません。 # しかし活用形があったり、複合名詞があったりと何らかの定義を定めて取り扱うのは困難です。 # # 日本語をはじめとしたアジア言語の場合は単語区切りがないため、単語分割が前処理に適用されます。 # 単語を定めるのは難しく、さまざまな観点からの分割単位となっており、様々な形態素解析辞書が開発されています。 # たとえば検索向けには、再現率向上のために分割単位の細かな辞書を用い、固有表現抽出には固有名詞が大量に登録された辞書を用います。 # # 厳密な単語の定義はさておきとして、何かしらの1語を単語と呼びます。 # トークンとも呼ばれます。 # # 文書は、単語列で構成されたひとかたまりの単位となります。 # たとえばブログ記事だと1記事1文書となります。 # タスクにより文書の単位は異なりますが、単語列で構成されたものであることには変わりません。 # ### 文書や文をベクトルで表現 # 自然言語をベクトル化することで機械学習を適用します。 # # 文書や文をどうやってベクトルで表現するといいでしょうか。 # 前に説明した単語をベクトルの1要素として取り扱うことで表現します。 # 次に示す例文Aをベクトルで表現してみましょう。 # # 文A「今日 の 夕飯 は 揚げたて の 天丼 だ 。」をベクトル化します。 # ここで、単語はスペース区切りで与えられているものとします。 # # | | 今日 | の | 夕飯 | は | 揚げたて | 天丼 | だ | 。 | # |:-----------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| # | 文A | 1 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | # # このように1要素が1単語に対応します。数値は単語が何回出現したかを示します。 # 数式では$ 文A = [1, 2, 1, 1, 1, 1, 1, 1] $ のように示されます。 # # このようにベクトル化したものを **bag-of-words** と呼びます。 # 単語を袋詰めにしたというもので、語順情報が消失していますが、単語がどれだけ含まれているかということを表現するものです。 # # 語順を捨てることはやや乱暴に思えますが、取り扱いやすい表現であるため広く利用されています。 # 前の記事で紹介したTF-IDFでもbag-of-wordsを利用しています。 # # 文書の場合も同様に、ある文書に対してどれくらい単語が出現したかをbag-of-wordsで表現します。 # ### 
bag-of-words間のコサイン類似度の考え方 # ベクトルで表現することにより、ベクトルでの様々な手法が適用可能となります。 # 言語処理でよく利用されるベクトル間の類似度計算の手法に、コサイン類似度があります。 # # コサイン類似度とは、ベクトルのなす角が0に近づく(≒一致する)ほど値が1に近づくコサインの性質を利用して類似度を計算する手法です。 # コサイン類似度は次の式で示されます。 # # $$ cos(A, B) = \frac{A \cdot B}{|A||B|}$$ # # 分母はAとBの大きさの積、分子はAとBの内積をとります。 # ここで、各ベクトルを正規化することで分母は1となります。 # すなわち正規化(ノルムを1に)したそれぞれのベクトルの内積をとるだけとなります。 # # やや回りくどいですが、正規化したベクトルの大きさが1となることを確認します。 # + import numpy as np # 適当なベクトルを作る a1 = np.array([1, 0, 2, 3]) # ベクトルの正規化のためのノルムを算出 a1_norm = np.linalg.norm(a1) # ベクトルの正規化 np.linalg.norm(a1 / a1_norm) # - # ### 実際の文書でコサイン類似度を計算 # 実際の文書に適用してみましょう。 # 名詞のみを対象として、コサイン類似度を計算します。 # #### 入力文書の単語分割 # 形態素解析にはPurePythonのjanomeを利用します。 # + import glob import numpy as np from scipy import sparse from scipy.sparse import linalg as spsolve from janome.analyzer import Analyzer from janome.tokenizer import Tokenizer from janome.tokenfilter import POSKeepFilter, CompoundNounFilter np.set_printoptions(formatter={'float': '{: 0.3f}'.format}) # 複合名詞は複合名詞化し、名詞のみを抽出する a = Analyzer(token_filters=[CompoundNounFilter(), POSKeepFilter("名詞")]) docs = [] for f in glob.glob("../tfidf/docs/*.txt"): with open(f, "r", encoding="utf-8") as fin: doc = [] for line in fin: line = line.strip() if not line: continue doc.append(" ".join([tok.surface for tok in a.analyze(line)])) docs.append(" ".join(doc)) # - # #### 単語列からBag-of-words表現を取得 # スペース区切りの文のリストを引数にとり、bag-of-wordsに変換するクラスを作ります。 # # スペース区切りの文例 # > ["Python requestsモジュール 文字コード対策 編集 Webスクレイピング", "..."] # + from collections import defaultdict, Counter class CountVectorizer: def __init__(self): self.vocablary = defaultdict(lambda: len(self.vocablary)) def fit(self, X): for words in self.__iter_words(X): [self.vocablary[word] for word in words] return self def transform(self, X): s = sparse.dok_matrix((len(X), len(self.vocablary)), dtype=np.uint8) for i, words in enumerate(self.__iter_words(X)): v = Counter([self.vocablary[word] for word in words]) for k, freq in v.items(): s[i, k] = freq return s def 
fit_transform(self, X, y=None): return self.fit(X).transform(X) def __iter_words(self, docs): for doc in docs: yield doc.split(" ") # - # `docs` を実際にベクトル化します。 count_vectorizer = CountVectorizer() vecs = count_vectorizer.fit_transform(docs) # #### ベクトルの正規化処理 # 計算を簡単化する正規化処理も実装してみましょう。 def normalize(v): # 各ベクトルで正規化。スライスにより除算を適用可能な形に変形。 return v / spsolve.norm(v, axis=1)[:, np.newaxis] # #### コサイン類似度の計算 # 正規化しているため、ベクトル間の内積をとるだけとなります。 # # $$ cos(A, B) = A \cdot B $$ normalized_vecs = normalize(vecs) cossim = np.dot(normalized_vecs, normalized_vecs.T) # 計算があっているかどうかを確かめるために、対角成分が1になっているか、numpy.diag を用いて確認します。 (厳密には誤差があるため、1に限りなく近い値となります。) print(np.diag(cossim)) # 問題ないようです。 今回は入力文書数も少ないため、類似度すべてを表示してみましょう。 n行目m列のベクトルは、文書nに対する文書mの類似度を表します( n,m∈文書数 ) print(cossim) # 文書0のそれぞれの文書に対するコサイン類似度を確認します。 print(cossim[0]) # 文書1、文書7が大きいことがわかります。 # `numpy.argsort` を利用してコサイン類似度の降順に文書番号を取得します。 # `numpy.argsort` には並び替えのオプションがないため、正負を反転させた値を与えます。 print(np.argsort(-cossim[0])) # 実際の文書を見て、類似度がどのようなものか確認しましょう。 # 入力文書と類似度の高い文書、低い文書をそれぞれ300文字まで表示します。 # 前処理後の文書ですので、名詞の分かち書きとなります。 # 入力文書 docs[0][:300] # 最も類似度が高い文書 docs[1][:300] # 類似度が一番低い文書 docs[5][:300] # 類似度の高い文書は、Pythonに関するテーマを述べているため合っているように見えます。 # 類似度が一番低い文書は、勉強会登壇の話でPythonというテーマではあるものの違う文書ということで良さそうです。 # 簡単な方法ですが、それらしい類似度計算ができていることがわかります。 # ### 古典的な理論を実装して確かめる # 文や文書をベクトル化し、その恩恵を実装して確かめました。 # コサイン類似度による類似度計算は非常に簡単ですが強力です。 # また、ベクトルとして文書を取り扱ういい練習となると思います。 # # 説明するほど理解できていないトピックモデルをがんばって勉強して、そのうち記事を書きたいと思います。 # 参考文献 # - 言語処理のための機械学習入門
bag_of_words/cosine_similarity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false nbgrader={"checksum": "cef6bb2fa2f9bf18a3ede5d2a094b4ad", "grade": false, "grade_id": "cell-dc148b069032ac72", "locked": true, "schema_version": 1, "solution": false} # # SLU17 - Data Sufficiency and Selection # # In this notebook you will cover the following: # # - Feature Importance # - Single Factor Analysis # - Learning Curves # + deletable=false editable=false nbgrader={"checksum": "5bf98464b6fc4d66fe64493fd0e92af3", "grade": false, "grade_id": "cell-73d0f28288cec5d9", "locked": true, "schema_version": 1, "solution": false} import math import pandas as pd import numpy as np import sklearn from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split, learning_curve from sklearn.metrics import roc_auc_score from matplotlib import pyplot as plt from base64 import b64encode, b64decode # just for grading purposes # %matplotlib inline # + deletable=false editable=false nbgrader={"checksum": "30e6205da1ea3f9c8fb0ff95b860508e", "grade": false, "grade_id": "cell-e47d27916830bf56", "locked": true, "schema_version": 1, "solution": false} # load up a classification dataset X = pd.read_csv('data/exercise_X.csv') y = pd.read_csv('data/exercise_y.csv')['label'] # give X a quick look X.head() # + deletable=false editable=false nbgrader={"checksum": "1a34048e1f88a78cad22634f4219bd81", "grade": false, "grade_id": "cell-a7ff45300c5ff1f4", "locked": true, "schema_version": 1, "solution": false} # looks like a balanced binary target y.value_counts() # + [markdown] deletable=false editable=false nbgrader={"checksum": "ea6e38d659d3c7ea6f1203a502821b69", "grade": false, "grade_id": "cell-bd5e4f00e0c6d410", "locked": true, "schema_version": 1, "solution": false} # # Find the first 
obviously useless feature # # Can you determine which of the features contains all uniques and therefore cannot have any predictive power? # - # Use this cell to determine which of the features serves as a categorical # feature and contains all uniques # + deletable=false nbgrader={"checksum": "e73a5de4aede79c69018b84492e19258", "grade": false, "grade_id": "cell-a584b7a530c36ed0", "locked": false, "schema_version": 1, "solution": true} # set the variable feature_all_unique to the name of the feature # that contains all uniques feature_all_unique = None # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "8378556f466cd04e05ff651eb126e539", "grade": true, "grade_id": "cell-805f4867b353fe96", "locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert b64encode(feature_all_unique.encode()) == b'aWQ=' ### END TESTS # + [markdown] deletable=false editable=false nbgrader={"checksum": "646fb4d725a6bf431f8f4869deac0194", "grade": false, "grade_id": "cell-602ec15412879335", "locked": true, "schema_version": 1, "solution": false} # # Find the second obviously useless feature # # This one doesn't contain all uniques but based upon some Single Factor Analysis you should be able to determine which feature isn't worth # bothering with. 
# - # use this cell to do some more SFA on other features to determine # which of them is useless # + deletable=false nbgrader={"checksum": "bf348c96362a983d55ee71535f699338", "grade": false, "grade_id": "cell-f99bab2575793069", "locked": false, "schema_version": 1, "solution": true} # Use this cell to determine the other obviously useless feature other_useless_feature = None # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "41be55b74325839dbaa9bda38f2658e0", "grade": true, "grade_id": "cell-6964c9cd17f443b8", "locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert b64encode(other_useless_feature.encode()) == b'ZmVhdHVyZV84' ### END TESTS # + deletable=false nbgrader={"checksum": "e76f06c30f3783db0c7ab26b16d16037", "grade": false, "grade_id": "cell-949c10443e311151", "locked": false, "schema_version": 1, "solution": true} # now drop the features that you determined to be useless # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "1e94d50d525aef8425136b3fac304397", "grade": true, "grade_id": "cell-476d75f82723fb0c", "locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert b64encode(str(sorted(X.columns)).encode()) == b'<KEY> ### END TESTS # + [markdown] deletable=false editable=false nbgrader={"checksum": "fd0a1284d6d8056e2011932a0963c865", "grade": false, "grade_id": "cell-d29f316d3f5c61e1", "locked": true, "schema_version": 1, "solution": false} # # Find the rest of the useless features # # Single Factor Analysis isn't likely to do much in helping us to determine # which of the rest of the features are useless. 
We'll need to use feature_importances in order to find the rest of these bad boys # + deletable=false nbgrader={"checksum": "57162a9eab6b89dc4063a580f37d4aa2", "grade": false, "grade_id": "cell-2f4e8d74d1f01c95", "locked": false, "schema_version": 1, "solution": true} # in order to use feature importances you will need to import a tree # based algorithm. Let's use a decision tree classifier # import your decision tree classifier here the exact same way that # it is done in the learning notebooks # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "207e8cc2ada690189494acaead901696", "grade": true, "grade_id": "cell-cbc4c686952738f8", "locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert DecisionTreeClassifier, 'Import that bad boy from the sklearn.tree module' ### END TESTS # + deletable=false nbgrader={"checksum": "c0470e440a9b9f3ef12e879348be18a5", "grade": false, "grade_id": "cell-6fbdfd50396e632f", "locked": false, "schema_version": 1, "solution": true} # Now let's train the classifier and get the feature importances # Create your classifier, assign it to the clf variable and then # train it on X and y clf = None # once the classifier is trained, set the feature importances here # make it a pandas series with the index being the column names # so that we can visualize the results feature_importances = None # set the random_state=1 and max_depth=5 or the tests won't pass! 
# YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "113b9a32590af28b5070f40a82943233", "grade": true, "grade_id": "cell-ca84ae67d444979f", "locked": true, "points": 4, "schema_version": 1, "solution": false} ### BEGIN TESTS assert isinstance(clf, sklearn.tree.DecisionTreeClassifier), 'The classifier must be a DecisionTreeClassifier' assert clf.random_state == 1, 'random_state must be 1' assert clf.max_depth == 5, 'max_depth must be 5' assert feature_importances.equals(pd.Series(clf.feature_importances_, index=X.columns)), 'feature_importances must be a pandas series with the X columns as the index' ### END TESTS # + deletable=false editable=false nbgrader={"checksum": "85eba0812c4f246803c1e22c2a6a4b29", "grade": false, "grade_id": "cell-662fccd5e1566e6a", "locked": true, "schema_version": 1, "solution": false} feature_importances.plot.barh(); # + deletable=false nbgrader={"checksum": "41aa8d671e7cb66a2b0bcdeb86046277", "grade": false, "grade_id": "cell-0dc20755ad6d11ce", "locked": false, "schema_version": 1, "solution": true} # now remove 3 additional useless features # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "4d165a440daeb830f6dfcf91a342c330", "grade": true, "grade_id": "cell-47bb1f540f749a88", "locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert b64encode(str(sorted(X.columns)).encode()) == b'WydmZWF0dXJlXzInLCAnZmVhdHVyZV8zJywgJ2ZlYXR1cmVfNCcsICdmZWF0dXJlXzYnLCAnZmVhdHVyZV83J10=' ### END TESTS # + [markdown] deletable=false editable=false nbgrader={"checksum": "3b5f48d5aeb9a90f048e029531b64f7d", "grade": false, "grade_id": "cell-168fac9d07e35232", "locked": true, "schema_version": 1, "solution": false} # # The learning curve # # Okay now that we have gotten rid of all those useless features, let's focus on getting a sense for how much data we need in order to have # reasonable performance. 
# + deletable=false nbgrader={"checksum": "a588e65365e234102cc9ac7ac3b9345e", "grade": false, "grade_id": "cell-f22e3c91b478ba32", "locked": false, "schema_version": 1, "solution": true} # Now create a dataframe that has a single feature that is the # cross validation score in order to help us understand # how increasing amounts of data affect the performance # HINT: just use the snippet from the Learning Notebook train_scores_mean = None test_scores_mean = None # instantiate a classifier that you will inspect the learning rate of clf = DecisionTreeClassifier(max_depth=5, random_state=1) # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "7d76dadb5cd5050ecd783ed187fe2f96", "grade": true, "grade_id": "cell-d1bded5a2e391786", "locked": true, "points": 4, "schema_version": 1, "solution": false} ### BEGIN TESTS # round in order to compensate for implementation details assert math.isclose(sum(train_scores_mean), 4.5, rel_tol=1e-2) assert math.isclose(sum(test_scores_mean), 3.8, rel_tol=1e-2) ### END TESTS # + deletable=false editable=false nbgrader={"checksum": "77d8574b36cf0e2bf33d09c8f8ea5ee7", "grade": false, "grade_id": "cell-b91ef37925959db9", "locked": true, "schema_version": 1, "solution": false} learning_curve_df = pd.DataFrame({ 'Training Scores': train_scores_mean, 'Test Set scores': test_scores_mean }, index=train_sizes) learning_curve_df.plot.line( title='Decision Tree Learning Curve' ); # + deletable=false nbgrader={"checksum": "e39403a6a366d8dd269e73f76977575e", "grade": false, "grade_id": "cell-fc4e8393bc289ff2", "locked": false, "schema_version": 1, "solution": true} # Now select the minimum training set size that this particular classifier # seems to need before it's learning rate stabilizes min_train_set_size = None # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "703dc8ce056599c4c7759295413a2d2f", "grade": true, "grade_id": "cell-1669e239252c8492", 
"locked": true, "points": 2, "schema_version": 1, "solution": false} ### BEGIN TESTS assert min_train_set_size <= int(b64decode(b'MzAw')) ### END TESTS
units/SLU17_Data_Sufficiency_And_Selection/Exercise notebook - SLU17 (Data Sufficiency and Selection).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Business plan
#
# - Output:
#     - A table with the information about the books
#
# - Process: the sequence of steps, organized by execution logic.
#     - Inspect the HTML of the page
#     - Research the best way to extract the data
#     - Collect the following data: category | book name | customer rating | stock | price
#     - Clean the data
#
#
# - Input:
#
#     1. Data source
#
#         - Books to Scrape website: https://books.toscrape.com
#
#
#     2. Tools
#
#         - Python 3.8.0
#         - Web-scraping libraries (BS4, Selenium)
#         - Jupyter Notebooks (analysis and prototyping)
#
#

# # 0.0 Imports

# +
import re
import requests
import warnings  # needed by the filterwarnings call below

import pandas as pd
import numpy as np
import seaborn as sns

from datetime import datetime
from bs4 import BeautifulSoup
from IPython.core.display import HTML
from IPython.display import Image
from matplotlib import pyplot as plt
# -

# # 0.1 Helper Functions


def jupyter_settings():
    """Configure notebook-wide plotting and pandas display defaults."""
    # %matplotlib inline
    # %pylab inline
    plt.style.use('bmh')
    plt.rcParams['figure.figsize'] = [20, 10]
    plt.rcParams['font.size'] = 10
    display(HTML('<style>.container {width:100% !important; }</style>'))
    pd.options.display.max_columns = None
    pd.options.display.max_rows = None
    sns.set()


# +
jupyter_settings()
# NOTE: the original notebook called this without importing `warnings`,
# which raised a NameError; the import was added above.
warnings.filterwarnings('ignore')
# -

# # 1.0 Data Collect

# +
book_title = []
book_price = []
book_stock = []
book_rating = []
book_category = []
quantidade = 0  # running count of scraped books

for page in range(1, 51):
    # Get webpage data
    root_url = 'https://books.toscrape.com/catalogue/'
    url = 'https://books.toscrape.com/catalogue/page-{}.html'.format(page)
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5),AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    response = requests.get(url, headers=headers)

    # Make Soup
    soup = BeautifulSoup(response.text, 'html.parser')
    books = soup.find_all('article', class_='product_pod')

    for book in books:
        # Get hyperlink to the book detail page
        base_url = book.h3.a['href'].strip()
        url = root_url + base_url
        page_single = requests.get(url)
        soup_single = BeautifulSoup(page_single.text, 'html.parser')
        book_single = soup_single.find('article', class_='product_page')
        info_book = book_single.find('div', class_='product_main')

        # Scraping data
        title = info_book.h1.get_text().strip()
        price = info_book.find('p', class_='price_color').get_text()
        stock = info_book.find('p', class_='availability').get_text()
        rating = info_book.find('p', 'star-rating')['class'][1].strip()
        category = soup_single.find('ul', class_='breadcrumb').find_all('li')[2].a.get_text()

        # Clean data: keep only the copy count, e.g. "22" out of
        # "In stock (22 available)".  re.findall requires a string, so the
        # availability text is no longer .split() into a list first (the
        # original passed a list and raised TypeError).
        stock_clean = re.findall(r"\d+", stock)[0]

        book_title.append(title)
        book_price.append(price)
        book_stock.append(stock_clean)
        book_rating.append(rating)
        book_category.append(category)
        quantidade = quantidade + 1

print(quantidade)
# -

# +
data = pd.DataFrame({'book_category': book_category,
                     'book_title': book_title,
                     'book_price': book_price,
                     'book_stock': book_stock,
                     'book_rating': book_rating})
# -

# +
# Keep only the four categories of interest.
data = data.loc[data['book_category'].isin(['Classics', 'Science Fiction', 'Humor', 'Business'])]
# -

# Timestamp of when the data was collected.
data.insert(1, 'scrapy_datetime', (datetime.now().strftime('%Y-%m-%d %H:%M:%S')), allow_duplicates=False)

data.head()

data.to_csv('../data/dataset_v1.csv', index=False)

# # 2.0 Clean Data

data = pd.read_csv('../data/dataset_v1.csv')

data.sample(5)

# +
## book category
data['book_category'] = data['book_category'].apply(lambda x: x.lower())

# product price: strip the pound sign and cast to float
data['book_price'] = data['book_price'].apply(lambda x: x.replace('£', '') if pd.notnull(x) else x).astype(float)

# book rating
data['book_rating'] = data['book_rating'].apply(lambda x: x.lower())

# book stock: the collection step now stores the bare copy count (e.g. "22"),
# so it only needs a cast to int.  (The previous regex-based cleaning expected
# the raw availability tokens from an older version of the pipeline and no
# longer matched the stored values.)
data['book_stock'] = data['book_stock'].astype(int)

data.sample(5)
# -

data.to_csv('../data/dataset_v2.csv', index=False)
notebooks/ex_005.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Experiment 1 - Memory efficiency of commSignal

# Refs: [1] https://ipython-books.github.io/45-understanding-the-internals-of-numpy-to-avoid-unnecessary-array-copying/

# The goal of this experiment is to verify that the commSignal object is memory efficient. Inefficiencies may occur when the internal signal storage is copied everytime it is being used. To verify that the same array is being used the following functions are defined, courtesy of [1]

# +
import numpy as np


def get_data_base(arr):
    """For a given NumPy array, find the base array that owns the actual data."""
    owner = arr
    # A view chains to its parent through .base; walk upward until the
    # attribute is no longer an ndarray, i.e. we reached the owning array.
    while isinstance(owner.base, np.ndarray):
        owner = owner.base
    return owner


def arrays_share_data(x, y):
    """Return True when x and y are backed by the same underlying buffer."""
    x_owner = get_data_base(x)
    y_owner = get_data_base(y)
    return x_owner is y_owner


a = np.array(range(100))
print(arrays_share_data(a, a.copy()))
print(arrays_share_data(a, a[:1]))
# -

# In the constructor we expect the signal to be copied so as to preserve a copy within the object, the following result shows this is true.


# +
class test:
    def __init__(self, sig):
        # np.array(...) always allocates a fresh buffer here, so the
        # object keeps its own private copy of the incoming signal.
        self.__signal = np.array(sig)
        print(arrays_share_data(self.__signal, sig))


test(a)
# -

# Next we test what happens when we return the signal, is it copied then aswell? We see that it isn't being copied again. This is what we want, everytime we return the signal we dont want it to be copied again and again, which will lead to unnecessary delays


# +
class test:
    def __init__(self, sig):
        self.__signal = np.array(sig)
        # Returning through the properties must NOT copy: both checks
        # should report that the same buffer is shared.
        print(arrays_share_data(self.__signal, self.getsig))
        print(arrays_share_data(self.getgetsig, self.getsig))

    @property
    def getsig(self):
        return self.__signal

    @property
    def getgetsig(self):
        return self.getsig


test(a)
# -

# ### Conclusions

# We conclude that when we return the signal array, it is not being copied again hence the object is memory efficient.
experiments/Experiment 1 - Memory efficiency of commSignal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # An introduction to solving biological problems with Python # # ## Session 1.4: Collections Sets and dictionaries # # - [Sets](#Sets) | [Exercise 1.4.1](#Exercise-1.4.1) # - [Dictionaries](#Dictionaries) | [Exercises 1.4.2](#Exercises-1.4.2) # - # ## Sets # # - Sets contain unique elements, i.e. no repeats are allowed # - The elements in a set do not have an order # - Sets cannot contain elements which can be internally modified (e.g. lists and dictionaries) l = [1, 2, 3, 2, 3] # list of 5 values s = set(l) # set of 3 unique values print(s) e = set() # empty set print(e) # Sets are very similar to lists and tuples and you can use many of the same operators and functions, except they are **inherently unordered**, so they don't have an index, and can only contain _unique_ values, so adding a value already in the set will have no effect s = set([1, 2, 3, 2, 3]) print(s) print("number in set:", len(s)) s.add(4) print(s) s.add(3) print(s) # You can remove specific elements from the set. s = set([1, 2, 3, 2, 3]) print(s) s.remove(3) print(s) # You can do all the expected logical operations on sets, such as taking the union or intersection of 2 sets with the <tt>|</tt> _or_ and <tt>&</tt> _and_ operators # + s1 = set([2, 4, 6, 8, 10]) s2 = set([4, 5, 6, 7]) print("Union:", s1 | s2) print("Intersection:", s1 & s2) # - # ## Exercise 1.4.1 # # 1. Given the protein sequence "MPISEPTFFEIF", split the sequence into its component amino acid codes and use a set to establish the unique amino acids in the protein and print out the result. 
# ## Dictionaries # # Lists are useful in many contexts, but often we have some data that has no inherent order and that we want to access by some useful name rather than an index. For example, as a result of some experiment we may have a set of genes and corresponding expression values. We could put the expression values in a list, but then we'd have to remember which index in the list corresponded to which gene and this would quickly get complicated. # # For these situations a _dictionary_ is a very useful data structure. # # Dictionaries: # # - Contain a mapping of keys to values (like a word and its corresponding definition in a dictionary) # - The keys of a dictionary are unique, i.e. they cannot repeat # - The values of a dictionary can be of any data type # - The keys of a dictionary cannot be an internally modifiable type (e.g. lists, but you can use tuples) # - Dictionaries do not store data in any particular order dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} print(dna) # You can access values in a dictionary using the key inside square brackets dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} print("A represents", dna["A"]) print("G represents", dna["G"]) # An error is triggered if a key is absent from the dictionary: dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} print("What about N?", dna["N"]) # You can access values safely with the <tt>get</tt> method, which gives back <tt>None</tt> if the key is absent and you can also supply a default values dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} print("What about N?", dna.get("N")) print("With a default value:", dna.get("N", "unknown")) # You can check if a key is in a dictionary with the <tt>in</tt> operator, and you can negate this with <tt>not</tt> dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} "T" in dna dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} "Y" 
not in dna # The <tt>len()</tt> function gives back the number of (key, value) pairs in the dictionary: dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} print(len(dna)) # You can introduce new entries in the dictionary by assigning a value with a new key: dna = {"A": "Adenine", "C": "Cytosine", "G": "Guanine", "T": "Thymine"} dna['Y'] = 'Pyrimidine' print(dna) # You can change the value for an existing key by reassigning it: dna = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine', 'Y': 'Pyrimidine'} dna['Y'] = 'Cytosine or Thymine' print(dna) # You can delete entries from the dictionary: dna = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine', 'Y': 'Pyrimidine'} del dna['Y'] print(dna) # You can get a list of all the keys (in arbitrary order) using the inbuilt <tt>.keys()</tt> function dna = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine', 'Y': 'Pyrimidine'} print(list(dna.keys())) # And equivalently get a list of the values: dna = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine', 'Y': 'Pyrimidine'} print(list(dna.values())) # And a list of tuples containing (key, value) pairs: dna = {'A': 'Adenine', 'C': 'Cytosine', 'T': 'Thymine', 'G': 'Guanine', 'Y': 'Pyrimidine'} print(list(dna.items())) # ## Exercises 1.4.2 # # 1. Print out the names of the amino acids that would be produced by the DNA sequence "GTT GCA CCA CAA CCG" ([See the DNA codon table](https://en.wikipedia.org/wiki/DNA_codon_table)). Split this string into the individual codons and then use a dictionary to map between codon sequences and the amino acids they encode. # 2. Print each codon and its corresponding amino acid. # 3. Why couldn't we build a dictionary where the keys are names of amino acids and the values are the DNA codons? 
#
# ### Advanced exercise 1.4.3
#
# - Starting with an empty dictionary, count the abundance of different residue types present in the 1-letter lysozyme protein sequence (http://www.uniprot.org/uniprot/B2R4C5.fasta) and print the results to the screen in alphabetical key order.

# ## Congratulations! You reached the end of day 1!
#
# Go to our next notebook: [python_basic_2_intro](python_basic_2_intro.ipynb)
python_basic_1_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spectral Sampling with pyrff # # [Pyrff](https://github.com/michaelosthege/pyrff) is a library that implements spectral sampling. I want to see if I can get it to work and if it is reasonably accurate and fast. # %load_ext autoreload # %autoreload 2 from summit.utils.models import GPyModel import matplotlib.pyplot as plt import numpy as np # ## Simple Example # # Let's first try to approximate a simple sinusoidal function in 1D. # + n_samples=20 random_seed=100 rng = np.random.default_rng(random_seed) def observed(X, noise_level=0.05): noise = rng.standard_normal([X.shape[0],1])*noise_level return np.sin(X) + noise X = rng.uniform(-3.,3.,(n_samples,1)) Y = observed(X) X_true = rng.uniform(-3.,3.,(1000,1)) X_true = np.sort(X_true, axis=0) Y_true = np.sin(X_true) #Fit model m = GPyModel(input_dim=1) m.fit(X, Y) sampled_f = m.spectral_sample(X, Y) predict_Y = m.predict(X) sample_Y = sampled_f(X) # - # I noticed that I can run the spectral sampling with the same inputs multiple times, and I find that the algorithm will sometimes converge and sometimes not. # # I noticed this in my version of spectral sampling as well, and I think it is an issue with having points in the input or output space that are too close together. 
# + def closest_point_index(design_point, candidate_matrix): '''Return the index of the closest point in the candidate matrix''' distances = _design_distances(design_point, candidate_matrix) return np.argmin(np.atleast_2d(distances)) def closest_distance(design_point, candidate_matrix): '''Return the index of the closest point in the candidate matrix''' distances = _design_distances(design_point, candidate_matrix) return np.min(np.atleast_2d(distances)) def _design_distances(design_point,candidate_matrix): ''' Return the distances between a design_point and all candidates''' diff = design_point - candidate_matrix squared = np.power(diff, 2) summed = np.sum(squared, axis=1) root_square = np.sqrt(summed) return root_square # + distances = numpy.zeros(X.shape[0]) for i, x in enumerate(X): mask = np.ones(X.shape[0], dtype=bool) mask[i] = False d = closest_distance(x, X[mask,:]) distances[i] = d np.min(distances) # - # X, Y, predict_Y, sample_Y = fit_and_sample() fig, ax = plt.subplots(1) ax.scatter(X[:,0], predict_Y[:, 0], marker= 'o', label='GP Prediction') ax.scatter(X[:,0], sample_Y, marker='^', label= 'Spectral Sample') ax.plot(X_true[:,0], Y_true[:,0], label='True') ax.legend() ax.tick_params(direction='in') ax.set_xlabel('X') _ = ax.set_ylabel('Y') # ## Test points at different distances # + def observed(X, noise_level=0.05): noise = rng.standard_normal([X.shape[0],1])*noise_level return np.sin(X) + noise def gen_noisy_data(num_points=20, spacing=0.01): upper = -3 + num_points*spacing x = np.linspace(-3,upper,num_points) x = np.atleast_2d(x).T y = observed(X) return x, y # - m = GPyModel(input_dim=1) spacings = [1e-5, 1e-4, 1e-3, 1e-2, 0.1] inputs = [] outputs = [] sampled_fs = [] n_repeats = 20 for s in spacings: X, y = gen_noisy_data(spacing=s) m.fit(X, Y) repeats = [] for i in range(n_repeats): try: f = m.spectral_sample(X, Y) repeats.append(f) except np.linalg.LinAlgError as e: repeats.append(e) sampled_fs.append(repeats) inputs.append(X) outputs.append(y) 
num_errors = 0 for sample in sampled_fs: for r in sample: if not callable(r): num_errors += 1 num_errors
experiments/tsemo/faster_spectral_sampling.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.0 # language: julia # name: julia-1.6 # --- # # Bayesian filtering for nonlinear Gaussian dynamical systems # # <NAME> | Last update: 09-12-2021 # Consider a discrete-time dynamic system with observations $y_k \in \mathbb{R}^{d_y}$ and latent states $x_k \in \mathbb{R}^{d_x}$. These are subject to the following dynamics: # # $$\begin{align} # x_k =&\ f(x_{k-1}) + q_{k-1} \\ # y_k =&\ h(x_k) + r_{k} \, , # \end{align}$$ # # where $f$ and $h$ are static nonlinear functions, $q_{k-1}$ represents process noise and $r_{k}$ is measurement noise. Both sources of noise are Gaussian distributed with zero mean: # # $$q_{k-1} \sim \mathcal{N}(0, Q) \, , \qquad r_k \sim \mathcal{N}(0, R) \, ,$$ # # with covariance matrices $Q$ and $R$. # ## Problem: tracking a noisy pendulum # We will be tracking a pendulum whose observations are noisy. You can find this example in [Bayesian Filtering & Smoothing](https://www.cambridge.org/core/books/bayesian-filtering-and-smoothing/C372FB31C5D9A100F8476C1B23721A67) by <NAME> (Ex 3.7). The state transition has the following form: # # $$\begin{align*} # \underbrace{\begin{bmatrix} x_{1,k} \\ x_{2,k} \end{bmatrix}}_{x_k} = \underbrace{\begin{bmatrix} x_{1,k-1} + x_{2,k-1} \Delta t \\ x_{2,k-1} -G \sin(x_{1,k-1}) \Delta t \end{bmatrix}}_{f(x_{k-1})} + q_{k-1} # \end{align*}$$ # # where $x_1$ represents the angle of the pendulum, $x_2$ represents the change in angle and $G$ is gravitational acceleration. # # The measurement function is described as: # # $$\begin{align*} # y_k = \underbrace{\sin(x_{1,k})}_{h(x_k)} + r_k # \end{align*}$$ # # We cannot observe the change in angle directly, only the height of the angle itself (i.e. $\sin(x_{1})$). 
# #### Data-generating process

using LinearAlgebra
using ProgressMeter
using Plots
pyplot();

# +
# Length of time-series
T = 400
Δt = 0.01

# Gravitational acceleration
G = 9.81

# Process noise covariance
qc = 1e-3
Q = [qc*Δt^3/3 qc*Δt^2/2; qc*Δt^2/2 qc*Δt]

# Measurement noise variance
R = 1e-2

# Initial states
x0 = [1.0, 0.0]

# Initialize data array
states = zeros(2,T)
observations = zeros(T,)

# Initialize previous state variable
prev_state = x0

# Forward-simulate the pendulum with an Euler discretization of the dynamics.
# NOTE(review): no RNG seed is set, so every run generates a different trajectory.
for k = 1:T

    # State transition
    states[1,k] = prev_state[1] + prev_state[2]*Δt
    states[2,k] = prev_state[2] - G*sin(prev_state[1])*Δt

    # Add process noise (Cholesky factor maps standard normals to N(0, Q))
    states[:,k] = states[:,k] + cholesky(Q).L*randn(2)

    # Observation with added measurement noise
    observations[k] = sin(states[1,k]) + sqrt(R)*randn(1)[1]

    # Update "previous state"
    prev_state = states[:,k]

end
# -

# Inspect data
plot((1:T).*Δt, states[1,:], linewidth=3, color="blue", xlabel="time [Δt = "*string(Δt)*"]", ylabel="height pendulum", label="states", size=(800,400))
scatter!((1:T).*Δt, observations, color="black", markersize=2,label="observed")

# ### Model specification
#
# We can convert the state transition and observation likelihood from the system dynamics to probabilistic form by integrating out the noise variables:
#
# $$\begin{align}
# x_k \sim&\ \mathcal{N}(f(x_{k-1}), Q) \\
# y_k \sim&\ \mathcal{N}(g(x_k), R) \, .
# \end{align}$$
#
# The noise covariance matrices are now the covariance matrices of the stochastic state transition and likelihood. In both cases, we will use an unscented transform to approximate the result of applying the nonlinear function with a Gaussian distribution.
#
# We will need priors for the unknown variables. We will be doing recursive estimation (filtering) in this notebook, we need only define a prior for the "previous state" variable $x_{k-1}$:
#
# $$\begin{align}
# x_{k-1} \sim \mathcal{N}(m_k, W_k^{-1}) \, ,
# \end{align}$$
#
# where $W_k$ is a precision matrix. In Bayesian inference, we often work with precision matrices instead of covariance matrices as that tends to require less matrix inversions. We will model the unknown noise precision matrices with a gamma distribution (univariate measurement noise) and a Wishart distribution (multivariate process noise). These are the conditionally [conjugate priors](https://en.wikipedia.org/wiki/Conjugate_prior) for Gaussian likelihoods:
#
# $$\begin{align}
# Q^{-1} \sim&\ \mathcal{W}(V_Q, n_Q) \\
# R^{-1} \sim&\ \Gamma(a_r, b_r) \, .
# \end{align}$$
#
# With a likelihood and a set of priors, we can specify a model in a probabilistic programming language (here [ForneyLab.jl](https://github.com/biaslab/ForneyLab.jl)) and automatically infer (approximate) posteriors.

using ForneyLab

# +
fg = FactorGraph()

# Measurement noise precision
@RV Ri ~ Gamma(placeholder(:a_R), placeholder(:b_R))

# Process noise precision
@RV Qi ~ Wishart(placeholder(:V_Q, dims=(2,2)), placeholder(:n_Q))

# Previous state vector (prior parameters are fed in per time-step via placeholders)
@RV x_kmin1 ~ GaussianMeanPrecision(placeholder(:m_x, dims=(2,)), placeholder(:W_x, dims=(2,2)))

# Nonlinear state transition function (same Euler-discretized dynamics as the simulator)
f(x) = [x[1] + x[2]*Δt, x[2] - G*sin(x[1])*Δt]
@RV fx_k ~ Nonlinear{Unscented}(x_kmin1; g=f)

# Add process noise
@RV x_k ~ GaussianMeanPrecision(fx_k, Qi)

# Mask the state vector (select the angle component x_1)
@RV x_1 = dot([1., 0.], x_k)

# Nonlinear observation function
h(x) = sin(x)
@RV hx_k ~ Nonlinear{Unscented}(x_1; g=h)

# Add measurement noise
@RV y_k ~ GaussianMeanPrecision(hx_k, Ri)

# Indicate y_k will be observed
placeholder(y_k, :y_k);

ForneyLab.draw(fg)
# -

# ### Compile inference algorithm
#
# We cannot use exact Bayesian inference here and will therefore switch to an approximate inference scheme: variational message passing.

# +
# Define a factorization for the posterior
q = PosteriorFactorization(x_k, fx_k, hx_k, Qi, Ri, ids=[:xk, :fk, :hk, :Qi, :Ri])

# Define a variational message passing procedure
algorithm = messagePassingAlgorithm([x_k, fx_k, hx_k, Qi, Ri], q)

# Compile message passing procedure to an inference algorithm
code = algorithmSourceCode(algorithm);

# Import compiled functions to workspace (defines the step...! functions used below)
eval(Meta.parse(code));

# Print compiled functions (uncomment when desired)
# println(code)
# -

# ### Execute inference algorithm
#
# We execute the algorithm in an online fashion: after each timestep the posteriors for the state and noise precisions are used as priors for the next timestep.

# +
# Number of iterations (variational updates per time-step)
num_iterations = 5

# Parameters for prior distributions
m_x_0 = [1., 0.]
W_x_0 = [1. 0.;0. 1.]
a_R_0 = 10.0
b_R_0 = 0.1
V_Q_0 = [10. 0.1;0.1 1.]
n_Q_0 = 10.0

# Initialize array to track Free Energy objective
F = zeros(T,num_iterations)

# Initialize arrays for storing parameter estimates
m_x_k = zeros(2,T)
W_x_k = zeros(2,2,T)
a_R_k = zeros(T,)
b_R_k = zeros(T,)
V_Q_k = zeros(2,2,T)
n_Q_k = zeros(T,)

# Initialize previous parameter estimates
m_x_kmin1 = m_x_0
W_x_kmin1 = W_x_0
a_R_kmin1 = a_R_0
b_R_kmin1 = b_R_0
V_Q_kmin1 = V_Q_0
n_Q_kmin1 = n_Q_0

# Initialize data dictionary
data = Dict()

# Initialize marginal distributions
marginals = Dict()
marginals[:x_kmin1] = ProbabilityDistribution(Multivariate, GaussianMeanVariance, m=m_x_0, v=W_x_0)
marginals[:hx_k] = ProbabilityDistribution(Multivariate, GaussianMeanPrecision, m=[0., 0.], w=[1. 0.;0. 1.])
marginals[:fx_k] = ProbabilityDistribution(Multivariate, GaussianMeanVariance, m=[0., 0.], v=[1. 0.;0. 1.])
marginals[:x_k] = ProbabilityDistribution(Multivariate, GaussianMeanPrecision, m=[0., 0.], w=[1. 0.;0. 1.])
marginals[:x_1] = ProbabilityDistribution(Univariate, GaussianMeanVariance, m=0.0, v=1.0)
marginals[:Ri] = ProbabilityDistribution(Univariate, Gamma, a=a_R_0, b=b_R_0)
marginals[:Qi] = ProbabilityDistribution(Wishart, v=V_Q_0, nu=n_Q_0)

# Recursive estimation procedure (posteriors at k => priors at k+1)
@showprogress for k = 1:T

    # Store data for current time-step
    data[:y_k] = observations[k]
    data[:m_x] = m_x_kmin1
    data[:W_x] = W_x_kmin1
    data[:a_R] = a_R_kmin1
    data[:b_R] = b_R_kmin1
    data[:V_Q] = V_Q_kmin1
    data[:n_Q] = n_Q_kmin1

    # Iteratively update beliefs
    for tt = 1:num_iterations

        # Update recognition distributions
        stepxk!(data, marginals)
        stepfk!(data, marginals)
        stephk!(data, marginals)
        stepRi!(data, marginals)
        stepQi!(data, marginals)

    end

    # Update book-keeping: today's posterior parameters become tomorrow's priors
    m_x_kmin1 = m_x_k[:,k] = ForneyLab.unsafeMean(marginals[:x_k])
    W_x_kmin1 = W_x_k[:,:,k] = ForneyLab.unsafePrecision(marginals[:x_k])
    a_R_kmin1 = a_R_k[k] = marginals[:Ri].params[:a]
    b_R_kmin1 = b_R_k[k] = marginals[:Ri].params[:b]
    V_Q_kmin1 = V_Q_k[:,:,k] = marginals[:Qi].params[:v]
    n_Q_kmin1 = n_Q_k[k] = marginals[:Qi].params[:nu]

end
# -

# ### Inspect results

# We can plot the estimates of the angle and the change in the angle, over time, including the uncertainty of the estimates.

# Plot true states and overlay estimates.
# NOTE(review): `inv.(W_x_k)` inverts each *entry* elementwise, not the 2x2
# precision matrices — the ribbon (±1 std dev) is only exact if W is diagonal; confirm intended.
plot((1:T).*Δt, states[1,:], linewidth=2, xlabel="time (Δt = "*string(Δt)*")", ylabel="angle (α)", color="blue", label="states", size=(800,400))
plot!((1:T).*Δt, m_x_k[1,:], linewidth=3, color="red", ribbon=[sqrt.(inv.(W_x_k)[1,1,:]) sqrt.(inv.(W_x_k)[1,1,:])], alpha=0.6, label="estimated")
title!("Angle of the pendulum, over time")

# Plot true states and overlay estimates
plot((1:T).*Δt, states[2,:], linewidth=2, xlabel="time (Δt = "*string(Δt)*")", ylabel="change in angle (dα/dt)", color="blue", label="states", size=(800,400))
plot!((1:T).*Δt, m_x_k[2,:], linewidth=3, color="red", ribbon=[sqrt.(inv.(W_x_k)[2,2,:]) sqrt.(inv.(W_x_k)[2,2,:])], alpha=0.6, label="estimated", legend=:bottomright)
title!("Change in angle of the pendulum, over time")

# Both the angle and the change in angle are tracked nicely.
demos/Noisy pendulum.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="lE68BuQxQ772"
# # Knowledge Based Recommendation System of Ingredients
# ## Notebook 1: Exploratory Data Analysis and Preprocessing
# ### Project Breakdown
# 1: Exploratory Data Analysis and Preprocessing
# 2: Build Word Embeddings using Word2Vec, FastText
# 3: Recommend Recipes based on ingredients
# 4: Build and Visualize Interactive Knowledge Graph of Ingredients
#
# [This is the dataset](https://eightportions.com/datasets/Recipes/#fn:1) we will be using. It is collated by <NAME>, sourced from [Food Network](https://www.foodnetwork.com/), [Epicurious](https://www.epicurious.com/), and [Allrecipes](https://www.allrecipes.com/).

# + [markdown] id="S-9PrAvfYtTA"
# ## Data Preparation

# + id="zw5GYmrfQ77-"
from nltk.stem.wordnet import WordNetLemmatizer
from wordcloud import WordCloud
from bs4 import BeautifulSoup
from gensim.parsing.preprocessing import remove_stopwords
from tqdm import tqdm
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import pickle
import string
import nltk
import json
import re

# + colab={"base_uri": "https://localhost:8080/"} id="Gs19jnUiSVwi" outputId="2e4ca4b7-d018-4db7-a840-7819965c4e19"
# download recipe data
# !mkdir -p data
# !wget --header="Host: storage.googleapis.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://eightportions.com/" "https://storage.googleapis.com/recipe-box/recipes_raw.zip" -c -O 'data/recipes_raw.zip'

# + colab={"base_uri": "https://localhost:8080/"} id="SL3YaFfxScGT" outputId="74d2822b-27a9-487e-a42c-eb3cd8985d2c"
# !unzip -xo data/recipes_raw.zip -d data

# + id="O2iHPysjQ78A"
# the data was scraped from three sources
recipe_sources = ['ar', 'epi', 'fn']

# + id="38_lYOxlQ78B"
# We will iterate all json files to build a dataframe
recipes = pd.DataFrame()
sources, titles, ingredients, instructions = [], [], [], []

for recipe_source in recipe_sources:
    with open(f'data/recipes_raw_nosource_{recipe_source}.json', 'r') as fn:
        data = json.load(fn)

    for _, recipe in data.items():
        # keep only records that have all three fields we need
        if ('title' in recipe) and ('ingredients' in recipe) and ('instructions' in recipe):
            # append to a list of the source
            sources.append(recipe_source)
            # append to a list of the titles
            titles.append(recipe['title'])
            # append to a list of a list of ingredients, removing the word ADVERTISEMENT
            # Eg: {'ingredients': ['1 cup butter, softened ADVERTISEMENT', ..]}
            ingredients.append(
                [str(ingredient).replace('ADVERTISEMENT', '') for ingredient in recipe['ingredients']]
            )
            # append to a list of instructions, removing the word ADVERTISEMENT
            instructions.append(
                str(recipe['instructions']).replace('ADVERTISEMENT', '')
            )

recipes['source'] = sources
recipes['title'] = titles
recipes['ingredients'] = ingredients
recipes['instructions'] = instructions

# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="D4mbBBTkX063" outputId="b95214e6-27d2-4dfe-e8a5-d31658c1eead"
recipes

# + id="qFSIeA4A4qlF"
def plot_wordcloud(data, title):
    """Render a word cloud of the concatenated text in `data` (a Series of strings)."""
    wc = WordCloud(max_words=100, max_font_size=50, background_color='white')
    # generate wordcloud from word_freq
    wc = wc.generate(data.str.cat(sep=' '))
    # plot wordcloud
    plt.figure(figsize=(12, 5))
    plt.imshow(wc, interpolation='bilinear')
    plt.title(f'WordCloud of Recipe {title}', fontsize=22)
    plt.axis('off')
    plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 325} id="hR8ohfpN5d8C" outputId="e6d0be2a-d82a-4769-dc77-e4d26a4ba3c5"
plot_wordcloud(recipes['title'], 'Titles')

# + [markdown] id="npvBOPj-6XjQ"
# Most recipes are about dishes related to Chicken, Orange, Shrimp, Cake.

# + colab={"base_uri": "https://localhost:8080/", "height": 325} id="NJ7EQoos5137" outputId="981dc0cb-0214-4364-ac67-2a8caa77d841"
plot_wordcloud(recipes['instructions'], 'Instructions')

# + [markdown] id="DTmjrHMk6k3k"
# Preheat oven is the most appearing instruction.

# + colab={"base_uri": "https://localhost:8080/", "height": 325} id="uPKTx8wP6SMg" outputId="a660265c-30ed-49f4-87ee-ba77f445cc55"
plot_wordcloud(recipes['ingredients'].apply(lambda x: ' '.join(x)), 'Ingredients')

# + [markdown] id="GDqy_zYI7IfG"
# Most preferred ingredient in most of the recipes are seasonings like black pepper, olive oil, and flour

# + [markdown] id="4F7JB1NhYpbn"
# ## Preprocessing and Data Preparation

# + id="UyC0l33IQ78C"
# We will remove the following commonly used words as they won't be useful for our analysis
# source: https://en.wikibooks.org/wiki/Cookbook:Units_of_measurement
to_remove = [
    'tablespoon', 'tablespoons', 'teaspoon', 'teaspoons', 'tsp', 'tsps',
    'tbsp', 'tbsps', 'pound', 'pounds', 'grams', 'mg', 'mm', 'm',
    # FIX: 'ounce' 'ounces' (missing comma) was silently concatenated into the
    # single token 'ounceounces', so neither unit was ever removed.
    'ounce', 'ounces',
    'kg', 'inch', 'crushed', 'chopped', 'finely', 'softened', 'cups', 'cup'
]

# decontract words
# https://stackoverflow.com/a/47091490/4084039
def decontracted(phrase):
    """Expand common English contractions in `phrase` (e.g. "won't" -> "will not")."""
    # specific
    phrase = re.sub(r"won't", "will not", phrase)
    phrase = re.sub(r"can\'t", "can not", phrase)
    phrase = re.sub(r"isn\'t", "is not", phrase)

    # general
    phrase = re.sub(r"n\'t", " not", phrase)
    phrase = re.sub(r"\'re", " are", phrase)
    phrase = re.sub(r"\'s", " is", phrase)
    phrase = re.sub(r"\'d", " would", phrase)
    phrase = re.sub(r"\'ll", " will", phrase)
    phrase = re.sub(r"\'t", " not", phrase)
    phrase = re.sub(r"\'ve", " have", phrase)
    phrase = re.sub(r"\'m", " am", phrase)
    return phrase

# we will also remove any punctuations and digits from our data
translation_table = str.maketrans('', '', string.punctuation + string.digits)

# + id="O1STbZVcQ78D"
def preprocess(items):
    """Clean a list of raw text items and return a list of lemmatized token lists."""
    results = []
    wnl = WordNetLemmatizer()
    nltk.download('wordnet')
    for item in items:
        # a. lowercase and replace hyphens with space
        item = item.lower().replace('-', ' ')
        # b. remove newlines, tabs
        item = item.replace('\n', '').replace('\t', '')
        # c. decontract words.
        #    FIX: this must happen BEFORE punctuation removal; previously the
        #    apostrophes were already stripped by the translation table, so
        #    none of the contraction regexes could ever match.
        item = decontracted(item)
        # d. remove html and url tags from text (also before punctuation
        #    removal, so "http\S+" still matches intact URLs)
        item = re.sub(r"http\S+", "", item)
        item = BeautifulSoup(item, 'lxml').get_text()
        # e. remove punctuations and digits
        item = item.translate(translation_table)
        # f. remove stop words
        item = remove_stopwords(item)  # gensim defined
        for stop_word in to_remove:  # custom stop words
            # NOTE(review): plain substring removal also hits embedded matches
            # (e.g. 'cup' inside 'cupcake'); kept as-is to preserve behavior.
            item = item.replace(stop_word, '')
        # g. remove words with length lesser than three since most often they're stop words
        # h. and lemmatize
        item = [wnl.lemmatize(word) for word in item.split() if len(word) > 2]
        results.append(item)
    return results

# + colab={"base_uri": "https://localhost:8080/"} id="assD4dNoQ78E" outputId="7629d29f-1aa6-4271-d506-487ea860a919"
instructions = recipes.instructions.values.tolist()
ingredients = [', '.join(ingredient) for ingredient in recipes.ingredients.values]
train_data = instructions + ingredients
train_data[:5]

# + colab={"base_uri": "https://localhost:8080/"} id="dlQqAdpcQ78F" outputId="cd75bf6c-e5ad-4e9d-ce2f-4d605805c78c"
train_data_preprocessed = preprocess(train_data)
train_data_preprocessed[:2]

# + id="q9ev3OeaQ78G"
# save preprocessed train data
with open('data/train_data.pkl', 'wb') as f:
    pickle.dump(train_data_preprocessed, f)

# save the raw data too
with open('data/train_data_raw.pkl', 'wb') as f:
    pickle.dump(train_data, f)

recipes.to_csv('recipes.csv', index=False)

# + colab={"base_uri": "https://localhost:8080/"} id="1tzl3Od1sTg-" outputId="7597d0f6-aca9-4244-c3b2-c19d7b803116"
from google.colab import drive
drive.mount('/content/gdrive')

# + id="hNrYcT7JsiAA"
# !cp -r data/train_data_raw.pkl /content/gdrive/MyDrive/colab/xlabs/
# !cp -r data/train_data.pkl /content/gdrive/MyDrive/colab/xlabs/
notebooks/1_EDA_Preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notebook to load test set sequence and generate a model prediction video

# +
import json
import time
import os
import argparse
import random
from pathlib import Path

import torch
from torch.utils.data import DataLoader
from torch.cuda import amp
from torchvision.transforms import Compose, Resize, ToTensor, Normalize
import numpy as np
from PIL import Image
import cv2
import wandb
from tqdm import tqdm
from matplotlib import pyplot as plt
from scipy import interpolate

from data.comma_dataset import CommaDataset
from models.encoder import Encoder
from models.decoder import Decoder
from models.e2e_model import End2EndNet
from utils import paths, logging
from utils.losses import grad_l1_loss
# -

# ## Load in best model

# +
# Load in model checkpoint for best epoch
checkpoint_path = Path("/mnt/sda/datasets/comma2k19/checkpoints/splendid-snowflake-230/checkpoint_24.tar")
checkpoint = torch.load(checkpoint_path)

# Load in config (model hyper-parameters used to rebuild the network below)
config_path = Path("configs/resnet34_sequence.json").resolve()
with config_path.open('r') as fr:
    config = json.load(fr)
print(config)

# Load in dataset config (provides 'past_steps' / 'future_steps' used at inference)
ds_config_path = (Path("data/dataset_lists") / config['dataset']['dataset_file']).resolve()
with ds_config_path.open('r') as fr:
    ds_args = json.load(fr)['args']
print(ds_args)

# +
# Initialize model and restore trained weights; eval() disables dropout/batch-norm updates
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

encoder = Encoder(config['model']['encoder'])
decoder = Decoder(config['model']['decoder'])
e2e_net = End2EndNet(encoder, decoder)

e2e_net.load_state_dict(checkpoint['model_state_dict'])
e2e_net.to(device)
e2e_net.eval()
# -

# Define image transforms (resize + ImageNet mean/std normalization)
img_transforms = Compose([
    Resize([288, 384]),
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# ## Load in test video and run inference on each frame

test_routes_path = Path("data/dataset_lists/test_set_routes.json").resolve()
with test_routes_path.open('r') as fr:
    test_routes = json.load(fr)['test_routes']
test_routes

def get_predictions(route_path, id_range):
    """Run the model over frames id_range[0]..id_range[1] of one route.

    Returns (video_frames, model_preds): the current PIL frame and the
    predicted future path (future_steps x 3 array) for every frame id.
    """
    # Load route data arrays
    orientations = np.load(route_path / "frame_orientations.npy")
    positions = np.load(route_path / "frame_positions.npy")

    video_frames = []
    model_preds = []
    with torch.no_grad():
        for frame_id in range(*id_range):
            # Convert positions to reference frame
            local_path = paths.get_local_path(positions, orientations, frame_id)

            # Divide data into previous and future arrays
            previous_path = local_path[frame_id - ds_args['past_steps'] : frame_id + 1]
            prev_path = torch.from_numpy(previous_path)

            # Grab previous and current frames
            frames = []
            for f_id in range(frame_id - ds_args['past_steps'], frame_id + 1):
                filename = str(f_id).zfill(6) + '.jpg'
                frame = Image.open(str(route_path / "images" / filename))
                if f_id == frame_id:
                    # keep the untransformed current frame for the output video
                    video_frames.append(frame)

                # Apply transforms to frame
                frame = img_transforms(frame)
                frames.append(frame)

            # Stack frames into single array (T, C, H, W)
            frames = torch.stack(frames)

            # Add singleton batch size and send to gpu
            frames = torch.unsqueeze(frames, 0).to(device)
            prev_path = torch.unsqueeze(prev_path, 0).float().to(device)

            # Forward pass
            model_output = e2e_net(frames, prev_path)
            model_output = model_output.reshape((ds_args['future_steps'], 3))
            model_output = model_output.detach().cpu().numpy()
            model_preds.append(model_output)

    return video_frames, model_preds

# ## Visualize predictions and generate video

def gen_gif(frames, preds, filename):
    """Overlay each predicted path on its frame and save the sequence as a GIF."""
    pil_imgs = []
    for frame, pred in zip(frames, preds):
        # Draw path and convert to PIL
        img = np.array(frame)
        paths.draw_path(paths.smooth_path(pred), img)
        pil_img = Image.fromarray(img)

        # Shrink it down by half
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # requires Image.LANCZOS (same filter) — confirm the pinned Pillow version.
        pil_img = pil_img.resize((int(pil_img.size[0]/2), int(pil_img.size[1]/2)), Image.ANTIALIAS)
        pil_imgs.append(pil_img)

    # Generate GIF using PIL (duration=50 ms per frame => 20 fps)
    save_path = "/home/methier/projects/end-to-end-driving/" + filename
    img, *imgs = pil_imgs
    img.save(fp=save_path, format='GIF', append_images=imgs, save_all=True, duration=50, loop=0)

# Render three 100-frame windows (~5 seconds each) from the first test route
route_path = Path("/mnt/sda/datasets/comma2k19/processed_dataset") / test_routes[0]
id_ranges = [(3010, 3110), (5475, 5575), (8990, 9090)]
for i in range(len(id_ranges)):
    frames, preds = get_predictions(route_path, id_ranges[i])
    gen_gif(frames, preds, f"demo_video_{i+1}.gif")
gen_test_video.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bat_model
# %matplotlib inline

# Santa Maria, CA: hourly 2016 forecast -> daily mean temperature (converted F -> C).
# FIX: .loc label slicing is end-inclusive, so ii*24 : ii*24+24 selected 25 rows
# and double-counted the first hour of the next day; +23 covers exactly one day.
# (Assumes the CSV has the default 0..N RangeIndex — TODO confirm.)
df = pd.read_csv('../data/SM_forecast_2016.csv')
df['temperature'] = (df['temperature'] - 32)*(5/9)
DailyTemp = []
for ii in range(365):
    DailyTemp.append(df.loc[ii*24:ii*24+23,'temperature'].mean(axis=0))

# Salem, OR
df = pd.read_csv('../data/Salem_forecast_2016.csv')
df['temperature'] = (df['temperature'] - 32)*(5/9)
DailyTemp2 = []
for ii in range(365):
    DailyTemp2.append(df.loc[ii*24:ii*24+23,'temperature'].mean(axis=0))

# Fairbanks, AK
# FIX: this previously re-read the Salem file, but the storage plot below labels
# this series 'Fairbanks, AK'. Assumes the file follows the same naming pattern
# as the other two forecasts — TODO confirm the filename.
df = pd.read_csv('../data/Fairbanks_forecast_2016.csv')
df['temperature'] = (df['temperature'] - 32)*(5/9)
DailyTemp3 = []
for ii in range(365):
    DailyTemp3.append(df.loc[ii*24:ii*24+23,'temperature'].mean(axis=0))

# + 
# Capacity fade vs. cycles per day (Santa Maria temperatures).
# FIX throughout this notebook: the degradation module is imported as
# `bat_model`, but the calls used the undefined name `BatFNs` (NameError).
Cap = []
Cap.append(1)
soc = 50
cycles = 0
soc_low = 0
soc_high = 80
rate = 1
for ii in range(365):
    Cap.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap[ii])

Cap2 = []
Cap2.append(1)
soc = 50
cycles = 1
soc_low = 0
soc_high = 80
rate = 1
for ii in range(365):
    Cap2.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap2[ii])

Cap3 = []
Cap3.append(1)
soc = 50
cycles = 2
soc_low = 0
soc_high = 80
rate = 1
for ii in range(365):
    Cap3.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap3[ii])

plt.style.use('seaborn')
plt.plot(range(366),Cap,range(366),Cap2,range(366),Cap3)
plt.xlabel('Day of Year', fontsize=18)
plt.ylabel('Capacity Fade',fontsize=18)
plt.title('Operating Battery Santa Maria 2016',fontsize=18)
plt.legend(('0 cycle/day','1 cycle/day','2 cycle/day'),fontsize=14)
# -

# +
# Capacity fade vs. state-of-charge operating window (1 cycle/day).
Cap = []
Cap.append(1)
soc = 40
cycles = 1
soc_low = 20
soc_high = 60
rate = 1
for ii in range(365):
    Cap.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap[ii])

Cap2 = []
Cap2.append(1)
soc = 60
cycles = 1
soc_low = 40
soc_high = 80
rate = 1
for ii in range(365):
    Cap2.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap2[ii])

Cap3 = []
Cap3.append(1)
soc = 80
cycles = 1
soc_low = 60
soc_high = 100
rate = 1
for ii in range(365):
    Cap3.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap3[ii])

plt.style.use('seaborn')
plt.plot(range(366),Cap,range(366),Cap2,range(366),Cap3)
plt.xlabel('Day of Year', fontsize=18)
plt.ylabel('Capacity Fade',fontsize=18)
plt.title('Operating Battery Santa Maria 2016',fontsize=18)
plt.legend(('20% <-> 60%','40% <-> 80%','60% <-> 100%'),fontsize=14)
# -

# +
# Calendar (storage-only) fade at the three locations, no cycling.
Cap = []
Cap.append(1)
soc = 100
cycles = 0
soc_low = 0
soc_high = 100
rate = 1
for ii in range(365):
    Cap.append(bat_model.bat_day(soc,DailyTemp[ii],cycles,soc_low,soc_high,rate)*Cap[ii])

Cap2 = []
Cap2.append(1)
soc = 100
cycles = 0
soc_low = 0
soc_high = 100
rate = 1
for ii in range(365):
    Cap2.append(bat_model.bat_day(soc,DailyTemp2[ii],cycles,soc_low,soc_high,rate)*Cap2[ii])

Cap3 = []
Cap3.append(1)
soc = 100
cycles = 0
soc_low = 0
soc_high = 100
rate = 1
for ii in range(365):
    Cap3.append(bat_model.bat_day(soc,DailyTemp3[ii],cycles,soc_low,soc_high,rate)*Cap3[ii])

plt.style.use('seaborn')
plt.plot(range(366),Cap,range(366),Cap2,range(366),Cap3)
plt.xlabel('Day of Year', fontsize=18)
plt.ylabel('Capacity Fade',fontsize=18)
plt.title('Battery Storage (no cycles)',fontsize=18)
plt.legend(('Santa Maria, CA','Salem, OR','Fairbanks, AK'),fontsize=14)
# -

# Split degradation into a cycling component and a storage ("shelf") component.
Cap = []
Cap.append(1)
Shelf = []
Shelf.append(1)
soc = 50
cycle = 0
cycles = 1
soc_low = 0
soc_high = 80
rate = 1
for ii in range(365):
    cycle = cycle + cycles
    (DegCycle, DegStore) = bat_model.bat_day2(soc,DailyTemp[ii],cycle,soc_low,soc_high,rate)
    Shelf.append(DegStore*Shelf[ii])
    Cap.append(DegStore*Shelf[ii]*DegCycle)

DailyTemp[ii]

plt.style.use('seaborn')
plt.plot(Cap)

df.loc[ii*24:ii*24+23,'temperature'].mean(axis=0)

bat_model.bat_price_per_hour(200,18,22,300,200,100)

# Cycle-rate scratch calculations
cycles = 0.5
rate = min(max(cycles/4,0.5),2)
soc_high = min(max(cycles*100,60),100)
bat_model.bat_cycle(400,20,soc_high,rate)**(cycles/400)

soc_high

soc_high

bat_model.bat_price_per_hour(0,16,22,300,200,100)

bat_model.bat_price_per_hour(0,18,22,300,200,100)

# +
energy = 8 #kWhr
hour_start = 18 #6pm, sun goes down
hour_end = 22 #10pm, this means battery stops at 10:00pm, not 10:59
day = 343 #Dec 9th
bat_cap = 13.5 #kWhr
bat_cost = 222*bat_cap #cost scales with capacity, adjust to make numbers relevant if needed
cost_per_hour = bat_model.bat_price_per_hour(energy,hour_start,hour_end,day,bat_cap,bat_cost)
# -

cost_per_hour
ipynb/BatPlots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MNIST convolutional neural networks with regularization
#
# * Make a networks like LeNet5 structure with MNIST data
#   * input pipeline: `tf.data`
#   * `Eager execution`
#   * `Functional API`
# * Apply various regularization methods (L2 weight decay, batch norm, dropout)

# ## Import modules

# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import clear_output

import tensorflow as tf
from tensorflow.keras import layers

tf.enable_eager_execution()

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# -

# ### Import data

# +
# Load training and eval data from tf.keras, scale pixels to [0, 1],
# reshape to NHWC and cast to the dtypes the model/loss expect.
(train_data, train_labels), (test_data, test_labels) = \
    tf.keras.datasets.mnist.load_data()

train_data = train_data / 255.
train_data = train_data.reshape([-1, 28, 28, 1])
train_data = train_data.astype(np.float32)
train_labels = train_labels.astype(np.int32)

test_data = test_data / 255.
test_data = test_data.reshape([-1, 28, 28, 1])
test_data = test_data.astype(np.float32)
test_labels = test_labels.astype(np.int32)
# -

# ### Show the MNIST

index = 219
print("label = {}".format(train_labels[index]))
plt.imshow(train_data[index][..., 0])
plt.colorbar()
#plt.gca().grid(False)
plt.show()

# ## Set up dataset with `tf.data`
#
# ### input pipeline `tf.data.Dataset` and Transformation

# +
tf.set_random_seed(219)
batch_size = 32
max_epochs = 1

# for train
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
train_dataset = train_dataset.shuffle(buffer_size=10000)
train_dataset = train_dataset.batch(batch_size=batch_size)
print(train_dataset)

# for test
test_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels))
test_dataset = test_dataset.batch(batch_size=batch_size)
print(test_dataset)
# -

# ## Create the model
#
# * Use `tf.keras.layers`
# * Use `tf.keras.Sequential()` API (01.mnist.LeNet5.ipynb)
# ```python
# model = tf.keras.Sequential([
#     layers.Conv2D(filters=32, kernel_size=[5, 5], padding='same', activation='relu'),
#     layers.MaxPool2D(),
#     layers.Conv2D(filters=64, kernel_size=[5, 5], padding='same', activation='relu'),
#     layers.MaxPool2D(),
#     layers.Flatten(),
#     layers.Dense(1024, activation='relu'),
#     layers.Dense(10)])```


class MNISTModel(tf.keras.Model):
    """LeNet5-like CNN with L2 weight decay, batch normalization and dropout.

    Intermediate activations are stored as instance attributes
    (``conv1_``, ``conv2_``, ``drop1_`` ...) so they can be inspected
    after a forward pass (used by the feature-map plotting below).
    """

    def __init__(self):
        super(MNISTModel, self).__init__()
        # L2 weight-decay coefficient shared by all parameterized layers.
        self.l2_decay = 0.001
        self.conv1 = layers.Conv2D(
            filters=32, kernel_size=[5, 5], padding='same',
            kernel_regularizer=tf.keras.regularizers.l2(self.l2_decay))
        self.conv1_bn = layers.BatchNormalization()
        self.pool1 = layers.MaxPool2D()
        self.conv2 = layers.Conv2D(
            filters=64, kernel_size=[5, 5], padding='same',
            kernel_regularizer=tf.keras.regularizers.l2(self.l2_decay))
        self.conv2_bn = layers.BatchNormalization()
        self.pool2 = layers.MaxPool2D()
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(
            units=1024,
            kernel_regularizer=tf.keras.regularizers.l2(self.l2_decay))
        self.dense1_bn = layers.BatchNormalization()
        self.drop1 = layers.Dropout(rate=0.6)
        self.dense2 = layers.Dense(
            units=10,
            kernel_regularizer=tf.keras.regularizers.l2(self.l2_decay))

    def call(self, inputs, training=True):
        """Run the model.

        Args:
            inputs: batch of images, shape (N, 28, 28, 1).
            training: forwarded to BatchNorm/Dropout so they switch between
                train and inference behavior.

        Returns:
            Unnormalized class logits, shape (N, 10).
        """
        self.conv1_ = self.conv1(inputs)
        self.conv1_bn_ = self.conv1_bn(self.conv1_, training=training)
        self.conv1_ = tf.nn.relu(self.conv1_bn_)
        self.pool1_ = self.pool1(self.conv1_)
        self.conv2_ = self.conv2(self.pool1_)
        self.conv2_bn_ = self.conv2_bn(self.conv2_, training=training)
        self.conv2_ = tf.nn.relu(self.conv2_bn_)
        self.pool2_ = self.pool2(self.conv2_)
        self.flatten_ = self.flatten(self.pool2_)
        self.dense1_ = self.dense1(self.flatten_)
        self.dense1_bn_ = self.dense1_bn(self.dense1_, training=training)
        self.dense1_ = tf.nn.relu(self.dense1_bn_)
        self.drop1_ = self.drop1(self.dense1_, training=training)
        self.logits_ = self.dense2(self.drop1_)
        return self.logits_


model = MNISTModel()

# without training, just inference a model in eager execution
# (also builds the variables so summary() can be printed):
for images, labels in train_dataset.take(1):
    print("Logits: ", model(images[0:3], training=False).shape)

model.summary()

# ## Train a model
#
# ### Define a optimizer

optimizer = tf.train.AdamOptimizer(1e-4)

cross_entropy_history = []
l2_loss_history = []
total_loss_history = []

# ### Train a model

# +
global_step = tf.train.get_or_create_global_step()

for epoch in range(max_epochs):
    for images, labels in train_dataset:
        start_time = time.time()

        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels, logits)
            # model.losses collects the per-layer L2 regularization terms.
            l2_loss = tf.reduce_sum(model.losses)
            total_loss = cross_entropy + l2_loss

        cross_entropy_history.append(cross_entropy.numpy())
        l2_loss_history.append(l2_loss.numpy())
        total_loss_history.append(total_loss.numpy())

        # Differentiate w.r.t. trainable variables only: `model.variables`
        # would also include the BatchNorm moving mean/variance, which get
        # no gradient (tape.gradient returns None for them).
        grads = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                  global_step=global_step)

        if global_step.numpy() % 10 == 0:
            clear_output(wait=True)
            duration = time.time() - start_time
            examples_per_sec = batch_size / float(duration)
            epochs = batch_size * global_step.numpy() / float(len(train_data))
            print("epochs: {:.2f}, step: {}, loss: {:g}, "
                  "({:.2f} examples/sec; {:.3f} sec/batch)".format(
                      epochs, global_step.numpy(), total_loss,
                      examples_per_sec, duration))

print("training done!")
# -

# ### Plot the loss funtion

# +
plt.figure(figsize=(15, 3))
plt.subplot(1, 3, 1)
plt.subplots_adjust(hspace=0.5, wspace=0.3)
plt.plot(cross_entropy_history, label='cross_entopy')
plt.xlabel('Number of steps')
plt.ylabel('Loss value [cross entropy]')
plt.legend()

plt.subplot(1, 3, 2)
plt.subplots_adjust(hspace=0.5, wspace=0.3)
plt.plot(l2_loss_history, label='l2_loss')
plt.xlabel('Number of steps')
plt.ylabel('Loss value [l2 loss]')
plt.legend()

plt.subplot(1, 3, 3)
plt.subplots_adjust(hspace=0.5, wspace=0.3)
plt.plot(total_loss_history, label='total_loss')
plt.xlabel('Number of steps')
plt.ylabel('Total loss value [cross entropy + l2 loss]')
plt.legend()
plt.show()
# -

# ## Evaluate a model
#
# ### Test trained model
#
# * test accuracy: 0.9798 for 1 epochs (without regularization)
# * test accuracy: 0.9869 for 1 epochs (with regularization)

# +
accuracy = tf.contrib.eager.metrics.Accuracy()

for images, labels in test_dataset:
    logits = model(images, training=False)
    accuracy(labels=labels,
             predictions=tf.cast(tf.argmax(logits, 1), tf.int32))

print("test accuracy: {}".format(accuracy.result()))
# -

# ### Plot test set

np.random.seed(219)

# +
test_batch_size = 16
batch_index = np.random.choice(len(test_data), size=test_batch_size, replace=False)

batch_xs = test_data[batch_index]
batch_ys = test_labels[batch_index]
y_pred_ = model(batch_xs, training=False)

fig = plt.figure(figsize=(16, 10))
for i, (px, py) in enumerate(zip(batch_xs, y_pred_)):
    p = fig.add_subplot(4, 8, i + 1)
    # Blue title = correct prediction, red = wrong.
    if np.argmax(py) == batch_ys[i]:
        p.set_title("y_pred: {}".format(np.argmax(py)), color='blue')
    else:
        p.set_title("y_pred: {}".format(np.argmax(py)), color='red')
    p.imshow(px.reshape(28, 28))
    p.axis('off')
# -

# ### Print all feature maps


def print_all_feature_maps(layer, layer_name):
    """Plot every channel of a feature-map tensor as one tiled grid.

    Adapted from "Deep Learning with Python" (F. Chollet). The original
    version here wrapped the grid construction in a redundant
    `for feature_map in range(num_features)` loop whose index was never
    used, rebuilding the identical grid once per channel; the loop is
    removed so the grid is computed and drawn exactly once.

    Args:
        layer (4-rank Tensor): feature maps, shape (batch, size, size, channels).
        layer_name (string): title for the figure.
    """
    num_features = layer.shape[-1]
    size = int(layer.shape[1])
    images_per_row = 16
    num_cols = num_features // images_per_row
    display_grid = np.zeros((size * num_cols, images_per_row * size))
    for col in range(num_cols):
        for row in range(images_per_row):
            channel_image = layer[0, :, :, col * images_per_row + row]
            # Normalize each channel to a displayable 0-255 range.
            channel_image -= tf.reduce_mean(channel_image)
            channel_image /= tf.keras.backend.std(channel_image)
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            display_grid[col * size: (col + 1) * size,
                         row * size: (row + 1) * size] = channel_image
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')


# without training, just inference a model in eager execution:
for images, labels in train_dataset.take(1):
    print("Logits: ", model(images[10:11], training=False).shape)

print_all_feature_maps(model.conv1_, 'conv1')
print_all_feature_maps(model.conv2_, 'conv2')

# With training=False, dropout is a no-op: activations pass through unscaled.
model.drop1_[0, :20]

# without training, just inference a model in eager execution:
for images, labels in train_dataset.take(1):
    print("Logits: ", model(images[10:11], training=True).shape)

# With training=True, dropout zeroes a fraction of the activations.
model.drop1_[0, :20]
tf.version.2/03.cnn/03.mnist.LeNet5.with.regularization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import numpy as np
# %matplotlib inline
from matplotlib import pyplot as plt
from tqdm import tqdm
# NOTE(review): skimage.draw.circle was removed in scikit-image 0.19 in
# favor of skimage.draw.disk — keep in mind if the environment is upgraded.
from skimage.draw import circle
import json
from skimage.io import imread

# ### Problem
# This notebook is aimed at investigating into UchiyaMarkers coordinate system
#
# It loads .txt file and tries to reproduce the resulting .jpg marker configuration

# Side of the (square) canvas in pixels.
imsize = 600
txt_filename = '0.txt'
jpg_filename = '0.jpg'

# Loading marker file: first line is the point count, then one
# whitespace-separated "x y" pair per line.
points = []
with open(txt_filename, 'r') as f:
    f_array = f.read().split('\n')
    n_lines = int(f_array[0])
    print('Detected %d lines' % n_lines)
    for i in tqdm(range(n_lines)):
        points.append(list(map(int, f_array[i + 1].split())))
assert(len(points) == n_lines)


# get image of dots from their coordinates
def draw_dots(points):
    """Rasterize marker points onto a fresh imsize x imsize binary image.

    The y axis is mirrored (y -> imsize - 1 - y) to match the Uchiya
    convention, and a large reference dot is drawn top-center.
    """
    # creating empty image
    img = np.zeros((imsize, imsize))

    # Helper for drawing filled circles on img. The original version
    # declared an unused `image=img` default parameter while writing to
    # the closed-over `img` anyway; the misleading parameter is removed.
    def draw_circle(x, y, r=5):
        rr, cc = circle(x, y, r)
        img[rr, cc] = 1

    # drawing points from the .txt file
    # transformation: y -> imsize - 1 - y (mirror y axis)
    for x, y in points:
        draw_circle(x, imsize - 1 - y)

    # reference point
    # this point should be centered left-right, and at the top
    draw_circle(300, 15, 10)
    return img


def compare(points, filename):
    """Show the reconstructed dot image next to the original marker image."""
    # draw dots
    img = draw_dots(points)

    # creating 1x2 subplots object
    _, (ax1, ax2) = plt.subplots(figsize=(8, 3), ncols=2)

    # transposing since want coordinate system like in Cellulo
    #   x: right, y: down, origin: top-left
    ax1.imshow(1 - img.T, cmap='gray', interpolation='gaussian')
    ax1.set_title('Created')

    # loading Uchiya .jpg image
    ax2.imshow(imread(filename))
    ax2.set_title('Original')


compare(points, jpg_filename)

# ### Result
# The coordinate system of **UchiyaMarkers** is:
#
# ``x: right
# y: up
# origin: bottom-left``
#
# **Cellulo** coordinate system:
#
# ``x: right
# y: down
# origin: top-left``
#
# Transformation: flip y axis:
# $$y_{c}=y_{max}-1-y_{u}$$

# ## SVG file

dots = json.load(open('drawing.svg.markers.json', 'r'))['markers'][0]['dots_uchiya']
dots = [[float(dot[t]) for t in ['x', 'y']] for dot in dots]
compare(dots, 'drawing.svg.png')
marker-tool/notebooks/UchiyaDrawTest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Builds an Adeft disambiguation model for the shortform "MM":
# mine candidate longforms from the literature, ground them (with a GUI
# pass whose output is frozen into literal maps below), label a corpus,
# train a classifier, and upload the resulting model to S3.

# +
import os
import json
import pickle
import random
from collections import defaultdict, Counter

from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id

from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator

from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
    get_plaintexts_for_pmids
# -

adeft_grounder = AdeftGrounder()

# The ambiguous shortform(s) this model disambiguates.
shortforms = ['MM']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))

# +
# Mine longform candidates for each shortform from (up to 10000) papers
# that mention it; keep the full text dict for corpus building later.
miners = dict()
all_texts = {}
for shortform in shortforms:
    pmids = get_pmids_for_agent_text(shortform)
    if len(pmids) > 10000:
        # Cap the download at a random sample of 10000 papers.
        pmids = random.choices(pmids, k=10000)
    text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
    # Drop near-empty extractions.
    text_dict = {pmid: text for pmid, text in text_dict.items()
                 if len(text) > 5}
    miners[shortform] = AdeftMiner(shortform)
    miners[shortform].process_texts(text_dict.values())
    all_texts.update(text_dict)

longform_dict = {}
for shortform in shortforms:
    longforms = miners[shortform].get_longforms()
    # Heuristic filter: keep longforms whose count*score exceeds 2.
    longforms = [(longform, count, score) for longform, count, score in longforms
                 if count*score > 2]
    longform_dict[shortform] = longforms

# Merge counts across shortforms and take a first grounding pass with the
# automated grounder; the GUI step below refines this map.
combined_longforms = Counter()
for longform_rows in longform_dict.values():
    combined_longforms.update({longform: count for longform, count, score
                               in longform_rows})
grounding_map = {}
names = {}
for longform in combined_longforms:
    groundings = adeft_grounder.ground(longform)
    if groundings:
        grounding = groundings[0]['grounding']
        grounding_map[longform] = grounding
        names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
# -

list(zip(longforms, counts))

# Interactive curation step; its output was captured and hardcoded below,
# so re-running this cell is only needed to re-curate.
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
                                                   grounding_map=grounding_map,
                                                   names=names,
                                                   pos_labels=pos_labels,
                                                   no_browser=True, port=8890)

result = [grounding_map, names, pos_labels]

result

# Frozen output of the GUI curation session:
# longform -> grounding, grounding -> standard name, and positive labels.
grounding_map, names, pos_labels = [{'m mode': 'ungrounded',
  'macaca mulatta': 'MESH:D008253',
  'macromolecular': 'ungrounded',
  'magnetosome membrane': 'GO:GO:0110146',
  'main genotypic effect model': 'ungrounded',
  'maintenance medium': 'ungrounded',
  'major minimal': 'ungrounded',
  'malignant melanoma': 'MESH:D008545',
  'malignant mesothelioma': 'EFO:1000355',
  'malignant myeloma': 'MESH:D009101',
  'mandibular movements': 'ungrounded',
  'marimastat': 'CHEBI:CHEBI:50662',
  'marine metabolites': 'CHEBI:CHEBI:76507',
  'masseter muscle': 'MESH:D008406',
  'maternal mortality': 'MESH:D008428',
  'mathematics motivation': 'ungrounded',
  'matrigel microspheres': 'ungrounded',
  'matrix m': 'MESH:C000625666',
  'mecamylamine': 'CHEBI:CHEBI:6706',
  'medial mammillary nucleus': 'ungrounded',
  'medial meniscectomy': 'MESH:D000074403',
  'medial meniscus': 'MESH:D000072600',
  'medical management': 'ungrounded',
  'mediterranean meal': 'ungrounded',
  'melamine': 'CHEBI:CHEBI:27915',
  'melastoma malabathricum': 'TAXONOMY:128733',
  'melastoma malabathricum linn': 'TAXONOMY:128733',
  'meningomyelocele': 'MESH:D008591',
  'menstrual migraine': 'MESH:D008881',
  'mesangial matrix': 'ungrounded',
  'metabolic memory': 'ungrounded',
  'metal mixture': 'ungrounded',
  'metanephric mesenchyme': 'metanephric_mesenchyme',
  'metastatic melanoma': 'EFO:0002617',
  'methadone maintained': 'ungrounded',
  'methylmercury': 'CHEBI:CHEBI:30785',
  'middle marsh': 'ungrounded',
  'middle molecules': 'ungrounded',
  'mild malaria': 'MESH:D008288',
  'mindfulness meditation': 'MESH:D019122',
  'mineral matter': 'ungrounded',
  'mineralisation medium': 'ungrounded',
  'minimal medium': 'ungrounded',
  'mirror movements': 'HP:HP:0001335',
  'mismatch': 'ungrounded',
  'mismatch oligonucleotides': 'ungrounded',
  'mithramycin': 'CHEBI:CHEBI:31856',
  'mitochondrial membrane': 'GO:GO:0031966',
  'mitochondrial myopathy': 'MESH:D017240',
  'mitomycin c': 'CHEBI:CHEBI:27504',
  'mixed micelles': 'ungrounded',
  'miyoshi myopathy': 'MESH:C537480',
  'mm': 'ungrounded',
  'mobilization macromolecules': 'ungrounded',
  'mode mic': 'ungrounded',
  'molecular mass': 'ungrounded',
  'molecular mechanics': 'ungrounded',
  'molecular micelles': 'ungrounded',
  'moneymaker': 'ungrounded',
  'monocytes macrophages': 'ungrounded',
  'monodora myristica': 'MESH:D026323',
  'morris water maze': 'ungrounded',
  'multimodal': 'ungrounded',
  'multiple micronutrient': 'CHEBI:CHEBI:27027',
  'multiple myeloma': 'MESH:D009101',
  'muscle mass': 'ungrounded',
  'muscularis mucosa': 'ungrounded',
  'mycophenolate mofetil': 'CHEBI:CHEBI:8764',
  'myeloma multiplex': 'ungrounded'},
 {'MESH:D008253': 'Macaca mulatta',
  'GO:GO:0110146': 'magnetosome membrane',
  'MESH:D008545': 'Melanoma',
  'EFO:1000355': 'Malignant Mesothelioma',
  'MESH:D009101': 'Multiple Myeloma',
  'CHEBI:CHEBI:50662': 'marimastat',
  'CHEBI:CHEBI:76507': 'marine metabolite',
  'MESH:D008406': 'Masseter Muscle',
  'MESH:D008428': 'Maternal Mortality',
  'MESH:C000625666': 'Matrix-M',
  'CHEBI:CHEBI:6706': 'Mecamylamine',
  'MESH:D000074403': 'Meniscectomy',
  'MESH:D000072600': 'Meniscus',
  'CHEBI:CHEBI:27915': 'melamine',
  'TAXONOMY:128733': 'Melastoma malabathricum L.',
  'MESH:D008591': 'Meningomyelocele',
  'MESH:D008881': 'Migraine Disorders',
  'metanephric_mesenchyme': 'metanephric_mesenchyme',
  'EFO:0002617': 'metastatic melanoma',
  'CHEBI:CHEBI:30785': 'methylmercury(.)',
  'MESH:D008288': 'Malaria',
  'MESH:D019122': 'Meditation',
  'HP:HP:0001335': 'Bimanual synkinesia',
  'CHEBI:CHEBI:31856': 'mithramycin',
  'GO:GO:0031966': 'mitochondrial membrane',
  'MESH:D017240': 'Mitochondrial Myopathies',
  'CHEBI:CHEBI:27504': 'mitomycin C',
  'MESH:C537480': 'Miyoshi myopathy',
  'MESH:D026323': 'Myristica',
  'CHEBI:CHEBI:27027': 'micronutrient',
  'CHEBI:CHEBI:8764': 'mycophenolate mofetil'},
 ['EFO:0002617', 'EFO:1000355', 'MESH:D008545', 'MESH:D009101',
  'TAXONOMY:128733']]

excluded_longforms = []

# +
# Restrict the grounding map to curated, non-excluded longforms and
# persist the preliminary grounding info alongside the model results.
grounding_dict = {shortform: {longform: grounding_map[longform]
                              for longform, _, _ in longforms
                              if longform in grounding_map
                              and longform not in excluded_longforms}
                  for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]

if not os.path.exists(results_path):
    os.mkdir(results_path)
with open(os.path.join(results_path, f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
    json.dump(result, f)
# -

# Entities/agent texts to supplement the corpus with (empty for this model).
additional_entities = {}

unambiguous_agent_texts = {}

# +
# Label the mined texts with their grounded entity; map labels and
# additional entities to the pmids that mention them.
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
    agent_text_pmid_map[label].append(id_)

entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
                                                    major_topic=True))for entity in additional_entities}
# -

# Pairwise pmid-overlap diagnostics between entities and corpus labels.
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection1.append((entity1, entity2, len(pmids1 & pmids2)))

intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))

intersection1

intersection2

# +
# Extend the corpus with texts for unambiguous agent texts and additional
# entities, avoiding pmids already used.
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
    used_pmids = set()
    for agent_text in agent_texts:
        pmids = set(get_pmids_for_agent_text(agent_text))
        new_pmids = list(pmids - all_texts.keys() - used_pmids)
        text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
        corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
        used_pmids.update(new_pmids)
    all_used_pmids.update(used_pmids)

for entity, pmids in entity_pmid_map.items():
    new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
    if len(new_pmids) > 10000:
        new_pmids = random.choices(new_pmids, k=10000)
    text_dict = get_plaintexts_for_pmids(new_pmids)
    corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()])
# -

# +
# %%capture

# Train with 5-fold cross-validation over a (single-point) parameter grid.
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
# -

classifier.stats

# Bundle the trained classifier into a disambiguator, save it locally,
# and publish the model to S3.
disamb = AdeftDisambiguator(classifier, grounding_dict, names)

disamb.dump(model_name, results_path)

print(disamb.info())

model_to_s3(disamb)
model_notebooks/MM/model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# 5-fold out-of-fold stacking with a Regularized Greedy Forest classifier
# on the Home Credit dataset: produces a test-set submission file and an
# out-of-fold prediction file for downstream stacking.

# +
import time
notebookstart = time.time()

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc

from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score, roc_curve
from rgf.sklearn import RGFClassifier
# -

# +
# Raw application tables: train provides the TARGET labels, test provides
# the SK_ID_CURR ids for the submission file.
train = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip')
test = pd.read_csv('/media/limbo/Home-Credit/data/application_test.csv')
y = train['TARGET']
n_train = train.shape[0]
# -

# Engineered feature table: first n_train rows align with train, the rest
# with test.
d9 = pd.read_csv('../data/ds9.csv', header=0, index_col=None)

d9.head()

# Customers with no Home Credit loan history: blank out their external
# ('ex'/'EX') source features, which are meaningless for them.
no_hc = pd.read_csv('../data/no_hc_loan_cust.csv', index_col=None, header=0)
ind = d9['SK_ID_CURR'].isin(no_hc.values[:, 0])
for f in d9.columns:
    if 'ex' in f:
        d9.loc[ind.values, f] = np.nan
    if 'EX' in f:
        d9.loc[ind.values, f] = np.nan

d9.head()

# Drop the duplicated target column that came in via the feature merge.
del d9['TARGET.x']

test_file_path = "test_rgf_big_0.csv"
validation_file_path = 'oof_rgf_big_0.csv'
num_folds = 5

# +
gc.collect()
train_df = d9.iloc[0:n_train]
test_df = d9.iloc[n_train:]
val_df = train[['SK_ID_CURR', 'TARGET']].copy()
print("Starting RGF. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
gc.collect()

# Cross validation model
folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)

# Create arrays and dataframes to store results
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns
         if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']]
x_train = train_df[feats].values
x_test = test_df[feats].values
print(train_df.shape, x_train.shape)

for n_fold, (train_idx, valid_idx) in enumerate(folds.split(x_train, train['TARGET'])):
    model = RGFClassifier(max_leaf=1666,
                          algorithm="RGF_Sib",
                          test_interval=50,
                          l2=0.1,
                          n_iter=40,
                          reg_depth=2,
                          calc_prob='softmax',
                          learning_rate=0.5,
                          verbose=False)
    # Fix: the labels Series is `y` (train['TARGET']); the original code
    # referenced an undefined name `y_train`, which raised a NameError.
    model.fit(x_train[train_idx], y.iloc[train_idx])

    # Out-of-fold predictions on the held-out fold; test predictions are
    # averaged over the folds.
    oof_preds[valid_idx] = model.predict_proba(x_train[valid_idx])[:, 1]
    sub_preds += model.predict_proba(x_test)[:, 1] / folds.n_splits

    print('Fold %2d AUC : %.6f' % (n_fold + 1,
                                   roc_auc_score(y.iloc[valid_idx], oof_preds[valid_idx])))
    gc.collect()
# -

# +
print('Full AUC score %.6f' % roc_auc_score(train['TARGET'], oof_preds))

# Write submission file and out-of-fold predictions for stacking.
sub_df = test[['SK_ID_CURR']].copy()
sub_df['TARGET'] = sub_preds
sub_df[['SK_ID_CURR', 'TARGET']].to_csv(test_file_path, index=False)
val_df['TARGET'] = oof_preds
val_df[['SK_ID_CURR', 'TARGET']].to_csv(validation_file_path, index=False)
gc.collect()
# -
dashboards-examples/RGF-5-Fold-Stack-v0.1.0-d9.csv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/qte77/ML-HF-WnB/blob/main/HF_WnB_PyTorch_Sweeps_Vortrag_PA1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="LddtUtSobNpO" # # Beispiel-Pipeline Hugging Face, WandB, PyTorch # + [markdown] id="IX1rsNd7eZWI" # ## Ziel # # * PoC für eine Infrastruktur mit externen Anbietern um Experimente mit Transformern durchführen zu können # * Initialen Aufwand für Infrastruktur durch Wiederverwendbarkeit und Reproduzierbarkeit ausgleichen # * NLP-Anwendungen sind am besten Verfügbar und werden hier zur Anschauung verwendet # + [markdown] id="J6QizarzASZ5" # ## Aufbau # # * SCM/VCS Github # * Exploration mit Jupyter Notebooks # * Modelle, Daten etc. 
von Hugging Face # * Logging und Visualisierung mit Weights&Biases (WandB) # # ![Pipeline mit Hugging Face und WandB](https://raw.githubusercontent.com/qte77/ML-HF-WnB/32f21d112ab707b737a07cd027ad837b680f32fe/img/ML-Pipeline-HF-WnB.draw.io.png) # + [markdown] id="6osAQONz3Y_E" # ## Modul-Installation # + id="0n3iT81Zkg-l" import sys red='\033[31m' green='\033[32m' orange='\033[33m' # + id="ecMhQaKq_6J9" # #%%shell #req = "https://raw.githubusercontent.com/qte77/ML/main/reqs/requirements-WnB-HF-PyTorch.txt" #rf = req.split("/")[-1] #curl $req -o $rf # + id="616s_9LMPLEL" # #!{sys.executable} -m pip install -qq -r $rf # + id="KZDfvGMhDaf7" # !{sys.executable} -m pip install -qqq setuptools watermark # + id="Q5_Kv3AGLv0j" colab={"base_uri": "https://localhost:8080/"} outputId="0b6db32a-bc4f-4b30-a77a-6ff502105167" #pre-install folium because wandb-version is obsolete # !{sys.executable} -m pip install -qqq 'folium == 0.2.1' # !{sys.executable} -m pip install -qqq wandb #remove and re-install folium from wandb if pre-install fails # #!{sys.executable} -m pip uninstall -yyy -qqq folium # + id="CJYZTup5C_JU" colab={"base_uri": "https://localhost:8080/"} outputId="8f300477-71af-4237-86cc-dc0b1cf30eb3" # !{sys.executable} -m pip install -qqq datasets transformers # Optional -> install latest version from source # #!{sys.executable} -m pip3 install -qqq git+https://github.com/huggingface/transformers # + id="SXC8iges2cuA" #load bert_score if needed # + id="s0Iy1S_6rALn" #get installed packages and versions to use within requirements.txt # #!{sys.executable} -m pip freeze # + id="CV6P-DK520jC" from google.colab import drive import json import os import watermark # %load_ext watermark # + id="9pgsswo6NkDb" from datasets import load_dataset, list_datasets, list_metrics from transformers import AutoTokenizer from transformers import AutoModelForSequenceClassification from transformers import Trainer, TrainingArguments from datasets import load_metric import wandb #import 
bert_score import numpy as np import torch # + id="GocxY0TNoBkk" colab={"base_uri": "https://localhost:8080/"} outputId="2b436d95-545c-4799-afc2-dac1753f83d8" # %watermark -a qte77 -gu qte77 -ws qte77.github.io -u -i -v -iv # #%watermark? # + [markdown] id="oBmj-w_R3mEs" # ## Laden der Login-Daten # # Laden aus Datei, im Notebook in Formular eingeben # + id="J8BOk4A16VIl" gdrive='/gdrive' wandb_keyfile='/MyDrive/conf/wandb.json' # + colab={"base_uri": "https://localhost:8080/"} id="fJSO0kn85l1u" outputId="42e58418-6a8f-4dc3-c045-02d43a94a988" drive.mount(gdrive) # %ls "$gdrive$wandb_keyfile" # + id="NztOv6ON3l0b" #get from file and save to ENV with open(f"{gdrive}{wandb_keyfile}", 'r') as j: data = json.loads(j.read()) os.environ['WANDB_USERNAME'] = data['username'] os.environ['WANDB_KEY'] = data['key'] # + [markdown] id="3juvgGaJr1l-" # ## Parametrisierung des Experiments # + [markdown] id="ISXMkdQgz7QW" # * Modell # * Datensatz # * Metriken # * primäre Metrik zur Modellevaluation # * weitere zu verwendende Metriken # * WandB # * zu verwendendes Projekt (Entität) # * Logging-Optionen # * Compute-Device (CPU, GPU, TPU) # # # + id="B-G70KObL3Sc" dataset='mrpc' model='bbu' wnb_entity="ba" train_count='5' metric_to_optimize='eval_loss' #https://huggingface.co/metrics metrics_to_load = ['accuracy','precision','recall','f1','mae','mse'] #https://github.com/huggingface/datasets #dataset/task, configuration (sub ds/task), col to rename for tokenizer, cat avg for f1/recall/prec, #MRPC human annotations for whether the sentences in the pair are semantically equivalent #https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv #MNLI Multi-Genre Natural Language Inference Corpus, sentence pairs #MNLI https://dl.fbaipublicfiles.com/glue/data/MNLI.zip dataset_param = { 'YAHOO': ['yahoo_answers_topics','','micro','topic',['question_title'], #,'question_content','best_answer'], ['id','question_content','best_answer']], 'MRPC': 
['glue','mrpc','binary','label',['sentence1','sentence2'],['idx']], 'MNLI': ['glue','mnli','binary','label',['premise','hypothesis'],['idx']] } model_param = { 'DBBU' : 'distilbert-base-uncased', 'BBU' : 'bert-base-uncased', #https://huggingface.co/docs/transformers/model_doc/longformer#transformers.LongformerForSequenceClassification.forward 'LBU' : 'allenai/longformer-base-4096' } params = { 'accuracy' : ['maximize', True], 'loss' : ['minimize', False], 'eval_loss' : ['minimize', False] } ''' try: metrics_to_load.index(metric_to_optimize) except: print('Metric to optimize not contained in metrics to load.') ''' try: dataset = dataset.upper() ds_name, ds_config, ds_avg, ds_colren, ds_colstokens, ds_colrem = dataset_param.get(dataset) except Exception as e: print(red, e) try: model = model.upper() modelname = model_param[model] except Exception as e: print(red, e) try: metric_to_optimize.lower() goal, greaterBool = params.get(metric_to_optimize, ['Invalid metric.','']) except Exception as e: print(red, e) # + id="12ieiqO3AQQn" #os.environ['COLAB_TPU_ADDR'] #os.environ['XRT_TPU_CONFIG'] try: os.environ['TPU_NAME'] device = 'tpu' except: try: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") except Exception as e: print(red, e) device = f'{device}'.upper() # + id="IeZYnmM0-hDj" colab={"base_uri": "https://localhost:8080/"} outputId="cc87d02d-9e9e-49f0-f3c9-d02c4dcee381" wnb_project_name=f'{model}-{dataset}-{device}-sweep' print(green, wnb_project_name) # + id="GXPJYNSjDU40" colab={"base_uri": "https://localhost:8080/"} outputId="a5c16589-1a33-4cb7-9e0c-88252e95ed6a" #https://docs.wandb.ai/guides/track/advanced/environment-variables # %env WANDB_WATCH="all" # %env WANDB_LOG_MODEL=true # %env WANDB_PROJECT=f'{wnb_project_name}' # %env WANDB_ENTITY=f'{wnb_entity}'' # %env WANDB_SAVE_CODE=true #avoid error: #The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks... 
# %env TOKENIZERS_PARALLELISM=true # + id="Fn2vru-yDmZv" #os.environ['COLAB_GPU'] #os.environ['COLAB_TPU_ADDR'] #os.environ['XRT_TPU_CONFIG'] #os.environ['TPU_NAME'] #os.environ # + [markdown] id="-nAfgDi1hbid" # # Sweep-Konfiguration # + [markdown] id="kaKUEAg6iiqP" # ## Links # * [Sweep configuration](https://docs.wandb.com/sweeps/configuration) # * [YAML file](https://docs.wandb.com/sweeps/quickstart#2-sweep-config) # * [sweep random variables](https://docs.wandb.com/sweeps/configuration#distributions) # * [simple training script and a few flavors of sweep configs](https://github.com/wandb/examples/tree/master/examples/keras/keras-cnn-fashion) # * [list of all configuration options](https://docs.wandb.com/library/sweeps/configuration) # * [Big collection of examples in YAML format](https://github.com/wandb/examples/tree/master/examples/keras/keras-cnn-fashion) # * [Sweep from an existing project](https://docs.wandb.ai/guides/sweeps/existing-project) # * [Sweep on CLI Quickstart](https://docs.wandb.com/sweeps/quickstart) # * [Example Tune Sweep Dashboard](https://app.wandb.ai/wandb/examples-keras-cnn-fashion/sweeps/xbs2wm5e?workspace=user-lavanyashukla) # * We offer to `early_terminate` your runs with the [HyperBand](https://arxiv.org/pdf/1603.06560.pdf) scheduling algorithm. 
See more [here](https://docs.wandb.com/sweeps/configuration#stopping-criteria) # * [Bayesian BOHB ArXiv 1807.01774](https://arxiv.org/abs/1807.01774) # * [Bayesian Hyperband](https://app.wandb.ai/wandb/examples-keras-cnn-fashion/sweeps/us0ifmrf?workspace=user-lavanyashukla) # + [markdown] id="dlfiiE24hgbv" # ## Konfiguration # + id="NzwrNlnxL3Sg" #https://huggingface.co/docs/transformers/main_classes/optimizer_schedules parameters_dict = { 'learning_rate': { # a flat distribution between 0 and 0.1 'distribution': 'uniform', 'min': 5e-7, 'max': 5e-3 }, 'max_steps' : { 'values': list(range(1500,5000,500)) }, 'seed' : { 'distribution': 'int_uniform', 'min': 51, 'max': 202 }, 'optim': { 'distribution': 'categorical', 'values': ['adamw_torch','adafactor'] #torch.AdamW, deprecated: adamw_hf } } sweep_config = { 'method' : 'random' #grid, bayes } sweep_config['metric'] = { 'name': metric_to_optimize, 'goal': goal } sweep_config['parameters'] = parameters_dict # + [markdown] id="mIYKHTtYhKDK" # # Daten # + [markdown] id="CEwBLTCvz42D" # ## Laden # + id="Yc0RJ9JlL3St" colab={"base_uri": "https://localhost:8080/", "height": 313, "referenced_widgets": ["f0c4f84b24364e5bbed0b082ad1162f8", "d105197028dd480fb063c6d6caee4731", "<KEY>", "f64c9574627849a7a5bb8e214863cdf1", "<KEY>", "609752ddae3848fc82c2eab48bace713", "6d996a1ababe42fab08d8fd2fe20d06e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "eb12c185257743c28c310ed4b94411ae", "<KEY>", "f0ada4e9ddc144c3a49210fed4bb8c91", "<KEY>", "0eff407a7a5a48a7a100d7e8038a64cc", "5562a13fbe264d44b826b82610e1e07a", "<KEY>", "<KEY>", "36533386ee8b40bdb5010445777e32ae", "b26e37c3095e47fd9a80e71c4411f659", "4d615d83967d422aaa3c2781ae90a96b", "<KEY>", "<KEY>", "<KEY>", "e2f23cc9022140dbad3025d4fa963a36", "<KEY>", "00dd7c47b2cc481485a6b1ca1d265a15", "<KEY>", "0355b2a411a949b4a78e9f7a2d783f34", "e2605da14bfe4488a10deb0b2cbe7c8d", "444d11cc2f174f62a1cde34650318ca5", "<KEY>", "<KEY>", "6ca21cd4388a4d48b482b3496ac77907", 
"d3fd2dbbc7ff4ffa92fa024987a057ca", "<KEY>", "545a27620f8443299382fd4ea90a2377", "b70496a455da4216b99c7aedb03a7d1d", "d129cc1ea26d4ae98f7d06cdf969c382", "8522e7ca7166494a83dafc7ebfb18840", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9c6ae0b0219a478f93f92e9a1ff50a1f", "87921f47ce45464099a26d72efce0e96", "6a062ce1075b4d788b314afed0859cd7", "<KEY>", "<KEY>", "595fe27503ea46d4833575fada8d1caf", "011ff44c697f471185381023235fcab9", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "783678c69e6d4e0b8e5e630dd0b13364", "09e44dddca5748508aef05fc42ecf668", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "b4a820e8a1b549b6af79d8bafec3b459", "<KEY>", "<KEY>", "20ad434ca7014db0897eaf336d3d14ee", "<KEY>", "<KEY>", "54e7d3906e374eb2ae7f1fcee90946b0", "d46c4e2cd01540968eeef9417f8ef95f", "<KEY>", "<KEY>", "4f0e9732d0e842d2b791e1b5c153f6c2", "43f3adc13cea43d7a9bb098da8af08f4", "<KEY>", "85252c6aa00d4895a8fc9a9b12c79632", "cd735712a4f545b0984afcce4273675f", "ee6d8feabdd845e9b2a868189a43d9e8", "f67baf2f9fc74dacb2eddab0ce1c7372", "5f6560c9312e417bb9adc413e7a2ea0c", "<KEY>", "c8ea2eaf501e438dac730fd705657d1a", "9361b70ded824bd2adb8c564d49d20fe", "695123fa0b5940a28029f6697996beb0", "12fb6c6269114ea9b39a328ee71903ab", "630f670e4ce148a5967729c64f367fb8", "<KEY>", "<KEY>", "67a526593df343abb1d5841d72c11ca4", "<KEY>", "<KEY>", "6a7bca4f559f49fcac6c7546af437b2f", "<KEY>", "17374a46f2984f02a20b3dded6c53717", "2da31642ade0465e8cadc78d0945b31e", "9defac247749463e835df2af22e5d3a0", "<KEY>", "<KEY>", "<KEY>", "6c9e73a4d4284a12bf675931feca4268", "3b83e7a51a124c62b09eef0614b7a52f", "<KEY>", "f1f91650709b4e2dba7185a852561812", "900db51025374ae0b467b1a4e58c7a51", "<KEY>", "<KEY>"]} outputId="355b31cb-3889-4913-e82a-f8773a81d67e" #list_datasets() #load_ds also splits into train/eval if ds_config == '': print(green, f'Downloading dataset "{ds_name}".') ds = load_dataset(ds_name) else: print(green, f'Downloading dataset "{ds_config}" from "{ds_name}".') ds = load_dataset(ds_name, ds_config) # + [markdown] 
id="CL2LJfrBg9HC" # ## EDA # + colab={"base_uri": "https://localhost:8080/"} id="k3y5L0VdjiOb" outputId="0645106b-4dad-4145-fe2c-ce144fb903d6" ds.column_names # + [markdown] id="VIDYeTowhERx" # ## Tokenization # + id="OrRvN4dP_Idd" label_list = ds['train'].unique(ds_colren) num_labels = len(label_list) # + id="0uae8nnd_EQA" #rename column 'dscol_rename', model expects 'labels' for name in ds: if ds_colren in ds[name].column_names: ds[name] = ds[name].rename_column(ds_colren, 'labels') else: print(red, "Attribute/Feature/Column '%s' not found in '%s'. Found:" % (ds_colren, name)) print(ds[name].column_names) # + id="My7toWl8_CEr" colab={"base_uri": "https://localhost:8080/"} outputId="a24b996e-58c2-473c-84e2-84792cd5294b" ds.column_names # + id="IqS2qPiqL3Su" colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["410aff7a44854544b7aa4a7b708a0306", "aa23e17206844c01915f45d23fe5b696", "f0358f0066bf44e29a4e5defe89ef3d5", "41caf839f7b1448d929b44f5cf3c2f8c", "2ee7ccac24a44c8a8f00b6f349105ef2", "19bdbeb91e754ef4b68163f1f38a127a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "36e9578bb402427e9cfaa4e52daf80e2", "<KEY>", "<KEY>", "d94dd57d70704826ac9732e59da11805", "<KEY>", "5269d4de50ba4212b66e7b09b84cd5ed", "4da5b1a94ce6418ea75a7a691ded59fc", "<KEY>", "<KEY>", "5feec0ae6605438f98a24d5bea1ad1c0", "<KEY>", "ce1d3a21aa7346ee9c31cea7365363c6", "d093a2172ad948a0981addf1442ff714", "<KEY>", "c4f21438fde649a0bbd9c3166b1eed5e", "<KEY>", "4c7c149d02c9487db77838ace10fc88e", "<KEY>", "be5a4f07116f4e25b48a11ededaa5de8", "<KEY>", "ec024fe26219410a9587d678704a5082", "172fe053633545628cd6cefb0fab776a", "23f49f80647b475a9b9cb93e55579375", "<KEY>", "4230778d78b74e1a81e0cc1ef17fad24", "<KEY>", "<KEY>", "<KEY>", "7a48adaaef124403846be14ac613e583", "fa3e718d6a1d42b9a0bba00b407d4c29", "<KEY>", "<KEY>", "<KEY>", "dcf82f3fb45f40859b04e92162dd82ae"]} outputId="2f460c38-1d8e-4d0d-9a0d-a3802de7e752" #padding batched data # try max_length=X, try fast=False tokenizer = 
AutoTokenizer.from_pretrained(modelname, use_fast=True, truncation=True, padding=True) # + id="gLecvlyS_P3U" colab={"base_uri": "https://localhost:8080/", "height": 113, "referenced_widgets": ["60b216b24e384223ac96e99b0dd6a1f6", "1fbb613640ee4649832f5acc5ebbb88d", "26c900cb7a3c40308fcc02d8629cd1cd", "4cc356f051574c9dbed74405aeb7eaec", "<KEY>", "<KEY>", "2fed5aeb038846a9a1e4a161d1e0fe4a", "dfc8340731194cbb9f6d6f46e336c0af", "<KEY>", "f92a374110514f2e9b4e6f5b64fa5f24", "220ca8c8cb8c41328013ec565a1fe326", "<KEY>", "<KEY>", "09c23f1a775a454db03b26770657e1b5", "<KEY>", "<KEY>", "<KEY>", "b29c2c2fe4e9416597fedabde66463be", "f2eca78f9c644456888a3bc1aa5db28d", "5a2b62a12e8541baa1a3dc500e598d3f", "23d70843781e47f79b7be225df283452", "<KEY>", "e62bfe88a4cf4c6e80752de130069677", "<KEY>", "<KEY>", "7fbdc0e3423d4de19315129e54768b3d", "2538e08607084d73bc031803a8070379", "42c47e6bd6ea49a08d82b39bbe99d62e", "8dcf27b692894610ab1762ec2911d72e", "b2bd882296e14997ae000de3698ca923", "<KEY>", "e4410094b0d642af8d78ac8d3adc1ea2", "fb829eb539cd48f89b043491f8f7d068"]} outputId="d70c9a22-1914-4042-a38a-9cb03a8b121d" #tokenizing and padding #try tokenizer(padding="max_length", max_length=X) #attributes needed for down-stream training: #'input_ids', 'token_type_ids', 'attention_mask', 'labels' #lambda convert tuple to list, list comprehension? #'labels' has to be present after tokenization to avoid KeyError('loss') #https://github.com/huggingface/transformers/issues/11256 ''' #Lambda with input_cols and list comprehension, but *x passes tuples ds_tokenized = ds.map( lambda *x: a=tokenizer([item for item in x], truncation=True), #t in x for item in t input_columns=ds_colstokens, batched=True ) # explicit lambda with case ds_tokenized = ds.map(lambda x: tokenizer(x["sentence1"],x["sentence2"],truncation=True),batched=True) #function overloading with input_cols, dataset not passed to fun by map() def tokenize(col1, col2=None, col3=None): #... 
ds_tokenized = ds.map(tokenize, input_columns=ds_colstokens)
'''

# NOTE(review): this first tokenize() is immediately shadowed by the second
# definition below and is therefore dead code; kept as a record of the
# attempt that raised the TypeError noted inline.
def tokenize(data):
    # Collect the raw text columns into one list and tokenize them together.
    cols = []
    for v in ds_colstokens:
        cols.append(data[v])
    #TypeError: TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]
    return tokenizer(cols, truncation=True)

#Error ArrowInvalid: Column 4 named input_ids expected length 1000 but got length 2
#with is_split_into_words=True
def tokenize(data):
    # Batch-tokenization callback for Dataset.map: dispatches on how many
    # text columns the dataset provides (1-3) and passes them as separate
    # positional arguments so the tokenizer can pair sentences.
    # NOTE(review): returns None when len(ds_colstokens) is 0 or > 3.
    colen = len(ds_colstokens)
    if colen == 3:
        return tokenizer(data[ds_colstokens[0]], data[ds_colstokens[1]], data[ds_colstokens[2]], truncation=True)
    elif colen == 2:
        return tokenizer(data[ds_colstokens[0]], data[ds_colstokens[1]], truncation=True)
    elif colen == 1:
        return tokenizer(data[ds_colstokens[0]], truncation=True)

# Tokenize every split of the DatasetDict in batches.
ds_tokenized = ds.map(tokenize, batched=True)

# + id="2u2pd3cPW5ma" colab={"base_uri": "https://localhost:8080/"} outputId="4736e08c-13d5-4d89-997d-9a5bef5245df"
#avoid info 'The following columns in the evaluation set don't have a corresponding argument'
# Drop the raw text columns (and any extra columns listed in ds_colrem);
# best-effort: failures are only reported, not fatal.
try:
    ds_tokenized = ds_tokenized.remove_columns(ds_colstokens).remove_columns(ds_colrem)
except Exception as e:
    print(red, e)

# Show the columns that remain after tokenization/cleanup.
ds_tokenized.column_names

# + id="kCdEh83fegeo"
#save tokenized dataset to /root/.cache/huggingface/datasets
#or google drive, mount beforehand
#TODO

# + [markdown] id="EPMiV6gYjl2S"
# # Modell

# + id="MVbYZChVPTNN" colab={"base_uri": "https://localhost:8080/", "height": 173, "referenced_widgets": ["eef661a68ad0471a8053cc78de73830c", "<KEY>", "524ed348655a49ff819f6ca62722cde4", "98dd0a2b13214b20bdfe464236ca2b14", "<KEY>", "<KEY>", "<KEY>", "f878d2d13a374c04ac2e20b02b666e01", "42c2616ded474870ba3fe04a75357cc9", "eec872ac77b84a408fa9c8a2a0e09f25", "<KEY>"]} outputId="2ca15ba0-db1f-48aa-f338-81c19f71dd4a"
# Download the pre-trained checkpoint with a fresh classification head sized
# to the number of labels found in the dataset.
print(green, f'Downloading model "{modelname}".')
modelobj = AutoModelForSequenceClassification.from_pretrained(modelname, num_labels=num_labels)

# + colab={"base_uri": "https://localhost:8080/"} id="NwM99BeHCypY" outputId="42af3bb3-6d3d-42aa-9c5e-430da118a271"
print(modelobj.config)
# modelobj.config.classifier_dropout = 0.2
# modelobj.config.max_position_embeddings = 1024

# + [markdown] id="SzD1LUD2C6Ds"
# ## Transformer-Architektur
#
# ![Transformer Attention](https://miro.medium.com/max/875/1*9nUzdaTbKzJrAsq1qqJNNA.png)

# + [markdown] id="V-V2vkWeGOKE"
# ## Attention

# + colab={"base_uri": "https://localhost:8080/"} id="j4doME5oDQu9" outputId="f6153cc0-06b7-4283-b50b-1e5aba286157"
# Inspect the first encoder layer (attention + feed-forward sub-modules).
print(modelobj.base_model.encoder.layer[0])
# print(modelobj.bert.encoder.layer[0])

# + [markdown] id="zXVsfAvnFRtc"
# ## Embeddings

# + id="daRgWIk1FQbQ" colab={"base_uri": "https://localhost:8080/"} outputId="4ac7d88a-97aa-49b6-c335-63fce916a02c"
print(modelobj.base_model.embeddings)
# print(modelobj.bert.embeddings)

# + [markdown] id="UXtDG5RP1vsQ"
# ## Test model before fine-tuning

# + id="KBqBNxk6PZRw"
#TODO source for test function, WandB colab?
# NOTE(review): torch is only imported on CUDA; get_topic uses torch.no_grad,
# so it only works when device == "cuda" — confirm whether CPU use is needed.
if device == "cuda":
    import torch

def get_topic(sentence, tokenize=tokenizer, model=modelobj):
    """Classify *sentence* with *model* and return the predicted label string.

    BUGFIX: the default for ``model`` was ``model`` itself, an undefined name
    (the loaded model is ``modelobj``), which raised a NameError when the cell
    ran; additionally the ``tokenize`` parameter was ignored in favour of the
    global ``tokenizer``. Both parameters are now honoured; defaults preserve
    the intended behaviour.
    """
    # tokenize the input
    inputs = tokenize(sentence, return_tensors='pt')
    # ensure model and inputs are on the same device (GPU)
    if device == "cuda":
        inputs = {name: tensor.cuda() for name, tensor in inputs.items()}
        model = model.cuda()
    # get prediction - 10 classes "probabilities" (not really true because they still need to be normalized)
    with torch.no_grad():
        predictions = model(**inputs)[0].cpu().numpy()
    # get the top prediction class and convert it to its associated label
    top_prediction = predictions.argmax().item()
    return ds['train'].features['labels'].int2str(top_prediction)

# + id="vKdaDnUlPaqd"
#get_topic('Why is cheese so much better with wine?')

# + [markdown] id="RTmgz_w1KTWx"
# # Metriken
#
# * Loss (MSE, MAE), Accuracy
# * Recall, Precision, F1
# * Perplexity (PPL)
# * BLEU

# + id="4jIni5JL3M_H"
#list_metrics()

# + [markdown] id="LSOOPR1I5CaK"
# ## HF Metrics Builder Skripte laden

# + id="EJbMeYSnGuvf" colab={"base_uri": "https://localhost:8080/", "height": 313, "referenced_widgets":
["173addb25b204408a10d428bd39b8d37", "2e3952dab5c447de8c6987f2552e4138", "950c26fbd2744815ad5dbc3e6f0e5ec5", "6076b804a9964826a2e21fe8adbd2c02", "f0a37d599d8a4404a8b142937909a391", "9f4130c1710c49219cfabafe44777602", "f36bea7641504b4f82fefec64bb0c7f9", "c115d10811e54e0c926551897ffe86de", "7fbc3fa2d8ab4294b4ae49d815dcea46", "aa6c3c1c6a434097b3bb814e2e9d5897", "<KEY>", "4fbc84fe492f4447ac22e142b8ba99db", "<KEY>", "e298810b9aea4f26a2be49bea194b9af", "<KEY>", "<KEY>", "b50c6763ce9e4f00897fb7b4ebae6f0d", "009d4b51ebd14393a99a5d740a9c54a1", "677f6721b0b04e899a631e3e454602fe", "10f1399c094a441d99a673d6465fcf7a", "<KEY>", "<KEY>", "d61a94a0459144b0927ae07936befbe5", "dc6c58dc544e4d3a9f531081841efe2b", "ca9d8181243344b7a2324282d774e061", "e6b21a21aa84445f86f3aa05caf58648", "<KEY>", "0152a8f0c432464a886c12d8a75c435c", "ad4e73f3f6e6473db27d4f75ddef4979", "<KEY>", "519c3615cbd04edbb62eff009a77911e", "dee66002ad9d422ab535d2165b6a6cfe", "40896e6764554a678dfa98515c55a026", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "88babf8b3a754f77b29e21df01a1e71a", "<KEY>", "ced330d190cd4f63994ccff29377e41d", "5ed2493f8e644105803ff2ed52e02caf", "<KEY>", "384b02b164e2491ea456ebe71f76057a", "<KEY>", "f51bc3ed6a014268be233d03452db568", "<KEY>", "0439b6d62865461ba0c9731632a6000f", "<KEY>", "<KEY>", "ca5ee742576f4a81b5985b99e4662a73", "e7d417c674234a8f94eaaf73b120455b", "8e3ca6105d314b9e882d9fe737631a9d", "<KEY>", "<KEY>", "<KEY>", "5735f517cef44d348950987e28d8004b", "<KEY>", "834699644c664a7e8fb0363be0d07c1c", "d441a19671a2403cad8932b5fa75d084", "<KEY>", "4957ac819c644635a35b0d5968d7a409", "a979a1d84bc642ed841df38331dd3cbd", "f25233a98cc64e37a477f6e1b99fda39", "313896f068fa44d0bb6aed589ac4e3c0", "9d82ab963c3441a991eaa491d39a7722"]} outputId="89ba28df-f026-46e9-d9fd-f99f251c7f2c" metrics_loaded = [] #downloading metrics builder scripts for met in metrics_to_load: print(green, f'Downloading builder script for "{met}".') metrics_loaded.append(load_metric(met)) # + id="76u-ydWDfJc0" 
#PrecisionRecallCurve
#sklearn.metrics.precision_recall_curve
#https://colab.research.google.com/github/wandb/examples/blob/master/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W%26B.ipynb
#https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html#sphx-glr-auto-examples-model-selection-plot-precision-recall-py
#https://scikit-learn.org/stable/auto_examples/miscellaneous/plot_display_object_visualization.html#sphx-glr-auto-examples-miscellaneous-plot-display-object-visualization-py

# + id="zgQIwh5Ueaub"
def compute_metrics(eval_pred):
    """Compute every loaded HF metric for one Trainer evaluation pass.

    Each metric is logged to wandb and printed; the accuracy dict is returned
    to the Trainer.

    BUGFIX: ``ret`` was only assigned when 'accuracy' was among
    ``metrics_to_load``; otherwise the final ``return ret`` raised a
    NameError. ``ret`` is now initialised to an empty dict so the function is
    safe for any metric selection.
    """
    predictions, labels = eval_pred
    # logits -> predicted class ids
    predictions = np.argmax(predictions, axis=1)  #predictions.argmax(-1)
    ret = {}  # value handed back to the Trainer; stays empty if 'accuracy' is not loaded
    print(orange,"*************")
    for i, m in enumerate(metrics_loaded):
        # precision/recall/f1 require an averaging mode for multi-class labels
        if metrics_to_load[i] in ['precision','recall','f1']:
            met = m.compute(predictions=predictions, references=labels, average=ds_avg)
        else:
            met = m.compute(predictions=predictions, references=labels)
        if metrics_to_load[i] == 'accuracy':
            ret = met
        wandb.log(met)
        print(met)
        #test if metrics-obj need to be reloaded to avoid same eval values
        #metrics_loaded[i] = load_metric(metrics_to_load[i])
    print(orange,"*************")
    return ret

# + [markdown] id="cw2E3JFEkdMA"
# # Training

# + [markdown] id="wLtJN2wclUKN"
# * Die Funktion `train()` initiert die jeweiligen Durchläufe mit den Werten der Konfiguration, die der `wand.agent` auswählt
# * [**`wandb.init()`**](https://docs.wandb.com/library/init) – Initialize a new W&B Run. Each Run is a single execution of the training function
# * [**`wandb.config`**](https://docs.wandb.com/library/config) – Save all your hyperparameters in a configuration object so they can be logged.
Read more about how to use `wandb.config` [here](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/wandb-config/Configs_in_W%26B.ipynb)
# * More details on instrumenting W&B with PyTorch, see [this Colab](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/pytorch/Simple_PyTorch_Integration.ipynb)

# + [markdown] id="SkJSYkTUMFaO"
# ## Args and Train()

# + id="d3AHEf9ZRoyK"
'''
num_train_epochs=16,              # total number of training epochs
per_device_train_batch_size=32,   # batch size per device during training
per_device_eval_batch_size=32,    # batch size for evaluation
warmup_steps=600,                 # number of warmup steps for learning rate scheduler
weight_decay=0.01,                # strength of weight decay
logging_dir='/content/logs',
fp16 = True,
'''

def train(config=None):
    """One sweep run: build TrainingArguments from the sweep config and train.

    Called by wandb.agent; the hyperparameters chosen by the Sweep Controller
    arrive via wandb.config inside the wandb.init() context.
    """
    # Initialize a new wandb run
    with wandb.init(config=config):
        # If called by wandb.agent, as below,
        # this config will be set by Sweep Controller
        config = wandb.config
        # Derive checkpoints from the run length: evaluate 5 times per run,
        # save every second evaluation.
        eval_steps = round(config.max_steps / 5)
        save_steps = eval_steps * 2
        #args need to be assigned here to avoid wandb runtime TypeError()
        #"'TrainingArguments' object does not support item assignment"
        args = TrainingArguments(
            report_to = 'wandb',
            output_dir = wnb_project_name,
            overwrite_output_dir = True,
            evaluation_strategy = 'steps',    # evaluate every eval_steps steps
            logging_steps = 100,
            load_best_model_at_end = True,
            run_name = wnb_project_name,      # name of the W&B run
            eval_steps = eval_steps,
            save_steps = save_steps,
            #remove_unused_columns = True, # avoid info 'The following columns in the evaluation set don't have a corresponding argument'
            # model-selection criterion for load_best_model_at_end
            metric_for_best_model = metric_to_optimize,
            greater_is_better = greaterBool,
            #to be changed by sweep agent
            learning_rate = config.learning_rate,
            max_steps = config.max_steps,
            seed = config.seed,
            optim = config.optim
        )
        trainer = Trainer(
            model = modelobj,
            args = args,
            train_dataset=ds_tokenized['train'],
            eval_dataset=ds_tokenized['test'],
            tokenizer=tokenizer,
            compute_metrics=compute_metrics
        )
        print(orange,"*************")
        print("Metric: %s, #Labels: %s, Avg: %s" % (metric_to_optimize, num_labels, ds_avg))
        print("eval_steps: %s, save_steps: %s" % (eval_steps, save_steps))
        #print(os.environ)
        print(orange,"*************")
        # Baseline evaluation before any fine-tuning, then the actual run.
        trainer.evaluate()
        trainer.train()

# + [markdown] id="ORLn5_ZMk1NF"
# # WandB

# + [markdown] id="0fIuRkkm5M-q"
# ## Login

# + id="cIrrKTFvJgJZ"
#wandb.login()
#wandb.init(project=wnb_project_name, entity=wnb_entity, save_code = True)
#wandb.finish()

# + id="Mut4jST3zRIk" colab={"base_uri": "https://localhost:8080/"} outputId="1858369b-91c4-4385-dfe2-a0082df5b0a6"
#read api-key from ENV, if not provided from interactive user input
# #!wandb login --relogin
# !wandb login --cloud $WANDB_KEY

# + [markdown] id="1K4uVsOYlE1f"
# ## Sweep Ablauf
#
# * Initieren des Sweep Controlers durch `wandb.sweep()` mit `sweep_config` und `project`
# * Sweep Controler gibt `sweep_id` zurück, mit der Agents durch `wandb.agent` initiert werden können
# * `wandb.sweep`in CLI durch `wandb sweep config.yaml` ersetzt
# * Aufruf von `wandb.finish()` in Notebook notwendig, um Agenten zu beenden
#
# <img src="https://i.imgur.com/zlbw3vQ.png" alt="sweeps-diagram" width="500">
#
#
#

# + [markdown] id="w0RJvgaYlA1F"
# ## Sweep initieren

# + id="0l_hV53KzSN3" colab={"base_uri": "https://localhost:8080/"} outputId="f111ae6a-562a-4a31-b326-766f8a819745"
sweep_id = wandb.sweep(sweep_config, project=wnb_project_name, entity=wnb_entity)

# + [markdown] id="FaXMOFaU5Sus"
# ## Sweep starten

# + id="_2sePTHOOd2K" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["cb947b086d694a02be98a4d75effa253", "d1c9b2075f2e4aa2b49f9a2f0df95370", "647fcc9cc659483b8abdaeb23eb646a6", "707f6721a3db474e92f76cf092128a1d", "622a938c53d5448aa4f5f1f249963670", "cda1bfd56ec645a3b04fefb1b1fbb355", "73b71502851f4666bfcc9cdd9f92972e", "30910bd4da0d4e279e35b1133ad9a00a"]} outputId="34595a13-41ab-4ca0-89ba-56fb75f8eb23"
wandb.agent(sweep_id, train, count=train_count) # + id="DUzD66zUk0Tn" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="c3cbb704-1601-44e9-c5c0-97a1f4c70016" #explicitly call finish within notebooks wandb.finish() # + [markdown] id="bfGbeQy30EQi" # # TODO # + [markdown] id="XXrRYAheJIji" # ## Folgende Ausarbeitungen # # * **Festlegen weiteres Vorgehen für PA2 und BA** # * **Benotungsgrundlagen** # + [markdown] id="Lx5_bNXo4LXr" # ## Allgemein # # * Auswahl des Anwendungsfalls, Modelle und Metriken # * Pre-Trained oder eigene Modell-Implementierung # * Auswahl Benchmark-Datensätze # * Auf WandB-Rohdaten zugreifen, wenn möglich und nötig # * Wenn nötig, Enkodierung der Tokenizer anpassen # + [markdown] id="y5dS5_D2G4Zk" # ## Metriken auswählen # # * Modell: Precision, Recall, F1, PPL, BLEU etc. # * System: FLOPS/Token, # + [markdown] id="XLsuLxS74SGX" # ## Baseline für Metriken/Scores etablieren # # * Problem der erratischen F1-Scores # * Funktionierendes Beispiel von HF als Baseline nutzen # + [markdown] id="FuKX-ydZ4Xl1" # ## Grundlegende EDA einpflegen # # * Qualitativ # * Verteilung, Muster # * Disbalancen, Bias # * Lokale vs Globale Features # * Quantitativ # * Visualisierung # * Ausreißer # + [markdown] id="kTlcTuMK4cRz" # ## Referenzimplementierung # # * [Paper: Attention is all you need](https://arxiv.org/abs/1706.03762) # * [Paper: BERT Bi-Directional Encoder Representation of Transformer](https://arxiv.org/abs/1810.04805) # * The Annotated Transformer: [Artikel (no HTTPS)](http://nlp.seas.harvard.edu/2018/04/03/attention), [Github](https://github.com/harvardnlp/annotated-transformer) # * [Tensorflow tutorial: Transformer model for language understanding](https://www.tensorflow.org/text/tutorials/transformer) # + [markdown] id="CG99YKXu4fAN" # ## Benchmark-Datensätze # # * [GLUE General Language Understanding and Evaluation](https://gluebenchmark.com/) # * [SQuAD Stanford Question Answering 
Dataset](https://rajpurkar.github.io/SQuAD-explorer/) # * [Paper: Long Range Arena: A Benchmark for efficient Transformers](https://arxiv.org/abs/2011.04006) # * [Paper: Efficient Transformers: A survey](https://arxiv.org/abs/2009.06732) # + [markdown] id="6t0e7FNR4iIo" # ## Umsetzung orientiert an # # * [A Recipe for Training Neural Networks](https://karpathy.github.io/2019/04/25/recipe/) # * [MetaAI OPT175B Logbook](https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/chronicles/OPT175B_Logbook.pdf) # * [BigScience Chronicles](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md) # * [BigScience Workshop TensorBoard](https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard) # * [ML Roadmap 2020](https://whimsical.com/machine-learning-roadmap-2020-CA7f3ykvXpnJ9Az32vYXva)
HF_WnB_PyTorch_Sweeps_Vortrag_PA1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="OsjvyRyVU3_G" colab_type="text"
# # Understanding the data
#
# In this first part, we load the data and perform some initial exploration on it. The main goal of this step is to acquire some basic knowledge about the data, how the various features are distributed, if there are missing values in it and so on.

# + id="_wmNcEoRU3_K" colab_type="code" colab={}
### imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

# load hourly data
hourly_data = pd.read_csv('https://raw.githubusercontent.com/Develop-Packt/Performing-Bike-Sharing-Analysis/master/data/hour.csv')

# + [markdown] id="qDc53XA2U3_P" colab_type="text"
# Check data format, number of missing values in the data and general statistics:

# + id="9QFrElsEU3_Q" colab_type="code" colab={} outputId="6fda5579-55de-478b-a00d-ede2cfecd346"
# print some generic statistics about the data
print(f"Shape of data: {hourly_data.shape}")
print(f"Number of missing values in the data: {hourly_data.isnull().sum().sum()}")

# get statistics on the numerical columns
hourly_data.describe().T

# + [markdown] id="GwZrQdjybjKO" colab_type="text"
# Exercise 01: Preprocessing temporal and weather features

# + id="7gGtv-TzU3_W" colab_type="code" colab={} outputId="8a79860f-0a5c-435a-c84f-859bdebf07d2"
# create a copy of the original data
preprocessed_data = hourly_data.copy()

# transform seasons: integer codes -> season names
seasons_mapping = {1: 'winter', 2: 'spring', 3: 'summer', 4: 'fall'}
preprocessed_data['season'] = preprocessed_data['season'].apply(lambda x: seasons_mapping[x])

# transform yr: 0/1 flag -> calendar year
yr_mapping = {0: 2011, 1: 2012}
preprocessed_data['yr'] = preprocessed_data['yr'].apply(lambda x: yr_mapping[x])

# transform weekday: integer codes -> day names
weekday_mapping = {0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday'}
preprocessed_data['weekday'] = preprocessed_data['weekday'].apply(lambda x: weekday_mapping[x])

# transform weathersit: integer codes -> weather descriptions
weather_mapping = {1: 'clear', 2: 'cloudy', 3: 'light_rain_snow', 4: 'heavy_rain_snow'}
preprocessed_data['weathersit'] = preprocessed_data['weathersit'].apply(lambda x: weather_mapping[x])

# transform hum and windspeed back to their original scales:
# hum is stored as a 0-1 fraction (scaled to percent here); the factor 67 is
# presumably the dataset's maximum windspeed — TODO confirm with the dataset docs.
preprocessed_data['hum'] = preprocessed_data['hum']*100
preprocessed_data['windspeed'] = preprocessed_data['windspeed']*67

# visualize preprocessed columns
cols = ['season', 'yr', 'weekday', 'weathersit', 'hum', 'windspeed']
preprocessed_data[cols].sample(10, random_state=123)
Exercise01/Exercise01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import matplotlib.pyplot as plt import matplotlib.patches as mpatches import math import operator import numpy as np # + deletable=true editable=true # Loading all results (with and without PorterStemer) and relevants f_resultados = './NoStemmer/out/resultados_noStemmer.csv' f_resultados_ps = './PorterStemmer/out/resultados_PorterStemmer.csv' f_esperados = './NoStemmer/out/esperados_noStemmer.csv' queries = {} # without PorterStemer for line in open(f_resultados, 'r'): if line.lower() != 'querynumber;[rank, document, similarity]\n': line = line.rstrip('\n') query = int(line.split(';')[0]) result = line.split(';')[1].lstrip('[').rstrip(']')\ .replace(' ', '').split(',') if query not in queries: queries[query] = {} if int(result[1]) not in queries[query]: queries[query][int(result[1])] = float(result[2]) queries_ps = {} # with PorterStemer for line in open(f_resultados_ps, 'r'): if line.lower() != 'querynumber;[rank, document, similarity]\n': line = line.rstrip('\n') query = int(line.split(';')[0]) result = line.split(';')[1].lstrip('[').rstrip(']')\ .replace(' ', '').split(',') if query not in queries_ps: queries_ps[query] = {} if int(result[1]) not in queries_ps[query]: queries_ps[query][int(result[1])] = float(result[2]) esperados = {} # Relevants for line in open(f_esperados, 'r'): if line.lower() != 'querynumber;docnumber;docvotes\n': line = line.rstrip('\n') if int(line.split(';')[0]) not in esperados: esperados[int(line.split(';')[0])] = {} if int(line.split(';')[1]) not in esperados[int(line.split(';')[0])]: esperados[int(line.split(';')[0])][int(line.split(';')[1])] = \ int(line.split(';')[2]) # + deletable=true editable=true def relevant(doc, esperados): if doc in esperados: return esperados[doc] else: 
return 0 def true_positive(a, b): """ Return quantity of TP - True Positives What is in A and B being A the set of Positive prediction and B the set of Actual Positive """ tp = 0 for item in a: if item in b: tp += 1 return tp def false_positive(a, b): """ Return quantity FP - False Positives What is in A and not in B being A the set of Positive prediction and B the set of Actual Positive """ fp = 0 for item in a: if item not in b: fp += 1 return fp def num_docs(a): """ Return a dict with all the documents returned in all queries """ full_prediction_set = {} for item in a: for doc in a[item]: if doc not in full_prediction_set: full_prediction_set[doc] = 0 full_prediction_set[doc] += 1 return full_prediction_set def true_negative(a, b, full_set): """ Return quantity TN - True Negative What is not in A and is not in B but is in full_set being A the set of Positive prediction B the set of Actual Positive and full_set the set of all documents in all search results """ tn = 0 neg = {} full_prediction_set = num_docs(full_set) for item in full_prediction_set: if item not in b: neg[item] = 1 for item in neg: if item not in a: tn += 1 return tn def false_negative(a, b): """ Return quantity FN - False Negative What is not in A and is in B being A the set of Positive prediction and B the set of Actual Positive """ fn = 0 for item in b: if item not in a: fn += 1 return fn def get_recall(results, relevants, max_rank): count = 0 considered_results = [] for doc in results: count += 1 if count <= max_rank: considered_results.append(doc) else: break # Evaluating Recall TP = true_positive(considered_results, relevants) recall = TP / len(relevants) return recall def get_precision(results, relevants, max_rank): count = 0 considered_results = [] for doc in results: count += 1 if count <= max_rank: considered_results.append(doc) else: break # Evaluating Precision TP = true_positive(considered_results, relevants) if len(considered_results) > 0: precision = TP / len(considered_results) 
else: precision = 0.0 return precision def interpolation(queries, recall_levels, esperados): """ Returns a dict({<Recall Level>: <Mean Precision>}) with one precision value (mean) for each recall level """ # Evaluate precision for each query within all recall levels precision = {} for level in range(0, len(recall_levels)): recall_level = recall_levels[level] precision[recall_level] = {} for query in queries: max_results = int(len(esperados[query]) * recall_level) precision[recall_level][query] = get_precision(queries[query], esperados[query], max_results) # count = 0 # results = [] # for doc in queries[query]: # count += 1 # if count <= max_results: # results.append(doc) # else: # break # TP = true_positive(results, esperados[query]) # if len(results) > 0: # precision[recall_level][query] = TP / len(results) # else: # precision[recall_level][query] = 0 # print(precision) # Convert all zero precision into the maximum precision on the greater # levels for query in queries: for level in range(0, len(recall_levels)): if precision[recall_levels[level]][query] == 0: for r in range(level, len(recall_levels)): if precision[recall_levels[r]][query] > \ precision[recall_levels[level]][query]: precision[recall_levels[level]][ query] = precision[recall_levels[r]][query] # print(precision) # Evaluate the mean of all query precisions of a given recall level avg_precision = {} for level in recall_levels: avg_precision[level] = 0 for query in queries: avg_precision[level] += precision[level][query] avg_precision[level] /= len(queries) # print(avg_precision) return avg_precision def precision_at(queries, esperados, rank): precision = {} for query in queries: precision[query] = get_precision(queries[query], esperados[query], rank) avg_precision = 0 for query in queries: avg_precision += precision[query] avg_precision /= len(queries) return avg_precision def f1score_at(queries, esperados, recall_level): f1 = {} for query in queries: max_rank = int(len(esperados[query]) * 
recall_level) precision = get_precision(queries[query], esperados[query], max_rank) recall = get_recall(queries[query], esperados[query], max_rank) # Evaluating F1-score if (precision + recall) > 0: f1[query] = 2 * (precision * recall) / (precision + recall) else: f1[query] = 0 return f1 def avg(list): mean, count = 0, 0 for i in list: count += 1 mean += list[i] mean /= count return mean # Mean Reciprocal Rank def mrr(queries, esperados): ReciprocalRank = {} for query in queries: rank = 0 results = [] for doc in queries[query]: rank += 1 if doc in esperados[query]: break ReciprocalRank[query] = 1 / rank mean_ReciprocalRank = 0 for query in queries: mean_ReciprocalRank += ReciprocalRank[query] mean_ReciprocalRank /= len(queries) return mean_ReciprocalRank # Cumulative Gain (CG) def cg(queries, esperados): CumulativeGain = {} for query in queries: cc = 0 rank = 0 results = [] for doc in queries[query]: rank += 1 if doc in esperados[query]: cc += esperados[query][doc] CumulativeGain[query] = cc mean_CumulativeGain = 0 for query in queries: mean_CumulativeGain += CumulativeGain[query] mean_CumulativeGain /= len(queries) return (mean_CumulativeGain, CumulativeGain) # Discounted Cumulative Gain (DCG) def dcg(queries, esperados): DiscountedCumulativeGain = {} for query in queries: dcg = 0 rank = 0 results = [] for doc in queries[query]: rank += 1 if doc in esperados[query]: dcg += esperados[query][doc] / (math.log2(rank + 1)) DiscountedCumulativeGain[query] = dcg mean_DiscountedCumulativeGain = 0 for query in queries: mean_DiscountedCumulativeGain += DiscountedCumulativeGain[query] mean_DiscountedCumulativeGain /= len(queries) return (mean_DiscountedCumulativeGain, DiscountedCumulativeGain) # Alternative Discounted Cumulative Gain (aDCG) def Alt_dcg(queries, esperados): DiscountedCumulativeGain = {} for query in queries: dcg = 0 rank = 0 results = [] for doc in queries[query]: rank += 1 if doc in esperados[query]: dcg += ((2 ^ esperados[query][doc])-1) / (math.log2(rank + 
1)) DiscountedCumulativeGain[query] = dcg mean_DiscountedCumulativeGain = 0 for query in queries: mean_DiscountedCumulativeGain += DiscountedCumulativeGain[query] mean_DiscountedCumulativeGain /= len(queries) return (mean_DiscountedCumulativeGain, DiscountedCumulativeGain) # Normalized Alternative Discounted Cumulative Gain (nDCG) def nAlt_dcg(queries, esperados): nDiscountedCumulativeGain = {} for query in queries: idcg = 0 esperados_sorted = sorted(esperados[query].items(), key = operator.itemgetter(1), reverse = True) for i in range(0, len(esperados_sorted)): rank = i + 1 idcg += ((2 ^ esperados_sorted[i][1])-1) / (math.log2(rank + 1)) dcg = 0 rank = 0 for doc in queries[query]: rank += 1 if doc in esperados[query]: dcg += ((2 ^ esperados[query][doc])-1) / (math.log2(rank + 1)) nDiscountedCumulativeGain[query] = dcg / idcg mean_nDiscountedCumulativeGain = 0 for query in queries: mean_nDiscountedCumulativeGain += nDiscountedCumulativeGain[query] mean_nDiscountedCumulativeGain /= len(queries) return (mean_nDiscountedCumulativeGain, nDiscountedCumulativeGain) # bpref def bpref_measure(queries, esperados): """ :param queries: list of list containing ranked returned documents by query :param esperados: list of list containing relevant documents by query :return: list of bpref value for each query Implements: bpref = (1/R) * sum(1 - |n ranked higher than r|/R) """ bpref = {} for query in queries: R = len(esperados[query]) # the number of judged relevant documents N = 0 # the count of irrelevants until one relevant S = 0 # the sum of (1 - count/min(R, N)) for doc in esperados[query]: if doc in queries[query]: S += (1 - N / R) else: N += 1 bpref[query] = S / R return bpref # + deletable=true editable=true # Gráfico de precisão em 11 níveis de revocação # recall_levels = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, # 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0] recall_levels = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] noStemmerRP = 
interpolation(queries, recall_levels, esperados) PorterStemmerRP = interpolation(queries_ps, recall_levels, esperados) plt.figure(1) plt.plot(*zip(*sorted(noStemmerRP.items())), label = 'without PorterStemer') plt.plot(*zip(*sorted(PorterStemmerRP.items())), label = 'with PorterStemer') plt.xlabel('Recall level') plt.ylabel('Average Precision') plt.title('Recall-Precision 11 points curve (all)') plt.legend(loc = 'best') plt.show() # + deletable=true editable=true # F1-Score noStemmerF1 = f1score_at(queries, esperados, 0.7) PorterStemmerF1 = f1score_at(queries_ps, esperados, 0.7) print('F1-score @ 70% recall without PorterStemer: ' + str(avg(noStemmerF1))) print('F1-score @ 70% recall with PorterStemer: ' + str(avg(PorterStemmerF1))) plt.figure(2) plt.bar(*zip(*sorted(noStemmerF1.items())), label = 'without PorterStemer') plt.bar(*zip(*sorted(PorterStemmerF1.items())), label = 'with PorterStemer') plt.xlabel('Queries') plt.ylabel('F1-score') plt.title('F1-score @ 70% recall') plt.legend(loc = 'best') plt.show() # + deletable=true editable=true # MAP - Mean Average Precision noStemmerMAP = avg(noStemmerRP) PorterStemmerMAP = avg(PorterStemmerRP) print('MAP without PorterStemer: ' + str(noStemmerMAP)) print('MAP with PorterStemer: ' + str(PorterStemmerMAP)) # + deletable=true editable=true # P@5 & P@10 noStemmerP_at_5 = precision_at(queries, esperados, 5) print('Precision @ 5 without PorterStemer: ' + str(noStemmerP_at_5)) noStemmerP_at_10 = precision_at(queries, esperados, 10) print('Precision @ 10 without PorterStemer: ' + str(noStemmerP_at_10)) PorterStemmer_at_5 = precision_at(queries_ps, esperados, 5) print('Precision @ 5 with PorterStemer: ' + str(PorterStemmer_at_5)) PorterStemmer_at_10 = precision_at(queries_ps, esperados, 10) print('Precision @ 10 with PorterStemer: ' + str(PorterStemmer_at_10)) # + deletable=true editable=true # Gráfico RPA/B RP_AB = {} for query in queries: max_rank = len(esperados[query]) noStemmerP = get_precision(queries[query], 
esperados[query], max_rank) PorterStemmerP = get_precision(queries_ps[query], esperados[query], max_rank) RP_AB[query] = PorterStemmerP - noStemmerP plt.figure(3) plt.gca().yaxis.grid() plt.gca().xaxis.grid() rect = plt.bar(*zip(*sorted(RP_AB.items())), align='center') plt.yticks(np.arange(-0.4, 0.5, 0.1)) plt.xlabel('Query') plt.ylabel('Recall-Precision A/B') plt.title('Recall-Precision A/B by Query') a_patch = mpatches.Patch(label='Recall @ 100%') b_patch = mpatches.Patch(label='PorterStemmer - noStemmer') plt.legend(handles=[a_patch, b_patch], loc = 'best') # plt.legend(loc = 'best') plt.show() # + deletable=true editable=true # MRR - Mean Reciprocal Rank noStemmerMRR = mrr(queries, esperados) print('MRR without Porter Stemmer: ' + str(noStemmerMRR)) PorterStemmerMRR = mrr(queries_ps, esperados) print('MRR with Porter Stemmer: ' + str(PorterStemmerMRR)) # + deletable=true editable=true # Normalized DCG - Discounted Cumulative Gain noPorter_nDCG = nAlt_dcg(queries, esperados) print('Mean Normalized Discounted Cumulative Gain without PorterStemmer = ' + str(noPorter_nDCG[0])) PorterStemmer_nDCG = nAlt_dcg(queries_ps, esperados) print('Mean Normalized Discounted Cumulative Gain with PorterStemmer = ' + str(PorterStemmer_nDCG[0])) plt.figure(4) plt.bar(*zip(*sorted(noPorter_nDCG[1].items())), label = 'without PorterStemer') plt.bar(*zip(*sorted(PorterStemmer_nDCG[1].items())), label = 'with PorterStemer') plt.xlabel('Queries') plt.ylabel('Normalized Discounted Cumulative Gain') plt.title('Normalized Discounted Cumulative Gain by Query') plt.legend(loc = 'best') plt.show() # + deletable=true editable=true # bpref noStemmer_bpref = bpref_measure(queries, esperados) PorterStemmer_bpref = bpref_measure(queries_ps, esperados) plt.figure(5) plt.bar(*zip(*sorted(noStemmer_bpref.items())), label = 'without PorterStemer') plt.bar(*zip(*sorted(PorterStemmer_bpref.items())), label = 'with PorterStemer') plt.plot(*zip(*sorted(noStemmer_bpref.items())), label = 'without 
PorterStemer') plt.plot(*zip(*sorted(PorterStemmer_bpref.items())), label = 'with PorterStemer') plt.xlabel('Queries') plt.ylabel('bpref') plt.title('bpref measure by Query') plt.legend(loc = 'best') plt.show() # -
GitHub/evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Objectives" data-toc-modified-id="Objectives-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Objectives</a></span></li><li><span><a href="#Intro" data-toc-modified-id="Intro-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Intro</a></span><ul class="toc-item"><li><span><a href="#Two-Types" data-toc-modified-id="Two-Types-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Two Types</a></span></li></ul></li><li><span><a href="#Create-Some-Noisy-Data" data-toc-modified-id="Create-Some-Noisy-Data-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Create Some Noisy Data</a></span></li><li><span><a href="#AdaBoost-(Adaptive-Boosting)" data-toc-modified-id="AdaBoost-(Adaptive-Boosting)-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>AdaBoost (Adaptive Boosting)</a></span><ul class="toc-item"><li><span><a href="#Algorithm" data-toc-modified-id="Algorithm-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Algorithm</a></span></li><li><span><a href="#Voting" data-toc-modified-id="Voting-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Voting</a></span></li><li><span><a href="#AdaBoost-in-Scikit-Learn" data-toc-modified-id="AdaBoost-in-Scikit-Learn-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>AdaBoost in Scikit-Learn</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Hyperparameters" data-toc-modified-id="Hyperparameters-4.3.0.1"><span class="toc-item-num">4.3.0.1&nbsp;&nbsp;</span>Hyperparameters</a></span></li></ul></li><li><span><a href="#Galaxy-Data" data-toc-modified-id="Galaxy-Data-4.3.1"><span class="toc-item-num">4.3.1&nbsp;&nbsp;</span>Galaxy Data</a></span><ul 
class="toc-item"><li><span><a href="#Hyperparameter-Tuning" data-toc-modified-id="Hyperparameter-Tuning-4.3.1.1"><span class="toc-item-num">4.3.1.1&nbsp;&nbsp;</span>Hyperparameter Tuning</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Gradient-Boosting" data-toc-modified-id="Gradient-Boosting-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Gradient Boosting</a></span><ul class="toc-item"><li><span><a href="#Algorithm" data-toc-modified-id="Algorithm-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Algorithm</a></span></li><li><span><a href="#Example-of-Iterative-Steps" data-toc-modified-id="Example-of-Iterative-Steps-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Example of Iterative Steps</a></span><ul class="toc-item"><li><span><a href="#Recall-our-noisy-data-from-earlier" data-toc-modified-id="Recall-our-noisy-data-from-earlier-5.2.1"><span class="toc-item-num">5.2.1&nbsp;&nbsp;</span>Recall our noisy data from earlier</a></span></li><li><span><a href="#Train-iteratively-on-the-residuals-of-its-predecessor" data-toc-modified-id="Train-iteratively-on-the-residuals-of-its-predecessor-5.2.2"><span class="toc-item-num">5.2.2&nbsp;&nbsp;</span>Train iteratively on the residuals of its predecessor</a></span></li><li><span><a href="#Observe-how-the-regressor-gets-better" data-toc-modified-id="Observe-how-the-regressor-gets-better-5.2.3"><span class="toc-item-num">5.2.3&nbsp;&nbsp;</span>Observe how the regressor gets better</a></span></li><li><span><a href="#Using-SciKit-learn's-Gradient-Boosting" data-toc-modified-id="Using-SciKit-learn's-Gradient-Boosting-5.2.4"><span class="toc-item-num">5.2.4&nbsp;&nbsp;</span>Using SciKit-learn's Gradient Boosting</a></span></li></ul></li><li><span><a href="#Comparing-gradient-boosting-with-many-estimators" data-toc-modified-id="Comparing-gradient-boosting-with-many-estimators-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Comparing gradient boosting with many 
estimators</a></span></li></ul></li><li><span><a href="#XGBoost" data-toc-modified-id="XGBoost-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>XGBoost</a></span><ul class="toc-item"><li><span><a href="#XGBoost-Regression" data-toc-modified-id="XGBoost-Regression-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>XGBoost Regression</a></span></li></ul></li><li><span><a href="#Level-Up:-Regression-or-Classification?" data-toc-modified-id="Level-Up:-Regression-or-Classification?-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Level Up: Regression or Classification?</a></span><ul class="toc-item"><li><span><a href="#Adaboost-Classification" data-toc-modified-id="Adaboost-Classification-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Adaboost Classification</a></span></li><li><span><a href="#GradientBoosting" data-toc-modified-id="GradientBoosting-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>GradientBoosting</a></span></li><li><span><a href="#XGBoost-Classification" data-toc-modified-id="XGBoost-Classification-7.3"><span class="toc-item-num">7.3&nbsp;&nbsp;</span>XGBoost Classification</a></span></li></ul></li></ul></div> # - # May need to install this first # !conda install -c anaconda py-xgboost # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl seed = 42 np.random.seed(seed) from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, \ AdaBoostClassifier, GradientBoostingClassifier from sklearn.tree import DecisionTreeRegressor import xgboost # You may need to install this! 
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import precision_score, recall_score, confusion_matrix # %matplotlib inline # + [markdown] heading_collapsed=true # # Objectives # + [markdown] hidden=true # - Describe boosting algorithms # - Implement boosting models with `sklearn` # - Implement boosting models with `XGBoost` # + [markdown] heading_collapsed=true # # Intro # + [markdown] hidden=true # One of the problems with using single decision trees and random forests is that, once I make a split, I can't go back and consider how another feature varies across the whole dataset. But suppose I were to consider **my tree's errors**. The fundamental idea of ***boosting*** is to start with a weak learner and then to use information about its errors to build a new model that can supplement the original model. # + [markdown] heading_collapsed=true hidden=true # ## Two Types # + [markdown] hidden=true # The two main types of boosting available in Scikit-Learn are adaptive boosting (AdaBoostClassifier, AdaBoostRegressor) and gradient boosting (GradientBoostingClassifier, GradientBoostingRegressor). # # Again, the fundamental idea of boosting is to use a sequence of **weak** learners to build a model. Though the individual learners are weak, the idea is to train iteratively in order to produce a better predictor. More specifically, the first learner will be trained on the data as it stands, but future learners will be trained on modified versions of the data. The point of the modifications is to highlight the "hard-to-predict-accurately" portions of the data. 
# + [markdown] heading_collapsed=true
# # Create Some Noisy Data

# + hidden=true
# Quadratic signal plus Gaussian noise: y = 3*x^2 + eps.
n = 200
X = np.random.rand(n, 1) - 0.5
y = 3 * X[:, 0] ** 2 + 0.05 * np.random.randn(n)
plt.scatter(X, y, alpha=0.3)

# + hidden=true
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)

# + hidden=true
# Train split on the left, test split on the right.
f, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4))
ax0.scatter(X_train, y_train, alpha=0.3)
ax1.scatter(X_test, y_test, alpha=0.3)

# + [markdown] heading_collapsed=true
# # AdaBoost (Adaptive Boosting)

# + [markdown] hidden=true
# AdaBoost iteratively adapts two related series of weights: one on the
# datapoints (misclassified points are up-weighted so later learners focus on
# them) and one on the learners (learners that predict better -- especially on
# the hard points -- get more say in the final vote). With the two classes
# encoded as 1 and -1, in a nutshell:
#   1. Train a weak learner.
#   2. Calculate its error epsilon.
#   3. Weight the learner: theta = (1/2) * ln((1 - epsilon) / epsilon).
#      theta CAN be negative -- a classifier worse than chance.
#   4. Update the datapoint weights: w_{n+1} = w_n * (e^{+/-theta} / scaler),
#      using +theta for incorrect predictions and -theta for correct ones.
# More detail:
# https://towardsdatascience.com/boosting-algorithm-adaboost-b6737a9ee60c

# + [markdown] heading_collapsed=true hidden=true
# ## Algorithm

# + [markdown] hidden=true
# - Train model
# - Inflate errors (errors gain weight to make it a 50-50)
# - Retrain model using the reweighted errors (repeat)

# + [markdown] hidden=true
# ![](images/adaboost.png)

# + [markdown] heading_collapsed=true hidden=true
# ## Voting

# + [markdown] hidden=true
# Combine weak learners: each contributes its calculated y per crossed area
# (negative for the "other" class) and the contributions are summed.

# + [markdown] heading_collapsed=true hidden=true
# ## AdaBoost in Scikit-Learn

# + [markdown] hidden=true
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html

# + hidden=true
# Out-of-the-box AdaBoost regressor as a baseline.
model = AdaBoostRegressor()
model.fit(X_train, y_train)
model.predict(X_train)
model.score(X_test, y_test)

# + [markdown] heading_collapsed=true hidden=true
# #### Hyperparameters

# + [markdown] hidden=true
# ```python
# model = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth=4), n_estimators = 5)
# ```

# + [markdown] hidden=true
# `base_estimator`: the model used for the weak learners (remember to import
# whichever model you pick). `n_estimators`: the maximum number of weak
# learners used.

# + hidden=true
# Same family, now with explicit weak-learner depth and ensemble size.
model = AdaBoostRegressor(
    base_estimator=DecisionTreeRegressor(max_depth=6),
    n_estimators=200,
    random_state=27,
)
model.fit(X_train, y_train)
model.score(X_test, y_test)

# + [markdown] heading_collapsed=true hidden=true
# ### Galaxy Data

# + hidden=true
galaxies = pd.read_csv('COMBO17.csv')
galaxies.head()

# + [markdown] hidden=true
# Galaxy dataset: Mcz and MCzml both measure redshift; Mcz is usually
# considered the better measure, so it is the target. Most other columns are
# magnitude measurements. For more on the dataset, see
# https://astrostatistics.psu.edu/datasets/COMBO17.html

# + hidden=true
galaxies.columns

# + hidden=true
galaxies.isnull().sum().sum()

# + hidden=true
galaxies.info()

# + hidden=true
galaxies = galaxies.dropna()

# + [markdown] hidden=true
# Collect the columns that correlate strongly with Mcz, our target:

# + hidden=true
preds = []
for ind in galaxies.corr()['Mcz'].index:
    if abs(galaxies.corr()['Mcz'][ind]) > 0.5:
        preds.append(ind)

# + hidden=true
galaxies[preds].corr()

# + [markdown] hidden=true
# These magnitude columns are highly correlated with one another, so try a
# simple model with just S280MAG -- it has the highest correlation with Mcz.

# + hidden=true
galaxies_x = galaxies['S280MAG']
galaxies_y = galaxies['Mcz']

# + [markdown] hidden=true
# With a single predictor we can visualize the correlation with the target;
# reshape the column for modeling purposes.

# + hidden=true
galaxies_x_rev = galaxies_x.values.reshape(-1, 1)

# + hidden=true
mpl.pyplot.scatter(galaxies_x_rev, galaxies_y, alpha=0.1);

# + hidden=true
galaxies_x_train, galaxies_x_test, galaxies_y_train, galaxies_y_test = \
    train_test_split(galaxies_x_rev, galaxies_y, random_state=42)

# + hidden=true
abr = AdaBoostRegressor(random_state=42)
abr.fit(galaxies_x_train, galaxies_y_train)

# + hidden=true
cross_val_score(abr, galaxies_x_train, galaxies_y_train, cv=5)

# + [markdown] heading_collapsed=true hidden=true
# #### Hyperparameter Tuning

# + [markdown] hidden=true
# Can we do better with different hyperparameter values?

# + hidden=true
gs = GridSearchCV(
    estimator=abr,
    param_grid={
        'n_estimators': [25, 50, 100],
        'loss': ['linear', 'square'],
    },
    cv=5,
)

# + hidden=true
gs.fit(galaxies_x_train, galaxies_y_train)

# + hidden=true
gs.best_params_

# + [markdown] heading_collapsed=true
# # Gradient Boosting

# + [markdown] hidden=true
# > Use gradient descent to improve the model
#
# ![](images/gradient_boosting_residuals.png)

# + [markdown] hidden=true
# Gradient boosting trains each new learner on the residuals of the ensemble
# built so far: Model n+1 focuses on where Model n was most off the mark, and
# the residuals shrink as training repeats. Schematically:
#   Model 0 is very simple, perhaps just the mean: y0_hat = y_bar.
#   Model 1 = Model 0 + a model fitted to Model 0's residuals.
#   Model 2 = Model 0 + the residual model of Model 0 + the residual model of
#   Model 1, and so on.

# + [markdown] hidden=true
# For classification, the "gradient" acts on the class probabilities:
# https://sefiks.com/2018/10/29/a-step-by-step-gradient-boosting-example-for-classification/
# Why "gradient" boosting? Fitting a learner to the residuals is equivalent
# to fitting it to the derivative of the loss function:
# https://www.ritchievink.com/blog/2018/11/19/algorithm-breakdown-why-do-we-call-it-gradient-boosting/
# #

# + [markdown] heading_collapsed=true hidden=true
# ## Algorithm

# + [markdown] hidden=true
# Use mean squared error (MSE) and minimize it -- done by gradient descent --
# using the pattern left in the residuals to create an even better model:
# 1. Fit a model to the data, F1(x) = y
# 2. Fit a model to the residuals, h1(x) = y - F1(x)
# 3. Create a new model, F2(x) = F1(x) + h1(x)
# 4. Repeat

# + [markdown] heading_collapsed=true hidden=true
# ## Example of Iterative Steps

# + [markdown] hidden=true
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html

# + [markdown] hidden=true
# > Parts adapted from
# > https://github.com/ageron/handson-ml/blob/master/07_ensemble_learning_and_random_forests.ipynb

# + [markdown] heading_collapsed=true hidden=true
# ### Recall our noisy data from earlier

# + hidden=true
plt.scatter(X, y, alpha=0.3)

# + hidden=true
f, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4))
ax0.scatter(X_train, y_train, alpha=0.3)
ax1.scatter(X_test, y_test, alpha=0.3)

# + hidden=true
f, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(12, 4))
ax0.scatter(X, y, alpha=0.3)
ax1.scatter(X_train, y_train, alpha=0.3)
ax2.scatter(X_test, y_test, alpha=0.3)

# + [markdown] heading_collapsed=true hidden=true
# ### Train iteratively on the residuals of its predecessor

# + hidden=true
# First iteration
tree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=seed)
tree_reg1.fit(X_train, y_train)

# + hidden=true
# Second iteration: fit the next tree to the current residuals.
y2 = y_train - tree_reg1.predict(X_train)
y2

# + hidden=true
tree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=seed)
tree_reg2.fit(X_train, y2)

# + [markdown] hidden=true
# Let's make a function that will repeat this process for us

# + hidden=true
def my_gradient_boosting_regressor(n_estimators, X, y, random_state=27):
    """Hand-rolled gradient boosting: fit `n_estimators` shallow trees, each
    trained on the residuals left by its predecessors.

    :param n_estimators: number of weak learners to train
    :param X: training inputs
    :param y: training target
    :param random_state: seed passed to every tree
    :return: (list of fitted trees, list of residual series; the residual
              list starts with the original y, so it has n_estimators + 1
              entries)
    """
    # Save the iteratively trained models and residuals
    trained_weak_learners = []
    past_residuals = []
    # Initial conditions: the first "residual" is the raw target.
    new_y = y
    past_residuals.append(new_y)
    # Iteratively train model
    for n in range(n_estimators):
        # Each tree fits whatever the ensemble so far failed to explain.
        new_model = DecisionTreeRegressor(max_depth=2, random_state=random_state)
        new_model.fit(X, new_y)
        # Find the new residuals (used to train the next model)
        new_y = new_y - new_model.predict(X)
        # Save the (trained) model and the new residuals
        trained_weak_learners.append(new_model)
        past_residuals.append(new_y)
    return trained_weak_learners, past_residuals

# + hidden=true
m, ys = my_gradient_boosting_regressor(3, X_train, y_train)

# + [markdown] heading_collapsed=true hidden=true
# ### Observe how the regressor gets better

# + hidden=true
def plot_preds(regressors, X, y, axes, label=None, style='r-',
               data_style='b.', data_label=None, ax=None):
    """Plot the data and the summed predictions of `regressors` on `ax`.

    :param axes: [xmin, xmax, ymin, ymax]; only the x range is used here.
    NOTE(review): `ax` defaults to None but is dereferenced unconditionally,
    so callers must always pass an axis -- confirm before relying on the
    default.
    """
    x1 = np.linspace(axes[0], axes[1], 500)
    y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)
    ax.plot(X[:, 0], y, data_style, label=data_label)
    ax.plot(x1, y_pred, style, linewidth=2, label=label)
    if label or data_label:
        ax.legend(loc='upper center')

def gradient_boost_and_plot(n_estimators, X, y):
    """Train a hand-rolled boosted ensemble and plot, per round, the residuals
    the round's tree fits (left column) and the growing ensemble's fit (right
    column).

    :return: (figure, list of fitted trees, list of residual series)
    """
    fig_size = (16, 4 * n_estimators)
    # BUG FIX: the original hard-coded 16 here, ignoring the n_estimators
    # argument and training unused extra trees.
    model_list, resid_list = my_gradient_boosting_regressor(n_estimators, X, y)
    f, axes = plt.subplots(nrows=n_estimators, ncols=2, figsize=fig_size)
    base_label = 'h(x_1) = '
    iterative_label = 'y'
    for i in range(n_estimators):
        # Next set of residuals
        y = resid_list[i]
        # Must be a list of one item
        past_model = [model_list[i]]
        # Includes current model
        past_models = model_list[:(i + 1)]
        # New labels
        new_label = f'h_{i}(x_{i})'
        base_label = f'{base_label} + {new_label}'
        plot_preds(
            past_model, X, y,
            axes=[-0.5, 0.5, -0.1, 0.8],
            label=f'${iterative_label}$',
            style="g-",
            data_label='Training set',
            ax=axes[i][0]
        )
        axes[i][0].set_title('Residuals and tree predictions')
        plot_preds(
            past_models, X, resid_list[0],
            axes=[-0.5, 0.5, -0.1, 0.8],
            label=f'${base_label}$',
            data_label='Training set',
            ax=axes[i][1]
        )
        axes[i][1].set_ylabel('$y$', fontsize=16, rotation=0)
        axes[i][1].set_title('Ensemble predictions')
        # Update labels for next round
        base_label = f'{base_label} + '
        iterative_label = f'{iterative_label} - {new_label}'
    return f, model_list, resid_list

# + hidden=true
gradient_boost_and_plot(3, X_train, y_train);

# + [markdown] heading_collapsed=true hidden=true
# ### Using SciKit-learn's Gradient Boosting

# + hidden=true
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0)
gbrt.fit(X_train, y_train)

# + [markdown] heading_collapsed=true hidden=true
# ## Comparing gradient boosting with many estimators

# + hidden=true
# More estimators
gbrt_slow = GradientBoostingRegressor(max_depth=2, n_estimators=100,
                                      learning_rate=0.1, random_state=seed)
gbrt_slow.fit(X_train, y_train)

# Even more estimators
gbrt_slower = GradientBoostingRegressor(max_depth=2, n_estimators=1000,
                                        learning_rate=0.1, random_state=seed)
gbrt_slower.fit(X_train, y_train)

# + hidden=true
f, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(12, 4))
axes = [-0.5, 0.5, -0.1, 0.8]

plot_preds([gbrt], X, y, axes=axes, ax=ax0)
ax0.set_title("learning_rate={}, n_estimators={}".format(gbrt.learning_rate, gbrt.n_estimators), fontsize=14)

plot_preds([gbrt_slow], X, y, axes=axes, ax=ax1)
ax1.set_title("learning_rate={}, n_estimators={}".format(gbrt_slow.learning_rate, gbrt_slow.n_estimators), fontsize=14)

plot_preds([gbrt_slower], X, y, axes=axes, ax=ax2)
ax2.set_title("learning_rate={}, n_estimators={}".format(gbrt_slower.learning_rate, gbrt_slower.n_estimators), fontsize=14)

plt.tight_layout()

# + hidden=true
gbrt.score(X_train, y_train), gbrt.score(X_test, y_test)

# + hidden=true
gbrt_slow.score(X_train, y_train), gbrt_slow.score(X_test, y_test)

# + hidden=true
gbrt_slower.score(X_train, y_train), gbrt_slower.score(X_test, y_test)

# + [markdown] heading_collapsed=true
# # XGBoost

# + [markdown] hidden=true
# From XGBoost's documentation (https://xgboost.readthedocs.io/):
#
# > XGBoost is an optimized distributed gradient boosting library designed to
# > be highly efficient, flexible and portable. It implements machine learning
# > algorithms under the Gradient Boosting framework, provides parallel tree
# > boosting (also known as GBDT, GBM), and runs on major distributed
# > environments (Hadoop, SGE, MPI).

# + [markdown] heading_collapsed=true hidden=true
# ## XGBoost Regression

# + hidden=true
grad_boost = xgboost.XGBRegressor(random_state=42, objective='reg:squarederror')
grad_boost.fit(X, y)

# + hidden=true
cross_val_score(grad_boost, X, y, cv=5)

# + [markdown] heading_collapsed=true
# # Level Up: Regression or Classification?

# + [markdown] hidden=true
# What does my target look like?

# + hidden=true
galaxies['Mcz'].hist();

# + [markdown] hidden=true
# The distribution looks bimodal, so try predicting whether the redshift
# factor is likely to be greater or less than 0.5:

# + hidden=true
galaxies['bool'] = galaxies['Mcz'] > 0.5

# + hidden=true
galaxies.tail()

# + hidden=true
x_train2, x_test2, y_train2, y_test2 = train_test_split(galaxies_x_rev, galaxies['bool'])

# + [markdown] heading_collapsed=true hidden=true
# ## Adaboost Classification

# + hidden=true
abc = AdaBoostClassifier(random_state=42)
abc.fit(x_train2, y_train2)

# + hidden=true
abc.score(x_test2, y_test2)

# + hidden=true
precision_score(y_test2, abc.predict(x_test2))

# + hidden=true
recall_score(y_test2, abc.predict(x_test2))

# + [markdown] heading_collapsed=true hidden=true
# ## GradientBoosting

# + hidden=true
gbc = GradientBoostingClassifier(random_state=42)
gbc.fit(x_train2, y_train2)

# + hidden=true
gbc.score(x_test2, y_test2)

# + hidden=true
precision_score(y_test2, gbc.predict(x_test2))

# + hidden=true
recall_score(y_test2, gbc.predict(x_test2))

# + hidden=true
confusion_matrix(y_test2, gbc.predict(x_test2))

# + [markdown] heading_collapsed=true hidden=true
# ## XGBoost Classification

# + hidden=true
# Same split as the other classifiers; logistic objective for the binary
# "redshift > 0.5" target.
grad_boost_class = xgboost.XGBClassifier(random_state=42, objective='binary:logistic')
grad_boost_class.fit(x_train2, y_train2)

# + hidden=true
grad_boost_class.score(x_test2, y_test2)
Phase_3/ds-boosting-main/boosting_in_sklearn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: information-article
#     language: python
#     name: information-article
# ---

# # License
#
# Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# **DISCLAIMER**: This notebook is not legal compliance advice.

# # Import Libraries and Initialize h2o Cluster

# +
import h2o
import os
import numpy as np
import time
import math
import matplotlib.pyplot as plt
import shap
import pandas as pd

from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.xgboost import H2OXGBoostEstimator
from h2o.grid.grid_search import H2OGridSearch

# init h2o
h2o.init(min_mem_size='30G')
h2o.remove_all()

# set random seed for reproducibility
SEED = 22222
# -

# # Import Processed Simulated Train and Test Datasets

# +
train_path = os.sep.join(['..', 'data', 'output', 'train_simulated_processed.csv'])
train = h2o.import_file(train_path)

test_path = os.sep.join(['..', 'data', 'output', 'test_simulated_processed.csv'])
test = h2o.import_file(test_path)
# -

print(f"Train shape: {train.shape}")
print(f"Test shape: {test.shape}")

train.head()

train.summary()

test.head()

test.summary()

# #Setup Predictors, Response, and Fold Column for Modelling Purposes

# +
# Inputs and output
X = ['binary1', 'binary2', 'cat1_0', 'cat1_1', 'cat1_2', 'cat1_3', 'cat1_4',
     'fried1_std', 'fried2_std', 'fried3_std', 'fried4_std', 'fried5_std']
fold_column = 'fold'
Y = "outcome"

# Convert outcome to categorical
train[Y] = train[Y].asfactor()
test[Y] = test[Y].asfactor()
# -

# Distribution of outcome in train
train[Y].table()

# Distribution of outcome in test
test[Y].table()

# Function to get Shapley contributions
def get_shapley(model, train, X):
    """Return (shap_values, expected_value) for `model` over `train[X]`.

    :param model: fitted H2O model supporting predict_contributions
    :param train: H2OFrame the contributions are computed on
    :param X: list of predictor column names
    :return: (2-D numpy array of per-row, per-feature SHAP values,
              scalar expected value taken from the BiasTerm column)
    """
    # calculate SHAP values using function predict_contributions
    contributions = model.predict_contributions(train[X])
    print("First 5 rows of contributions:\n")
    print(contributions.head(5))
    # convert the H2O Frame to use with shap's visualization functions.
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is
    # the portable equivalent.
    contributions_matrix = contributions.as_data_frame().values
    # shap values are the per-feature columns.
    # NOTE(review): with one-hot encodings the contribution frame can carry
    # more columns than len(X) -- confirm the slice width for such models.
    shap_values = contributions_matrix[:, 0:len(X)]
    # BUG FIX: the expected value is the BiasTerm, the LAST returned column;
    # the original indexed len(X)-1, which is the last *feature* column.
    # Indexing -1 selects the last column regardless of frame width.
    expected_value = contributions_matrix[:, -1].min()
    return shap_values, expected_value

# # Make monotonicity constraints dictionary, which is based on heuristics known about the dataset:
# ## `f1` will be positively constrained to the outcome
# ## `f2` will be positively constrained to the outcome
# ## `f4` will be positively constrained to the outcome
# ## `f5` will be positively constrained to the outcome
# ## `b1` will be positively constrained to the outcome
# ## `b2` will be negatively constrained to the outcome

# Make monotonicity constraints dictionary, based on the data generation
# script: f1 +, f2 +, f3 n/a, f4 +, f5 +, b1 +, b2 -, c1 n/a
constraint = 1
mono_constraints = {feature: constraint for feature in X}
# Unconstrained features: fried3 and all one-hot levels of cat1.
del mono_constraints['fried3_std']
del mono_constraints['cat1_0']
del mono_constraints['cat1_1']
del mono_constraints['cat1_2']
del mono_constraints['cat1_3']
del mono_constraints['cat1_4']
mono_constraints['binary2'] = -1
mono_constraints

# # Build Default GBM with Monotonic Constraints

mgbm_default = H2OGradientBoostingEstimator(
    # monotonicity constraints
    monotone_constraints=mono_constraints,
    seed=SEED,
    categorical_encoding="one_hot_explicit")
mgbm_default.train(x=X, y=Y, training_frame=train, fold_column=fold_column) # # Model Summary mgbm_default # Get the AUC on the test set for MGBM perf_mgbm_default = mgbm_default.model_performance(test) print(perf_mgbm_default.auc()) # Get the AUC on 5 fold CV for MGBM perf_cv_mgbm_default = mgbm_default.model_performance(xval=True) print(perf_cv_mgbm_default.auc()) # # Build Default GBM with NO Monotonic Constraints gbm_default = H2OGradientBoostingEstimator( seed=SEED, categorical_encoding="one_hot_explicit") gbm_default.train(x=X, y=Y, training_frame=train, fold_column=fold_column) # # Model Summary gbm_default # Get the AUC on the test set for the default GBM perf_gbm_default = gbm_default.model_performance(test) print(perf_gbm_default.auc()) # Get the AUC on 5 fold CV for the default GBM perf_cv_gbm_default = gbm_default.model_performance(xval=True) print(perf_cv_gbm_default.auc()) # # Variable Importance of MGBM and GBM # MGBM var importance mgbm_default.varimp_plot() # GBM var importance gbm_default.varimp_plot() # # Shapley Importance for MGBM and GBM # ## Shapley for MGBM shap_values_mgbm_default, expected_value_mgbm_default = get_shapley(mgbm_default, train, X) shap.summary_plot(shap_values_mgbm_default, X, plot_type="bar") # ## Shapley for GBM shap_values_gbm_default, expected_value_gbm_default = get_shapley(gbm_default, train, X) shap.summary_plot(shap_values_gbm_default, X, plot_type="bar") # # Build Default XGBoost with Monotonic Constraints mxgb_default = H2OXGBoostEstimator( # monotonicity constraints monotone_constraints=mono_constraints, seed=SEED) mxgb_default.train(x=X, y=Y, training_frame=train, fold_column=fold_column) # # Model Summary mxgb_default # Get the AUC on the test set for MXGB perf_mxgb_default = mxgb_default.model_performance(test) print(perf_mxgb_default.auc()) # Get the AUC on 5 fold CV for the default MXGB perf_cv_mxgb_default = mxgb_default.model_performance(xval=True) print(perf_cv_mxgb_default.auc()) # # Build Default XGBoost 
with NO Monotonic Constraints # Default XGBoost xgb_default = H2OXGBoostEstimator( seed=SEED) xgb_default.train(x=X, y=Y, training_frame=train, fold_column=fold_column) # # Model Summary xgb_default # Get the AUC on the test set for the default GBM perf_xgb_default = xgb_default.model_performance(test) print(perf_xgb_default.auc()) # Get the AUC on 5 fold CV for the default GBM perf_cv_xgb_default = xgb_default.model_performance(xval=True) print(perf_cv_xgb_default.auc()) # # Variable Importance of MXGB and XGB # MXGB var importance mxgb_default.varimp_plot() # XGB var importance xgb_default.varimp_plot() # # Shapley Importance for MXGB and XGB # ## Shapey for MXGB shap_values_mxgb_default, expected_value_mxgb_default = get_shapley(mxgb_default, train, X) shap.summary_plot(shap_values_mxgb_default, X, plot_type="bar") # ## Shapley for XGB shap_values_xgb_default, expected_value_xgb_default = get_shapley(xgb_default, train, X) shap.summary_plot(shap_values_xgb_default, X, plot_type="bar") # # MGBM Grid Search (Constrained) # + # GBM hyperparameters hyper_params = {'learn_rate': [i * 0.01 for i in range(1, 11)], 'max_depth': list(range(2, 11)), 'sample_rate': [i * 0.1 for i in range(5, 11)], 'col_sample_rate': [i * 0.1 for i in range(1, 11)]} # Search criteria search_criteria = {'strategy': 'RandomDiscrete', 'max_models': 36, 'seed': 1} # - # Train and validate a random grid of GBMs mgbm_grid2 = H2OGridSearch(model=H2OGradientBoostingEstimator(distribution='bernoulli', # monotonicity constraints monotone_constraints=mono_constraints, # more trees is better if the learning rate is small enough # here, use "more than enough" trees - we have early stopping ntrees=10000, # learning rate annealing: learning_rate shrinks by 1% after every tree # (use 1.00 to disable, but then lower the learning_rate) learn_rate_annealing = 0.99, # score every 10 trees to make early stopping reproducible # (it depends on the scoring interval) score_tree_interval = 10, # fix a random number 
generator seed for reproducibility seed = 1234, # early stopping once the validation AUC doesn't improve by at least 0.01% for # 5 consecutive scoring events stopping_rounds = 5, stopping_metric = "AUC", stopping_tolerance = 1e-4), grid_id='mgbm_grid2_simulated', hyper_params=hyper_params, search_criteria=search_criteria) mgbm_grid2.train(x=X, y=Y, training_frame=train, fold_column=fold_column, ntrees=100, seed=SEED) # Get the grid results, sorted by validation AUC mgbm_gridperf2 = mgbm_grid2.get_grid(sort_by='auc', decreasing=True) print(mgbm_gridperf2) # + # Grab the top GBM model, chosen by validation AUC best_mgbm2 = mgbm_gridperf2.models[0] # Now let's evaluate the model performance on a test set # so we get an honest estimate of top model performance best_mgbm_perf2 = best_mgbm2.model_performance(test) best_mgbm_perf2.auc() # - # # Shapley Importance shap_values_mgbm, expected_value_mgbm = get_shapley(best_mgbm2, train, X) shap.summary_plot(shap_values_mgbm, X, plot_type="bar") # + # Save the model model_path = h2o.save_model(model=best_mgbm2, path="../models/MGBM_BEST_GRID_v2_SIMULATED_" + str(time.time()), force=True) print(model_path) # - # # GBM Grid Search (Unconstrained) # Train and validate a random grid of GBMs gbm_grid2 = H2OGridSearch(model=H2OGradientBoostingEstimator(distribution='bernoulli', # more trees is better if the learning rate is small enough # here, use "more than enough" trees - we have early stopping ntrees=10000, # learning rate annealing: learning_rate shrinks by 1% after every tree # (use 1.00 to disable, but then lower the learning_rate) learn_rate_annealing = 0.99, # score every 10 trees to make early stopping reproducible # (it depends on the scoring interval) score_tree_interval = 10, # fix a random number generator seed for reproducibility seed = 1234, # early stopping once the validation AUC doesn't improve by at least 0.01% for # 5 consecutive scoring events stopping_rounds = 5, stopping_metric = "AUC", stopping_tolerance = 
1e-4), grid_id='gbm_grid2_simulated', hyper_params=hyper_params, search_criteria=search_criteria) gbm_grid2.train(x=X, y=Y, training_frame=train, fold_column=fold_column, ntrees=100, seed=SEED) # Get the grid results, sorted by validation AUC gbm_gridperf2 = gbm_grid2.get_grid(sort_by='auc', decreasing=True) print(gbm_gridperf2) # + # Grab the top GBM model, chosen by validation AUC best_gbm2 = gbm_gridperf2.models[0] # Now let's evaluate the model performance on a test set # so we get an honest estimate of top model performance best_gbm_perf2 = best_gbm2.model_performance(test) best_gbm_perf2.auc() # - # # Shapley Importance shap_values_gbm, expected_value_gbm = get_shapley(best_gbm2, train, X) shap.summary_plot(shap_values_gbm, X, plot_type="bar") # + # Save the model model_path = h2o.save_model(model=best_gbm2, path="../models/GBM_BEST_GRID_v2_SIMULATED_" + str(time.time()), force=True) print(model_path) # - # # Repeat Previous but with XGBoost # Train and validate a random grid of XGBoost models mxgb_grid = H2OGridSearch(model=H2OXGBoostEstimator(distribution='bernoulli', # monotonicity constraints monotone_constraints=mono_constraints, # more trees is better if the learning rate is small enough # here, use "more than enough" trees - we have early stopping ntrees=10000, # learning rate annealing: learning_rate shrinks by 1% after every tree # (use 1.00 to disable, but then lower the learning_rate) # learn_rate_annealing = 0.99, # score every 10 trees to make early stopping reproducible # (it depends on the scoring interval) score_tree_interval = 10, # fix a random number generator seed for reproducibility seed = 1234, # early stopping once the validation AUC doesn't improve by at least 0.01% for # 5 consecutive scoring events stopping_rounds = 5, stopping_metric = "AUC", stopping_tolerance = 1e-4), grid_id='mxgb_grid2_simulated', hyper_params=hyper_params, search_criteria=search_criteria) mxgb_grid.train(x=X, y=Y, training_frame=train, fold_column=fold_column, 
ntrees=100, seed=SEED) # Get the grid results, sorted by validation AUC mxgb_gridperf = mxgb_grid.get_grid(sort_by='auc', decreasing=True) print(mxgb_gridperf) # + # Grab the top GBM model, chosen by validation AUC best_mxgb = mxgb_gridperf.models[0] # Now let's evaluate the model performance on a test set # so we get an honest estimate of top model performance best_mxgb_perf = best_mxgb.model_performance(test) best_mxgb_perf.auc() # - # # Shapley Importance shap_values_mxgb, expected_value_mxgb = get_shapley(best_mxgb, train, X) shap.summary_plot(shap_values_mxgb, X, plot_type="bar") # + # Save the model model_path = h2o.save_model(model=best_mxgb, path="../models/MXGB_BEST_GRID_v2_SIMULATED_" + str(time.time()), force=True) print(model_path) # - # Train and validate a random grid of XGBoost models xgb_grid = H2OGridSearch(model=H2OXGBoostEstimator(distribution='bernoulli', # more trees is better if the learning rate is small enough # here, use "more than enough" trees - we have early stopping ntrees=10000, # learning rate annealing: learning_rate shrinks by 1% after every tree # (use 1.00 to disable, but then lower the learning_rate) # learn_rate_annealing = 0.99, # score every 10 trees to make early stopping reproducible # (it depends on the scoring interval) score_tree_interval = 10, # fix a random number generator seed for reproducibility seed = 1234, # early stopping once the validation AUC doesn't improve by at least 0.01% for # 5 consecutive scoring events stopping_rounds = 5, stopping_metric = "AUC", stopping_tolerance = 1e-4), grid_id='xgb_grid2_simulated', hyper_params=hyper_params, search_criteria=search_criteria) xgb_grid.train(x=X, y=Y, training_frame=train, fold_column=fold_column, ntrees=100, seed=SEED) # Get the grid results, sorted by validation AUC xgb_gridperf = xgb_grid.get_grid(sort_by='auc', decreasing=True) print(xgb_gridperf) # + # Grab the top GBM model, chosen by validation AUC best_xgb = xgb_gridperf.models[0] # Now let's evaluate the 
model performance on a test set # so we get an honest estimate of top model performance best_xgb_perf = best_xgb.model_performance(test) best_xgb_perf.auc() # - # # Shapley Importance shap_values_xgb, expected_value_xgb = get_shapley(best_xgb, train, X) shap.summary_plot(shap_values_xgb, X, plot_type="bar") # + # Save the model model_path = h2o.save_model(model=best_xgb, path="../models/XGB_BEST_GRID_v2_SIMULATED_" + str(time.time()), force=True) print(model_path)
notebooks/mgbm_simulated.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # Linear Regression Example

# %matplotlib inline
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
import matplotlib.pyplot as plt  # pyplot directly; the pylab interface is deprecated
from sklearn.preprocessing import PolynomialFeatures


def gen_cb(N, a, alpha):
    """Generate a rotated checkerboard data set.

    Parameters
    ----------
    N : int
        number of points on the checkerboard
    a : float
        width of the checker board (0 < a < 1)
    alpha : float
        rotation of the checkerboard in radians

    Returns
    -------
    data : (N, 2) ndarray
        points drawn uniformly from the unit square (unrotated)
    lab : (N,) ndarray
        class labels in {1, 2} following the rotated checker pattern
    """
    d = np.random.rand(N, 2).T
    # Rotate the points by alpha before assigning labels, so the checker
    # pattern itself is rotated while the returned points stay unrotated.
    d_transformed = np.array([d[0] * np.cos(alpha) - d[1] * np.sin(alpha),
                              d[0] * np.sin(alpha) + d[1] * np.cos(alpha)]).T
    # Checkerboard parity of the rotated coordinates -> labels 1/2
    s = np.ceil(d_transformed[:, 0] / a) + np.floor(d_transformed[:, 1] / a)
    lab = 2 - (s % 2)
    data = d.T
    return data, lab


# +
poly = PolynomialFeatures(degree=25)
lr = LogisticRegression()
lrnl = LogisticRegression()
sz = .5
angle = np.pi / 2  # board rotation; was the hard-coded approximation 3.14159/2

# generate some training data and transform the feature space
X, y = gen_cb(1000, sz, angle)
X_orig = X.copy()
X_new = poly.fit_transform(X)

# plot the original training data
plt.figure()
plt.plot(X_orig[np.where(y == 1)[0], 0], X_orig[np.where(y == 1)[0], 1], 'o')
plt.plot(X_orig[np.where(y == 2)[0], 0], X_orig[np.where(y == 2)[0], 1], 's', c='r')

# build a LR classifier: lr on the raw features, lrnl on degree-25 polynomial features
lrnl.fit(X_new, y)
lr.fit(X_orig, y)

# generate some data to test the model; remember to transform the features
# then classify the new data
Xe, ye = gen_cb(5000, sz, angle)
X_orig = Xe.copy()
X_new = poly.fit_transform(Xe)
yhat = lrnl.predict(X_new)
yhat_lr = lr.predict(X_orig)

# plot the decision boundary of our classifier (linear features)
plt.figure()
plt.plot(X_orig[np.where(yhat_lr == 1)[0], 0], X_orig[np.where(yhat_lr == 1)[0], 1], 'o')
plt.plot(X_orig[np.where(yhat_lr == 2)[0], 0], X_orig[np.where(yhat_lr == 2)[0], 1], 's', c='r')

# plot the decision boundary of our classifier with poly features
plt.figure()
plt.plot(X_orig[np.where(yhat == 1)[0], 0], X_orig[np.where(yhat == 1)[0], 1], 'o')
plt.plot(X_orig[np.where(yhat == 2)[0], 0], X_orig[np.where(yhat == 2)[0], 1], 's', c='r')
# -

# +
from sklearn.datasets import make_moons

# same experiment on the two-moons data: degree-3 polynomial logistic regression
X, y = make_moons(n_samples=1000, noise=0.2, random_state=0)
poly = PolynomialFeatures(degree=3)
X_new = poly.fit_transform(X)
lr = LogisticRegression()
lr.fit(X_new, y)

# training data
plt.figure()
plt.plot(X[np.where(y == 1)[0], 0], X[np.where(y == 1)[0], 1], 'o')
plt.plot(X[np.where(y == 0)[0], 0], X[np.where(y == 0)[0], 1], 's', c='r')
plt.show()

# noisier test data classified with the fitted model
Xte, yte = make_moons(n_samples=1000, noise=0.3, random_state=0)
X_new = poly.fit_transform(Xte)
yhat = lr.predict(X_new)
plt.figure()
plt.plot(Xte[np.where(yhat == 1)[0], 0], Xte[np.where(yhat == 1)[0], 1], 'o')
plt.plot(Xte[np.where(yhat == 0)[0], 0], Xte[np.where(yhat == 0)[0], 1], 's', c='r')
plt.show()
# -
notebooks/LinearRegressionExample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Web scraping in Python
# ### by [<NAME>](http://jasondebacker.com), October 2017
#
# This notebook provides a tutorial and examples showing how to scrape webpages with Python in order to gather data.
#
# ## Beautiful Soup
#
# There are a number of Python packages that are useful for web scraping, but we'll use [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/). This may be the most used package for general web scraping and it allows a lot of flexibility.
#
# Beautiful Soup helps you to gather data by parsing HTML and XML code and returning a "soup" object. The soup object has all of the data in the HTML/XML code that was parsed, but now with some additional structure that allows you to easily pull out particular components of the HTML/XML such as a title, table, image, etc.
#
# Below, we'll apply Beautiful Soup to several examples that illustrate how to pull data from webpages with different structures.
#
# ## Example 1: Wikipedia
#
# Let's scrape the season schedule and results for the 1980 Georgia Bulldogs football team from Wikipedia. The page can be found [here](https://en.wikipedia.org/wiki/1980_Georgia_Bulldogs_football_team). What we are interested in is this table:
#
# ![1980 Georgia Bulldogs Schedule](files/images/UGA1980results.png)
#

# +
# import packages
from bs4 import BeautifulSoup
import urllib.request

# give URL and header
wiki = "https://en.wikipedia.org/wiki/1980_Georgia_Bulldogs_football_team"
header = {'User-Agent': 'Mozilla/5.0'}  # Needed to prevent 403 error on Wikipedia

# Make the request to get served the webpage, "soupify" it
req = urllib.request.Request(wiki, headers=header)
page = urllib.request.urlopen(req)
soup = BeautifulSoup(page, 'lxml')

# what does the soup object contain
print(soup.prettify())
# print(page)
# -

# extract the table by pulling information from the wikitable class
# There's only one table like this here, so that makes it easier
table = soup.find("table", {"class": "wikitable"})
print(table)

# +
# create a dictionary in which to store data
# the keys will be the column names, the values lists
# containing the element in each row in that column
uga_1980 = {'date': [], 'opponent': [], 'rank': [], 'site': [],
            'tv': [], 'result': [], 'attendance': []}

# iterate through the table, pulling out each row
for row in table.findAll("tr"):
    cells = row.findAll("td")  # For each "tr", assign each "td" to a variable.
    # only data rows have exactly 7 cells; header/separator rows are skipped
    # (NOTE: text=True is deprecated in newer BeautifulSoup in favor of
    # string=True — confirm against the installed bs4 version)
    if len(cells) == 7:
        uga_1980['date'].append(cells[0].find(text=True))
        uga_1980['opponent'].append(cells[1].findAll(text=True))
        uga_1980['rank'].append(cells[2].find(text=True))
        uga_1980['site'].append(cells[3].findAll(text=True))
        uga_1980['tv'].append(cells[4].find(text=True))
        uga_1980['result'].append(cells[5].find(text=True))
        uga_1980['attendance'].append(cells[6].find(text=True))
# -

# Look at this dictionary
uga_1980

# +
# put this in a dataframe and format it
import pandas as pd
uga_1980_df = pd.DataFrame(uga_1980)
uga_1980_df
# -

# From here, we can reformat the dates and numeric values into appropriate types as well as change some of the text to drop characters like "[". This will just be working with DataFrames and Python's types.
#
# Instead of this, let's think about how to gather more data. Sticking with this example, let's try to gather all of the results for Georgia Football from 1980 to 2016.
#
# Start by looking for patterns in how the data is presented. The url for the 1980 season is `https://en.wikipedia.org/wiki/1980_Georgia_Bulldogs_football_team`, for the 1981 season: `https://en.wikipedia.org/wiki/1981_Georgia_Bulldogs_football_team`, and for the 2016 season: `https://en.wikipedia.org/wiki/2016_Georgia_Bulldogs_football_team`. So we can see the pattern in how the url updates. This will help us as we scrape multiple years of data.
#
# Looking at each of these pages, we notice some differences. For example, there are more tables on the page for the 2016 season than for the 1980 season. So there may be some work we have to do here, but let's start out by trying to loop over all the seasons, doing what we did above.

# +
# create a dictionary in which to store data
# the keys will be the column names, the values lists
# containing the element in each row in that column
# (the combined DataFrame is built from this dict once, after the loop)
results_dict = {'date': [], 'opponent': [], 'rank': [], 'site': [],
                'tv': [], 'result': [], 'attendance': [], 'year': []}

# Loop over years.
# range() excludes its end point, so 2017 is needed to include the 2016
# season described above (the original used 2016 and stopped at 2015).
for year in range(1980, 2017):
    # give URL and header
    wiki = ("https://en.wikipedia.org/wiki/" + str(year) +
            "_Georgia_Bulldogs_football_team")
    header = {'User-Agent': 'Mozilla/5.0'}  # Needed to prevent 403 error on Wikipedia

    # Make the request to get served the webpage, "soupify" it
    req = urllib.request.Request(wiki, headers=header)
    page = urllib.request.urlopen(req)
    soup = BeautifulSoup(page, 'lxml')

    # extract the table by pulling information from the wikitable class
    # There's only one table like this here, so that makes it easier
    table = soup.find("table", {"class": "wikitable"})

    # iterate through the table, pulling out each row
    for row in table.findAll("tr"):
        cells = row.findAll("td")  # For each "tr", assign each "td" to a variable.
        if len(cells) == 7:
            results_dict['date'].append(cells[0].find(text=True))
            results_dict['opponent'].append(cells[1].findAll(text=True))
            results_dict['rank'].append(cells[2].find(text=True))
            results_dict['site'].append(cells[3].findAll(text=True))
            results_dict['tv'].append(cells[4].find(text=True))
            results_dict['result'].append(cells[5].find(text=True))
            results_dict['attendance'].append(cells[6].find(text=True))
            results_dict['year'].append(year)
    print('Year = ', year)
# -

uga_df = pd.DataFrame(results_dict)
uga_df

import matplotlib.pyplot as plt
# %matplotlib inline
# strip thousands separators so attendance can be numeric
uga_df['attendance'] = uga_df['attendance'].str.replace(",", "").astype(float)
uga_df.plot(y='attendance', kind='bar')

# ## Example 2: Scraping Yellowpages.com
#
# Here we'll grab information from a search of yellowpages.com
#
# ![YP.com Screenshot](files/images/YP_search.png)
#

# +
# import package
import requests

# give url for search of Columbia, SC restaurants
r = requests.get('https://www.yellowpages.com/search?search_terms=restaurant&geo_location_terms=Columbia%2C+SC')
r.content

# parse webpage with BS
soup = BeautifulSoup(r.content, 'lxml')  # note - setting parser to lxml
yellowpages_data = soup.find_all("div", {"class": "info"})
yellowpages_data
# soup.prettify()
# len(yellowpages_data)
# -

# print restaurant names
for item in yellowpages_data:
    try:
        print(item.contents[0].find_all("a", {"class": "business-name"})[0].text)
    except (AttributeError, IndexError):
        # some result cards are ads/empty and have no business-name link
        pass

# Note that these are just results for one page and there are a number of pages of results. We can get those others too. To do that, let's determine the number of pages and go to each page.
#
# A first step is to note the format of the results of additional pages. Here's the url of the first page:
#
# `https://www.yellowpages.com/search?search_terms=restuarant&geo_location_terms=Columbia%2C+SC`
#
# And the second:
#
# `https://www.yellowpages.com/search?search_terms=restuarant&geo_location_terms=Columbia%2C%20SC&page=2`
#
# We can see a pattern. Let's exploit this.

soup

# +
# import
import re

result_string = soup.find('div', attrs={'class': 'pagination'}).text
# NOTE(review): group(1) is parsed with float(); counts above 999 may contain
# commas and would fail here — confirm against a large result set
numResults = float((re.search('found(.*)results', result_string)).group(1))
print("Number of results : ", numResults)
pages = int(numResults/30)  # checked results page to see that 30 displayed per page as default
print('Number of pages = ', pages)

# +
# Now loop over pages of results
# Lots of them, so let's put in list and not print them all
restaurant_list = []
for i in range(1, pages+1):
    if i == 1:
        url = ('https://www.yellowpages.com/search?search_terms=' +
               'restuarant&geo_location_terms=Columbia%2C+SC')
    else:
        url = ('https://www.yellowpages.com/search?search_terms=' +
               'restuarant&geo_location_terms=Columbia%2C%20SC&page=' + str(i))
    r = requests.get(url)
    r.content
    # parse webpage with BS
    soup = BeautifulSoup(r.content, 'lxml')  # note - setting parser to lxml
    yellowpages_data = soup.find_all("div", {"class": "info"})
    # append names to list
    for item in yellowpages_data:
        try:
            restaurant_list.append(item.contents[0].
                                   find_all("a", {"class": "business-name"})[0].text)
        except (AttributeError, IndexError):
            pass
    print("Finished page ", i, " results")
print(len(restaurant_list))
# -

restaurant_list[100:110]
WebData/WebScraping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Zeros of functions
# ***
# In this notebook we will learn about a few methods for finding zeros of
# functions. This is a classic problem that shows up in many areas: we have a
# function $f(x)$ and we want to find the value of $x$ for which $f$ takes a
# given value $y$. In some cases we can simply isolate $x$ and read off the
# answer. In others, $f(x)$ may be very complicated and we need to find the
# answer numerically.
#
# To understand the problem a little better, let us draw a plot. In Python we
# can use the matplotlib and numpy libraries. The former is a plotting library
# proper, while the latter is a scientific-computing library with many
# resources such as vectors, matrices, special functions, random numbers, etc.
# ***

# ### Examples
# ***

# Import the libraries and the magic that renders plots inside the notebook
import numpy
import matplotlib.pyplot as matplot
# %matplotlib inline

# ***
# We define our function $x^2 + 2x + 1$ and the value of $y_0$

# +
def f(x):
    return x*x + 2*x + 1

y0 = 42
# -

# ***
# We want to generate a sequence of equally spaced values of $x$ starting at
# $0$ and ending at $10$, at which we compute the corresponding $y$, producing
# the points that will appear in the plot.

# Generate the points from 0 to 10
x = numpy.linspace(0, 10)
# Evaluate f(x) at those points
y = f(x)

# ***
# Let us inspect the points produced by linspace
print(x)

# ***
# Let us inspect the result for each generated $x$
print(y)

# ***
# Now draw the plot of the $x$ and $y$ points
matplot.plot(x, y)
matplot.title("Busca do zero da função")
matplot.xlabel("Argumentos")
matplot.ylabel("valor de f(x)")
matplot.show()

# ***
# Drawing several lines is easy: just call $plot$ repeatedly. We want to draw
# $f(x)$ together with a horizontal line at the value $y_0$.
matplot.plot(x, y, label='f(x)')
matplot.plot([0, 10], [y0, y0], label='y0')
matplot.title("Ponto de intersecção")
matplot.legend()
matplot.show()

# Visually, the value of $x$ that makes $f(x) = y_0$ lies between 5 and 6.

# ***
# A rather inelegant method is a brute-force search: sweep many values inside
# the interval and look for the one where the difference between $f(x)$ and
# $y_0$ is zero or very close to it. We split the interval into $25$ values
# and look for the $x$ that produces the best result.

# Sweep 25 candidate values
for step in range(25):
    # Advance x by 1/25 = 0.04 at each iteration
    x = 5 + step / 25
    # Report how far f(x) still is from y0
    print("%2d) x = %.2f; f(x) - y0 = %6.3f" % (step, x, f(x) - y0))

# The best result in this list was $x = 5.48$, where $f(x)$ differs from $y_0$
# by only $0.010$. This can of course be improved. One approach would be to
# repeat a similar step on an interval close to $x=5.48$ (perhaps between
# $x=5.44$ and $5.52$), obtain a sharper estimate, and continue until the
# result is satisfactory.
#
# Brute-force search works, but it does not seem very elegant. In fact, this
# method is bad in several respects. First, it requires a large number of
# evaluations of $f(x)$. That may be irrelevant in our case, but there are
# situations in which $f(x)$ involves extremely expensive computations, and a
# large number of calls to $f(x)$ would mean a slow program. Besides, the
# method requires manually choosing the start and end of the interval at each
# stage. That is unacceptable: we want methods in which the computer carries
# out all the computations automatically.
metodos_numericos/zero_de_funcoes/teoria/01_zero_de_funcoes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os

# + pycharm={"name": "#%%\n"}
# Aggregate the true episode reward across several training runs (seeds) of
# one algorithm/environment pair and plot the mean with a shaded std band.
num_seeds = 5
logfile = 'progress.csv'
algo = 'gail'
env_name = 'Walker2d'.lower()
log_dir = './experiments/' + env_name + '/' + algo + '_paper/'
# log_name = 'MDAL_updateSteps100_tpi0.5_tc0.05_s'
log_name = 'GAIL_updateSteps10_tpi0.5_tc0.05_s'

reward_curves = []
for run_idx in range(num_seeds):
    current_file = log_dir + log_name + str(run_idx) + '/' + logfile
    run_log = pd.read_csv(current_file)
    if run_idx == 0:
        # the timestep axis is shared, so read it from the first run only
        # episodes = np.array(run_log['episodes'])
        steps = np.array(run_log['TimestepsSoFar'])
    reward_curves.append(run_log['EpTrueRewMean'])

reward_curves = np.array(reward_curves)
mean_curve = np.mean(reward_curves, axis=0)
std_coef = 0.87
std_curve = np.nanstd(reward_curves, axis=0)

plt.clf()
plt.plot(steps / 1e6, mean_curve)
plt.fill_between(steps / 1e6,
                 mean_curve - std_coef * std_curve,
                 mean_curve + std_coef * std_curve,
                 alpha=0.2)
plt.title(env_name, fontsize=14)
plt.xlabel('Timesteps', fontsize=11)
plt.ylabel("Mean Episode Reward", fontsize=11)
plt.xticks(fontsize=11)

# + pycharm={"name": "#%%\n"}
# Recover the environment and algorithm names back from the last log path.
env_algo = os.path.split(os.path.split(os.path.split(current_file)[0])[0])
env = os.path.split(env_algo[0])[1]
algo = env_algo[1].split("_")[0]
print(env, algo)
stable_baselines/create_graphs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib matplotlib.use('Agg') import seaborn as sns from locker import mkdir from locker.analysis import * from locker.data import * from scripts.config import params as plot_params, FormatedFigure import matplotlib.pyplot as plt from locker.analysis import * import pycircstat as circ from numpy.fft import fft, fftfreq, fftshift # %matplotlib inline # + fig, ax = plt.subplots(1,1) runs = Runs() for cell in (Cells() & dict(cell_type='p-unit', cell_id='2014-12-03-ao')).fetch("KEY"): unit = (Cells & cell).fetch1('cell_type') cell['cell_type'] = unit contrast = 20 target_trials = runs & cell & dict(contrast=contrast, am=0, n_harmonics=0, delta_f=200) if len(target_trials) > 0: if Baseline() & cell: (Baseline() & cell).plot_raster(ax) plt.savefig('PSTH.png') # - plt.show()
scripts/figures/figure_intro/p-unit/PSTH.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:c19i2b2-py] *
#     language: python
#     name: conda-env-c19i2b2-py-py
# ---

# +
import numpy as np
import pandas as pd
import altair as alt
from os.path import join
import random
import string

from numpy.random import choice

import sys
sys.path.insert(1, '../../notebooks')  # We want to use constants.py in another folder
from constants import DATA_DIR, COLUMNS

np.random.seed(0)

# +
# GUIDELINE
# File #4: Diagnoses-SiteID.csv
# Fields: siteid, icd_code, icd_version, num_patients
# Notes:
# (1) One row per ICD diagnosis code
# (2) All diagnoses the patients have starting seven days before the positive test
# (3) icd_version = "9" or "10"
# (4) Obfuscate small counts with "-1" as required by your institution
# Examples: (Diagnoses-BIDMC.csv)
# BIDMC, B97.29, 10, 25
# BIDMC, J12.89, 10, 19
# BIDMC, R03.0, 10, 15
# BIDMC, U07.1, 10, 13
# BIDMC, 123, 9, -1
# -

num_sites = 20
num_codes = 9

# +
# ICD-10 examples taken from:
# https://www.cdc.gov/nchs/data/icd/ICD-10-CM-Official-Coding-Gudance-Interim-Advice-coronavirus-feb-20-2020.pdf
ICD_10_CODES = [
    "J12.89", "B97.29", "J20.8", "J40", "J22", "J98.8", "J80",
    "Z03.818", "Z20.828"
]

# ICD-09: https://en.wikipedia.org/wiki/List_of_ICD-9_codes
ICD_9_CODES = np.char.mod('%03d', random.sample(range(1, 139), num_codes))

# +
# Fake site ids
# To generate new list, uncomment below
# site_ids = []
# for i in range(num_sites):
#     site_ids.append("F" + "".join(random.choices(string.ascii_uppercase, k=2)))
site_ids = ['FWN', 'FXL', 'FZT', 'FMT', 'FFG', 'FOW', 'FSZ', 'FMA', 'FEQ', 'FVX',
            'FKQ', 'FBL', 'FDQ', 'FKN', 'FBD', 'FKL', 'FUU', 'FZU', 'FZM', 'FUN']
# -

# Exclusive upper bound on simulated patient counts, by site size.
SIZE_TO_MAX_PATIENTS = {"s": 12, "m": 50, "l": 98}


def simulate_code_rows(siteid, codes, icd_version, skip_p, size_of_site):
    """Simulate the Diagnoses rows of one ICD code system for one site.

    siteid       -- fake site identifier, e.g. "FWN"
    codes        -- iterable of ICD code strings
    icd_version  -- "9" or "10"
    skip_p       -- probability that a given code is absent at this site
    size_of_site -- "s", "m" or "l"; caps the simulated patient count

    Returns a list of [siteid, icd_code, icd_version, num_patients] rows;
    num_patients is -1 for the ~5% of rows with an obfuscated small count.
    """
    rows = []
    for code in codes:
        # Some codes may not be used for each site
        if choice([True, False], p=[skip_p, 1 - skip_p]):
            continue
        if choice([True, False], p=[0.05, 0.95]):
            num_patients = -1  # obfuscated small count
        else:
            num_patients = random.choice(range(1, SIZE_TO_MAX_PATIENTS[size_of_site]))
        rows.append([siteid, code, icd_version, num_patients])
    return rows


# Iterate each site to make a DataFrame and write it out.
dfs = []
for siteid in site_ids:
    # A factor that decides the number of patients in each site.
    # BUG FIX: the weights used to be passed as the third positional argument
    # of numpy.random.choice, which is `replace`, not `p`, so they were
    # silently ignored (uniform draw). They are now passed as p= and
    # normalized — the original [0.1, 0.6, 0.4] summed to 1.1.
    size_of_site = choice(["s", "m", "l"], p=[0.1, 0.6, 0.3])

    data = simulate_code_rows(siteid, ICD_9_CODES, "9", 0.5, size_of_site)
    data += simulate_code_rows(siteid, ICD_10_CODES, "10", 0.2, size_of_site)

    # Make data
    df = pd.DataFrame(data)
    dfs.append(df)
    # Write a file
    df.to_csv(join("..", "Diagnoses-" + siteid + '.csv'), index=False, header=False)
fakedata/src/generate_fake_diagnoses.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Q# # language: qsharp # name: iqsharp # --- # # Solving SAT problem with Grover's algorithm Kata Workbook # # **What is this workbook?** # A workbook is a collection of problems, accompanied by solutions to them. # The explanations focus on the logical steps required to solve a problem; they illustrate the concepts that need to be applied to come up with a solution to the problem, explaining the mathematical steps required. # # Note that a workbook should not be the primary source of knowledge on the subject matter; it assumes that you've already read a tutorial or a textbook and that you are now seeking to improve your problem-solving skills. You should attempt solving the tasks of the respective kata first, and turn to the workbook only if stuck. While a textbook emphasizes knowledge acquisition, a workbook emphasizes skill acquisition. # # This workbook describes the solutions to the problems offered in the [Solving SAT problem with Grover's algorithm kata](./SolveSATWithGrover.ipynb). Since the tasks are offered as programming problems, the explanations also cover some elements of Q# that might be non-obvious for a first-time user. # # **What you should know for this workbook** # # You should be familiar with the following concepts before tackling the Distinguish Unitaries kata (and this workbook): # # 1. [Basic linear algebra](./../tutorials/LinearAlgebra/LinearAlgebra.ipynb). # 2. [Single-qubit](./../tutorials/SingleQubitGates/SingleQubitGates.ipynb) and [multi-qubit quantum gates](./../tutorials/MultiQubitGates/MultiQubitGates.ipynb) and using them to manipulate the state of the system. # 3. 
[Exploring Grover's algorithm tutorial](./../tutorials/ExploringGroversAlgorithm/ExploringGroversAlgorithm.ipynb) and [Grover's algorithm implementation kata](./../GroversAlgorithm/GroversAlgorithm.ipynb). # 4. Basic knowledge of logic gates and SAT problems. # ## Part I. Oracles for SAT problems # ### Task 1.1. The AND oracle: $f(x) = x_0 \wedge x_1$ # # **Inputs:** # # 1. 2 qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2), # i.e., flip the target state if all qubits of the query register are in the $|1\rangle$ state, # and leave it unchanged otherwise. # # Leave the query register in the same state it started in. # # **Stretch Goal:** # # Can you implement the oracle so that it would work # for `queryRegister` containing an arbitrary number of qubits? # ### Solution # # The goal is to flip the qubit $|y\rangle$ if and only if all qubits in the register $|x\rangle$ are in the state $|1\rangle$. 
# Hence the required unitary $U_{and}$ is such that: # $$U_{and}|x\rangle|y\rangle = \begin{cases} # |x\rangle|y\rangle & \text{if }x \neq 1...1 \\ # |x\rangle X|y\rangle & \text{if }x = 1...1 # \end{cases}$$ # # We can rewrite this as # # $$U_{and} = \big(I - |1...1\rangle\langle 1...1|\big) \otimes I + |1...1\rangle\langle 1...1|\otimes X$$ # # Here we're using a convenient shorthand for ket-bra notation of gate effect on multiple basis states at once: # # $$I - |1...1\rangle\langle 1...1| = \sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}|i\rangle\langle i|$$ # # For the number of qubits in the input register $n=2$, $U_{and}$ is the **Toffoli** gate: # $$U_{and} = \begin{bmatrix} # 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ # 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ # 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ # 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ # 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ # \end{bmatrix}$$ # # In the general case of arbitrary number of qubits in the input, # this can be represented as a `Controlled X` gate, with the input register $|x\rangle$ as control and the target qubit $|y\rangle$ as target. # # > In Q# you can use [Controlled functor](https://docs.microsoft.com/quantum/user-guide/using-qsharp/operations-functions#controlled-and-adjoint-operations) to apply a controlled version of the gate, if the gate specifies it. # > # > If $|x\rangle$ consists of just 2 qubits, we could also use the built-in **Toffoli** gate - the [`CCNOT` gate](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.intrinsic.ccnot). # + %kata T11_Oracle_And_Test operation Oracle_And (queryRegister : Qubit[], target : Qubit) : Unit is Adj { Controlled X(queryRegister, target); } # - # [Return to task 1.1 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.1.-The-AND-oracle:-$f(x)-=-x_0-\wedge-x_1$) # ### Task 1.2. 
The OR oracle: $f(x) = x_0 \vee x_1$ # # **Inputs:** # # 1. 2 qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2), # i.e., flip the target state if at least one qubit of the query register is in the $|1\rangle$ state, # and leave it unchanged otherwise. # # Leave the query register in the same state it started in. # # **Stretch Goal:** # # Can you implement the oracle so that it would work # for `queryRegister` containing an arbitrary number of qubits? # ### Solution # # The goal is to flip the qubit $|y\rangle$ if at least one qubit in the register $|x\rangle$ is in the state $|1\rangle$. # This can also be restated as to flip the qubit $|y\rangle$ *unless* the register $|x\rangle$ is in the state $|0...0\rangle$. # # Hence the required unitary $U_{or}$ is such that: # $$U_{or}|x\rangle|y\rangle = \begin{cases} # |x\rangle |y\rangle & \text{if }x = 0...0 \\ # |x\rangle X|y\rangle & \text{if }x \neq 0...0 # \end{cases}$$ # # We can rewrite this as # $$U_{or} = |0...0\rangle\langle 0...0|\otimes I + \big(I - |0...0\rangle\langle 0...0|\big) \otimes X$$ # # We can now reduce this problem to the AND oracle using the fact that OR gate can be created using an AND gate and NOT gates. # # $$x_0\vee x_1\vee ... \vee x_{n-1} = \lnot(\lnot x_0\wedge \lnot x_1\wedge ... \wedge \lnot x_{n-1})$$ # # Thus $U_{or}$ can be rewritten as $U_{or}=(X^{\otimes n}\otimes X) \cdot U_{and} \cdot (X^{\otimes n}\otimes I)$. 
# # > <details> # <summary><b>Click here to see how to get this matrix expression using ket-bra notation</b></summary> # > # $$ # U_{or}= (X^{\otimes n}\otimes X) \cdot U_{and} \cdot (X^{\otimes n}\otimes I) = \\ # = (X^{\otimes n}\otimes X) \cdot (\sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}|i\rangle\langle i| \otimes I + |1...1\rangle\langle 1...1|\otimes X) \cdot (X^{\otimes n}\otimes I) = \\ # = (X^{\otimes n}\otimes X) \cdot (\sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}|i\rangle\langle i|X^{\otimes n} \otimes I + |1...1\rangle\langle 1...1|X^{\otimes n}\otimes X) = \\ # = (X^{\otimes n}\otimes X) \cdot (\sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}|i\rangle\langle i|X^{\otimes n} \otimes I + |1...1\rangle\langle 0...0|\otimes X) = \\ # = \sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}X^{\otimes n}|i\rangle\langle i|X^{\otimes n} \otimes X + X^{\otimes n}|1...1\rangle\langle 0...0|\otimes I = \\ # = \sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |1...1\rangle}X^{\otimes n}|i\rangle\langle i|X^{\otimes n} \otimes X + |0...0\rangle\langle 0...0|\otimes I = \\ # = \sum_{i \in \{0,1\}^n,\\ |i\rangle\neq |0...0\rangle}|i\rangle\langle i| \otimes X + |0...0\rangle\langle 0...0|\otimes I$$ # </details> # # This corresponds to the following steps: # # 1. Flip all the qubits of $|x\rangle$ using X gates to convert the inputs from $|x_0, x_1, ..., x_{n-1}\rangle$ to $|\lnot x_0, \lnot x_1, ..., \lnot x_{n-1}\rangle$. # 2. Apply $U_{and}$ oracle to convert $|y\rangle$ to $|y \oplus (\lnot x_0\wedge \lnot x_1\wedge ... \wedge \lnot x_{n-1}) \rangle$. # 3. Undo the flipping to return the inputs to $|x_0, x_1, ..., x_{n-1}\rangle$. # 4. Flip the state of the target qubit $|y\rangle$ again to perform the final negation $|y \oplus \lnot(\lnot x_0\wedge \lnot x_1\wedge ... \wedge \lnot x_{n-1}) \rangle = |y \oplus x_0\vee x_1\vee ... \vee x_{n-1} \rangle$. # # > We can use the ` within ... 
apply ...` construct in Q# implement the pattern of modifying the qubits of $|x\rangle$, applying the oracle to get the result and write it into $|y\rangle$ and undoing (uncomputing) the modification to $|x\rangle$. # + %kata T12_Oracle_Or_Test operation Oracle_Or (queryRegister : Qubit[], target : Qubit) : Unit is Adj { within { // Flip input qubits to negate them ApplyToEachA(X, queryRegister); } apply { // Use the AND oracle to get the AND of negated inputs Oracle_And(queryRegister, target); } // Flip the target to get the final answer X(target); } # - # [Return to task 1.2 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.2.-The-OR-oracle:-$f(x)-=-x_0-\vee-x_1$) # ### Task 1.3. The XOR oracle: $f(x) = x_0 \oplus x_1$ # # **Inputs:** # # 1. 2 qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2), # i.e., flip the target state if the qubits of the query register are in different states, # and leave it unchanged otherwise. # # Leave the query register in the same state it started in. # # **Stretch Goal:** # # Can you implement the oracle so that it would work # for `queryRegister` containing an arbitrary number of qubits? # ### Solution # # Let's consider the case of $n$-qubit register $|x\rangle$; then $f(x) = x_0 \oplus x_1 \oplus ... \oplus x_{n-1}$. # The goal is to flip the qubit $|y\rangle$ if and only if $f(x) = 1$. # # Hence the effect of the required unitary $U_{xor}$ on the basis state $|x\rangle|y\rangle$ will be: # # $$U_{xor}|x\rangle|y\rangle = |x\rangle|y\oplus f(x)\rangle = \\ # = |x\rangle|y\oplus x_0 \oplus x_1 \oplus ... \oplus x_{n-1}\rangle = \\ # = |x\rangle|(...((y\oplus x_0) \oplus x_1) ... 
)\oplus x_{n-1}\rangle$$ # # Let's consider a unitary $U_{\oplus i}$, which has the following effect on the basis state: # # $$U_{\oplus i}|x\rangle|y\rangle = |x\rangle|y \oplus x_i\rangle$$ # # This unitary is simply a CNOT gate with i-th qubit $|x_i\rangle$ as control and $|y\rangle$ as target. # # Then we can rewrite the effect of our unitary $U_{xor}$ as follows: # # $$U_{xor}|x\rangle|y\rangle = U_{\oplus n-1} (U_{\oplus n-2} (... U_{\oplus 0} |x\rangle|y\rangle ...)) = \prod_{i=0}^{n-1}U_{\oplus i}|x\rangle|y\rangle$$ # # We see that we can write # # $$U_{xor}=\prod_{i=0}^{n-1}U_{\oplus i}$$ # # We can implement this as a sequence of CNOT gates, with each of the qubits of the input register as controls and the output qubit as target. # + %kata T13_Oracle_Xor_Test operation Oracle_Xor (queryRegister : Qubit[], target : Qubit) : Unit is Adj { ApplyToEachA(CNOT(_, target), queryRegister); } # - # [Return to task 1.3 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.3.-The-XOR-oracle:-$f(x)-=-x_0-\oplus-x_1$) # ### Task 1.4. Alternating bits oracle: $f(x) = (x_0 \oplus x_1) \wedge (x_1 \oplus x_2) \wedge \dots \wedge (x_{N-2} \oplus x_{N-1})$ # # **Inputs:** # # 1. N qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2). # # Leave the query register in the same state it started in. # ### Solution # # Let $x_i'=x_i\oplus x_{i+1}$, then we can rewrite $f(x)$ as $f(x) = x_0' \wedge x_1' \wedge ... \wedge x_{N-2}'$. # # We can compute $x_i'$ using $U_{xor}$ (implemented by operation `Oracle_Xor`) from the previous task, since $U_{xor}|x_l\rangle|x_r\rangle|0\rangle = |x_l\rangle|x_r\rangle|x_l \oplus x_r\rangle$. # We will store XORs of pairs of adjacent qubits in an auxiliary register of qubits that we'll allocate for this purpose. 
(This procedure can be undone in the same way before deallocating that register.)
#
# Now we can find $f(x)$ using the values $x_i'$, i.e., transform $|y\rangle$ into $|y \oplus f(x)\rangle$ using the `Oracle_And` on the auxiliary register and the `target` qubit.
#
# Hence the final solution consists of:
# 1. Finding XORs of all pairs of adjacent qubits and storing them in an auxiliary register.
# 2. Applying $U_{and}$ (`Oracle_And`) on `auxiliaryRegister` and `target`.
# 3. Undo the computation done in 1. to reset the `auxiliaryRegister` before releasing the qubits.
#
# We can use the `within ... apply ...` construct in Q# to express this sequence of steps, similar to task 2.

# + %kata T14_Oracle_AlternatingBits_Test

open Microsoft.Quantum.Arrays;

operation Oracle_AlternatingBits (queryRegister : Qubit[], target : Qubit) : Unit is Adj {
    let N = Length(queryRegister);
    // One auxiliary qubit per adjacent pair; each starts in |0⟩ and must be
    // returned to |0⟩ (by the `within` uncomputation) before release.
    using (auxiliaryRegister = Qubit[N-1]) {
        // Find the XOR of all pairs of consecutive qubits and store the result in auxiliaryRegister
        within {
            for (i in 0 .. N-2) {
                Oracle_Xor(queryRegister[i .. i+1], auxiliaryRegister[i]);
            }
        }
        // Use the And oracle on auxiliaryRegister to get the answer
        apply {
            Oracle_And(auxiliaryRegister, target);
        }
    }
}
# -

# We can also avoid using the auxiliary register by computing XORs in place, i.e., storing the results of XOR operations in the `queryRegister` itself.
# To do this, we apply $\textrm{CNOT}$ to two adjacent qubits, with the target qubit being the one in which we store the result.
#
# Since all qubits except those at index $0$ and $N-1$ are inputs to two XOR operations, the order of these operations must be carefully chosen.
# If we are going from index $0$ to $N-1$ when computing $x_i'$, the target must always be the lower index (the one that will not be involved in later computations), i.e., `CNOT(queryRegister[i+1], queryRegister[i])`. This way `queryRegister[i+1]` is preserved for the next XOR operation.
# # Now the `queryRegister[0..N-2]` contains the XOR results, and we can perform the `Oracle_And` operation with this array as control directly. We uncompute the effects of the XORs computations using `within ... apply ...` construct, as usual. # + %kata T14_Oracle_AlternatingBits_Test open Microsoft.Quantum.Arrays; operation Oracle_AlternatingBits (queryRegister : Qubit[], target : Qubit) : Unit is Adj { let N = Length(queryRegister); // Find the XOR of all pairs of consecutive qubits and store the result in queryRegister itself within { for(i in 0 .. N-2) { CNOT(queryRegister[i+1], queryRegister[i]); } } // Use the And oracle on the first N-1 qubits of queryRegister to get the answer apply { Oracle_And(Most(queryRegister),target); } } # - # [Return to task 1.4 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.4.-Alternating-bits-oracle:-$f(x)-=-(x_0-\oplus-x_1)-\wedge-(x_1-\oplus-x_2)-\wedge-\dots-\wedge-(x_{N-2}-\oplus-x_{N-1})$) # ### Task 1.5. Evaluate one clause of a SAT formula # # > For SAT problems, $f(x)$ is represented as a conjunction (an AND operation) of several clauses on $N$ variables, # and each clause is a disjunction (an OR operation) of **one or several** variables or negated variables: # > # > $$clause(x) = \bigvee_k y_{k},\text{ where }y_{k} =\text{ either }x_j\text{ or }\neg x_j\text{ for some }j \in \{0, \dots, N-1\}$$ # > # > For example, one of the possible clauses on two variables is: # > # > $$clause(x) = x_0 \vee \neg x_1$$ # # **Inputs:** # # 1. N qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # 3. A 1-dimensional array of tuples `clause` which describes one clause of a SAT problem instance $clause(x)$. # # `clause` is an array of one or more tuples, each of them describing one component of the clause. 
#
# Each tuple is an `(Int, Bool)` pair:
#
# * the first element is the index $j$ of the variable $x_j$,
# * the second element is `true` if the variable is included as itself ($x_j$) and `false` if it is included as a negation ($\neg x_j$).
#
# **Example:**
#
# * The clause $x_0 \vee \neg x_1$ can be represented as `[(0, true), (1, false)]`.
#
# **Goal:**
#
# Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2).
#
# Leave the query register in the same state it started in.

# ### Solution
#
# This task involves evaluating a clause which is a disjunction (OR) of negated and non-negated variables encoded in the `queryRegister`. This task can be solved in 2 steps using a similar technique to the one we saw in task 1.4:
#
# 1. We first flip the qubits which are negated in `clause` (and later undo this operation).
# To do this, we conditionally apply an $X$ gate to qubit $|x_j\rangle$ if and only if the clause has a term of the form `(j, false)`.
# 2. We use the $U_{or}$ unitary (implemented by `Oracle_Or`) to calculate $|y\oplus f(x)\rangle$.
# Here we need to construct an array `clause_qubits` - all qubits which are included as a negated or non-negated variable in the clause.
# We then apply the `Oracle_Or` operation on `clause_qubits` and `target`.
#
# > We want our implementation of `Oracle_SATClause` to be adjointable, and to rely on the Q# compiler to generate the adjoint variant of this operation. To enable this, we move the classical computations that involve manipulating mutable variables to a separate function.
// Find all qubits which are variables in clause
function GetClauseQubits (queryRegister : Qubit[], clause: (Int,Bool)[]) : Qubit[] {
    // Accumulate references to the qubits whose indices appear in the clause,
    // in the order the clause lists them.
    mutable clauseQubits = new Qubit[0];
    for ((index, _) in clause) {
        set clauseQubits += [queryRegister[index]];
    }
    return clauseQubits;
}

# + %kata T15_Oracle_SATClause_Test

open Microsoft.Quantum.Arrays;

operation Oracle_SATClause (queryRegister : Qubit[], target : Qubit, clause : (Int, Bool)[]) : Unit is Adj {
    within {
        // Flip all qubits which are in negated form in the SAT clause.
        for ((index, positive) in clause) {
            if (not positive) {
                X(queryRegister[index]);
            }
        }
    }
    apply {
        // Find all qubits which are variables in the clause.
        let clauseQubits = GetClauseQubits(queryRegister, clause);
        // Compute disjunction of variables in clause.
        Oracle_Or(clauseQubits, target);
    }
}
# -

# We can also use several Q# library functions and constructs to write our code in the functional style.
#
# * The functions
# [`Fst`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.canon.fst) and
# [`Snd`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.canon.snd)
# extract the first and second element of a 2-element tuple, respectively.
# `Fst(term)` would give us the index $j$ for variable $x_j$ and `Snd(term)` would be `false` if the variable was negated or `true` if it was not.
# * We can use the function
# [`Mapped`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.arrays.mapped)
# to extract the list of variable indices used in the clause. Given an array and a function that is defined for the elements of the array, `Mapped` returns a new array that consists of the images of the original array under the function. `Mapped(Fst<Int, Bool>, clause)` will return an array of all first elements of the tuples in `clause`.
# * Finally, we use the function
# [`Subarray`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.arrays.subarray) to find the `clauseQubits`: this function takes an array and a list of indices and produces a new array formed from the elements of the original array at the given locations.

# + %kata T15_Oracle_SATClause_Test

open Microsoft.Quantum.Arrays;

operation Oracle_SATClause (queryRegister : Qubit[], target : Qubit, clause : (Int, Bool)[]) : Unit is Adj {
    // Flip all qubits which are in negated form in the SAT clause.
    within {
        for (term in clause) {
            // If Snd(term) is false, apply X gate, otherwise apply I (do nothing).
            (Snd(term)? I | X)(queryRegister[Fst(term)]);
        }
    }
    apply {
        // Get indices of all variables in clause.
        let indexList = Mapped(Fst<Int, Bool>, clause);
        // Get all qubits which are part of clause.
        let clauseQubits = Subarray(indexList, queryRegister);
        // Compute disjunction of variables in clause.
        Oracle_Or(clauseQubits, target);
    }
}
# -

# [Return to task 1.5 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.5.-Evaluate-one-clause-of-a-SAT-formula)

# ### Task 1.6. k-SAT problem oracle
#
# > For k-SAT problems, $f(x)$ is represented as a conjunction (an AND operation) of $M$ clauses on $N$ variables,
# and each clause is a disjunction (an OR operation) of **one or several** variables or negated variables:
# >
# > $$f(x) = \bigwedge_i \big(\bigvee_k y_{ik} \big),\text{ where }y_{ik} =\text{ either }x_j\text{ or }\neg x_j\text{ for some }j \in \{0, \dots, N-1\}$$
#
# **Inputs:**
#
# 1. N qubits in an arbitrary state $|x\rangle$ (input/query register).
#
# 2. A qubit in an arbitrary state $|y\rangle$ (target qubit).
#
# 3. A 2-dimensional array of tuples `problem` which describes the k-SAT problem instance $f(x)$.
#
# $i$-th element of `problem` describes the $i$-th clause of $f(x)$;
# it is an array of one or more tuples, each of them describing one component of the clause.
# # Each tuple is an `(Int, Bool)` pair: # # * the first element is the index $j$ of the variable $x_j$, # * the second element is `true` if the variable is included as itself ($x_j$) and `false` if it is included as a negation ($\neg x_j$). # # **Example:** # # A more general case of the OR oracle for 3 variables $f(x) = (x_0 \vee x_1 \vee x_2)$ can be represented as `[[(0, true), (1, true), (2, true)]]`. # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2). # # Leave the query register in the same state it started in. # ### Solution # # This task consists of a conjunction (AND) of results of multiple clause evaluations. Each clause individually can be evaluated using the code we've written in task 1.5. The computation results of these clauses must be stored (temporarily) in freshly allocated qubits. Then the conjunction of these results can be computed using `Oracle_And` from task 1.1. # # Let the number of clauses in the formula be $m$, then the steps for implementing k-SAT oracle will be: # 1. Allocate an array of $m$ qubits in the state $|0\rangle$ `auxiliaryRegister`. # 2. Evaluate each clause using `Oracle_SATClause` from task 1.5 with the corresponding element of `auxiliaryRegister` as the target qubit. # 3. Evaluate the SAT formula using `Oracle_And` implemented in task 1.1 with `auxiliaryRegister` as the input register and `target` as the target qubit. # 4. Undo step 2 to restore the auxiliary qubits back into the $|0^{\otimes m}\rangle$ state before releasing them. # # We can again use the `within ... apply ...` Q# language construct to perform steps 2, 3 and 4. # + %kata T16_Oracle_SAT_Test operation Oracle_SAT (queryRegister : Qubit[], target : Qubit, problem : (Int, Bool)[][]) : Unit is Adj { using (auxiliaryRegister = Qubit[Length(problem)]) { // Compute the clauses. within { for (i in 0 .. 
Length(problem) - 1) { Oracle_SATClause(queryRegister, auxiliaryRegister[i], problem[i]); } } // Evaluate the overall formula using an AND oracle. apply { Oracle_And(auxiliaryRegister, target); } } } # - # [Return to task 1.6 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-1.6.-k-SAT-problem-oracle) # ## Part II. Oracles for exactly-1 3-SAT problem # # Exactly-1 3-SAT problem (also known as "one-in-three 3-SAT") is a variant of a general 3-SAT problem. # It has a structure similar to a 3-SAT problem, but each clause must have *exactly one* true literal, # while in a normal 3-SAT problem each clause must have *at least one* true literal. # ### Task 2.1. "Exactly one $|1\rangle$" oracle # # **Inputs:** # # 1. 3 qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2), # where $f(x) = 1$ if exactly one bit of $x$ is in the $|1\rangle$ state, and $0$ otherwise. # # Leave the query register in the same state it started in. # # **Stretch Goal:** # # Can you implement the oracle so that it would work # for `queryRegister` containing an arbitrary number of qubits? # ### Solution # # Consider the set of all bit strings of length $n$ which have only one bit of $x$ equal to $1$. # This set of bit strings is $S=\{00...01, 00...10, ..., 00..1..00, ..., 01...00, 10...00\}$, # or, if we convert the bit strings to integers, # $$S=\{1,2,4,..2^i,..,2^{n-1}\} = \{2^k: 0 \le k \le n-1\}$$ # # We need to implement an oracle that flips $|y\rangle$ if the input basis state $|x\rangle$ corresponds to one of the bit strings in $S$. 
We can represent this as the following unitary in Dirac notation: # # $$U = \sum_{i\in S}|i\rangle\langle i|\otimes X + \sum_{j \not\in S}|j\rangle\langle j|\otimes I$$ # # This unitary can be implemented using $n$ controlled NOT gates, with `queryRegister` as control and the `target` qubit flipped if and only if the control register is in a particular state of the form $2^k$. # # We can use the Q# library function [`ControlledOnInt`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.canon.controlledonint) to implement these controlled NOT gates. # + %kata T21_Oracle_Exactly1One_Test operation Oracle_Exactly1One (queryRegister : Qubit[], target : Qubit) : Unit is Adj { for (i in 0 .. Length(queryRegister) - 1) { (ControlledOnInt(2^i, X))(queryRegister, target); } } # - # [Return to task 2.1 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-2.1.-"Exactly-one-$|1\rangle$"-oracle) # ### Task 2.2. "Exactly-1 3-SAT" oracle # # **Inputs:** # # 1. N qubits in an arbitrary state $|x\rangle$ (input/query register). # # 2. A qubit in an arbitrary state $|y\rangle$ (target qubit). # # 3. A 2-dimensional array of tuples `problem` which describes the 3-SAT problem instance $f(x)$. # # `problem` describes the problem instance in the same format as in task 1.6; # each clause of the formula is guaranteed to have exactly 3 terms. # # **Goal:** # # Transform state $|x,y\rangle$ into state $|x, y \oplus f(x)\rangle$ ($\oplus$ is addition modulo 2). # # Leave the query register in the same state it started in. # # **Example:** # # An instance of the problem $f(x) = (x_0 \vee x_1 \vee x_2)$ can be represented as `[[(0, true), (1, true), (2, true)]]`, # and its solutions will be `(true, false, false)`, `(false, true, false)` and `(false, false, true)`, # but none of the variable assignments in which more than one variable is true, which are solutions for the general SAT problem. 
# ### Solution
#
# This task is similar to [task 1.6](#Task-1.6.-k-SAT-problem-oracle): it consists of a conjunction (AND) of results of multiple clause evaluations, but this time each clause has to be evaluated using the *Exactly One 1* policy we've implemented in task 2.1, rather than an `Oracle_Or` we used in task 1.5. We can reuse the function `GetClauseQubits` from task 1.5:

# +
open Microsoft.Quantum.Arrays;

operation Oracle_Exactly1OneSATClause (queryRegister : Qubit[], target : Qubit, clause : (Int, Bool)[]) : Unit is Adj {
    within {
        // Temporarily flip the qubits that enter the clause in negated form.
        for ((index, positive) in clause) {
            if (not positive) {
                X(queryRegister[index]);
            }
        }
    }
    apply {
        // Find all qubits which are variables in the clause.
        let clauseQubits = GetClauseQubits(queryRegister, clause);
        // Check if the "Exactly one 1" criterion is satisfied.
        Oracle_Exactly1One(clauseQubits, target);
    }
}
# -

# The rest of the code is similar to task 1.6. See the [solution for task 1.6](#Task-1.6.-k-SAT-problem-oracle) for the discussion of implementation details.

# + %kata T22_Oracle_Exactly1SAT_Test

operation Oracle_Exactly1_3SAT (queryRegister : Qubit[], target : Qubit, problem : (Int, Bool)[][]) : Unit is Adj {
    // One auxiliary qubit per clause holds that clause's evaluation result;
    // the `within` block uncomputes them back to |0⟩ before release.
    using (auxiliaryRegister = Qubit[Length(problem)]) {
        // Compute the clauses.
        within {
            for (i in 0 .. Length(problem) - 1) {
                Oracle_Exactly1OneSATClause(queryRegister, auxiliaryRegister[i], problem[i]);
            }
        }
        // Evaluate the overall formula using an AND oracle.
        apply {
            Oracle_And(auxiliaryRegister, target);
        }
    }
}
# -

# [Return to task 2.2 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-2.2.-"Exactly-1-3-SAT"-oracle)

# ## Part III. Using Grover's algorithm for problems with multiple solutions

# ### Task 3.1. Using Grover's algorithm
#
# **Goal:**
#
# Implement Grover's algorithm and use it to find solutions to SAT instances from parts I and II.
# ### Solution # # This task can be solved following steps (see the [GroversAlgorithm kata](../GroversAlgorithm/GroversAlgorithm.ipynb) for details): # # 1. Grover's algorithm requires a phase oracle, rather than the marking oracles we implemented in parts I and II, so we need to write Q# code that converts a marking oracle to a phase oracle. operation Oracle_Converter (markingOracle : ((Qubit[], Qubit) => Unit is Adj), register : Qubit[]) : Unit is Adj { using (target = Qubit()) { // Put the target into the |-⟩ state and later revert the state within { X(target); H(target); } // Apply the marking oracle; since the target is in the |-⟩ state, // flipping the target if the register satisfies the oracle condition will apply a -1 factor to the state apply { markingOracle(register, target); } } } # 2. The main part of Grover's algorithm is a loop which runs the Grover's algorithm iteration with the given `markingOracle` on the given `register` the given number of times. # This loop results in a state on the given `register` which satisfies the condition marked by the marking oracle with high probability. # + open Microsoft.Quantum.Arrays; operation GroversLoop (register: Qubit[], oracle: ((Qubit[], Qubit) => Unit is Adj), numIterations: Int) : Unit { let phaseOracle = Oracle_Converter(oracle, _); ApplyToEach(H, register); for (_ in 1 .. numIterations) { phaseOracle(register); within{ ApplyToEachA(H, register); ApplyToEachA(X, register); } apply{ Controlled Z(Most(register), Tail(register)); } } } # - # 3. Finally, the operation `Run_GroversSearch_Algorithm` will define the problem you want to solve, run the `GroversLoop` and measure the qubit register to find the solution. # # The parameters defining size of register `N` and `oracle` are up to you! # # Try experimenting with the number of iterations in the code. # Remember that for an oracle which has $M << N, N=2^n$ marked states, the optimal number of iterations is $\frac{\pi}{4}\sqrt{\frac{N}{M}}$. 
#
# The solution given here runs Grover's search algorithm once, and if it fails to find the solution, you need to retry manually. You can add the logic to automate the retries; see [Exploring Grover's Algorithm tutorial](./../tutorials/ExploringGroversAlgorithm/ExploringGroversAlgorithm.ipynb) for details.

# +
open Microsoft.Quantum.Measurement;
open Microsoft.Quantum.Convert;

operation Run_GroversSearch_Algorithm () : Unit {
    // Create a data structure describing the SAT instance we want to solve
    let problem = [[(0, true)], [(0, false), (1, false)]];
    let N = 2;

    // Define the oracle we want to use; try this with different oracles!
    let oracle = Oracle_SAT(_, _, problem);

    // Try different numbers of iterations.
    let numIterations = 1;

    using ((register, output) = (Qubit[N], Qubit())) {
        Message($"Trying search with {numIterations} iterations");
        GroversLoop(register, oracle, numIterations);
        // Measure the result.
        let res = MultiM(register);
        // Apply the oracle to the register after measurement to check whether the result is correct.
        oracle(register, output);
        if (MResetZ(output) == One) {
            let answer = ResultArrayAsBoolArray(res);
            Message($"Found Answer: {answer}");
        } else {
            Message("Failed to find an answer. Try again!");
        }
        ResetAll(register);
    }
}
# -

%simulate Run_GroversSearch_Algorithm

# [Return to task 3.1 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-3.1.-Using-Grover's-algorithm)

# ### Task 3.2. Universal implementation of Grover's algorithm
#
# **Inputs:**
#
# 1. The number of qubits N.
#
# 2. A marking oracle which implements a boolean expression, similar to the oracles from part I.
#
# **Output:**
#
# An array of N boolean values which satisfy the expression implemented by the oracle
# (i.e., any basis state marked by the oracle).

# ### Solution
#
# This is very similar to task 3.1, but we don't know the problem described by the `oracle`, so we have no way of predicting the optimal number of iterations.
Instead we try the series $\{1, 2, 4, 8, ...\}$ as the number of iterations. Even if none of these numbers are optimal, we will still eventually get the correct answer with high probability. (There are also other ways to pick the sequence of the numbers of iterations to try; feel free to experiment with them!) # # We use the repeat-until-success (RUS) loop to try the search with different numbers of iterations until we find an answer. The `repeat` block (the body of the loop) consists of performing the `GroversLoop`, measuring the result and checking if it is correct. The `until` block defines the condition of exiting the loop - if we found a correct answer or reached a time-out condition. The `fixup` block happens before retrying the algorithm, in this case it doubles the number of iterations in order to try again. # + %kata T32_UniversalGroversAlgorithm_Test open Microsoft.Quantum.Measurement; open Microsoft.Quantum.Convert; operation UniversalGroversAlgorithm (N : Int, oracle : ((Qubit[], Qubit) => Unit is Adj)) : Bool[] { // Since we are unaware of the optimal number of iterations, we try different numbers of iterations. mutable answer = new Bool[N]; using ((register, output) = (Qubit[N], Qubit())) { mutable correct = false; mutable iter = 1; repeat { Message($"Trying search with {iter} iterations"); GroversLoop(register, oracle, iter); let res = MultiM(register); oracle(register, output); if (MResetZ(output) == One) { set correct = true; set answer = ResultArrayAsBoolArray(res); } ResetAll(register); } until (correct or iter > 10) // The fail-safe to avoid going into an infinite loop fixup { set iter *= 2; } if (not correct) { fail "Failed to find an answer"; } } Message($"{answer}"); return answer; } # - # [Return to task 3.2 of the Solving SAT with Grover's algorithm kata.](./SolveSATWithGrover.ipynb#Task-3.2.-Universal-implementation-of-Grover's-algorithm)
SolveSATWithGrover/Workbook_SolveSATWithGrover.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="2dOGxAjilw7y" outputId="e48c78d5-2fdf-4f67-873f-e48a61fa0b36" from numpy import load print("loading data") data = load("/content/drive/My Drive/lungcancer/data_4.npy") print("loading labels") labels = load("/content/drive/My Drive/lungcancer/labels_4.npy") # + colab={"base_uri": "https://localhost:8080/"} id="ev3U3Tzdl16n" outputId="90da0ef9-619d-43cf-8570-8d576e27ac97" from google.colab import drive drive.mount('/content/drive') # + id="nnXQAUi7mEo_" from sklearn.model_selection import train_test_split (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.1, stratify=labels, random_state=42,shuffle = True) # + colab={"base_uri": "https://localhost:8080/"} id="T7fBkKXomFq7" outputId="c0968778-3c7f-4cb7-ce0c-e9033c1f991d" trainX.shape # + colab={"base_uri": "https://localhost:8080/"} id="H-UnbcKAmHnA" outputId="3db49017-cf3d-49f1-ca66-e1292c89196e" trainY.shape # + id="d56ZrxFGmJXo" from keras.preprocessing.image import ImageDataGenerator aug_train = ImageDataGenerator(rescale= 1.0/255., rotation_range=20, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode="nearest") aug_test = ImageDataGenerator(rescale= 1.0/255.) 
# + id="yv52ct-NmLUK"
# Build a six-block CNN binary classifier for 512x512 single-channel images.
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout

classifier_2 = Sequential()

# Step 1 - Convolution
classifier_2.add(Conv2D(32, (3, 3), input_shape=(512, 512, 1), activation='relu'))
# Step 2 - Pooling
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a second convolutional layer
classifier_2.add(Conv2D(32, (3, 3), activation='relu'))
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a third convolutional layer
classifier_2.add(Conv2D(32, (3, 3), activation='relu'))
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a fourth convolutional layer
classifier_2.add(Conv2D(64, (3, 3), activation='relu'))
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a fifth convolutional layer
classifier_2.add(Conv2D(64, (3, 3), activation='relu'))
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a sixth convolutional layer
classifier_2.add(Conv2D(64, (3, 3), activation='relu'))
classifier_2.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 - Flattening
classifier_2.add(Flatten())

# Step 4 - Full connection: three dense layers, single sigmoid unit for binary output
classifier_2.add(Dense(units=128, activation='relu'))
classifier_2.add(Dense(units=128, activation='relu'))
classifier_2.add(Dense(units=128, activation='relu'))
classifier_2.add(Dense(units=1, activation='sigmoid'))

# + id="KerYdl6BmPnF"
# Binary cross-entropy loss matches the single sigmoid output unit.
classifier_2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# + colab={"base_uri": "https://localhost:8080/"} id="5pYSXNvSmSX3" outputId="5c44cf56-0f1b-4f73-8097-b34e5d9f2be6"
classifier_2.summary()

# + id="qWbAyp5AmU23"
import keras as k

# + id="s20HCONcmW2Y"
# Unique run name (timestamped) for the TensorBoard log directory.
import time
NAME = "test_2-{}".format(int(time.time()))

# + id="BBKUCvrimY65"
# TensorBoard logging plus best-model checkpointing; early stopping left disabled.
# NOTE(review): 'logs\{}' uses a backslash — on Linux/Colab this creates a single
# directory whose name contains a literal backslash; confirm intended.
callbacks_2 = [
    # k.callbacks.EarlyStopping(patience=3, monitor='val_loss'),
    k.callbacks.TensorBoard(log_dir='logs\{}'.format(NAME)),
    k.callbacks.ModelCheckpoint('test_model_2.h5', save_best_only=True)]

# + colab={"base_uri": "https://localhost:8080/"} id="z4QgL088aQXN" outputId="acaafeb8-a5b9-4cc8-bc0c-c9d6eb611f24"
# Sanity-check dataset sizes.
print(len(data))
#print(len(ids))# both lists, with columns specified
print(len(labels))

# + colab={"base_uri": "https://localhost:8080/"} id="bDuJ3JNoaSug" outputId="c1af5e2a-d42f-4e8b-c92f-4077c753f980"
import numpy as np # linear algebra
data_new = np.array(data)
data_new.shape

# + colab={"base_uri": "https://localhost:8080/"} id="If6GQ5e6aVfH" outputId="364092ae-1728-4384-81ff-9b97f8bfa12c"
import numpy as np # linear algebra
labels_new = np.array(labels)
labels_new.shape

# + colab={"base_uri": "https://localhost:8080/"} id="-Kr1bwuNaag2" outputId="600dd63b-33f4-47d9-8c83-c06e61ab7773"
print(data.shape)
print(labels.shape)

# + id="FPT64ScGafh8"
# Drop the trailing channel axis to get 2-D grayscale slices for display.
data_preview = data[:, :, :, 0]

# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="W12B8tOuaiqN" outputId="f15205fe-162a-4c38-bdfb-050b1e74f787"
import matplotlib.pyplot as plt
plt.imshow(data_preview[100], cmap='gray')

# + colab={"base_uri": "https://localhost:8080/"} id="atYneWXIaljU" outputId="ede6536c-580a-4649-a5a3-f87a9cd97578"
labels[100]

# + [markdown] id="tJoB95O1avRt"
# Classification

# + colab={"base_uri": "https://localhost:8080/"} id="xEKB7J3bax5D" outputId="4657a9b9-03ba-46d1-96ed-d47b388e5952"
data_preview.shape

# + id="gOYOLIqRa4at"
# Pick one slice to experiment with color conversions below.
test_image = data_preview[50]

# + colab={"base_uri": "https://localhost:8080/"} id="FoXw0jXLa8Fi" outputId="eb94d665-f33c-4417-9021-29fa76dc5691"
test_image.shape

# + colab={"base_uri": "https://localhost:8080/"} id="6KIm8Yyca-m5" outputId="09658112-55e4-4724-d6c3-5e6845f93df6"
test_image.dtype

# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="KZtFypckbB-A" outputId="cf208fcd-d422-458a-9a74-9a84c09dea6b"
plt.imshow(test_image)

# + colab={"base_uri": "https://localhost:8080/", "height": 286}
id="sqpzhWs4bEnv" outputId="6123a0cf-2ee3-40a6-8ed1-6066747ad6cf" plt.imshow(test_image, cmap = 'gray') # + id="krO1SJevbEpn" from skimage import color image = skimage.color.gray2rgb(test_image) # + colab={"base_uri": "https://localhost:8080/", "height": 302} id="yqgGwtM6bMv4" outputId="7d7419fa-0036-4b9e-a0b3-f26b9b02d619" plt.imshow(image) # + colab={"base_uri": "https://localhost:8080/"} id="qJcWVH6XbPZD" outputId="4ce28ae8-0e37-4a4e-a3a5-d9ac7435f0ea" image.shape # + id="XZ0-HrSabUnO" import cv2 import numpy as np img = np.array(test_image, dtype=np.uint8) color_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # + colab={"base_uri": "https://localhost:8080/", "height": 286} id="9PD_USFobUtf" outputId="5f2ba1e0-f46a-4d82-a1ea-f7dd33fee502" plt.imshow(color_img) # + colab={"base_uri": "https://localhost:8080/"} id="ExS5js-_bU0O" outputId="aef20072-f974-46ad-e8f8-34bc3a540cde" color_img.shape # + colab={"base_uri": "https://localhost:8080/", "height": 286} id="A1AhSY_XbU5C" outputId="33706047-3a6d-4496-f110-d7aba0c9ad45" plt.imshow(data_preview[50], cmap='Accent') # + colab={"base_uri": "https://localhost:8080/"} id="Th5ghrV2bU9o" outputId="1c75fd3b-3362-40ba-8606-e77181524a6b" labels[50] # + colab={"base_uri": "https://localhost:8080/", "height": 286} id="JG-u0quqbVBm" outputId="c29b72b9-aee7-49f9-82f1-6ed097355e11" plt.imshow(data_preview[100], cmap='Accent') # + colab={"base_uri": "https://localhost:8080/"} id="vHoldt0XbodU" outputId="b49a28de-4c30-4525-a859-a88775353fa6" labels[100]
Codes/LC_4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Aperiodic Signal Visualizer

# +
# %matplotlib inline
import warnings
import collections

import numpy as np
import matplotlib.pyplot as plt

from neurodsp.sim import sim_powerlaw
from neurodsp.plts import plot_power_spectra
from neurodsp.spectral import compute_spectrum
from neurodsp.utils import set_random_seed
# -

import seaborn as sns
sns.set_context('talk')

# Import local code
from code.gif import clear_output, _gif_plot_output
#from code.utils import yield_sig

# ## Settings

# Set random seed
set_random_seed(505)

# ## Functions

def build_plot(sigs, plot_func, *args, sleep=0.05, n_build=25):
    """Animate `plot_func` over `n_build` frames, feeding one new sample
    per signal per frame.

    Parameters: sigs - list of 1d signals; plot_func - callable taking the
    accumulated {signal index: samples} dict; sleep - pause between frames;
    n_build - number of animation frames.
    """
    yielders = [yield_sig(sig) for sig in sigs]
    data = collections.defaultdict(list)
    for frame in range(n_build):
        clear_output(wait=True)
        # BUG FIX: the inner loop previously reused the name `ind`, shadowing
        # the frame counter, so _gif_plot_output always received the index of
        # the last signal instead of the current frame number.
        for sig_ind, yielder in enumerate(yielders):
            data[sig_ind].append(next(yielder))
        plot_func(data)
        _gif_plot_output(plt.gcf(), False, frame, sleep=sleep)

def yield_sig(sig, step=1):
    """Yield successive samples of `sig`, taking every `step`-th element."""
    for el in sig[::step]:
        yield el

# +
def plot_ts(data_dict, ax=None):
    """Plot timeseries."""
    if not ax:
        _, ax = plt.subplots()
    for label, data in data_dict.items():
        ax.plot(data, label=label)
    ax.set(xticks=[], yticks=[]);
    plt.grid(True)

def plot_hist(data_dict, ax=None):
    """Plot histogram."""
    if not ax:
        _, ax = plt.subplots()
    for label, data in data_dict.items():
        ax.hist(data, label=label, alpha=0.5)
    ax.set(xticks=[], yticks=[]);
    plt.grid(True)

def plot_spectrum(data_dict, ax=None):
    """Plot spectrum.

    NOTE: uses the module-level sampling rate `fs` defined in the
    'Simulate Data' section below.
    """
    if not ax:
        _, ax = plt.subplots()
    for label, data in data_dict.items():
        with warnings.catch_warnings():
            # Short data segments make compute_spectrum warn; suppress here.
            warnings.simplefilter("ignore")
            freqs, powers = compute_spectrum(np.array(data), fs=fs)
        ax.plot(np.log(freqs), np.log(powers))
        #ax.loglog(freqs, powers)
        #plot_power_spectra(freqs, powers, ax=ax)
    ax.set(xticks=[], yticks=[]);
# -

def make_axes():
    """Make axes for combined plot.

    Notes:
        Placement definitions: Left, Bottom, Width, Height
    """
    fig = plt.figure()
    ax1 = fig.add_axes([0.0, 0.6, 1.3, 0.5])
    ax2 = fig.add_axes([0.0, 0.0, 0.6, 0.5])
    ax3 = fig.add_axes([0.7, 0.0, 0.6, 0.5])
    return fig, [ax1, ax2, ax3]

def build_all(sigs, n_build=np.inf, sleep=0.05, save=False):
    """Build all plots together.

    Parameters: sigs - list of 1d signals; n_build - number of frames
    (np.inf builds until the shortest signal is exhausted, raising
    StopIteration); sleep - pause between frames; save - pass-through flag
    telling _gif_plot_output to save each frame.
    """
    yielders = [yield_sig(sig) for sig in sigs]
    data = collections.defaultdict(list)
    # BUG FIX: use a manual counter instead of `range(n_build)` so that the
    # documented default n_build=np.inf actually works (range() rejects floats).
    frame = 0
    while frame < n_build:
        clear_output(wait=True)
        fig, axes = make_axes()
        # BUG FIX: inner loop no longer shadows the frame counter, so saved
        # gif frames are numbered 0..n_build-1 instead of all sharing the
        # last signal's index.
        for sig_ind, yielder in enumerate(yielders):
            data[sig_ind].append(next(yielder))
        plot_ts(data, ax=axes[0])
        plot_hist(data, ax=axes[1])
        plot_spectrum(data, ax=axes[2])
        _gif_plot_output(fig, save, frame, label='02_ap_sig', sleep=sleep)
        frame += 1

# ## Simulate Data

n_seconds = 10
fs = 100

sig1 = sim_powerlaw(n_seconds, fs, 0)
sig2 = sim_powerlaw(n_seconds, fs, -1)
sig3 = sim_powerlaw(n_seconds, fs, -2)

yielder1 = yield_sig(sig1)
yielder2 = yield_sig(sig2)
yielder3 = yield_sig(sig3)

build_plot([sig1, sig2], plot_ts)

build_plot([sig1, sig2], plot_hist)

build_plot([sig1, sig2], plot_spectrum, n_build=100)

# ### Create visualizer with all panels

build_all([sig2], n_build=250, sleep=0.01)

build_all([sig2, sig3], n_build=250, sleep=0.01)
notebooks/02-aperiodic_signals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # noexport import os os.system('export_notebook model_evaluation_h2o.ipynb') # + from tmilib import * from h2o_utils import * import h2o h2o.init() # - import traceback # + #print len(sdir_glob('*mtries_*_sample_rate_*')) # - classifier = load_h2o_model(sdir_path('binclassifier_catfeatures_randomforest_second_v6.h2o')) print classifier # + for model_file in sdir_glob('*mtries_*_sample_rate_*'): print model_file try: classifier = load_h2o_model(model_file) print classifier except: traceback.print_exc() continue # - model_file = sdir_path('binclassifier_catfeatures_randomforest_v6.h2o') classifier = load_h2o_model(model_file) print classifier test_data = h2o.import_file(sdir_path('catdata_test_second_v2.csv')) # + #test_data_2[0] = test_data_2[0].asfactor() #print test_data_2.describe() # - #test_data = h2o.import_file(sdir_path('catdata_test_second.csv')) # + #print test_data.describe() #test_data[0] = test_data[0].asfactor() #test_data[0,:] = 1 # + #test_predictions = classifier.predict(test_data) # + #print classifier # - #print h2o.confusion_matrix(test_predictions, ) print classifier.model_performance(test_data) # + #print classifier.confusion_matrix #print test_data['label'] #print test_predictions #print classifier.F1 # + #print test_data.describe() # - testdata= h2o.import_file(sdir_path('catdata_test_insession_tensecond.csv')) print testdata.describe() # + #h2o.export_file(h2o.get_frame('h2odata_test_threefeatures_insession.hex'), sdir_path('h2odata_test_threefeatures_insession.hex')) # + #print classifier.confusion_matrix(test_data)
model_evaluation_h2o.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Decision Tree # ### Objective # # This notebook introduces decision tree models. **Decision tree** learning is one of the most widely used and practical methods for inductive inference. It's a method for approximating discreted-valued functions that is **robust** to **noisy data** and capable of **learning disjuctive** expressions. # # ### Learning objective # # After finished this notebook, you should be able to explain **decision tree** learning models, as well as know to use them through the scikit-learn library. # ## Loading the libraries # + import sys assert sys.version_info >= (3, 6) import numpy assert numpy.__version__ >="1.17.3" import numpy as np import pandas assert pandas.__version__ >= "0.25.1" import pandas as pd import matplotlib.pyplot as plt import seaborn assert seaborn.__version__ >= "0.9.0" import seaborn as sns # %matplotlib inline # - # ### Decision trees in scikit-learn # # In scikit-learn, decision trees are implemented through [tree.DecisionTreeClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) for classification and [tree.DecisionTreeRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) for regression. # + import sklearn assert sklearn.__version__ >='0.21.3' from sklearn import tree from sklearn.tree import DecisionTreeClassifier # - # ## 1. Toy data set # # In order to better understand how a decision tree processes the feature space, let us first work on a simulated dataset. 
# + plt.figure(figsize=(5, 5)) x1 = np.random.multivariate_normal([2,2], [[0.1,0],[0,0.1]], 50) x2 = np.random.multivariate_normal([-2,-2], [[0.1,0],[0,0.1]], 50) x3 = np.random.multivariate_normal([-3,3], [[0.1,0.1],[0,0.1]], 50) X1 = np.concatenate((x1,x2,x3), axis=0) y1 = np.random.multivariate_normal([-2,2], [[0.1,0],[0,0.1]], 50) y2 = np.random.multivariate_normal([2,-2], [[0.1,0],[0,0.1]], 50) y3 = np.random.multivariate_normal([-3,-3], [[0.01,0],[0,0.01]], 50) X2 = np.concatenate((y1,y2,y3), axis=0) plt.plot(X1[:,0],X1[:,1], 'x', color='blue', label='class 1') plt.plot(X2[:,0], X2[:,1], 'x', color='orange', label='class 2') plt.legend(loc=(0.4, 0.8), fontsize=12) # - # Changing the **splitter** to **random**, meaning that the algorithm will consider the feature along which to split _randomly_, rather than picking the optimal one, and then select the best among several _random_ splitting point. Run the algorithm several times. What do you observer? # + # Training data X_demo = np.concatenate((X1, X2), axis=0) y_demo = np.concatenate((np.zeros(X1.shape[0]), np.ones(X2.shape[0]))) clf = tree.DecisionTreeClassifier(splitter="random", random_state=100).fit(X_demo, y_demo) # Create a mesh, i.e., a fine grid of values between the minimum and maximum # values of X1 and X2 in the training data plot_step = 0.02 x_min, x_max = X_demo[:, 0].min() - 1, X_demo[:, 0].max() + 1 y_min, y_max = X_demo[:, 1].min() - 1, X_demo[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) # Label each point of the mesh with the trained DecisionTreeClassifier Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # Plot the contours corresponding to these labels # (i.e., the decision boundaries of the DecisionTreeClassifier) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) # Plot the training data plt.plot(X1[:,0], X1[:,1], 'x', label='class 1') plt.plot(X2[:,0], X2[:,1], 'x', label='class 2') plt.legend() # - # 
?tree.DecisionTreeClassifier # The features are always randomly permuted at each split. To have a determinist behavior, we have to fix the value of `random_state` parameter. # ## Toy data set 2: play tenning # Uncomment and execute the following code if you don't have installed the [```graphviz```](https://pypi.org/project/graphviz/) library. # + # # ! conda install -y -c conda-forge graphviz xorg-libxrender xorg-libxpm # # ! pip install graphviz # - tennis = pd.read_csv('data/playtennis.csv') tennis.head() # + from sklearn.preprocessing import LabelEncoder lb = LabelEncoder() for c in tennis.columns: tennis[c + '_'] = lb.fit_transform(tennis[c]) tennis.head() # + X = tennis.iloc[:,7:11] Y = tennis.iloc[:,11] tennis_tree = DecisionTreeClassifier(criterion='entropy') tennis_tree.fit(X, Y) # + from sklearn.tree import export_graphviz from IPython.display import SVG import graphviz assert graphviz.__version__ >= '0.13.2' dot_model = export_graphviz(tennis_tree, class_names = tennis.columns[0:5], feature_names = tennis.columns[1:5], impurity = True, filled = True, rounded = True) graph = graphviz.Source(dot_model) SVG(graph.pipe(format='svg')) # - # ## 2. 
Wisconsin Prognostic Breast Cancer dataset # # The data are available at https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wpbc.data # # ### Data description # # * The dataset contains 569 samples of **malign** and **benign** tumor cells # * It has 35 columns, where the first one stores the **unique ID** number of the sample, the second one the diagnosis (M=malignant, B=benign), 3--32 ten real-valued features are computed for each cell nucleus # # * radius (mean of distances from center to points on the perimeter) # * texture (standard deviation of gray-scale values) # * perimeter # * area # * smoothness (local variation in radius lengths) # * compactness ($\frac{perimeter^2}{area} - 1.0$) # * concavity (severity of concave portions of the contour) # * concave points (number of concave portions of the contour) # * symmetry # * fractal dimension (coastline approximation - 1); 5 here 3--32 are divided into three parts first is Mean (3-13), Stranded Error(13-23) and Worst(23-32) and each contain 10 parameter (radius, texture,area, perimeter, smoothness,compactness,concavity,concave points,symmetry and fractal dimension). # # The goal is to classify whether the breast cancer is **benign** or **malignant**. # ### Loading the data # # # # from sklearn.datasets import load_breast_cancer cancer_ds = load_breast_cancer() X = cancer_ds.data y = cancer_ds.target # + from sklearn import model_selection skf = model_selection.StratifiedKFold(n_splits=5) skf.get_n_splits(X, y) folds = [(tr,te) for (tr,te) in skf.split(X, y)] # - # ### Cross-validation procedures # Import the functions defined in the ```cross_validation.py``` file. Module enables us to reuse code across different jupyter notebooks. from cross_validation import cross_validate_clf , cross_validate_clf_optimize # ## 1.2 Breast cancer tumor classification data # # **Question:** Cross-validate 5 different decision trees (with default parameters) and print out their accuracy. 
Why do you get different values? # _Hint:_ _Check the documentation_ (?tree.DecisionTreeClassifier) # + from sklearn import metrics ypred_dt = [] # will hold the 5 arrays of predictions (1 per tree) for tree_index in range(5): clf = tree.DecisionTreeClassifier() pred_proba = cross_validate_clf(X, y, clf, folds) ypred_dt.append(pred_proba) print("%.3f" % metrics.accuracy_score(y, np.where(pred_proba > 0.5, 1, 0))) # - # Setting a value for the ```random_state``` parameter allow us to get the same value. # **Question:** Compute the mean and standard deviation of the area under the ROC curve of these 5 trees. Plot the ROC curves of these 5 trees. # Use the [metrics](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) module of scikit-learn. # + fpr_dt = [] tpr_dt = [] auc_dt = [] for tree_index in range(5): # Compute the ROC curve of the current tree fpr_dt_tmp, tpr_dt_tmp, thresholds = metrics.roc_curve(y, ypred_dt[tree_index], pos_label=1) # Compute the area under the ROC curve of the current tree auc_dt_tmp = metrics.auc(fpr_dt_tmp, tpr_dt_tmp) fpr_dt.append(fpr_dt_tmp) tpr_dt.append(tpr_dt_tmp) auc_dt.append(auc_dt_tmp) # Plot the first 4 ROC curves for tree_index in range(4): plt.plot(fpr_dt[tree_index], tpr_dt[tree_index], '-') # Plot the last ROC curve, with a label that gives the mean/std AUC plt.plot(fpr_dt[-1], tpr_dt[-1], '-', label='DT (AUC = %0.2f +/- %0.2f)' % (np.mean(auc_dt), np.std(auc_dt))) # Plot the ROC curve plt.xlabel('False Positive Rate', fontsize=16) plt.ylabel('True Positive Rate', fontsize=16) plt.title('ROC curves', fontsize=16) plt.legend(loc="lower right") # - # ## 1.3 Visualizing the decision trees built for the breast cancer classification # + dot_model = export_graphviz(clf, class_names = ['malignant', 'benign'], feature_names = cancer_ds.feature_names, impurity = True, filled = True, rounded = True) graph = graphviz.Source(dot_model) SVG(graph.pipe(format='svg')) # - def plot_feature_importances(model, 
n_features, feature_names): plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), feature_names) plt.ylabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) plot_feature_importances(clf, cancer_ds.data.shape[1], cancer_ds.feature_names)
08-decision-tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial of Node Schematas - PI & TwoSymbol # Visualization of schematas for simple boolean nodes (automatas) # %load_ext autoreload # %autoreload 2 # %matplotlib inline from __future__ import division import numpy as np import pandas as pd from IPython.display import Image, display import cana from cana.datasets.bools import * from cana.drawing.canalizing_map import draw_canalizing_map_graphviz # + n = OR() print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = CONTRADICTION() n.name = 'Con' print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = 
XOR() print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) # Manually request Two-Symbol to be computed. n._check_compute_canalization_variables(two_symbols=True) for input in [0,1]: for ts,per,sms in n._two_symbols[input]: print( 'TS: %s | PermIdx: %s | SameIdx: %s' % (ts, per,sms)) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = AND() print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = COPYx1() n.name = 'Cx1' print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) 
dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = RULE90() n.name = 'R90' print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # + n = RULE110() n.name = 'R110' print(n) print('k_r: {:.2f}'.format(n.input_redundancy())) print('k_e: {:.2f}'.format(n.effective_connectivity())) #print('k_s: {:.2f} - {:.2f}'.format(n.input_symmetry(mode='node',bound='upper',norm=False), n.input_symmetry(mode='node',bound='lower',norm=False))) print('r_ji: {:} (mean)'.format(n.edge_redundancy(bound='mean'))) print('e_ji: {:} (mean)'.format(n.edge_effectiveness(bound='mean'))) dfLUT, dfPI, dfTS = n.look_up_table(), n.schemata_look_up_table(type='pi'), n.schemata_look_up_table(type='ts') display(pd.concat({'Original LUT':dfLUT,'PI Schema':dfPI,'TS Schema':dfTS}, axis=1).fillna('-')) draw_canalizing_map_graphviz(n.canalizing_map()) # -
tutorials/Canalization - Node Schematas.ipynb
;; -*- coding: utf-8 -*- ;; --- ;; jupyter: ;; jupytext: ;; text_representation: ;; extension: .scm ;; format_name: light ;; format_version: '1.5' ;; jupytext_version: 1.14.4 ;; kernelspec: ;; display_name: MIT Scheme ;; language: scheme ;; name: mit-scheme ;; --- ;; # 1.1 The Elements of Programming ;; ;; A powerful programming language is more than just a means for instructing a computer to perform tasks. The language also serves as a framework within which we organize our ideas about processes. Thus, when we describe a language, we should pay particular attention to the means that the language provides for combining simple ideas to form more complex ideas. Every powerful language has three mechanisms for accomplishing this: ;; ;; - **primitive expressions**, which represent the simplest entities the language is concerned with, ;; - **means of combination**, by which compound elements are built from simpler ones, and ;; - **means of abstraction**, by which compound elements can be named and manipulated as units. ;; ;; In programming, we deal with two kinds of elements: procedures and data. (Later we will discover that they are really not so distinct.) Informally, data is “stuff” that we want to manipulate, and procedures are descriptions of the rules for manipulating the data. Thus, any powerful programming language should be able to describe primitive data and primitive procedures and should have methods for combining and abstracting procedures and data. ;; ;; In this chapter we will deal only with simple numerical data so that we can focus on the rules for building procedures.^[The characterization of numbers as “simple data” is a barefaced bluff. In fact, the treatment of numbers is one of the trickiest and most confusing aspects of any programming language. Some typical issues involved are these: Some computer systems distinguish *integers*, such as 2, from *real numbers*, such as 2.71. Is the real number 2.00 different from the integer 2? 
Are the arithmetic operations used for integers the same as the operations used for real numbers? Does 6 divided by 2 produce 3, or 3.0? How large a number can we represent? How many decimal places of accuracy can we represent? Is the range of integers the same as the range of real numbers? Above and beyond these questions, of course, lies a collection of issues concerning roundoff and truncation errors—the entire science of numerical analysis. Since our focus in this book is on large-scale program design rather than on numerical techniques, we are going to ignore these problems. The numerical examples in this chapter will exhibit the usual roundoff behavior that one observes when using arithmetic operations that preserve a limited number of decimal places of accuracy in noninteger operations.] In later chapters we will see that these same rules allow us to build procedures to manipulate compound data as well. ;; ;; ;; ## 1.1.1 Expressions ;; ;; One easy way to get started at programming is to examine some typical interactions with an interpreter for the Scheme dialect of Lisp. Imagine that you are sitting at a computer terminal. You type an *expression*, and the interpreter responds by displaying the result of its *evaluating* that expression. ;; ;; One kind of primitive expression you might type is a number. (More precisely, the expression that you type consists of the numerals that represent the number in base 10.) If you present Lisp with a number ;; + attributes={"classes": ["scheme"], "id": ""} 486 ;; - ;; the interpreter will respond by printing `486`. ;; ;; ^[Throughout this book, when we wish to emphasize the distinction between the input typed by the user and the response printed by the interpreter, we will show the latter in slanted characters.] 
;; Expressions representing numbers may be combined with an expression representing a primitive procedure (such as `+` or `*`) to form a compound expression that represents the application of the procedure to those numbers. For example: (+ 137 349) (- 1000 334) (* 5 99) (/ 10 5) (+ 2.7 10) ;; Expressions such as these, formed by delimiting a list of expressions within parentheses in order to denote procedure application, are called *combinations*. The leftmost element in the list is called the *operator*, and the other elements are called *operands*. The value of a combination is obtained by applying the procedure specified by the operator to the *arguments* that are the values of the operands. ;; ;; The convention of placing the operator to the left of the operands is known as *prefix notation*, and it may be somewhat confusing at first because it departs significantly from the customary mathematical convention. Prefix notation has several advantages, however. One of them is that it can accommodate procedures that may take an arbitrary number of arguments, as in the following examples: (+ 21 35 12 7) (* 25 4 12) ;; No ambiguity can arise, because the operator is always the leftmost element and the entire combination is delimited by the parentheses. ;; ;; A second advantage of prefix notation is that it extends in a straightforward way to allow combinations to be *nested*, that is, to have combinations whose elements are themselves combinations: ;; + attributes={"classes": ["scheme"], "id": ""} (+ (* 3 5) (- 10 6)) ;; - ;; There is no limit (in principle) to the depth of such nesting and to the overall complexity of the expressions that the Lisp interpreter can evaluate. It is we humans who get confused by still relatively simple expressions such as ;; + attributes={"classes": ["scheme"], "id": ""} (+ (* 3 (+ (* 2 4) (+ 3 5))) (+ (- 10 7) 6)) ;; - ;; which the interpreter would readily evaluate to be 57. 
;; We can help ourselves by writing such an expression in the form

;; + attributes={"classes": ["scheme"], "id": ""}
(+ (* 3
      (+ (* 2 4)
         (+ 3 5)))
   (+ (- 10 7)
      6))
;; -

;; following a formatting convention known as *pretty-printing*, in which each long combination is written so that the operands are aligned vertically. The resulting indentations display clearly the structure of the expression.
;;
;; ^[Lisp systems typically provide features to aid the user in formatting expressions. Two especially useful features are one that automatically indents to the proper pretty-print position whenever a new line is started and one that highlights the matching left parenthesis whenever a right parenthesis is typed.]
;;
;; Even with complex expressions, the interpreter always operates in the same basic cycle: It reads an expression from the terminal, evaluates the expression, and prints the result. This mode of operation is often expressed by saying that the interpreter runs in a *read-eval-print loop*. Observe in particular that it is not necessary to explicitly instruct the interpreter to print the value of the expression.
;;
;; ^[Lisp obeys the convention that every expression has a value. This convention, together with the old reputation of Lisp as an inefficient language, is the source of the quip by Alan Perlis (paraphrasing Oscar Wilde) that “Lisp programmers know the value of everything but the cost of nothing.”]
;;
;;
;; ## 1.1.2 Naming and the Environment
;;
;; A critical aspect of a programming language is the means it provides for using names to refer to computational objects. We say that the name identifies a *variable* whose *value* is the object.
;;
;; In the Scheme dialect of Lisp, we name things with `define`.
Typing ;; + attributes={"classes": ["scheme"], "id": ""} (define size 2) ;; - ;; causes the interpreter to associate the value 2 with the name `size`.^[In this book, we do not show the interpreter’s response to evaluating definitions, since this is highly implementation-dependent.] Once the name `size` has been associated with the number 2, we can refer to the value 2 by name: size ;; + attributes={"classes": ["scheme"], "id": ""} (* 5 size) ;; - ;; Here are further examples of the use of `define`: ;; + (define pi 3.14159) (define radius 10) (* pi (* radius radius)) ;; + (define circumference (* 2 pi radius)) circumference ;; - ;; `Define` is our language’s simplest means of abstraction, for it allows us to use simple names to refer to the results of compound operations, such as the `circumference` computed above. In general, computational objects may have very complex structures, and it would be extremely inconvenient to have to remember and repeat their details each time we want to use them. Indeed, complex programs are constructed by building, step by step, computational objects of increasing complexity. The interpreter makes this step-by-step program construction particularly convenient because name-object associations can be created incrementally in successive interactions. This feature encourages the incremental development and testing of programs and is largely responsible for the fact that a Lisp program usually consists of a large number of relatively simple procedures. ;; ;; It should be clear that the possibility of associating values with symbols and later retrieving them means that the interpreter must maintain some sort of memory that keeps track of the name-object pairs. 
This memory is called the *environment* (more precisely the *global environment*, since we will see later that a computation may involve a number of different environments).^[[Chapter 3](Chapter-3.xhtml#Chapter-3) will show that this notion of environment is crucial, both for understanding how the interpreter works and for implementing interpreters.] ;; ;; ;; ## 1.1.3 Evaluating Combinations ;; ;; One of our goals in this chapter is to isolate issues about thinking procedurally. As a case in point, let us consider that, in evaluating combinations, the interpreter is itself following a procedure. ;; ;; > To evaluate a combination, do the following: ;; > > 1. Evaluate the subexpressions of the combination. ;; > 2. Apply the procedure that is the value of the leftmost subexpression (the operator) to the arguments that are the values of the other subexpressions (the operands). ;; ;; Even this simple rule illustrates some important points about processes in general. First, observe that the first step dictates that in order to accomplish the evaluation process for a combination we must first perform the evaluation process on each element of the combination. Thus, the evaluation rule is *recursive* in nature; that is, it includes, as one of its steps, the need to invoke the rule itself.^[It may seem strange that the evaluation rule says, as part of the first step, that we should evaluate the leftmost element of a combination, since at this point that can only be an operator such as `+` or `*` representing a built-in primitive procedure such as addition or multiplication. We will see later that it is useful to be able to work with combinations whose operators are themselves compound expressions.] ;; ;; Notice how succinctly the idea of recursion can be used to express what, in the case of a deeply nested combination, would otherwise be viewed as a rather complicated process. 
For example, evaluating ;; + attributes={"classes": ["scheme"], "id": ""} (* (+ 2 (* 4 6)) (+ 3 5 7)) ;; - ;; requires that the evaluation rule be applied to four different combinations. We can obtain a picture of this process by representing the combination in the form of a tree, as shown in [Figure 1.1](#Figure-1_002e1). Each combination is represented by a node with branches corresponding to the operator and the operands of the combination stemming from it. The terminal nodes (that is, nodes with no branches stemming from them) represent either operators or numbers. Viewing evaluation in terms of the tree, we can imagine that the values of the operands percolate upward, starting from the terminal nodes and then combining at higher and higher levels. In general, we shall see that recursion is a very powerful technique for dealing with hierarchical, treelike objects. In fact, the “percolate values upward” form of the evaluation rule is an example of a general kind of process known as *tree accumulation*. ;; ;; ![](fig/chap1/Fig1.1g.std.svg) ;; **Figure 1.1:** Tree representation, showing the value of each subcombination. ;; ;; Next, observe that the repeated application of the first step brings us to the point where we need to evaluate, not combinations, but primitive expressions such as numerals, built-in operators, or other names. We take care of the primitive cases by stipulating that ;; ;; - the values of numerals are the numbers that they name, ;; - the values of built-in operators are the machine instruction sequences that carry out the corresponding operations, and ;; - the values of other names are the objects associated with those names in the environment. 
;; ;; We may regard the second rule as a special case of the third one by stipulating that symbols such as `+` and `*` are also included in the global environment, and are associated with the sequences of machine instructions that are their “values.” The key point to notice is the role of the environment in determining the meaning of the symbols in expressions. In an interactive language such as Lisp, it is meaningless to speak of the value of an expression such as `(+ x 1)` without specifying any information about the environment that would provide a meaning for the symbol `x` (or even for the symbol `+`). As we shall see in [Chapter 3](Chapter-3.xhtml#Chapter-3), the general notion of the environment as providing a context in which evaluation takes place will play an important role in our understanding of program execution. ;; ;; Notice that the evaluation rule given above does not handle definitions. For instance, evaluating `(define x 3)` does not apply `define` to two arguments, one of which is the value of the symbol `x` and the other of which is 3, since the purpose of the `define` is precisely to associate `x` with a value. (That is, `(define x 3)` is not a combination.) ;; ;; Such exceptions to the general evaluation rule are called *special forms*. `Define` is the only example of a special form that we have seen so far, but we will meet others shortly. Each special form has its own evaluation rule. The various kinds of expressions (each with its associated evaluation rule) constitute the syntax of the programming language. 
In comparison with most other programming languages, Lisp has a very simple syntax; that is, the evaluation rule for expressions can be described by a simple general rule together with specialized rules for a small number of special forms.^[Special syntactic forms that are simply convenient alternative surface structures for things that can be written in more uniform ways are sometimes called *syntactic sugar*, to use a phrase coined by Peter Landin. In comparison with users of other languages, Lisp programmers, as a rule, are less concerned with matters of syntax. (By contrast, examine any Pascal manual and notice how much of it is devoted to descriptions of syntax.) This disdain for syntax is due partly to the flexibility of Lisp, which makes it easy to change surface syntax, and partly to the observation that many “convenient” syntactic constructs, which make the language less uniform, end up causing more trouble than they are worth when programs become large and complex. In the words of Alan Perlis, “Syntactic sugar causes cancer of the semicolon.”] ;; ;; ;; ## 1.1.4 Compound Procedures ;; ;; We have identified in Lisp some of the elements that must appear in any powerful programming language: ;; ;; - Numbers and arithmetic operations are primitive data and procedures. ;; - Nesting of combinations provides a means of combining operations. ;; - Definitions that associate names with values provide a limited means of abstraction. ;; ;; Now we will learn about *procedure definitions*, a much more powerful abstraction technique by which a compound operation can be given a name and then referred to as a unit. 
;; ;; We begin by examining how to express the idea of “squaring.” We might say, “To square something, multiply it by itself.” This is expressed in our language as ;; + attributes={"classes": ["scheme"], "id": ""} (define (square x) (* x x)) ;; - ;; We can understand this in the following way: ;; + attributes={"classes": ["example"], "id": ""} (define (square x) (* x x)) | | | | | | To square something, multiply it by itself. ;; - ;; We have here a *compound procedure*, which has been given the name `square`. The procedure represents the operation of multiplying something by itself. The thing to be multiplied is given a local name, `x`, which plays the same role that a pronoun plays in natural language. Evaluating the definition creates this compound procedure and associates it with the name `square`.^[Observe that there are two different operations being combined here: we are creating the procedure, and we are giving it the name `square`. It is possible, indeed important, to be able to separate these two notions—to create procedures without naming them, and to give names to procedures that have already been created. We will see how to do this in [1.3.2](1_002e3.xhtml#g_t1_002e3_002e2).] ;; ;; The general form of a procedure definition is ;; + attributes={"classes": ["scheme"], "id": ""} (define (⟨name⟩ ⟨formal parameters⟩) ⟨body⟩) ;; - ;; The `⟨`name`⟩` is a symbol to be associated with the procedure definition in the environment.^[Throughout this book, we will describe the general syntax of expressions by using italic symbols delimited by angle brackets—e.g., `⟨`name`⟩`—to denote the “slots” in the expression to be filled in when such an expression is actually used.] The `⟨`formal parameters`⟩` are the names used within the body of the procedure to refer to the corresponding arguments of the procedure. 
The `⟨`body`⟩` is an expression that will yield the value of the procedure application when the formal parameters are replaced by the actual arguments to which the procedure is applied.^[More generally, the body of the procedure can be a sequence of expressions. In this case, the interpreter evaluates each expression in the sequence in turn and returns the value of the final expression as the value of the procedure application.] The `⟨`name`⟩` and the `⟨`formal parameters`⟩` are grouped within parentheses, just as they would be in an actual call to the procedure being defined. ;; ;; Having defined `square`, we can now use it: (square 21) (square (+ 2 5)) (square (square 3)) ;; We can also use `square` as a building block in defining other procedures. For example, $x^{2} + y^{2}$ can be expressed as (+ (square x) (square y)) ;; We can easily define a procedure `sum-of-squares` that, given any two numbers as arguments, produces the sum of their squares: ;; + attributes={"classes": ["scheme"], "id": ""} (define (sum-of-squares x y) (+ (square x) (square y))) (sum-of-squares 3 4) ;; - ;; Now we can use `sum-of-squares` as a building block in constructing further procedures: ;; + attributes={"classes": ["scheme"], "id": ""} (define (f a) (sum-of-squares (+ a 1) (* a 2))) (f 5) ;; - ;; Compound procedures are used in exactly the same way as primitive procedures. Indeed, one could not tell by looking at the definition of `sum-of-squares` given above whether `square` was built into the interpreter, like `+` and `*`, or defined as a compound procedure. ;; ;; ;; ## 1.1.5 The Substitution Model for Procedure Application ;; ;; To evaluate a combination whose operator names a compound procedure, the interpreter follows much the same process as for combinations whose operators name primitive procedures, which we described in [1.1.3](#g_t1_002e1_002e3). 
That is, the interpreter evaluates the elements of the combination and applies the procedure (which is the value of the operator of the combination) to the arguments (which are the values of the operands of the combination). ;; ;; We can assume that the mechanism for applying primitive procedures to arguments is built into the interpreter. For compound procedures, the application process is as follows: ;; ;; > To apply a compound procedure to arguments, evaluate the body of the procedure with each formal parameter replaced by the corresponding argument. ;; ;; To illustrate this process, let’s evaluate the combination ;; + attributes={"classes": ["scheme"], "id": ""} (f 5) ;; - ;; where `f` is the procedure defined in [1.1.4](#g_t1_002e1_002e4). We begin by retrieving the body of `f`: ;; + attributes={"classes": ["scheme"], "id": ""} (sum-of-squares (+ a 1) (* a 2)) ;; - ;; Then we replace the formal parameter `a` by the argument 5: ;; + attributes={"classes": ["scheme"], "id": ""} (sum-of-squares (+ 5 1) (* 5 2)) ;; - ;; Thus the problem reduces to the evaluation of a combination with two operands and an operator `sum-of-squares`. Evaluating this combination involves three subproblems. We must evaluate the operator to get the procedure to be applied, and we must evaluate the operands to get the arguments. Now `(+ 5 1)` produces 6 and `(* 5 2)` produces 10, so we must apply the `sum-of-squares` procedure to 6 and 10. 
These values are substituted for the formal parameters `x` and `y` in the body of `sum-of-squares`, reducing the expression to ;; + attributes={"classes": ["scheme"], "id": ""} (+ (square 6) (square 10)) ;; - ;; If we use the definition of `square`, this reduces to ;; + attributes={"classes": ["scheme"], "id": ""} (+ (* 6 6) (* 10 10)) ;; - ;; which reduces by multiplication to ;; + attributes={"classes": ["scheme"], "id": ""} (+ 36 100) ;; - ;; and finally to ;; + attributes={"classes": ["scheme"], "id": ""} 136 ;; - ;; The process we have just described is called the *substitution model* for procedure application. It can be taken as a model that determines the “meaning” of procedure application, insofar as the procedures in this chapter are concerned. However, there are two points that should be stressed: ;; ;; - The purpose of the substitution is to help us think about procedure application, not to provide a description of how the interpreter really works. Typical interpreters do not evaluate procedure applications by manipulating the text of a procedure to substitute values for the formal parameters. In practice, the “substitution” is accomplished by using a local environment for the formal parameters. We will discuss this more fully in [Chapter 3](Chapter-3.xhtml#Chapter-3) and [Chapter 4](Chapter-4.xhtml#Chapter-4) when we examine the implementation of an interpreter in detail. ;; - Over the course of this book, we will present a sequence of increasingly elaborate models of how interpreters work, culminating with a complete implementation of an interpreter and compiler in [Chapter 5](Chapter-5.xhtml#Chapter-5). The substitution model is only the first of these models—a way to get started thinking formally about the evaluation process. In general, when modeling phenomena in science and engineering, we begin with simplified, incomplete models. 
As we examine things in greater detail, these simple models become inadequate and must be replaced by more refined models. The substitution model is no exception. In particular, when we address in [Chapter 3](Chapter-3.xhtml#Chapter-3) the use of procedures with “mutable data,” we will see that the substitution model breaks down and must be replaced by a more complicated model of procedure application.^[Despite the simplicity of the substitution idea, it turns out to be surprisingly complicated to give a rigorous mathematical definition of the substitution process. The problem arises from the possibility of confusion between the names used for the formal parameters of a procedure and the (possibly identical) names used in the expressions to which the procedure may be applied. Indeed, there is a long history of erroneous definitions of *substitution* in the literature of logic and programming semantics. See [Stoy 1977](References.xhtml#Stoy-1977) for a careful discussion of substitution.] ;; ;; ;; ### Applicative order versus normal order ;; ;; According to the description of evaluation given in [1.1.3](#g_t1_002e1_002e3), the interpreter first evaluates the operator and operands and then applies the resulting procedure to the resulting arguments. This is not the only way to perform evaluation. An alternative evaluation model would not evaluate the operands until their values were needed. Instead it would first substitute operand expressions for parameters until it obtained an expression involving only primitive operators, and would then perform the evaluation. 
If we used this method, the evaluation of `(f 5)` would proceed according to the sequence of expansions ;; + attributes={"classes": ["scheme"], "id": ""} (sum-of-squares (+ 5 1) (* 5 2)) (+ (square (+ 5 1)) (square (* 5 2))) (+ (* (+ 5 1) (+ 5 1)) (* (* 5 2) (* 5 2))) ;; - ;; followed by the reductions ;; + attributes={"classes": ["scheme"], "id": ""} (+ (* 6 6) (* 10 10)) (+ 36 100) ;; - ;; This gives the same answer as our previous evaluation model, but the process is different. In particular, the evaluations of `(+ 5 1)` and `(* 5 2)` are each performed twice here, corresponding to the reduction of the expression `(* x x)` with `x` replaced respectively by `(+ 5 1)` and `(* 5 2)`. ;; ;; This alternative “fully expand and then reduce” evaluation method is known as *normal-order evaluation*, in contrast to the “evaluate the arguments and then apply” method that the interpreter actually uses, which is called *applicative-order evaluation*. It can be shown that, for procedure applications that can be modeled using substitution (including all the procedures in the first two chapters of this book) and that yield legitimate values, normal-order and applicative-order evaluation produce the same value. (See [Exercise 1.5](#Exercise-1_002e5) for an instance of an “illegitimate” value where normal-order and applicative-order evaluation do not give the same result.) ;; ;; Lisp uses applicative-order evaluation, partly because of the additional efficiency obtained from avoiding multiple evaluations of expressions such as those illustrated with `(+ 5 1)` and `(* 5 2)` above and, more significantly, because normal-order evaluation becomes much more complicated to deal with when we leave the realm of procedures that can be modeled by substitution. 
On the other hand, normal-order evaluation can be an extremely valuable tool, and we will investigate some of its implications in [Chapter 3](Chapter-3.xhtml#Chapter-3) and [Chapter 4](Chapter-4.xhtml#Chapter-4).^[In [Chapter 3](Chapter-3.xhtml#Chapter-3) we will introduce *stream processing*, which is a way of handling apparently “infinite” data structures by incorporating a limited form of normal-order evaluation. In [4.2](4_002e2.xhtml#g_t4_002e2) we will modify the Scheme interpreter to produce a normal-order variant of Scheme.] ;; ;; ;; ## 1.1.6 Conditional Expressions and Predicates ;; ;; The expressive power of the class of procedures that we can define at this point is very limited, because we have no way to make tests and to perform different operations depending on the result of a test. For instance, we cannot define a procedure that computes the absolute value of a number by testing whether the number is positive, negative, or zero and taking different actions in the different cases according to the rule ;; ;; $$\left| x \right|\; = \;\left\{ \begin{array}{lll} ;; x & {\;\text{if}} & {x > 0,} \\ ;; 0 & {\;\text{if}} & {x = 0,} \\ ;; {- x} & {\;\text{if}} & {x < 0.} \\ ;; \end{array} \right.$$ ;; ;; This construct is called a *case analysis*, and there is a special form in Lisp for notating such a case analysis. It is called `cond` (which stands for “conditional”), and it is used as follows: ;; + attributes={"classes": ["scheme"], "id": ""} (define (abs x) (cond ((> x 0) x) ((= x 0) 0) ((< x 0) (- x)))) ;; - ;; The general form of a conditional expression is ;; + attributes={"classes": ["scheme"], "id": ""} (cond (⟨p₁⟩ ⟨e₁⟩) (⟨p₂⟩ ⟨e₂⟩) … (⟨pₙ⟩ ⟨eₙ⟩)) ;; - ;; consisting of the symbol `cond` followed by parenthesized pairs of expressions ;; + attributes={"classes": ["scheme"], "id": ""} (⟨p⟩ ⟨e⟩) ;; - ;; called *clauses*. 
The first expression in each pair is a *predicate*—that is, an expression whose value is interpreted as either true or false.^[“Interpreted as either true or false” means this: In Scheme, there are two distinguished values that are denoted by the constants `#t` and `#f`. When the interpreter checks a predicate’s value, it interprets `#f` as false. Any other value is treated as true. (Thus, providing `#t` is logically unnecessary, but it is convenient.) In this book we will use names `true` and `false`, which are associated with the values `#t` and `#f` respectively.] ;; ;; Conditional expressions are evaluated as follows. The predicate $\langle p_{1}\rangle$ is evaluated first. If its value is false, then $\langle p_{2}\rangle$ is evaluated. If $\langle p_{2}\rangle$’s value is also false, then $\langle p_{3}\rangle$ is evaluated. This process continues until a predicate is found whose value is true, in which case the interpreter returns the value of the corresponding *consequent expression* $\langle e\rangle$ of the clause as the value of the conditional expression. If none of the $\langle p\rangle$’s is found to be true, the value of the `cond` is undefined. ;; ;; The word *predicate* is used for procedures that return true or false, as well as for expressions that evaluate to true or false. The absolute-value procedure `abs` makes use of the primitive predicates `>`, `<`, and `=`.^[`Abs` also uses the “minus” operator `-`, which, when used with a single operand, as in `(- x)`, indicates negation.] These take two numbers as arguments and test whether the first number is, respectively, greater than, less than, or equal to the second number, returning true or false accordingly. 
;; ;; Another way to write the absolute-value procedure is ;; + attributes={"classes": ["scheme"], "id": ""} (define (abs x) (cond ((< x 0) (- x)) (else x))) ;; - ;; which could be expressed in English as “If $x$ is less than zero return $- x$; otherwise return $x$.” `Else` is a special symbol that can be used in place of the $\langle p\rangle$ in the final clause of a `cond`. This causes the `cond` to return as its value the value of the corresponding $\langle e\rangle$ whenever all previous clauses have been bypassed. In fact, any expression that always evaluates to a true value could be used as the $\langle p\rangle$ here. ;; ;; Here is yet another way to write the absolute-value procedure: ;; + attributes={"classes": ["scheme"], "id": ""} (define (abs x) (if (< x 0) (- x) x)) ;; - ;; This uses the special form `if`, a restricted type of conditional that can be used when there are precisely two cases in the case analysis. The general form of an `if` expression is ;; + attributes={"classes": ["scheme"], "id": ""} (if ⟨predicate⟩ ⟨consequent⟩ ⟨alternative⟩) ;; - ;; To evaluate an `if` expression, the interpreter starts by evaluating the `⟨`predicate`⟩` part of the expression. If the `⟨`predicate`⟩` evaluates to a true value, the interpreter then evaluates the `⟨`consequent`⟩` and returns its value. Otherwise it evaluates the `⟨`alternative`⟩` and returns its value.^[A minor difference between `if` and `cond` is that the `⟨`e`⟩` part of each `cond` clause may be a sequence of expressions. If the corresponding `⟨`p`⟩` is found to be true, the expressions `⟨`e`⟩` are evaluated in sequence and the value of the final expression in the sequence is returned as the value of the `cond`. In an `if` expression, however, the `⟨`consequent`⟩` and `⟨`alternative`⟩` must be single expressions.] ;; ;; In addition to primitive predicates such as `<`, `=`, and `>`, there are logical composition operations, which enable us to construct compound predicates. 
The three most frequently used are these: ;; ;; - `(and ⟨e₁⟩ … ⟨eₙ⟩)` The interpreter evaluates the expressions `⟨`e`⟩` one at a time, in left-to-right order. If any `⟨`e`⟩` evaluates to false, the value of the `and` expression is false, and the rest of the `⟨`e`⟩`’s are not evaluated. If all `⟨`e`⟩`’s evaluate to true values, the value of the `and` expression is the value of the last one. ;; - `(or ⟨e₁⟩ … ⟨eₙ⟩)` The interpreter evaluates the expressions `⟨`e`⟩` one at a time, in left-to-right order. If any `⟨`e`⟩` evaluates to a true value, that value is returned as the value of the `or` expression, and the rest of the `⟨`e`⟩`’s are not evaluated. If all `⟨`e`⟩`’s evaluate to false, the value of the `or` expression is false. ;; - `(not ⟨e⟩)` The value of a `not` expression is true when the expression `⟨`e`⟩` evaluates to false, and false otherwise. ;; Notice that `and` and `or` are special forms, not procedures, because the subexpressions are not necessarily all evaluated. `Not` is an ordinary procedure. ;; ;; As an example of how these are used, the condition that a number $x$ be in the range $5 < x < 10$ may be expressed as ;; + attributes={"classes": ["scheme"], "id": ""} (and (> x 5) (< x 10)) ;; - ;; As another example, we can define a predicate to test whether one number is greater than or equal to another as ;; + attributes={"classes": ["scheme"], "id": ""} (define (>= x y) (or (> x y) (= x y))) ;; - ;; or alternatively as ;; + attributes={"classes": ["scheme"], "id": ""} (define (>= x y) (not (< x y))) ;; - ;; **Exercise 1.1:** Below is a sequence of expressions. What is the result printed by the interpreter in response to each expression? Assume that the sequence is to be evaluated in the order in which it is presented. 
;; + attributes={"classes": ["scheme"], "id": ""} 10 (+ 5 3 4) (- 9 1) (/ 6 2) (+ (* 2 4) (- 4 6)) (define a 3) (define b (+ a 1)) (+ a b (* a b)) (= a b) (if (and (> b a) (< b (* a b))) b a) (cond ((= a 4) 6) ((= b 4) (+ 6 7 a)) (else 25)) (+ 2 (if (> b a) b a)) (* (cond ((> a b) a) ((< a b) b) (else -1)) (+ a 1)) ;; - ;; **Exercise 1.2:** Translate the following expression into prefix form: ;; ;; $$\frac{5 + 4 + (2 - (3 - (6 + \frac{4}{5})))}{3(6 - 2)(2 - 7)}.$$ ;; ;; **Exercise 1.3:** Define a procedure that takes three numbers as arguments and returns the sum of the squares of the two larger numbers. ;; ;; **Exercise 1.4:** Observe that our model of evaluation allows for combinations whose operators are compound expressions. Use this observation to describe the behavior of the following procedure: ;; + attributes={"classes": ["scheme"], "id": ""} (define (a-plus-abs-b a b) ((if (> b 0) + -) a b)) ;; - ;; **Exercise 1.5:** Ben Bitdiddle has invented a test to determine whether the interpreter he is faced with is using applicative-order evaluation or normal-order evaluation. He defines the following two procedures: ;; + attributes={"classes": ["scheme"], "id": ""} (define (p) (p)) (define (test x y) (if (= x 0) 0 y)) ;; - ;; Then he evaluates the expression ;; + attributes={"classes": ["scheme"], "id": ""} (test 0 (p)) ;; - ;; What behavior will Ben observe with an interpreter that uses applicative-order evaluation? What behavior will he observe with an interpreter that uses normal-order evaluation? Explain your answer. (Assume that the evaluation rule for the special form `if` is the same whether the interpreter is using normal or applicative order: The predicate expression is evaluated first, and the result determines whether to evaluate the consequent or the alternative expression.) ;; ;; ;; ## 1.1.7 Example: Square Roots by Newton’s Method ;; ;; Procedures, as introduced above, are much like ordinary mathematical functions. 
They specify a value that is determined by one or more parameters. But there is an important difference between mathematical functions and computer procedures. Procedures must be effective. ;; ;; As a case in point, consider the problem of computing square roots. We can define the square-root function as ;; ;; $$\sqrt{x}\;\; = \;\;{\text{the}\;\; y}\;\;\text{such\ that}\;\;{y \geq 0}\;\;{\text{and}\;\; y^{2} = x.}$$ ;; ;; This describes a perfectly legitimate mathematical function. We could use it to recognize whether one number is the square root of another, or to derive facts about square roots in general. On the other hand, the definition does not describe a procedure. Indeed, it tells us almost nothing about how to actually find the square root of a given number. It will not help matters to rephrase this definition in pseudo-Lisp: ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt x) (the y (and (>= y 0) (= (square y) x)))) ;; - ;; This only begs the question. ;; ;; The contrast between function and procedure is a reflection of the general distinction between describing properties of things and describing how to do things, or, as it is sometimes referred to, the distinction between declarative knowledge and imperative knowledge. In mathematics we are usually concerned with declarative (what is) descriptions, whereas in computer science we are usually concerned with imperative (how to) descriptions.^[Declarative and imperative descriptions are intimately related, as indeed are mathematics and computer science. For instance, to say that the answer produced by a program is “correct” is to make a declarative statement about the program. 
There is a large amount of research aimed at establishing techniques for proving that programs are correct, and much of the technical difficulty of this subject has to do with negotiating the transition between imperative statements (from which programs are constructed) and declarative statements (which can be used to deduce things). In a related vein, an important current area in programming-language design is the exploration of so-called very high-level languages, in which one actually programs in terms of declarative statements. The idea is to make interpreters sophisticated enough so that, given “what is” knowledge specified by the programmer, they can generate “how to” knowledge automatically. This cannot be done in general, but there are important areas where progress has been made. We shall revisit this idea in [Chapter 4](Chapter-4.xhtml#Chapter-4).] ;; ;; How does one compute square roots? The most common way is to use Newton’s method of successive approximations, which says that whenever we have a guess $y$ for the value of the square root of a number $x$, we can perform a simple manipulation to get a better guess (one closer to the actual square root) by averaging $y$ with $x/y$.^[This square-root algorithm is actually a special case of Newton’s method, which is a general technique for finding roots of equations. The square-root algorithm itself was developed by Heron of Alexandria in the first century A.D. We will see how to express the general Newton’s method as a Lisp procedure in [1.3.4](1_002e3.xhtml#g_t1_002e3_002e4).] For example, we can compute the square root of 2 as follows. Suppose our initial guess is 1: ;; + attributes={"classes": ["example"], "id": ""} Guess Quotient Average 1 (2/1) = 2 ((2 + 1)/2) = 1.5 1.5 (2/1.5) ((1.3333 + 1.5)/2) = 1.3333 = 1.4167 1.4167 (2/1.4167) ((1.4167 + 1.4118)/2) = 1.4118 = 1.4142 1.4142 ... ... ;; - ;; Continuing this process, we obtain better and better approximations to the square root. 
;; ;; Now let’s formalize the process in terms of procedures. We start with a value for the radicand (the number whose square root we are trying to compute) and a value for the guess. If the guess is good enough for our purposes, we are done; if not, we must repeat the process with an improved guess. We write this basic strategy as a procedure: ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt-iter guess x) (if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) ;; - ;; A guess is improved by averaging it with the quotient of the radicand and the old guess: ;; + attributes={"classes": ["scheme"], "id": ""} (define (improve guess x) (average guess (/ x guess))) ;; - ;; where ;; + attributes={"classes": ["scheme"], "id": ""} (define (average x y) (/ (+ x y) 2)) ;; - ;; We also have to say what we mean by “good enough.” The following will do for illustration, but it is not really a very good test. (See [Exercise 1.7](#Exercise-1_002e7).) The idea is to improve the answer until it is close enough so that its square differs from the radicand by less than a predetermined tolerance (here 0.001):^[We will usually give predicates names ending with question marks, to help us remember that they are predicates. This is just a stylistic convention. As far as the interpreter is concerned, the question mark is just an ordinary character.] ;; + attributes={"classes": ["scheme"], "id": ""} (define (good-enough? guess x) (< (abs (- (square guess) x)) 0.001)) ;; - ;; Finally, we need a way to get started. For instance, we can always guess that the square root of any number is 1:^[Observe that we express our initial guess as 1.0 rather than 1. This would not make any difference in many Lisp implementations. MIT Scheme, however, distinguishes between exact integers and decimal values, and dividing two integers produces a rational number rather than a decimal. For example, dividing 10 by 6 yields 5/3, while dividing 10.0 by 6.0 yields 1.6666666666666667. 
(We will learn how to implement arithmetic on rational numbers in [2.1.1](2_002e1.xhtml#g_t2_002e1_002e1).) If we start with an initial guess of 1 in our square-root program, and $x$ is an exact integer, all subsequent values produced in the square-root computation will be rational numbers rather than decimals. Mixed operations on rational numbers and decimals always yield decimals, so starting with an initial guess of 1.0 forces all subsequent values to be decimals.] ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt x) (sqrt-iter 1.0 x)) ;; - ;; If we type these definitions to the interpreter, we can use `sqrt` just as we can use any procedure: ;; + attributes={"classes": ["scheme"], "id": ""} (sqrt 9) 3.00009155413138 (sqrt (+ 100 37)) 11.704699917758145 (sqrt (+ (sqrt 2) (sqrt 3))) 1.7739279023207892 (square (sqrt 1000)) 1000.000369924366 ;; - ;; The `sqrt` program also illustrates that the simple procedural language we have introduced so far is sufficient for writing any purely numerical program that one could write in, say, C or Pascal. This might seem surprising, since we have not included in our language any iterative (looping) constructs that direct the computer to do something over and over again. `Sqrt-iter`, on the other hand, demonstrates how iteration can be accomplished using no special construct other than the ordinary ability to call a procedure.^[Readers who are worried about the efficiency issues involved in using procedure calls to implement iteration should note the remarks on “tail recursion” in [1.2.1](1_002e2.xhtml#g_t1_002e2_002e1).] ;; ;; **Exercise 1.6:** Alyssa P. Hacker doesn’t see why `if` needs to be provided as a special form. “Why can’t I just define it as an ordinary procedure in terms of `cond`?” she asks. 
Alyssa’s friend Eva Lu Ator claims this can indeed be done, and she defines a new version of `if`: ;; + attributes={"classes": ["scheme"], "id": ""} (define (new-if predicate then-clause else-clause) (cond (predicate then-clause) (else else-clause))) ;; - ;; Eva demonstrates the program for Alyssa: ;; + attributes={"classes": ["scheme"], "id": ""} (new-if (= 2 3) 0 5) 5 (new-if (= 1 1) 0 5) 0 ;; - ;; Delighted, Alyssa uses `new-if` to rewrite the square-root program: ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt-iter guess x) (new-if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) ;; - ;; What happens when Alyssa attempts to use this to compute square roots? Explain. ;; ;; **Exercise 1.7:** The `good-enough?` test used in computing square roots will not be very effective for finding the square roots of very small numbers. Also, in real computers, arithmetic operations are almost always performed with limited precision. This makes our test inadequate for very large numbers. Explain these statements, with examples showing how the test fails for small and large numbers. An alternative strategy for implementing `good-enough?` is to watch how `guess` changes from one iteration to the next and to stop when the change is a very small fraction of the guess. Design a square-root procedure that uses this kind of end test. Does this work better for small and large numbers? ;; ;; **Exercise 1.8:** Newton’s method for cube roots is based on the fact that if $y$ is an approximation to the cube root of $x$, then a better approximation is given by the value ;; ;; $$\frac{{x/y^{2}} + 2y}{3}.$$ ;; ;; Use this formula to implement a cube-root procedure analogous to the square-root procedure. (In [1.3.4](1_002e3.xhtml#g_t1_002e3_002e4) we will see how to implement Newton’s method in general as an abstraction of these square-root and cube-root procedures.)
;; ;; ;; ## 1.1.8 Procedures as Black-Box Abstractions ;; ;; `Sqrt` is our first example of a process defined by a set of mutually defined procedures. Notice that the definition of `sqrt-iter` is *recursive*; that is, the procedure is defined in terms of itself. The idea of being able to define a procedure in terms of itself may be disturbing; it may seem unclear how such a “circular” definition could make sense at all, much less specify a well-defined process to be carried out by a computer. This will be addressed more carefully in [1.2](1_002e2.xhtml#g_t1_002e2). But first let’s consider some other important points illustrated by the `sqrt` example. ;; ;; Observe that the problem of computing square roots breaks up naturally into a number of subproblems: how to tell whether a guess is good enough, how to improve a guess, and so on. Each of these tasks is accomplished by a separate procedure. The entire `sqrt` program can be viewed as a cluster of procedures (shown in [Figure 1.2](#Figure-1_002e2)) that mirrors the decomposition of the problem into subproblems. ;; ;; ![](fig/chap1/Fig1.2.std.svg 419.64x187.56) ;; **Figure 1.2:** Procedural decomposition of the `sqrt` program. ;; ;; The importance of this decomposition strategy is not simply that one is dividing the program into parts. After all, we could take any large program and divide it into parts—the first ten lines, the next ten lines, the next ten lines, and so on. Rather, it is crucial that each procedure accomplishes an identifiable task that can be used as a module in defining other procedures. For example, when we define the `good-enough?` procedure in terms of `square`, we are able to regard the `square` procedure as a “black box.” We are not at that moment concerned with *how* the procedure computes its result, only with the fact that it computes the square. The details of how the square is computed can be suppressed, to be considered at a later time. 
Indeed, as far as the `good-enough?` procedure is concerned, `square` is not quite a procedure but rather an abstraction of a procedure, a so-called *procedural abstraction*. At this level of abstraction, any procedure that computes the square is equally good. ;; ;; Thus, considering only the values they return, the following two procedures for squaring a number should be indistinguishable. Each takes a numerical argument and produces the square of that number as the value.^[It is not even clear which of these procedures is a more efficient implementation. This depends upon the hardware available. There are machines for which the “obvious” implementation is the less efficient one. Consider a machine that has extensive tables of logarithms and antilogarithms stored in a very efficient manner.] ;; + attributes={"classes": ["scheme"], "id": ""} (define (square x) (* x x)) (define (square x) (exp (double (log x)))) (define (double x) (+ x x)) ;; - ;; So a procedure definition should be able to suppress detail. The users of the procedure may not have written the procedure themselves, but may have obtained it from another programmer as a black box. A user should not need to know how the procedure is implemented in order to use it. ;; ;; ;; ### Local names ;; ;; One detail of a procedure’s implementation that should not matter to the user of the procedure is the implementer’s choice of names for the procedure’s formal parameters. Thus, the following procedures should not be distinguishable: ;; + attributes={"classes": ["scheme"], "id": ""} (define (square x) (* x x)) (define (square y) (* y y)) ;; - ;; This principle—that the meaning of a procedure should be independent of the parameter names used by its author—seems on the surface to be self-evident, but its consequences are profound. The simplest consequence is that the parameter names of a procedure must be local to the body of the procedure. 
For example, we used `square` in the definition of `good-enough?` in our square-root procedure: ;; + attributes={"classes": ["scheme"], "id": ""} (define (good-enough? guess x) (< (abs (- (square guess) x)) 0.001)) ;; - ;; The intention of the author of `good-enough?` is to determine if the square of the first argument is within a given tolerance of the second argument. We see that the author of `good-enough?` used the name `guess` to refer to the first argument and `x` to refer to the second argument. The argument of `square` is `guess`. If the author of `square` used `x` (as above) to refer to that argument, we see that the `x` in `good-enough?` must be a different `x` than the one in `square`. Running the procedure `square` must not affect the value of `x` that is used by `good-enough?`, because that value of `x` may be needed by `good-enough?` after `square` is done computing. ;; ;; If the parameters were not local to the bodies of their respective procedures, then the parameter `x` in `square` could be confused with the parameter `x` in `good-enough?`, and the behavior of `good-enough?` would depend upon which version of `square` we used. Thus, `square` would not be the black box we desired. ;; ;; A formal parameter of a procedure has a very special role in the procedure definition, in that it doesn’t matter what name the formal parameter has. Such a name is called a *bound variable*, and we say that the procedure definition *binds* its formal parameters. The meaning of a procedure definition is unchanged if a bound variable is consistently renamed throughout the definition.^[The concept of consistent renaming is actually subtle and difficult to define formally. Famous logicians have made embarrassing errors here.] If a variable is not bound, we say that it is *free*. The set of expressions for which a binding defines a name is called the *scope* of that name. 
In a procedure definition, the bound variables declared as the formal parameters of the procedure have the body of the procedure as their scope. ;; ;; In the definition of `good-enough?` above, `guess` and `x` are bound variables but `<`, `-`, `abs`, and `square` are free. The meaning of `good-enough?` should be independent of the names we choose for `guess` and `x` so long as they are distinct and different from `<`, `-`, `abs`, and `square`. (If we renamed `guess` to `abs` we would have introduced a bug by *capturing* the variable `abs`. It would have changed from free to bound.) The meaning of `good-enough?` is not independent of the names of its free variables, however. It surely depends upon the fact (external to this definition) that the symbol `abs` names a procedure for computing the absolute value of a number. `Good-enough?` will compute a different function if we substitute `cos` for `abs` in its definition. ;; ;; ;; ### Internal definitions and block structure ;; ;; We have one kind of name isolation available to us so far: The formal parameters of a procedure are local to the body of the procedure. The square-root program illustrates another way in which we would like to control the use of names. The existing program consists of separate procedures: ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt x) (sqrt-iter 1.0 x)) (define (sqrt-iter guess x) (if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) (define (good-enough? guess x) (< (abs (- (square guess) x)) 0.001)) (define (improve guess x) (average guess (/ x guess))) ;; - ;; The problem with this program is that the only procedure that is important to users of `sqrt` is `sqrt`. The other procedures (`sqrt-iter`, `good-enough?`, and `improve`) only clutter up their minds. They may not define any other procedure called `good-enough?` as part of another program to work together with the square-root program, because `sqrt` needs it. 
The problem is especially severe in the construction of large systems by many separate programmers. For example, in the construction of a large library of numerical procedures, many numerical functions are computed as successive approximations and thus might have procedures named `good-enough?` and `improve` as auxiliary procedures. We would like to localize the subprocedures, hiding them inside `sqrt` so that `sqrt` could coexist with other successive approximations, each having its own private `good-enough?` procedure. To make this possible, we allow a procedure to have internal definitions that are local to that procedure. For example, in the square-root problem we can write ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt x) (define (good-enough? guess x) (< (abs (- (square guess) x)) 0.001)) (define (improve guess x) (average guess (/ x guess))) (define (sqrt-iter guess x) (if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) (sqrt-iter 1.0 x)) ;; - ;; Such nesting of definitions, called *block structure*, is basically the right solution to the simplest name-packaging problem. But there is a better idea lurking here. In addition to internalizing the definitions of the auxiliary procedures, we can simplify them. Since `x` is bound in the definition of `sqrt`, the procedures `good-enough?`, `improve`, and `sqrt-iter`, which are defined internally to `sqrt`, are in the scope of `x`. Thus, it is not necessary to pass `x` explicitly to each of these procedures. Instead, we allow `x` to be a free variable in the internal definitions, as shown below. Then `x` gets its value from the argument with which the enclosing procedure `sqrt` is called. This discipline is called *lexical scoping*.^[Lexical scoping dictates that free variables in a procedure are taken to refer to bindings made by enclosing procedure definitions; that is, they are looked up in the environment in which the procedure was defined. 
We will see how this works in detail in chapter 3 when we study environments and the detailed behavior of the interpreter.] ;; + attributes={"classes": ["scheme"], "id": ""} (define (sqrt x) (define (good-enough? guess) (< (abs (- (square guess) x)) 0.001)) (define (improve guess) (average guess (/ x guess))) (define (sqrt-iter guess) (if (good-enough? guess) guess (sqrt-iter (improve guess)))) (sqrt-iter 1.0)) ;; - ;; We will use block structure extensively to help us break up large programs into tractable pieces.^[Embedded definitions must come first in a procedure body. The management is not responsible for the consequences of running programs that intertwine definition and use.] The idea of block structure originated with the programming language Algol 60. It appears in most advanced programming languages and is an important tool for helping to organize the construction of large programs.
1.1_The_Elements_of_Programming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # lingam.utils

# In this example, we need to import `numpy`, `pandas`, and `lingam`.

# +
import numpy as np
import pandas as pd
import graphviz
import lingam
from lingam.utils import make_dot

np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
# -

# We define a utility function to draw the directed acyclic graph.

def make_prior_knowledge_graph(prior_knowledge_matrix):
    """Render a prior-knowledge matrix as a graphviz digraph.

    Positive entries become solid edges and negative entries become dashed
    edges, with entry (row, col) drawn as an edge from x{col} to x{row}.
    Returns the graphviz.Digraph (displayed inline by Jupyter).
    """
    graph = graphviz.Digraph(engine='dot')
    labels = [f'x{i}' for i in range(prior_knowledge_matrix.shape[0])]
    for label in labels:
        graph.node(label, label)
    # Handle both edge classes with one loop instead of two copied blocks;
    # solid edges get no explicit style attribute, matching the default.
    for mask, attrs in ((prior_knowledge_matrix > 0, {}),
                        (prior_knowledge_matrix < 0, {'style': 'dashed'})):
        rows, cols = np.where(mask)
        for dst, src in zip(rows, cols):
            graph.edge(labels[src], labels[dst], **attrs)
    return graph

# ## print_causal_directions

# We create test data consisting of 6 variables.

x3 = np.random.uniform(size=10000)
x0 = 3.0*x3 + np.random.uniform(size=10000)
x2 = 6.0*x3 + np.random.uniform(size=10000)
x1 = 3.0*x0 + 2.0*x2 + np.random.uniform(size=10000)
x5 = 4.0*x0 + np.random.uniform(size=10000)
x4 = 8.0*x0 - 1.0*x2 + np.random.uniform(size=10000)
X = pd.DataFrame(np.array([x0, x1, x2, x3, x4, x5]).T,
                 columns=['x0', 'x1', 'x2', 'x3', 'x4', 'x5'])
X.head()

# We run bootstrap and get the result.

model = lingam.DirectLiNGAM()
result = model.bootstrap(X, 100)

# We can get the ranking of the causal directions extracted by the `get_causal_direction_counts()` method.

cdc = result.get_causal_direction_counts(n_directions=8, min_causal_effect=0.01)

# Then, we import `lingam.utils` and check the results with the `print_causal_directions` function.

from lingam.utils import print_causal_directions

print_causal_directions(cdc, 100)

# We can also output by specifying the variable name.
labels = ['X1', 'X2', 'X3', 'X4', 'X5', 'X6']
print_causal_directions(cdc, 100, labels=labels)

# ## print_dags

# We use the bootstrap results above to get the ranking of the DAGs extracted.

dagc = result.get_directed_acyclic_graph_counts(n_dags=3, min_causal_effect=0.01)

# Then, we import `lingam.utils` and check the results with the `print_dagc` function.

from lingam.utils import print_dagc

print_dagc(dagc, 100)

labels = ['X1', 'X2', 'X3', 'X4', 'X5', 'X6']
print_dagc(dagc, 100, labels=labels)

# ## make_prior_knowledge

# In order to perform causal discovery using prior knowledge of causal relations, the `make_prior_knowledge` function that creates a prior knowledge matrix is provided. First, we import it from `lingam.utils`.

from lingam.utils import make_prior_knowledge

# ### Exogenous variables

# If the exogenous variable is known, specify the variable index in the `exogenous_variables` argument.

pk = make_prior_knowledge(n_variables=3, exogenous_variables=[0, 1])
print(pk)

make_prior_knowledge_graph(pk)

# ### Sink variables

# If the sink variable, such as the target variable of the predictive model, is already known, specify the variable index in the `sink_variables` argument.

pk = make_prior_knowledge(n_variables=3, sink_variables=[1, 2])
print(pk)

make_prior_knowledge_graph(pk)

# ### Directed path

# If the causal direction between variables is already known, specify the variable index pair in the `paths` argument.

pk = make_prior_knowledge(n_variables=3, paths=[[0, 1]])
print(pk)

make_prior_knowledge_graph(pk)

# ### No directed path

# If it is already known that there is no causal direction between variables, specify the variable index pair in the `no_paths` argument.

pk = make_prior_knowledge(n_variables=3, no_paths=[[0, 1]])
print(pk)

make_prior_knowledge_graph(pk)

# ### Mix all knowledge

# All prior knowledge can be specified at the same time.
# A prior knowledge matrix is created with priorities in the order of `exogenous_variables`, `sink_variables`, `paths`, `no_paths`.

pk = make_prior_knowledge(
    n_variables=4,
    exogenous_variables=[3],
    sink_variables=[0],
    paths=[[1, 0]],
    no_paths=[[3, 0]],
)

make_prior_knowledge_graph(pk)

# ## remove_effect

# If it is considered that there are hidden common causes in X0 (categorical variable) and X1 in the following DAG, we want to run causal discovery excluding the effects of X0 and X1.

# +
m = np.array([[0.0, 0.0, 0.0, 0.0, 0.0],
              [0.5, 0.0, 0.0, 0.0, 0.0],
              [4.0, -1.0, 0.0, 0.0, 0.0],
              [2.0, 0.0, -3.0, 0.0, 0.0],
              [0.0, 1.0, 2.0, 2.0, 0.0]])
make_dot(m)
# -

# Simulate the DAG above; x0 is drawn as a binary (categorical) variable.
# x0 = np.random.uniform(size=10000)
x0 = np.random.randint(2, size=10000)
x1 = 0.5*x0 + np.random.uniform(size=10000)
x2 = 4.0*x0 - 1.0*x1 + np.random.uniform(size=10000)
x3 = 2.0*x0 - 3.0*x2 + np.random.uniform(size=10000)
x4 = 1.0*x1 + 2.0*x2 + 2.0*x3 + np.random.uniform(size=10000)
X = pd.DataFrame(np.array([x0, x1, x2, x3, x4]).T,
                 columns=['x0', 'x1', 'x2', 'x3', 'x4'])
X.head()

# In this case, we can import `lingam.utils` and use the `remove_effect` function.

from lingam.utils import remove_effect

remove_features = [0, 1]
X_removed = remove_effect(X, remove_features=remove_features)

# If we run DirectLiNGAM on the dataset that excludes the effects of X0 and X1, we can get the following results:

model = lingam.DirectLiNGAM()
model.fit(X_removed)
print(model.causal_order_)
print(model.adjacency_matrix_)

make_dot(model.adjacency_matrix_)

# To add causal coefficients from variables whose effects have been removed, regression for each variable may be performed.
# +
from sklearn import linear_model
from sklearn.utils import check_array


def get_reg_coef(X, features, target, gamma=1.0):
    """Adaptive-lasso style estimate of coefficients of `features` on `target`.

    An ordinary least-squares fit supplies per-feature weights
    (|coef| ** gamma); a BIC-tuned LassoLarsIC is then fit on the weighted
    features, and its coefficients are rescaled back to the original feature
    scale. Returns one coefficient per entry of `features`.
    """
    data = np.copy(check_array(X))
    ols = linear_model.LinearRegression()
    ols.fit(data[:, features], data[:, target])
    weight = np.power(np.abs(ols.coef_), gamma)
    lasso = linear_model.LassoLarsIC(criterion='bic')
    lasso.fit(data[:, features] * weight, data[:, target])
    return lasso.coef_ * weight
# -

# Re-estimate the coefficients from the removed variables (x0, x1) into each
# remaining row; the trailing underscores discard coefficients for variables
# whose edges are already present in the discovered adjacency matrix.
B = model.adjacency_matrix_.copy()
B[2, 0], B[2, 1] = get_reg_coef(X, [0, 1], 2)
B[3, 0], B[3, 1], _ = get_reg_coef(X, [0, 1, 2], 3)
B[4, 0], B[4, 1], _, _ = get_reg_coef(X, [0, 1, 2, 3], 4)
print(B)

make_dot(B, lower_limit=0.1)

# To align the display, the causal direction of small coefficients is excluded.
examples/utils.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lets know your Cars!!!
#
# This small application lets you identify what is the brand of the car in the image. We have used about 15+ commonly used cars in the German market and trained our model on them. Please upload a picture and let us know if we predicted the car right for you. There is always a scope for improvement so if you find that the application is certainly wrong in a particular brand of cars please let us know. :)

# Startup marker so the console shows the app has begun loading.
print("*" * 40)

from fastai2.vision.all import *
from fastai2.vision.widgets import *

# Load the exported classifier from the notebook's own directory (CPU inference).
path = Path()
learn_inf = load_learner(path/'export.pkl', cpu=True)

# Widgets: file upload, an output area for the thumbnail, and a prediction label.
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()

def on_click(change):
    """Classify the most recently uploaded image and display the result.

    Shows a 512px thumbnail of the image in the output area and writes the
    predicted class and its probability to the label widget.

    Bug fix: previously, clicking "Classify" before uploading any file
    raised IndexError on `btn_upload.data[-1]`; now we prompt the user
    instead of crashing.
    """
    if not btn_upload.data:
        lbl_pred.value = 'Please upload an image first.'
        return
    img = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl:
        display(img.to_thumb(512, 512))
    pred, pred_idx, probs = learn_inf.predict(img)
    lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f} '

btn_run = widgets.Button(description='Classify')
btn_run.on_click(on_click)

display(VBox([widgets.Label('Lets know which Car you own!!!'),
              btn_upload, btn_run, out_pl, lbl_pred]))
Cars1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Author: satishchitigori

# ## Task 1: Prediction using Supervised ML

# ### To Predict the percentage of marks of the students based on the number of hours they studied
#
# #### In supervised learning, models are trained using labelled dataset, where the model learns about each type of data. Once the training process is completed, the model is tested on the basis of test data (a subset of the training set), and then it predicts the output.

# ### 1. Importing the Libraries and the data

# #### Pandas to import and analyze data, NumPy to perform the multi-dimensional operation, and matplotlib to perform graphical plot.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Consistency fix: import every evaluation metric used in this notebook here,
# instead of importing `mean_absolute_error` at the top and then re-importing
# `sklearn.metrics` again in the evaluation section.
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

# ### Importing the data from .csv file

data = pd.read_csv('http://bit.ly/w-data')

# Viewing the data that we imported to pandas dataframe object
data

# ### 2. Viewing and Describing the data
#
# Now we view the Head and Tail of the data using head() and tail() respectively.

data.head()

data.tail()

# Checking the dimensions/shape of the dataset using shape.
data.shape

# Viewing Column names of the dataset using columns
data.columns

# View datatypes of each column in the dataset using dtype.
data.dtypes

# Describing the data as basic statistics using describe()
# data.describe()
data.info()

# ## 3. Checking the data for inconsistencies

data.isnull().sum()

# ## 4. Data Visualization

# Plotting the distribution of scores
sns.set_style('darkgrid')
data.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage', size=20)
plt.xlabel('Hours Studied', size=15)
plt.ylabel('Percentage Score', size=15)
plt.show()

# #### From the graph we can safely assume a positive linear relation between the number of hours studied and percentage of score.

# ### Correlation

data.corr()

# ### Regression Plot

sns.set_style('darkgrid')
sns.regplot(x=data['Hours'], y=data['Scores'])
plt.xlabel('Hours studied')
plt.ylabel('Percentage Score')
plt.title('Regression Plot', size=20)
plt.show()
print(data.corr())

# ## 5. Data Preprocessing

# #### Dividing the data into attributes and labels

X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# No of Hours Studied
X

# Scores obtained
y

# ## 6. Training the Model

# +
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

regressor = LinearRegression()
regressor.fit(X_train.reshape(-1, 1), y_train)

print("Training complete.")
# -

# ### Plotting the Line of regression
# Since our model is trained now, it's time to visualize the best-fit line of regression.

# +
# Plotting the regression line
line = regressor.coef_*X + regressor.intercept_

# Plotting for the test data
plt.scatter(X, y)
plt.plot(X, line, color='red');
plt.show()
# -

# ### Making Predictions
# Now that we have trained our algorithm, it's time to test the model by making some predictions.
#
# For this we will use our test-set data

# Testing data
print(X_test)

# Model Prediction
y_pred = regressor.predict(X_test)

# ### 7. Comparing Actual result to the Predicted Model result

# Comparing Actual vs Predicted
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df

# Estimating training and test score
print("Training Score:", regressor.score(X_train, y_train))
print("Test Score:", regressor.score(X_test, y_test))

# +
# Plotting the Bar graph to depict the difference between the actual and predicted value

df.plot(kind='bar', figsize=(5, 5))
plt.grid(which='major', linewidth='0.5', color='red')
plt.grid(which='minor', linewidth='0.5', color='blue')
plt.show()
# -

# ## Testing

# Testing the model with our own data
hours = 9.25
test = np.array([hours])
test = test.reshape(-1, 1)
own_pred = regressor.predict(test)
print("No of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))

# ## 8. Evaluating the model

# The final step is to evaluate the performance of algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. Here different errors have been calculated to compare the model performance and predict the accuracy.

print('Mean Absolute Error:', mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(mean_squared_error(y_test, y_pred)))
print('R-2:', r2_score(y_test, y_pred))

# R-2 gives the score of model fit and in this case we have R-2 = 0.9454906892105355 which is actually a great score for this model.

# ## Conclusion

# ### So, according to the regression model if a student studies for 9.25 hours a day he/she is likely to score 93.69 marks.

# ## Thank you....
Task1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Change directory to wherever you are housing this project

import sys
sys.path.append("C:/Users/ahaberlie/Documents/GitHub/MCS/")

# # Download example radar data
#
# Download data.tar.gz from https://tiny.cc/ + the full manuscript ID for part 1 (case sensitive), and untar and ungzip this into the directory "MCS/mcs" and make sure the output folder is "data" and it has a folder named "radar_data". Examine the code to see the proper location if you are getting an error (i.e., "../data/radar_data/")

import numpy as np
# NOTE(review): scipy.misc.imread was deprecated in SciPy 1.0 and removed in
# SciPy 1.2 — this notebook requires SciPy < 1.2 (imageio.imread is the usual
# replacement); confirm the pinned environment.
from scipy.misc import imread

import warnings
warnings.filterwarnings("ignore")

# # Set up plotting utilities and values

# +
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from mcs.utils.colors import radar_colormap
import matplotlib.patheffects as pe

# %matplotlib inline

plt.rcParams['figure.figsize'] = 10, 10

# Reflectivity colormap with 5-dBZ bins spanning 0-75 dBZ.
cmap = radar_colormap()
classes = list(range(0, 80, 5))
norm = BoundaryNorm(classes, ncolors=cmap.N, clip=True)
# -

# # Set up mapping utilities

# +
import cartopy
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader

from_proj = ccrs.PlateCarree()
to_proj = ccrs.AlbersEqualArea(central_longitude=-95, central_latitude=38.0000)


def draw_states(ax):
    """Overlay U.S. state borders (Natural Earth 50m shapefile) on `ax`."""
    shapename = 'admin_1_states_provinces_lakes_shp'
    states_shp = shpreader.natural_earth(resolution='50m',
                                         category='cultural',
                                         name=shapename)
    reader = shpreader.Reader(states_shp)
    for state, info in zip(reader.geometries(), reader.records()):
        if info.attributes['admin'] == 'United States of America':
            ax.add_geometries([state], ccrs.PlateCarree(),
                              facecolor='None', edgecolor='k')
# -

# # Get lat/lon coordinates for radar data

# +
from mcs.utils.mapping_help import *

lons, lats = get_NOWrad_conus_lon_lat()

lons, lats = np.meshgrid(lons, lats)
# -

# # Plot radar image

# +
img = imread("../data/radar_data/BREF_970507_1100.png", mode='P')

to_proj = ccrs.AlbersEqualArea(central_longitude=-95, central_latitude=38.0000)

ax = plt.subplot(2, 2, 1, projection=to_proj)
ax.set_extent([-98.5, -92.5, 38.1, 46])
draw_states(ax)

# Palette indices encode reflectivity / 5, so scale back to dBZ for display.
mmp = ax.pcolormesh(lons, lats, img*5, cmap=cmap, norm=norm, transform=from_proj)
plt.colorbar(mmp, ax=ax, shrink=0.4, pad=0.01)
plt.title("Reflectivity valid at 1100 UTC on 7 May 1997")
# -

# # Slice segmentation
#
# Set convective region search radius (CRSR) to 24 km
#
# Set stratiform search radius (SSR) to 96 km
#
# (See function documentation for more details on the following value conversions)

# +
from mcs.utils.segmentation import *

conv_buffer = 4    # 4 * 2 * 3 = 24 km
strat_buffer = 16  # 16 * 2 * 3 = 96 km
# -

# Find pixels associated with intense deep convective cells

intense_cells = get_intense_cells(img)

# Merge intense cells within a given radius

# +
merged_lines = connect_intense_cells(intense_cells, conv_buffer)

long_lines = find_lines(img, conv_buffer)
# -

# Connect stratiform pixels to merged lines within a given radius

stratiform_merge = connect_stratiform_to_lines(find_lines(img, conv_buffer),
                                               img >= 4, strat_buffer)

# # Illustration of each step

# +
img1 = np.ma.masked_where(img < 4, img)

plt.rcParams['figure.figsize'] = 30, 30

to_proj = ccrs.AlbersEqualArea(central_longitude=-95, central_latitude=38.0000)

titles = ['Intense Cells (Black Outlines)',
          'Intense Cells Merged (Black Outlines)',
          'Slices (Black Outlines)']

# One panel per stage of the segmentation pipeline.
for panel, stage in enumerate([intense_cells, merged_lines, stratiform_merge]):
    ax = plt.subplot(1, 3, panel+1, projection=to_proj)
    ax.set_extent([-98.5, -92.5, 38.1, 46])
    ax.pcolormesh(lons, lats, quantize(img1), vmin=0, vmax=3,
                  cmap='Greys', transform=from_proj)
    ax.contour(lons, lats, 1*(stage > 0), colors=['k',], linewidths=.5,
               linestyles='solid', transform=from_proj, zorder=5)
    draw_states(ax)
    scale_bar(ax, to_proj, 100)
    leg = ax.legend([mpatches.Patch(color=plt.cm.Greys(1/3)),
                     mpatches.Patch(color=plt.cm.Greys(2/3)),
                     mpatches.Patch(color=plt.cm.Greys(3/3))],
                    ['≥ 20', '≥ 40', '≥ 50'], loc=4)
    ax.set_title(titles[panel], fontsize=15)
    leg.set_title("dBZ", prop={'size': 'x-large'})
# -

# # Absurd example

# +
conv_buffer = 50   # 50 * 2 * 3 = 300 km
strat_buffer = 50  # 50 * 2 * 3 = 300 km

intense_cells = get_intense_cells(img)
merged_lines = connect_intense_cells(intense_cells, conv_buffer)
long_lines = find_lines(img, conv_buffer)
stratiform_merge = connect_stratiform_to_lines(find_lines(img, conv_buffer),
                                               img >= 4, strat_buffer)

img1 = np.ma.masked_where(img < 4, img)

plt.rcParams['figure.figsize'] = 30, 30

to_proj = ccrs.AlbersEqualArea(central_longitude=-95, central_latitude=38.0000)

titles = ['Intense Cells (Black Outlines)',
          'Intense Cells Merged (Black Outlines)',
          'Slices (Black Outlines)']

# Same three-panel plot, but with deliberately oversized search radii and a
# wider map extent to show how the segmentation degenerates.
for panel, stage in enumerate([intense_cells, merged_lines, stratiform_merge]):
    ax = plt.subplot(1, 3, panel+1, projection=to_proj)
    ax.set_extent([-105.5, -90.5, 32.1, 48])
    ax.pcolormesh(lons, lats, quantize(img1), vmin=0, vmax=3,
                  cmap='Greys', transform=from_proj)
    ax.contour(lons, lats, 1*(stage > 0), colors=['k',], linewidths=.5,
               linestyles='solid', transform=from_proj, zorder=5)
    draw_states(ax)
    scale_bar(ax, to_proj, 100)
    leg = ax.legend([mpatches.Patch(color=plt.cm.Greys(1/3)),
                     mpatches.Patch(color=plt.cm.Greys(2/3)),
                     mpatches.Patch(color=plt.cm.Greys(3/3))],
                    ['≥ 20', '≥ 40', '≥ 50'], loc=4)
    ax.set_title(titles[panel], fontsize=15)
    leg.set_title("dBZ", prop={'size': 'x-large'})
# -
mcs/notebooks/Slice_segmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Pymaceuticals Inc.
# ---
#
# ### Analysis
# * Overall, it is clear that Capomulin outperforms all other treatment options in the screen.
# * Capomulin was the only treatment to reduce tumor volume. It achieved a 19% reduction in tumor volume over the course of the trial, whereas all other drugs were correlated with an increase in tumor volume by roughly 40-50%.
# * Capomulin greatly limited the spread of the tumor compared to other treatment options. By study end, the average mouse on Capomulin had only 1 new metastatic site, as opposed to the average 2-3 found in mice of other treatment options.
# * Lastly, mice on the Capomulin treatment had the highest survival rate of any treatment in the screen. Over 90% of mice treated by Capomulin survived the full duration of the trial, compared to only 35-45% of mice on other treatment options.
# + # Dependencies and Setup # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np # Hide warning messages in notebook import warnings warnings.filterwarnings('ignore') # File to Load (Remember to Change These) mouse_drug_data_to_load = "data/mouse_drug_data.csv" clinical_trial_data_to_load = "data/clinicaltrial_data.csv" # Read the Mouse and Drug Data and the Clinical Trial Data mouse_drug_data = pd.read_csv(mouse_drug_data_to_load) clinical_data = pd.read_csv(clinical_trial_data_to_load) # Combine the data into a single dataset clinical_data_complete = pd.merge(clinical_data, mouse_drug_data, how="left", on=["Mouse ID", "Mouse ID"]) # Display the data table for preview clinical_data_complete.head() # - # ## Tumor Response to Treatment # + # Store the Mean Tumor Volume Data Grouped by Drug and Timepoint tumor_vols_mean = clinical_data_complete.groupby(["Drug", "Timepoint"]).mean()["Tumor Volume (mm3)"] # Convert to DataFrame tumor_vols_mean = pd.DataFrame(tumor_vols_mean) # Flatten the column headings tumor_vols_mean = tumor_vols_mean.reset_index() # Preview DataFrame tumor_vols_mean # + # Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint tumor_vols_sem = clinical_data_complete.groupby(["Drug", "Timepoint"]).sem()["Tumor Volume (mm3)"] # Convert to DataFrame tumor_vols_sem = pd.DataFrame(tumor_vols_sem) # Preview DataFrame tumor_vols_sem.head().reset_index() # + # Minor Data Munging to Re-Format the Data Frames tumor_vols_mean = tumor_vols_mean.reset_index() tumor_vols_pivot_mean = tumor_vols_mean.pivot(index="Timepoint", columns="Drug")["Tumor Volume (mm3)"] tumor_vols_sem = tumor_vols_sem.reset_index() tumor_vols_pivot_sem = tumor_vols_sem.pivot(index="Timepoint", columns="Drug")["Tumor Volume (mm3)"] # Preview that Reformatting worked tumor_vols_pivot_mean.head() # + # Generate the Plot (with Error Bars) plt.errorbar(tumor_vols_pivot_mean.index, tumor_vols_pivot_mean["Capomulin"], 
yerr=tumor_vols_pivot_sem["Capomulin"], color="r", marker="o", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(tumor_vols_pivot_mean.index, tumor_vols_pivot_mean["Infubinol"], yerr=tumor_vols_pivot_sem["Infubinol"], color="b", marker="^", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(tumor_vols_pivot_mean.index, tumor_vols_pivot_mean["Ketapril"], yerr=tumor_vols_pivot_sem["Ketapril"], color="g", marker="s", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(tumor_vols_pivot_mean.index, tumor_vols_pivot_mean["Placebo"], yerr=tumor_vols_pivot_sem["Placebo"], color="k", marker="d", markersize=5, linestyle="dashed", linewidth=0.50) plt.title("Tumor Response to Treatment") plt.ylabel("Tumor Volume (mm3)") plt.xlabel("Time (Days)") plt.grid(True) plt.legend(loc="best", fontsize="small", fancybox=True) # Save the Figure plt.savefig("analysis/Fig1.png") # Show the Figure plt.show() # - # ## Metastatic Response to Treatment # + # Store the Mean Met. Site Data Grouped by Drug and Timepoint met_sites_mean = clinical_data_complete.groupby(["Drug", "Timepoint"]).mean()["Metastatic Sites"] # Convert to DataFrame met_sites_mean = pd.DataFrame(met_sites_mean) # Preview DataFrame met_sites_mean.head() # + # Store the Standard Error associated with Met. 
Sites Grouped by Drug and Timepoint met_sites_sem = clinical_data_complete.groupby(["Drug", "Timepoint"]).sem()["Metastatic Sites"] # Convert to DataFrame met_sites_sem = pd.DataFrame(met_sites_sem) # Preview DataFrame met_sites_sem.head() # + # Minor Data Munging to Re-Format the Data Frames met_sites_mean = met_sites_mean.reset_index() met_sites_pivot_mean = met_sites_mean.pivot(index="Timepoint", columns="Drug")["Metastatic Sites"] met_sites_sem = met_sites_sem.reset_index() met_sites_pivot_sem = met_sites_sem.pivot(index="Timepoint", columns="Drug")["Metastatic Sites"] # Preview that Reformatting worked met_sites_pivot_mean.head() # + # Generate the Plot (with Error Bars) plt.errorbar(met_sites_pivot_mean.index, met_sites_pivot_mean["Capomulin"], yerr=met_sites_pivot_sem["Capomulin"], color="r", marker="o", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(met_sites_pivot_mean.index, met_sites_pivot_mean["Infubinol"], yerr=met_sites_pivot_sem["Infubinol"], color="b", marker="^", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(met_sites_pivot_mean.index, met_sites_pivot_mean["Ketapril"], yerr=met_sites_pivot_sem["Ketapril"], color="g", marker="s", markersize=5, linestyle="dashed", linewidth=0.50) plt.errorbar(met_sites_pivot_mean.index, met_sites_pivot_mean["Placebo"], yerr=met_sites_pivot_sem["Placebo"], color="k", marker="d", markersize=5, linestyle="dashed", linewidth=0.50) plt.title("Metastatic Spread During Treatment") plt.ylabel("Met. 
Sites") plt.xlabel("Treatment Duration (Days)") plt.grid(True) plt.legend(loc="best", fontsize="small", fancybox=True) # Save the Figure plt.savefig("analysis/Fig2.png") # Show the Figure plt.show() # - # ## Survival Rates # + # Store the Count of Mice Grouped by Drug and Timepoint (W can pass any metric) survival_count = clinical_data_complete.groupby(["Drug", "Timepoint"]).count()["Tumor Volume (mm3)"] # Convert to DataFrame survival_count = pd.DataFrame({"Mouse Count": survival_count}) # Preview DataFrame survival_count.head().reset_index() # + # Minor Data Munging to Re-Format the Data Frames survival_count = survival_count.reset_index() survival_count_pivot = survival_count.pivot(index="Timepoint", columns="Drug")["Mouse Count"] # Preview the Data Frame survival_count_pivot.head() # + # Generate the Plot (Accounting for percentages) plt.plot(100 * survival_count_pivot["Capomulin"] / 25, "ro", linestyle="dashed", markersize=5, linewidth=0.50) plt.plot(100 * survival_count_pivot["Infubinol"] / 25, "b^", linestyle="dashed", markersize=5, linewidth=0.50) plt.plot(100 * survival_count_pivot["Ketapril"] / 25, "gs", linestyle="dashed", markersize=5, linewidth=0.50) plt.plot(100 * survival_count_pivot["Placebo"] / 25 , "kd", linestyle="dashed", markersize=6, linewidth=0.50) plt.title("Survival During Treatment") plt.ylabel("Survival Rate (%)") plt.xlabel("Time (Days)") plt.grid(True) plt.legend(loc="best", fontsize="small", fancybox=True) # Save the Figure plt.savefig("analysis/Fig3.png") # Show the Figure plt.show() # - # ## Summary Bar Graph # + # Calculate the percent changes for each drug tumor_pct_change = 100 * (tumor_vols_pivot_mean.iloc[-1] - tumor_vols_pivot_mean.iloc[0]) / tumor_vols_pivot_mean.iloc[0] tumor_pct_change_sem = 100 * (tumor_vols_pivot_sem.iloc[-1] - tumor_vols_pivot_sem.iloc[0]) / tumor_vols_pivot_sem.iloc[0] # Display the data to confirm tumor_pct_change # + # Store all Relevant Percent Changes into a Tuple pct_changes = 
(tumor_pct_change["Capomulin"], tumor_pct_change["Infubinol"], tumor_pct_change["Ketapril"], tumor_pct_change["Placebo"]) # Splice the data between passing and failing drugs fig, ax = plt.subplots() ind = np.arange(len(pct_changes)) width = 1 rectsPass = ax.bar(ind[0], pct_changes[0], width, color='green') rectsFail = ax.bar(ind[1:], pct_changes[1:], width, color='red') # Orient widths. Add labels, tick marks, etc. ax.set_ylabel('% Tumor Volume Change') ax.set_title('Tumor Change Over 45 Day Treatment') ax.set_xticks(ind + 0.5) ax.set_xticklabels(('Capomulin', 'Infubinol', 'Ketapril', 'Placebo')) ax.set_autoscaley_on(False) ax.set_ylim([-30,70]) ax.grid(True) # Use functions to label the percentages of changes def autolabelFail(rects): for rect in rects: height = rect.get_height() ax.text(rect.get_x() + rect.get_width()/2., 3, '%d%%' % int(height), ha='center', va='bottom', color="white") def autolabelPass(rects): for rect in rects: height = rect.get_height() ax.text(rect.get_x() + rect.get_width()/2., -8, '-%d%% ' % int(height), ha='center', va='bottom', color="white") # Call functions to implement the function calls autolabelPass(rectsPass) autolabelFail(rectsFail) # Save the Figure fig.savefig("analysis/Fig4.png") # Show the Figure fig.show() # -
Pymaceuticals_Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import os,sys sys.path.append('../..') sys.path.append('../../../RL_lib/Agents/PPO') sys.path.append('../../../RL_lib/Utils') # %load_ext autoreload # %load_ext autoreload # %autoreload 2 # %matplotlib nbagg import os print(os.getcwd()) # + language="html" # <style> # .output_wrapper, .output { # height:auto !important; # max-height:1000px; /* your desired max-height here */ # } # .output_scroll { # box-shadow:none !important; # webkit-box-shadow:none !important; # } # </style> # - # # Load Policy # + from env_mdr import Env from reward_terminal_mdr import Reward import env_utils as envu import attitude_utils as attu from dynamics_model import Dynamics_model from lander_model import Lander_model from ic_gen import Landing_icgen from agent_mdr2 import Agent from policy_ppo import Policy from value_function import Value_function from utils import Mapminmax,Logger,Scaler from flat_constraint import Flat_constraint from glideslope_constraint import Glideslope_constraint from attitude_constraint import Attitude_constraint from thruster_model import Thruster_model logger = Logger() dynamics_model = Dynamics_model() attitude_parameterization = attu.Quaternion_attitude() thruster_model = Thruster_model() thruster_model.max_thrust = 5000 thruster_model.min_thrust = 1000 lander_model = Lander_model(thruster_model, attitude_parameterization=attitude_parameterization, apf_v0=70, apf_atarg=15., apf_tau2=100.) lander_model.get_state_agent = lander_model.get_state_agent_tgo_alt reward_object = Reward(tracking_bias=0.01,tracking_coeff=-0.01, fuel_coeff=-0.05, debug=False, landing_coeff=10.) 
glideslope_constraint = Glideslope_constraint(gs_limit=-1.0) shape_constraint = Flat_constraint() attitude_constraint = Attitude_constraint(attitude_parameterization, attitude_penalty=-100,attitude_coeff=-10, attitude_limit=(10*np.pi, np.pi/2-np.pi/16, np.pi/2-np.pi/16)) env = Env(lander_model,dynamics_model,logger, reward_object=reward_object, glideslope_constraint=glideslope_constraint, shape_constraint=shape_constraint, attitude_constraint=attitude_constraint, tf_limit=120.0,print_every=10) env.ic_gen = Landing_icgen(mass_uncertainty=0.05, g_uncertainty=(0.0,0.0), attitude_parameterization=attitude_parameterization, l_offset=0., adapt_apf_v0=True, inertia_uncertainty_diag=100.0, inertia_uncertainty_offdiag=10.0, downrange = (0,2000 , -70, -10), crossrange = (-1000,1000 , -30,30), altitude = (2300,2400,-90,-70), yaw = (-np.pi/8, np.pi/8, 0.0, 0.0) , pitch = (np.pi/4-np.pi/8, np.pi/4+np.pi/16, -0.01, 0.01), roll = (-np.pi/8, np.pi/8, -0.01, 0.01)) env.ic_gen.show() obs_dim = 12 act_dim = 4 policy = Policy(obs_dim,act_dim,kl_targ=0.001,epochs=20, beta=0.1, shuffle=True, servo_kl=True) import utils fname = "optimize_4km" input_normalizer = utils.load_run(policy,fname) print(input_normalizer) # - # # Test 800m Divert policy.test_mode=True env.lander.divert=(800,800,1500) env.lander.apf_pot=env.lander.apf_pot4 env.ic_gen = Landing_icgen(mass_uncertainty=0.05, g_uncertainty=(0.0,0.0), adapt_apf_v0=True, attitude_parameterization=attitude_parameterization, downrange = (0,2000 , -70, -10), crossrange = (-1000,1000 , -30,30), altitude = (2300,2400,-90,-70), yaw = (-np.pi/8, np.pi/8, 0.0, 0.0) , pitch = (np.pi/4-np.pi/8, np.pi/4+np.pi/16, -0.0, 0.0), roll = (-np.pi/8, np.pi/8, -0.0, 0.0), noise_u=100*np.ones(3), noise_sd=100*np.ones(3)) env.test_policy_batch(policy,input_normalizer,5000,print_every=100) policy.close_sess()
AAS-18-290_6DOF_journal/Run/Run_4km_terminal/test_4km_2100_divert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a id="title_ID"></a>
# # JWST Pipeline Validation Testing Notebook: spec2, flat_field step
#
# <span style="color:red"> **Instruments Affected**</span>: NIRSpec
#
# Tested on CV3 data
#
# ### Table of Contents
# <div style="text-align: left">
#
# <br> [Imports](#imports_ID) <br> [Introduction](#intro_ID) <br> [Testing Data Set](#data_ID) <br> [Run the JWST pipeline and flat_field validation tests](#pipeline_ID): [FS Full-Frame test](#FULLFRAME), [FS ALLSLITS test](#ALLSLITS), [MOS test](#MOS), [IFU test](#IFU) <br> [About This Notebook](#about_ID)<br> [Results](#results) <br>
#
# </div>

# <a id="imports_ID"></a>
# # Imports
# The library imports relevant to this notebook are already taken care of by importing PTT.
#
# * astropy.io for opening fits files
# * jwst.module.PipelineStep is the pipeline step being tested
# * matplotlib.pyplot.plt to generate plots
#
# NOTE: This notebook assumes that the pipeline version to be tested is already installed and its environment is activated.
#
# To be able to run this notebook you need to install nptt.
#
# If all goes well you will be able to import PTT.
#
# [Top of Page](#title_ID)

# +
# Create a temporary directory to hold notebook output, and change the working directory to that directory.
import shutil import os from tempfile import TemporaryDirectory use_tempdir = True if use_tempdir: # Create temporary directory data_dir = TemporaryDirectory() # Save original directory orig_dir = os.getcwd() # Move to new directory os.chdir(data_dir.name) # For info, print out where the script is running print("Running in {}".format(os.getcwd())) # + # Choose CRDS cache location use_local_crds_cache = False crds_cache_tempdir = False crds_cache_notebook_dir = True crds_cache_home = False crds_cache_custom_dir = False crds_cache_dir_name = "" if use_local_crds_cache: if crds_cache_tempdir: os.environ['CRDS_PATH'] = os.path.join(os.getcwd(), "crds") elif crds_cache_notebook_dir: try: os.environ['CRDS_PATH'] = os.path.join(orig_dir, "crds") except Exception as e: os.environ['CRDS_PATH'] = os.path.join(os.getcwd(), "crds") elif crds_cache_home: os.environ['CRDS_PATH'] = os.path.join(os.environ['HOME'], 'crds', 'cache') elif crds_cache_custom_dir: os.environ['CRDS_PATH'] = crds_cache_dir_name # + nbpresent={"id": "45177853-942e-4949-9e30-f544d70ef5f4"} import warnings import psutil from astropy.io import fits # Only print a DeprecationWarning the first time it shows up, not every time. 
with warnings.catch_warnings(): warnings.simplefilter("once", category=DeprecationWarning) import jwst from jwst.pipeline.calwebb_detector1 import Detector1Pipeline from jwst.assign_wcs.assign_wcs_step import AssignWcsStep from jwst.msaflagopen.msaflagopen_step import MSAFlagOpenStep from jwst.extract_2d.extract_2d_step import Extract2dStep from jwst.srctype.srctype_step import SourceTypeStep from jwst.wavecorr.wavecorr_step import WavecorrStep from jwst.flatfield.flat_field_step import FlatFieldStep # The latest version of NPTT is installed in the requirements text file at: # /jwst_validation_notebooks/environment.yml # import NPTT import nirspec_pipe_testing_tool as nptt # To get data from Artifactory from ci_watson.artifactory_helpers import get_bigdata # + # Print versions used for the pipeline and NPTT pipeline_version = jwst.__version__ nptt_version = nptt.__version__ print("Using jwst pipeline version: ", pipeline_version) print("Using NPTT version: ", nptt_version) # - # <a id="intro_ID"></a> # # Test Description # # The test is a direct comparison of the result of our implementation of the flat field step algorithm versus the pipeline's implementation, i.e.: # difference = absolute( Flat_nirspec_implementation - Flat_pipeline) # # We expect the absolute difference to be of the order of 1x10^-6. We set this threshold by assuming that the difference should yield computer precision 1x10^-7 numbers. We then relaxed one order of magnitude due to interpolation differences in the algorithms. # # For the test to be considered PASSED, every single slit (for FS data), slitlet (for MOS data) or slice (for IFU data) in the input file has to pass. If there is any failure, the whole test will be considered as FAILED. # # The code for this test for Fixed Slits (FS) can be obtained from: https://github.com/spacetelescope/nirspec_pipe_testing_tool/blob/master/nirspec_pipe_testing_tool/calwebb_spec2_pytests/auxiliary_code/flattest_fs.py. 
For Multi Object Spectroscopy (MOS), the code is in the same repository but is named ```flattest_mos.py```, and for Integral Field Unit (IFU) data, the test is named ```flattest_ifu.py```.
#
# The input file is defined in the variable ```input_file``` (see section [Testing Data Set and Variable Setup](#data_ID)).
#
# Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/flatfield/main.html
#
# Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/flatfield
#
#
# ### Test Results
#
# If the test **PASSED** this means that all slits, slitlets, or slices individually passed the test. However, if only one individual slit (for FS data), slitlet (for MOS data) or slice (for IFU data) test failed, the whole test will be reported as **FAILED**.
#
#
# ### Calibration WG Requested Algorithm:
#
# A short description and link to the page: https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Spectral+Flat+Field+Correction
#
#
# ### Defining Term
# Acronyms used in this notebook:
#
# pipeline: calibration pipeline
#
# spec2: spectroscopic calibration pipeline level 2b
#
# PTT: NIRSpec pipeline testing tool (https://github.com/spacetelescope/nirspec_pipe_testing_tool)
#
#
# [Top of Page](#title_ID)

# <a id="pipeline_ID"></a>
# # Run the JWST pipeline and flat_field validation tests
#
# The pipeline can be run from the command line in two variants: full or per step.
#
# To run the spec2 pipeline in full use the command:
#
# $ strun jwst.pipeline.Spec2Pipeline jwtest_rate.fits
#
# To only run the flat_field step, use the command:
#
# $ strun jwst.flat_field.FlatFieldStep jwtest_extract_2d.fits
#
# These options are also callable from a script with the testing environment active.
The Python call for running the pipeline in full or by step are: # # $\gt$ from jwst.pipeline.calwebb_spec2 import Spec2Pipeline # # $\gt$ Spec2Pipeline.call(jwtest_rate.fits) # # or # # $\gt$ from jwst.flat_field.flat_field_step import FlatFieldStep # # $\gt$ FlatFieldStep.call(jwtest_extract_2d.fits) # # For the imaging pipeline the call would be as follows: # # $\gt$ from jwst.pipeline.calwebb_image2 import Image2Pipeline # # $\gt$ Image2Pipeline.call(jwtest_rate.fits) # # NPTT can run the spec2 pipeline either in full or per step, as well as the imaging pipeline in full. In this notebook we will use NPTT to run the pipeline and the validation tests. To run NPTT, follow the directions in the corresponding repo page. # # [Top of Page](#title_ID) # -> For each mode, the following variables will need to be set: # - output_directory = string, path where you want intermediary files and plots to be saved in, if writefile=True # - input_file = string or object, this is the output file from the previous step, e.g. jwtest1_NRS1_extract2d.fits # - dflat_path = string, path of where the D-flat reference fits files # - sflat_path = string, path of where the S-flat reference fits files # - fflat_path = string, path of where the F-flat reference fits files # - writefile = boolean, if True writes the fits files of the calculated flat, and the difference jpeg images # - save_figs = boolean, whether to save plots or not # - show_figs = boolean, whether to show plots or not # - threshold_diff = float, threshold difference between pipeline output and ESA file # # <a id="data_ID"></a> # # Testing Data Set # # All testing data is from the CV3 campaign. We chose these files because this is our most complete data set, i.e. all modes and filter-grating combinations. 
# # Data used was for testing: # - FS_PRISM_CLEAR # - FS_FULLFRAME_G395H_F290LP # - FS_ALLSLITS_G140H_F100LP # - MOS_G140M_LINE1 # - MOS_PRISM_CLEAR # - IFU_G395H_F290LP # # # [Top of Page](#title_ID) testing_data = { 'fs_prism_clear':{ 'uncal_file_nrs1': 'fs_prism_nrs1_uncal.fits', 'uncal_file_nrs2': 'fs_prism_nrs2_uncal.fits', 'sflat_nrs1': 'nirspec_FS_sflat_PRISM_OPAQUE_FLAT5_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_FS_sflat_PRISM_OPAQUE_FLAT5_nrs2_f_01.01.fits', 'fflat': 'nirspec_FS_fflat_CLEAR_01.01.fits', 'msa_shutter_config': None }, 'fs_fullframe_g395h_f290lp':{ 'uncal_file_nrs1': 'fs_fullframe_g395h_f290lp_nrs1_uncal.fits', 'uncal_file_nrs2': 'fs_fullframe_g395h_f290lp_nrs2_uncal.fits', 'sflat_nrs1': 'nirspec_FS_sflat_G395H_OPAQUE_FLAT3_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_FS_sflat_G395H_OPAQUE_FLAT3_nrs2_f_01.01.fits', 'fflat': 'nirspec_FS_fflat_F290LP_01.01.fits', 'msa_shutter_config': None }, # Commented out because the wavecor step crashes for this data set #'fs_allslits_g140h_f100lp':{ # 'uncal_file_nrs1': 'fs_allslits_g140h_f100lp_nrs1_uncal.fits', # 'uncal_file_nrs2': 'fs_allslits_g140h_f100lp_nrs2_uncal.fits', # 'sflat_nrs1': 'nirspec_FS_sflat_G140H_OPAQUE_FLAT1_nrs1_f_01.01.fits', # 'sflat_nrs2': 'nirspec_FS_sflat_G140H_OPAQUE_FLAT1_nrs2_f_01.01.fits', # 'fflat': 'nirspec_FS_fflat_F100LP_01.01.fits', # 'msa_shutter_config': None }, # Commented out because the wavecor step is not populating array for this data set #'bots_g235h_f170lp':{ # 'uncal_file_nrs1': 'bots_g235h_f170lp_nrs1_uncal.fits', # 'uncal_file_nrs2': 'bots_g235h_f170lp_nrs2_uncal.fits', # 'sflat_nrs1': 'nirspec_FS_sflat_G235H_OPAQUE_FLAT2_nrs1_f_01.01.fits', # 'sflat_nrs2': 'nirspec_FS_sflat_G235H_OPAQUE_FLAT2_nrs2_f_01.01.fits', # 'fflat': 'nirspec_FS_fflat_F170LP_01.01.fits', # 'msa_shutter_config': None }, 'mos_prism_clear':{ 'uncal_file_nrs1': 'mos_prism_nrs1_uncal.fits', 'uncal_file_nrs2': 'mos_prism_nrs2_uncal.fits', 'sflat_nrs1': 
'nirspec_MOS_sflat_PRISM_OPAQUE_FLAT5_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_MOS_sflat_PRISM_OPAQUE_FLAT5_nrs2_f_01.01.fits', 'fflat': 'nirspec_MOS_fflat_CLEAR_01.01.fits', 'msa_shutter_config': 'V0030006000104_msa.fits' }, 'mos_g140m_f100lp':{ 'uncal_file_nrs1': 'mos_g140m_line1_NRS1_uncal.fits', 'uncal_file_nrs2': 'mos_g140m_line1_NRS2_uncal.fits', 'sflat_nrs1': 'nirspec_MOS_sflat_G140M_OPAQUE_FLAT1_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_MOS_sflat_G140M_OPAQUE_FLAT1_nrs2_f_01.01.fits', 'fflat': 'nirspec_MOS_fflat_F100LP_01.01.fits', 'msa_shutter_config': 'V8460001000101_msa.fits' }, 'ifu_prism_clear':{ 'uncal_file_nrs1': 'ifu_prism_nrs1_uncal.fits', 'uncal_file_nrs2': 'ifu_prism_nrs2_uncal.fits', 'sflat_nrs1': 'nirspec_IFU_sflat_PRISM_OPAQUE_FLAT5_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_IFU_sflat_PRISM_OPAQUE_FLAT5_nrs2_f_01.01.fits', 'fflat': 'nirspec_IFU_fflat_CLEAR_01.01.fits', 'msa_shutter_config': None }, 'ifu_g395h_f290lp':{ 'uncal_file_nrs1': 'ifu_g395h_f290lp_nrs1_uncal.fits', 'uncal_file_nrs2': 'ifu_g395h_f290lp_nrs2_uncal.fits', 'sflat_nrs1': 'nirspec_IFU_sflat_G395H_OPAQUE_FLAT3_nrs1_f_01.01.fits', 'sflat_nrs2': 'nirspec_IFU_sflat_G395H_OPAQUE_FLAT3_nrs2_f_01.01.fits', 'fflat': 'nirspec_IFU_fflat_F290LP_01.01.fits', 'msa_shutter_config': None } } # define function to pull data from Artifactory def get_artifactory_file(data_set_dict, detector): """This function creates a list with all the files needed per detector to run the test. 
Args: data_set_dict: dictionary, contains inputs for a specific mode and configuration detector: string, either nrs1 or nrs2 Returns: data: list, contains all files needed to run test """ files2obtain = ['uncal_file_nrs1', 'sflat_nrs1', 'fflat', 'msa_shutter_config'] data = [] for file in files2obtain: data_file = None try: if '_nrs' in file and '2' in detector: file = file.replace('_nrs1', '_nrs2') data_file = get_bigdata('jwst_validation_notebooks', 'validation_data', 'nirspec_data', data_set_dict[file]) except TypeError: data.append(None) continue data.append(data_file) return data # + # set the D-flat path (used for all test data) print('Getting D-Flats from Artifactory...') dflat_nrs1 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'nirspec_data', 'nirspec_dflat_nrs1_f_01.03.fits') dflat_nrs2 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'nirspec_data', 'nirspec_dflat_nrs2_f_01.03.fits') print('Got D-flats') # accepted threshold difference with respect to benchmark files threshold_diff = 9.999e-5 # set NPTT switches for this test writefile = False save_figs = False show_figs = True # Get the data results_dict = {} detectors = ['nrs1', 'nrs2'] print('Starting to run pipeline and test...') for mode_config, data_set_dict in testing_data.items(): for det in detectors: print('Testing files for detector: ', det) data = get_artifactory_file(data_set_dict, det) uncal_file, sflat, fflat, msa_shutter_config = data print('Working with uncal_file: ', uncal_file) uncal_basename = os.path.basename(uncal_file) dflat = dflat_nrs1 if '2' in det: dflat = dflat_nrs2 # Run the stage 1 pipeline rate_object = Detector1Pipeline.call(uncal_file) # Make sure the MSA shutter configuration file is set up correctly if msa_shutter_config is not None: msa_metadata = rate_object.meta.instrument.msa_metadata_file if msa_metadata is None or msa_metadata == 'N/A': rate_object.meta.instrument.msa_metadata_file = msa_shutter_config # Run the stage 2 pipeline steps 
try: pipe_object = AssignWcsStep.call(rate_object) skip_file = False except: print("No open slits fall on detector ", det) print("Skipping test for this file. \n") skip_file = True if not skip_file: # only run MSA flagging on IFU or MOS data if 'ifu' in uncal_basename.lower() or 'mos' in uncal_basename.lower(): pipe_object = MSAFlagOpenStep.call(pipe_object) # only run extract_2d on non IFU data if 'ifu' not in uncal_basename.lower(): pipe_object = Extract2dStep.call(pipe_object) pipe_object = SourceTypeStep.call(pipe_object) pipe_object = WavecorrStep.call(pipe_object) flat_field_object = FlatFieldStep.call(pipe_object, save_interpolated_flat=True) # Run the validation test # %matplotlib inline interpolated_flat = os.path.basename(uncal_file).replace('uncal', 'interpolatedflat') if 'fs' in uncal_file.lower(): print('Running flat field test for FS...') result, result_msg, log_msgs = nptt.calwebb_spec2_pytests.auxiliary_code.flattest_fs.flattest( flat_field_object, dflat_path=dflat, sflat_path=sflat, fflat_path=fflat, writefile=writefile, show_figs=show_figs, save_figs=save_figs, interpolated_flat=interpolated_flat, threshold_diff=threshold_diff, debug=False) if 'mos' in uncal_file.lower(): print('Running flat field test for MOS...') result, result_msg, log_msgs = nptt.calwebb_spec2_pytests.auxiliary_code.flattest_mos.flattest( flat_field_object, dflat_path=dflat, sflat_path=sflat, fflat_path=fflat, msa_shutter_conf=msa_shutter_config, writefile=writefile, show_figs=show_figs, save_figs=save_figs, interpolated_flat=interpolated_flat, threshold_diff=threshold_diff, debug=False) if 'ifu' in uncal_file.lower(): print('Running flat field test for IFU...') result, result_msg, log_msgs = nptt.calwebb_spec2_pytests.auxiliary_code.flattest_ifu.flattest( flat_field_object, dflat_path=dflat, sflat_path=sflat, fflat_path=fflat, writefile=writefile, mk_all_slices_plt=False, show_figs=show_figs, save_figs=save_figs, interpolated_flat=interpolated_flat, 
threshold_diff=threshold_diff, debug=False) else: result_msg = 'skipped' # Did the test passed print("Did flat_field validation test passed? ", result_msg, "\n\n") rd = {uncal_basename: result_msg} results_dict.update(rd) # close all open files psutil.Process().open_files() closing_files = [] for fd in psutil.Process().open_files(): if data_dir.name in fd.path: closing_files.append(fd) for fd in closing_files: try: print('Closing file: ', fd, '\n\n') open(fd.fd).close() except: print('File already closed: ', fd, '\n\n') # + # Quickly see if the test passed print('These are the final results of the tests: ') for key, val in results_dict.items(): print(key, val) # - # <a id="about_ID"></a> # ## About this Notebook # **Author:** <NAME>, Staff Scientist II - Systems Science Support, NIRSpec # <br>**Updated On:** April/7/2021 # [Top of Page](#title_ID) # <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
jwst_validation_notebooks/flat_field/jwst_flat_field_nirspec_test/jwst_flat_field_nirspec_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <img src="Figs/Banner.png" width="100%" /> # <font face="Calibri"> # <br> # <font size="7"> <b> Offset Estimation <b> </font> # # <font size="5"> <b> The *ampcor* program<font color='rgba(200,0,0,0.2)'> </font> </b> </font> # # <br> <img src="Figs/NASALogo.png" width="250" align="right" /> <br> # <font size="4"> <b> <NAME></b> # <font size="3"> <br> # <font> <b>Date: </b> July 10, 2020 </font> # </font> # # + hideCode=true hideOutput=true hidePrompt=true slideshow={"slide_type": "-"} import warnings warnings.filterwarnings('ignore') bShowInline = True # Set = False for document generation # %matplotlib inline import matplotlib.pyplot as plt params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 5), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} plt.rcParams.update(params) def makeplot( plt, figlabel, figcaption): figname = figlabel+'.png' plt.savefig(figname) if bShowInline: plt.show() else: plt.close() strLatex=""" \\begin{figure}[b] \centering \includegraphics[totalheight=10.0cm]{%s} \caption{%s} \label{fig:%s} \end{figure}"""%(figname, figcaption, figlabel) return display(Latex(strLatex)) def sinc_interp(x, s, u): # x is the vector to be interpolated # s is a vector of sample points of x # u is a vector of the output sample points for the interpolation if len(x) != len(s): raise ValueError('x and s must be the same length') # Find the period T = s[1] - s[0] sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u))) y = np.dot(x, np.sinc(sincM/T)) return y # + [markdown] hideCode=false hidePrompt=true slideshow={"slide_type": "-"} # # Overview # + [markdown] hideCode=false hidePrompt=true 
slideshow={"slide_type": "-"} # In this notebook, we will describe the essential algorithms in the *ampcor* program, which is used to estimate the misregistration between two images. The basic methodology uses image chip cross-correlation in the image spatial domain. A reference image chip with a particular dimension is defined in the first image, as well as a search image chip in a second image. The search image is larger than the reference image so it can be translated in two dimensions for cross-correlation. This is done once at nominal image sampling to establish a coarse offset. Once the coarse offset is found, the images are oversampled by a factor of 2 and the cross-correlation is recomputed. The correlation surface is stored, and oversampled by a factor of 32, then the peak location is estimated by curve-fitting. The peak location is then recorded as the offset. Much of the program is book-keeping and oversampling. The basic functionality is just two-dimensional cross correlation. # # In this notebook, we go through the ampcor program step by step, and emulate the operations with computational python and with illustrations to ensure the algorithm and methods are clear. 
# # 1.0 [Setup](#section-1)<br> # > 1.1 [Sinc interpolator](#section-1.1) <br> # > 1.2 [Input data types](#section-1.2) <br> # > 1.3 [Bounds Checking on Input](#section-1.3) <br> # > 1.4 [The Ampcor Workflow](#section-1.4) <br> # # + [markdown] hidePrompt=true slideshow={"slide_type": "-"} # 2.0 [Read in the data](#section-2) <br> # > 2.1 [Complex Images](#section-2.1) <br> # > 2.2 [Real Images](#section-2.2) <br> # > 2.3 [Mag Images](#section-2.3) <br> # # 3.0 [Focusing SAR data - Range](#section-3) <br> # > 3.1 [Correlation to achieve fine range resolution - time domain](#section-3.1) <br> # > 3.2 [Correlation to achieve fine range resolution - frequency domain](#section-3.2) # # 4.0 [Focusing SAR data - Azimuth](#section-4) <br> # > 4.1 [Azimuth reference function](#section-4.2) <br> # > 4.2 [Correlation to achieve fine azimuth resolution - time domain](#section-4.2) <br> # > 4.3 [Correlation to achieve fine azimuth resolution - frequency domain](#section-4.3) <br> # > 4.4 [Backprojection](#section-4.4) # + [markdown] hidePrompt=true slideshow={"slide_type": "-"} # <a id="section-1"></a> # ## 1.0 Setup # + [markdown] hidePrompt=true slideshow={"slide_type": "-"} # <a id="section-1.1"></a> # ### 1.1 Sinc Interpolator # # Ampcor defines its own sinc interpolator function, which is designed to be fast on a cpu by precomputing a very finely sampled function (oversampled by 4K or 8K), and accessing the appropriate coefficients for a particular interpolating fraction by simply selecting the nearest appropriate offset. The sinc is obviously truncated, and an important feature of this function is the ability to taper the amplitude of the function for a smooth transition to zero at the edge. This helps preserve phase independent of the fraction of the pixel selected. 
# # #### Filter definition # # The elements of the function are # # | Parameter | Symbol | Variable | Ampcor Value | # | --- | --- | --- | --- | # | Sinc Oversample Factor | $F_{\rm sinc,os} $ | i_decfactor | 4096 | # | Relative Filter Length | $L_{\rm rel}$ | r_relfiltlen | 6. | # | Filter Length Scale | $\beta$ | r_beta | 0.75 | # | Weight Flag | $w_{b\rm bool}$ | i_weight | 1 | # | Weight Edge Pedestal Height | $A_{\rm sinc}$ | r_pedestal | 0. | # | Number of sinc samples | $L_{\rm sinc}$ | i_intplength | computed| # | Number of oversampled sinc samples | $N_{\rm sinc,os}$ | i_filtercoef | computed | # # From these input parameters, length $L_{\rm sinc}$ of the interpolating function is calculated from # # \begin{equation} # L_{\rm sinc} = [ L_{\rm rel} /\beta ] # \end{equation} # # where $[]$ denotes the nearest integer, rounding up for fractions $\ge0.5$. Then the number of coefficients computed in the highly oversampled sinc function is # # \begin{equation} # N_{\rm sinc,os} = L_{\rm sinc} F_{\rm sinc,os} # \end{equation} # # An array of coefficients is calculated over this number of samples to fit one finely sampled version of a sinc function, such that any fraction can be looked up in this function and retrieve $L_{\rm sinc}$ coefficients, spaced by $F_{\rm sinc,os}$ in the array for the appropriate fraction. This array $f_{\rm sinc,os}(s_m)$ is calculated over integers $m$ centered with 0 at the middle of the array. Thus, we define a variable # # \begin{equation} # s_m = m - m_{\rm off} ,\quad m=0\cdots N_{\rm sinc,os}-1 # \end{equation} # # where # # \begin{equation} # m_{\rm off} = \frac{N_{\rm sinc,os}-1}{2} # \end{equation} # is the offset to the center of the array. 
# # Since the sinc can have a symmetric raised cosine taper, tapering to $A_{\rm sinc}$ at the edge, we define a weight function # # \begin{equation} # W(s_m) = (1-H_{\rm sinc}) + H_{\rm sinc} \cos\left(\frac{\pi s_m}{m_{\rm off}}\right) # \end{equation} # # where $H_{\rm sinc} = (1-A_{\rm sinc})/2$. When $A_{\rm sinc}=0$, $H_{\rm sinc}=0.5$, such that $W(s_0) = W(s_{N_{\rm sinc,os}-1})= 0$, and $W(s_{m_{\rm off}}) = 1$. # # The oversampled sinc function is then: # # \begin{equation} # f_{\rm sinc,os}(s_m) = W(s_m) \frac{\sin \pi s_m}{\pi s_m},\quad m=0\cdots N_{\rm sinc,os}-1 # \end{equation} # # Since the array is 0 indexed, in its application, there is an intrinsic delay of half the interpolating kernel length $L_{\rm sinc}/2$ that must be applied to have the interpolated output align with the input. # # In the *ampcor* implementation, the application of these coefficients $f_{\rm sinc,os}(s_m)$ is done through an intermediary array that rearranges the terms into effectively a 2-dimensional array stored as 1-dimensional, with each "row" being the $L_{\rm sinc}$ coefficitions for successive delays up to $F_{\rm sinc,os}$. # # \begin{equation} # f_{\rm lookup}(n + m F_{\rm sinc,os}) = f_{\rm sinc,os}(m + n F_{\rm sinc,os}), \quad m=0\cdots F_{\rm sinc,os}-1, n=0\cdots L_{\rm sinc}-1 # \end{equation} # # #### Filter application # # To apply this sinc interpolator, then, it is a simple matter to translate the fraction to an index into this array, then look up the appropriate $L_{\rm sinc}$ sequential values. # # Let $s$ be the real valued coordinate lying somewhere between the integer indices $p$ of some function $g(p)$, $p=0,1,2,\cdots$. 
We define the integer part of $s$ # # \begin{equation} # s_{\rm int} = \lfloor s + L_{\rm sinc}/2 \rfloor # \end{equation} # # and the fractional part as # # # \begin{equation} # s_{\rm frac} = s + L_{\rm sinc}/2 - s_{\rm int} # \end{equation} # # Then # # \begin{equation} # g(s) = W_{\rm sinc} \sum_{k=0}^{L_{\rm sinc}-1} g(s_{\rm int}-k) f_{\rm lookup}(k + s_{\rm frac} F_{\rm sinc,os} L_{\rm sinc} ) # \end{equation} # # where # # \begin{equation} # W_{\rm sinc} = \sum_{k=0}^{L_{\rm sinc}-1} f_{\rm lookup}(k + s_{\rm frac} F_{\rm sinc,os} L_{\rm sinc} ) # \end{equation} # # and the $ L_{\rm sinc}/2 $ offset above accounts for the shift of the interpolating kernel in the buffers indexed from 0. # # # + [markdown] hideCode=false hideOutput=false hidePrompt=true slideshow={"slide_type": "-"} # # + [markdown] hideCode=false hidePrompt=true slideshow={"slide_type": "-"} # <a id="section-1.2"></a> # ### 1.2 Input Data Types # + [markdown] hideCode=false hideOutput=false hidePrompt=true slideshow={"slide_type": "-"} # *ampcor* cross-correlates two images: the reference image *refimg* and the search image *srchimg*. These images can be either real numbers or complex numbers. # # Inputs can be specified as # # real - two dimensional array of floats # # complex - two dimensional array of complex numbers arranged as float (r,i) pairs # # mag - *ampcor* reads a complex image but immediately detects it. # # # In the current implementation, all the correlations are done on detected (real-valued) images. In the first stage of correlation, the complex and mag specifications lead to the same outcome because the complex data are detected before correlation. # # However, there is a second stage of correlation that oversamples the original data by a factor of 2 before correlation. In this case the complex and mag results will be different. # # In a proper implementation, real or complex correlation should be selectable, and the computation done accordingly. 
# # - # <a id="section-1.3"></a> # ### 1.3 Bounds checking on input # # There is an extensive section of bounds and size checking of the input. This prevents users from doing things that will likely lead to bad results. Many of these are probably specific to this implementation. # # <a id="section-1.4"></a> # ### 1.4 The *ampcor* Workflow # # *ampcor* performs the following steps. # # 1. Read in a block of lines of image 1 and image 2. # 2. For the specified grid of locations to perform the correlation, detect images if complex and place detected data into buffers to correlate. The refimg is smaller in dimensions than the srchimg, so that the srchimg can be slid around the refimg to calculate a correlation surface # 3. The correlation surface is computed, along with a number of metrics, like SNR, variance in two dimensions, curvature. The peak location is detected and used to perform a second stage of interpolation. # 4. With the srchimg now centered at the estimated peak, the input data are oversampled by a factor of 2. If the data are complex, the oversampling is done on the complex data, then detected again for correlation. Oversampling is done by Fourier Transform. The array is transformed, then the buffers are doubled in size and padded with zeros in the regions where the spectrum did not exist. This needs to be done carefully because of rotations of zero frequency, depending on the kind of data it is. # 5. The correlation surface is re-computed, along with a number of metrics, like SNR, variance in two dimensions, curvature. The peak location is detected and used to perform a second stage of interpolation. # 6. Then the new correlation surface is oversampled by typically a large number to allow a fine measure of the shape of the surface. The procedure is similar to that for the complex data. # 7. The peak is estimated by searching for a peak and taking the nearest point. 
# + import numpy as np i_n2wsyi=8 i_n2wsxi=8 i_ovs=2 i_nn1=i_n2wsxi*i_ovs i_nn2=i_n2wsyi*i_ovs inarr = np.zeros(i_n2wsyi*i_n2wsxi,dtype=np.int) outarr = np.zeros(i_nn1*i_nn2,dtype=np.int) for k in range(1,i_n2wsyi//2+1): for l in range(1,i_n2wsxi//2+1): i_index = (k-1)*i_nn1 + l i_indexi = (k-1)*i_n2wsxi + l outarr[i_index-1] = 1 inarr[i_indexi-1] = 1 i_index = (i_nn2 - i_n2wsyi//2 + k - 1)*i_nn1 + l i_indexi = (k + i_n2wsyi//2 - 1)*i_n2wsxi + l outarr[i_index-1] = 2 inarr[i_indexi-1] = 2 i_index = (k-1)*i_nn1 + i_nn1 - i_n2wsxi//2 + l i_indexi = (k-1)*i_n2wsxi + i_n2wsxi//2 + l outarr[i_index-1] = 3 inarr[i_indexi-1] = 3 i_index = (i_nn2 - i_n2wsyi//2 + k - 1)*i_nn1 + i_nn1 - i_n2wsxi//2 + l i_indexi = (k + i_n2wsyi//2 - 1)*i_n2wsxi + l + i_n2wsxi//2 outarr[i_index-1] = 4 inarr[i_indexi-1] = 4 print ("in") for k in range(0,i_n2wsyi): print (inarr[k*i_n2wsyi:k*i_n2wsyi+i_n2wsxi]) print ("\nout") for k in range(0,i_nn2): print (outarr[k*i_nn2:k*i_nn2+i_nn1]) # -
doc/manual/Ampcor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ptrckbnck/EPR/blob/main/epr.ipynb) # # EPR Repetitorium 10.12.2021 # Die Themen sind in der Reihenfolge des Python Tutorials https://docs.python.org/3/tutorial/ (sehr zu empfehlen) sortiert. Zu jedem Thema steht dabei, in welchem Foliensatz auf moodle dieser zu finden ist. # # Was ihr hier sehen könnt, nennt sich Jupyter Notebook https://jupyter.org/. Ein nützliches Werkzeug, um Code zu demonstrieren und Daten zu visualisieren. # # Autor: <NAME> import sys # !{sys.executable} --version # ## Simple Data Types # VE00 - Die allerersten Schritte # ### Boolean # + pycharm={"name": "#%%\n"} a = False b = True print("a:", a) print("b:", b) print() print("1 < 2:", 1 < 2) print("1 <= 2:", 1 <= 2) print("1 <= 2:", 1 <= 2) print("1 == 2:", 1 == 2) print("1 != 2:", 1 != 2) print() print("0 == False:",0 == False) print("1 == True:", 1 == True) print("not 0:", not 0) print("not 1:", not 1) print() print("2 == True:", 2 == True) print("2 == False:",2 == False) print("not 2:", not 2) # - # ### Numeric Types # + pycharm={"name": "#%%\n"} # int print(1) print(int("1")) # float print(2.0) print(float("2")) # complex print(4+4j) print(complex(4,4)) # - # ### Common Numeric Operations # Programmierhandzettel x = 3 y = 2 print(+x) print(-x) print(x**y) print(y*y) print(x/y) print(x%y) print(x//y) print(x+y) print(x-y) # ## Data Structures # VE04 - Aggregierte Datentypen # ### Sequence # + a = [1, 2, 3] print(a) b = (1, 2, 3) print(b) c = range(1,4) print(c) # - # ### Common Sequence Operations # + pycharm={"name": "#%%\n"} sequence = [1, 2, 3] print("sequence = [1, 2, 3]") print("1 in sequence:", 1 in sequence) print("1 not in sequence:", 1 
not in sequence) print("[0] + sequence:", [0] + sequence) print("2 * sequence:", 2 * sequence) print("sequence[1:2]:", sequence[1:2]) print("sequence[0:3:2]:", sequence[0:3:2]) print("len(sequence):", len(sequence)) print("min(sequence):", min(sequence)) print("max(sequence):", max(sequence)) print("range(5).index(1):", range(5).index(1)) print("[1,2,2,3].count(2):", [1,2,2,3].count(2)) sequence.append(4) print(sequence) # - # ### Unpacking # + pycharm={"name": "#%%\n"} x = [1, 2, 3] print(x) a, b, c = x print(a, b, c) a, *b = x print(a, b) print(*x) a = 1 b = 2 a, b = b, a print(a, b) # + pycharm={"name": "#%%\n"} # Immutable Sequence Types tup = tuple([1, 2, 3]) # same as (1,2,3) print(tup) # Mutable Sequence Types l = list((1, 2, 3)) # same as [1,2,3] print(l) l[0] = 0 print(l) # - # ### Set Types # + pycharm={"name": "#%%\n"} # Mutable Set Types s = set([1, 2, 3]) # same as {1,2,3} print(s) # Immutable Set Types fs = frozenset([1, 2, 3]) print(fs) # + my_set = {1, 2, 3} print("my_set = {1, 2, 3}") print("1 in my_set:", 1 in my_set) print("1 not in my_set:", 1 not in my_set) print("len(my_set):", len(my_set)) print("min(my_set):", min(my_set)) print("max(my_set):", max(my_set)) print("list(my_set):", list(my_set)) my_set.add(4) print(my_set) # - # ### Text Sequence Type : Strings # + pycharm={"name": "#%%\n"} string = "Python" print(string[0]) print(len(string)) # - # ### Common String operations # + pycharm={"name": "#%%\n"} string = "Python" print(string.upper()) print("+".join(string)) print(string.split("y")) print(string.startswith("Py")) print(string.endswith("on")) print(string.zfill(10)) print(string.rjust(10, "_")) # - # ### format String # + pycharm={"name": "#%%\n"} print('{0}, {1}, {2}'.format('a', 'b', 'c')) print('{}, {}, {}'.format('a', 'b', 'c')) print('{2}, {1}, {0}'.format('a', 'b', 'c')) p = "Python" print(f"{p} is nice") # - # ### Dictionary Types # VE05 - Dict Copy Unterpr Parameter # + pycharm={"name": "#%%\n"} a = dict(one=1, two=2, 
three=3) b = {'one': 1, 'two': 2, 'three': 3} c = dict([('two', 2), ('one', 1), ('three', 3)]) d = dict({'one': 1, 'three': 3}, two=2) print(a) print(b) print(c) print(d) print(a==b==c==d) # + pycharm={"name": "#%%\n"} a = dict(one=1, two=2, three=3) print(a["one"]) print("one" in a) print(len(a)) print(list(a)) a["four"] = 4 print(a) # - # ## Control Flow Tools # ### If Statement # VE01 - Die nächsten Schritte # + pycharm={"name": "#%%\n"} x = 5 if x < 0: x = 0 print('Negative changed to zero') elif x == 0: print('Zero') elif x == 1: print('Single') else: print('More') # + pycharm={"name": "#%%\n"} x = 6 if 5 < x and x < 7: print(x) if 5 < x < 7: print(x) # - # ### Conditional Expressions # + pycharm={"name": "#%%\n"} condition = False x = 1 if condition else 0 print(x) # - # ### While Statement # VE02 - Weitere Kontrollstrukturen x = 0 while x < 3: print(x) x += 1 # ### For Statement # VE02 - Weitere Kontrollstrukturen # + pycharm={"name": "#%%\n"} words = ['cat', 'window', 'defenestrate'] for w in words: print(w) # + pycharm={"name": "#%%\n"} # range for i in range(5, 10): print(i, end=' ') print() for i in [5, 6, 7, 8, 9]: print(i, end=' ') # + pycharm={"name": "#%%\n"} # if you need index and value for i, v in enumerate(range(5, 10)): print(i, v) # + pycharm={"name": "#%%\n"} # break in Loops for n in range(2, 10): for x in range(2, n): if n % x == 0: print(n, 'equals', x, '*', n//x) break else: # loop fell through without finding a factor print(n, 'is a prime number') # example source: https://docs.python.org/3/tutorial/controlflow.html # + pycharm={"name": "#%%\n"} # continue in Loops for num in range(2, 10): if num % 2 == 0: print("Found an even number", num) continue print("Found an odd number", num) # example source: https://docs.python.org/3/tutorial/controlflow.html # + pycharm={"name": "#%%\n"} # for-else value = 0 for i in range(2, 10): if i == value: print(i) break else: print("List missing value.") # example source: 
https://docs.python.org/3/tutorial/controlflow.html # + pycharm={"name": "#%%\n"} # pass Statements for n in range(2, 10): pass print("something") # + pycharm={"name": "#%%\n"} # match Statements # funktioniert nur in Python >3.10, daher möglicherweise nicht in Colab status = 400 match status: case 400: print("Bad request") case 404: print("Not found") case 418: print("I'm a teapot") case _: print("Something's wrong with the internet") # - # ### List Comprehensions # + squares = [] for x in range(10): squares.append(x**2) print(squares) print([squares.append(1) for x in range(10)]) print([x**2 for x in range(10) if (x**2)%2 == 0]) # - # ### Functions # VE05 - Dict Copy Unterpr Parameter # Einige Regeln: # - Wählt selbsterklärende Namen # - Nutzt kurze und einfache Funktionen # - Eine Funktion sollte nur eine einzige Aufgabe übernehmen # - Nur so viele Argumente wie nötig # - Vermeidet Seiteneffekte (insbesondere kein global) # + pycharm={"name": "#%%\n"} def fib(n): """Print a Fibonacci series up to n.""" a, b = 0, 1 while a < n: print(a, end=' ') a, b = b, a+b print() print(fib) value = fib(10) print(value) # Quelle Beispiele: https://docs.python.org/3/tutorial/controlflow.html#defining-functions # + pycharm={"name": "#%%\n"} def fib2(n): """Return a list containing the Fibonacci series up to n.""" result = [] a, b = 0, 1 while a < n: result.append(a) # see below a, b = b, a+b return result print(fib2) value = fib2(10) print(value) # + pycharm={"name": "#%%\n"} # Using arguments def add(a, b): return a+b print(add(1, 2)) print(add(1, b=2)) print(add(a=1, b=2)) print(add(b=2, a=1)) # + pycharm={"name": "#%%\n"} # Default arguments def fib3(n = 10): """Return a list containing the Fibonacci series up to n.""" result = [] a, b = 0, 1 while a < n: result.append(a) # see below a, b = b, a+b return result print(fib3) value = fib3() print(value) value2 = fib3(10) print(value2) # + # Unpacking in Functions def sum_all(a, b, c, d): return a + b + c + d a = [1, 2, 3, 4] 
print(sum_all(*a)) # + # Functions as arguments def add_one(x): return x+1 def my_map(func, iterable): return [func(x) for x in iterable] print(add_one(1)) mapped = map(add_one, [1,2]) print(mapped) print(list(mapped)) print(my_map(add_one, [1,2])) # + pycharm={"name": "#%%\n"} # Never use mutable default values! def func(a, L=[]): L.append(a) return L print(func(1)) print(func(2)) print(func(3)) print() print(func(7,[5,6])) print(func(8,[5,6])) # + pycharm={"name": "#%%\n"} # Function Annotations PEP 3107 & 484 def add(a, b): return a+b def add2(a: int, b:int ) -> int: return a+b print(add.__annotations__) print(add2.__annotations__) # - # ### Docstring & Typing # Programmierhandbuch # + pycharm={"name": "#%%\n"} from typing import Callable, Iterable, List # Use docstrings PEP 257 def add3(a: int, b: int) -> int: """Returns the sum of a and b :param a: first value :param b: second value :return: sum of a and b """ return a+b def my_map(func: Callable[[int], int], iterable: Iterable[int]) -> Iterable[int]: """My implementation of map :param func: The function to be apllied on each element of iterable :param iterable: The iterable :return: Generator yielding modified elements of iterable """ return (func(x) for x in iterable) # - # ## Testing # VE06 - Glassbox-Tests # ### Doctest # + pycharm={"name": "#%%\n"} import math import doctest import random def factorial(n): """Return the factorial of n, an exact integer >= 0. >>> [factorial(n) for n in range(6)] [1, 1, 2, 6, 24, 120] >>> factorial(30) 265252859812191058636308480000000 >>> factorial(-1) Traceback (most recent call last): ... ValueError: n must be >= 0 Factorials of floats are OK, but the float must be an exact integer: >>> factorial(30.1) Traceback (most recent call last): ... ValueError: n must be exact integer >>> factorial(30.0) 265252859812191058636308480000000 It must also not be ridiculously large: >>> factorial(1e100) Traceback (most recent call last): ... 
OverflowError: n too large >>> random.seed(0) >>> random.randint(1,10) 7 """ if not n >= 0: raise ValueError("n must be >= 0") if math.floor(n) != n: raise ValueError("n must be exact integer") if n+1 == n: # catch a value like 1e300 raise OverflowError("n too large") result = 1 factor = 2 while factor <= n: result *= factor factor += 1 return result doctest.testmod(verbose=False) # Quelle https://docs.python.org/3/library/doctest.html # - # ### Unittest # + pycharm={"name": "#%%\n"} import unittest class TestStringMethods(unittest.TestCase): def test_upper(self): self.assertEqual('foo'.upper(), 'FOO') def test_isupper(self): self.assertTrue('FOO'.isupper()) self.assertFalse('Foo'.isupper()) def test_split(self): s = 'hello world' self.assertEqual(s.split(), ['hello', 'world']) # check that s.split fails when the separator is not a string with self.assertRaises(TypeError): s.split(2) unittest.main(argv=['first-arg-is-ignored'], exit=False, verbosity=0) # attributes only for using in jupyter # Quelle https://docs.python.org/3/library/unittest.html
rep1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import requests url = 'http://localhost:9696/predict' customer_id = 'xyz-123' customer = {"contract": "two_year", "tenure": 1, "monthlycharges": 10} response = requests.post(url, json=customer).json() print(response) if response['churn'] == True: print('sending promo email to %s' % customer_id) else: print('not sending promo email to %s' % customer_id) # -
Week 5/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import urllib.request import json from pprint import pprint import MySQLdb as mariadb connection = mariadb.connect('192.168.132.139','user','p','SCEBase') cursor = connection.cursor() # + query = ("SELECT id,post_title,post_name,post_date," +"post_excerpt,post_content,post_type FROM scecms_posts WHERE post_status='publish'") cursor.execute(query) # + post_types = {} counter = 1 posts = [] for doc in cursor: post_type = doc[6] docdict = { 'id': doc[0], 'title': doc[1], 'name': doc[2], 'date': doc[3], 'excerpt': doc[4], 'content': doc[5], 'type': doc[6] } posts.append(docdict) if post_type in post_types: post_types[post_type] += 1 else: post_types[post_type] = 1 # - print(len(post_types)) pprint (post_types) print(len(posts)) pposts = [] for post in posts: if post['type'] == 'overview': pposts.append(post) pprint(pposts[0]) url = 'https://www.scencyclopedia.org/sce/wp-json/wp/v2/posts?categories=1' response = urllib.request.urlopen(url) bytes = response.read() print(response.headers) text = bytes.decode('utf-8') ddata = json.loads(text) pprint(ddata) print(ddata['routes'].keys()) print
exploresc-server/json-api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/adityasaxena-crypto/Ntcc-model-2/blob/main/ntcc_Xbg_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="BGZ3GnyS9HLS" import pandas as pd import numpy as np import math import seaborn as sn import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.tree import export_graphviz from sklearn.externals.six import StringIO from IPython.display import Image import pydotplus from sklearn.preprocessing import MinMaxScaler # + colab={"base_uri": "https://localhost:8080/"} id="G-q3jK3WkG9C" outputId="0c84c72b-2c99-44f4-a798-bbe231f3d524" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="O9-3EN6cBj4p" # 1-Contains 220314 readings by 51 sensors taken at an interval of 1 minute over a period of 5 months for a large-scale industrial pump. # 2-Each sensor makes some important measurement used to determine the working condition of the pump, like vibration, operative voltage, current drawn, heat generated, RPM etc. # 3-The column 'machine_status' indicates the current working condition of the pump. If it is 0, it indicates that the pump is working as expected. If it is 1, the pump is malfunctioning and need repair. 
# + [markdown] id="7el836KuBj0L" # # + colab={"base_uri": "https://localhost:8080/"} id="n_583hY2-MMP" outputId="c2608430-6e83-4d29-de15-ba20436f9611" sensor_data = pd.read_csv('/content/drive/MyDrive/data/sensor.csv') sensor_data = sensor_data.dropna() print(sensor_data) # + id="CAC2GhLi-Og_" col_names = sensor_data.columns #print(col_names) indep_vars = col_names[1:52] depep_vars = col_names[52] # + id="dUnl2fxSo1oh" X = sensor_data[indep_vars] y = sensor_data[depep_vars] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) # + colab={"base_uri": "https://localhost:8080/"} id="BQwvr__X-r9P" outputId="eeafbf23-9cf8-439c-e78f-c26be48bc778" from xgboost import XGBClassifier model = XGBClassifier(n_estimators=1000) model.fit(X_train, y_train) model.score(X_test , y_test) # + id="t5jk0nPVJzoP" colab={"base_uri": "https://localhost:8080/"} outputId="76644df7-7076-47db-a662-bfda5782b569" print(y_test) # + id="JW8-i5T9DLxx" predictions = model.predict(X_test) predictionssmol= predictions[:60] y_test1=y_test[:60] # + colab={"base_uri": "https://localhost:8080/"} id="tUTMG-ntIIzh" outputId="8e2c0d36-0d87-4fef-cb74-17cbfd88ea41" rmse = np.sqrt(np.mean(predictions - y_test)**2) rmse # + colab={"base_uri": "https://localhost:8080/"} id="yGkxdWnGBv0g" outputId="b108bbd5-6aba-4a30-dd79-fa4231af3e67" from sklearn.neighbors import KNeighborsClassifier classifier= KNeighborsClassifier(n_neighbors=50, metric='minkowski', p=2 ) classifier.fit(X_train, y_train) classifier.score(X_test,y_test) # + id="ZT2uw9cBFJOP" predictions1 = classifier.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="WzFjYsRgEQgZ" outputId="16fd9ba6-6093-4866-e4df-96fb807be2e4" from sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf.fit(X_train, y_train) clf.score(X_test,y_test) # + id="w4njXIIRFXMv" predictions2 = clf.predict(X_test) # + id="S6ekqix8hI_K" # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd 
plt.style.use('fivethirtyeight') # + colab={"base_uri": "https://localhost:8080/", "height": 550} id="unK8p91ahZ8o" outputId="a3521dea-6167-4f13-8327-50fe8815dd9b" plt.style.use("ggplot") plt.figure(figsize=(16,8)) plt.title("Failure") plt.ylabel('Failure rate', fontsize=18) plt.xlabel('Number',fontsize=18) plt.plot(sensor_data['machine_status']) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 550} id="QCJ_m7WSDDfJ" outputId="1d28f042-b0bb-4fe1-f59e-5713d84e9ca5" plt.figure(figsize=(16,8)) plt.title('Predictions') plt.plot(predictions) plt.ylabel('Failure rate', fontsize=18) plt.xlabel('Number',fontsize=18) plt.legend(['predictions'],loc='lower right') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 550} id="sy5XHaNLHPK2" outputId="4030e2fa-6fb0-4e16-e016-d4ac55b40f09" plt.figure(figsize=(16,8)) plt.title('Predictions1') plt.plot(predictions1) plt.ylabel('Failure rate', fontsize=18) plt.xlabel('Number',fontsize=18) plt.legend(['predictions1'],loc='lower right') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 550} id="80P1lrOuKzZP" outputId="70faa3ef-349f-4e59-c704-cd9d60c92770" plt.figure(figsize=(16,8)) plt.title('Predictions2') plt.plot(predictions2) plt.ylabel('Failure rate', fontsize=18) plt.xlabel('Number',fontsize=18) plt.legend(['predictions2'],loc='lower right') plt.show() # + id="CngnTh52L7fQ" y_test=np.array(y_test) y_test1=y_test[:60] # + colab={"base_uri": "https://localhost:8080/", "height": 550} id="5xg-n-GbMB72" outputId="57fa9888-ab42-4172-9161-123532ded4b0" plt.figure(figsize=(16,8)) plt.title('Predictions1') plt.plot(y_test1) plt.ylabel('Failure rate', fontsize=18) plt.xlabel('Number',fontsize=18) plt.plot(predictionssmol) plt.legend(['y_test1','predictionssmol'],loc='lower right') plt.show()
ntcc_Xbg_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Multiple Custom Grouping Aggregations # # This challenge is going to be fairly difficult, but should answer a question that many pandas users face - What is the best way to do a grouping operation that does many custom aggregations? In this context, a 'custom aggregation' is defined as one that is not directly available to use from pandas and one that you must write a custom function for. # # In Pandas Challenge 1, a single aggregation, which required a custom grouping function, was the desired result. In this challenge, you'll need to make several aggregations when grouping. There are a few different solutions to this problem, but depending on how you arrive at your solution, there could arise enormous performance differences. I am looking for a compact, readable solution with very good performance. # # ### Sales Data # # In this challenge, you will be working with some mock sales data found in the sales.csv file. It contains 200,000 rows and 9 columns. import pandas as pd df = pd.read_csv('sales.csv', parse_dates=['date']) df.head() df.shape # ### Challenge # # There are many aggregations that you will need to return and it will take some time to understand what they are and how to return them. The following definitions for two time periods will be used throughout the aggregations. # # Period **2019H1** is defined as the time period beginning January 1, 2019 and ending June 30, 2019. # Period **2018H1** is defined as the time period beginning January 1, 2018 and ending June 30, 2018. # # ### Aggregations # Now, I will list all the aggregations that are expected to be returned. Each bullet point represents a single column. Use the first word after the bullet point as the new column name. 
#
# For every country and region, return the following:
# * recency: Number of days between today's date (9/9/2019) and the maximum value of the 'date' column
# * fast_and_fastest: Number of unique customer_id in period 2019H1 with delivery_type either 'fast' or 'fastest'
# * rev_2019: Total revenue for the period 2019H1
# * rev_2018: Total revenue for the period 2018H1
# * cost_2019: Total cost for period 2019H1
# * cost_2019_exp: Total cost for period 2019H1 with cost_type 'expert'
# * other_cost: Difference between cost_2019 and cost_2019_exp
# * rev_per_60: Total of revenue when duration equals 60 in period 2019H1 divided by number of unique customer_id when duration equals 60 in period 2019H1
# * profit_margin: Take the difference of rev_2019 and cost_2019_exp then divide by rev_2019. Return as percentage
# * cost_exp_per_60: Total of cost when duration is 60 and cost_type is 'expert' in period 2019H1 divided by the number of unique customer_id when duration equals 60 and cost_type is 'expert' in period 2019H1
# * growth: Find the percentage growth from revenue in period 2019H1 compared to the revenue in period 2018H1

# +
from datetime import datetime


def date_dif(df):
    """Recency for one group: days from today's date (9/9/2019) back to the
    group's most recent 'date' value."""
    today = datetime.strptime("2019-09-09", "%Y-%m-%d")
    latest = max(df["date"])
    # today - latest so recency is a positive day count; the original
    # computed (latest - today).days, which is negative.
    return (today - latest).days


df[["region", "country", "date"]].groupby(["region", "country"]).apply(date_dif)
# -

# 2019H1 is defined above as Jan 1 through Jun 30; the original ended the
# period at "2019-06-06", silently dropping most of June.
start_date = datetime.strptime("2019-01-01", "%Y-%m-%d")
end_date = datetime.strptime("2019-06-30", "%Y-%m-%d")
mask = (df["date"] <= end_date) & (df["date"] >= start_date)
new_df = df[mask][["customer_id", "delivery_type", "country", "region"]]
new_df

new_df["fast_and_fastest"] = new_df[df["delivery_type"].isin(["fast", "fastest"])].groupby(["region", "country", "delivery_type"]).transform("count")

df[mask][["country", "region", "revenue"]].groupby(["country", "region"]).sum()

df[mask][["cost", "country", "region"]].groupby(["country", "region"]).sum()


# +
def final_agg(df):
    """Compute the requested aggregations for one (region, country) group.

    Monetary totals are reported in thousands; margins and growth in percent.
    Returns a pd.Series so groupby().apply() yields one row per group.
    """
    # Period masks. H1 = Jan 1 through Jun 30 of the given year; the
    # original used "2019-06-06" as the 2019H1 end date.
    start_date = datetime.strptime("2019-01-01", "%Y-%m-%d")
    end_date = datetime.strptime("2019-06-30", "%Y-%m-%d")
    H12019 = df["date"].between(start_date, end_date)
    H12018 = df["date"].between("2018-01-01", "2018-06-30")
    duration60 = df["duration"].isin(["60"])
    exp = df["cost_type"].isin(["expert"])
    h12019_exp_duration = H12019 & duration60 & exp

    # Calculated fields (thousands):
    rev_2019 = df.loc[H12019, "revenue"].sum() / 1_000
    rev_2018 = df.loc[H12018, "revenue"].sum() / 1_000
    cost_2019 = df.loc[H12019, "cost"].sum() / 1_000
    cost_2019_exp = df.loc[H12019 & exp, "cost"].sum() / 1_000
    other_costs = cost_2019 - cost_2019_exp
    rev_per_60 = df.loc[H12019 & duration60, "revenue"].sum() / df.loc[H12019 & duration60, "customer_id"].nunique() / 1_000
    # Spec: (rev_2019 - cost_2019_exp) / rev_2019 as a percentage; the
    # original subtracted the total cost_2019 instead of the expert cost.
    profit_margin = (rev_2019 - cost_2019_exp) / rev_2019 * 100
    growth = (rev_2019 / rev_2018 - 1) * 100
    cost_exp_per_60 = df.loc[h12019_exp_duration, "cost"].sum() / df.loc[h12019_exp_duration, "customer_id"].nunique() / 1_000

    d = {
        "Revenue 2019": rev_2019,
        "Revenue 2018": rev_2018,
        "Cost 2019": cost_2019,
        "Expert cost 2019": cost_2019_exp,
        "Other costs": other_costs,
        "Revenue per 60d": rev_per_60,
        "Profit margin": profit_margin,
        "Expert cost for 60 days": cost_exp_per_60,
        "Growth": growth,
    }
    return pd.Series(d)


df.groupby(["region", "country"]).apply(final_agg)
# -

# # Become a pandas expert
#
# If you are looking to completely master the pandas library and become a trusted expert for doing data science work, check out my book [Master Data Analysis with Python][1]. It comes with over 300 exercises with detailed solutions covering the pandas library in-depth.
#
# [1]: https://www.dunderdata.com/master-data-analysis-with-python
Dunder Data Challenge 003 - Multiple Complex Grouping Aggregations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- class ProductOfNumbers: def __init__(self): self.p = [1] def add(self, num: int) -> None: if num == 0: self.p = [1] else: self.p.append(self.p[-1] * num) def getProduct(self, k: int) -> int: if k >= len(self.p): # 表示之前遇到了 0 return 0 return self.p[-1] // self.p[-k-1] obj = ProductOfNumbers() obj.add(2) param_2 = obj.getProduct(2) a = [1, 2, 3, 4] a[-2 - 1]
Array/1231/1352. Product of the Last K Numbers.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.5.2
#     language: julia
#     name: julia-1.5
# ---

# # PTDF with [PowerSimulations.jl](https://github.com/NREL-SIIP/PowerSimulations.jl)

# **Originally Contributed by**: <NAME>

# ## Introduction

# PowerSimulations.jl supports a linear PTDF optimal power flow formulation. This example shows a
# single multi-period optimization of economic dispatch with a linearized DC-OPF representation
# using PTDF power flow, and how to extract dual values, i.e. locational marginal prices for energy.

# ## Dependencies
# We can use the same RTS data and some of the initialization as in
# [OperationsProblem example](../../notebook/3_PowerSimulations_examples/1_operations_problems.ipynb)
# by sourcing it as a dependency.
# NOTE(review): the included script is expected to define `sys` (the RTS system)
# used below — confirm against 01_operations_problems.jl.
using SIIPExamples
pkgpath = dirname(dirname(pathof(SIIPExamples)))
include(
    joinpath(pkgpath, "test", "3_PowerSimulations_examples", "01_operations_problems.jl"),
);

# Since we'll be retrieving duals, we need a solver that returns dual values;
# here we use Ipopt.
using Ipopt
solver = optimizer_with_attributes(Ipopt.Optimizer)

# In the [OperationsProblem example](../../notebook/3_PowerSimulations_examples/1_operations_problems.ipynb)
# we defined a unit-commitment problem with a copper plate representation of the network.
# Here, we want to define an economic dispatch (linear generation decisions) with
# linear DC-OPF using a PTDF network representation.
# So, starting with the network, we can select from _almost_ any of the endpoints on this
# tree:
TypeTree(PSI.PM.AbstractPowerModel, init_expand = 10, scopesep="\n")

# For now, let's just choose a standard PTDF formulation.
ed_template = template_economic_dispatch(network = StandardPTDFModel)

# Currently energy budget data isn't stored in the RTS-GMLC dataset,
# so hydro is modeled as run-of-river dispatch instead.
ed_template.devices[:Hydro] = DeviceModel(HydroEnergyReservoir, HydroDispatchRunOfRiver)

# Calculate the PTDF matrix.
PTDF_matrix = PTDF(sys)

# Now we can build a 4-hour economic dispatch / PTDF problem with the RTS data.
# Here, we have to pass the keyword argument `constraint_duals` to OperationsProblem
# with the name of the constraint for which duals are required for them to be returned in the results.
problem = OperationsProblem(
    EconomicDispatchProblem,
    ed_template,
    sys,
    horizon = 4,
    optimizer = solver,
    balance_slack_variables = true,
    constraint_duals = [:CopperPlateBalance, :network_flow],
    PTDF = PTDF_matrix,
)

# And solve the problem and collect the results
res = solve!(problem);

# Here we collect the dual values from the results for the `CopperPlateBalance` and `network_flow`
# constraints. In the case of PTDF network formulation we need to compute the final LMP for each bus in the system by
# subtracting the duals (μ) of `network_flow` constraint multiplied by the PTDF matrix
# from the dual (λ) of `CopperPlateBalance` constraint.
# Note: we convert the results from DataFrame to Array for ease of use.
λ = convert(Array, res.dual_values[:CopperPlateBalance])
μ = convert(Array, res.dual_values[:network_flow])

# Here we create a Dict to store the congestion component of the LMP, which is a
# product of μ and the corresponding PTDF matrix column for each bus.
buses = get_components(Bus, sys)
congestion_lmp = Dict()
for bus in buses
    congestion_lmp[get_name(bus)] = μ * PTDF_matrix[:, get_number(bus)]
end
congestion_lmp = DataFrame(congestion_lmp)

# Finally here we get the LMP for each node in a lossless DC-OPF using the PTDF formulation.
LMP = λ .- congestion_lmp

# ---

# *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
notebook/3_PowerSimulations_examples/11_PTDF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # PLOT pCO2 with Full SOCAT CC minus line, Full SOCAT CC including line, # Full SOCAT CC minus line including Are's line import numpy as np import iris import iris.coord_categorisation import matplotlib.pyplot as plt import cartopy import cartopy.crs as ccrs import pandas as pd from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.pylab as pylab params = {'legend.fontsize': 'xx-large', 'axes.labelsize': 'xx-large', 'axes.titlesize':'xx-large', 'xtick.labelsize':'xx-large', 'ytick.labelsize':'xx-large'} pylab.rcParams.update(params) #matplotlib.rc('text', usetex = True) path = '/Data/Scratch/science/bradshaw-tracks/noresm/sampling/' LINE = 'Caribbean1' PLOT_START_INDEX = 265 PLOT_END_INDEX = 372 # locations = ['Finland_Germany', 'Europe_FrenchGuyana'] #locations = ['Denmark_Greenland', 'Finland_Germany', 'France_Brazil', 'Germany_Canada', # 'Iceland_USA', 'Japan_Australia', 'NorthSea', 'Europe_FrenchGuyana', # 'USA_Australia', 'USA_Japan', 'All_PseudoSOCATv5'] #locations = ['Germany_Canada'] abs_cmap = plt.get_cmap('jet') #abs_cmap = truncate_colormap(abs_cmap, 0.1, 0.9) diff_cmap = plt.get_cmap('seismic') #for location in locations: # print location fig = plt.figure(figsize=(25, 14)) full_socat_fname = path + 'SOCATv5_full_test/fco2.nc' pseudo_fname = path + LINE + '/with_are_ship/fco2.nc' socat_fname = path + LINE + '/without_ship/fco2.nc' pseudo_cube = iris.load_cube(pseudo_fname, 'fco2') socat_cube = iris.load_cube(socat_fname, 'fco2') full_socat_cube = iris.load_cube(full_socat_fname, 'fco2') pseudo_cube_ex = pseudo_cube.extract(iris.Constraint(time=lambda time: time >= PLOT_START_INDEX)) socat_cube_ex = socat_cube.extract(iris.Constraint(time=lambda time: time >= PLOT_START_INDEX)) full_socat_cube_ex = 
full_socat_cube.extract(iris.Constraint(time=lambda time: time >= PLOT_START_INDEX)) all_pseudo_cube = pseudo_cube_ex.collapsed(['time'], iris.analysis.MEAN) all_socat_cube = socat_cube_ex.collapsed(['time'], iris.analysis.MEAN) all_full_socat_cube = full_socat_cube_ex.collapsed(['time'], iris.analysis.MEAN) all_diff_cube1 = all_full_socat_cube - all_socat_cube all_diff_cube2 = all_pseudo_cube - all_socat_cube diff_diff_cube = all_diff_cube1 - all_diff_cube2 pCO2_socat_real = all_full_socat_cube.data pCO2_socat_without = all_socat_cube.data pCO2_socat_pseudo = all_pseudo_cube.data pCO2_diff_socat_with_without_real = all_diff_cube1.data pCO2_diff_socat_with_without_pseudo = all_diff_cube2.data pCO2_diff_diff = diff_diff_cube.data socat_lats = all_socat_cube.coord('latitude').points socat_lons = all_socat_cube.coord('longitude').points ax_socat_real = plt.subplot2grid((3, 6), (0, 0), colspan=2, projection=ccrs.PlateCarree()) ax_socat_without = plt.subplot2grid((3, 6), (0, 2), colspan=2, projection=ccrs.PlateCarree()) ax_socat_pseudo = plt.subplot2grid((3, 6), (0, 4), colspan=2, projection=ccrs.PlateCarree()) ax_diff_socat_real_without = plt.subplot2grid((3, 6), (1, 1), colspan=2, projection=ccrs.PlateCarree()) ax_diff_socat_real_pseudo = plt.subplot2grid((3, 6), (1, 3), colspan=2, projection=ccrs.PlateCarree()) ax_diff_diff = plt.subplot2grid((3, 6), (2, 2), colspan=2, projection=ccrs.PlateCarree()) p_socat_real = ax_socat_real.pcolormesh(socat_lons, socat_lats, pCO2_socat_real, cmap=abs_cmap, vmin=200, vmax=400) p_socat_without = ax_socat_without.pcolormesh(socat_lons, socat_lats, pCO2_socat_without, cmap=abs_cmap, vmin=200, vmax=400) p_socat_pseudo = ax_socat_pseudo.pcolormesh(socat_lons, socat_lats, pCO2_socat_pseudo, cmap=abs_cmap, vmin=200, vmax=400) cbar_ax1 = plt.gcf().add_axes([0.91, 0.66, 0.015, 0.25]) cbar_1 = plt.colorbar(p_socat_pseudo, cax=cbar_ax1, extend = 'both') cbar_1.set_label('pCO2 ($\\mu$atm)', rotation=90) p_diff_socat_real_without = 
ax_diff_socat_real_without.pcolormesh(socat_lons, socat_lats, pCO2_diff_socat_with_without_real, cmap=diff_cmap, vmin=-50, vmax=50) p_diff_socat_real_without = ax_diff_socat_real_pseudo.pcolormesh(socat_lons, socat_lats, pCO2_diff_socat_with_without_pseudo, cmap=diff_cmap, vmin=-50, vmax=50) cbar_ax2 = plt.gcf().add_axes([0.76, 0.34, 0.015, 0.25]) cbar_2 = plt.colorbar(p_diff_socat_real_without, cax=cbar_ax2, extend = 'both') cbar_2.set_label('pCO2 anomaly ($\\mu$atm)', rotation=90) p_diff_diff = ax_diff_diff.pcolormesh(socat_lons, socat_lats, pCO2_diff_diff, cmap=diff_cmap, vmin=-10, vmax=10) cbar_ax3 = plt.gcf().add_axes([0.61, 0.03, 0.015, 0.25]) cbar_3 = plt.colorbar(p_diff_diff, cax=cbar_ax3, extend = 'both') cbar_3.set_label('pCO2 anomaly ($\\mu$atm)', rotation=90) titles = ['1: Full SOCATv5 CC (inc SOCATv5 line)', '2: Full SOCATv5 CC (ex SOCATv5 line)', '3: Full SOCATv5 CC (inc PseudoSOCATv5 line)', '4: 1 - 2', '5: 3 - 2', '6: 4 - 5' ] for ax, title in zip([ax_socat_real, ax_socat_without, ax_socat_pseudo, ax_diff_socat_real_without, ax_diff_socat_real_pseudo, ax_diff_diff], titles): ax.add_feature(cartopy.feature.LAND) ax.add_feature(cartopy.feature.COASTLINE) plt.gca().set_title = title ax.title.set_text(title) map_plot_dic = {'bottom':0.01, 'top':0.93, 'left':0.01, 'right':0.89, 'wspace':0.2, 'hspace':0.1} plt.subplots_adjust(**map_plot_dic) fig.suptitle(LINE + ' 2007-2016 average', fontsize=30, fontweight='bold') fname = path + LINE + '/' + LINE + '_pco2_validation_map.png' fig.savefig(fname, dpi=300) print fname plt.close()
jupyter/SOCAT validation plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, recall_score, precision_score, f1_score, classification_report, accuracy_score


def get_summary(dist, network, split):
    """Return (network, split, precision, recall, accuracy) for one prediction CSV.

    Metrics come from sklearn's classification_report: precision/recall for
    class '0' plus overall accuracy, each rounded to two decimals.
    """
    path = f'{dist}/figures/{network}_{split}.csv'
    df = pd.read_csv(path)
    # BUGFIX (dead code removed): the original computed precision/recall/accuracy
    # with precision_score/recall_score/accuracy_score and then immediately
    # overwrote all three from classification_report — only the report-based
    # values were ever returned.
    report = classification_report(df.true, df.pred, output_dict=True)
    precision = round(report['0']['precision'], 2)
    recall = round(report['0']['recall'], 2)
    accuracy = round(report['accuracy'], 2)
    return network, split, precision, recall, accuracy


for network in ('vgg16','vgg19','inceptionV3','xception','resnet50'):
    for split in ('val','test'):
        print(get_summary('d3', network, split))


# +
def _plot_cm(matrix):
    """Render one 2x2 confusion-matrix heatmap with the shared styling and labels."""
    plt.figure(figsize=(4,4))
    ax = sns.heatmap(matrix, cmap='Blues', annot=True, fmt='.0%', cbar=False, annot_kws={'size':18})
    ax.set_ylabel('Prediction', fontsize=16)
    ax.set_xlabel('Actual', fontsize=16)
    ax.set_xticklabels([], fontsize=14)
    ax.set_yticklabels([], fontsize=14)
    ax.tick_params(axis='both', which='both', length=0)
    # Manual class labels along the bottom (Actual) and right (Prediction) edges.
    plt.text(0.5,-0.1, "Flexible", horizontalalignment='center', fontsize=14)
    plt.text(1.5,-0.1, "Rigid", horizontalalignment='center', fontsize=14)
    plt.text(2.2,0.3, "Flexible", horizontalalignment='center', rotation="vertical", fontsize=14)
    plt.text(2.2,1.5-.1, "Rigid", horizontalalignment='center', rotation="vertical", fontsize=14)
    plt.show()


def get_cm(network, split):
    """Plot the mean confusion matrix over datasets d1-d3 for one network/split.

    Two normalizations are shown: all cells summing to 100%, then each row
    summing to 100%. Plotting styling is factored into _plot_cm (the original
    duplicated the entire styling block twice).
    """
    path_1 = f'd1/figures/{network}_{split}.csv'
    path_2 = f'd2/figures/{network}_{split}.csv'
    path_3 = f'd3/figures/{network}_{split}.csv'
    df1 = pd.read_csv(path_1)
    df2 = pd.read_csv(path_2)
    df3 = pd.read_csv(path_3)
    cm1 = confusion_matrix(df1.true, df1.pred)
    cm2 = confusion_matrix(df2.true, df2.pred)
    cm3 = confusion_matrix(df3.true, df3.pred)
    # Mean matrix over the three datasets (integer division, as originally;
    # computed once instead of twice).
    cm = (cm1 + cm2 + cm3)//3

    # All items add up to 100%
    _plot_cm(cm / cm.sum())

    # All rows add up to 100%
    _plot_cm(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
# -


get_cm('vgg19', 'test')
code/metrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="mrDzC9FUyWJk"
# # Machine Learning Textbook, 3rd Edition

# + [markdown] id="90ZMzS7wyWJm"
# # Chapter 16 - Modeling Sequential Data Using Recurrent Neural Networks (2/2)

# + [markdown] id="i-YXfR8UyWJm"
# **You can view this notebook in the Jupyter notebook viewer (nbviewer.jupyter.org) or run it in Google Colab (colab.research.google.com) via the links below.**
#
# <table class="tfo-notebook-buttons" align="left">
#   <td>
#     <a target="_blank" href="https://nbviewer.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch16/ch16_part2.ipynb"><img src="https://jupyter.org/assets/share.png" width="60" />View in Jupyter Notebook Viewer</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch16/ch16_part2.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#   </td>
# </table>

# + [markdown] id="5_u_b0r4yWJn"
# ### Table of Contents

# + [markdown] id="G-xguWppyWJn"
# - Implementing RNNs for sequence modeling with TensorFlow
# - Second project: implementing a character-level language model with TensorFlow
# - Preprocessing the dataset
# - Building a character-level RNN model
# - Evaluation phase - generating new text
# - Language understanding with Transformer models
# - Understanding the self-attention mechanism
# - A basic form of self-attention
# - Self-attention mechanism with query, key, and value weights
# - Multi-head attention and the Transformer block
# - Summary

# + id="RLnk-L-ZyWJn"
from IPython.display import Image

# + [markdown] id="k8wSLT9byWJn"
# ## Second project: implementing a character-level language model with TensorFlow

# + colab={"base_uri": "https://localhost:8080/", "height": 240} id="-tysyJWSyWJn" outputId="875e3eb2-08c6-47c5-98e0-8a3109f05838"
Image(url='https://git.io/JLdVE', width=700)

# + [markdown] id="xrqTH2wvyWJo"
# ### Preprocessing the dataset

# + colab={"base_uri": "https://localhost:8080/"} id="GtxzLN-RyWJo" outputId="f1244917-4632-465c-cd5c-4fcbdeeeda1d"
# If running on Colab, execute this cell to download the training text.
# !wget https://raw.githubusercontent.com/rickiepark/python-machine-learning-book-3rd-edition/master/ch16/1268-0.txt

# + colab={"base_uri": "https://localhost:8080/"} id="mRwzImO7yWJo" outputId="c819b301-e54e-43e9-f833-cc8a341b384d"
import numpy as np

## Read and preprocess the text
with open('1268-0.txt', 'r', encoding='UTF8') as fp:
    text=fp.read()

# Keep only the body of the book, dropping the Project Gutenberg header/footer.
start_indx = text.find('THE MYSTERIOUS ISLAND')
end_indx = text.find('End of the Project Gutenberg')
print(start_indx, end_indx)
text = text[start_indx:end_indx]
char_set = set(text)
print('전체 길이:', len(text))
print('고유한 문자:', len(char_set))

# + colab={"base_uri": "https://localhost:8080/", "height": 288} id="HjzozZLuyWJp" outputId="6441ec8d-ca80-431d-9c1f-54a8a58ba70c"
Image(url='https://git.io/JLdVz', width=700)

# + colab={"base_uri": "https://localhost:8080/"} id="X3zvJimcyWJp" outputId="8b44153d-f7a9-40d0-b48f-5936a4d70cca"
# Build the character <-> integer mappings and encode the whole text.
chars_sorted = sorted(char_set)
char2int = {ch:i for i,ch in enumerate(chars_sorted)}
char_array = np.array(chars_sorted)
text_encoded = np.array(
    [char2int[ch] for ch in text],
    dtype=np.int32)
print('인코딩된 텍스트 크기: ', text_encoded.shape)
print(text[:15], ' == 인코딩 ==> ', text_encoded[:15])
print(text_encoded[15:21], ' == 디코딩 ==> ', ''.join(char_array[text_encoded[15:21]]))

# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="c9JdWEpwyWJp" outputId="4934fada-5a31-4645-8d63-f1ad02d5b0af"
Image(url='https://git.io/JLdVV', width=700)

# + colab={"base_uri": "https://localhost:8080/"} id="_E5CojZ5yWJp" outputId="4d603d78-8118-4754-86db-382d661eb4f8"
import tensorflow as tf

ds_text_encoded = tf.data.Dataset.from_tensor_slices(text_encoded)

for ex in ds_text_encoded.take(5):
    print('{} -> {}'.format(ex.numpy(), char_array[ex.numpy()]))

# + colab={"base_uri": "https://localhost:8080/"} id="Ilw6v7iKyWJq" outputId="e511bb03-cbc3-4ea9-c945-e5538b600478"
# Chunks of seq_length + 1 so each chunk yields a 40-char input and its target.
seq_length = 40
chunk_size = seq_length + 1

ds_chunks = ds_text_encoded.batch(chunk_size, drop_remainder=True)

## inspection:
for seq in ds_chunks.take(1):
    input_seq = seq[:seq_length].numpy()
    target = seq[seq_length].numpy()
    print(input_seq, ' -> ', target)
    print(repr(''.join(char_array[input_seq])),
          ' -> ', repr(''.join(char_array[target])))

# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="9Ot6_7ptyWJq" outputId="9e1f2007-f771-4da5-97fa-dddb829850f0"
Image(url='https://git.io/JLdVr', width=700)

# + colab={"base_uri": "https://localhost:8080/"} id="oDIGchMbyWJq" outputId="86366505-3d9e-473a-917d-bb3dae88210a"
## Define a function to split each chunk into x & y
def split_input_target(chunk):
    """Split one chunk into (input, target) shifted by one character."""
    input_seq = chunk[:-1]
    target_seq = chunk[1:]
    return input_seq, target_seq

ds_sequences = ds_chunks.map(split_input_target)

## Check:
for example in ds_sequences.take(2):
    print('입력 (x):', repr(''.join(char_array[example[0].numpy()])))
    print('타깃 (y):', repr(''.join(char_array[example[1].numpy()])))
    print()

# + colab={"base_uri": "https://localhost:8080/"} id="EwXM1x7MyWJr" outputId="01b273b3-cb3a-41a8-937f-e00a79bccef8"
# Batch size
BATCH_SIZE = 64
BUFFER_SIZE = 10000

tf.random.set_seed(1)
ds = ds_sequences.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)# drop_remainder=True)
ds

# + [markdown] id="ALd_KVXbyWJr"
# ### Building a character-level RNN model

# + colab={"base_uri": "https://localhost:8080/"} id="2DQqLCqAyWJr" outputId="f948ea26-0e7f-4218-a344-5cfb1bf9ec37"
def build_model(vocab_size, embedding_dim, rnn_units):
    """Embedding -> LSTM (sequence output) -> Dense over the vocabulary (logits)."""
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim),
        tf.keras.layers.LSTM(
            rnn_units, return_sequences=True),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model

charset_size = len(char_array)
embedding_dim = 256
rnn_units = 512

tf.random.set_seed(1)

model = build_model(
    vocab_size = charset_size,
    embedding_dim=embedding_dim,
    rnn_units=rnn_units)

model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="6oDKVX0JyWJr" outputId="a2516e61-0465-407a-a845-159f02ab5c9b"
# The Dense layer outputs raw logits, hence from_logits=True.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True
    ))

model.fit(ds, epochs=20)

# + [markdown] id="HPxMUjzzyWJr"
# ### Evaluation phase - generating new text

# + colab={"base_uri": "https://localhost:8080/"} id="qJ8Wf-ofyWJs" outputId="ec5fd140-1312-4039-c8fe-08f343196ed6"
# Sampling demo: equal logits give a uniform categorical distribution.
tf.random.set_seed(1)

logits = [[1.0, 1.0, 1.0]]
print('확률:', tf.math.softmax(logits).numpy()[0])
samples = tf.random.categorical(
    logits=logits, num_samples=10)
tf.print(samples.numpy())

# + colab={"base_uri": "https://localhost:8080/"} id="PZd-fbXByWJs" outputId="c9ac6d96-9881-4576-dd10-5ec4c8f5fbf4"
# A larger logit makes its class dominate the samples.
tf.random.set_seed(1)

logits = [[1.0, 1.0, 3.0]]
print('확률:', tf.math.softmax(logits).numpy()[0])
samples = tf.random.categorical(
    logits=logits, num_samples=10)
tf.print(samples.numpy())

# + colab={"base_uri": "https://localhost:8080/"} id="R_b2-tLDyWJs" outputId="b603f6e4-a9c8-472a-f94c-00d179a8629d"
def sample(model, starting_str,
           len_generated_text=500,
           max_input_length=40,
           scale_factor=1.0):
    """Generate text character-by-character from the trained model.

    scale_factor scales the logits before sampling: >1 makes the output more
    predictable, <1 makes it more random. The input window is truncated to the
    last max_input_length characters each step.
    """
    encoded_input = [char2int[s] for s in starting_str]
    encoded_input = tf.reshape(encoded_input, (1, -1))

    generated_str = starting_str

    model.reset_states()
    for i in range(len_generated_text):
        logits = model(encoded_input)
        logits = tf.squeeze(logits, 0)

        scaled_logits = logits * scale_factor
        new_char_indx = tf.random.categorical(
            scaled_logits, num_samples=1)

        # Only the prediction for the last time step is used.
        new_char_indx = tf.squeeze(new_char_indx)[-1].numpy()

        generated_str += str(char_array[new_char_indx])

        new_char_indx = tf.expand_dims([new_char_indx], 0)
        encoded_input = tf.concat(
            [encoded_input, new_char_indx],
            axis=1)
        encoded_input = encoded_input[:, -max_input_length:]

    return generated_str

tf.random.set_seed(1)
print(sample(model, starting_str='The island'))

# + [markdown] id="Stm4dLH2yWJs"
# * **Predictability vs. randomness**

# + colab={"base_uri": "https://localhost:8080/"} id="-UKHgrqTyWJs" outputId="5946a2f8-15bd-4e0b-c714-e2bbbfcc1993"
# Temperature-scaling demo: smaller factors flatten the distribution.
logits = np.array([[1.0, 1.0, 3.0]])
print('스케일 조정 전의 확률: ', tf.math.softmax(logits).numpy()[0])
print('0.5배 조정 후 확률: ', tf.math.softmax(0.5*logits).numpy()[0])
print('0.1배 조정 후 확률: ', tf.math.softmax(0.1*logits).numpy()[0])

# + colab={"base_uri": "https://localhost:8080/"} id="ickgML_8yWJt" outputId="1512fbd2-7bd1-4847-f1a1-58a64218759b"
# scale_factor > 1: sharper distribution, more predictable text.
tf.random.set_seed(1)
print(sample(model, starting_str='The island',
             scale_factor=2.0))

# + colab={"base_uri": "https://localhost:8080/"} id="UF4sqqpcyWJt" outputId="d4ab8e54-9a1e-4113-e60d-a9aa1976eb2e"
# scale_factor < 1: flatter distribution, more random text.
tf.random.set_seed(1)
print(sample(model, starting_str='The island',
             scale_factor=0.5))

# + [markdown] id="_V30iCf5yWJt"
# # Language understanding with Transformer models
#
# ## Understanding the self-attention mechanism
#
# ### A basic form of self-attention

# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="aWx-IXRtyWJt" outputId="9cec74f0-6f38-4dbb-a48d-b64e0716984f"
Image(url='https://git.io/JLdVo', width=700)

# + [markdown] id="EXNdJdxnyWJt"
# ### Self-attention mechanism with query, key, and value weights

# + [markdown] id="o99EBntFyWJt"
# ## Multi-head attention and the Transformer block

# + colab={"base_uri": "https://localhost:8080/", "height": 278} id="Klo3F7nvyWJu" outputId="6d4ae9f3-ffd4-41d0-9a03-204620bb52cf"
Image(url='https://git.io/JLdV6', width=700)
ch16/ch16_part2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # OpenMined Grid (Mockup) # # This is a mockup notebook of desired functionality. For current progress on this notebook, please check out # + import warnings import numpy as np import phe as paillier from sonar.contracts import ModelRepository,Model from syft.he.paillier.keys import KeyPair from syft.nn.linear import LinearClassifier import numpy as np from sklearn.datasets import load_diabetes def get_balance(account): return repo.web3.fromWei(repo.web3.eth.getBalance(account),'ether') warnings.filterwarnings('ignore') from sonar import GridClient # + # for the purpose of the simulation, we're going to split our dataset up amongst # the relevant simulated users diabetes = load_diabetes() y = diabetes.target X = diabetes.data validation = (X[0:5],y[0:5]) # we're also going to initialize the model trainer smart contract, which in the # real world would already be on the blockchain (managing other contracts) before # the simulation begins # ATTENTION: copy paste the correct address (NOT THE DEFAULT SEEN HERE) from truffle migrate output. grid = GridClient('0xcc37157ad0f60637025e33d68eeeaf546a360dc6', ipfs_host='localhost', web3_host='localhost') # blockchain hosted model repository # - diabetes_classifier = LinearClassifier(desc="DiabetesClassifier",n_inputs=10,n_labels=1) diabetes_model = Model(owner=cure_diabetes_inc, syft_obj = diabetes_classifier, bounty = 1000, training_input = X, training_target = y ) model_id = grid.submit_model(diabetes_model) grid.model_status(model_id) grid.model_status(model_id) grid.model_status(model_id) trained_diabetes_model = grid.get_model(model_id)
notebooks/OpenMined Grid Mockup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import numpy as np import matplotlib.pyplot as plt # %matplotlib inline eye_classifier = cv2.CascadeClassifier('Models/haarcascade_eye.xml') img = cv2.imread('Images/Sheen.jpg') fix_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(fix_img) def eyes_detect(fix_img): eyes_rect = eye_classifier.detectMultiScale(fix_img) for (x,y,w,h) in eyes_rect: cv2.rectangle(fix_img, (x,y),(x+w, y+h),(234,0,0),3) return fix_img result = eyes_detect(fix_img) plt.imshow(result)
Human face and eye detection project/Image_eye_detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # This reads in spectra from a directory and overplots them, to give a cursory sense of # how good the normalization routine was # Created 2021 July 13 by E.S. # + import glob import pandas as pd import matplotlib.pyplot as plt # %matplotlib qt # - stem = "/Users/bandari/Documents/git.repos/rrlyrae_metallicity/rrlyrae_metallicity/" + \ "calib_application/realizations_output/norm/final/" file_list = glob.glob(stem + "*.dat*") # + for i in range(0,len(file_list)): print(i) df = pd.read_csv(file_list[i], names=["wavel","norm_flux"]) plt.plot(df["wavel"], df["norm_flux"], alpha=0.3) plt.xlabel("Wavelength (angs)") plt.ylabel("Flux (normalized)") plt.savefig("junk.png") # -
notebooks_for_development/overplot_normalized_spectra_fyi.ipynb
# Pandas and NumPy cheatsheet (Chapter 4): loading data, basic EDA,
# selecting/filtering, plotting, and cleaning.
#
# Fixes applied to the original notebook code:
#  - removed the deprecated ``infer_datetime_format`` argument (pandas
#    infers the format by default when ``parse_dates`` is given)
#  - ``sep=None`` auto-detection requires ``engine='python'``
#  - ``pd.io.json.json_normalize`` is deprecated/removed; use
#    ``pd.json_normalize``
#  - ``DataFrame.median()`` on a frame with string columns needs
#    ``numeric_only=True`` on modern pandas

import json

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine

# ---------------------------------------------------------------------------
# Loading data: CSV
# ---------------------------------------------------------------------------
df = pd.read_csv('data/chinook_data.csv')
df.head()
df.tail()

# Two fundamental structures: Series (1D) and DataFrame (2D).
df['year']
type(df['year'])   # pandas Series
type(df)           # pandas DataFrame

# Parse the 'year' column as datetimes while loading.
df = pd.read_csv('data/chinook_data.csv', parse_dates=['year'])
df.head()

# The left-most column is the index; auto-generated here.
df.index

# Write back to CSV; the index was auto-generated, so don't save it.
df.to_csv('data/chinook_data_2.csv', index=False)

# Other separators: tab-separated values ...
df = pd.read_csv('data/chinook_data.tsv', sep='\t')
df.head()

# ... or let pandas sniff the separator (the sniffer needs the python engine).
df = pd.read_csv('data/chinook_data.tsv', sep=None, engine='python')
df.head()

# CSV straight from a URL.
url = ('https://raw.githubusercontent.com/PacktPublishing/'
       'Practical-Data-Science-with-Python/main/Chapter4/chinook_data.csv')
df = pd.read_csv(url)
df.head()

# ---------------------------------------------------------------------------
# Excel
# ---------------------------------------------------------------------------
df = pd.read_excel('data/chinook_data.xlsx', sheet_name='sales_data',
                   parse_dates=['year'])
df.head()
df.to_excel('data/chinook_data_2.xlsx', index=False)

# ---------------------------------------------------------------------------
# HDF5: several keyed datasets in one file, with compression
# ---------------------------------------------------------------------------
df.to_hdf('data/chinook_data.hdf', key='chinook_sales', mode='a', complevel=3)
df.to_hdf('data/chinook_data.hdf', key='chinook_sales2', mode='a', complevel=3)
df2 = pd.read_hdf('data/chinook_data.hdf', key='chinook_sales')
df2.head()
df3 = pd.read_hdf('data/chinook_data.hdf', key='chinook_sales2', start=-10)
df3.head()
df3.shape

# ---------------------------------------------------------------------------
# Feather: fast R<->Python interchange (not for long-term storage)
# ---------------------------------------------------------------------------
df.to_feather('data/chinook_data.ft')
df4 = pd.read_feather('data/chinook_data.ft')
df4.head()

# ---------------------------------------------------------------------------
# SQL: read a whole table through SQLAlchemy; to limit results or join
# tables, use read_sql_query with a query string instead.
# ---------------------------------------------------------------------------
engine = create_engine('sqlite:///data/chinook.db')
connection = engine.connect()
df = pd.read_sql_table('tracks', connection)
df.head()

# ---------------------------------------------------------------------------
# DataFrames from Python lists: bitcoin yearly highs 2013-2020
# ---------------------------------------------------------------------------
btc_data = [(1138.5, '2013'), (919.24, '2014'), (466.01, '2015'),
            (981.7, '2016'), (19210.0, '2017'), (17169.0, '2018'),
            (12876.0, '2019'), (19131.0, '2020')]
df = pd.DataFrame(data=btc_data, columns=['high_price', 'year'])
df.head()

# ... or from separate column lists.
high_price = [b[0] for b in btc_data]
year = [b[1] for b in btc_data]
df = pd.DataFrame(data={'high_price': high_price, 'year': year})
df.head()

# ---------------------------------------------------------------------------
# JSON into a DataFrame
# ---------------------------------------------------------------------------
df = pd.read_json('data/bitcoin_price.json')
df.head()
df.to_csv('data/bitcoin_price.csv', index=False)  # used for chunking below

with open('data/bitcoin_price.json') as f:
    json_data = json.load(f)
# json_normalize moved from pd.io.json to the top-level pd namespace.
df = pd.json_normalize(json_data)
df.head()

# ---------------------------------------------------------------------------
# Reading big files in chunks (~2700 rows -> 3 chunks of 1000)
# ---------------------------------------------------------------------------
for chunk in pd.read_csv('data/bitcoin_price.csv', chunksize=1000):
    print(chunk.head())

# ---------------------------------------------------------------------------
# Basic numeric EDA
# ---------------------------------------------------------------------------
btc_df = pd.read_csv('data/bitcoin_price.csv')
btc_df.describe()
btc_df.info()
btc_df.isna().sum()      # missing values per column
btc_df.shape             # (rows, columns)
btc_df.dtypes
btc_df.median(numeric_only=True)  # skip non-numeric columns such as 'symbol'
btc_df['symbol'].mode()

# Unique values and value counts on the chinook tracks table.
df = pd.read_sql_table('tracks', connection)
df.head()
df['UnitPrice'].unique()
df['UnitPrice'].value_counts()

# ---------------------------------------------------------------------------
# Selecting columns and rows, and filtering
# ---------------------------------------------------------------------------
btc_df = pd.read_csv('data/bitcoin_price.csv', index_col='time')
btc_df.head()
print(btc_df.columns)
print(btc_df.index)

btc_df['volume']                     # a column
btc_df.loc[1364688000000]            # a row, by index label
btc_df.loc[1364688000000, 'volume']  # a single cell
btc_df.iloc[0]                       # first row, by position
btc_df.iloc[0, 5]                    # first row's volume, by position

# Boolean filtering (like SQL WHERE): rows with volume above 3000.
btc_df[btc_df['volume'] > 3000]
btc_df['volume'] > 3000              # the underlying boolean Series
type(btc_df['volume'] > 3000)

df = pd.DataFrame(data={'text': ['A good dog!', 'A bad dog.',
                                 'This cat is good.']})
df[df['text'].str.contains('good')]
df[~df['text'].str.contains('good')]  # ~ negates a filter

df = pd.DataFrame(data={'City': ['Los Angeles', 'San Francisco', 'New York',
                                 'Omaha', 'Atlantis'],
                        'State': ['CA', 'CA', 'NY', 'NE', pd.NA]})
df[df['State'].isin(['CA', 'NY'])]
df[df['State'].isna()]
df[df['State'].notnull()]
# Combine conditions with & (and) / | (or); wrap each in parentheses.
df[(df['State'] == 'CA') & (df['City'] == 'Los Angeles')]

# ---------------------------------------------------------------------------
# Plotting
# ---------------------------------------------------------------------------
df = pd.read_sql_table('tracks', connection)
_ = df['Milliseconds'].hist()
# Handy trick: value_counts with a bar plot.
_ = df['GenreId'].value_counts().plot.bar()

# Time series plot on a log y-axis, with two columns removed first.
btc_df = pd.read_csv('data/bitcoin_price.csv', index_col='time')
btc_df.drop(['volume', 'low'], axis=1).plot(logy=True)

# ---------------------------------------------------------------------------
# Preparing and cleaning data
# ---------------------------------------------------------------------------
btc_df.drop('volume', axis=1)        # drop a column (returns a new frame)
btc_df.drop(1364688000000)           # drop a row by index (axis=0 default)
btc_df.drop('volume', axis=1, inplace=True)  # modify in place

df = pd.DataFrame(data={'City': ['Los Angeles', 'San Francisco', 'New York',
                                 'Omaha', 'Atlantis'],
                        'State': ['CA', 'CA', 'NY', 'NE', pd.NA]})
df.dropna()                   # drop rows with any missing value
df.dropna(subset=['State'])   # only consider the State column

# Fill missing values with each column's mode.
df.fillna({'City': df['City'].mode()[0], 'State': df['State'].mode()[0]})
df.City.mode()
df.State.mode()[0]

# Remaining cheatsheet topics (outline only, as in the original):
#   detecting outliers; swifter; apply; groupby; reduce and map;
#   datetimes and timezones; iterating through DataFrames;
#   NumPy basics (arrays, converting lists, matrices, shapes, dtypes,
#   getting numpy from pandas, concatenating arrays);
#   NumPy math (multiplication, addition, array addition, log, sin,
#   euclidean distance, cosine distance).
4-Chapter-4/Pandas_and_Numpy_Cheatsheet.ipynb
# EFW index vs GDP-per-capita scatter plot (Bokeh), "El Hub Económico".
#
# Fixes applied to the original notebook code:
#  - six copy-pasted label+arrow constructions collapsed into one helper
#  - the JSON export double-encoded the plot (json.dumps followed by
#    json.dump wrote a JSON *string* containing JSON); dump it once
#  - the New Zealand label was a scrubbed "<NAME>" placeholder; restored
#  - tooltip typo "Índce" -> "Índice"

"|IMPORT PACKAGES|"
import numpy as np
import pandas as pd
import datetime
from math import pi
from bokeh.plotting import show, figure, output_file, save
from bokeh.io import show, output_notebook, curdoc, export_png
from bokeh.models import (ColumnDataSource, LinearAxis, Range1d,
                          NumeralTickFormatter, LabelSet, Label,
                          BoxAnnotation, DatetimeTickFormatter, Text, Span)
from bokeh.models.tools import HoverTool
from bokeh.models import Arrow, NormalHead, OpenHead, VeeHead
from bokeh.transform import dodge
from datetime import datetime as dt

"|IMPORT DATA|"
path = r'https://github.com/ncachanosky/research/blob/master/Economic%20Series/'
file = r'Resumen%20Estadistico%20-%20Internacional.xlsx?raw=true'
#file = r'Resumen%20Estadistico%20-%20Argentina.xlsx'
IO = path + file
sheet = 'DATA'

data = pd.read_excel(IO, sheet_name=sheet, usecols="A:E,M", skiprows=2,
                     engine='openpyxl')  # Be patient...
data = data.rename(columns={"GDP (PPP)": "GDP"})
data = data[data.YEAR == 2018].reset_index()
data = data.drop(['index'], axis=1)

"|CHECK DATA|"
data

"|BUILD PLOT|"
# One fixed color per region.
colormap = {'Africa':        'maroon',
            'Latin America': 'teal',
            'North America': 'red',
            'Asia':          'olive',
            'Europe':        'green',
            'Oceania':       'blue'}
data['COLORS'] = [colormap[i] for i in data['REGION']]

cds = ColumnDataSource(data)

p = figure(title="EL HUB ECONÓMICO | ÍNDICE DE LIBERTAD ECONÓMICA Y PBI PER CAPITA (PPP, 2011 US$)",
           x_range=(2, 10),
           x_axis_label="(-) Índice de libertad económica (+)",
           plot_height=400,
           plot_width=700)

year = Label(x=2.5, y=120000, text="1998", text_font_size='20px',
             text_alpha=0.75)
p.add_layout(year)

# Visible scatter plus two invisible renderers kept from the original
# (they expose COUNTRY/REGION columns to the plot).
p1 = p.circle('EFW', 'GDP', fill_color='COLORS', fill_alpha=0.50,
              line_color='COLORS', legend_group='REGION', line_alpha=0.50,
              size=8, source=cds)
p2 = p.circle('EFW', 'COUNTRY', fill_alpha=0, line_alpha=0, size=1, source=cds)
p3 = p.circle('EFW', 'REGION',  fill_alpha=0, line_alpha=0, size=1, source=cds)
p.add_tools(HoverTool(renderers=[p1],
                      tooltips=[("País", "@COUNTRY"),
                                ("Regions", "@REGION"),
                                ("Índice", "@EFW{0.0}"),   # was "Índce"
                                ("PBI", "@GDP{$0,0}")]))


def annotate_country(fig, frame, row, text, color, label_x, label_y,
                     align='left'):
    """Add a country label plus an arrow from the label to its data point.

    ``row`` indexes ``frame`` positionally; columns 4 and 5 hold the EFW
    index (x) and GDP per capita (y) respectively.
    """
    fig.add_layout(Label(x=label_x, y=label_y, text=text,
                         text_font_size='12px', text_color=color,
                         text_align=align, text_baseline='middle'))
    fig.add_layout(Arrow(end=VeeHead(line_color=color, line_width=2,
                                     line_alpha=0.5, size=10,
                                     fill_color=color, fill_alpha=0.75),
                         x_start=label_x, x_end=frame.iloc[row, 4],
                         y_start=label_y, y_end=frame.iloc[row, 5],
                         line_color=color, line_alpha=0.5))


annotate_country(p, data, 62,  "Argentina",      'teal',  5,    40000,
                 align='center')
annotate_country(p, data, 73,  "Chile",          'teal',  9,    27000)
annotate_country(p, data, 116, "Estados Unidos", 'red',   8.5,  85000)
annotate_country(p, data, 192, "Suecia",         'green', 7.25, 75000,
                 align='center')
annotate_country(p, data, 215, "Luxemburgo",     'green', 7,
                 data.iloc[215, 5], align='right')
annotate_country(p, data, 136, "Singapur",       'olive', 9,    110000)
# Restored label text (the original file had a scrubbed "<NAME>" placeholder).
annotate_country(p, data, 223, "Nueva Zelanda",  'blue',  8.75, 50000)

# Shaded band marking the top-10 freest economies.
top_10 = BoxAnnotation(left=data.iloc[184, 4], right=10, line_color="gray",
                       fill_color="gray", fill_alpha=0.1)
x = (data.iloc[184, 4] + 10) / 2
top_10_N = Label(x=x, y=2000, text="Top 10", text_font_size="10pt",
                 text_color='gray', text_align="center",
                 angle_units="deg", angle=0)
p.add_layout(top_10)
p.add_layout(top_10_N)

# Year tag and legend.
year = Label(x=2.5, y=10000, text="2018", text_font_size='20px',
             text_color='black', text_align='center', text_baseline='middle')
p.add_layout(year)

p.yaxis[0].formatter = NumeralTickFormatter(format="$0,0")
p.legend.location = "top_left"
p.legend.orientation = "vertical"

show(p)

"|EXPORT .PNG FILE|"
export_png(p, filename="efw_gdp.png")

"|# CREATE HTML FILE|"
output_file(filename="efw_gdp.html", title="EFW y PBI")
save(p)

"|CREATE JSON FILE|"
import json
from bokeh.embed import json_item

# Write the json_item payload directly; the original serialized it twice
# (json.dumps then json.dump), producing a quoted JSON string.
with open("efw_gdp.json", "w") as fp:
    json.dump(json_item(p, "efw_gdp"), fp)
static/Jupyter Notebooks/07.07_efw_gdp.ipynb
# Cryptocurrency clustering: preprocessing, PCA, t-SNE and K-Means.
#
# Fixes applied to the original notebook code:
#  - StandardScaler was fitted twice (fit() followed by fit_transform());
#    a single fit_transform() is enough
#  - get_clusters() computed an unused `predictions` array and mutated the
#    caller's DataFrame; it now returns a copy
#  - the elbow plot's x-ticks (1..10) did not match the k range actually
#    evaluated (1..9)
#  - `.apply(lambda x: float(x))` replaced with the vectorized `.astype(float)`

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans

# Read in the cryptocurrency dataset.
file = os.path.join("crypto_data.csv")
df = pd.read_csv(file)
df.head()
df.info()

# Keep only actively traded cryptos, then drop the now-constant flag column.
df = df[df.IsTrading]
df.drop('IsTrading', axis=1, inplace=True)
df.head()
df.info()

# Drop rows with null values.
df.dropna(inplace=True)
df.isnull().sum().sum()

# Filter out coins that have not been mined.
df = df[df.TotalCoinsMined > 0]

# Drop the name/symbol columns to prep for the ML algorithms.
df_dropped_names = df.drop(['Unnamed: 0', 'CoinName'], axis=1)
df_dropped_names.info()

# TotalCoinSupply is read as text; make it numeric.
df_dropped_names['TotalCoinSupply'] = df_dropped_names['TotalCoinSupply'].astype(float)
df_dropped_names.info()

# One-hot encode categorical columns for the PCA step.
df_dummies = pd.get_dummies(df_dropped_names)
df_dummies.info()

# Standardize the feature matrix (single fit_transform; the original
# fitted the scaler twice).
scaler = StandardScaler()
X_scaled = scaler.fit_transform(df_dummies)
X_scaled[:1]
len(X_scaled[:1][0])

# PCA keeping 90% of the explained variance (98 features -> 74).
pca = PCA(n_components=0.90)
crypto_pca = pca.fit_transform(X_scaled)
df_crypto_pca = pd.DataFrame(crypto_pca)
df_crypto_pca.head()

# t-SNE reduction to 2 dimensions for visualisation.
tsne = TSNE(learning_rate=250)
tsne_features = tsne.fit_transform(df_crypto_pca)
tsne_features.shape  # (n_samples, 2)

plt.scatter(tsne_features[:, 0], tsne_features[:, 1])
plt.show()
# The t-SNE reduction shows roughly two clusters plus two outliers.


def get_clusters(k, data):
    """Fit K-Means with ``k`` clusters on ``data``.

    Returns a *copy* of ``data`` with the assigned cluster label in a new
    'class' column (the original mutated the caller's DataFrame and also
    computed an unused predict() call).
    """
    model = KMeans(n_clusters=k, random_state=0)
    model.fit(data)
    labelled = data.copy()
    labelled["class"] = model.labels_
    return labelled


# Elbow curve: inertia for k = 1..9; the elbow sits around k = 2-3.
inertia = []
k_values = list(range(1, 10))
for i in k_values:
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(tsne_features)
    inertia.append(km.inertia_)

df_elbow = pd.DataFrame({'k': k_values, 'inertia': inertia})
df_elbow.head()

plt.plot(df_elbow['k'], df_elbow['inertia'])
plt.xticks(k_values)  # ticks now match the evaluated k values
plt.xlabel('k value')
plt.ylabel('inertia')
plt.title('Inertia vs. k-value, Crypto K-Means Clustering, t-SNE reduced data')
plt.show()

# Cluster the t-SNE features with k = 4 and plot the labelled clusters.
df_crypto_tsne = pd.DataFrame(tsne_features, columns=['Feature 1', 'Feature 2'])
clusters_tsne = get_clusters(4, df_crypto_tsne)
clusters_tsne

fig, ax = plt.subplots(1, 1, figsize=(7, 5))
scatter = ax.scatter(x=clusters_tsne['Feature 1'],
                     y=clusters_tsne['Feature 2'],
                     c=clusters_tsne['class'])
legend = ax.legend(*scatter.legend_elements(), loc='upper left')
ax.add_artist(legend)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.title('K-means clustering on t-SNE reduced dataset')
plt.show()

# Attach the cluster labels back to the original (named) rows.
df_crypto_tsne_clusted = pd.merge(df, clusters_tsne['class'],
                                  left_index=True, right_index=True)
df_crypto_tsne_clusted.head()
df_crypto_tsne_clusted[df_crypto_tsne_clusted['class'] == 2]

# Recommendation: based on the K-means findings the cryptocurrencies can be
# clustered; the plot above shows adequate separation using 4 clusters.
crypto_currencies_clustering.ipynb
# OpenDota match-id mining.
#
# Fixes applied to the original notebook code:
#  - the ~2 KB percent-encoded SQL query was duplicated verbatim (once as
#    `query_url`, once inside get_ids); it is now built from readable SQL
#    by build_query_url()
#  - get_ids() used a bare ``except:`` that swallowed every error and
#    referenced ``res`` which could be unbound
#  - get_matches_id() appended to its result lists only on success, so a
#    single failed request made the lists shorter than ``id_list`` and the
#    final DataFrame constructor raised; failures now append <NA>
#  - demo calls moved under ``if __name__ == '__main__':`` so importing
#    this module does not fire network requests

import time
from urllib.parse import quote

import pandas as pd

try:  # third-party deps kept from the original notebook
    import requests as req
except ImportError:  # pragma: no cover
    req = None
try:
    from bs4 import BeautifulSoup  # imported by the original; unused here
except ImportError:  # pragma: no cover
    BeautifulSoup = None

# Radiant win-rate expression, reused several times in the query.
_WINRATE = ("sum(case when (player_matches.player_slot < 128) = radiant_win "
            "then 1 else 0 end)::float/count(1)")

# Per-match kill statistics, win rate and its Wilson lower bound (z = 1.96).
_SQL_TEMPLATE = """SELECT
matches.match_id,
avg(kills) "AVG Kills",
count(distinct matches.match_id) count,
{win} winrate,
(({win})
 + 1.96 * 1.96 / (2 * count(1))
 - 1.96 * sqrt(((({win}) * (1 - ({win})) + 1.96 * 1.96 / (4 * count(1))) / count(1))))
 / (1 + 1.96 * 1.96 / count(1)) winrate_wilson,
sum(kills) sum,
min(kills) min,
max(kills) max,
stddev(kills::numeric) stddev
FROM matches
JOIN match_patch using(match_id)
JOIN leagues using(leagueid)
JOIN player_matches using(match_id)
JOIN heroes on heroes.id = player_matches.hero_id
LEFT JOIN notable_players ON notable_players.account_id = player_matches.account_id
LEFT JOIN teams using(team_id)
WHERE TRUE
AND kills IS NOT NULL
AND matches.start_time >= extract(epoch from timestamp '{from_date}T22:00:00.000Z')
AND matches.start_time <= extract(epoch from timestamp '{to_date}T22:00:00.000Z')
GROUP BY matches.match_id
HAVING count(distinct matches.match_id) >= 1
ORDER BY "AVG Kills" DESC, count DESC NULLS LAST"""


def build_query_url(from_date: str, to_date: str) -> str:
    """Return the OpenDota explorer URL for matches between the two dates.

    ``from_date``/``to_date`` are ``YYYY-MM-DD`` strings; the embedded SQL
    is percent-encoded into the ``sql=`` query parameter.
    """
    sql = _SQL_TEMPLATE.format(win=_WINRATE, from_date=from_date,
                               to_date=to_date)
    return "https://api.opendota.com/api/explorer?sql=" + quote(sql, safe="")


def get_ids(folder: str):
    """Download pro-match ids month by month and save them to
    ``<folder>matches_ids.csv`` (duplicates dropped). Returns "Done."."""
    dates = [
        ("2019-07-01", "2019-08-01"), ("2019-08-02", "2019-09-01"),
        ("2019-09-02", "2019-10-01"), ("2019-10-02", "2019-11-01"),
        ("2019-11-02", "2019-12-01"), ("2019-12-02", "2020-01-01"),
        ("2020-01-02", "2020-02-01"), ("2020-02-02", "2020-03-01"),
        ("2020-03-02", "2020-04-01"), ("2020-04-02", "2020-05-01"),
        ("2020-05-02", "2020-06-01"), ("2020-06-02", "2020-07-06"),
    ]

    match_ids = []
    for from_date, to_date in dates:
        try:
            res = req.get(build_query_url(from_date, to_date))
            res.raise_for_status()
            match_ids.extend(row["match_id"] for row in res.json()["rows"])
        except Exception as exc:  # network/HTTP/JSON errors: log and continue
            print(f"There was an error: {exc}")

    data = pd.DataFrame({"match_id": match_ids}).drop_duplicates()
    data.to_csv(f"{folder}matches_ids.csv", index=False)
    return "Done."


def get_matches_id(id_list: list, folder: str):
    """Fetch each match and save winner + hero line-ups to a timestamped CSV.

    One row per id in ``id_list`` (failed requests keep their row, filled
    with <NA>, so the columns always align). Returns "Done" when every
    request succeeded, otherwise "Error <status>" for the last failure.
    """
    error = ""
    victor_teams = []
    radiant_teams = []
    dire_teams = []

    for i, match_id in enumerate(id_list):
        res = req.get(f"https://api.opendota.com/api/matches/{match_id}")
        if res.status_code == 200:
            match = res.json()
            victor_teams.append("Radiant" if match["radiant_win"] else "Dire")
            radiant_teams.append('-'.join(str(pl["hero_id"])
                                          for pl in match["players"]
                                          if pl["isRadiant"]))
            dire_teams.append('-'.join(str(pl["hero_id"])
                                       for pl in match["players"]
                                       if not pl["isRadiant"]))
        else:
            error = f"{res.status_code}"
            victor_teams.append(pd.NA)
            radiant_teams.append(pd.NA)
            dire_teams.append(pd.NA)

        time.sleep(1)  # stay under the API rate limit
        print(f"request n° {i+1}")

    print("Creating DataFrame..")
    data = pd.DataFrame({
        "match_id": id_list,
        "victor_team": victor_teams,
        "radiant_team": radiant_teams,
        "dire_team": dire_teams,
    })
    print("Saving file..")
    data.to_csv(f"{folder}{int(time.time())}.csv", index=False)
    return "Done" if not error else f"Error {error}"


if __name__ == "__main__":
    # Scrape the match ids, then check data integrity.
    get_ids("../matchesdata/")
    print(pd.read_csv("../matchesdata/matches_ids.csv"))
    # Test the per-match fetch with a single id, then check the result.
    get_matches_id([5497477213], "../matchesdata/")
    print(pd.read_csv("../matchesdata/1594059904.csv"))
notebooks/deprecated/match_id_mining.ipynb
# Portland housing price prediction with linear regression.
#
# Fix: ``sklearn.cross_validation`` was removed in scikit-learn 0.20;
# ``train_test_split`` now lives in ``sklearn.model_selection``.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics

# Load the data and give the columns friendly names.
df = pd.read_csv("portlandhousing.txt")
df.columns = ['Size', 'bedroom', 'Price']
Port_housing = df

# Exploratory look at the data.
Port_housing.head()
Port_housing.info()
Port_housing.describe()
Port_housing.columns
sns.pairplot(Port_housing)
sns.distplot(Port_housing['Price'])
sns.heatmap(Port_housing.corr(), annot=True)

# Features and target.
Port_housing.columns
X = Port_housing[['Size', 'bedroom']]
y = Port_housing['Price']

# 70/30 train-test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# Train the model.
lm = LinearRegression()
lm.fit(X_train, y_train)
print(lm.intercept_)
lm.coef_
X_train.columns

cdf = pd.DataFrame(lm.coef_, X.columns, columns=["coeff"])
cdf
# Interpreting the coefficients: holding all else fixed, a 1 unit increase
# in Size is associated with an increase of ~145.30; a 1 unit increase in
# bedroom with a decrease of ~-11812.03.

# Evaluate on the held-out set.
predictions = lm.predict(X_test)
predictions
y_test.head()
plt.scatter(y_test, predictions)
sns.distplot(y_test - predictions)  # residual distribution

metrics.mean_absolute_error(y_test, predictions)
metrics.mean_squared_error(y_test, predictions)
np.sqrt(metrics.mean_squared_error(y_test, predictions))  # RMSE
Portland Housing price.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dog Breeds - Data Science project # # Step 4 : Clustering and Modeling Data # # In this step, we will Clustering the data into groups, modeling and compare the algorithms results # + id="mCD5cE181Xux" from __future__ import division, print_function, unicode_literals import numpy as np import os import pandas as pd # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import warnings from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.cluster import AgglomerativeClustering from sklearn.preprocessing import StandardScaler, normalize import scipy.cluster.hierarchy as shc from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report, confusion_matrix from sklearn.utils.multiclass import unique_labels #Import scikit-learn metrics module for accuracy calculation from sklearn import metrics # To make this notebook's output stable across runs np.random.seed(42) # + id="0k3zrhUF4xzb" mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # + id="I4XQFxTL40VW" #set style of plots sns.set_style('white') # + colab={"base_uri": "https://localhost:8080/", "height": 85} id="Ula3ktyq49ef" outputId="2f6d00a8-09a2-4c43-ed3c-7c4a7a62fa37" #define a custom palette customPalette = ["#e41a1c","#984ea3","#a65628","#377eb8","#ffff33","#4daf4a","#ff7f00"] sns.set_palette(customPalette) sns.palplot(customPalette) # + id="UUWgN9dF5IQt" # Ignore useless warnings (see SciPy issue #5998) warnings.filterwarnings(action="ignore", message="^internal gelsd") # + colab={"base_uri": "https://localhost:8080/", "height": 426} 
# + id="Rc1ISxNv18xb" outputId="1002a7c5-3fb0-4be6-ce2b-21dc451c6629"
# Load the dog-breed dataset; column 0 of the CSV holds the row index.
df = pd.read_csv("dog_breed_data_handling_missing_values.csv", index_col=0)
# Print the first 5 rows of the dataframe.
df.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 275} id="1IZyLfcQ2eIV" outputId="7ba0dc3f-bf3e-415a-dd95-d0b2726f3193"
# Work on a deep copy so feature preparation never mutates the raw frame.
# FIX: the original passed deep='true' — a truthy *string*; the `deep`
# keyword of DataFrame.copy expects a boolean, so pass deep=True explicitly.
df_features = df.copy(deep=True)
# Drop the columns with string values — clustering needs numeric features only.
df_features = df_features.drop(['breed name', 'Dog Breed Group', 'Height', 'Weight', 'Life Span'], axis=1)
df_features.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 547} id="VlgXptkYHsFi" outputId="eb0c6816-efe8-465d-8e69-30aae1c1ae6c"
df.head()

# + id="YJmN_4Cm8SQi"
# Elbow method: record the within-cluster sum of squares (inertia) for k = 1..7.
# NOTE(review): this curve is computed but never plotted here — presumably it
# was inspected elsewhere before settling on k = 8 below.
Sum_of_squared_distances = []
K = range(1, 8)
for k in K:
    km = KMeans(n_clusters=k)
    km = km.fit(df_features)
    Sum_of_squared_distances.append(km.inertia_)

# + colab={"base_uri": "https://localhost:8080/"} id="Hc2QTPVW8fRR" outputId="52da9e2f-1ea0-496c-ee8b-861c2f0ad536"
# Final clustering with k = 8 groups.
# NOTE(review): no random_state is set, so cluster labels can change between
# runs — confirm whether reproducibility matters for the downstream CSV.
kmeans = KMeans(n_clusters=8)
kmeans.fit(df_features)

# + id="qMT_fWIY8m4_"
# PCA to reduce our data to 2 dimensions for visualisation
y_kmeans = kmeans.predict(df_features)
pca = PCA(n_components=2)
principal_components = pca.fit_transform(df_features)

# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="1B72UYza8wWK" outputId="1e1d6b60-ae47-469b-bd2c-d3a4b22cfc20"
pc = pd.DataFrame(principal_components)
pc['label'] = y_kmeans
pc.columns = ['x', 'y', 'label']
# plot data with seaborn
cluster = sns.lmplot(data=pc, x='x', y='y', hue='label', fit_reg=False, legend=True, legend_out=True)

# + id="DwdYqU5xAPcI"
# Scaling the data so that all the features become comparable
scaler = StandardScaler()
X_scaled = scaler.fit_transform(df_features)

# Normalizing the data so that the data approximately
# follows a Gaussian distribution
X_normalized = normalize(X_scaled)

# Converting the numpy array into a pandas DataFrame
X_normalized = pd.DataFrame(X_normalized)

# + id="-wAPwSvcBSR0"
# Project the scaled+normalized features onto 2 principal components for the
# dendrogram / hierarchical-clustering visualisations below.
pca = PCA(n_components=2)
X_principal = pca.fit_transform(X_normalized)
X_principal = pd.DataFrame(X_principal)
X_principal.columns = ['P1', 'P2']

# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="rZ_4sEP-BYFi" outputId="3fb4c3a9-b3a2-4d1e-ec3c-a69dcfc46be7"
plt.figure(figsize=(8, 8))
plt.title('Visualising the data')
Dendrogram = shc.dendrogram((shc.linkage(X_principal, method='ward')))

# + colab={"base_uri": "https://localhost:8080/", "height": 377} id="pQqvBDWECF7j" outputId="280f45ad-54b5-4189-f52d-58a4d8fbbd29"
# applied agglomerative clustering with value of k=8 (Hierarchical clustering)
ac6 = AgglomerativeClustering(n_clusters=8)
plt.figure(figsize=(6, 6))
plt.scatter(X_principal['P1'], X_principal['P2'], c=ac6.fit_predict(X_principal), cmap='rainbow')
plt.show()

# + id="4tAkcdcJF1ro"
# Attach the KMeans cluster id of every row back onto the original frame.
df['breed group after clustering'] = y_kmeans

# + colab={"base_uri": "https://localhost:8080/"} id="oxyWoGykGEm-" outputId="5f65952c-7f97-426c-bb15-3b63ffc6b4f4"
# Shuffle the rows, then show how many breeds fell into each cluster.
df = df.sample(frac=1)
df['breed group after clustering'].value_counts()

# + colab={"base_uri": "https://localhost:8080/", "height": 547} id="fyOFJnBXHiO2" outputId="9b9a4234-84f7-4408-b51d-ebea99cc40fc"
# Restore the original row order after the shuffle above.
df.sort_index(inplace=True)
df.head()

# + colab={"base_uri": "https://localhost:8080/"} id="GQwqMxy3KIxE" outputId="fa977ab5-04fb-44c0-98b6-5c0b2bfcacf0"
# Train a Random Forest to predict the cluster assignments from the features
# (a sanity check that the clusters are learnable from the raw features).
X = df_features
y = y_kmeans
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
rfc = RandomForestClassifier(n_estimators=100, criterion='gini')
rfc.fit(X_train, y_train)

# + id="GT_AYYkxKPGu"
y_pred = rfc.predict(X_test)


# + id="bTlJ3l6XKU5G"
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    y_true : array-like
        Ground-truth labels.
    y_pred : array-like
        Predicted labels, same length as ``y_true``.
    classes : sequence
        Display names for the axis tick labels, one per class.
    normalize : bool
        When True, scale each row of the matrix to sum to 1.
    title : str or None
        Plot title; a sensible default is chosen when None.
    cmap : matplotlib colormap
        Colormap for the heat map.

    Returns
    -------
    matplotlib.axes.Axes
        The axes holding the rendered matrix.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax


# + colab={"base_uri": "https://localhost:8080/", "height": 477} id="G2ENhXPrKaCn" outputId="037f9899-6408-4d08-cf55-ae5f41ad2527"
# Confusion matrix.
# NOTE(review): these breed names are assigned to cluster ids 0..7 in an
# arbitrary, fixed order — the clusters are unlabeled, so the mapping is for
# display only; confirm it is not interpreted as ground truth downstream.
definitions = ['Afador', 'Affenhuahua', 'Affenpinscher', 'Afghan Hound', 'Airedale Terrier', 'Akbash', 'Akita', 'Akita Pit']
plot_confusion_matrix(y_test, y_pred, classes=definitions, title='Confusion matrix for Random Forest')

# + colab={"base_uri": "https://localhost:8080/"} id="ttjTR8dULyN-" outputId="bfca7336-af60-4a99-b416-d71cce1ab7c8"
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
# -

# Persist the frame with the new cluster-label column.
df.to_csv('data-after-clustering.csv')
Step 4 - Clustering and Modeling Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf

# +
# TensorFlow "hello world": build a single Constant op.
#
# Constructing the constant registers it as a node on the default graph;
# the object returned by the constructor represents that op's output tensor.
# -

hello_tensor = tf.constant("hello, TensorFlow")
print("Tensor:", hello_tensor)
print("Value :", hello_tensor.numpy())
1-Introduction/helloworld.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Challenges
#
# - Without more context, search may return irrelevant answers
# - Could use *seed* references, and then augment the reference based on those (e.g. only references that agree with seed references in a certain way?)

# +
from dotenv import load_dotenv
load_dotenv()

import os
import itertools
import json

from azure.cognitiveservices.search.websearch import WebSearchClient
from azure.cognitiveservices.search.websearch.models import SafeSearch
from msrest.authentication import CognitiveServicesCredentials
# -

# # Bing API setup

# Credentials come from the environment (populated by python-dotenv above).
subscription_key = os.getenv("COGNITIVE_SERVICE_KEY")
endpoint = os.getenv("COGNITIVE_ENDPOINT")

client = WebSearchClient(endpoint=endpoint, credentials=CognitiveServicesCredentials(subscription_key))

# Smoke-test the client with a trivial query.
web_data = client.web.search(query="Yosemite", count=10)

web_data.web_pages.value[0].url

# # Atomic Edits

data_path = '/mnt/nlp-storage/data/processed/atomic-edits/atomic-edits-04192020.json'

# The file is JSON-lines: one edit record per line.
with open(data_path, 'r') as data:
    edits = [json.loads(line) for line in data]

page_title, sect_title = edits[0]['page_title'], edits[0]['section_title']
(page_title, sect_title)

print(' '.join(edits[0]['target']['context']))

print(edits[0]['source']['sentence'], '\n', edits[0]['target']['sentence'])

# Search using the page title alone.
bing_search = client.web.search(query=page_title, count=10)

for hit in bing_search.web_pages.value:
    print(hit.name)
    print(hit.url)
    print(hit.snippet + '\n')

# Search again with "<page title> <section title>" for more context.
bing_search_st = client.web.search(query=' '.join((page_title, sect_title)), count=10)

for hit in bing_search_st.web_pages.value:
    print(hit.name)
    print(hit.url)
    print(hit.snippet)

# # Potential Issues to Address
#
# - Sites that duplicate wikipedia information
#     - Could remove domains that contain the word "wiki"
# - Incomplete snippets
#     - May not be an issue, but could also complete the text using the url
# - Irrelevant results
#     - Maybe this is a downstream concern, not all information in the grounding corpus will be relevant to each edit
# - What happens when the page is very short? Not a lot to go off of to determine what information is relevant
#
#
# Advantages of Bing API
# - Can form advanced queries to leverage Bing capabilities.
# - Relatively fast (and cheap), provides snippets
# - Can consider as a "pre" information retrievel step
# - Reproducible in the sense that we can release our dataset, but also in the sense that people could build upon our work and use the Bing API to make their own datasets (debatable here because Microsoft product, but this approach should work with any other search engine too)
#
#
# - Bing API tiers
#     - Free: 3TPS, 1000 transactions per month
#     - S1: 250TPS, \$7 per 1000 transactions
#     - S2: 100TPS, \$3 per 1000 transactions

# # Number of pages to query

# Collect the distinct page titles across all edit records.
page_titles = {e['page_title'] for e in edits}

# convert to list
page_titles = list(page_titles)

len(page_titles)
notebooks/exploratory/bing-web-search-api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="uU9D21DXQmsg"
# The nuts and bolts of Deep Graph Learning, with loading graph data and training a Vanilla GCN in PyTorch.

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="qDXNmqzP7jkV" outputId="2590a62d-c1b2-4828-88e3-82835bf57de3"
import torch
torch.__version__

# + colab={} colab_type="code" id="31Cv_pAy8x_s"
# downloading the Cora Dataset
# ! wget -q https://github.com/tkipf/pygcn/raw/master/data/cora/cora.cites
# ! wget -q https://github.com/tkipf/pygcn/raw/master/data/cora/cora.content

# + colab={} colab_type="code" id="PB0Ogz8UEF70"
# sparse matrices have a different encoding that saves space (doesn't store lots of zeros)
import numpy as np
import scipy.sparse as sp
import torch


# + colab={} colab_type="code" id="1qXsZbm40boo"
def encode_onehot(labels):
    """One-hot encode a sequence of class labels.

    FIX: the classes are enumerated in *sorted* order rather than raw
    ``set`` iteration order. For string labels, plain set order depends on
    the per-process hash seed, so the column assigned to each class — and
    hence the integer class ids derived downstream via ``np.where`` — was
    nondeterministic across runs. Sorting makes the encoding reproducible.
    (Assumes the labels are mutually comparable, e.g. all strings.)

    Parameters
    ----------
    labels : sequence of hashable, comparable labels.

    Returns
    -------
    np.ndarray of shape ``(len(labels), n_classes)`` and dtype int32;
    row ``i`` is the one-hot vector of ``labels[i]``.
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
    labels_oh = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
    return labels_oh


# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" id="Otls5JeCmTBH" outputId="19235d96-9d77-49ae-d5a9-a416afd5adb2"
encode_onehot(['apple', 'mango', 'banana', 'apple'])

# + [markdown] colab_type="text" id="cD0y3irJ2ziT"
# ## The Dataset (Cora)
#
# The Cora dataset consists of ML papers, classified into one of the following seven classes:
# - Case_Based
# - Genetic_Algorithms
# - Neural_Networks
# - Probabilistic_Methods
# - Reinforcement_Learning
# - Rule_Learning
# - Theory
#
# The papers were selected in a way such that in the final corpus every paper cites or is cited by atleast one other paper. There are 2708 papers in the whole corpus.
# # (This means the graph is a fully spanning graph with 2708 reachable nodes)
#
# After stemming and removing stopwords we were left with a vocabulary of size 1433 unique words.
#
# `cora.content` contains descriptions of the papers in the following format:
#
# 		<paper_id> <word_attributes> <class_label>
#
# The first entry in each line contains the unique string ID of the paper followed by binary values indicating whether each word in the vocabulary is present (indicated by 1) or absent (indicated by 0) in the paper. Finally, the last entry in the line contains the class label of the paper.
#
# `cora.cites` contains the citation graph of the corpus. Each line describes a link in the following format:
#
# 		<ID of cited paper> <ID of citing paper>
#
# Each line contains two paper IDs. The first entry is the ID of the paper being cited and the second ID stands for the paper which contains the citation. The direction of the link is from right to left. If a line is represented by "paper1 paper2" then the link is "paper2->paper1".


# + colab={} colab_type="code" id="5MGxSEDOioU1"
# normalize a matrix row-wise
def normalize(mx):
    """Row-normalize a (sparse or dense) matrix: divide every row by its sum.

    Rows whose sum is zero are left as zeros (their 1/rowsum would be inf,
    which is zeroed out below).
    """
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    # Zero-sum rows produce inf; replace with 0 so they stay all-zero.
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx


# + colab={} colab_type="code" id="b1daJFw_rkF8"
def dataloader(path='./', dataset='cora'):
    """Load the Cora node features and citation graph from `path`.

    Returns the normalized adjacency (as a torch sparse tensor), the
    row-normalized feature matrix, integer class labels, and random
    train/val/test index splits.
    """
    idxs, features, labels = load_nodes(path, dataset)
    # Map raw paper IDs to contiguous 0..N-1 node indices.
    idxs_map = {j : i for i, j in enumerate(idxs)}
    adj = load_adj(path, dataset, idxs_map, labels)

    features = normalize(features)
    adj = normalize(adj + sp.eye(adj.shape[0])) # add self loops

    idx_train, idx_val, idx_test = train_val_test_split(adj.shape[0])

    features = torch.FloatTensor(np.array(features.todense()))
    # Collapse the one-hot label matrix to integer class ids per node.
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    return adj, features, labels, idx_train, idx_val, idx_test


# + colab={} colab_type="code" id="nGy_SLugrkM8"
def load_nodes(path, dataset):
    """Parse `<dataset>.content`: per line, a paper ID, the bag-of-words
    0/1 features, and the class label."""
    nodes = np.genfromtxt(f'{path}{dataset}.content', dtype=np.dtype(str)) # index, features.., label
    features = sp.csr_matrix(nodes[:, 1:-1], dtype=np.float32) # one-hot dictionary words
    labels = encode_onehot(nodes[:, -1]) # the target to predict
    idxs = np.array(nodes[:, 0], dtype=np.int32) # the index of the papers (UID)
    return idxs, features, labels


# + colab={} colab_type="code" id="yI7hUlUcrkSH"
def load_adj(path, dataset, idxs_map, labels):
    """Parse `<dataset>.cites` into a symmetric sparse adjacency matrix.

    `idxs_map` maps raw paper IDs to node indices; `labels` is only used
    for its row count (number of nodes).
    """
    edges_unordered = np.genfromtxt(f'{path}{dataset}.cites', dtype=np.int32)
    # Translate raw paper IDs to node indices, keeping the (E, 2) shape.
    edges = np.array(list(map(idxs_map.get, edges_unordered.flatten())), dtype=np.int32)
    edges = edges.reshape(edges_unordered.shape)
    edges_t = (np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1]))
    adj = sp.coo_matrix(edges_t, shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
    # Symmetrize: add the transposed entry wherever only one direction exists.
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) # undirected graph
    return adj


# + colab={} colab_type="code" id="xnAv-GFNrkKC"
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse FloatTensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)


# + colab={} colab_type="code" id="OQ6V5rOom1UL"
def train_val_test_split(num_nodes, train=0.15, val=0.15):
    """Randomly split node indices into train/val/test LongTensors.

    `train` and `val` are fractions of `num_nodes`; the remainder is test.
    NOTE(review): this runs before the seeds are set further below, so the
    split is not reproducible across runs — confirm whether that matters.
    """
    idx_train = np.random.choice(range(num_nodes), int(train * num_nodes), replace=False)
    idx_vt = list(set(range(num_nodes)) - set(idx_train))
    idx_val = np.random.choice(idx_vt, int(val * num_nodes), replace=False)
    idx_test = list(set(idx_vt) - set(idx_val))
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return idx_train, idx_val, idx_test


# + colab={} colab_type="code" id="VWwtAx_n1QZ5"
def accuracy(output, labels):
    """Fraction of rows in `output` whose argmax matches `labels`."""
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()
    correct = correct.sum()
    return correct / len(labels)


# + colab={} colab_type="code" id="qS2WImyf1QeU"
# Load the whole dataset once; everything below closes over these globals.
adj, features, labels, idx_train, idx_val, idx_test = dataloader()

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="s6mmOVexAd0Z" outputId="36c05121-da70-4bb5-eff3-bdcaee37771e"
idx_train.shape, idx_val.shape, idx_test.shape

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="f3kvNBRCC4fh" outputId="ae61e90b-55ee-4223-ac69-7690117fb9a9"
features.shape

# + colab={} colab_type="code" id="L1_GUNxp9F8J"
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
# %matplotlib inline


# + colab={} colab_type="code" id="fmCLk5B7DDdq"
class GraphConv(nn.Module):
    """A single graph-convolution layer: X' = A @ (X W) + b."""

    def __init__(self, in_features, out_features):
        super(GraphConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
        self.bias = nn.Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(fan_out).
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support) # permutation inv sum of all neighbor features
        return output + self.bias

    def __repr__(self):
        return self.__class__.__name__ +' ('+str(self.in_features)+' -> '+str(self.out_features)+')'


# + colab={} colab_type="code" id="3-i7AIU4D-Lf"
class VanillaGCN(nn.Module):
    """Three-layer GCN (two hidden GraphConv layers + output layer) with
    dropout after the first activation; returns log-softmax class scores."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(VanillaGCN, self).__init__()
        self.gc1 = GraphConv(nfeat, nhid)
        self.gc3 = GraphConv(nhid, nhid)
        self.gc2 = GraphConv(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc3(x, adj))
        x = self.gc2(x, adj)
        return F.log_softmax(x, dim=1)


# + colab={} colab_type="code" id="ah8cnedjDV5k"
# Seed torch/numpy for (partial) reproducibility; note the data split above
# already happened unseeded.
CUDA = torch.cuda.is_available()
SEED = 42
torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)
np.random.seed(SEED)

# + colab={} colab_type="code" id="wYTovIOKTsHY"
# Hyperparameters.
lr = 0.01
# epochs = 200
epochs = 200
wd = 5e-4          # weight decay (L2 on all parameters via Adam)
hidden = 16
dropout = 0.5
fastmode = False   # when True, skip the extra eval-mode forward pass per epoch

# + colab={} colab_type="code" id="f_VgUNPOT_ad"
model = VanillaGCN(nfeat=features.shape[1], nhid=hidden, nclass=labels.max().item() + 1, dropout=dropout)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)

# + colab={} colab_type="code" id="VRBuPNDMD5Ad"
# Move model and all data to GPU when available.
if CUDA:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()


# + colab={} colab_type="code" id="5lRmy_Y6SVqQ"
def train(epoch):
    """One full-batch training step; returns (train_loss, val_loss).

    Uses the module-level model/optimizer/data globals. Prints progress
    every 10 epochs.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    # Loss/accuracy only on the training indices (transductive setting).
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not fastmode:
        # Re-run in eval mode so dropout is disabled for validation metrics.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    if epoch % 10 == 0:
        print('Epoch: {:04d}'.format(epoch+1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()))
    return loss_train.item(), loss_val.item()


# + colab={} colab_type="code" id="7qEw8X6gSlet"
def test():
    """Evaluate the trained model on the held-out test indices."""
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))


# + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="1BjAiO1tSOSd" outputId="770edebf-7370-4f84-b811-b56727615abd"
# Training loop: record per-epoch losses for the plot below.
import time
t_total = time.time()
train_losses, val_losses = [], []
for epoch in range(epochs):
    loss_train, loss_val = train(epoch)
    train_losses.append(loss_train)
    val_losses.append(loss_val)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2aOHpfjmTyQb" outputId="aba266a9-c8f7-4e1b-9b48-88f7490afa14"
test()

# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="GLmJbYibYj_n" outputId="869dbe1c-6ab9-488d-fb9b-4d7580127b11"
plt.plot(train_losses, label='Train Loss')
plt.plot(val_losses, label='Val Loss')
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('NLLLoss')
plt.legend()

# + [markdown] colab_type="text" id="gilds_d0Ve82"
# Sometime soon, we'll have week-13 of [this](https://www.youtube.com/watch?v=f01J0Dri-6k&list=PLLHTzKZzVU9eaEyErdV26ikyolxOsz6mq&index=24&t=0s) and that should be really good!
#
# Also, ICML 2020 had a workshop on [GRL+](https://slideslive.com/icml-2020/graph-representation-learning-and-beyond-grl), should definitely watch relevant presentations

# + colab={} colab_type="code" id="JbIoYTSaXbue"

# + colab={} colab_type="code" id="IbYum6i8VeRp"

# + [markdown] colab_type="text" id="ys_LmAYSs7xb"
# ### Experiment Journal:
#
# 1. Default hyperparams, default splits (140, 300, 1000) => 0.81
# 2. Default hyperparams, splits (200, 300, 1000) => 0.83
# 3. Default hyperparams, random splits (0.1, 0.2, 0.7) => 0.8044
# 4. Default hyperparams, random splits (0.5, 0.2, 0.3) => 0.8918
# 5. Default hyperparams, random splits (0.5, 0.2, 0.3) without bias => 0.8512
# 6. 100 epochs, 1 hidden layer, random splits (0.15, 0.15, 0.7) without bias => 0.7342
# 7. 100 epochs, 2 hidden layer, random splits (0.15, 0.15, 0.7) without bias => 0.7453

# + colab={} colab_type="code" id="0Ok_6Jpgx8jU"
src/.ipynb_checkpoints/33 > Breaking Up Brass Tacks-checkpoint.ipynb