path
stringlengths
8
399
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
2 values
repo_name
stringlengths
6
109
repo_url
stringlengths
25
128
star_events_count
int64
0
52.9k
fork_events_count
int64
0
7.07k
gha_license_id
stringclasses
9 values
gha_event_created_at
timestamp[us]
gha_updated_at
timestamp[us]
gha_language
stringclasses
28 values
language
stringclasses
1 value
is_generated
bool
1 class
is_vendor
bool
1 class
conversion_extension
stringclasses
17 values
size
int64
317
10.5M
script
stringlengths
245
9.7M
script_size
int64
245
9.7M
/outlier_using_zscore_and_std.ipynb
45bd86904554885add1e0491eb946e72602b3222
[]
no_license
syedawaisalishah/Django-ecomerce-project
https://github.com/syedawaisalishah/Django-ecomerce-project
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
57,753
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/syedawaisalishah/Django-ecomerce-project/blob/master/outlier_using_zscore_and_std.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="i3-XyhPitBRE" import pandas as pd import matplotlib.pyplot as plt import numpy as np from scipy.stats import norm # %matplotlib inline # + id="tICEBC1YuOHe" df=pd.read_csv('https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/2_outliers_z_score/heights.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="NKSySZRCurMy" outputId="b0e4fe53-3eb2-44f5-ef68-cbb1f0a641f1" df # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="2NRGD1cdurmD" outputId="ff2935b9-e261-49fb-b049-6ae2141b6ef2" df.describe() # + id="0IK4ZBrSuwYe" a=df.height.min() # + id="GP6QJkvpuVI5" b=df.height.max() # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="vUOnOAzKvBg7" outputId="2bef8f35-5363-4d1c-c047-8502046181cc" plt.hist(df.height,bins=20,rwidth=0.8,density=True) plt.xlabel('height(inches)') plt.ylabel('count') rng=np.arange(a,b,0.1) plt.plot(rng,norm.pdf(rng,df.height.mean(),df.height.std())) # + id="7K00NjoJxXS1" uper_limit=df.height.mean()+3*df.height.std() # + colab={"base_uri": "https://localhost:8080/"} id="LZfbSfND3fEo" outputId="8bbe9567-76e3-4b7c-d869-f3dfa1c81e0c" uper_limit # + colab={"base_uri": "https://localhost:8080/"} id="C0Fl_Dvp3grh" outputId="2afb8929-b011-4cd8-d1f9-e7ded4bc7b3d" lower_limit=df.height.mean()-3*df.height.std() lower_limit # + colab={"base_uri": "https://localhost:8080/", "height": 266} id="9WMfG94T3npH" outputId="154e733d-c162-416a-c426-02ab927773b3" 
outlier=df[(df.height<lower_limit)|(df.height>uper_limit)] outlier # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="zopbdSVJ4Eyi" outputId="87b084cf-1fb0-4570-b3c8-eef2acc832bc" df2=df[(df.height>lower_limit)&(df.height<uper_limit)] df2 # + id="ahDJalSu4elV" df3=df2.to_csv('outlier_remove',index=False) # + id="mWlsoWXl5e4e" df3 # + [markdown] id="RgdJQ9czDb5r" # # Remove outlier using z score # + [markdown] id="iN_4XlZUEDhy" # ![download (1).png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAU8AAACWCAMAAABpVfqTAAABQVBMVEX///8AAADT09P5+fmCgoKysrLLy8u6urp2dnYyn9htbW38///5+/xOq+Acm9j+/vuCwOQynNtstuHx8fFWrd0zntkamtj2jh/H4/ItLS3AwMD1hwDd7/elpaXs9fyYm5qVy+hYWFj3m0cAAAqs1OzPdxqLi4v2ggD+7eLd3d0iIiKaWBfR6fH4tHz0hwAAAAn3kCxAQECIThP3pFz69Oz2toP5sHH3fQDm5ub/jxxubm5hYWFQLA+vZhr84Mf559fwwJLvlDP1o178iSD3wp3vjRD3u4360LD54MX49OXwtHz0mED917j0olL4wo7wmDb6sX79t4760abytXH6qGkTExM6Ozo1JRiIVyGETCR0RRHciCy5dDDbhx6qay+ybCHZeh1pQhNHJARhORdBJxAYCwUqEwAnGA1JKg0jEAwzGggEGuelAAALZ0lEQVR4nO2cj1vavBaAW1CnhhE3lKqZv9AZ1BFQCso3BUEFFfHu23R3193r5tTN+f//AfckhWkRKWhalC+vz9OWUii8JOechIqmKRRPmoBfIQnh83OPQg7v+7jP/o72jm7C94IvlU9Z9CmfUlE+5aJ8ykX5lEtnfSLk9hmw5vopbHTUJ0bIdaPe6uxw+9zL5kx3z7AZO0DY3VPY8M4nSlAOI2nrJka7ebpKVzdbfoY8ZfAEq7l2zhqhxbZe5WPx0Ge6kkwWIltku9oFTUry2/s7uy33yEgmkUxWIu35JEVPu7yX/R3eFyqQvDVlgNEmS0AAhYZq5QykwUZtC/ENzHsqv7MqJEIOkXWLP6p6KH8qbK2QtcAizWEsQrPlUxzqSbf3NH5iVGK01h5RkSX4m9c0P8ql2SqJacjc36LsKIqwWUiXDIpRrkJpPlcVCm6EFL6b0Pwm+M8lY9uMacVCqUBJem+f0IqJtFyh4MeomNzUEPdpxiDUlL2Jo97mI3OLpGqNCu1S9skULSdLaCIWSSF/he6k8tTYQyY1KDFwlJB0jJJqD49YoUI8spJmdB9pUWoQkkBpxkiFMINWGCmA7lVqIugKaSTaJ6aRYoSwtuLEQ/HUJzpihlmrCRFOU5Y4xHDzXywJbSeAPhIaRabB0tynEcUoSfYh7nIvnAiLfIjFPiCUMJIIlbjnKMnkTazFGImimEFK2kdK9lCOMDhNuuoToShCJmNFL+Kop/EzS+kBj37RaC6X8yOtZDBW3tNyjOZEZyywIyhIi9DgTEK3uQSSNc0SKVuPjxgGYXQHEhkEDYwTdBt8kgO4J0YLGtqFBq3tMfhIcoSAz6OaT4gj2QPGYl68Ry994gT44jknvwrswoZ/k7ICOiAJEdpQAjq0Hx0ww28SsgveoQcDhFmPj5AUNs0QylFiBrBWga4cpSwK98S4uiyF/Gbyz4YfwH3Gqj4PCaUG+9Bd7ROjfdENwdtudjd7YAqH24zgAwJRAFV9YtBrYGifWU3
LrRofS5ybfKSJZg7Pg1CFFXn7tPnUoElHNe4TQX+v+tymrBSKkW5rnzl4W9WCyCp1uMIsozhKSVYYO2IVxOWUkeUTGltWHGll5mp+xyZlWQgGPFze4xPisAbBt9rfy+xvpH2iXeYTl1mex83cHrZ6d7ZkamaFlZHfMMq8RoJ8BGJzBigwiVBcYGVoaKZVsf7xqeVZBcpXnrGhJQqfNI2gSkhgDB9BVDMJO9SykNd4DgOf5BMyEyztwZv00OcBJYTw8WKyWi8dQXonvB6F+oewyE4aYah5kgbNQzNbpeATQ6I2ChG6az1DmVopGjIPKcOhkLGilPIyKEYhkWXpjh9UQvzUkpTB4JQUEIrQv9EhNZIJQo66K37mUkVB6qP1vtBuuryTiEFmxyibThgRiJ14s7xV3oQxjrmfivK6ag/uyBesyhFtpw6wNTjKpROJJC/Q91Ip3j5LqRJCueIh5KPiPgRm81Misb+b+qihzUM4LGUkPpVS293l85+B8ikX5VMuyqdclE+5KJ9yUT7lonzKRfmUi/IpF+VTLsqnXJRPuSifclE+5aJ8ykX5lIvyKRflUy7Kp1yUT7kon3JRPuWifMrl2fpcW1hYWKtuw6bP3+RY/82xtzZd4fn61AHLzDhszTQ7NgQH9N9s9rj4sp6tT60XzAyKrUXYatY8b0vkm/Muvqpn6zOgzYAa/msIU7Aeb3qs8tkKXM2itRp0PlL5dGQC3Exo/U69XflslWWQ43Ps7cpnq4gcX9fb/e9r3OxzwycS/89355LSZ+1TpCL92NbbQ3oN+z7ZPv0BfiGwVv8/d8/bZ+hO89RCizXsh0n2GRh4NRAMD93Z/7x99ouG2Od02ON82vs0HhVwnwMjQ+61z9BaqI610OOftTl9vGKCDu/42uzjo9Z9Yiz+n8S263U4HH710m2fM3oDPgce/bzN8IPLnr4WBHGJ1fS00IZP/GaI88a+8/UAMOyqTyReZgN8j3teB3rgDCFRNC00PzBkpSf+6c63M35/Fx4ZeRUete98PQIih5G7Pn36v5eAjS9flr78Z2XJ4spdn7zynLJWn5sfKXxO8a2+uwmsCe/C4KyxT5fbp08/yWSMzNfZ/57r8dOMAWQyK+76HIPIyZvcoBgmNcOqofr7+nqsbjPuMJ6q0VmfiZ238W9XcX3DsHDZJ++54sULW01z301Nqi9POB59Q80nFlR3euXT2PoaPz3X9aWMo89An40HnZN382Vrc+pmszEiqVs6Q/5jWLWYJ6s+J+c4tZ/L8Kx9/m/29ErXTw3D0eet9mIfyLSBDz4IazrZzz+e8WZNjp/PF5rqn+cfnX+if6rFc1R9vgmHg8FwwGOfX2dP12/rbOLTb9PpWD464NzYQq0U/Q2o+QyCwqDHPte/nb49XTr9/vYP38/u9TlfQ3fMJTJo36f1C0Sd9BmfvYtjPppyrnVkwPuD05xeA/C7YKd8rk01Yt7hm0Qx3+ZQi0sBTtPbzvGT09PTQ0OjnWqfVrZtf3y03M5g5TG0G1UmB16NBMPTb275tPIRVE1e+fzxbWNj4/waFm/X419gde3ks1dCMmqN5TY/t0luK2jzGQyMvhweHh7yzKcYIC2twyKzMctr0DMHn6Jqqgtri2M2FiVNJPZNTLQVPxv6nIPR/MCwdz4T4HDlgldKK/HvLfgcvJn1+UN9wKi/3yMa+dTmggMjAy+9i5+XgB7XL3W+jMd1WDbz2ddo4NdbxwOysgyegM/4zxXgxyWfZbrST2C7efwMLHpSej6MJ+CTx89MZukEImdm4xiCaOakqc95p0F3I3oG/xqUS+Oh55PwaRg7maUzbnIjboDWpj4XHlR6LjYuyx7BXw3P8xR8xi9+XP2Ix6/Oz8/1+PX19cX99SeMuD/rDb93qH+7TS+bc4+n4PP3ydnJyfXlCXARP4flrybtk89CLjbY/2zyu6vfd1T7O4+fVv2pQ3/PrNzvUww0G81QjNfxsLnRRzMZDAPgMwh/o3w
VDuO5MKzBJ7/xMjDyCtZDdy4QccfnLM9NTXy+1/98gfskCUxzRifF6k3gDQdP8uWkNsf3zSFx19ydR8rz+ZP39J+XZ6KnQ38/u7+/8wuK9YkXFr1tzVR4SYDPrGINBQAN81sBax8M4wNadThfhzyfl1fApX51tb7+S7/g2/fW82P2IPnYk9vxv5h/P3PDg+ZbAgPhYAuEp130eWJUx+/GzsYsdHrj/v5+7J7PUE9dRnuYz+Hhl60w54VPXn82j58Tdh578luM24qD5bFF6yvQtgm0yJ0HyvN5zS9hWP8llnG+vHCen5dNr6hZX6yFfD1WxvO3+E27NKTOhxxfxuOXv3//jsePLy8hfHrtU7TOaoNs5bomF5DZ37cMY2mdf7EJ403DYbzpBiFbVWtd3eQ1sudDznkU3dAd6k934AZvgjGfIfC+FHPj+46Nszhfrjef/5QOb55jNzf9D03uj6Kj38fJZcLWPIXPxrNHbiLJp7+xT0/j13v7Cdeec/t8Cizaurv2Qu/E/H/3+PTXTZf2ex1vBF3rk99sNMPqMt3jM6DbroWa70h37yKf4uqdPxdM+TrTPLvJ58StDu/zvFqr0kU+xZXcM/w705C4rrQj89Rd5LM6qBib+azfmhfxmG7yqS3cTPzPuPkjNk3oKp9QxA9ypcs9Xlyk25Au89lxlE+5KJ9yUT7lonzKRfmUi/IpF+VTLsqnXJRPuSifclE+5aJ8ykX5lIvyKRflUy7Kp1yUT7kon3JRPuWifMpF+ZSL8ikX5VMuyqdclE+5KJ9yUT7lonzKRfmUi/IpF+VTLsqnXJRPuSifclE+5aJ8ykX5lIvyKRflUy6Wz5kFn0IKE8LneP3vaiseitc/SKZQtMf/Aag3yBa7st1MAAAAAElFTkSuQmCC) # + id="W4ID03Y5Etfl" df=pd.read_csv('https://raw.githubusercontent.com/codebasics/py/master/ML/FeatureEngineering/2_outliers_z_score/heights.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="kkG9fgGqD5LM" outputId="f9f6f7b6-abe0-4fbf-e618-29e3bb98b794" df['zscore']=(df.height-df.height.mean())/df.height.std() df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="UCjVIFv8E4gv" outputId="f18d72be-be96-495f-fd95-0a6c8f6b8eb5" df[df.zscore>3] # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="zqn-3aIYFeeX" outputId="a020403a-f437-47e7-ce90-90aa4bebf773" df[df.zscore<-3] # + colab={"base_uri": "https://localhost:8080/", "height": 266} id="iqFyr8CCG932" outputId="7a3f5bf9-5e49-4bcb-c812-27de7ab06888" df[(df.zscore<-3)|(df.zscore>3)] # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="2p1xZrFnQ0ar" outputId="46600d65-f458-40c2-e51a-38b51f98cc29" df[(df.zscore>-3)&(df.zscore<3)] # + id="C07GpaQbRINJ" gsCount) - Find 3 most popular tags and 3 most popular words in the train data. 
- 5 points # + [markdown] id="x2mT1f7gWKSu" colab_type="text" # Note: The words which appear the most are considered as popular in this case! # + id="q-eQoxipTF-v" colab_type="code" colab={} words_counts = {} for sentences in x_train: words = sentences.split() for word in words: if word in words_counts: words_counts[word] += 1 else: words_counts[word] = 1 # + id="eQLVOFUWcgrn" colab_type="code" colab={} sorted_words = sorted(words_counts.items(), key=lambda kv: kv[1], reverse=True) # + id="jmugyE95eWqe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0c3ebc3d-a19e-4ea1-bb56-74b9831d3f35" sorted_words[0:3] # + id="d4gT8ZUt4BTF" colab_type="code" colab={} obj = {} count = 0 for (k,v) in sorted_words[0:3]: obj[k] = count count+=1 # + id="-8DGtrcQ4o7B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6ad13d8a-9c9a-4537-f267-bb64b28a228f" obj.keys() # + id="TEvvOT5bg1it" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="99d3ba64-a881-450d-db10-b547fa563ba7" type(y_train[0]) # + id="bSaezVjXy4rQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="307d9a86-7d9f-4c0c-9ac4-466058e25674" y_train.head() # + id="zW-in_-gyjKC" colab_type="code" colab={} # + id="OQ_AN148iOVB" colab_type="code" colab={} # + [markdown] id="xkwg8_WNTF-3" colab_type="text" # We are assuming that *tags_counts* and *words_counts* are dictionaries like `{'some_word_or_tag': frequency}`. After applying the sorting procedure, results will be look like this: `[('most_popular_word_or_tag', frequency), ('less_popular_word_or_tag', frequency), ...]`. 
# # eg: # Tag 1 - 100 Tag 2 - 65 Tag 3 - 250 <br> # after sorting looks like, <br> # Tag 3 - 250 Tag 1 - 100 Tag 2 - 65 # + id="RSCPa54uTF-4" colab_type="code" colab={} tags_counts = {} for tags in y_train: # y_train = literal_eval(y_train[:]) words = literal_eval(tags) for word in words: if word in tags_counts: tags_counts[word] += 1 else: tags_counts[word] = 1 # + id="O1n__k8tgDUh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="48fb5c88-b15a-4a08-9381-9acba43db104" sorted_tags = sorted(tags_counts.items(), key=lambda kv: kv[1], reverse=True) sorted_tags[0:3] # + [markdown] id="Xk7M4uJ_TF-8" colab_type="text" # ## Task - 3 Transforming text to a vector (10 points) # # Machine Learning algorithms work with numeric data and we cannot use the provided text data "as is". There are many ways to transform text data to numeric vectors. In this task you will try to use two of them. # # #### Bag of words # # One of the well-known approaches is a *bag-of-words* representation. To create this transformation, follow the steps: # 1. Find *N* most popular words in train corpus and numerate them. Now we have a dictionary of the most popular words. # 2. For each title in the corpora create a zero vector with the dimension equals to *N*. # 3. For each text in the corpora iterate over words which are in the dictionary and increase by 1 the corresponding coordinate. # # Let's try to do it for a toy example. 
Imagine that we have *N* = 4 and the list of the most popular words is # # ['hi', 'you', 'me', 'are'] # # Then we need to numerate them, for example, like this: # # {'hi': 0, 'you': 1, 'me': 2, 'are': 3} # # And we have the text, which we want to transform to the vector: # # 'hi how are you' # # For this text we create a corresponding zero vector # # [0, 0, 0, 0] # # And iterate over all words, and if the word is in the dictionary, we increase the value of the corresponding position in the vector: # # 'hi': [1, 0, 0, 0] # 'how': [1, 0, 0, 0] # word 'how' is not in our dictionary # 'are': [1, 0, 0, 1] # 'you': [1, 1, 0, 1] # # The resulting vector will be # # [1, 1, 0, 1] # # Implement the described encoding in the function *my_bag_of_words* with the size of the dictionary equals to 5000. To find the most common words use train data. You can test your code using the function *test_my_bag_of_words*. # + id="6ER1oq48TF-9" colab_type="code" colab={} DICT_SIZE = 5000 WORDS_TO_INDEX = {} count = 0 for (k,v) in sorted_words[0:5000]: WORDS_TO_INDEX[k] = count count+=1 count = 0 INDEX_TO_WORDS = {} for (k,v) in sorted_words[0:5000]: INDEX_TO_WORDS[count] = k count += 1 ####### YOUR CODE HERE ####### ALL_WORDS = WORDS_TO_INDEX.keys() def my_bag_of_words(text, words_to_index, dict_size): text = text.split(" ") result_vector = [0] * dict_size for word in text: if word in words_to_index: index = words_to_index[word] result_vector[index] = 1 return result_vector # + id="qnLYWnsaTF_A" colab_type="code" colab={} def test_my_bag_of_words(): words_to_index = {'hi': 0, 'you': 1, 'me': 2, 'are': 3} examples = ['hi how are you'] answers = [[1, 1, 0, 1]] for ex, ans in zip(examples, answers): if (my_bag_of_words(ex, words_to_index, 4) != ans): return "Wrong answer for the case: '%s'" % ex return 'Basic tests are passed.' 
# + [markdown] id="mOlSiUqEW3jD" colab_type="text" # Execute the test_text_prepare function <br> # *<u>Note:</u> You should pass the above test to ensure BOW is working correctly!* # + id="cqWhoF24TF_D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c377559e-4b67-4c5a-d752-6faa4a306143" test_my_bag_of_words() # + [markdown] id="BXW3ALCITF_H" colab_type="text" # Now apply the implemented function to all samples (this might take up to a minute): # + id="m39xHB2yTF_H" colab_type="code" colab={} from scipy import sparse as sp_sparse # + id="6DZwPWB1TF_K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="02729055-6b05-4317-e625-fdc4e492a97b" X_train_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, DICT_SIZE)) for text in x_train]) X_val_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, DICT_SIZE)) for text in x_validation]) X_test_mybag = sp_sparse.vstack([sp_sparse.csr_matrix(my_bag_of_words(text, WORDS_TO_INDEX, DICT_SIZE)) for text in x_test]) print('X_train shape ', X_train_mybag.shape) print('X_val shape ', X_val_mybag.shape) print('X_test shape ', X_test_mybag.shape) # + [markdown] id="WPXTUeNSTF_N" colab_type="text" # As you might notice, we transform the data to sparse representation, to store the useful information efficiently. There are many types: of such representations, however sklearn algorithms can work only with csr matrix, so we will use this one.<br> # <u>Documentations on sparse matrix:</u> <br> # (https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix) # (https://docs.scipy.org/doc/scipy/reference/sparse.html) # # # + [markdown] id="i8AMzsVxTF_Q" colab_type="text" # # For the 11th row in *X_train_mybag* find how many non-zero elements it has. In this task the answer (variable *non_zero_elements_count*) should be a number, e.g. 20. 
# + id="bOetzSLzTF_S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="4c72f24f-b6e9-4bdf-9251-e8b0793f80d5" X_train_mybag[10] # + [markdown] id="GYAFKGfKTF_V" colab_type="text" # ## Task 4 - TF-IDF (5 points) # # The second approach extends the bag-of-words framework by taking into account total frequencies of words in the corpora. It helps to penalize too frequent words and provide better features space. # # Implement function *tfidf_features* using class from *scikit-learn*. Use *train* corpus to train a vectorizer. Don't forget to take a look into the arguments that you can pass to it. We suggest that you filter out too rare words (occur less than in 5 titles) and too frequent words (occur more than in 90% of the titles). Also, use bigrams along with unigrams in your vocabulary. # + [markdown] id="1s3uEKmjYpuh" colab_type="text" # ## Write a function which takes x_train, x_val and x_test as input and return the tf-idf features of the same and the vocabulary # + id="lkz1RYsBTF_Z" colab_type="code" colab={} from sklearn.feature_extraction.text import TfidfVectorizer # + id="o6V7FPirTF_i" colab_type="code" colab={} def tfidf_features(X_train, X_val, X_test): vect = TfidfVectorizer(stop_words='english') X_train_tfidf = vect.fit_transform(X_train) X_val_tfidf = vect.transform(X_val) X_test_tfidf = vect.transform(X_test) features = vect.get_feature_names() return X_train_tfidf, X_val_tfidf, X_test_tfidf, vect.vocabulary_ # + id="ySnV2zCBTF_p" colab_type="code" colab={} X_train_tfidf, X_val_tfidf, X_test_tfidf, tfidf_vocab = tfidf_features(x_train, x_validation, x_test) tfidf_reversed_vocab = {i:word for word,i in tfidf_vocab.items()} # + id="TOem50gtAmXY" colab_type="code" colab={} # + [markdown] id="o_aopTTNL5G1" colab_type="text" # Print the index of string "C#" in the vocabulary # + id="kZ-3wqTTMCRi" colab_type="code" colab={} tag = "C#" for (k,v) in tfidf_reversed_vocab.items(): if (v == "C#"): print(k) # + [markdown] 
id="ZYYO2kdiTF_6" colab_type="text" # ## Task 5: Classification (15 points) # MultiLabel classifier # # As we have noticed before, in this task each example can have multiple tags. To deal with such kind of prediction, we need to transform labels in a binary form and the prediction will be a mask of 0s and 1s. For this purpose it is convenient to use MultiLabelBinarizer from sklearn. <br> # <u>Documentation:</u> <br> # http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MultiLabelBinarizer.html # + id="ojeCe_e_TF_7" colab_type="code" colab={} from sklearn.preprocessing import MultiLabelBinarizer # + id="XSFix5veBdNY" colab_type="code" colab={} # + id="p8JYQr8YTF_-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="82931a0c-3c95-4bac-f365-bea9a31c5bcf" mlb = MultiLabelBinarizer(classes=sorted(tags_counts.keys())) y_train = mlb.fit_transform(y_train) y_val = mlb.fit_transform(y_validation) # + id="uW6zGZvmUpYf" colab_type="code" colab={} # + [markdown] id="qN1NUA3DTGAB" colab_type="text" # In this task we suggest to use One-vs-Rest approach, which is implemented in [OneVsRestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html) class. In this approach *k* classifiers (= number of tags) are trained. As a basic classifier, use [LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html). It is one of the simplest methods, but often it performs good enough in text classification tasks. It might take some time, because a number of classifiers to train is large. # # **OneVsRest multi-label strategy** # # The Multi-label algorithm accepts a binary mask over multiple labels. The result for each prediction will be an array of 0s and 1s marking which class labels apply to each row input sample. 
# # **Logistic Regression & SVM** # # OneVsRest strategy can be used for multi-label learning, where a classifier is used to predict multiple labels for instance. LR & SVM supports multi-class, but we are in a multi-label scenario, therefore, we wrap classifiers in the OneVsRestClassifier. # # *If you want to learn more about OneVsRest, check out these links:* # - *https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5* # - *https://towardsdatascience.com/journey-to-the-center-of-multi-label-classification-384c40229bff* # - *https://medium.com/coinmonks/multi-label-classification-blog-tags-prediction-using-nlp-b0b5ee6686fc* # + id="2Pj6MDkbTGAC" colab_type="code" colab={} from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.multioutput import ClassifierChain # + id="eUHZR2TWTGAF" colab_type="code" colab={} def train_classifier(X_train, y_train): # Create and fit LogisticRegression and LinearSVC wraped into OneVsRestClassifier. ###################################### ######### YOUR CODE HERE ############# ###################################### lr = OneVsRestClassifier(LogisticRegression()) lsvc = OneVsRestClassifier(LinearSVC()) clf = ClassifierChain(lr) clf = ClassifierChain(lsvc) clf.fit(X_train, y_train) return clf ### clf is the model # + [markdown] id="k660NfRFTGAL" colab_type="text" # Train the classifiers for different data transformations: *bag-of-words* and *tf-idf*. # classifier_mybag = model for # + id="jXyhN-yDTGAP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8022ac25-d649-41c0-aad5-d953aabbc228" classifier_mybag = train_classifier(X_train_mybag, y_train) classifier_tfidf = train_classifier(X_train_tfidf, y_train) # + [markdown] id="ijQZCd9cTGAU" colab_type="text" # Now you can create predictions for the data. You will need two types of predictions: labels and scores. 
# + id="0QT7uWddTGAV" colab_type="code" colab={} y_val_predicted_labels_mybag = classifier_mybag.predict(X_val_mybag) y_val_predicted_scores_mybag = classifier_mybag.decision_function(X_val_mybag) y_val_predicted_labels_tfidf = classifier_tfidf.predict(X_val_tfidf) y_val_predicted_scores_tfidf = classifier_tfidf.decision_function(X_val_tfidf) # + id="FwwkhVJISTAm" colab_type="code" colab={} # + [markdown] id="p9n0TEtHTGAj" colab_type="text" # Now, we would need to compare the results of different predictions, e.g. to see whether TF-IDF transformation helps or to try different regularization techniques in logistic regression. For all these experiments, we need to setup evaluation procedure. # + [markdown] id="RVqAxDxqTGAk" colab_type="text" # ## Evaluation (10 points) # # To evaluate the results we will use several classification metrics: # - [Accuracy](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html) # - [F1-score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html) # - [Area under ROC-curve](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html) # - [Area under precision-recall curve](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score) # # Make sure you are familiar with all of them. 
If you want a refresher, you can click the link to their documentation # + [markdown] id="_YmNhVsU9K9-" colab_type="text" # ## Import the necessary libraries for the above metrics # + id="tBvrkOUYTGAl" colab_type="code" colab={} from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve, precision_score, multilabel_confusion_matrix # + [markdown] id="R643V4uvNbAh" colab_type="text" # # + [markdown] id="vNkYBl_wTGAp" colab_type="text" # Define the function *print_evaluation_scores* which takes y_val and predicted as input calculates and prints the following output: # - *accuracy* # - *F1-score - Average = 'weighted'* # - *Precision - Average = 'macro'* # + id="-4nevWnhTGAq" colab_type="code" colab={} def print_evaluation_scores(y_val, predicted): print("accuracy score", accuracy_score(y_val, predicted)) print("f1 score", f1_score(y_val, predicted, average='weighted')) print("precision", precision_score(y_val, predicted, average='macro') ) # multilabel_confusion_matrix(y_val, predicted) # + id="G92GIO4dTGAt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="24151823-8112-4124-b8cc-3eea47a215ce" print('Bag-of-words') print_evaluation_scores(y_val, y_val_predicted_labels_mybag) print('Tfidf') print_evaluation_scores(y_val, y_val_predicted_labels_tfidf) # + id="y5QdC0b3UXE2" colab_type="code" colab={} # + [markdown] id="kT7oso8ZTGAv" colab_type="text" # You might also want to plot some form of the [ROC curve](http://scikit-learn.org/stable/modules/model_evaluation.html#receiver-operating-characteristic-roc) for the case of multi-label classification. 
The input parameters for the roc curve are: # - true labels # - decision functions scores # - number of classes # + [markdown] id="x4hbW3SeBVxz" colab_type="text" # Import the roc_auc function from the metrics.py file provided # + id="G_9M0AI4TGAv" colab_type="code" colab={} import matplotlib.pyplot as plt import numpy as np from sklearn.metrics import roc_curve, auc from scipy import interp from itertools import cycle def roc_auc(y_test, y_score, n_classes): """Plots ROC curve for micro and macro averaging.""" # Compute ROC curve and ROC area for each class fpr = {} tpr = {} roc_auc = {} for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) # Compute macro-average ROC curve and ROC area all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) for i, color in zip(range(0,3), colors): plt.plot(fpr[i], tpr[i], color=color, lw=2, label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=2) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of ROC to multi-class') 
plt.legend(loc="lower right") plt.show() # + id="sZaymN4uTGA0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="2b8ade3b-4776-43af-f6a8-ec57f9ec2dc6" roc_auc(y_val_predicted_labels_mybag, y_val_predicted_scores_mybag, 100) # + id="XwNpUEVLTGA2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="0374ef9e-198c-49be-b804-db33124de2a0" roc_auc(y_val_predicted_labels_tfidf, y_val_predicted_scores_tfidf, 100) # + [markdown] id="9S3Ddz7LTGA3" colab_type="text" # ## Task 4 (MultilabelClassification) - Optional # ** Once we have the evaluation set up, we suggest that you experiment a bit with training your classifiers. We will use *F1-score weighted* as an evaluation metric. Our recommendation: # - compare the quality of the bag-of-words and TF-IDF approaches and chose one of them. # - for the chosen one, try *L1* and *L2*-regularization techniques in Logistic Regression with different coefficients (e.g. C equal to 0.1, 1, 10, 100). # # # + id="Ers40KuOTGA4" colab_type="code" colab={} ###################################### ######### YOUR CODE HERE ############# ###################################### # + [markdown] id="mdqTO269TGA8" colab_type="text" # When you are happy with the quality, create predictions for *test* set. # + id="M51F10f9TGBB" colab_type="code" colab={}
27,848
/DDQN_with_gold_miner_env_RLComp2020.ipynb
23e125982701eb3a39c2e026aabbc197c27fa74c
[]
no_license
19521242bao/RLComp2020
https://github.com/19521242bao/RLComp2020
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
310,364
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="xHlNhHKuPrK6" colab_type="text" # # Import libraries # + [markdown] id="cg9PRQKRQj5m" colab_type="text" # ## Imported libraries for DDQN # + id="cLpeqT-NHsEg" colab_type="code" colab={} ''' Before run into source code, we recommend using provided Docker image Provided and document: TODO: INSERT LINK TO THE DOCUMENT HERE ''' # import os # Import TF and Keras # from keras.layers import Dense, Activation # from keras.models import Sequential, load_model, model_from_json from keras.optimizers import Adam # from keras import backend as K # import tensorflow as tf from random import random, randrange # import numpy as np # + [markdown] id="1tv9U3BtQweh" colab_type="text" # ## Default imported libraries for RLComp2020 # + id="SmwLTdS0JIsi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="e43db08d-49a4-4207-dc9a-0b9c4ab810de" import sys import numpy as np import pandas as pd import datetime import json from array import * import os import math # from random import randrange # import random from keras.models import Sequential from keras.models import model_from_json from keras.layers import Dense, Activation from keras import optimizers import tensorflow.compat.v1 as tf from tensorflow.compat.v1.keras import backend as K tf.disable_v2_behavior() # + [markdown] id="u2gkR5WCPvVA" colab_type="text" # # GAME_SOCKET_DUMMY # + [markdown] id="kfvqwjQb10iG" colab_type="text" # ## ObstacleInfo # + id="5ZadqPaIJKfc" colab_type="code" colab={} #Classes in GAME_SOCKET_DUMMY.py class ObstacleInfo: # initial energy for obstacles: Land (key = 0): -1, Forest(key = -1): 0 (random), Trap(key = -2): -10, Swamp (key = -3): -5 types = {0: -1, -1: 0, -2: -10, -3: -5} def __init__(self): self.type = 0 self.posx = 0 self.posy = 0 self.value = 
0 class GoldInfo: def __init__(self): self.posx = 0 self.posy = 0 self.amount = 0 def loads(self, data): golds = [] for gd in data: g = GoldInfo() g.posx = gd["posx"] g.posy = gd["posy"] g.amount = gd["amount"] golds.append(g) return golds class PlayerInfo: STATUS_PLAYING = 0 STATUS_ELIMINATED_WENT_OUT_MAP = 1 STATUS_ELIMINATED_OUT_OF_ENERGY = 2 STATUS_ELIMINATED_INVALID_ACTION = 3 STATUS_STOP_EMPTY_GOLD = 4 STATUS_STOP_END_STEP = 5 def __init__(self, id): self.playerId = id self.score = 0 self.energy = 0 self.posx = 0 self.posy = 0 self.lastAction = -1 self.status = PlayerInfo.STATUS_PLAYING self.freeCount = 0 class GameInfo: def __init__(self): self.numberOfPlayers = 1 self.width = 0 self.height = 0 self.steps = 100 self.golds = [] self.obstacles = [] def loads(self, data): m = GameInfo() m.width = data["width"] m.height = data["height"] m.golds = GoldInfo().loads(data["golds"]) m.obstacles = data["obstacles"] m.numberOfPlayers = data["numberOfPlayers"] m.steps = data["steps"] return m class UserMatch: def __init__(self): self.playerId = 1 self.posx = 0 self.posy = 0 self.energy = 50 self.gameinfo = GameInfo() def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) class StepState: def __init__(self): self.players = [] self.golds = [] self.changedObstacles = [] def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) # + [markdown] id="_FDdbIIq14Rr" colab_type="text" # ## GameSocket # + id="EHPyXk0nJNZ5" colab_type="code" colab={} #Main class in GAME_SOCKET_DUMMY.py class GameSocket: bog_energy_chain = {-5: -20, -20: -40, -40: -100, -100: -100} def __init__(self): self.stepCount = 0 self.maxStep = 0 self.mapdir = "Maps" # where to load all pre-defined maps self.mapid = "" self.userMatch = UserMatch() self.user = PlayerInfo(1) self.stepState = StepState() self.maps = {} # key: map file name, value: file content self.map = [] # running map info: 0->Land, -1->Forest, -2->Trap, 
-3:Swamp, >0:Gold self.energyOnMap = [] # self.energyOnMap[x][y]: <0, amount of energy which player will consume if it move into (x,y) self.E = 50 self.resetFlag = True self.craftUsers = [] # players that craft at current step - for calculating amount of gold self.bots = [] self.craftMap = {} # cells that players craft at current step, key: x_y, value: number of players that craft at (x,y) def init_bots(self): self.bots = [Bot1(2), Bot2(3), Bot3(4)] # use bot1(id=2), bot2(id=3), bot3(id=4) for (bot) in self.bots: # at the beginning, all bots will have same position, energy as player bot.info.posx = self.user.posx bot.info.posy = self.user.posy bot.info.energy = self.user.energy bot.info.lastAction = -1 bot.info.status = PlayerInfo.STATUS_PLAYING bot.info.score = 0 self.stepState.players.append(bot.info) self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players) print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers) def reset(self, requests): # load new game by given request: [map id (filename), posx, posy, initial energy] # load new map self.reset_map(requests[0]) self.userMatch.posx = int(requests[1]) self.userMatch.posy = int(requests[2]) self.userMatch.energy = int(requests[3]) self.userMatch.gameinfo.steps = int(requests[4]) self.maxStep = self.userMatch.gameinfo.steps # init data for players self.user.posx = self.userMatch.posx # in self.user.posy = self.userMatch.posy self.user.energy = self.userMatch.energy self.user.status = PlayerInfo.STATUS_PLAYING self.user.score = 0 self.stepState.players = [self.user] self.E = self.userMatch.energy self.resetFlag = True self.init_bots() self.stepCount = 0 def reset_map(self, id): # load map info self.mapId = id self.map = json.loads(self.maps[self.mapId]) self.userMatch = self.map_info(self.map) self.stepState.golds = self.userMatch.gameinfo.golds self.map = json.loads(self.maps[self.mapId]) self.energyOnMap = json.loads(self.maps[self.mapId]) for x in range(len(self.map)): for y in 
range(len(self.map[x])): if self.map[x][y] > 0: # gold self.energyOnMap[x][y] = -4 else: # obstacles self.energyOnMap[x][y] = ObstacleInfo.types[self.map[x][y]] def connect(self): # simulate player's connect request print("Connected to server.") for mapid in range(len(Maps)): filename = "map" + str(mapid) print("Found: " + filename) self.maps[filename] = str(Maps[mapid]) def map_info(self, map): # get map info # print(map) userMatch = UserMatch() userMatch.gameinfo.height = len(map) userMatch.gameinfo.width = len(map[0]) i = 0 while i < len(map): j = 0 while j < len(map[i]): if map[i][j] > 0: # gold g = GoldInfo() g.posx = j g.posy = i g.amount = map[i][j] userMatch.gameinfo.golds.append(g) else: # obstacles o = ObstacleInfo() o.posx = j o.posy = i o.type = -map[i][j] o.value = ObstacleInfo.types[map[i][j]] userMatch.gameinfo.obstacles.append(o) j += 1 i += 1 return userMatch def receive(self): # send data to player (simulate player's receive request) if self.resetFlag: # for the first time -> send game info self.resetFlag = False data = self.userMatch.to_json() for (bot) in self.bots: bot.new_game(data) # print(data) return data else: # send step state self.stepCount = self.stepCount + 1 if self.stepCount >= self.maxStep: for player in self.stepState.players: player.status = PlayerInfo.STATUS_STOP_END_STEP data = self.stepState.to_json() for (bot) in self.bots: # update bots' state bot.new_state(data) # print(data) return data def send(self, message): # receive message from player (simulate send request from player) if message.isnumeric(): # player send action self.resetFlag = False self.stepState.changedObstacles = [] action = int(message) # print("Action = ", action) self.user.lastAction = action self.craftUsers = [] self.step_action(self.user, action) for bot in self.bots: if bot.info.status == PlayerInfo.STATUS_PLAYING: action = bot.next_action() bot.info.lastAction = action # print("Bot Action: ", action) self.step_action(bot.info, action) 
self.action_5_craft() for c in self.stepState.changedObstacles: self.map[c["posy"]][c["posx"]] = -c["type"] self.energyOnMap[c["posy"]][c["posx"]] = c["value"] else: # reset game requests = message.split(",") print("Reset game: ", requests) self.reset(requests) def step_action(self, user, action): switcher = { 0: self.action_0_left, 1: self.action_1_right, 2: self.action_2_up, 3: self.action_3_down, 4: self.action_4_free, 5: self.action_5_craft_pre } func = switcher.get(action, self.invalidAction) func(user) def action_5_craft_pre(self, user): # collect players who craft at current step user.freeCount = 0 if self.map[user.posy][user.posx] <= 0: # craft at the non-gold cell user.energy -= 10 if user.energy <= 0: user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY user.lastAction = 6 #eliminated else: user.energy -= 5 if user.energy > 0: self.craftUsers.append(user) key = str(user.posx) + "_" + str(user.posy) if key in self.craftMap: count = self.craftMap[key] self.craftMap[key] = count + 1 else: self.craftMap[key] = 1 else: user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY user.lastAction = 6 #eliminated def action_0_left(self, user): # user go left user.freeCount = 0 user.posx = user.posx - 1 if user.posx < 0: user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP user.lastAction = 6 #eliminated else: self.go_to_pos(user) def action_1_right(self, user): # user go right user.freeCount = 0 user.posx = user.posx + 1 if user.posx >= self.userMatch.gameinfo.width: user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP user.lastAction = 6 #eliminated else: self.go_to_pos(user) def action_2_up(self, user): # user go up user.freeCount = 0 user.posy = user.posy - 1 if user.posy < 0: user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP user.lastAction = 6 #eliminated else: self.go_to_pos(user) def action_3_down(self, user): # user go right user.freeCount = 0 user.posy = user.posy + 1 if user.posy >= self.userMatch.gameinfo.height: user.status = 
PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP user.lastAction = 6 #eliminated else: self.go_to_pos(user) def action_4_free(self, user): # user free user.freeCount += 1 if user.freeCount == 1: user.energy += int(self.E / 4) elif user.freeCount == 2: user.energy += int(self.E / 3) elif user.freeCount == 3: user.energy += int(self.E / 2) else: user.energy = self.E if user.energy > self.E: user.energy = self.E def action_5_craft(self): craftCount = len(self.craftUsers) # print ("craftCount",craftCount) if (craftCount > 0): for user in self.craftUsers: x = user.posx y = user.posy key = str(user.posx) + "_" + str(user.posy) c = self.craftMap[key] m = min(math.ceil(self.map[y][x] / c), 50) user.score += m # print ("user", user.playerId, m) for user in self.craftUsers: x = user.posx y = user.posy key = str(user.posx) + "_" + str(user.posy) if key in self.craftMap: c = self.craftMap[key] del self.craftMap[key] m = min(math.ceil(self.map[y][x] / c), 50) self.map[y][x] -= m * c if self.map[y][x] < 0: self.map[y][x] = 0 self.energyOnMap[y][x] = ObstacleInfo.types[0] for g in self.stepState.golds: if g.posx == x and g.posy == y: g.amount = self.map[y][x] if g.amount == 0: self.stepState.golds.remove(g) self.add_changed_obstacle(x, y, 0, ObstacleInfo.types[0]) if len(self.stepState.golds) == 0: for player in self.stepState.players: player.status = PlayerInfo.STATUS_STOP_EMPTY_GOLD break; self.craftMap = {} def invalidAction(self, user): user.status = PlayerInfo.STATUS_ELIMINATED_INVALID_ACTION user.lastAction = 6 #eliminated def go_to_pos(self, user): # player move to cell(x,y) if self.map[user.posy][user.posx] == -1: user.energy -= randrange(16) + 5 elif self.map[user.posy][user.posx] == 0: user.energy += self.energyOnMap[user.posy][user.posx] elif self.map[user.posy][user.posx] == -2: user.energy += self.energyOnMap[user.posy][user.posx] self.add_changed_obstacle(user.posx, user.posy, 0, ObstacleInfo.types[0]) elif self.map[user.posy][user.posx] == -3: user.energy += 
self.energyOnMap[user.posy][user.posx] self.add_changed_obstacle(user.posx, user.posy, 3, self.bog_energy_chain[self.energyOnMap[user.posy][user.posx]]) else: user.energy -= 4 if user.energy <= 0: user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY user.lastAction = 6 #eliminated def add_changed_obstacle(self, x, y, t, v): added = False for o in self.stepState.changedObstacles: if o["posx"] == x and o["posy"] == y: added = True break if added == False: o = {} o["posx"] = x o["posy"] = y o["type"] = t o["value"] = v self.stepState.changedObstacles.append(o) def close(self): print("Close socket.") # + [markdown] id="RTf9bySDP0mR" colab_type="text" # # DEFAULT BOTS # + [markdown] id="uV6Fuqd5Q4RH" colab_type="text" # ## Bot1 # + id="p1zShuEtJN2y" colab_type="code" colab={} #Bots :bot1 class Bot1: ACTION_GO_LEFT = 0 ACTION_GO_RIGHT = 1 ACTION_GO_UP = 2 ACTION_GO_DOWN = 3 ACTION_FREE = 4 ACTION_CRAFT = 5 def __init__(self, id): self.state = State() self.info = PlayerInfo(id) def next_action(self): if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0: if self.info.energy >= 6: return self.ACTION_CRAFT else: return self.ACTION_FREE if self.info.energy < 5: return self.ACTION_FREE else: action = self.ACTION_GO_UP if self.info.posy % 2 == 0: if self.info.posx < self.state.mapInfo.max_x: action = self.ACTION_GO_RIGHT else: if self.info.posx > 0: action = self.ACTION_GO_LEFT else: action = self.ACTION_GO_DOWN return action def new_game(self, data): try: self.state.init_state(data) except Exception as e: import traceback traceback.print_exc() def new_state(self, data): # action = self.next_action(); # self.socket.send(action) try: self.state.update_state(data) except Exception as e: import traceback traceback.print_exc() # + [markdown] id="sDJxsi68Q6Bt" colab_type="text" # ## Bot2 # + id="DGWC4byNJQqc" colab_type="code" colab={} #Bots :bot2 class Bot2: ACTION_GO_LEFT = 0 ACTION_GO_RIGHT = 1 ACTION_GO_UP = 2 ACTION_GO_DOWN = 3 ACTION_FREE = 4 ACTION_CRAFT 
= 5 def __init__(self, id): self.state = State() self.info = PlayerInfo(id) def next_action(self): if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0: if self.info.energy >= 6: return self.ACTION_CRAFT else: return self.ACTION_FREE if self.info.energy < 5: return self.ACTION_FREE else: action = np.random.randint(0, 4) return action def new_game(self, data): try: self.state.init_state(data) except Exception as e: import traceback traceback.print_exc() def new_state(self, data): # action = self.next_action(); # self.socket.send(action) try: self.state.update_state(data) except Exception as e: import traceback traceback.print_exc() # + [markdown] id="jvTLZtrlQ7dF" colab_type="text" # ## Bot3 # + id="eGy-fBHcJQ_p" colab_type="code" colab={} #Bots :bot3 class Bot3: ACTION_GO_LEFT = 0 ACTION_GO_RIGHT = 1 ACTION_GO_UP = 2 ACTION_GO_DOWN = 3 ACTION_FREE = 4 ACTION_CRAFT = 5 def __init__(self, id): self.state = State() self.info = PlayerInfo(id) def next_action(self): if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0: if self.info.energy >= 6: return self.ACTION_CRAFT else: return self.ACTION_FREE if self.info.energy < 5: return self.ACTION_FREE else: action = self.ACTION_GO_LEFT if self.info.posx % 2 == 0: if self.info.posy < self.state.mapInfo.max_y: action = self.ACTION_GO_DOWN else: if self.info.posy > 0: action = self.ACTION_GO_UP else: action = self.ACTION_GO_RIGHT return action def new_game(self, data): try: self.state.init_state(data) except Exception as e: import traceback traceback.print_exc() def new_state(self, data): # action = self.next_action(); # self.socket.send(action) try: self.state.update_state(data) except Exception as e: import traceback traceback.print_exc() # + [markdown] id="bIrKRmp6P3zx" colab_type="text" # # MinerState # + id="vMEak6KuJSSl" colab_type="code" colab={} #MinerState.py def str_2_json(str): return json.loads(str, encoding="utf-8") class MapInfo: def __init__(self): self.max_x = 0 #Width of the 
map self.max_y = 0 #Height of the map self.golds = [] #List of the golds in the map self.obstacles = [] self.numberOfPlayers = 0 self.maxStep = 0 #The maximum number of step is set for this map def init_map(self, gameInfo): #Initialize the map at the begining of each episode self.max_x = gameInfo["width"] - 1 self.max_y = gameInfo["height"] - 1 self.golds = gameInfo["golds"] self.obstacles = gameInfo["obstacles"] self.maxStep = gameInfo["steps"] self.numberOfPlayers = gameInfo["numberOfPlayers"] def update(self, golds, changedObstacles): #Update the map after every step self.golds = golds for cob in changedObstacles: newOb = True for ob in self.obstacles: if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]: newOb = False #print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ", # cob["type"], " / value: ", ob["value"], " -> ", cob["value"]) ob["type"] = cob["type"] ob["value"] = cob["value"] break if newOb: self.obstacles.append(cob) #print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ", # cob["value"]) def get_min_x(self): return min([cell["posx"] for cell in self.golds]) def get_max_x(self): return max([cell["posx"] for cell in self.golds]) def get_min_y(self): return min([cell["posy"] for cell in self.golds]) def get_max_y(self): return max([cell["posy"] for cell in self.golds]) def is_row_has_gold(self, y): return y in [cell["posy"] for cell in self.golds] def is_column_has_gold(self, x): return x in [cell["posx"] for cell in self.golds] def gold_amount(self, x, y): #Get the amount of golds at cell (x,y) for cell in self.golds: if x == cell["posx"] and y == cell["posy"]: return cell["amount"] return 0 def get_obstacle(self, x, y): # Get the kind of the obstacle at cell(x,y) for cell in self.obstacles: if x == cell["posx"] and y == cell["posy"]: return cell["type"] return -1 # No obstacle at the cell (x,y) class State: STATUS_PLAYING = 0 STATUS_ELIMINATED_WENT_OUT_MAP = 1 
STATUS_ELIMINATED_OUT_OF_ENERGY = 2 STATUS_ELIMINATED_INVALID_ACTION = 3 STATUS_STOP_EMPTY_GOLD = 4 STATUS_STOP_END_STEP = 5 def __init__(self): self.end = False self.score = 0 self.lastAction = None self.id = 0 self.x = 0 self.y = 0 self.energy = 0 self.mapInfo = MapInfo() self.players = [] self.stepCount = 0 self.status = State.STATUS_PLAYING def init_state(self, data): #parse data from server into object game_info = str_2_json(data) self.end = False self.score = 0 self.lastAction = None self.id = game_info["playerId"] self.x = game_info["posx"] self.y = game_info["posy"] self.energy = game_info["energy"] self.mapInfo.init_map(game_info["gameinfo"]) self.stepCount = 0 self.status = State.STATUS_PLAYING self.players = [{"playerId": 2, "posx": self.x, "posy": self.y}, {"playerId": 3, "posx": self.x, "posy": self.y}, {"playerId": 4, "posx": self.x, "posy": self.y}] def update_state(self, data): new_state = str_2_json(data) for player in new_state["players"]: if player["playerId"] == self.id: self.x = player["posx"] self.y = player["posy"] self.energy = player["energy"] self.score = player["score"] self.lastAction = player["lastAction"] self.status = player["status"] self.mapInfo.update(new_state["golds"], new_state["changedObstacles"]) self.players = new_state["players"] for i in range(len(self.players), 4, 1): self.players.append({"playerId": i, "posx": self.x, "posy": self.y}) self.stepCount = self.stepCount + 1 # + [markdown] id="j4F6veYsQ_Wk" colab_type="text" # # Miner Environment # + id="5hOgGbfYJTzV" colab_type="code" colab={} #MinerEnv.py TreeID = 1 TrapID = 2 SwampID = 3 class MinerEnv: def __init__(self): self.socket = GameSocket() self.state = State() self.score_pre = self.state.score#Storing the last score for designing the reward function def start(self): #connect to server self.socket.connect() def end(self): #disconnect server self.socket.close() def send_map_info(self, request):#tell server which map to run self.socket.send(request) def reset(self): 
#start new game try: message = self.socket.receive() #receive game info from server self.state.init_state(message) #init state except Exception as e: import traceback traceback.print_exc() def step(self, action): #step process self.socket.send(action) #send action to server try: message = self.socket.receive() #receive new state from server self.state.update_state(message) #update to local state except Exception as e: import traceback traceback.print_exc() # Functions are customized by client def get_state(self): # Building the map view = np.zeros([self.state.mapInfo.max_x + 1, self.state.mapInfo.max_y + 1], dtype=int) for i in range(self.state.mapInfo.max_x + 1): for j in range(self.state.mapInfo.max_y + 1): if self.state.mapInfo.get_obstacle(i, j) == TreeID: # Tree view[i, j] = -TreeID if self.state.mapInfo.get_obstacle(i, j) == TrapID: # Trap view[i, j] = -TrapID if self.state.mapInfo.get_obstacle(i, j) == SwampID: # Swamp view[i, j] = -SwampID if self.state.mapInfo.gold_amount(i, j) > 0: view[i, j] = self.state.mapInfo.gold_amount(i, j) DQNState = view.flatten().tolist() #Flattening the map matrix to a vector # Add position and energy of agent to the DQNState DQNState.append(self.state.x) DQNState.append(self.state.y) DQNState.append(self.state.energy) #Add position of bots for player in self.state.players: if player["playerId"] != self.state.id: DQNState.append(player["posx"]) DQNState.append(player["posy"]) #Convert the DQNState from list to array for training DQNState = np.array(DQNState) return DQNState def get_reward(self): # Calculate reward reward = 0 score_action = self.state.score - self.score_pre self.score_pre = self.state.score if score_action > 0: #If the DQN agent crafts golds, then it should obtain a positive reward (equal score_action) reward += score_action #If the DQN agent crashs into obstacels (Tree, Trap, Swamp), then it should be punished by a negative reward if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID: # Tree 
reward -= TreeID if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID: # Trap reward -= TrapID if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID: # Swamp reward -= SwampID # If out of the map, then the DQN agent should be punished by a larger nagative reward. if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP: reward += -10 #Run out of energy, then the DQN agent should be punished by a larger nagative reward. if self.state.status == State.STATUS_ELIMINATED_OUT_OF_ENERGY: reward += -10 # print ("reward",reward) return reward def check_terminate(self): #Checking the status of the game #it indicates the game ends or is playing return self.state.status != State.STATUS_PLAYING # + [markdown] id="ctYzcOxpQJYJ" colab_type="text" # # MAPS # + id="2g_Ph1PTJX6e" colab_type="code" colab={} #Creating Maps #This function is used to create 05 maps instead of loading them from Maps folder in the local def CreateMaps(): map1 = [ [0, 0, -2, 100, 0, 0, -1, -1, -3, 0, 0, 0, -1, -1, 0, 0, -3, 0, -1, -1,0], [-1,-1, -2, 0, 0, 0, -3, -1, 0, -2, 0, 0, 0, -1, 0, -1, 0, -2, -1, 0,0], [0, 0, -1, 0, 0, 0, 0, -1, -1, -1, 0, 0, 100, 0, 0, 0, 0, 50, -2, 0,0], [0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -1, 50, -2, 0, 0, -1, -1, 0,0], [-2, 0, 200, -2, -2, 300, 0, 0, -2, -2, 0, 0, -3, 0, -1, 0, 0, -3, -1, 0,0], [0, -1, 0, 0, 0, 0, 0, -3, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, -2, 0,0], [0, -1, -1, 0, 0, -1, -1, 0, 0, 700, -1, 0, 0, 0, -2, -1, -1, 0, 0, 0,100], [0, 0, 0, 500, 0, 0, -1, 0, -2, -2, -1, -1, 0, 0, -2, 0, -3, 0, 0, -1,0], [-1, -1, 0,-2 , 0, -1, -2, 0, 400, -2, -1, -1, 500, 0, -2, 0, -3, 100, 0, 0,0] ] map2 = [ [0, 0, -2, 0, 0, 0, -1, -1, -3, 0, 0, 0, -1, -1, 0, 0, -3, 0, -1, -1,0], [-1,-1, -2, 100, 0, 0, -3, -1, 0, -2, 100, 0, 0, -1, 0, -1, 0, -2, -1, 0,0], [0, 0, -1, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 50, 0, -2, 0,0], [0, 200, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -1, 50, -2, 0, 0, -1, -1, 0,0], [-2, 0, 0, -2, -2, 0, 0, 0, -2, -2, 0, 0, -3, 0, 
-1, 0, 0, -3, -1, 0,0], [0, -1, 0, 0, 300, 0, 0, -3, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, -2, 0,0], [500, -1, -1, 0, 0, -1, -1, 0, 700, 0, -1, 0, 0, 0, -2, -1, -1, 0, 0, 0,0], [0, 0, 0, 0, 0, 0, -1, 0, -2, -2, -1, -1, 0, 0, -2, 0, -3, 100, 0, -1,0], [-1, -1, 0,-2 , 0, -1, -2, 400, 0, -2, -1, -1, 0, 500, -2, 0, -3, 0, 0, 100,0] ] map3= [ [0, 0, -2, 0, 0, 0, -1, -1, -3, 0, 100, 0, -1, -1, 0, 0, -3, 0, -1, -1,0], [-1,-1, -2, 0, 0, 0, -3, -1, 0, -2, 0, 0, 0, -1, 0, -1, 0, -2, -1, 0,0 ], [0, 0, -1, 0, 0, 0, 100, -1, -1, -1, 0, 0, 50, 0, 0, 0, 50, 0, -2, 0,0], [0, 200, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -1, 0, -2, 0, 0, -1, -1, 0,0], [-2, 0, 0, -2, -2, 0, 0, 0, -2, -2, 0, 0, -3, 0, -1, 0, 0, -3, -1, 0,0], [0, -1, 0, 300, 0, 0, 0, -3, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, -2, 0,0], [0, -1, -1, 0, 0, -1, -1, 700, 0, 0, -1, 0, 0, 0, -2, -1, -1, 0, 0, 0,0], [0, 0, 0, 0, 0, 500, -1, 0, -2, -2, -1, -1, 0, 0, -2, 0, -3, 0, 700, -1,0], [-1, -1, 0,-2 , 0, -1, -2, 400, 0, -2, -1, -1, 0, 500, -2, 0, -3, 0, 0, 100,0] ] map4=[ [0, 0, -2, 0, 0, 0, -1, -1, -3, 0, 0, 0, -1, -1, 0, 0, -3, 0, -1, -1,0], [-1,-1, -2, 0, 0, 0, -3, -1, 0, -2, 0, 0, 100, -1, 0, -1, 0, -2, -1, 0,0], [0, 0, -1, 0, 100, 0, 0, -1, -1, -1, 0, 0, 0, 0, 50, 0, 50, 0, -2, 0,0], [0, 200, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -1, 0, -2, 0, 0, -1, -1, 0,0], [-2, 0, 0, -2, -2, 0, 0, 0, -2, -2, 0, 0, -3, 0, -1, 0, 0, -3, -1, 0,0], [0, -1, 0, 0, 0, 0, 300, -3, 0, 700, -1, -1, 0, 0, 0, 0, 0, 0, -2, 0,0], [0, -1, -1, 0, 0, -1, -1, 0, 0, 0, -1, 0, 0, 0, -2, -1, -1, 0, 0, 100,0], [500, 0, 0, 0, 0, 0, -1, 0, -2, -2, -1, -1, 0, 0, -2, 0, -3, 0, 0, -1,0], [-1, -1, 0,-2 , 0, -1, -2, 400, 0, -2, -1, -1, 0, 500, -2, 0, -3, 0, 0, 100,0] ] map5=[ [0, 0, -2, 0, 100, 0, -1, -1, -3, 0, 0, 0, -1, -1, 0, 0, -3, 0, -1, -1,0], [-1,-1, -2, 0, 0, 0, -3, -1, 0, -2, 100, 0, 0, -1, 0, -1, 0, -2, -1, 0,0], [0, 0, -1, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 50, 0, 0, 0, -2, 0,0], [0, 200, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -1, 0, -2, 0, 50, -1, -1, 0,0], [-2, 0, 0, -2, -2, 0, 0, 
0, -2, -2, 0, 0, -3, 0, -1, 0, 0, -3, -1, 0,0], [0, -1, 0, 0, 300, 0, 0, -3, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, -2, 0,0], [500, -1, -1, 0, 0, -1, -1, 0, 0, 700, -1, 0, 0, 0, -2, -1, -1, 0, 0, 100,0], [0, 0, 0, 0, 0, 0, -1, 0, -2, -2, -1, -1, 0, 0, -2, 0, -3, 0, 0, -1,0], [-1, -1, 0,-2 , 0, -1, -2, 400, 0, -2, -1, -1, 0, 500, -2, 0, -3, 0, 0, 100,0] ] Maps = (map1,map2,map3,map4,map5) return Maps # + [markdown] id="MQ7iT6e3QSqx" colab_type="text" # # Replay Buffer # + id="Rh8XaOCtI0bo" colab_type="code" colab={} class ReplayBuffer: def __init__( self, max_size, # max size of buffer input_shape, n_actions, # The number of action discrete = False ): self.mem_size = max_size self.mem_counter = 0 # Contain index of last element self.discrete = discrete self.state_memory = np.zeros((self.mem_size, input_shape)) self.new_state_memory = np.zeros((self.mem_size, input_shape)) dtype = np.int8 if self.discrete else np.float32 # type setting for memory saving self.action_memory = np.zeros((self.mem_size, n_actions), dtype = dtype) self.reward_memory = np.zeros(self.mem_size) self.terminal_memory = np.zeros(self.mem_size, dtype = np.float32) def store_transition(self, state, action, reward, state_next, done): index = self.mem_counter % self.mem_size # TODO: not clear this line self.new_state_memory[index] = state_next if self.discrete: actions = np.zeros(self.action_memory.shape[1]) actions[action] = 1.0 self.action_memory[index] = actions else: self.action_memory[index] = action self.reward_memory[index] = reward self.terminal_memory[index] = 1 - int(done) # flag self.mem_counter = self.mem_counter + 1 def sample_buffer(self, batch_size): max_mem = min(self.mem_counter, self.mem_size) batch = np.random.choice(max_mem, batch_size) states = self.state_memory[batch] new_states = self.new_state_memory[batch] actions = self.action_memory[batch] terminal = self.terminal_memory[batch] rewards = self.reward_memory[batch] return states, actions, rewards, new_states, terminal # + 
# + [markdown] id="MBSRWihCQcGC" colab_type="text"
# # Build DQN model

# + id="tkaVfwYVI2Z0" colab_type="code" colab={}
def build_dqn(learning_rate, n_actions, input_dims, fc1_dims, fc2_dims):
    """Build and compile a 2-hidden-layer MLP Q-network.

    Parameters
    ----------
    learning_rate : float
        Adam learning rate.
    n_actions : int
        Size of the (discrete) action space; also the output width.
    input_dims : int
        Length of the flattened state vector.
    fc1_dims, fc2_dims : int
        Widths of the two fully-connected hidden layers.

    Returns
    -------
    A compiled ``Sequential`` model with MSE loss (standard for Q-value
    regression targets).
    """
    model = Sequential([
        Dense(fc1_dims, input_dim=input_dims),
        Activation('relu'),
        Dense(fc2_dims),
        Activation('relu'),
        Dense(n_actions),  # linear output: one Q-value per action
    ])
    model.compile(optimizer=Adam(lr=learning_rate), loss='mse')
    return model


# + [markdown] id="3-O9j9r4QVFD" colab_type="text"
# # Double Deep Q Network off-policy

# + id="4182dfZ9I4xu" colab_type="code" colab={}
# Double Deep Q Network off-policy
class DDQNAgent:
    """Double-DQN agent: online network selects the argmax action, a
    periodically-synced target network evaluates it (reduces Q-value
    overestimation vs. vanilla DQN)."""

    def __init__(
        self,
        input_dims,              # length of the state vector
        n_actions,               # total number of discrete actions
        batch_size,              # minibatch size for experience replay
        alpha = 0.00025,         # learning rate
        gamma = 0.99,            # discount factor
        epsilon = 1,             # initial exploration rate (epsilon-greedy)
        epsilon_decay = 0.9999,  # multiplicative decay per update_epsilon() call
        epsilon_min = 0.01,      # exploration floor
        fname = 'ddqn_model.h5', # default model file name
        mem_size = 100000,       # replay buffer capacity
        replace_target = 300,    # sync target net every N stored transitions
    ):
        self.n_actions = n_actions
        self.action_space = [i for i in range(self.n_actions)]
        self.batch_size = batch_size
        self.replace_target = replace_target
        self.model = fname  # NOTE(review): holds the *file name*, not a model
        self.memory = ReplayBuffer(mem_size, input_dims, n_actions,
                                   discrete=True)

        # Hyperparameters
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min

        # Online (q_eval) and target (q_target) networks, same architecture.
        self.q_eval = build_dqn(alpha, n_actions, input_dims, 300, 300)
        self.q_target = build_dqn(alpha, n_actions, input_dims, 300, 300)

    def model_info(self):
        """Print keras summaries of both networks (debug helper)."""
        self.q_eval.summary()
        self.q_target.summary()

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def choose_act(self, state):
        """Epsilon-greedy action selection for ``state`` (1-D vector).

        Fix vs. original: the forward pass through ``q_eval`` is only run
        when exploiting — the original predicted unconditionally, wasting
        an inference on every exploration step.
        """
        state = state.reshape(1, len(state))
        if random() < self.epsilon:
            return randrange(self.n_actions)
        return np.argmax(self.q_eval.predict(state))

    def learn(self):
        """One experience-replay update of the online network.

        Double-DQN target: action chosen by the *online* net on the next
        state, value taken from the *target* net for that action.
        """
        if self.memory.mem_counter <= self.batch_size:
            return  # not enough experience collected yet

        state, action, reward, new_state, done = self.memory.sample_buffer(
            self.batch_size)

        # Recover integer action indices from the stored one-hot rows.
        action_values = np.array(self.action_space, dtype=np.int8)
        action_indices = np.dot(action, action_values)

        q_next = self.q_target.predict(new_state)  # target-net values
        q_eval = self.q_eval.predict(new_state)    # online-net values (for argmax)
        q_pred = self.q_eval.predict(state)        # current predictions

        max_actions = np.argmax(q_eval, axis=1)

        q_target = q_pred  # start from predictions, overwrite taken actions
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        # ``done`` is stored as (1 - terminal), so terminal states get no bootstrap.
        q_target[batch_index, action_indices] = reward + \
            self.gamma * q_next[batch_index, max_actions.astype(int)] * done

        _ = self.q_eval.fit(state, q_target, verbose=0)

        # Periodically hard-copy online weights into the target network.
        if self.memory.mem_counter % self.replace_target == 0:
            self.update_network_parameters()

    def update_epsilon(self):
        """Decay exploration rate, clamped at ``epsilon_min``."""
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
def update_network_parameters(self): self.q_target.set_weights(self.q_eval.get_weights()) # Based on DQNModel from RLComp2020 source code def save_model(self, path, model_name): model_json = self.q_eval.to_json() with open(path + model_name + ".json", "w") as json_file: json_file.write(model_json) # serialize weights to HDF5 self.q_eval.save_weights(path + model_name + ".h5") print("Saved model to disk") # # DID NOT TEST YET # def load_model(self): # self.q_eval.load_model(self.model_file) # if self.epsilon <= self.epsilon_min: # self.update_network_parameters() # + [markdown] id="5ME5trqZRXeM" colab_type="text" # # DDQN with Lunar Lander v2 in Gym from OpenAI # + id="YLiG-jUKI8QR" colab_type="code" colab={} # MAIN TRAINING ALGORITHM FOR DDQN # import gym # # from gym import wrappers # import numpy as np # env = gym.make('LunarLander-v2') # # ddqn_agent = DDQNAgent(alpha = 0.0005, gamma = 0.99, epsilon = 1.0, # # input_dims=8, batch_size = 64, n_actions = 4) # ddqn_agent = DDQNAgent(input_dims = 8, n_actions = 4, batch_size = 64) # n_games = 500 # ddqn_scores = [] # eps_history = [] # # Print model info # ddqn_agent.model_info() # for i in range(n_games): # done = False # score = 0 # observation = env.reset() # while not done: # action = ddqn_agent.choose_act(observation) # observation_, reward, done, info = env.step(action) # score += reward # ddqn_agent.remember(observation, action, reward, observation_, done) # observation = observation_ # ddqn_agent.learn() # eps_history.append(ddqn_agent.epsilon) # ddqn_scores.append(score) # avg_score = np.mean(ddqn_scores[max(0, i-100):(i+1)]) # # print('episode ',i,'score %.2f ' % score, 'average score %.2f' % avg_score) # print('----------------------------------------') # print('episode ', i) # print('score %.2f' %score) # print('average score %.2f' %avg_score) # print('----------------------------------------') # # DEBUG # if i == 5: # ddqn_agent.save_model("/", "DDQNModel_" + "ep_" + str(i)) # # if i % 100 == 0 and i > 
0: # # ddqn_agent.save_model() # + [markdown] id="DM7-Rf3MRE-c" colab_type="text" # # Main DDQN Training Algorithm # + [markdown] id="lw_cTRALTpS_" colab_type="text" # ## Sample main traninig algorithm from RLComp sample source code # + id="pH8W7l2PK5ar" colab_type="code" colab={} # # Initialize network and memory # # DQNAgent = DQN(INPUTNUM,ACTIONNUM) # # memory = Memory(MEMORY_SIZE) # # Initialize environment # Maps = CreateMaps()#Creating 05 maps # minerEnv = MinerEnv()#Creating a communication environment between the DQN model and the game environment # minerEnv.start() #Connect to the game # train = False #The variable is used to indicate that the replay starts, and the epsilon starts decrease. # #Training Process # #the main part of the deep-q learning agorithm # for episode_i in range(0,N_EPISODE): # try: # # Choosing a map in the list # mapID = np.random.randint(0, 5) #Choosing a map ID from 5 maps in Maps folder randomly # posID_x = np.random.randint(MAP_MAX_X) #Choosing a initial position of the DQN agent on X-axes randomly # posID_y = np.random.randint(MAP_MAX_Y) #Choosing a initial position of the DQN agent on Y-axes randomly # #Creating a request for initializing a map, initial position, the initial energy, and the maximum number of steps of the DQN agent # request = ("map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100") # #Send the request to the game environment # minerEnv.send_map_info(request) # # Getting the initial state # minerEnv.reset() #Initialize the game environment # s = minerEnv.get_state()#Get the state after reseting. 
# #This function (get_state()) is an example of creating a state for the DQN model # total_reward = 0 #The amount of rewards for the entire episode # terminate = False #The variable indicates that the episode ends # maxStep = minerEnv.state.mapInfo.maxStep #Get the maximum number of steps for each episode in training # #Start an episde for training # for step in range(0, maxStep): # action = DQNAgent.act(s) # Getting an action from the DQN model from the state (s) # minerEnv.step(str(action)) # Performing the action in order to obtain the new state # s_next = minerEnv.get_state() # Getting a new state # reward = minerEnv.get_reward() # Getting a reward # terminate = minerEnv.check_terminate() # Checking the end status of the episode # # Add this transition to the memory batch # memory.push(s, action, reward, terminate, s_next) # # Sample batch memory to train network # if (memory.length > INITIAL_REPLAY_SIZE): # #If there are INITIAL_REPLAY_SIZE experiences in the memory batch # #then start replaying # batch = memory.sample(BATCH_SIZE) #Get a BATCH_SIZE experiences for replaying # DQNAgent.replay(batch, BATCH_SIZE)#Do relaying # train = True #Indicate the training starts # total_reward = total_reward + reward #Plus the reward to the total rewad of the episode # s = s_next #Assign the next state for the next step. 
# #Saving data to file # '''save_data = np.hstack([episode_i+1,step+1,reward,total_reward,action, DQNAgent.epsilon, terminate]).reshape(1,7) # with open(filename, 'a') as f: # pd.DataFrame(save_data).to_csv(f,encoding='utf-8', index=False, header = False)''' # if terminate == True: # #If the episode ends, then go to the next episode # break # # Iteration to save the network architecture and weights # if (np.mod(episode_i + 1, SAVE_NETWORK) == 0 and train == True): # DQNAgent.target_train() # Replace the learning weights for target model with soft replacement # #Save the DQN model # now = datetime.datetime.now() #Get the latest datetime # DQNAgent.save_model( "DQNmodel_" + now.strftime("%Y%m%d-%H%M") + "_ep" + str(episode_i+1)) # #Print the training information after the episode # print('Episode %d ends. Number of steps is: %d. Accumlated Reward = %.2f. Epsilon = %.2f .Termination code: %d' % (episode_i+1, step+1, total_reward, DQNAgent.epsilon, terminate)) # #Decreasing the epsilon if the replay starts # if train == True: # DQNAgent.update_epsilon() # except Exception as e: # import traceback # traceback.print_exc() # #print("Finished.") # break # + [markdown] id="FHRJVDFwTx9u" colab_type="text" # # DDQN Gold Miner parameter settings # + id="aVttyVFAyDIG" colab_type="code" colab={} # Parameters for training a DQN model N_EPISODE = 100000 #The number of episodes for training MAX_STEP = 1000 #The number of steps for each episode BATCH_SIZE = 64 #The number of experiences for each replay MEMORY_SIZE = 1000000 #The size of the batch for storing experiences SAVE_NETWORK = 100 # After this number of episodes, the DQN model is saved for testing later. 
INITIAL_REPLAY_SIZE = 1000 #The number of experiences are stored in the memory batch before starting replaying INPUTNUM = 198 #The number of input values for the DQN model ACTIONNUM = 6 #The number of actions output from the DQN model MAP_MAX_X = 21 #Width of the Map MAP_MAX_Y = 9 #Height of the Map # + id="vt1iqw--Ib6S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="d46ac72d-2230-49d2-f90d-5d412ef98943" # Initialize environment Maps = CreateMaps()#Creating 05 maps minerEnv = MinerEnv()#Creating a communication environment between the DQN model and the game environment minerEnv.start() #Connect to the game # train = False #The variable is used to indicate that the replay starts, and the epsilon starts decrease. # + id="XNT-LZ8OUCby" colab_type="code" colab={} ddqn_agent = DDQNAgent(input_dims = INPUTNUM, n_actions = ACTIONNUM, batch_size = BATCH_SIZE) n_games = 500 ddqn_scores = [] eps_history = [] # + id="11SPNruf9CGe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 656} outputId="77cb2fbd-c7b9-488d-f000-f94dc1202dc9" ddqn_agent.model_info() # + id="M6rjIBx3psQE" colab_type="code" colab={} # # Debug purpose # ddqn_agent.return_eps() # print(random()) # + [markdown] id="naoEojpeM__P" colab_type="text" # # DDQN Main Training Algorithm for Gold Miner Environment # + id="jCAFIR0qxAkZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="fb7d2c8e-abbc-4f7e-a022-661a89ad1bdd" # for episode_i in range(0,N_EPISODE): # try: # # Choosing a map in the list # mapID = np.random.randint(0, 5) #Choosing a map ID from 5 maps in Maps folder randomly # posID_x = np.random.randint(MAP_MAX_X) #Choosing a initial position of the DQN agent on X-axes randomly # posID_y = np.random.randint(MAP_MAX_Y) #Choosing a initial position of the DQN agent on Y-axes randomly # #Creating a request for initializing a map, initial position, the initial energy, and the maximum number of steps of the 
DQN agent # request = ("map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100") # #Send the request to the game environment (GAME_SOCKET_DUMMY.py) # minerEnv.send_map_info(request) # # Getting the initial state # minerEnv.reset() #Initialize the game environment # s = minerEnv.get_state()#Get the state after reseting. # #This function (get_state()) is an example of creating a state for the DQN model # maxStep = minerEnv.state.mapInfo.maxStep # Get the maximum number of steps for each episode in training # done = False # Flag indicates that the episode ends # score = 0 # Score for entire episode # # for debug purpose # list_action_per_eps = [] # while not done: # action = ddqn_agent.choose_act(s) # Getting action from the model # minerEnv.step(str(action)) # Performing the action in order to obtain new state # done = minerEnv.check_terminate() # Checking the end status of the episode # s_next = minerEnv.get_state() # Getting a new state # reward = minerEnv.get_reward() # Getting a reward # # list_action_per_eps.append(action) # # Store current state and perform learning for current state # ddqn_agent.remember(s,action, reward, s_next,done) # Push current state to memory for experience replay # ddqn_agent.learn() # Performing learn and push current state to the memory # score += reward # Total reward of an episode # s = s_next # Assign the next state for the next step # eps_history.append(ddqn_agent.epsilon) # ddqn_scores.append(score) # avg_score = np.mean(ddqn_scores[max(0, episode_i-100):(episode_i+1)]) # # print('episode ',i,'score %.2f ' % score, 'average score %.2f' % avg_score) # print('----------------------------------------') # print('episode ', episode_i) # print('score %.2f' %score) # print('epsilon %.2f' %ddqn_agent.epsilon) # print('average score %.2f' %avg_score) # print('list of action: ', list_action_per_eps) # print('----------------------------------------') # # Save DDQN model for every 100 iteration # if episode_i % SAVE_NETWORK 
== 0 and episode_i > 0: # ddqn_agent.save_model("/", "DDQNModel_" + "ep_" + str(episode_i)) # # # Save DDQN model for every 100 iteration # # if episode_i % 100 == 0: # # ddqn_agent.save_model("/", "DDQNModel_episode_" + str(i)) # except Exception as e: # import traceback # traceback.print_exc() # break # + [markdown] id="pv-RKOcphVfd" colab_type="text" # # [not done] DDQN main training with maxStep per episode implementation # + id="FbxKxR_8htym" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4da6116c-929d-4550-f8af-8518cfc010d4" for episode_i in range(0,N_EPISODE): try: # Choosing a map in the list mapID = np.random.randint(0, 5) #Choosing a map ID from 5 maps in Maps folder randomly posID_x = np.random.randint(MAP_MAX_X) #Choosing a initial position of the DQN agent on X-axes randomly posID_y = np.random.randint(MAP_MAX_Y) #Choosing a initial position of the DQN agent on Y-axes randomly #Creating a request for initializing a map, initial position, the initial energy, and the maximum number of steps of the DQN agent request = ("map" + str(mapID) + "," + str(posID_x) + "," + str(posID_y) + ",50,100") #Send the request to the game environment (GAME_SOCKET_DUMMY.py) minerEnv.send_map_info(request) # Getting the initial state minerEnv.reset() #Initialize the game environment s = minerEnv.get_state()#Get the state after reseting. 
#This function (get_state()) is an example of creating a state for the DQN model maxStep = minerEnv.state.mapInfo.maxStep # Get the maximum number of steps for each episode in training done = False # Flag indicates that the episode ends score = 0 # Score for entire episode for step in range(0,maxStep): action = ddqn_agent.choose_act(s) # Getting action from the model minerEnv.step(str(action)) # Performing the action in order to obtain new state s_next = minerEnv.get_state() # Getting a new state reward = minerEnv.get_reward() # Getting a reward done = minerEnv.check_terminate() # Checking the end status of the episode # list_action_per_eps.append(action) # For debugging purpose # Store current state and perform learning for current state ddqn_agent.remember(s,action, reward, s_next,done) # Push current state to memory for experience replay ddqn_agent.learn() # Performing learn and push current state to the memory score += reward # Total reward of an episode s = s_next # Assign the next state for the next step if done == True: break # Update epsilon for every episode ddqn_agent.update_epsilon() # For output purpose eps_history.append(ddqn_agent.epsilon) ddqn_scores.append(score) avg_score = np.mean(ddqn_scores[max(0, episode_i-100):(episode_i+1)]) print('----------------------------------------') print('episode ', episode_i) print('score %.2f' %score) print('epsilon %.2f' %ddqn_agent.epsilon) print('average score %.2f' %avg_score) print('----------------------------------------') # Save DDQN model for every 100 iteration if episode_i % SAVE_NETWORK == 0 and episode_i > 0: ddqn_agent.save_model("/", "DDQNModel_" + "ep_" + str(episode_i)) except Exception as e: import traceback traceback.print_exc() break # + [markdown] id="AI1y6yPMAHSQ" colab_type="text" # # Plotting training process # + id="ZFgMpFOrXE4j" colab_type="code" colab={} import pandas as pd # + id="KPn-T8XeCL4A" colab_type="code" colab={} # eps_history # ddqn_scores # avg_score
56,623
/Notebooks/PDF_CDF_Normal.ipynb
368aaef5451836766d9351d29e8fc56c29a41bf4
[]
no_license
Andre-Williams22/DS-1.1-Data-Analysis
https://github.com/Andre-Williams22/DS-1.1-Data-Analysis
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
160,740
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # DFS, recursion class Solution(object): def permute(self, nums): """ :type nums: List[int] :rtype: List[List[int]] """ results = [] self.dfs(nums, [], results) return results def dfs(self, nums, res, results): # base case if len(nums) == 1: results.append(res + [nums[0]]) return for i in range(len(nums)): next_nums = nums[:i] + nums[i+1:] self.dfs(next_nums, res+[nums[i]], results) s = Solution() s.permute([1, 2, 3]) # - visualize this data, we'll use the [seaborn](https://seaborn.pydata.org/) library import seaborn as sns sns.distplot(df['Age'].dropna(), hist=True, kde=False, bins=16) # - Now let's plot the PDF of Age in Titanic import seaborn as sns sns.distplot(df['Age'].dropna(), hist=True, kde=True, bins=16) # ## Activity: In PDFs, where does the y axes numbers come from? # # For example, at Age 20, why is the y-value around 0.030? # Y IS THE PROBABILTY OF THE AGE BEING 20 122/714/5 # + import pandas as pd # custom histogram function # ls = input list # num_interval = number of intervals def custom_hist(ls, num_interval): hist_ls_dict = {} # minimum value in ls min_ls = np.min(ls) # maximum value in ls max_ls = np.max(ls) # Interval interval = np.ceil((max_ls - min_ls) / num_interval) for j in range(len(ls)): # Create how many elements in ls are at each interval j if j in hist_ls_dict: hist_ls_dict[j] += 1 else: hist_ls_dict[j]= 1 return pd.DataFrame([hist_ls_dict]).plot.hist(grid=True, bins=20, rwidth=0.9) #turns data into a pandas df print(custom_hist(df['Sex'].str.contains('Female'), df['Age'].value_counts())) # - hist_dict = custom_hist(df['Age'].dropna().values, 16) sum(hist_dict.values()) 122/714/4.97375 # ## Activity: What percent of passengers are younger than 40? 
# Find the number of passengers younger than 40 How_many_younger_40 = df[df['Age'] <= 40] # Find the percentage of passengers who are younger than 40 # Do this by dividing the number of passengers younger than 40 by the total number of passengers (with an age) pr_below_40 = len(How_many_younger_40)/len(df['Age'].dropna()) pr_below_40 # ## It is not easy to calculate this percentage from PDFs as we should compute the area # ## Cumulative Density Function (CDF) # # - In above example, we could not easily obtain the percentage from a PDF, although it is possible. # # - This is much easier if we use a CDF. A CDF calculates the probability that a random variable is less than a threshold value # # - Let's learn CDF by example: given an array of numbers (our random variable) and a threshold value as input: # # 1. Find the minimum value in the array # 1. Set the threshold to be the minimum value of the array # 1. For a given array of numbers and a given threshold, count all of the elements in the array that are less than the threshold, and divide that count by the length of the array # 1. 
Repeat step three, increasing the threshold by one, until you go through step three where threshold is equail to the maximum value in the array # # # + def count_lower(array, threshold): #min_array = min(array) #max_array = max(array) counter = 0 for i in array: if i < threshold: counter += 1 total = counter/len(array) def count_upper(array, step_size): thres_hold_range = range(min(array), max(array), step_size) for i in thres_hold_range: count_lower(array, thres_hold_range) # + ls_age = df['Age'].dropna().values def calculate_cdf(x, threshold): return np.sum(x <= threshold) # Create an array cdf_age where each value is the cdf of the age for each threshold cdf_age = [calculate_cdf(ls_age, r)/len(ls_age) for r in range(int(np.min(ls_age)), int(np.max(ls_age)))] print(cdf_age) # + import matplotlib.pyplot as plt plt.plot(range(int(np.min(ls_age)), int(np.max(ls_age))), cdf_age) # - # ## More about PDFs # # Below we show a Violin plot. Per [Wikipedia](https://en.wikipedia.org/wiki/Violin_plot), a violin plot is a method of plotting numeric data. It is similar to a box plot, with the addition of a rotated kernel density plot on each side. # # Violin plots are similar to box plots, except that they also show the **probability density (what a PDF shows!)** of the data at different values, usually smoothed by a kernel density estimator. sns.violinplot(x="Sex", y="Age", data=df) # ## Normal Distribution # # - It is possible that when we plot a histogram or PDF of an array, it has a Bell Shape # # - The name of this histogram is **Normal** # + import numpy as np import seaborn as sns # Generate 1000 samples with 60 as its mean and 10 as its std a = np.random.normal(60, 10, 1000) sns.distplot(a, hist=True, kde=True, bins=20) #plotting normal distribution # - sns.distplot(a, hist=True, kde=True, bins=20) # ## Activity: # # - The intrsuctor of DS graded last term's final exam. They are reporting that the mean was 60 (with scale of 100) with standard deviation of 10. 
# # _What is the probability that students got more than 70?_ # + from scipy.stats import norm print(norm.sf(70, loc=60, scale=10)) # or 1 - norm.cdf(70, loc=60, scale=10) # - # ## Normal Distribution Properties: # # When the data is Normally distributed: # # - 68% of the data is captured within one standard deviation from the mean. # - 95% of the data is captured within two standard deviations from the mean. # - 99.7% of the data is captured within three standard deviations from the mean. # <br><img src="http://www.oswego.edu/~srp/stats/images/normal_34.gif" /><br> # ## Activity: # # - Show that about 68% of the values are in the [50, 70] range # norm.cdf(70, loc=60, scale=10) - norm.cdf(50, loc=60, scale=10) # ## If we scale the Normal Distribution, the result is zero mean and unit std # # **Unit std** means you are converting your data from its original units to units of standard deviation b = (a - 60)/10 sns.distplot(b, hist=True, kde=True, bins=20) b = np.random.normal(60, 5, 1000) sns.distplot(b, hist=True, kde=True, bins=20) sns.distplot(a, hist=True, kde=True, bins=20) # ### Subract All elements in a from its mean and then divide all the elements by its std. # + new_lst = [] for i in a: num = (np.mean(a) - i) / np.std(a) new_lst.append(num) sns.distplot(new_lst, hist=True, kde=True, bins=20) # mean should be 0 # std is 1 # - # ## Z-Distribution # # - Z-distribution is another name for _standard Normal distribution_ # # - When the samples of our numerical array are Normal with an arbitrary mean and std # # - If scale each element by subtracting elements from the mean, and divide over the std, then the new array would be a Normal distribution with zero mean, and std 1 # - The Z-Distribution is used for hypothesis testing
7,231
/notebooks/williamgao666/visualization-of-wine-attributes.ipynb
b02b702f1f02d934ed0fbf7d95849bb0cb22ca1f
[]
no_license
Sayem-Mohammad-Imtiaz/kaggle-notebooks
https://github.com/Sayem-Mohammad-Imtiaz/kaggle-notebooks
5
6
null
null
null
null
Jupyter Notebook
false
false
.py
6,545
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from sklearn.svm import SVC from yellowbrick.features import Rank2D #3D plot from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot; import cufflinks as cf; init_notebook_mode(connected = True); cf.go_offline() import plotly.graph_objs as go #sklearn from sklearn.preprocessing import normalize from sklearn.model_selection import train_test_split # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" #Loading dataset wine = pd.read_csv('../input/winequality-red.csv');wine.head() # + _uuid="9310b97fd021326ba8c7a471283b3f91276bfea4" wine.info() # + _uuid="d98e2f7e01929db689bdc3ee57467040f6446d3c" wine.describe() # + _uuid="97d1b328c146f83e66efdc0ff2ec5d2ca551265e" def grading(row): if row['quality'] > 6: return 1 else: return 0 #1 = worth buying, 0 = not worth buying wine['grade'] = wine.apply(grading, axis=1) wine.head() # + _uuid="30f660bd5896ca14ca777abdda856ef1f874faf9" g = sns.FacetGrid(wine, col="grade", hue="quality") g = (g.map(plt.scatter, "alcohol", "quality", edgecolor="w").add_legend()) # + _uuid="4d55eb45eb1f7351e8de0c4e251f1ac56ec86241" plt.subplots(figsize=(20,15)) ax = plt.axes() ax.set_title("Wine Grade Heatmap") corr = wine.corr() #heat map only take 2d array #insert mask # mask = np.zeros_like(corr) # mask[np.triu_indices_from(mask)] = True with sns.axes_style("white"): sns.heatmap(corr, mask=None, xticklabels=corr.columns.values, yticklabels=corr.columns.values) #volatile, acidity, 
alcholhol # + _uuid="386901c68e2f160f9ab3601610c2b5f16eecede3" sns.pairplot(wine) # + _uuid="0dd6cf4625e9b4fc2018e15426b8c842f606b344" trace1 = go.Scatter3d( x=wine["volatile acidity"], y=wine["alcohol"], z=wine["quality"], mode='markers', text=wine["total sulfur dioxide"], marker=dict( size=12, color=wine["pH"], # set color to an array/list of desired values colorscale='Viridis', # choose a colorscale opacity=0.8 ) ) data = [trace1] layout = go.Layout( margin=dict( l=0, r=0, b=0, t=0 ), title="Wine Quality", scene = dict( xaxis = dict(title='X: volatile acidity'), yaxis = dict(title="Y: alcohol"), zaxis = dict(title="Z: quality"), ), width=1000, height=900, ) fig = go.Figure(data=data, layout=layout) iplot(fig) # + _uuid="a90e4f7c3d5f15f2d94f477ef66bb8a4cf34352b" modified_df = wine[["grade", "alcohol", "volatile acidity"]]; modified_df.head() # + _uuid="7704cce97a06ad056fd6ed1a7c9c4549f62506d3" y = modified_df.grade X = modified_df[["grade", "alcohol", "volatile acidity"]] classes = ["good","bad"] # Create the train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # + _uuid="3b7d72be1658d637e56b12da3ad706b5de9cecc9" from yellowbrick.classifier import ROCAUC from sklearn.linear_model import LogisticRegression # Instantiate the visualizer with the classification model visualizer = ROCAUC(LogisticRegression(), classes=classes) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data # + _uuid="914519707bb84d2888cb8c00d9dd153cbe8647b5" # + _uuid="b75b8529ff85c37c87345ec4241c467c9522c633" g = visualizer.poof() # + _uuid="f4d2bae69b1cbe414b937ee73c7160c3532a4bea" # + _uuid="5ad0169edc806a2e9d62683e624b531acc8e8d03" # + _uuid="e23c3a018f0dd32abe8e06eb99b0d23c51bb66f1" # + _uuid="c2c612bf378a1c5e62a2dbb595ce00e6e55c4239" # + _uuid="c501c7f8619218056101c2ffb881690a6fa4e106" # + _uuid="cc960cc49b85f837cafd1270d0edcddf2cb42fdd" # + 
_uuid="e9edd4b6d2d1a9f6eac51399b943181709f743e8"
4,315
/OpenCv/3 Resimler Üzerinde Pixel İşlemleri ve Performans.ipynb
06621872cf9dc0b1b75bf2fdf78d29324ee4a0bc
[]
no_license
emrahtema/emozade_tasimacilik
https://github.com/emrahtema/emozade_tasimacilik
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
58,792
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import cv2 as cv import numpy as np import matplotlib as mp from matplotlib import pyplot as plt #https://docs.opencv.org/4.0.1/d7/d16/tutorial_py_table_of_contents_core.html # - # # 1 Pixellere Erişmek ve Oynamak # + img = cv.imread('sabertooth.jpg') px = img[100,100] print(px) # Sadece mavi pixele erişmek. Blue 0, Red 2, Green 1 blue = img[100,100,0] print(blue) img.itemset((100,100,2),100)# kırmızı renk değerini değiştirelim print(img.item(100,100,2)) # Pixel değerlerini de aynı şekilde değiştirebiliyoruz img[100,100] = [255,255,255] print("----") # Resmin özelliklerine erişebiliriz print(img.shape) print(img.size) print(img.dtype) print("----") # Resimdeki bir takım pixelleri başka yere taşıyabiliriz secilen_alan = img[300:500, 300:400] img[100:300, 100:200] = secilen_alan #plt.subplot(121); plt.imshow(img) # Mavi kanalları sıfır yapalım img[:,:,0] = 0 plt.subplot(121); plt.imshow(img) #------------------------------ # 2'de Resim birleştirme, birbirine ekleme, belirli bir rengi silip saydam yapma gibi özellikler var. # 3'te OpenCV işlemlerinde performansla ilgili şeyler var. # -
1,382
/fig_ipynb/chap04/.ipynb_checkpoints/fig4.12right2-checkpoint.ipynb
b14b04fe0a2b1fdb35aaaae99721df66b12c0986
[]
no_license
tokadome/textbookDSP
https://github.com/tokadome/textbookDSP
11
1
null
null
null
null
Jupyter Notebook
false
false
.py
2,294
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [Root] # name: Python [Root] # --- # + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"} # #%matplotlib osx # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.lines as ln import numpy as np plt.rcParams['font.family'] = 'Times New Roman' plt.rcParams['mathtext.fontset'] = 'cm' step = 0.01 fig = plt.figure(dpi=300) #fig = plt.figure() fig.patch.set_alpha(0.) #(a) ax =fig.add_subplot(111) omega = 2 theta = 0.4 t = np.arange(-5, 5, step) x = (1/2)*(np.cos(5*omega*(t-theta))) plt.xlim([-5, 5]) plt.ylim([-1.2, 1.2]) plt.axhline(0, color='k', linewidth=2) ax.xaxis.set_label_coords(1.02, 0.52) ax.yaxis.set_label_coords(0.05,1.02) plt.xlabel('$t$', fontsize=18) plt.ylabel('$x(t)$', fontsize=18, rotation=0) #plt.xticks([0,1,2,3,4,5,6]) plt.yticks([-1, 0, 1]) plt.plot(t, x, color='k') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1.0) #plt.savefig('./figures/fig4.12right2.eps') plt.savefig('./figures/fig4.12right2.png') plt.show() # + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"} plt.close()
1,330
/RecommendReview_Analysis.ipynb
04cd0ba4654be30b652f2c5a594e1bde6d2810d2
[]
no_license
lauralesmana/Analyzing-Fall-Guys-Review
https://github.com/lauralesmana/Analyzing-Fall-Guys-Review
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
62,215
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #notes, links, etc. #FRED data: #dataset: https://fred.stlouisfed.org/series/CCUSMA02CZM618N# #link structure API: https://api.stlouisfed.org/fred/series?series_id=GNPCA&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json key = '290c57887172f5dd570f2f3e19b006dd' series_ID = 'CCUSMA02CZM618N' #COVID-Data covid_url = 'https://covid.ourworldindata.org/data/owid-covid-data.csv' # - #import packages import requests import pandas as pd import matplotlib.pyplot as plt # + #Requests class: initialized with url to get data from and request type (kind) class Req: def __init__(self, url, kind, data = None): self.url = url self.kind = kind #class method to request data from url depending on type of requests (API, Direct Download, ...) #note API request yet specifically written for FRED request -> should be more general def request_data(self, index): if self.kind == 'DirectDl': self.data = pd.read_csv(self.url).set_index(index) elif self.kind == 'API': self.data = pd.DataFrame(requests.get(self.url).json()['observations']).set_index([index]).drop(['realtime_start', 'realtime_end'], axis=1) else: print('Request-type not eglible.') pass # - #create instances of Req for FRED and OWID request FRED_Req = Req('https://api.stlouisfed.org/fred/series/observations?series_id=CCUSMA02CZM618N&api_key=290c57887172f5dd570f2f3e19b006dd&file_type=json', 'API') OWID_Req = Req('https://covid.ourworldindata.org/data/owid-covid-data.csv', 'DirectDl') # + #request data from FRED + OWID, save as df + covid_df OWID_Req.request_data('date') FRED_Req.request_data('date') czk_df = FRED_Req.data covid_df = OWID_Req.data # - #make df from FRED data, convert index to datetime czk_df.index = pd.to_datetime(czk_df.index) czk_df = df.loc['2020-03-01':,:] czk_df.columns = ['CZK/USD'] 
czk_df['CZK/USD'] = czk_df['CZK/USD'].astype(float) #df #read COVID CSV, cut data on Czech Republic, drop empty columns and rows, convert index to datetime covid_df = covid_df.where(covid_df['location'] == 'Czechia').dropna(how='all') covid_df = covid_df.dropna(axis=1, how='all') covid_df.index = pd.to_datetime(covid_df.index) #covid_df #problem: FRED data monthly, OWID data daily -> need to convert OWID data #add up cases of a month, save in a list list_monthly_cases = [] list_monthly_cases_index = [] monthly_cases = covid_df.iloc[0]['new_cases'] for i in range(len(covid_df)-1): if covid_df.index[i].month == covid_df.index[i+1].month: monthly_cases = monthly_cases + covid_df.iloc[i+1]['new_cases'] else: list_monthly_cases.append(monthly_cases) list_monthly_cases_index.append(covid_df.index[i+1]) monthly_cases = covid_df.iloc[i+1]['new_cases'] #control if idea works -> it does print(list_monthly_cases) print(covid_df.loc['2020-04-30']['total_cases']-covid_df.loc['2020-03-31']['total_cases']) covid_df.loc['2020-03-31']['total_cases'] #safe list of monthly cases as df, concat with df on CZK/USD data monthly_cases_summed = pd.DataFrame(data = list_monthly_cases, index = list_monthly_cases_index, columns = ['Summed Monthly Cases']) result = pd.concat([czk_df,monthly_cases_summed], axis = 1) result # + #functions to plot data #get x axes def get_x_axes(data): x = data.index return x #get y axes def get_y_axes(data, column): y = data[f'{column}'] return y #plot data def plot_data(data, y, xticks, ylabel): x = get_x_axes(data) plt.plot(x, get_y_axes(data, y))# Plot some data on the axes. plt.xticks(x[::xticks]) plt.xlabel('Date') # Add an x-label to the axes. plt.ylabel(f'{ylabel}') # Add a y-label to the axes. plt.title(f'Plotted data from {x[0]} until {x[-1]}') # Add a title to the axes. #plt.legend() # Add a legend. 
# - plot_data(result, 'Summed Monthly Cases', 3, 'Summed Monthly Cases') plot_data(result, 'CZK/USD', 3, 'CZK/USD') , 'Composure', 'Crossing', 'Dribbling', 'FKAccuracy', 'Finishing', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'HeadingAccuracy', 'Interceptions', 'Jumping', 'LongPassing', 'LongShots', 'Marking', 'Penalties' ) for i, val in df.groupby(df['Position'])[player_features].mean().iterrows(): print('Position {}: {}, {}, {}, {}'.format(i, *tuple(val.nlargest(4).index))) # + # Foot preference of players df['Preferred Foot'] # - left = df.loc[df['Preferred Foot'] == 'Left'].count()[0] # 0 is the starting index left right = df.loc[df['Preferred Foot'] == 'Right'].count()[0] right foot = df.groupby("Preferred Foot").size().reset_index(name='NumberOfPlayers') foot # + # plot pie chart for preferred foot plt.figure(figsize=(16,8)) plt.title('Preferred Foot', fontsize=20, fontweight='bold') labels = 'Left Foot', 'Right Foot' explode=(0, 0.05) plt.rcParams['font.size'] = 20.0 plt.pie(foot["NumberOfPlayers"], explode=explode, labels=labels, autopct='%0.1f%%', startangle=90) plt.axis('equal') plt.show() # + # Weight of Professional Soccer Players (lbs) df.Weight # - df.Weight=[x.strip('lbs') if type(x)==str else x for x in df.Weight] # list comprehension df.Weight # take first value df.Weight[0] # we are getting output in str so convert it to integer df.Weight=[int(x.strip('lbs')) if type(x)==str else x for x in df.Weight] df.Weight[0] # find out light weight light = df.loc[df.Weight<125].count()[0] light # find out light medium weight light_medium = df.loc[(df.Weight>=125)&(df.Weight<150)].count()[0] # & statement light_medium # find out medium weight medium = df.loc[(df.Weight>=150)&(df.Weight<175)].count()[0] medium # find out medium heavy weight medium_heavy = df.loc[(df.Weight>=175)&(df.Weight<200)].count()[0] medium_heavy # find out medium heavy weight heavy = df.loc[df.Weight>=200].count()[0] heavy # + # plot pie chart for weight 
plt.figure(figsize=(8,5), dpi=100) #dots per inch plt.style.use('ggplot') df.Weight = [int(x.strip('lbs')) if type(x)==str else x for x in df.Weight] light = df.loc[df.Weight < 125].count()[0] light_medium = df[(df.Weight >= 125) & (df.Weight < 150)].count()[0] medium = df[(df.Weight >= 150) & (df.Weight < 175)].count()[0] medium_heavy = df[(df.Weight >= 175) & (df.Weight < 200)].count()[0] heavy = df[df.Weight >= 200].count()[0] weights = [light,light_medium, medium, medium_heavy, heavy] label = ['under 125', '125-150', '150-175', '175-200', 'over 200'] explode = (.4,.2,0,0,.4) plt.title('Weight of Professional Soccer Players (lbs)') plt.pie(weights, labels=label, explode=explode, pctdistance=0.8,autopct='%.2f %%') plt.show() # + # convert lbs to kg # + # 1 pound = 0.453592 kilogram # - chosen_columns=['Name','Weight'] ds = pd.DataFrame(df, columns = chosen_columns) ds.head() ds.Weight = ds.Weight.astype(float) ds['Weight-kg'] = 0.453592*ds['Weight'] ds.head() ds.drop(['Weight'],axis=1,inplace=True) ds.head() bins = np.linspace(min(ds["Weight-kg"]), max(ds["Weight-kg"]), 6) bins group_names = ['light', 'light_medium', 'medium', 'medium_heavy', 'heavy'] ds['weight-binned'] = pd.cut(ds['Weight-kg'], bins, labels=group_names, include_lowest=True) ds[['Name','Weight-kg','weight-binned']].head() plt.figure(figsize = (10, 10)) ax = sns.countplot(x = 'weight-binned', data = ds, palette = 'pastel') ax.set_title(label = 'Count of players based on their weight group', fontsize = 20) ax.set_xlabel(xlabel = 'Weight group', fontsize = 16) ax.set_ylabel(ylabel = 'Number of players', fontsize = 16) plt.show()
7,805
/Disaggregated-Data/weather-like-plot-HICP-by-item-ver-2.ipynb
1fea66bb37f8855615f33434add6df701725aef1
[ "MIT" ]
permissive
BadWizard/Inflation
https://github.com/BadWizard/Inflation
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
161,789
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Supervised Learning - Classification Example # Before reading through this program, it is recommended to make sure you read through the Supervised Learning - Regression article as there will be less basic explanations in this program. # # ### Overview # This program will make use of the MNIST dataset, a dataset containing handwritten digits, which is known as the Hello World of machine learning. This tutorial will go through creating and training the machine learning model, but later in the series we will use TensorFlow.js to embed this model into an interactive webpage to be able to draw our own digits which can then be classified. # # ### Importing Libraries # - Matplotlib is a library for creating graphs and other visuals # - Numpy is a high-performance mathematics library # - TensorFlow is the platform that the AI code will run on, with Keras being the interface to TF # + # %load_ext tensorboard import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow import keras print(tf.__version__) # - # ### Loading the Dataset # Instead of manually downloading the dataset like in the regression example, TensorFlow includes some example dataset which are already formatted that we can use. We make a reference to the MNIST dataset object and load the data which is in the form of 2 tuples of 2 arrays each. # # We output the shape the train set, it tells us there are 60000 images which are each 28 x 28 pixels in size. mnist = keras.datasets.mnist (trainImages, trainClasses), (testImages, testClasses) = mnist.load_data() trainImages.shape # We can also matplotlib to output some example images with their corresponding labels. 
This block of code creates a figure of (relative) size 10x10, and then iterates through the first 25 training images, creating a subplot for each, and showing the images in black and white with their label. plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.imshow(trainImages[i], cmap=plt.cm.binary) plt.xlabel(trainClasses[i]) plt.show() # ### Normalising the Data # We need to look at how the images are represented, to see if we need to make any changes to how the data is structured. Here we just take the first index in the train Images list, and look at the middle row. print(trainImages[0][14]) print("Max Value = " + str(trainImages.max())) print("Min Value = " + str(trainImages.min())) # Above, we see the values are in the range 0 - 255. The previous article explained that neural networks like to with values that are between 0 - 1 or -1 - 1. Since we know the values won't go below 0, we can normalise the data to be between 0 and 1. This is simply done by dividing every value by 255 (which we know is maximum value): trainImages = trainImages / 255 testImages = testImages / 255 trainImages[0][14] # ### Building our Model # # We will use a deep neural network using the Keras sequential model. We start with a layer that flattens the 28x28 pixel input into a 784x1 input. # We then add a dense layer of size 128 with the reLU activation, with a dropout rate of 0.1. # # Dropout is used to prevent overfitting which is where the model gets too fixated on the data it is learning from and would not be able to generalise well. This is done by randomly setting some of the weights to 0 during training which forces the model to constantly adapt and change. # # We then add another dense layer but this time with 32 nodes with reLU. 
# # And finally, for the output we have a dense layer with 10 nodes - one for each possible number (0-9) # # We compile it using the adam optimiser, and to use the Sparce Categorical Cross Entropy loss function (which is a reommended loss function of classification problems). We also include the metrics parameter which will output the accuracy as it trains. # # We can also run the evaluate function before training to see how well (or badly) it would do without training - it was getting 8-12% accuracy (roughly 1 in 10) which is the probability you would get if you randomly picked an answer each time. # + model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dropout(0.1), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(10) ]) model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) preTrain = model.evaluate(testImages, testClasses, verbose=2) # - # ### Training our Model # # Now the model has been made, we can train it with the train images and train classes, and telling it to run 10 iterations (epochs). By the end of training, this was getting an accuracy of ~0.99 (99% accurate) on the training data! model.fit(trainImages, trainClasses, epochs=10) # ### Evaluating our Model # # Once trained, we can evaluate the model using the test images and classes. Here we also use the verbose parameter to just output the results at the end of evaluating - it was getting ~98%. # # The evaluation accuracy will be usually than the training accuracy due to overfitting like mentioned earlier. Overfitting can be reduced through the user of dropout and by increasing the volume and variation of data - it's very difficult to get exactly the same performance between training and testing. 
testEval = model.evaluate(testImages, testClasses, verbose=2) # ### Making Predictions # # The model currently outputs in the form of Logits, we can add softmax layer which transforms the Logit values into typical probabilities that are easier to interpret. # # Once we create this predictor model, we can then feed it all the testing images to get an array of the predicted values. # # You could also create your own images and use a library such as Pillow to convert the image into a 28x28 monochrome image that can be fed into the model - this is something we will do with TensorFlow.js in a future tutorial predictor = keras.Sequential() predictor.add(model) predictor.add(keras.layers.Softmax()) predictions = predictor.predict(testImages) # With that list of predicted values, we can use matplotlib to visualise the image the model sees, and the output of the model in terms of the probabilities of each value # # To do this, we randomly pick an index of a test image to look at. We create a subplot , plot the image in one plot like we did previously and then create a bar chart # import random for count in range(5): index = random.randint(0, len(testImages)) plt.figure(figsize=(6,3)) ## Image plt.subplot(1,2,1) plt.imshow(testImages[index], cmap=plt.cm.binary) plt.xlabel(testClasses[index]) plt.xticks([]) plt.yticks([]) ## Prediction Chart plt.subplot(1,2,2) plt.xticks(range(10)) plt.yticks([]) bar = plt.bar(range(10), predictions[index], color='gray') bar[np.argmax(predictions[index])].set_color('red') bar[testClasses[index]].set_color('green') * csv (comma separated values): arquivos txt que contém dados tabulares separados por vírgulas # * tsv (tab separated values): idem ao caso acima, contudo o separador é o caracter 'tab' # * xlsx: arquivos de excel # * e vários outros. Experimente digitar 'pd.read' e depois 'tab' pd.read_excel()
7,588
/Fish.ipynb
3b88f6b3ecfbc6750789da8d410c4dda631b93a5
[]
no_license
vesko-vujovic/competitive-programming
https://github.com/vesko-vujovic/competitive-programming
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
3,353
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd dataset=pd.read_csv(r"C:\Users\Lasya Priya\Downloads\Telegram Desktop\Fish.csv") dataset.head() dataset.isnull().any() dataset['Species'].unique() x=dataset.iloc[:,0:6].values y=dataset.iloc[:,6].values x.shape x from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer ct = ColumnTransformer([("oh",OneHotEncoder(),[0])],remainder="passthrough") #converts data into binary values x=ct.fit_transform(x) x x.shape from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.2, random_state=1) from sklearn.linear_model import LinearRegression mlr=LinearRegression() mlr.fit(x_train,y_train) ypred=mlr.predict(x_test) ypred y_test x_test from sklearn.metrics import r2_score accuracy=r2_score (ypred,y_test) accuracy yp=mlr.predict(ct.transform(dataset.head())) yp dataset.head() a=["Bream",34,56,2,34,344] b=ct.transform([a]) b yp=mlr.predict(b) yp
1,278
/Bike_Decision_and_Random_Forest_Tree_Hot_Encoding_and_algo_params_change.ipynb
b05536b7b3c6195bbccb93fe84ab42e397728511
[]
no_license
Dhatu/ML-DL-Projects
https://github.com/Dhatu/ML-DL-Projects
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
21,275
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Transfer Learning # # In this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html). # # ImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU). # # Once trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy. # # With `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now. # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import torch from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms, models # - # Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`. 
# + data_dir = 'Cat_Dog_data' # TODO: Define transforms for the training data and testing data train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # Pass transforms in here, then run the next cell to see how the transforms look train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) testloader = torch.utils.data.DataLoader(test_data, batch_size=64) # - # We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.html#id5). Let's print out the model architecture so we can see what's going on. model = models.densenet121(pretrained=True) model # This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers. 
# + # Freeze parameters so we don't backprop through them for param in model.parameters(): param.requires_grad = False from collections import OrderedDict classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(1024, 500)), ('relu', nn.ReLU()), ('fc2', nn.Linear(500, 2)), # 2 => cat & dog ('output', nn.LogSoftmax(dim=1)) ])) # attach our untrained classifier to the model model.classifier = classifier # - # With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time. # # PyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU. 
# + import time for device in ['cpu', 'cuda']: criterion = nn.NLLLoss() # Only train the classifier parameters, feature parameters are frozen optimizer = optim.Adam(model.classifier.parameters(), lr=0.001) model.to(device) for ii, (inputs, labels) in enumerate(trainloader): # Move input and label tensors to the GPU inputs, labels = inputs.to(device), labels.to(device) start = time.time() outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() if ii==3: break print(f"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds") # - # You can write device agnostic code which will automatically use CUDA if it's enabled like so: # ```python # # at beginning of the script # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # # ... # # # then whenever you get a new Tensor or Module # # this won't copy if they are already on the desired device # input = data.to(device) # model = MyModule(...).to(device) # ``` # # From here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily. # # >**Exercise:** Train a pretrained models to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, it's also a good model to try out first. Make sure you are only training the classifier and the parameters for the features part are frozen. 
# + ## TODO: Use a pretrained model to classify the cat and dog images # Use GPU if it's available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = models.densenet121(pretrained=True) # Freeze parameters so we don't backprop through them for param in model.parameters(): param.requires_grad = False model.classifier = nn.Sequential(nn.Linear(1024, 256), nn.ReLU(), nn.Dropout(0.2), nn.Linear(256, 2), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() # Only train the classifier parameters, feature parameters are frozen optimizer = optim.Adam(model.classifier.parameters(), lr=0.003) model.to(device); # - epochs = 1 steps = 0 running_loss = 0 print_every = 5 for epoch in range(epochs): for inputs, labels in trainloader: steps += 1 # Move input and label tensors to the default device inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() logps = model.forward(inputs) loss = criterion(logps, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0: test_loss = 0 accuracy = 0 #turn model to evaluation mode model.eval() with torch.no_grad(): for inputs, labels in testloader: inputs, labels = inputs.to(device), labels.to(device) logps = model.forward(inputs) batch_loss = criterion(logps, labels) test_loss += batch_loss.item() # Calculate accuracy ps = torch.exp(logps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)).item() print(f"Epoch {epoch+1}/{epochs}.. " f"Train loss: {running_loss/print_every:.3f}.. " f"Test loss: {test_loss/len(testloader):.3f}.. " f"Test accuracy: {accuracy/len(testloader):.3f}") running_loss = 0 model.train()
9,716
/project.ipynb
96fe443ecc270fd3bf376338e44336d1d8e6341c
[]
no_license
Yasin901/Graph-theory
https://github.com/Yasin901/Graph-theory
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
210,153
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MTH5001: Introduction to Computer Programming 2018/19 # # ## Final Report Project: "Networks" # ### Instructions: # # First, please type your name and student number into the Markdown cell below: # **Name:** Yasin Rehman Butt-Shah # # **Student number:** 160226966 # You must write your answers in this Jupyter Notebook, using either Markdown or Python code as appropriate. (You should create new code and/or Markdown cells in the appropriate places, so that your answers are clearly visible.) # # Your code must be well documented. As a rough guide, you should aim to include one line of comments for each line of code (but you may include more or fewer comments depending on the situation). You should also use sensible variable names, so that your code is as clear as possible. If your code works but is unduly difficult to read, then you may lose marks. # # For this project, you will need to use the Python package [NetworkX](https://networkx.github.io/) extensively. However, to test your coding skills, in certain questions you will be restricted to using only specific functions. # These restrictions are made clear below (see questions 4 and 8). # # ### Submission deadline: # # You must submit your work via QMPlus (to the "Final Report Project" assignment in the "Final Report Project" section). # # The submission deadline is **11:55pm on Monday 29 April, 2019**. Late submissions will be penalised according to the School's [guidelines](https://qmplus.qmul.ac.uk/mod/book/view.php?id=807735&chapterid=89105). # # Your lecturers will respond to project-related emails until 5:00pm on Friday 26 April, 2019, only. You should aim to have your project finished by this time. # # ### Marking: # # The project is worth 70% of your final mark for this module. 
# # The total number of marks available for the project is 100. # # Attempt all parts of all questions. # # When writing up your project, good writing style is even more important than in written exams. # According to the [advice in the student handbook](https://qmplus.qmul.ac.uk/mod/book/view.php?id=807735&chapterid=87786), # # > To get full marks in any assessed work (tests or exams) you must normally not only give the right answers but also explain your working clearly and give reasons for your answers by writing legible and grammatically correct English sentences. Mathematics is about logic and reasoned arguments and the only way to present a reasoned and logical argument is by writing about it clearly. Your writing may include numbers and other mathematical symbols, but they are not enough on their own. You should copy the writing style used in good mathematical textbooks, such as those recommended for your modules. **You can expect to lose marks for poor writing (incorrect grammar and spelling) as well as for poor mathematics (incorrect or unclear logic).** # # ### Plagiarism warning: # # Your work will be tested for plagiarism, which is an assessment offence, according to the [School's policy on Plagiarism](https://qmplus.qmul.ac.uk/mod/book/view.php?id=807735&chapterid=87787). In particular, while only academic staff will make a judgement on whether plagiarism has occurred in a piece of work, we will use the plagiarism detection software "Turnitin" to help us assess how much of work matches other sources. You will have the opportunity to upload your work, see the Turnitin result, and edit your work accordingly before finalising your submission. # # You may summarise relevant parts of books, online notes, or other resources, as you see fit. # However, you must use your own words as far as possible (within reason, e.g. 
you would not be expected to change the wording of a well-known theorem), and you **must** [reference](https://qmplus.qmul.ac.uk/mod/book/view.php?id=807735&chapterid=87793) any sources that you use. Similarly, if you decide to work with other students on parts of the project, then you **must** write up your work individually. You should also note that most of the questions are personalised in the sense that you will need to import and manipulate data that will be unique to you (i.e. no other student will have the same data). # ## Background information # # In this project you will learn about a field of mathematics called [graph theory](https://en.wikipedia.org/wiki/Graph_theory). # A **graph** (or **network**) is simply a a collection of **nodes** (or **vertices**), which may or may not be joined by **edges**. # (Note that this is not the same as the 'graph' of a function.) # # Graphs can represent all sorts of real-world (and, indeed, mathematical) objects, e.g. # # * social networks (nodes represent people, edges represent 'friendship'), # * molecules in chemistry/physics (nodes represent atoms, edges represent bonds), # * communications networks, e.g. the internet (nodes represent computers/devices, edges represent connections). # # In this project we will only consider **undirected** graphs (see the above Wikipedia link for a definition). # # Conventiently, Python has a package, called [NetworkX](https://networkx.github.io/), for constructing and analysing graphs. # Let's look at an example. # Below we create the famous [Petersen graph](https://en.wikipedia.org/wiki/Petersen_graph) and use some basic NetworkX functions to learn a bit about it. 
# + # import NetworkX and other useful packages import numpy as np import numpy.linalg as la import matplotlib.pyplot as plt import networkx as nx # create the Petersen graph, storing it in a variable called "PG" PG = nx.petersen_graph() # - # Before we doing anything else, it would make sense to draw the graph, to get an idea of what it looks like. # We can do this using the NetworkX function `draw_networkx` (together with our old favourtie, matplotlib). nx.draw_networkx(PG, node_color = 'orange', edge_color = 'blue', with_labels=True) plt.xticks([]) plt.yticks([]) plt.show() # We can see that the graph has 10 nodes, labelled by the integers $0,1,\ldots,9$. # It is also possible to label nodes with other data types, most commonly strings, but we won't do that in this project. # The nodes of a graph can be accessed via the method `nodes()`: PG.nodes() # You can convert this to a Python list if you need to: list(PG.nodes()) # This (hopefully) makes it clear that the node labels do in fact have type `int`, at least in our example. # You can also see from the picture that the graph has 15 edges. # These can be accessed using the method `edges()`: PG.edges() # Again, you can convert this to a list if you need to (try it), and you will see that the elements of the list are tuples. In either case, if you compare the output with the picture, it should become clear what it means, i.e. two nodes labelled $i$ and $j$ are joined by an edge if and only if the pair $(i, j)$ appears in `PG.edges()`. # So far we haven't said much about how graphs are related to **mathematics**. It turns out that a graph can be completely defined by its [adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix). This is simply a matrix $A$ defined as follows: # # * $A$ has size $n \times n$, where $n$ is the number of nodes in the graph; # * if the nodes labelled $i$ and $j$ form an edge, then the $(i,j)$-entry of $A$ is $1$; if they don't form an edge, the $(i,j)$-entry of $A$ is $0$. 
# # This idea is the foundation of [algebraic graph theory](https://en.wikipedia.org/wiki/Algebraic_graph_theory), a field of mathematics used to study graphs by analysing certain matrices. # # Not surprisingly, you can compute the adjacency matrix of a graph using an appropriate NetworkX function. Let's do this for the Petersen graph: A = nx.adjacency_matrix(PG) # Note that if you print this 'adjacency matrix' (try it), it doesn't actually look much like a matrix. This is because it doesn't have type `numpy.ndarray` like the matrices/arrays we've worked with in class: type(nx.adjacency_matrix(PG)) # However, you can convert it to a `numpy.ndarray` by calling the method `toarray()`: A = A.toarray() type(A) # After doing this, the adjacency matrix looks like you would expect, so you can use all the usual `numpy.linalg` functions on it: print(A) # Make sure that you understand what all these $0$'s and $1$'s mean: the $(i,j)$-entry of the adjacency matrix is $1$ if and only if the edges labelled $i$ and $j$ form an edge in the graph; otherwise, it is $0$. For example (remembering that Python starts counting from $0$, not from $1$): the $(0,4)$ entry is $1$, and in the picture above we see that nodes $0$ and $4$ form an edge; on the other hand, the $(1,7)$ entry is $0$, and accordingly nodes $1$ and $7$ don't form an edge. # You will be working with matrices related to graphs quite a lot in this project, so before you begin you should make sure that you understand what the code we've given you above is doing. # You may also like to work through the official [NetworkX tutorial](https://networkx.github.io/documentation/stable/tutorial.html) before attempting the project, bearing in mind that not everything in the tutorial is relevant to the project. # (Alternatively, Google for another tutorial if you don't like that one.) 
# # **A final remark before we get to the project itself:** # # You can rest assured that the graphs we consider this project all have the following nice properties: # # * They are **connected**. This means that for every pair of nodes $i$ and $j$, there is a 'path' of edges joining $i$ to $j$. For example, the Petersen graph is connected, e.g. the nodes labelled $6$ and $7$ do not form an edge, but we can still reach node $7$ from node $6$ via the edges $(6,9)$ and $(9,7)$. # * They are **simple**. This means that there is never an edge from a node to itself. # # You may come across these terms when you are researching the relevant mathematics for various parts of the project, so you should know what they mean. # ## The project # # As we have already mentioned, in this project you will make extensive use of the Python package [NetworkX](https://networkx.github.io/), which allows you to create and analyse graphs. # You are expected to read the relevant parts of the NetworkX documentation, or otherwise learn how to use whatever Python functions you need to complete the project. # However, the mini-tutorial which we have provided above should be enough to get you started. # # You will also need to research and summarise some mathematics related to graphs, and to use your findings to write certain pieces of code 'from scratch', instead of of using NetworkX functions. # In these cases (questions 4 and 8), it is **strongly recommended** that you use NetworkX to check your answers. # # You should structure your report as follows: # # ### Part I: Data import and preliminary investigation [10 marks] # # You have been provided with a Python file called **"data.py"** on QMPlus, which you should save in the same directory as this Jupyter notebook. # This file contains a function `create_graph` which constructs a random graph that you will be analysing throughout the project. 
# By following the instructions in question 1 (below), you will create a graph that is unique to you, i.e. no two students will have the same graph. # # **1. [5 marks]** Execute the following code cell to create your graph, storing it in a variable called `G` (you can change the variable name if you like, but we recommend leaving it as it is). You **must** replace the number "123456789" with your 9-digit student number. # # *Important note: If you do not do this correctly, you will score 0 for this question, and if you are found to have used the same input as another student (rather than your individual student number), then your submission will be reviewed for plagiarism.* # + from data import create_graph # Replace "123456789" below with your 9-digit student number G = create_graph(160226966) # Replace "123456789" above with your 9-digit student number # - # **2. [5 marks]** Draw your graph, and calculate how many nodes and edges it has. # + # Draws graph and hide x and y axis nx.draw_networkx(G, node_color = 'orange', edge_color = 'blue', with_labels=True) plt.xticks([]) plt.yticks([]) plt.show() # Prints number of nodes and edges print("Number of nodes: " + str(len(G.nodes()))) print("Number of edges: " + str(len(G.edges()))) # - # ### Part II: Distance matrices and shortest paths [30 marks] # # Many properties of graphs can be analysed by using matrices/linear algebra. # The rest of your report will involve researching/summarising some of the relevant mathematics, and writing/using Python code to analyse certain properties of your graph from Part I. # As explained above, you are allowed to summarise information from books and/or web pages, but you must use your own words and clearly reference any sources you use. # # **3. [10 marks]** Explain what a "path" between two nodes in a graph is, and what the "distance" between two nodes is. Explain also what the "distance matrix" of a graph is, and how it can be computed using the adjacency matrix. 
# Here you should discuss arbitrary (undirected, simple, connected) graphs, not your specific graph from Part I.
#
# Note: You do **not** need to give any proofs, but you must reference any material you use, as explained in the plagiarism warning above.

# A path between two nodes is a finite sequence of edges which joins a sequence of nodes, starting at one of the two nodes and ending at the other. The distance between two nodes is the length (number of edges) of a shortest path connecting them. The distance matrix is a matrix showing the distance between all pairs of nodes: its (i,j)-entry is the distance between nodes N<sub>i</sub> and N<sub>j</sub> of the graph. The adjacency matrix A tells us whether two nodes are joined by an edge; its (i,j)-entries equal the number of edges between the two nodes, so in our case the (i,j)-entries are 1 if the nodes are connected, or 0 otherwise. To compute the distance matrix from the adjacency matrix A, take successive powers A<sup>n</sup>: the distance between nodes i and j is the smallest n for which the (i,j)-entry of A<sup>n</sup> is non-zero.
#
# **References:**
#
# Barile, Margherita. "Graph Distance." From MathWorld--A Wolfram Web Resource, created by Eric W. Weisstein. http://mathworld.wolfram.com/GraphDistance.html
#
#
# **4. [10 marks]** Write a function `distance_matrix` which computes the distance matrix of a graph.
# Your function should return a matrix, represented as an array of type `numpy.ndarray`, of the same shape as the adjacency matrix of the graph.
# You may use the NetworkX function `adjacency_matrix` to compute the adjacency matrix of the input graph, but you **must not use any other NetworkX functions**.
# Function that takes a graph as an input and outputs the distance matrix in the form of a numpy 2d array
def distance_matrix(G):
    """Return the distance matrix of ``G`` as a ``numpy.ndarray``.

    The (i, j)-entry is the length of a shortest path from node i to node j,
    found as the smallest power k for which the (i, j)-entry of A^k is
    non-zero (A being the adjacency matrix). Assumes ``G`` is connected, as
    the project brief states, so every distance is finite and the loop
    terminates.
    """
    adjacency = nx.adjacency_matrix(G).toarray()
    n = len(adjacency)
    # Start from a *copy*: the original code aliased the adjacency matrix and
    # then wrote distances into it, corrupting every matrix power computed
    # afterwards.
    distance = adjacency.copy()
    power = adjacency.copy()  # invariant: power == A^k
    k = 1
    while True:
        # Off-diagonal pairs whose distance is still unknown (entry 0).
        unknown = [(i, j)
                   for i in range(n)
                   for j in range(n)
                   if i != j and distance[i, j] == 0]
        if not unknown:
            break
        k += 1
        # Raise the power once per k instead of recomputing A^k from scratch
        # for every entry; (i, j) non-zero iff a walk of length k exists.
        power = power.dot(adjacency)
        for i, j in unknown:
            if power[i, j] != 0:
                distance[i, j] = k
    return distance

# **5. [5 marks]** Using your function from Question 4, find a pair of nodes $(i,j)$ in your graph from Part I with the property that the distance from $i$ to $j$ is maximal amongst all pairs of nodes in the graph.
#
# Note: This means that for every *other* pair of nodes $(i',j')$, the distance from $i'$ to $j'$ is less than or equal to the distance from $i$ to $j$.

# +
D = distance_matrix(G)  # Compute the distance matrix of the given graph

# First position (in row-major order) of the largest distance -- identical to
# the original double loop, but done with numpy.
i_max, j_max = np.unravel_index(np.argmax(D), D.shape)
maximum = D[i_max, j_max]
nodes = (int(i_max), int(j_max))  # plain Python ints, as before
print(nodes)
# -

# **6. [5 marks]** Find a shortest path between your nodes from Question 5, i.e. one with the shortest possible length, and re-draw your graph so that this path is clearly visible.
# You should use one colour for the nodes and edges in the path, and a different colour for all other nodes and edges. # # Hint: You should be able to find a NetworkX function that computes a shortest path. # + # Set a fixed position for the layout of the graph pos = nx.spring_layout(G) # Use networkx to find the shortest path in the form of a list of nodes shortest_path_nodes = nx.shortest_path(G, nodes[0], nodes[1]) # Create an edge list from the node list by zipping the node list with a copy of the node list minus the first node shortest_path_edges = set(zip(shortest_path_nodes, shortest_path_nodes[1:])) print("Shortest path as nodes: ", shortest_path_nodes) print("Shortest path as edges:", shortest_path_edges) # Draw graph with all nodes and edges with colour orange nx.draw_networkx(G, pos, node_color='orange', edge_color='orange', with_labels=True) # Re draw nodes for the shortest path as red nx.draw_networkx_nodes(G, pos, nodelist=shortest_path_nodes, node_color='red') # Re draw edges for shortest path as red nx.draw_networkx_edges(G, pos, edgelist=shortest_path_edges, edge_color='red', width=3) # Hides x and y axis and show the graph plt.xticks([]) plt.yticks([]) plt.show() # - # ### Part III: Laplacian matrices and spanning trees [30 marks] # # So far you have learned about two matrices associated with a graph: the adjacency matrix, and the distance matrix. Now you will study a third matrix: the Laplacian. # # **7. [10 marks]** Explain what the "degree" of a node in a graph is, what the "Laplacian matrix" of a graph is, what a "spanning tree" of a graph is, and how the Laplacian matrix can be used to calculate the number of spanning trees in a graph. # Here, again, you should discuss arbitrary (undirected, simple, connected) graphs, not your specific graph from Part I. # # Note: You do **not** need to give any proofs, but you must reference any material you use, as explained in the plagiarism warning above. 
# The degree of a node v in a graph is the number of edges which touch v. The Laplacian matrix of a graph is a matrix representation of a graph, defined by L=D-A, where L is the Laplacian matrix, D is the degree matrix (a diagonal matrix whose (i,i)-entry is the number of edges connected to node i) and A the adjacency matrix. The entries of the Laplacian matrix for a graph G(V,E) where V={v<sub>1</sub>.....v<sub>n</sub>}, with n nodes and edge set E, are as follows:
#
# $$ L_{ij}= \left\{
# \begin{array}{ll}
#       deg(v_{i}) & if \; i=j\\
#       -1 & if \; i \neq j \; and \; (v_{i}, v_{j}) \in E \\
#       0 & otherwise \\
# \end{array}
# \right. $$
#
# A spanning tree of a graph is a connected subgraph which contains all n nodes and exactly n-1 edges; equivalently, it is a connected subgraph with no cycles, so it uses the minimum number of edges needed to keep every node connected. To calculate the number of spanning trees, choose a node v<sub>j</sub> and delete the j<sup>th</sup> column and row from the associated Laplacian matrix L to form a new matrix L<sub>j</sub>. Next compute the determinant of L<sub>j</sub> to give N<sub>T</sub> (the number of spanning trees).
#
# **References:**
#
# Weisstein, Eric W. "Spanning Tree." From MathWorld--A Wolfram Web Resource.
# http://mathworld.wolfram.com/SpanningTree.html
#
# Weisstein, Eric W. "Laplacian Matrix." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/LaplacianMatrix.html
#
# Manchester University. "The Matrix-Tree Theorems." From:
# https://personalpages.manchester.ac.uk/staff/mark.muldoon/Teaching/DiscreteMaths/LectureNotes/IntroToMatrixTree.pdf
#
# **8. [10 marks]** Write a function `number_of_spanning_trees` which takes as input a graph $G$ and returns the number of spanning trees in $G$. You may use the NetworkX function `adjacency_matrix` to compute the adjacency matrix of the input graph, but you **may not use any other NetworkX functions**.
#
# Note: You will probably need to compute the determinant of a certain matrix somewhere in your code.
# If you use the function `numpy.linalg.det` then your determinant will only be computed approximately, i.e. to a certain numerical precision. This is fine; you will not lose any marks if your code is otherwise correct.

# +
# Function that takes a graph as an input and outputs the degree matrix in the form of a numpy 2d array
def degree_matrix(graph):
    """Return the degree matrix of ``graph`` as a square integer ``numpy.ndarray``.

    The degree matrix is diagonal: its (i, i)-entry is the degree of node i,
    i.e. the number of edges touching that node, which equals the i-th row
    sum of the 0/1 adjacency matrix.
    """
    adjacency = nx.adjacency_matrix(graph).toarray()
    # Row sums give the degrees; np.diag places them on the diagonal.
    return np.diag(adjacency.sum(axis=1))


# Function that takes two matrices (adjacency_matrix, degree_matrix) and computes degree_matrix minus adjacency_matrix
def laplacian_matrix(adjacency_matrix, degree_matrix):
    """Return the Laplacian matrix L = D - A of a graph."""
    return np.subtract(degree_matrix, adjacency_matrix)


# Function that takes a graph as an input and outputs the number of spanning trees
def number_of_spanning_trees(graph):
    """Return the number of spanning trees of ``graph`` as an ``int``.

    Implements Kirchhoff's matrix-tree theorem: delete one row and the
    matching column of the Laplacian and take the determinant of the
    remaining cofactor. ``numpy.linalg.det`` works in floating point, so the
    result is rounded to the nearest integer (the true count is an integer;
    the original version returned the raw float).
    """
    a = nx.adjacency_matrix(graph).toarray()
    l = laplacian_matrix(a, degree_matrix(graph))
    # Remove the first row and column by slicing (equivalent to two
    # np.delete calls, without copying twice).
    cofactor = l[1:, 1:]
    return int(round(np.linalg.det(cofactor)))
# -

# **9 [5 marks]** Use your function from Question 8 to calculate the number of spanning trees in your graph from Part I.
print("Number of spanning trees:", number_of_spanning_trees(G)) # **10 [5 marks]** Find a minimal spanning tree of your graph from Part I, i.e. one with the smallest possible number of edges. Re-draw your graph in such a way that this spanning tree is clearly visible. You should use one colour for the edges in the spanning tree, and a different colour for all other edges. # # Hint: You should be able to find a NetworkX function that computes a minimal spanning tree. # + spanning_tree = nx.minimum_spanning_tree(G) # Get the graph of the spanning tree # Draw the graph with the fixed position nx.draw_networkx(G, pos, node_color='orange', edge_color='orange', with_labels=True) # Draw the spanning tree over the graph with the same fixed position nx.draw_networkx_edges(spanning_tree, pos, node_color='orange', edge_color='blue', width=3) # Hides x and y axis and show the graph plt.xticks([]) plt.yticks([]) plt.show() # - # ### Part IV: Eigenvalues and triangles [30 marks] # # By now you have seen that certain matrices associated with a graph can tell us a lot about the structure of the graph. # In this final part of the project, you will investigate this further, by learning how eigenvalues can be used to reveal even more information about graphs. # # **11. [5 marks]** Explain what a "triangle" in a graph is, and quote a formula for calculating the number of triangles in a graph from the eigenvalues of the adjacency matrix. # # Note: You do **not** need to give any proofs, but you must reference any material you use, as explained in the plagiarism warning above. # A triangle in a graph is where any 3 nodes are directly connected with 3 edges (a clique with 3 nodes). 
# The total number of triangles in an undirected graph (∆G) is equal to the trace of A<sup>3</sup> divided by 6, i.e. to the sum of the cubes of the eigenvalues lambda<sub>i</sub> of the adjacency matrix divided by 6:
# $$∆(G)=\frac{1}{6}trace(A^3)=\frac{1}{6}\sum_{i=1}^{n}\lambda_{i}^3$$
#
# **References:**
#
# Changyu, Yang. "Triangle Counting in Large Networks." (Page 7) From:<br/> https://pdfs.semanticscholar.org/1727/09e8e7177affe516c22d217a0e543b92a0d1.pdf

# **12. [5 marks]** Calculate the number of triangles in your graph from Part I, using the formula discussed in question 11. Your answer **must** be an integer.
#
# Hint: What is the "trace" of a matrix and how is it related to the eigenvalues?

# +
# Function that takes a graph as an input and returns the number of triangles as an integer
def number_of_triangles(graph):
    """Return the number of triangles in ``graph`` as an ``int``.

    Uses the identity  #triangles = trace(A^3) / 6  (A the adjacency
    matrix): each triangle contributes a closed walk of length 3 from each
    of its 3 nodes, in each of 2 directions, hence the division by 6.
    """
    adjacency = nx.adjacency_matrix(graph).toarray()
    cubed = la.matrix_power(adjacency, 3)  # (i, i)-entry counts closed 3-walks at node i
    # trace(A^3) is an exact multiple of 6, so integer division is safe; this
    # also avoids the manual diagonal loop that shadowed the built-in `sum`.
    return int(np.trace(cubed)) // 6

print("Number of triangles:", number_of_triangles(G))
# -

# **13. [10 marks]** Write a function `all_triangles` which finds all of the triangles in a graph. Use your function to count the number of triangles in your graph, and compare with your answer to question 12. (The two answers should, of course, be the same.)
#
# Note: You will need to use your function in the next question, so you should think carefully about what kind of data structure you want it to output.
# + from itertools import permutations # Function that takes a graph as an input and returns the all triangles as a list of edge pairs that form them def all_triangles(graph): clique_list = list(nx.clique.enumerate_all_cliques(graph)) # Get all cliques in the graph # A clique is a triangle if there are three nodes # Therefore find all cliques with the length of three triangles = [x for x in clique_list if len(x) == 3] triangle_edges = [] for triangle_nodes in triangles: # For each list of nodes within the triangles list # Given a list of nodes that form a triangle # All possible edges can be computed by calculating all permutations of size 2 edges = permutations(triangle_nodes, 2) for pair in edges: # For each pair append that to the main list of edges triangle_edges.append(pair) return triangle_edges # Return a list of edge pairs triangles = all_triangles(G) # Get a list of edges that form triangles in the graph number = int(len(triangles)/6) # Divide edges by 6 since each triangle is counted twice (one in each direction) for each node print("Number of triangles (from all_triangles):", number) print("Number of triangles (from number_of_triangles):", number_of_triangles(G)) print("Are both functions equal?") print(number_of_triangles(G) == number) # - # **14. [10 marks]** Re-draw your graph from Part I once more, so that all of its triangles are clearly visible. # You should use one colour for the edges that appear in at least one triangle, and a different colour for all other edges. # + # Draw the graph with the fixed position nx.draw_networkx(G, pos, node_color='orange', edge_color='orange', with_labels=True) # Draw the edges that form the triangles with the same fixed position nx.draw_networkx_edges(G, pos=pos, edge_color="blue", edgelist=triangles, width=3) # Hides x and y axis and show the graph plt.xticks([]) plt.yticks([]) plt.show()
28,957
/1.-Python/6.-Rock–Paper–Scissors/rock-paper-scissors_thomas_spitzer.ipynb
02bbf756196c9c1663bd2ef6d45521559e89bbfe
[]
no_license
thomas-cares/data-prework
https://github.com/thomas-cares/data-prework
0
0
null
2020-08-14T16:05:57
2020-07-15T14:23:20
null
Jupyter Notebook
false
false
.py
15,074
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="https://bit.ly/2VnXWr2" width="100" align="left"> # # Rock, Paper & Scissors # # Let's play the famous game against our computer. You can check the rules [here](https://en.wikipedia.org/wiki/Rock%E2%80%93paper%E2%80%93scissors). # # ## Task # Create a program that imitates the playability of the well known game of rock, paper, scissors. Follow the guidelines provided. # # ## Tools # 1. Loop: **for/while** # 2. Functions: **input(), print()...** # 3. Conditional statements: **if, elif, else** # 4. Definition of functions. Modular programming # 5. Import modules # # **To solve this challenge, the use of functions is recommended.** # # #### 1. Import the choice function of the random module. import random # #### 2. Create a list that includes the 3 possible gesture options of the game: 'rock', 'paper' or 'scissors'. Store the list in a variable called `gestures`. gestures=['rock', 'paper', 'scissors'] # #### 3. Create a variable called `n_rounds` to store the maximum number of rounds to play in a game. # Remember that the number of rounds must be odd: 1, 3, 5, ... n_rounds=33 # #### 4. Create a variable called `rounds_to_win` to store the number of rounds that a player must win to win the game. # **Hint**: the value stored in `rounds_to_win` depends on the value of `n_rounds`. rounds_to_win=int(n_rounds/2)+1 # #### 5. Create two variables to store the number of rounds that the computer and the player have won. Call these variables `cpu_score` and `player_score`. computer_score=0 player_score=0 # #### 6. Define a function that randomly returns one of the 3 gesture options. # You will use this function to simulate the gesture choice of the computer. computer_choice=random.choice(gestures) print("computer chose: ",computer_choice) # #### 7. 
# Define a function that asks the player which is the gesture he or she wants to show: 'rock', 'paper' or 'scissors'.
# The player should only be allowed to choose one of the 3 gesture options. If the player's choice is not rock, paper or scissors, keep asking until it is.

# +
# Keep prompting until the input is one of the three valid gestures.
player_choice=input("Make your choice: rock, paper or scissors !: ")

while player_choice not in gestures:
    player_choice=input("Invalid! You may only chose rock, paper or scissor !: ")

print('Good! Your choice is', player_choice)
# -

# #### 8. Define a function that checks who won a round.
# The function should return 0 if there is a tie, 1 if the computer wins and 2 if the player wins.

# +
# Round outcome code: 0 = tie, 1 = computer wins, 2 = player wins.
# 3 is an "undecided" sentinel that is always overwritten below.
victory=3

if player_choice == computer_choice:
    victory= 0
    print('its a tie')
elif player_choice == "rock":
    if computer_choice == "paper":
        print("you lose")
        victory= 1
    else:
        print('you win')
        victory = 2
elif player_choice == "paper":
    if computer_choice == "scissors":
        print("you lose")
        victory = 1
    else:
        print("you win")
        victory = 2
elif player_choice == "scissors":
    if computer_choice == "rock":
        print ("you lose")
        victory = 1
    else:
        print("you win")
        victory = 2

print ("You: ",player_choice)
print ("Computer: ",computer_choice)
print("victory-code:", victory)
# -

# #### 9. Define a function that prints the choice of the computer, the choice of the player and a message that announces who won the current round.
# You should also use this function to update the variables that count the number of rounds that the computer and the player have won. The score of the winner increases by one point. If there is a tie, the score does not increase.

# +
# Translate the round outcome into a score update: +1 to the winner, no
# change on a tie (the += 0 branch is a deliberate no-op).
if victory == 1:
    computer_score+=1
elif victory == 2:
    player_score+=1
else:
    player_score+=0
    computer_score+=0

print('Player: ', player_score)
print ('Computer: ', computer_score)
# -

# #### 10. Now it's time to code the execution of the game using the functions and variables you defined above.
# # First, create a loop structure that repeats while no player reaches the minimum score necessary to win and the number of rounds is less than the maximum number of rounds to play in a game. # # Inside the loop, use the functions and variables above to create the execution of a round: ask for the player's choice, generate the random choice of the computer, show the round results, update the scores, etc. # + import random gestures=['rock', 'paper', 'scissors'] computer_score=0 player_score=0 victory=3 n_rounds=int(input("Chose the maximum amounts of rounds:")) rounds_to_win=int(n_rounds/2)+1 round=0 for round in range(n_rounds): print ("ROUND:", round+1) player_choice=input("Make your choice: rock, paper or scissors !: ") while player_choice not in gestures: player_choice=input("Invalid! You may only chose rock, paper or scissors !: ") print('PLAYER:', player_choice) computer_choice=random.choice(gestures) print("MACHINE:",computer_choice) if player_choice == computer_choice: victory= 0 elif player_choice == "rock": if computer_choice == "paper": victory= 1 else: victory = 2 elif player_choice == "paper": if computer_choice == "scissors": victory = 1 else: victory = 2 elif player_choice == "scissors": if computer_choice == "rock": victory = 1 else: victory = 2 if victory == 1: computer_score+=1 elif victory == 2: player_score+=1 else: player_score+=0 computer_score+=0 print ('Score') print ('Player: ',player_score," /// Computer: ",computer_score) print ('-------') #EARLY VICTORY if player_score==rounds_to_win: print("FINISH!") print ('----Total Score----') print ('Player: ',player_score," /// Computer: ",computer_score) print("YOU ACHIEVED VICTORY") round=n_rounds + 1 elif computer_score==rounds_to_win: print("FINISH!") print ('----Total Score----') print ('Player: ',player_score," /// Computer: ",computer_score) print("THE MACHINE WON") round = n_rounds + 1 #MAXIMUM ROUNDS REACHED print("FINISH! 
The maximum rounds have been reached!") print ('-------TOTAL SCORE-------') print ('Player: ',player_score," /// Computer: ",computer_score) if player_score == computer_score: print("THE GAME ENDS IN A TIE") elif player_score > computer_score: print("YOU ACHIEVED VICTORY") elif computer_score > player_score: print("YOU LOSE! THE MACHINE WON!") # - # #### 11. Print the winner of the game based on who won more rounds. # Remember that the game might be tied. if player_score == computer_score: print("THE GAME ENDS IN A TIE") elif player_score > computer_score: print("Player won: YOU ACHIEVED VICTORY!") elif computer_score > player_score: print("YOU LOSE! THE MACHINE WON!") # # Bonus: Rock, Paper, Scissors, Lizard & Spock # ![](images/rpsls.jpg) # # In this challenge, you need to improve the previous game by adding two new options. To know more about the rules of the improved version of rock, paper, scissors, check this [link](http://www.samkass.com/theories/RPSSL.html). # # In addition, you will also need to improve how the game interacts with the player: the number of rounds to play, which must be an odd number, will be requested to the user until a valid number is entered. Define a new function to make that request. # # **Hint**: Try to reuse the code that you already coded in the previous challenge. If your code is efficient, this bonus will only consist of simple modifications to the original game. dom_state=RANDOM_STATE) # NOTE: Setting the `warm_start` construction parameter to `True` disables # support for parallelized ensembles but is necessary for tracking the OOB # error trajectory during training. 
ensemble_clfs = [ ("RandomForestClassifier, max_features='sqrt'", RandomForestClassifier(warm_start=True, oob_score=True, max_features="sqrt", random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features='log2'", RandomForestClassifier(warm_start=True, max_features='log2', oob_score=True, random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features=None", RandomForestClassifier(warm_start=True, max_features=None, oob_score=True, random_state=RANDOM_STATE)) ] # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs. error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) # Range of `n_estimators` values to explore. min_estimators = 15 max_estimators = 175 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X, y) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) # Generate the "OOB error rate" vs. "n_estimators" plot. 
# Plot one OOB-error curve per classifier configuration.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    plt.plot(xs, ys, label=label)

plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()

# + [markdown] slideshow={"slide_type": "slide"}
# #### Feature importance
# RandomForests provide more reliable feature importances, based on many alternative hypotheses (trees)

# +
def plot_feature_importances_cancer(model):
    # Horizontal bar chart of model.feature_importances_, one bar per
    # feature. Reads the module-level `cancer` object -- presumably
    # sklearn's load_breast_cancer() bunch (has .data/.feature_names);
    # confirm against the cell that defines it.
    n_features = cancer.data.shape[1]
    plt.barh(range(n_features), model.feature_importances_, align='center')
    plt.yticks(np.arange(n_features), cancer.feature_names)
    plt.xlabel("Feature importance")
    plt.ylabel("Feature")
    plt.ylim(-1, n_features)

X, y = cancer.data, cancer.target
forest = RandomForestClassifier(random_state=0, n_estimators=512, n_jobs=-1)
forest.fit(X,y)

plt.rcParams.update({'font.size':8})
plot_feature_importances_cancer(forest)

# + [markdown] slideshow={"slide_type": "slide"}
# #### Strengths, weaknesses and parameters
# RandomForest are among most widely used algorithms:
#
# * Don't require a lot of tuning
# * Typically very accurate models
# * Handles heterogeneous features well
# * Implictly selects most relevant features
#
# Downsides:
#
# * less interpretable, slower to train (but parallellizable)
# * don't work well on high dimensional sparse data (e.g. text)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Adaptive Boosting (AdaBoost)
# * Builds an ensemble of _weighted_ weak learners
#   * Typically shallow trees or stumps
# * Each base model tries to correct the mistakes of the previous ones
#   * Sequential, not parallel (like RandomForest)
# * We give misclassified samples more weight
#   * Force next model to get these points right by either:
#     * Passing on the weight to the loss (e.g.
weighted Gini index) # * Sample data with probability = sample weights # * Misclassified samples are sampled multiple times so they get a higher weight # * Do weighted vote over all models # + [markdown] slideshow={"slide_type": "slide"} # ## AdaBoost algorithm # * Reset sample weights to $\frac{1}{N}$ # * Build a model, using it's own algorithm (e.g. decision stumps with gini index) # * Give it a weight related to its error $E$ # $$w_{i} = \lambda\log(\frac{1-E}{E})$$ # * Good trees get more weight than bad trees # * Error is mapped from [0,Inf] to [-1,1], use small minimum error to avoid infinities # * Learning rate $\lambda$ (shrinkage) decreases impact of individual classifiers # * Small updates are often better but requires more iterations # * Update the sample weights # * Increase weight of incorrectly predicted samples: # $s_{n,i+1} = s_{n,i}e^{w_i}$ # * Decrease weight of correctly predicted samples: # $s_{n,i+1} = s_{n,i}e^{-w_i}$ # * Normalize weights to add up to 1 # * Sample new points according to $s_{n,i+1}$ # * Repeat for $I$ rounds # + [markdown] slideshow={"slide_type": "slide"} # Visualization # + from matplotlib.colors import ListedColormap from sklearn.tree import DecisionTreeClassifier import ipywidgets as widgets from ipywidgets import interact, interact_manual from sklearn.preprocessing import normalize # Code from https://xavierbourretsicotte.github.io/AdaBoost.html def AdaBoost_scratch(X,y, M=10, learning_rate = 0.5): #Initialization of utility variables N = len(y) estimator_list, y_predict_list, estimator_error_list, estimator_weight_list, sample_weight_list = [],[],[],[],[] #Initialize the sample weights sample_weight = np.ones(N) / N sample_weight_list.append(sample_weight.copy()) #For m = 1 to M for m in range(M): #Fit a classifier estimator = DecisionTreeClassifier(max_depth = 1, max_leaf_nodes=2) estimator.fit(X, y, sample_weight=sample_weight) y_predict = estimator.predict(X) #Misclassifications incorrect = (y_predict != y) 
#Estimator error estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) #Boost estimator weights estimator_weight = learning_rate * np.log((1. - estimator_error) / estimator_error) #Boost sample weights sample_weight *= np.exp(estimator_weight * incorrect * ((sample_weight > 0) | (estimator_weight < 0))) sample_weight *= np.exp(-estimator_weight * np.invert(incorrect * ((sample_weight > 0) | (estimator_weight < 0)))) sample_weight /= np.linalg.norm(sample_weight) #Save iteration values estimator_list.append(estimator) y_predict_list.append(y_predict.copy()) estimator_error_list.append(estimator_error.copy()) estimator_weight_list.append(estimator_weight.copy()) sample_weight_list.append(sample_weight.copy()) #Convert to np array for convenience estimator_list = np.asarray(estimator_list) y_predict_list = np.asarray(y_predict_list) estimator_error_list = np.asarray(estimator_error_list) estimator_weight_list = np.asarray(estimator_weight_list) sample_weight_list = np.asarray(sample_weight_list) #Predictions preds = (np.array([np.sign((y_predict_list[:,point] * estimator_weight_list).sum()) for point in range(N)])) #print('Accuracy = ', (preds == y).sum() / N) return estimator_list, estimator_weight_list, sample_weight_list, estimator_error_list def plot_decision_boundary(classifier, X, y, N = 10, scatter_weights = np.ones(len(y)) , ax = None ): '''Utility function to plot decision boundary and scatter plot of data''' x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1 y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1 # Get current axis and plot if ax is None: ax = plt.gca() cm_bright = ListedColormap(['#FF0000', '#0000FF']) ax.scatter(X[:,0],X[:,1], c = y, cmap = cm_bright, s = scatter_weights * 40) ax.set_xlabel('$X_1$') ax.set_ylabel('$X_2$') # Plot classifier background if classifier is not None: xx, yy = np.meshgrid( np.linspace(x_min, x_max, N), np.linspace(y_min, y_max, N)) #Check what methods are available if 
hasattr(classifier, "decision_function"): zz = np.array( [classifier.decision_function(np.array([xi,yi]).reshape(1,-1)) for xi, yi in zip(np.ravel(xx), np.ravel(yy)) ] ) elif hasattr(classifier, "predict_proba"): zz = np.array( [classifier.predict_proba(np.array([xi,yi]).reshape(1,-1))[:,1] for xi, yi in zip(np.ravel(xx), np.ravel(yy)) ] ) else: zz = np.array( [classifier(np.array([xi,yi]).reshape(1,-1)) for xi, yi in zip(np.ravel(xx), np.ravel(yy)) ] ) # reshape result and plot Z = zz.reshape(xx.shape) ax.contourf(xx, yy, Z, 2, cmap='RdBu', alpha=.5, levels=[0,0.5,1]) #ax.contour(xx, yy, Z, 2, cmap='RdBu', levels=[0,0.5,1]) from sklearn.datasets import make_circles Xa, ya = make_circles(n_samples=400, noise=0.15, factor=0.5, random_state=1) cm_bright = ListedColormap(['#FF0000', '#0000FF']) estimator_list, estimator_weight_list, sample_weight_list, estimator_error_list = AdaBoost_scratch(Xa, ya, M=60, learning_rate = 0.9) @interact def plot_adaboost(iteration=(0,60,1)): if iteration == 0: s_weights = (sample_weight_list[0,:] / sample_weight_list[0,:].sum() ) * 40 plot_decision_boundary(None, Xa, ya, N = 20, scatter_weights =s_weights) else: s_weights = (sample_weight_list[iteration,:] / sample_weight_list[iteration,:].sum() ) * 40 plot_decision_boundary(estimator_list[iteration-1], Xa, ya, N = 20, scatter_weights =s_weights ) print("Base model {}, error: {:.2f}, weight: {:.2f}".format( iteration,estimator_error_list[iteration-1],estimator_weight_list[iteration-1])) # + slideshow={"slide_type": "slide"} for iteration in [1, 5, 38, 55]: plot_adaboost(iteration) plt.show() # + slideshow={"slide_type": "slide"} import mglearn.plot_classifiers as pc from sklearn.ensemble import AdaBoostClassifier names = ["AdaBoost 1 tree", "AdaBoost 3 trees", "AdaBoost 100 trees"] classifiers = [ AdaBoostClassifier(n_estimators=1, random_state=0, learning_rate=0.5), AdaBoostClassifier(n_estimators=3, random_state=0, learning_rate=0.5), AdaBoostClassifier(n_estimators=100, 
random_state=0, learning_rate=0.5) ] pc.plot_classifiers(names, classifiers, figuresize=(20,8)) # + [markdown] slideshow={"slide_type": "slide"} # AdaBoost reduces bias (and a little variance) # * Boosting too much will eventually increase variance # - X, y = cancer.data, cancer.target ab = AdaBoostClassifier(random_state=0, n_estimators=1024) plot_bias_variance_rf(ab, X, y) # + [markdown] slideshow={"slide_type": "slide"} # ## AdaBoost Recap # * Representation: weighted ensemble of base models # * Base models can be built by any algorithm # * Classification: weighted vote over all base models # * Regression: $$y = \sum_{i=1}^{N} w_i tree_i(X)$$ # * Loss function: weighted loss function of base models # * Optimization: Greedy search # + [markdown] slideshow={"slide_type": "slide"} # # ## Gradient Boosted Regression Trees (Gradient Boosting Machines) # Several differences to AdaBoost: # * Start with initial guess (e.g. 1 leaf, average value of all samples) # * Base-models are shallow trees (depth 2-4, not stumps) # * Models are weighted (scaled) by same amount (learning rate) # * Subsequent models aim to predict the error of the previous model # * _Additive model_: final prediction is the sum of all base-model predictions # * Iterate until $I$ trees are built (or error converges) # # # + [markdown] slideshow={"slide_type": "slide"} # ## GradientBoosting Intuition (Regression) # * Do initial prediction $M_0$ (e.g. 
average target value) # * Compute the _pseudo-residual_ (error) _for every sample_ $n$: $r_{n} = y_n - y^{(M_{i})}_n$ # * Where $y^{(M_{i})}_n$ is the prediction for $y_n$ by model $M_{i}$ # # # * Build new model $M_1$ to predict the pseudo-residual of $M_0$ # * New prediction at step $I$: # $$y_{n} = y^{(M_{i-1})}_n + \lambda * y^{(M_{i})}_n = y^{(M_{0})}_n + \sum_{i=1}^I \lambda * y^{(M_{i})}_n$$ # * $\lambda$ is the learning rate (or _shrinkage_) # * Taking small steps in right direction reduces variance (but requires more iterations) # * Compute new pseudo-residuals, and repeat # * Each step, the pseudo-residuals get smaller # * Stop after given number of iterations, or when the residuals don't decrease anymore (early stopping) # + [markdown] slideshow={"slide_type": "slide"} # ## GradientBoosting Algorithm (Regression) # # * Dataset of $n$ points $D = \{(x_i,y_i)\}_{i=1}^{n}$ where $y_i$ is a numeric target # * Differentiable loss function $\mathcal{L}(y_i,F(x))$. Typically $\mathcal{L} = \frac{1}{2}(y_i - \hat{y}_i)^2$ # * $\frac{\partial \mathcal{L}}{\partial \hat{y}} = 2 * \frac{1}{2}(y_i - \hat{y}_i) * (-1) = \hat{y}_i - y_i$ # * Initialize model with constant output value $F_0(x) = \underset{\gamma}{\operatorname{argmin}}\sum_{i=1}^{n}\mathcal{L}(y_i,\gamma)$ # * Compute $\frac{\partial \mathcal{L}}{\partial \gamma} = 0$. For $\mathcal{L}= \frac{1}{2}(y_i - \hat{y}_i)^2$, $F_0(x)$ is the average of $y_i$ # * For m=1..M (e.g. M=100 trees): # * For i=1..n, compute pseudo-residuals $r_{im} = - \left[ \frac{\partial \mathcal{L}(y_i,F(x_i))}{\partial F(x_i)} \right]_{F(x)=F_{m-1}(x)}$ # * Fit a regression model to $r_{im}$, create terminal regions (a.k.a. leafs) $R_{jm}, j=1..J_m$ # * For each leaf j, optimize output $\gamma_{jm} = \underset{\gamma}{\operatorname{argmin}}\sum_{x_i \in R_{ij}}\mathcal{L}(y_i,F_{m-1}(x_i) + \gamma)$ # * For $\mathcal{L}= \frac{1}{2}(y_i - \hat{y}_i)^2$, the optimum is the mean of all values in the leaf. 
# * Update $F_{m}(x) = F_{m-1}(x) + \lambda \sum_{j=1}^{J_m} \gamma_m I(x \in R_{jm}))$ # # # # # + [markdown] hide_input=false slideshow={"slide_type": "slide"} # ``` python # gbrt = GradientBoostingClassifier(random_state=0) # gbrt.fit(X_train, y_train) # ``` # + hide_input=true slideshow={"slide_type": "-"} from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( cancer.data, cancer.target, random_state=0) gbrt = GradientBoostingClassifier(random_state=0) gbrt.fit(X_train, y_train) print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train))) print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test))) # - # ``` python # gbrt = GradientBoostingClassifier(random_state=0, max_depth=1) # gbrt.fit(X_train, y_train) # ``` # + hide_input=true # We are overfitting. We can decrease max_depth gbrt = GradientBoostingClassifier(random_state=0, max_depth=1) gbrt.fit(X_train, y_train) print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train))) print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test))) # - # ``` python # gbrt = GradientBoostingClassifier(random_state=0, learning_rate=0.01) # gbrt.fit(X_train, y_train) # ``` # + hide_input=true # or decrease the learning rate (less effect) gbrt = GradientBoostingClassifier(random_state=0, learning_rate=0.01) gbrt.fit(X_train, y_train) print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train))) print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test))) # + [markdown] slideshow={"slide_type": "slide"} # Gradient boosting machines use much simpler trees # - Hence, tends to completely ignore some of the features # + gbrt = GradientBoostingClassifier(random_state=0, max_depth=1) gbrt.fit(X_train, y_train) plt.rcParams.update({'font.size':8}) plot_feature_importances_cancer(gbrt) # + [markdown] slideshow={"slide_type": "slide"} # ##### 
Strengths, weaknesses and parameters # * Among the most powerful and widely used models # * Work well on heterogeneous features and different scales # * Require careful tuning, take longer to train. # * Does not work well on high-dimensional sparse data # # Main hyperparameters: # # * `n_estimators`: Higher is better, but will start to overfit # * `learning_rate`: Lower rates mean more trees are needed to get more complex models # * Set `n_estimators` as high as possible, then tune `learning_rate` # * `max_depth`: typically kept low (<5), reduce when overfitting # * `n_iter_no_change`: early stopping: algorithm stops if improvement is less than a certain tolerance `tol` for more than `n_iter_no_change` iterations. # + [markdown] slideshow={"slide_type": "slide"} # ### XGBoost # # XGBoost is another python library for gradient boosting # Install separately, `conda install -c conda-forge xgboost` # # - The main difference lies the use of approximation techniques to make it faster. # - About 5x faster _per core_. Thus more boosting iterations in same amount of time # - Sketching: Given 10000 possible splits, it will only consider 300 "good enough" splits by default # - Controlled by the `sketch_eps` parameter (default 0.03) # - Loss function approximation with Taylor Expansion: more efficient way to evaluate splits # - Allows plotting of the learning curve # - Allows to stop and continue later (warm-start) # # Further reading: # [XGBoost Documentation](https://xgboost.readthedocs.io/en/latest/parameter.html#parameters-for-tree-booster) # [Paper](http://arxiv.org/abs/1603.02754) # + [markdown] slideshow={"slide_type": "slide"} # ### LightGBM # Another fast boosting technique # # * Uses _gradient-based sampling_: # - use all instances with large gradients (e.g. 10% largest) # - randomly sample instances with small gradients, ignore the rest # - intuition: samples with small gradients are already well-trained. 
# - requires adapted information gain criterion # * Does smarter encoding of categorical features # + [markdown] slideshow={"slide_type": "slide"} # Comparison # + hide_input=true import mglearn.plot_classifiers as pc names = ["Decision tree", "Random Forest 10", "Gradient Boosting"] classifiers = [ DecisionTreeClassifier(), RandomForestClassifier(max_depth=5, n_estimators=100, max_features=1), GradientBoostingClassifier(random_state=0, learning_rate=0.5) ] pc.plot_classifiers(names, classifiers, figuresize=(20,8)) # + [markdown] slideshow={"slide_type": "slide"} # ### Algorithm overview # # | Name | Representation | Loss function | Optimization | Regularization | # |---|---|---|---|---| # | Classification trees | Decision tree | Information Gain (KL div.) / Gini index | Hunt's algorithm | Tree depth,... | # | Regression trees | Decision tree | Min. quadratic distance | Hunt's algorithm | Tree depth,... | # | Bagging | Ensemble of any model | / | / | Number of models,... | # | RandomForest | Ensemble of random trees | / | / | Number of trees,... | # | AdaBoost | Ensemble of models (trees) | Weighted loss of base models | Greedy search | Number of trees,... | # | GradientBoosting | Ensemble of models (trees) | Ensemble loss | Gradient descent | Number of trees,... | # + [markdown] slideshow={"slide_type": "slide"} # ### Summary # - Bagging / RandomForest is a variance-reduction technique # - Build many high-variance (overfitting) models # - Typically deep (randomized) decision trees # - The more different the models, the better # - Aggregation (soft voting or averaging) reduces variance # - Parallellizes easily # - Boosting is a bias-reduction technique # - Build many high-bias (underfitting) models # - Typically shallow decision trees # - Sample weights are updated to create different trees # - Aggregation (soft voting or averaging) reduces bias # - Doesn't parallelize easily. Slower to train, much faster to predict. 
# - Smaller models, typically more accurate than RandomForests. # - You can build ensembles with other models as well # - Especially if they show high variance or bias # - It is also possible to build _heterogeneous_ ensembles # - Models from different algorithms # - Often a meta-classifier is trained on the predictions: Stacking
28,320
/Final Project/Code/.ipynb_checkpoints/6-Lnr_Rndm_uplow_Livindex-checkpoint.ipynb
8821c1cbc00dc8cd832ba35a52ad16c0fd084ecb
[]
no_license
juanw31/fintech-Project-2
https://github.com/juanw31/fintech-Project-2
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
3,717,115
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib as plt import seaborn as sns employee=pd.read_csv('WA_Fn-UseC_-HR-Employee-Attrition.csv'); print(employee) employee.Attrition.value_counts() employees_frame=pd.DataFrame(employee,columns=['DistanceFromHome',"JobRole","Attrition"]) employees_frame sns.barplot(x="Attrition",y="DistanceFromHome",hue="JobRole",data=employees_frame) sns.barplot(x="Attrition",y="DistanceFromHome",data=employees_frame) sns.barplot(x="JobRole",y="DistanceFromHome",data=employees_frame) employees_income_frame=pd.DataFrame(employee,columns=['MonthlyIncome',"Education","Attrition"]) employees_income_frame employees_income_frame.groupby(['Education'])["MonthlyIncome"].mean() employees_income_frame.groupby(['Attrition'])["MonthlyIncome"].mean() sns.barplot(x="Attrition",y="MonthlyIncome",data=employees_income_frame) sns.barplot(x="Education",y="MonthlyIncome",data=employees_income_frame) er. First we check the Regression of only price and sq_ft. Then with every iteration we add on more variable to X to see the impact of the addition to the accuracy. The iteration with the best accuracy is consoidered. # # <p> 2. Random Forest Regression Model: We consider all variable for X except price. 
# import pandas as pd # import numpy as np # from pathlib import Path from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression filepath=("../Data/cleaned_data.csv") data=pd.read_csv(filepath,infer_datetime_format=True) data.head() clean=data.copy() clean.drop(columns=['Unnamed: 0','Status','Days_on_market','Date','Acres'], inplace=True) #'City',,'ZipCode' clean.head() clean.info() clean['Liv_index']=clean['Liv_index'].astype(int) # Changed the dtype for Zipcode from float to int clean['Liv_index'].dtype clean.groupby(['City', 'ZipCode']).agg(['min','max']) #Grouped by City and then Liv_index to see the count. Both City and Liv_index represent same values.Whereas a city can have many Zipcodes clean.groupby(['City','Liv_index']).count() pd.unique(clean['Liv_index']) # So we can drop zipcode # clean1=clean.drop(columns=['ZipCode']) clean1=clean.copy() clean1.groupby(['City']).agg(['min','max']) # We can see the min/max price/bed/baths/sqft for each city. 
# ## Model - Linear Regression # from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression,LinearRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestRegressor # Linear regression using price as target (y) and sqft as X df=clean1.copy() df=df.drop(columns=['City']) df.head() ss = StandardScaler() df_scaled = pd.DataFrame(ss.fit_transform(df),columns = df.columns) df_scaled.head() X=df_scaled.copy() X=X.drop(columns = ['Price','Beds','Baths','Lot_sqft','Liv_index','ZipCode']) X.shape y=df_scaled['Price'] y.shape # ### 1) Linear model with only Sqft as X and Price as y # + X=df_scaled.copy() X=X.drop(columns = ['Price','Beds','Baths','Lot_sqft','Liv_index','ZipCode']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy : {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # ### 2) Linear model with Sqft and Zipcode as X and Price as y # + X=df_scaled.copy() X=X.drop(columns = ['Price','Baths','Lot_sqft','Liv_index','Beds']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # + # We can see tha there is a slight improvement in the accuarcy on adding Zipcode and coefficients are +ve # - # ### 3) Linear model with only Sqft, ZipCode, Beds as X and Price as 
y. (Adding Beds to X) # + X=df_scaled.copy() X=X.drop(columns = ['Price','Baths','Lot_sqft','Liv_index']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # ### 4) Linear model with only Sqft, ZipCode, Beds, Baths as X and Price as y. Now adding Beds to X # + X=df_scaled.copy() X=X.drop(columns = ['Price','Lot_sqft','Liv_index']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # ### 5) Linear model with only Sqft, ZipCode, Beds, Baths and Lot_sqft as X and Price as y. (Adding Lot_sqft to X) # + X=df_scaled.copy() X=X.drop(columns = ['Price','Liv_index']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # ### 6) Linear model with only Sqft, ZipCode, Beds, Baths, Lot_sqft and Liv_index as X and Price as y. 
(Adding Liv_index to X) # + X=df_scaled.copy() X=X.drop(columns = ['Price','Lot_sqft']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # We can see that Sqft has the highest impact on the price of the home followed by -ve effect from no of Beds.Zipcode has a +ve impact on the price which confirms that the area/location also impacts the price. In the next iteration we have dropped Beds. # # ### 7) Linear model with only Sqft, ZipCode, Baths, Lot_sqft and Liv_index as X and Price as y. (Dropping Beds to X) # + # Linear model with only Sqft as X and Price as y. Now adding **Beds (deleted),Baths,Lot_sqft to X. remove Lot_sqft and add Liv_index X=df_scaled.copy() X=X.drop(columns = ['Price','ZipCode']) y=df_scaled['Price'] # Model X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=1) model=LinearRegression() model.fit(X_train,y_train) accuracy=model.score(X_test,y_test) print(f'Model: {X.columns.values}') print(f'Linear Regression accuracy: {accuracy*100}') print('coefficients :',model.coef_) print('intercept :',model.intercept_) # - # There is a fall of 1.5 in the accuracy. So we can retain no of beds in the model. # ## B) Random Forest Regressor # # ### For this model, we want to see the impact of Liv index alongwith sqft on the price. 
So, we have selected unique Liv_index values # df.to_csv('df_rf.csv') # Get unique values for Liv_index unq_liv=df.Liv_index.unique() unq_liv # ### Lower Range: We select 68,63,66,67,59 Liv_index at the bottom of the range 50-90 # + # We select 68,63,66,67,59 df1=df.loc[df.Liv_index.isin([68,66,67,63,59])] df1.head() # - mean_price=df1['Price'].mean() mean_price # Create a groupbby df by ZipCode and Beds to see min & max values as per groupby df3=df1.groupby(['ZipCode','Beds']).agg(['min','max']) # df3.head(25) df1['Liv_index']=df1['Liv_index'].astype('str') #So that we can use pd dummies df1['Liv_index'].dtype df1.describe() # + X=df1.copy() X=X.drop(columns = ['Price']) X=pd.get_dummies(X) # - X.head() X.shape ss = StandardScaler() df_scaled = pd.DataFrame(ss.fit_transform(X),columns = X.columns) X_scaled = ss.fit_transform(X) y=df1['Price'] y.shape # Model considering all variables- ZipCode, X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=1) model = RandomForestRegressor(n_estimators=200) model.fit(X_train, y_train) y_pred = model.predict(X_test) from sklearn.metrics import mean_squared_log_error, mean_absolute_error,mean_squared_error print('MSE:',mean_squared_error(y_test, y_pred)) print('MAE:', mean_absolute_error(y_test, y_pred)) # + # Build a plot import matplotlib.pyplot as plt plt.scatter(y_pred, y_test) plt.xlabel('Prediction') plt.ylabel('Actual Price') # Now add the perfect prediction line diagonal = np.linspace(0, np.max(y_test), 100) plt.plot(diagonal, diagonal, '-r') plt.show() # + df2 = pd.DataFrame({'Actual': y_test.ravel(), 'Predicted': y_pred.ravel()}) df2.shape # - df2['Diff']=round((df2.Actual-df2.Predicted),0) df2.head() print(df2.Diff.std()) print(df2.corr()) df2['Adj_predicted']=df2['Predicted']+50000 df2.plot(figsize=(20,10)) # <p> We can see that there is significant variation in the predicted values cf. actuals in the test data. 
import seaborn as sns sns.relplot(x='Predicted',y='Actual',data=df2,kind='scatter') # ### Upper Range: We select Liv_index at the upper of the range 50-90 # # 81, 85, 84, 79, 71, 83, 78, 74, 80, 77, 82, 86, # + df_up=df.loc[df.Liv_index.isin([81, 85, 84, 79, 71, 83, 78, 74, 80, 77, 82, 86])] # df3=df1.groupby(['ZipCode','Beds']).agg(['min','max']) df_up['Liv_index']=df_up['Liv_index'].astype('str') #So that we can use pd dummies df_up['Liv_index'].dtype # - df_up.head() mean_up=df['Price'].mean() mean_up df_up.describe() df_up.info() X=df_up.copy() X=X.drop(columns = ['Price']) X=pd.get_dummies(X) ss = StandardScaler() df_scaled = pd.DataFrame(ss.fit_transform(X),columns = X.columns) X.shape y=df_up['Price'] y.shape # Model considering all variables- ZipCode, X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model = RandomForestRegressor(n_estimators=200, random_state=42) model.fit(X_train, y_train) y_pred_up = model.predict(X_test) from sklearn.metrics import mean_squared_log_error, mean_absolute_error,mean_squared_error print('MSE:',mean_squared_error(y_test, y_pred_up)) print('MAE:', mean_absolute_error(y_test, y_pred_up)) # + # Build a plot import matplotlib.pyplot as plt plt.scatter(y_pred_up, y_test) plt.xlabel('Prediction') plt.ylabel('Real value') # Now add the perfect prediction line diagonal = np.linspace(0, np.max(y_test), 100) plt.plot(diagonal, diagonal, '-r') plt.show() df22 = pd.DataFrame({'Actual': y_test.ravel(), 'Predicted': y_pred_up.ravel()}) df22['Diff']=round((df22.Actual-df22.Predicted),0) df22.head() print(df22.Diff.std()) print(df22.corr()) # - df22.plot(figsize=(20,10)) # This shows a fairly good prediction for the homes in the upper range of the Liv_index. # # # ## Price Adjustment . We increase the predicted price by 100K (est.) we are able to decrease the differnce between the Actual and Adjusted Predicted. 
# + import seaborn as sns adj=200000 # increase the predicted price by an estimated amount df22[f'Predicted + {adj}']=df22['Predicted']+adj # increase the predicted price by an estimated amount df22['New_diff']=df22[f'Predicted + {adj}']-df22['Actual'] df22.head() # - sns.relplot(y='Actual',x=f'Predicted + {adj}',data=df22,kind='scatter') # df23=df22.drop(columns=['Diff']) import hvplot.pandas df22.hvplot(title='Predicted + 100K ')
12,179
/notebooks/shakemap_damage_est.ipynb
3e5d2983008621d1cca2d5dc52f18717d6892f67
[ "MIT" ]
permissive
iwbailey/shakemap_lookup
https://github.com/iwbailey/shakemap_lookup
1
0
MIT
2021-03-25T22:13:55
2020-10-16T07:31:58
Jupyter Notebook
Jupyter Notebook
false
false
.py
2,305
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: shakemap # language: python # name: shakemap # --- # # Estimate damage at a set of locations based on shakemap def checkplot_interpolatedamage(frag, locns): """Read in fragility function, compare to the interpolated results we've output to file Plot the fragility curves as read in IN: Pandas data frame column 1 is named 'MMI' columns 2+ are named the damage states OUT: pyplot figure handle """ fig, ax = plt.subplots(1, 1, facecolor='white') # Plot underlying fragility curve for c in frag.damagestates(): ax.plot(frag.intensities, frag.exceedprob[c].values, label=c) # Plot result of the location lookup fnmIntens = frag.intensitymeasure + '_med' for c in frag.damagestates(): # Fieldname for the probabilities at these locations fnmDamage = 'prob_' + c ax.plot(locns.df[fnmIntens].values, locns.df[fnmDamage].values, '+', label='locations') ax.set_xlabel(frag.intensitymeasure) ax.set_ylabel('ProbExceedance') ax.grid('on') ax.legend(loc=2, frameon=False, framealpha=0.5) plt.title('Check fragility calculation') # Show the plot print('Pausing while plot is shown...') pylab.show(block=True) return
1,484
/NN_from_scratch/.ipynb_checkpoints/Implement_nn_fromScratch-checkpoint.ipynb
a037327e063df283227613f4a0f2535a9dbe3a75
[]
no_license
muditpassi/Machine_Learning
https://github.com/muditpassi/Machine_Learning
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
709,932
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from sklearn.datasets import make_moons from sklearn.linear_model import LogisticRegression import numpy as np import matplotlib.pyplot as plt # %matplotlib inline np.random.seed(0) X, y = make_moons(200, noise=0.20) plt.scatter(X[:,0],X[:,1], s=40, c=y, cmap=plt.cm.Spectral) # Helper function to plot a decision boundary. # If you don't fully understand this function don't worry, it just generates the contour plot below. def plot_decision_boundary(pred_func): # Set min and max values and give it some padding x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 h = 0.01 # Generate a grid of points with distance h between them xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Predict the function value for the whole gid Z = pred_func(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # Plot the contour and training examples plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral) lr = LogisticRegression() lr.fit(X,y) lr = LogisticRegression() lr.fit(X,y) plot_decision_boundary(lambda x: lr.predict(x)) # + num_examples = len(X) # training set size nn_input_dim = 2 # input layer dimensionality nn_output_dim = 2 # output layer dimensionality # Gradient descent parameters (I picked these by hand) epsilon = 0.01 # learning rate for gradient descent reg_lambda = 0.01 # regularization strength # - # Helper function to evaluate the total loss on the dataset def calculate_loss(model): W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2'] # Forward propagation to calculate our predictions z1 = X.dot(W1) + b1 a1 = np.tanh(z1) z2 = a1.dot(W2) + b2 exp_scores = np.exp(z2) probs = exp_scores / np.sum(exp_scores, 
axis=1, keepdims=True) # Calculating the loss corect_logprobs = -np.log(probs[range(num_examples), y]) data_loss = np.sum(corect_logprobs) # Add regulatization term to loss (optional) data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2))) return 1./num_examples * data_loss # Helper function to predict an output (0 or 1) def predict(model, x): W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2'] # Forward propagation z1 = x.dot(W1) + b1 a1 = np.tanh(z1) z2 = a1.dot(W2) + b2 exp_scores = np.exp(z2) probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) return np.argmax(probs, axis=1) # This function learns parameters for the neural network and returns the model. # - nn_hdim: Number of nodes in the hidden layer # - num_passes: Number of passes through the training data for gradient descent # - print_loss: If True, print the loss every 1000 iterations def build_model(nn_hdim, num_passes=20000, print_loss=False): # Initialize the parameters to random values. We need to learn these. np.random.seed(0) W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim) b1 = np.zeros((1, nn_hdim)) W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim) b2 = np.zeros((1, nn_output_dim)) # This is what we return at the end model = {} # Gradient descent. For each batch... 
for i in xrange(0, num_passes): # Forward propagation z1 = X.dot(W1) + b1 a1 = np.tanh(z1) z2 = a1.dot(W2) + b2 exp_scores = np.exp(z2) probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # Backpropagation delta3 = probs delta3[range(num_examples), y] -= 1 dW2 = (a1.T).dot(delta3) db2 = np.sum(delta3, axis=0, keepdims=True) delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2)) dW1 = np.dot(X.T, delta2) db1 = np.sum(delta2, axis=0) # Add regularization terms (b1 and b2 don't have regularization terms) dW2 += reg_lambda * W2 dW1 += reg_lambda * W1 # Gradient descent parameter update W1 += -epsilon * dW1 b1 += -epsilon * db1 W2 += -epsilon * dW2 b2 += -epsilon * db2 # Assign new parameters to the model model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2} # Optionally print the loss. # This is expensive because it uses the whole dataset, so we don't want to do it too often. if print_loss and i % 1000 == 0: print "Loss after iteration %i: %f" %(i, calculate_loss(model)) return model model = build_model(3, print_loss=True) plot_decision_boundary(lambda x: predict(model, x)) plt.figure(figsize=(16, 32)) hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50] for i, nn_hdim in enumerate(hidden_layer_dimensions): plt.subplot(5, 2, i+1) plt.title('Hidden Layer size %d' % nn_hdim) model = build_model(nn_hdim) plot_decision_boundary(lambda x: predict(model, x)) plt.show() r']=df.year.astype(int) # Oppos we got an error. Something is not right. Its trying to convert some python datatype: `None` into an int. This usually means data was missing. Was it? df[df.year.isnull()] # Aha, we had some incomplete data. Lets get rid of it df = df[df.year.notnull()] df.shape # We removed those 7 rows. Lets try the type conversion again df['rating_count']=df.rating_count.astype(int) df['review_count']=df.review_count.astype(int) df['year']=df.year.astype(int) df.dtypes # Much cleaner now! # ###Visualizing # # Pandas has handy built in visualization. 
df.rating.hist(); # We can do this in more detail, plotting against a mean, with cutom binsize or number of bins. Note how to label axes and create legends. sns.set_context("notebook") meanrat=df.rating.mean() #you can get means and medians in different ways print meanrat, np.mean(df.rating), df.rating.median() with sns.axes_style("whitegrid"): df.rating.hist(bins=30, alpha=0.4); plt.axvline(meanrat, 0, 0.75, color='r', label='Mean') plt.xlabel("average rating of book") plt.ylabel("Counts") plt.title("Ratings Histogram") plt.legend() #sns.despine() # One can see the sparseness of review counts. This will be important when we learn about recommendations: we'll have to *regularize* our models to deal with it. df.review_count.hist(bins=np.arange(0, 40000, 400)) # The structure may be easier to see if we rescale the x-axis to be logarithmic. df.review_count.hist(bins=100) plt.xscale("log"); # Here we make a scatterplot in matplotlib of rating against year. By setting the alpha transparency low we can how the density of highly rated books on goodreads has changed. plt.scatter(df.year, df.rating, lw=0, alpha=.08) plt.xlim([1900,2010]) plt.xlabel("Year") plt.ylabel("Rating") # ###Pythons and ducks # # Notice that we used the series in the x-list and y-list slots in the `scatter` function in the `plt` module. # # In working with python I always remember: a python is a duck. # # What I mean is, python has a certain way of doing things. For example lets call one of these ways listiness. Listiness works on lists, dictionaries, files, and a general notion of something called an iterator. # # A Pandas series plays like a python list: alist=[1,2,3,4,5] # We can construct another list by using the syntax below, also called a list comprehension. asquaredlist=[i*i for i in alist] asquaredlist # And then we can again make a scatterplot plt.scatter(alist, asquaredlist); print type(alist) # In other words, something is a duck if it quacks like a duck. 
A Pandas series quacks like a python list. They both support something called the iterator protocol, an notion of behaving in a "listy" way. And Python functions like `plt.scatter` will accept anything that behaves listy. Indeed here's one more example: plt.hist(df.rating_count.values, bins=100, alpha=0.5); print type(df.rating_count), type(df.rating_count.values) # Series and numpy lists behave similarly as well. # # # ### Vectorization # # Numpy arrays are a bit different from regular python lists, and are the bread and butter of data science. Pandas Series are built atop them. alist + alist np.array(alist) np.array(alist)+np.array(alist) np.array(alist)**2 # In other words, operations on numpy arrays, and by extension, Pandas Series, are **vectorized**. You can add two numpy lists by just using `+` whereas the result isnt what you might expect for regular python lists. To add regular python lists elementwise, you will need to use a loop: newlist=[] for item in alist: newlist.append(item+item) newlist # **Vectorization** is a powerful idiom, and we will use it a lot in this class. And, for almost all data intensive computing, we will use numpy arrays rather than python lists, as the python numerical stack is based on it. # # You have seen this in idea in spreadsheets where you add an entire column to another one. # # Two final examples # + a=np.array([1,2,3,4,5]) print type(a) b=np.array([1,2,3,4,5]) print a*b # - a+1
9,287
/.ipynb_checkpoints/P1_giladgressel-checkpoint.ipynb
ed110461eb1572c4c3e612342b3116783d89aade
[]
no_license
giladgressel/CarND-LaneLines-P1
https://github.com/giladgressel/CarND-LaneLines-P1
1
0
null
2016-10-31T14:30:06
2016-10-31T12:57:53
null
Jupyter Notebook
false
false
.py
496,305
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ### Imports # Import the required libraries # + # package(s) related to time, space and id import logging import datetime, time import platform import itertools # you need these dependencies (you can get these from anaconda) # package(s) related to the simulation import simpy import pandas as pd # spatial libraries import pyproj import shapely.geometry from simplekml import Kml, Style # package(s) for data handling import numpy as np import matplotlib.pyplot as plt # OpenTNSim import opentnsim # Used for mathematical functions import math import tqdm # Used for making the graph to visualize our problem import networkx as nx import plotly.express as px from plotly.subplots import make_subplots #logging.basicConfig(level=logging.DEBUG) #turn on all the debug messages logging.basicConfig(level=logging.INFO)# turn off all the debug messages # - # ### Create graph # # + # specify a number of coordinate along your route (coords are: lon, lat) coords = [ [0,0], [1.715753,0], ] # for each edge (between above coordinates) specify the depth (m) depths = [6] # check of nr of coords and nr of depths align assert len(coords) == len(depths) + 1, 'nr of depths does not correspond to nr of coords' # + # create a graph based on coords and depths FG = nx.DiGraph() nodes = [] path = [] # add nodes Node = type('Site', (opentnsim.core.Identifiable, opentnsim.core.Locatable), {}) for index, coord in enumerate(coords): data_node = {"name": "Node " + str(index), "geometry": shapely.geometry.Point(coord[0], coord[1])} nodes.append(Node(**data_node)) positions = {} for node in nodes: positions[node.name] = (node.geometry.x, node.geometry.y) FG.add_node(node.name, geometry = node.geometry) # add edges path = [[nodes[i], nodes[i+1]] for i in range(len(nodes)-1)] 
for index, edge in enumerate(path): # For the energy consumption calculation we add info to the graph. We need depth info for resistance. # NB: the CalculateEnergy routine expects the graph to have "Info" that contains "GeneralDepth" # this may not be very generic! FG.add_edge(edge[0].name, edge[1].name, weight = 1, Info = {"GeneralDepth": depths[index]}) # toggle to undirected and back to directed to make sure all edges are two way traffic FG = FG.to_undirected() FG = FG.to_directed() # - # show graph nx.draw(FG, positions) plt.show() # This cell just shows that now a depth of 10 m is added to the edges. # You can use this to vary depths, e.g. make the middle edge shallower. Note that edges are bi-directional for edge in FG.edges: print('General depth for edge {} is {}'.format(edge, FG.get_edge_data(edge[0], edge[1])["Info"]["GeneralDepth"])) # ### Create vessel - add VesselProperties and ConsumesEnergy mixins # # Make your preferred class out of available mix-ins. TransportResource = type( "Vessel", ( opentnsim.core.Identifiable, opentnsim.core.Movable, opentnsim.core.VesselProperties, # needed to add vessel properties opentnsim.energy.ConsumesEnergy, opentnsim.core.ExtraMetadata, ), {}, ) # needed to calculate resistances # + # Create a dict with all important settings data_vessel = { "env": None, "name": 'Vessel', "route": None, "geometry": None, "v": 1, # m/s "type": None, "B": 11.4, "L": 110, "H_e": None, "H_f": None, "T": 3.7, "safety_margin": 0.3, # for tanker vessel with rocky bed the safety margin is recommended as 0.3 m "h_squat": True, # if consider the ship squatting while moving, set to True, otherwise set to False. Note that here we have disabled h_squat calculation since we regard the water depth h_0 is already reduced by squat effect. This applies to figures 3, 5, 7, 8 and 9. "payload":None, "vessel_type":"Tanker", #vessel types: "Container","Dry_SH","Dry_DH","Barge","Tanker". 
("Dry_SH" means dry bulk single hull, "Dry_DH" means dry bulk double hull) "P_installed": 1322, # kW "P_tot_given": None, # kW "bulbous_bow": False, # if a vessel has no bulbous_bow, set to False; otherwise set to True. "P_hotel_perc": 0, "P_hotel": None, # None: calculate P_hotel from percentage "x": 2,# number of propellers "L_w": 3.0 , "C_B":0.9, "C_year": 2000, } # - # ### Define paths # We are going to create a simulation in which a vessel moves from *Node 1* to *Node 4*. Therefore, we need to create a vessel to which we will need to append this paths. Therefore, first let's create the path. path = nx.dijkstra_path(FG, nodes[0].name, nodes[1].name) # ### Run simulation # def run_simulation(V_s): # Start simpy environment simulation_start = datetime.datetime.now() env = simpy.Environment(initial_time = time.mktime(simulation_start.timetuple())) env.epoch = time.mktime(simulation_start.timetuple()) # Add graph to environment env.FG = FG # Add environment and path to the vessel # create a fresh instance of vessel vessel = TransportResource(**data_vessel) vessel.env = env #the created environment vessel.name = 'Vessel No.1' vessel.route = path #the route (the sequence of nodes, as stored as the second column in the path) vessel.geometry = env.FG.nodes[path[0]]['geometry'] #a shapely.geometry.Point(lon,lat) (here taken as the starting node of the vessel) vessel.v = V_s # vessel.P_tot_given = P_tot_given # Start the simulation env.process(vessel.move()) env.run() return vessel input_data = {'V_s': [3.67]} input_data # + # create empty plot data plot_data = {} # loop through the various input data for index, value in enumerate(input_data['V_s']): # Run a basic simulation with V_s and P_tot_given combi vessel = run_simulation(input_data['V_s'][index]) # create an EnergyCalculation object and perform energy consumption calculation energycalculation = opentnsim.energy.EnergyCalculation(FG, vessel) energycalculation.calculate_energy_consumption() # create dataframe from 
energy calculation computation df = pd.DataFrame.from_dict(energycalculation.energy_use) # add/modify some comlums to suit our plotting needs df['fuel_kg_per_km'] = (df['total_diesel_consumption_C_year_ICE_mass']/1000) / (df['distance']/1000) # g/m --> kg/km df['CO2_g_per_km'] = (df['total_emission_CO2']) / (df['distance']/1000) df['PM10_g_per_km'] = (df['total_emission_PM10']) / (df['distance']/1000) df['NOx_g_per_km'] = (df['total_emission_NOX']) / (df['distance']/1000) label = 'V_s = ' + str(input_data['V_s'][index]) # Note that we make a dict to collect all plot data. # We use labels like ['V_s = None P_tot_given = 274 fuel_kg_km'] to organise the data in the dict # The [0, 0, 1, 1, 2, 2] below creates a list per section (I think this is the way you use it in your plot) # plot_data[label + ' v'] = list(df.distance[[0, 0, 1, 1, 2, 2]]/df.delta_t[[0, 0, 1, 1, 2, 2]]) # plot_data[label + ' P_tot'] = list(df.P_tot[[0, 0, 1, 1, 2, 2]]) plot_data[label + ' fuel_kg_per_km'] = list(df.fuel_kg_per_km[[0, 0, ]]) plot_data[label + ' CO2_g_per_km'] = list(df.CO2_g_per_km[[0, 0, ]]) plot_data[label + ' PM10_g_per_km'] = list(df.PM10_g_per_km[[0, 0, ]]) plot_data[label + ' NOx_g_per_km'] = list(df.NOx_g_per_km[[0, 0, ]]) # - # this is now in the plot data plot_data # + fig, axs = plt.subplots(5, 1, figsize=(10, 10), sharex=True,constrained_layout=True) fontsize=12 ax=plt.subplot(511) # Longitudinal section of the waterway plt.title('Rotterdam-Kampen, MTS110: L$_s$ = 110 m, B$_s$ = 11.4 m, T$_s$ = 3.7 m, payload = 2182 ton, P$_{installed}$ = 1322 kW',y=1.05) # these values you probably want to extract from the graph, or from the log (minimise the manual work!!) 
dist=[0,191] water_level=[2.3,2.3] bed_level=[1,1] draft=[3.7,3.7] plt.stackplot(dist,bed_level,water_level,draft, labels=['bed','water','actual draught'],colors=['#7f7053','#75bbfd','#95d0fc']) plt.ylabel('Longitudinal section \n of the waterway',fontsize=12,fontweight='bold',labelpad=25) plt.xlim(0, 191) plt.ylim(0, 14) plt.xticks( weight = 'bold') plt.yticks([1,3.3,7], weight = 'bold', fontsize=12) ax.legend(loc='upper left') ax.text(191*0.95, 12, '(a)', fontsize=12,weight = 'bold') ax=plt.subplot(512) # fuel (diesel) consumption kg/km # diesel_fv1=(plot_data['V_s = 3.0 P_tot_given = None fuel_kg_per_km']) # use values from the output_plot variable # diesel_fv2=(plot_data['V_s = 3.5 P_tot_given = None fuel_kg_per_km']) # diesel_fp1=(plot_data['V_s = None P_tot_given = 276 fuel_kg_per_km']) diesel_fp2=(plot_data['V_s = 3.67 fuel_kg_per_km']) # plt.plot(dist, diesel_fv1,':',color='#f1da7a',linewidth='2',label='Vs=3 m/s') # plt.plot(dist, diesel_fv2,'-.',color='#bf9005',linewidth='2',label='Vs=3.5 m/s') # plt.plot(dist, diesel_fp1,color='#f1da7a',linewidth='2',label='P=276 kW') plt.plot(dist, diesel_fp2,color='#bf9005',linewidth='2',label='Vs = 13.2 km/h') plt.ylabel('Fuel use\n (kg/km)',fontsize=12,fontweight='bold',labelpad=28) plt.ylim(0, 20) plt.xlim(0, 191) plt.xticks( weight = 'bold') plt.yticks( weight = 'bold', fontsize=12) ax.legend(loc='upper left') ax.text(191*0.95, 20*11/13, '(b)', fontsize=12,weight = 'bold') ax=plt.subplot(513) # CO2 emission rates g/km ax.legend(loc='upper left') # EMrCO2_fv1=(plot_data['V_s = 3.0 P_tot_given = None CO2_g_per_km']) # EMrCO2_fv2=(plot_data['V_s = 3.5 P_tot_given = None CO2_g_per_km']) # EMrCO2_fp1=(plot_data['V_s = None P_tot_given = 276 CO2_g_per_km']) EMrCO2_fp2=(plot_data['V_s = 3.67 CO2_g_per_km']) # plt.plot(dist, EMrCO2_fv1,':',color='yellow',linewidth='2',label='Vs=3 m/s') # plt.plot(dist, EMrCO2_fv2,'-.',color='orange',linewidth='2',label='Vs=3.5 m/s') # plt.plot(dist, 
EMrCO2_fp1,color='yellow',linewidth='2',label='P=276 kW') plt.plot(dist, EMrCO2_fp2,color='orange',linewidth='2',label='Vs = 13.2 km/h') ax.legend(loc='upper left') plt.ylabel('CO2 emission \n rate (g/km)',fontsize=12,fontweight='bold',labelpad=2) plt.ylim(0, 60000) plt.xlim(0, 191) plt.xticks( weight = 'bold') plt.yticks( weight = 'bold', fontsize=12) ax.text(191*0.95, 60000*11/13, '(c)', fontsize=12,weight = 'bold') ax=plt.subplot(514) # PM10 emission rates g/km # EMrPM10_fv1=(plot_data['V_s = 3.0 P_tot_given = None PM10_g_per_km']) # EMrPM10_fv2=(plot_data['V_s = 3.5 P_tot_given = None PM10_g_per_km']) # EMrPM10_fp1=(plot_data['V_s = None P_tot_given = 276 PM10_g_per_km']) EMrPM10_fp2=(plot_data['V_s = 3.67 PM10_g_per_km']) # plt.plot(dist, EMrPM10_fv1,':',color='paleturquoise',linewidth='2',label='Vs=3 m/s') # plt.plot(dist, EMrPM10_fv2,'-.',color='mediumseagreen',linewidth='2',label='Vs=3.5 m/s') # plt.plot(dist, EMrPM10_fp1,color='paleturquoise',linewidth='2',label='P=276 kW') plt.plot(dist, EMrPM10_fp2,color='mediumseagreen',linewidth='2',label='Vs = 13.2 km/h') ax.legend(loc='upper left') plt.ylabel('PM10 emission \n rate (g/km)',fontsize=12,fontweight='bold',labelpad=26) plt.ylim(0, 40) plt.xlim(0, 191) plt.xticks( weight = 'bold') plt.yticks( weight = 'bold', fontsize=12) ax.text(191*0.95, 40*11/13, '(d)', fontsize=12,weight = 'bold') ax=plt.subplot(515) # NOx emission rates g/km # EMrNOx_fv1=(plot_data['V_s = 3.0 P_tot_given = None NOx_g_per_km']) # EMrNOx_fv2=(plot_data['V_s = 3.5 P_tot_given = None NOx_g_per_km']) # EMrNOx_fp1=(plot_data['V_s = None P_tot_given = 276 NOx_g_per_km']) EMrNOx_fp2=(plot_data['V_s = 3.67 NOx_g_per_km']) # plt.plot(dist, EMrNOx_fv1,':',color='pink',linewidth='2',label='Vs=3 m/s') # plt.plot(dist, EMrNOx_fv2,'-.',color='deeppink',linewidth='2',label='Vs=3.5 m/s') # plt.plot(dist, EMrNOx_fp1,color='pink',linewidth='2',label='P=276 kW') plt.plot(dist, EMrNOx_fp2,color='deeppink',linewidth='2',label='Vs = 13.2 km/h') 
plt.ylabel('NOx emission \n rate (g/km)',fontsize=12,fontweight='bold',labelpad=10) plt.xlabel('distance (km)',fontsize=12,fontweight='bold',labelpad=3) plt.ylim(0, 1000) plt.xlim(0, 191) ax.legend(loc='upper left') plt.xticks([0,191], weight = 'bold', fontsize=12) plt.yticks( weight = 'bold', fontsize=12) ax.text(191*0.95, 1000*11/13, '(e)', fontsize=12,weight = 'bold') plt.savefig('n15_Rotterdam_Kampen_MTS110.pdf',bbox_inches = 'tight', dpi=600, format='pdf') # - # ### kg/km, g/km # + jupyter={"source_hidden": true} df['fuel_kg_per_km'] = (df['total_diesel_consumption_C_year_ICE_mass']/1000) / (df['distance']/1000) # g/m --> kg/km df['CO2_g_per_km'] = (df['total_emission_CO2']) / (df['distance']/1000) df['PM10_g_per_km'] = (df['total_emission_PM10']) / (df['distance']/1000) df['NOx_g_per_km'] = (df['total_emission_NOX']) / (df['distance']/1000) # + jupyter={"source_hidden": true} df_km=df[['fuel_kg_per_km','CO2_g_per_km','PM10_g_per_km','NOx_g_per_km']] df_km # - # ### g/tkm in sections payload_ton = 2182 # + jupyter={"source_hidden": true} df['fuel_g_per_tkm']=(df['total_diesel_consumption_C_year_ICE_mass']) / (df['distance']/1000)/payload_ton df['CO2_g_per_tkm']=df['CO2_g_per_km']/payload_ton df['PM10_g_per_tkm']=df['PM10_g_per_km']/payload_ton df['NOx_g_per_tkm']=df['NOx_g_per_km']/payload_ton # + jupyter={"source_hidden": true} df_tkm=df[['fuel_g_per_tkm','CO2_g_per_tkm','PM10_g_per_tkm','NOx_g_per_tkm']] df_tkm # - # ### g/tkm mean value # + jupyter={"source_hidden": true} df_tkm_mean = df_tkm.mean() df_tkm_mean = pd.DataFrame(df_tkm_mean) df_tkm_mean.T # - # ### g/s df['fuel_g_per_sec'] = (df['total_diesel_consumption_C_year_ICE_mass']) / 3600 # kg/s df['CO2_g_per_sec'] = (df['total_emission_CO2']) / 3600 # g/s df['PM10_g_per_sec'] = (df['total_emission_PM10']) / 3600 # g/s df['NOx_g_per_sec'] = (df['total_emission_NOX']) / 3600 # g/s # + jupyter={"source_hidden": true} df_s=df[['fuel_g_per_sec','CO2_g_per_sec','PM10_g_per_sec','NOx_g_per_sec']] df_s # - # 
### ton, kg per round-trip (laden going and returning) # + jupyter={"source_hidden": true} df['fuel_ton']=(df['total_diesel_consumption_C_year_ICE_mass'].sum()/1000000)*2 #ton df['CO2_ton']=(df['total_emission_CO2'].sum()/1000000)*2 #ton df['PM10_kg']=(df['total_emission_PM10'].sum()/1000)*2 #kg df['NOx_kg']=(df['total_emission_NOX'].sum()/1000)*2 #kg # + jupyter={"source_hidden": true} df_roundtrip=df[['fuel_ton','CO2_ton','PM10_kg','NOx_kg']] df_roundtrip.head(1) # - # ### ton per year num_roundtrip = 97 # + jupyter={"source_hidden": true} df['fuel_ton_per_yr']=df['fuel_ton']*num_roundtrip df['CO2_ton_per_yr'] = df['CO2_ton']*num_roundtrip df['PM10_ton_per_yr'] = df['PM10_kg']*num_roundtrip/1000 df['NOx_ton_per_yr'] = df['NOx_kg']*num_roundtrip/1000 # - df_year = df[['fuel_ton_per_yr','CO2_ton_per_yr','PM10_ton_per_yr','NOx_ton_per_yr']] df_year.head(1) # ## alternatives in mass and volume per round-trip (laden going and returning) # + df['LH2_PEMFC_ton']=(df['total_LH2_consumption_PEMFC_mass'].sum()/1000000)*2 #ton df['LH2_SOFC_ton']=(df['total_LH2_consumption_SOFC_mass'].sum()/1000000)*2 #ton df['eLNG_PEMFC_ton']=(df['total_eLNG_consumption_PEMFC_mass'].sum()/1000000)*2 #ton df['eLNG_SOFC_ton']=(df['total_eLNG_consumption_SOFC_mass'].sum()/1000000)*2 #ton df['eLNG_ICE_ton']=(df['total_eLNG_consumption_ICE_mass'].sum()/1000000)*2 #ton df['eMethanol_PEMFC_ton']=(df['total_eMethanol_consumption_PEMFC_mass'].sum()/1000000)*2 #ton df['eMethanol_SOFC_ton']=(df['total_eMethanol_consumption_SOFC_mass'].sum()/1000000)*2 #ton df['eMethanol_ICE_ton']=(df['total_eMethanol_consumption_ICE_mass'].sum()/1000000)*2 #ton df['eNH3_PEMFC_ton']=(df['total_eNH3_consumption_PEMFC_mass'].sum()/1000000)*2 #ton df['eNH3_SOFC_ton']=(df['total_eNH3_consumption_SOFC_mass'].sum()/1000000)*2 #ton df['eNH3_ICE_ton']=(df['total_eNH3_consumption_ICE_mass'].sum()/1000000)*2 #ton df['Li_NMC_Battery_ton']=(df['total_Li_NMC_Battery_mass'].sum()/1000000)*2 #ton 
df_alternatives_mass_roundtrip=df[['LH2_PEMFC_ton','LH2_SOFC_ton','eLNG_PEMFC_ton','eLNG_SOFC_ton','eLNG_ICE_ton', 'eMethanol_PEMFC_ton','eMethanol_SOFC_ton','eMethanol_ICE_ton','eNH3_PEMFC_ton', 'eNH3_SOFC_ton','eNH3_ICE_ton','Li_NMC_Battery_ton']] df_alternatives_mass_roundtrip.head(1) # + df['LH2_PEMFC_m3']=(df['total_LH2_consumption_PEMFC_vol'].sum())*2 df['LH2_SOFC_m3']=(df['total_LH2_consumption_SOFC_vol'].sum())*2 df['eLNG_PEMFC_m3']=(df['total_eLNG_consumption_PEMFC_vol'].sum())*2 df['eLNG_SOFC_m3']=(df['total_eLNG_consumption_SOFC_vol'].sum())*2 df['eLNG_ICE_m3']=(df['total_eLNG_consumption_ICE_vol'].sum())*2 df['eMethanol_PEMFC_m3']=(df['total_eMethanol_consumption_PEMFC_vol'].sum())*2 df['eMethanol_SOFC_m3']=(df['total_eMethanol_consumption_SOFC_vol'].sum())*2 df['eMethanol_ICE_m3']=(df['total_eMethanol_consumption_ICE_vol'].sum())*2 df['eNH3_PEMFC_m3']=(df['total_eNH3_consumption_PEMFC_vol'].sum())*2 df['eNH3_SOFC_m3']=(df['total_eNH3_consumption_SOFC_vol'].sum())*2 df['eNH3_ICE_m3']=(df['total_eNH3_consumption_ICE_vol'].sum())*2 df['Li_NMC_Battery_m3']=(df['total_Li_NMC_Battery_vol'].sum())*2 df_alternatives_vol_roundtrip=df[['LH2_PEMFC_m3','LH2_SOFC_m3','eLNG_PEMFC_m3','eLNG_SOFC_m3','eLNG_ICE_m3', 'eMethanol_PEMFC_m3','eMethanol_SOFC_m3','eMethanol_ICE_m3','eNH3_PEMFC_m3', 'eNH3_SOFC_m3','eNH3_ICE_m3','Li_NMC_Battery_m3']] df_alternatives_vol_roundtrip.head(1) # - # ## alternatives in ton per year # + df['LH2_PEMFC_ton_per_year']= df['LH2_PEMFC_ton']*num_roundtrip df['LH2_SOFC_ton_per_year']= df['LH2_SOFC_ton']*num_roundtrip df['eLNG_PEMFC_ton_per_year']= df['eLNG_PEMFC_ton']*num_roundtrip df['eLNG_SOFC_ton_per_year']= df['eLNG_SOFC_ton']*num_roundtrip df['eLNG_ICE_ton_per_year']= df['eLNG_ICE_ton']*num_roundtrip df['eMethanol_PEMFC_ton_per_year']= df['eMethanol_PEMFC_ton']*num_roundtrip df['eMethanol_SOFC_ton_per_year']= df['eMethanol_SOFC_ton']*num_roundtrip df['eMethanol_ICE_ton_per_year']= df['eMethanol_ICE_ton']*num_roundtrip 
df['eNH3_PEMFC_ton_per_year']= df['eNH3_PEMFC_ton']*num_roundtrip df['eNH3_SOFC_ton_per_year']= df['eNH3_SOFC_ton']*num_roundtrip df['eNH3_ICE_ton_per_year']= df['eNH3_ICE_ton']*num_roundtrip df['Li_NMC_Battery_ton_per_year']= df['Li_NMC_Battery_ton']*num_roundtrip df_alternatives_mass_per_year=df[['LH2_PEMFC_ton_per_year','LH2_SOFC_ton_per_year','eLNG_PEMFC_ton_per_year','eLNG_SOFC_ton_per_year','eLNG_ICE_ton_per_year', 'eMethanol_PEMFC_ton_per_year','eMethanol_SOFC_ton_per_year','eMethanol_ICE_ton_per_year','eNH3_PEMFC_ton_per_year', 'eNH3_SOFC_ton_per_year','eNH3_ICE_ton_per_year','Li_NMC_Battery_ton_per_year']] df_alternatives_mass_per_year.head(1)
19,139
/01 Python-Programming/98 Python Datatype Memory allocation/98 Python Memory Allocation.ipynb
ed86af5d7a4a6b2e5b12f55b8dd97673cf7b5ac9
[]
no_license
fenilmaisuria/Python
https://github.com/fenilmaisuria/Python
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
12,818
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python Datatype Memory allocation # > The sys.getsizeof() Built-in Function The standard library's sys module provides the [getsizeof()] # # > The size of an object in bytes import sys # ### **Integer Size** # Integer Size sys.getsizeof(int()) i = int(0) print(i,type(i)) print('Integer value with 0 : ',sys.getsizeof(i)) i = 1 print('Integer value with 1 : ',sys.getsizeof(i)) # --- # ### **Float Size** # Float Size sys.getsizeof(float()) f = float(0) print(f,type(f)) print('Float value with 0 : ',sys.getsizeof(f)) f = 5.5 print('Float value with 5.5 : ',sys.getsizeof(f)) # --- # ### **Complex Size** # Complex Size sys.getsizeof(complex()) c = complex(3.14j) print(c,type(c)) print('Complex value with 1j : ',sys.getsizeof(c)) # --- # ### **List Size** # List Size sys.getsizeof(list()) l = ["ML", "DL", "NLP","CV"] print(l,type(l)) print('List Memory Size : ',sys.getsizeof(l)) # --- # ### Tuple Size # Tuple Size sys.getsizeof(tuple()) t = ("apple", "banana", "orange") print(t,type(t)) print('Tuple Memory Size : ',sys.getsizeof(t)) # --- # ### Range Size # Range Size sys.getsizeof(range(10)) r = range(6) print(r,type(r)) print('Range Memory Size : ',sys.getsizeof(r)) # --- # ### Dictionary Size # Dictionary Size sys.getsizeof(dict()) d = {"name" : "Fenil", "EnrollmentNo" : 54} print(d,type(d)) print('Dictionary Memory Size : ',sys.getsizeof(d)) # --- # ### Set Size # Set Size sys.getsizeof(set()) s = {"apple", "banana", "orange"} print(s,type(s)) print('Set Memory Size : ',sys.getsizeof(s)) # --- # ### Frozenset Size # Frozenset Size sys.getsizeof(frozenset()) fs = frozenset({"apple", "banana", "orange"}) print(fs,type(fs)) print('Frozenset Memory Size : ',sys.getsizeof(fs)) # --- # ### Boolean Size # Boolean Size sys.getsizeof(bool()) b = True 
print(b,type(b)) print('Boolean Memory Size : ',sys.getsizeof(b)) # --- # ### Binary Size # Binary Size sys.getsizeof(bytes()) bn = b"Hello" print(bn,type(bn)) print('Binary Memory Size : ',sys.getsizeof(bn)) # Binary Array Size sys.getsizeof(bytearray()) ba = bytearray(5) print(ba,type(ba)) print('Binary Array Memory Size : ',sys.getsizeof(ba)) # MemoryView Size sys.getsizeof(memoryview(bytes(5))) mv = memoryview(bytes(10)) print(mv,type(mv)) print('Binary Array Memory Size : ',sys.getsizeof(mv)) et" model to perform semantic segmentation of relatively noisy atomic images (i.e. separate different atomic species from everything else), which can be used to quickly identify positions of all atoms. We note that this approach is not limited to blobs and can be used to identify different classes of vacancy defects in 2D materials, as well as for mesoscale images. # # Here, we are going to use a training set prepared from a large (~3000 px x 3000 px) experimental scanning transmission electron microscopy image of BiFeO$_3$ with various distortions where every atom was "labeled" using the information about its (x, y) coordinates. You can find a description of how the training set was generated [here](https://colab.research.google.com/github/ziatdinovmax/atomai/blob/master/examples/notebooks/atomai_training_data.ipynb#scrollTo=C6_FS-KAEh9s). Alternatively, one can use simulated data. The goal is to train a neural network that could generalize and be used to perform fast identification of atomic positions on various kind of images with different noise levels from the same (or similar) system. # # **Note:** It is always a good idea to train your model on a diverse training set (e.g. many labeled expermental images, or simulated data for a variety of atomic structures/positions). Here we used a training set made from a single image just to show a *quick* example of how this works. Such a model will not generalize very well. 
# # --- # # # + [markdown] id="JHeeOSZmX88o" # Imports: # + id="FaaN7T5a2ORP" import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import matplotlib.pyplot as plt import os import warnings warnings.filterwarnings("ignore", module="torch.nn.functional") # + [markdown] id="alOdwe8ZSszX" # Define a helper function that set seeds for generating random numbers (for reproducibility): # + id="gZtKXst9SOGu" def rng_seed(seed): torch.manual_seed(seed) np.random.seed(seed) torch.cuda.empty_cache() torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # + [markdown] id="X0TvG2KxRAU4" # # + [markdown] id="B2RZ2DQa5ooh" # Download training data: # # (can take from one to several minutes, depending on the internet connection) # + id="MdU3seSk5rqT" colab={"base_uri": "https://localhost:8080/"} outputId="3cdac9e0-42c2-482a-af36-b25d10aa9a34" download_link = 'https://drive.google.com/uc?id=1-4-IQ71m--OelQb1891GnbG1Ako1-DKh' # !gdown -q $download_link -O 'training_data-m.npy' dataset = np.load('training_data-m.npy') images_all = dataset['X_train'] labels_all = dataset['y_train'] images_test_all = dataset['X_test'] labels_test_all = dataset['y_test'] print(images_all.shape, labels_all.shape) # + [markdown] id="7ctUGOhn_xc1" # The training data is based on a single labeled experimental STEM image of Sm-doped BFO containing ~20,000 atomic unit cells (see *arXiv:2002.04245* and the associated notebook). The original image was ~3000 px x 3000 px. We randomly cropped about 2000 image-label pairs of 256 x 256 resolution and then applied different image "distortions" (noise, blurring, zoom, etc.) to each cropped image, treating two atomic sublattices as two different classes. 
The training/test images and labels represent 4 separate numpy arrays with the dimensions ```(n_images, n_channels=1, image_height, image_width)``` for training/test images, and ```(n_images, image_height, image_width)``` for the associated labels. The reason that our images have 4 dimensions, while our labels have only 3 dimensions is because of how the cross-entropy loss is calculated in PyTorch (see [here](https://pytorch.org/docs/stable/nn.html#nllloss)). Basically, if you have multiple channels corresponding to different classes in your labeled data, you'll need to [map your target classes to tensor indices](https://discuss.pytorch.org/t/multi-class-cross-entropy-loss-function-implementation-in-pytorch/19077/20). Here, we already did this for our labeled data during the training data preparation stage and so everything is ready for training. # # Plot some image-label pairs: # + id="ewAt1n_Ybn7V" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="ee291f4c-67e8-435a-884c-0547988043c3" n = 5 # number of images to plot n = n + 1 fig = plt.figure( figsize=(30, 8)) for i in range(1, n): ax = fig.add_subplot(2, n, i) ax.imshow(images_all[i-1,0,:,:], cmap='gray') ax.set_title('Augmented image ' + str(i)) ax.grid(alpha = 0.5) ax = fig.add_subplot(2, n, i+n) ax.imshow(labels_all[i-1], interpolation='Gaussian', cmap='jet') ax.set_title('Ground truth ' + str(i)) ax.grid(alpha=0.75) # + [markdown] id="xH3NHQt5_0qv" # Let's start constucting a deep fully convolutional neural network for "semantic" segmentation of atomic images. The neural network will be trained to take the images in the top row (see plot above) as the input and to output clean images of circular-shaped "blobs" on a uniform background in the bottom row, from which one can identify the xy centers of atoms. 
# # First we write two classes that will define two important blocks of our neural network, namely, *conv2dblock* (convolutional layer followed by [leaky ReLU](https://towardsdatascience.com/activation-functions-and-its-types-which-is-better-a9a5310cc8f) activation with optional dropout (before activation) and/or batch normalization (after activation) layers) and *upsample_block* (upsampling either with a bilinear or nearest interpolation followed by one-by-one convolutional layer). # <br><br> # Notice that in PyTorch we have to specify number of input and outpur channels and take care of image size padding (we cannot just use padding = 'same' like we can do in e.g. Tensorflow). This formula will be very useful: # # $o = [i + 2*p - k - (k-1)*(d-1)]/s + 1$, # # where *o* is output, *p* is padding, *k* is kernel_size, *s* is stride and *d* is dilation. # <br><br> # Each block is defined in two steps. First, we specify model parameters using ```__init__```, and then outline how they are applied to the inputs using ```forward```. # + id="_WkffvP5Disz" class conv2dblock(nn.Module): ''' Creates block(s) consisting of convolutional layer, leaky relu and (optionally) dropout and batch normalization ''' def __init__(self, nb_layers, input_channels, output_channels, kernel_size=3, stride=1, padding=1, use_batchnorm=False, lrelu_a=0.01, dropout_=0): '''Initializes module parameters''' super(conv2dblock, self).__init__() block = [] for idx in range(nb_layers): input_channels = output_channels if idx > 0 else input_channels block.append(nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=padding)) if dropout_ > 0: block.append(nn.Dropout(dropout_)) block.append(nn.LeakyReLU(negative_slope=lrelu_a)) if use_batchnorm: block.append(nn.BatchNorm2d(output_channels)) self.block = nn.Sequential(*block) def forward(self, x): '''Forward path''' output = self.block(x) return output class upsample_block(nn.Module): ''' Defines upsampling block. 
The upsampling is performed using bilinear or nearest interpolation followed by 1-by-1 convolution (the latter can be used to reduce a number of feature channels). ''' def __init__(self, input_channels, output_channels, scale_factor=2, mode="bilinear"): '''Initializes module parameters''' super(upsample_block, self).__init__() assert mode == 'bilinear' or mode == 'nearest',\ "use 'bilinear' or 'nearest' for upsampling mode" self.scale_factor = scale_factor self.mode = mode self.conv = nn.Conv2d( input_channels, output_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): '''Defines a forward path''' x = F.interpolate( x, scale_factor=self.scale_factor, mode=self.mode) return self.conv(x) # + [markdown] id="gv6kauOtFFXp" # Now let's build our model. It will have a contracting path consisting of regular convolutional operations followed by max-pooling operation, a bottleneck layer, and an expanding path with bilinear upsampling that has the same convolutional blocks as the contracting path but in a reversed order. The skip connections provide a flow of information between contracting and expanding pathes. The final layer provides tensor of raw (non-normalized) predictions, which is then passed to a softmax normalization function for a pixel-wise classification of the input data (it is also possible to just add the softmax "layer" at the end of our model). # + id="4WL-RY4zEjXw" class smallUnet(nn.Module): ''' Builds a fully convolutional Unet-like neural network model Args: nb_classes: int number of classes in the ground truth nb_filters: int number of filters in 1st convolutional block (gets multibplied by 2 in each next block) use_dropout: bool use / not use dropout in the 3 inner layers batch_norm: bool use / not use batch normalization after each convolutional layer upsampling mode: str "bilinear" or "nearest" upsampling method. 
Bilinear is usually more accurate, but adds additional (small) randomness; for full reproducibility, consider using 'nearest' (this assumes that all other sources of randomness are fixed) ''' def __init__(self, nb_classes=3, nb_filters=16, use_dropout=False, batch_norm=True, upsampling_mode="nearest"): super(smallUnet, self).__init__() dropout_vals = [.1, .2, .1] if use_dropout else [0, 0, 0] self.c1 = conv2dblock( 1, 1, nb_filters, use_batchnorm=batch_norm ) self.c2 = conv2dblock( 2, nb_filters, nb_filters*2, use_batchnorm=batch_norm ) self.c3 = conv2dblock( 2, nb_filters*2, nb_filters*4, use_batchnorm=batch_norm, dropout_=dropout_vals[0] ) self.bn = conv2dblock( 3, nb_filters*4, nb_filters*8, use_batchnorm=batch_norm, dropout_=dropout_vals[1] ) self.upsample_block1 = upsample_block( nb_filters*8, nb_filters*4, mode=upsampling_mode) self.c4 = conv2dblock( 2, nb_filters*8, nb_filters*4, use_batchnorm=batch_norm, dropout_=dropout_vals[2] ) self.upsample_block2 = upsample_block( nb_filters*4, nb_filters*2, mode=upsampling_mode) self.c5 = conv2dblock( 2, nb_filters*4, nb_filters*2, use_batchnorm=batch_norm ) self.upsample_block3 = upsample_block( nb_filters*2, nb_filters, mode=upsampling_mode) self.c6 = conv2dblock( 1, nb_filters*2, nb_filters, use_batchnorm=batch_norm ) self.px = nn.Conv2d(nb_filters, nb_classes, 1, 1, 0) self.maxpool = F.max_pool2d self.concat = torch.cat def forward(self, x): '''Defines a forward path''' # Contracting path c1 = self.c1(x) d1 = self.maxpool(c1, kernel_size=2, stride=2) c2 = self.c2(d1) d2 = self.maxpool(c2, kernel_size=2, stride=2) c3 = self.c3(d2) d3 = self.maxpool(c3, kernel_size=2, stride=2) # Bottleneck layer bn = self.bn(d3) # Expanding path u3 = self.upsample_block1(bn) u3 = self.concat([c3, u3], dim=1) u3 = self.c4(u3) u2 = self.upsample_block2(u3) u2 = self.concat([c2, u2], dim=1) u2 = self.c5(u2) u1 = self.upsample_block3(u2) u1 = self.concat([c1, u1], dim=1) u1 = self.c6(u1) # Final layer used for pixel-wise convolution px 
= self.px(u1) return px # + [markdown] id="KdcUmBfpF2lR" # We can also vizualize our model's structure using *torchviz* package: # + id="y0c-qBqXF-Ja" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3809e1a2-eed4-4f2e-d2e3-79a8bcf39163" # !pip install -q torchviz from torchviz import make_dot model = smallUnet() x = torch.randn(1, 1, 256, 256).requires_grad_(True) y = model(x) make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)])) # + [markdown] id="6ye4bYRbIOGc" # Next we initialize our model, move it to GPU, and select weights optimizer and loss function. # # Notice that if we use a *softmax* operation as a final "layer" of our neural network then we should use ```torch.nn.NLLLoss()``` loss function instead of ```torch.nn.CrossEntropyLoss()``` used below (which combines a *softmax* operation and ```nn.NLLLoss()``` into a single class). However, the current understanding is that the torch.nn.NLLLoss() is a more numerically stable option. # + id="oF1n0Rat1vek" rng_seed(1) # (re)set seed for reproducibility # Initialize a model model = smallUnet() # move our model to GPU model.cuda() # specify loss function criterion = torch.nn.CrossEntropyLoss() # specify optimizer optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) # lr is a learniing rate # + [markdown] id="loZFQsRdHDD_" # Specify basic parameters for training loop: # + id="OzeZpTP8HA4a" batch_size = 32 # number of images in one batch epochs = 500 # Number of epochs (here 1 epoch == 1 batch) print_loss = 100 # print loss every m-th epoch. savedir = './' # dir to save results # + [markdown] id="KnhSdZtmHLDj" # Here we are going to feed one batch per epoch to our neural network during training (as opposed to feeding all the batches during a single epoch and then calculating the average loss). We therefore split our numpy arrays into four different python lists where each element in a list is a numpy array with the selected above batch size. 
# + id="MypqDjzjG8xy" n_train_batches, _ = np.divmod(labels_all.shape[0], batch_size) n_test_batches, _ = np.divmod(labels_test_all.shape[0], batch_size) images_all = np.split( images_all[:n_train_batches*batch_size], n_train_batches) labels_all = np.split( labels_all[:n_train_batches*batch_size], n_train_batches) images_test_all = np.split( images_test_all[:n_test_batches*batch_size], n_test_batches) labels_test_all = np.split( labels_test_all[:n_test_batches*batch_size], n_test_batches) # + [markdown] id="y4tWIynhuFCr" # Finally, we train our model: # + id="QLOCHZnG18hJ" colab={"base_uri": "https://localhost:8080/"} outputId="ce338d7d-c8b8-4b9c-95a0-8930bcca12ab" # Generate sequence of random numbers for batch selection during training/testing batch_ridx = [np.random.randint(0, len(images_all)) for _ in range(epochs)] batch_ridx_t = [np.random.randint(0, len(images_test_all)) for _ in range(epochs)] # Start training train_losses, test_losses = [], [] for e in range(epochs): model.train() # Generate batch of training images with corresponding ground truth images = images_all[batch_ridx[e]] labels = labels_all[batch_ridx[e]] # Transform images and ground truth to torch tensors and move to GPU images = torch.from_numpy(images).float() labels = torch.from_numpy(labels).long() images, labels = images.cuda(), labels.cuda() # Forward --> Backward --> Optimize optimizer.zero_grad() prob = model.forward(images) loss = criterion(prob, labels) loss.backward() optimizer.step() train_losses.append(loss.item()) # Now test the current model state using test data model.eval() # turn off batch norm and/or dropout units images_ = images_test_all[batch_ridx_t[e]] labels_ = labels_test_all[batch_ridx_t[e]] images_ = torch.from_numpy(images_).float() labels_ = torch.from_numpy(labels_).long() images_, labels_ = images_.cuda(), labels_.cuda() with torch.no_grad(): # deactivate autograd engine during testing (saves memory) prob = model.forward(images_) loss = criterion(prob, labels_) 
test_losses.append(loss.item()) # Print statistics if e == 0 or (e+1) % print_loss == 0: print('Epoch {} .... Training loss: {} .... Test loss: {}'.format( e+1, np.around(train_losses[-1], 8), np.around(test_losses[-1], 8)) ) # Save the best model weights if e > 100 and test_losses[-1] < min(test_losses[: -1]): torch.save(model.state_dict(), os.path.join(savedir, 'smallUnet-1-best_weights.pt')) # Save final weights torch.save(model.state_dict(), os.path.join(savedir, 'smallUnet-1-final_weights.pt')) # + [markdown] id="oyD0Vrk_Jes2" # In the interest of time, we trained our model only for 500 cycles/epochs. We plot training history below: # + id="enCnSWxDiCYA" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="5f9182a2-e767-4f7c-ba3d-325685a78b72" plt.plot(train_losses, label='train') plt.plot(test_losses, label='test') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() # + [markdown] id="OUFWGQFuuL5H" # Next we evaluate model performance on the test data. Notice that our model returns a tensor of raw (non-normalized) predictions, which are usually referred to as "logits". The ```torch.nn.functional.softmax``` function (```F.softmax``` in the cell below) converts these logits to "probabilities" for each class. # + id="ZOb3MF8wjd0B" colab={"base_uri": "https://localhost:8080/", "height": 385} outputId="b060ab0e-2493-4600-b21a-73e63ff9c00f" k, im = 6, 5 # batch, image test_img = images_test_all[k][im] test_lbl = labels_test_all[k][im] # Convert to 4D tensor (required, even if it is a single image) test_img = test_img[np.newaxis, ...] 
# Convert to pytorch format and move to GPU test_img_ = torch.from_numpy(test_img).float().cuda() # make a prediction prediction = model.forward(test_img_) prediction = F.softmax(prediction, dim=1).cpu().detach().numpy() prediction = np.transpose(prediction, [0, 2, 3, 1]) # rearange dimensions for plotting # plot results _, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6)) ax1.imshow(test_img[0,0], cmap='gray') ax2.imshow(prediction[0], Interpolation='Gaussian') ax1.set_title('Test image') ax2.set_title('Model prediction') # + [markdown] id="y8eLZ4Rmj_Ic" # The model prediction is a multichannel image where each channel corresponds to a different atomic class (+ background). In each channel, we have a set of circular-shaped "blobs" on a uniform background whose centers correspond to atomic centers (if the network was trained properly). What we have to do now is to convert pixel data from the network output into coordinate data. Below we define a class 'FindAtoms' that will do this. # + id="Zc_Ki8Q1arJG" import cv2 from scipy import ndimage class FindAtoms: ''' Transforms pixel data from NN output into coordinate data ''' def __init__(self, nn_output, threshold=0.5, dist_edge=5, dim_order='channel_last'): if nn_output.shape[-1] == 1: # Add background class for 1-channel data nn_output_b = 1 - nn_output nn_output = np.concatenate( (nn_output[:, :, :, None], nn_output_b[:, :, :, None]), axis=3) if dim_order == 'channel_first': # make channel dim the last dim nn_output = np.transpose(nn_output, (0, 2, 3, 1)) elif dim_order == 'channel_last': pass else: raise NotImplementedError( 'For dim_order, use "channel_first" (e.g. pytorch)', 'or "channel_last" (e.g. 
tensorflow)') self.nn_output = nn_output self.threshold = threshold self.dist_edge = dist_edge def get_all_coordinates(self): '''Extract all atomic coordinates in image via CoM method & store data as a dictionary (key: frame number)''' def find_com(image_data): '''Find atoms via center of mass methods''' labels, nlabels = ndimage.label(image_data) coordinates = np.array( ndimage.center_of_mass( image_data, labels, np.arange(nlabels) + 1)) coordinates = coordinates.reshape(coordinates.shape[0], 2) return coordinates d_coord = {} for i, decoded_img in enumerate(self.nn_output): coordinates = np.empty((0, 2)) category = np.empty((0, 1)) # we assume that class backgrpund is always the last one for ch in range(decoded_img.shape[2]-1): _, decoded_img_c = cv2.threshold( decoded_img[:, :, ch], self.threshold, 1, cv2.THRESH_BINARY) coord = find_com(decoded_img_c) coord_ch = self.rem_edge_coord(coord) category_ch = np.zeros((coord_ch.shape[0], 1)) + ch coordinates = np.append(coordinates, coord_ch, axis=0) category = np.append(category, category_ch, axis=0) d_coord[i] = np.concatenate((coordinates, category), axis=1) return d_coord def rem_edge_coord(self, coordinates): '''Remove coordinates at the image edges''' def coord_edges(coordinates, w, h): return [coordinates[0] > w - self.dist_edge, coordinates[0] < self.dist_edge, coordinates[1] > h - self.dist_edge, coordinates[1] < self.dist_edge] w, h = self.nn_output.shape[1:3] coord_to_rem = [ idx for idx, c in enumerate(coordinates) if any(coord_edges(c, w, h)) ] coord_to_rem = np.array(coord_to_rem, dtype=int) coordinates = np.delete(coordinates, coord_to_rem, axis=0) return coordinates # + [markdown] id="K8O84I_SoV7V" # Let's now transform our neural network prediction on the test image to coordinates: # + id="u3KHrT02cze_" coordinates = FindAtoms(prediction).get_all_coordinates() # + [markdown] id="u_V_9CYKqpyM" # Plot results: # + id="fh-rdagRoVFY" colab={"base_uri": "https://localhost:8080/", "height": 407} 
outputId="94f852ad-ccc5-4bcb-b198-5c94f7186798" y, x, c = coordinates[0].T plt.figure(figsize=(6, 6)) plt.imshow(test_img[0,0], cmap='gray') plt.scatter(x, y, c=c, cmap='RdYlGn', s=32); plt.title('Test data + predicted coordinates') # + [markdown] id="V_8VMaE-SGEv" # Now let's apply the trained model to experimental image from a diferent but similar system. The experimental image was obtained on LBFO system by Chris Nelson (CNMS ORNL) and published earlier in *Applied Physics Letters 115, 052902 (2019)*. # # (Down)load experimental data: # # + id="CzQfevSCokts" # !gdown -q https://drive.google.com/uc?id=1hdWfUdKFNLIEbadLpKFnEgEVF8KX1ANr expdata = np.load('lbfo_expdata.npy') # + [markdown] id="h0yAUWpdouCB" # Run decoding: # + id="tQ9XTYS7UwwL" # Convert to 4D tensor (required, even if it is a single image) expdata = expdata[None, None, ...] # Convert to pytorch format and move to GPU expdata_ = torch.from_numpy(expdata).float().cuda() # make a prediction prediction = model.forward(expdata_) prediction = F.softmax(prediction, dim=1).cpu().detach().numpy() prediction = np.transpose(prediction, [0, 2, 3, 1]) # get coordinates coordinates_e = FindAtoms(prediction).get_all_coordinates() # + [markdown] id="TG_HFmepSd_n" # Plot results: # + id="khAiYCNKQ5bc" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="806c8323-d010-4780-813d-1c45fbd159b0" y, x, c = coordinates_e[0].T plt.figure(figsize=(20, 20)) plt.imshow(expdata[0,0], cmap='gray') plt.scatter(x, y, c=c, cmap='RdYlGn', s=12) plt.title('Experimental data + predicted coordinates'); # + [markdown] id="W5803LRyRZqh" # Looks like our model performed quite well on the previously unseen data! There is no ground truth for the experimental data, so evaluating accuracy is somewhat tricky, but we can sometimes compare our results with the results of other methods. # # Notice that here we analyzed image with resolution of 1024x1024 even though our network was trained only using 256x256 images. 
That's the beauty of fully convolutional neural networks (i.e. network without full-connected, dense layers) - it is not sensitive to the size of input image as long as it can be divided by $2^{n}$ where n is a number of max-pooling layers in the network. That said, there is always some optimal pixel-to-angstrom ratio (or, roughly, number of pixels per atom/defect) at which a network will generate the best results. It might be a good idea to determine such a ratio and (if possible) make adjustments to the resolution of experimental images. # + [markdown] id="a2ZmewZySEL2" # Once we have all atomic coordiantes and "cleaned" images we can perform various forms of analysis on local image descriptors (for example, identifying different ferroic domains). # + id="CW9had_xVbH2"
28,123
/NW dashboard code main.ipynb
c20c8f70bfc7fcf7fa8e3eece604a284ff916bcd
[]
no_license
ckelly146/covid-19analysis
https://github.com/ckelly146/covid-19analysis
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
248,445
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # DIY Covid-19 Dashboard - North West region # # - Data from Public Health England (PHE) on Covid-19 # # There are 2 sections of data: # # - Part 1 looks at overall cases, hospital cases requiring ventilator support and deaths. # # - Part 2 looks at how age and gender affect cases. # # (This has been repeated for the region of London for comparison.) # # Cases # + import pandas as pd import matplotlib.pyplot as plt import json # an iPython "magic" that enables the embedding of matplotlib output # %matplotlib inline # make figures larger plt.rcParams['figure.dpi'] = 100 with open("NW.json", "rt") as INFILE: data=json.load(INFILE) # - datalist=data['data'] # sorts dates of data dates=[dictionary['date'] for dictionary in datalist ] dates.sort() def parse_date(datestring): """ Convert a date string into a pandas datetime object """ return pd.to_datetime(datestring, format="%Y-%m-%d") # produces startdata to enddata: 2020-02-11 00:00:00 to 2020-10-21 00:00:00 startdate=parse_date(dates[0]) enddate=parse_date(dates[-1]) # creates a template for data from start date to end date index=pd.date_range(startdate, enddate, freq='D') NWdf=pd.DataFrame(index=index, columns=['Cases', 'Ventilators', 'Deaths']) # + # inputs data for entry in datalist: # each entry is a dictionary with date, cases, ventilator cases and deaths date=parse_date(entry['date']) for column in ['Cases', 'Ventilators', 'Deaths']: if pd.isna(NWdf.loc[date, column]): value= float(entry[column]) if entry[column]!=None else 0.0 # replace None with 0 in our data # and put index,column in a single set of [ ] NWdf.loc[date, column]=value #.loc accesses a specific location in the dataframe # fill in any remaining "holes" due to missing dates NWdf.fillna(0.0, inplace=True) # - # plot and style graph 
import seaborn as sns sns.set_style("dark") NWdf.plot() plt.title("North West Covid-19 Cases and Deaths 2020") plt.ylabel("Cases") import seaborn as sns sns.set_style("dark") NWdf.plot(logy=True) plt.title("North West Covid-19 Cases and Deaths 2020") plt.ylabel("Cases in Log") import ipywidgets as wdg import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # make figures larger plt.rcParams['figure.dpi'] = 100 # + # API access function to download current Public Health England data def access_api(button): print("Downloading data from the API...") print("...complete.") apibutton=wdg.Button( description='Refresh data', disabled=False, button_style='', tooltip='Download current Public Health England data here', icon='Download' ) # register the callback function with the button apibutton.on_click(access_api) # display the widgets display(apibutton) # - # ... and now the option to toggle between variables. # + # creates interactive graph series=wdg.SelectMultiple( options=['Cases', 'Ventilators', 'Deaths'], value=['Cases', 'Ventilators', 'Deaths'], rows=3, description='Stats:', disabled=False ) scale=wdg.RadioButtons( options=['linear', 'log'], description='Scale:', disabled=False ) controls=wdg.HBox([series, scale]) def timeseries_graph(gcols, gscale): if gscale=='linear': logscale=False else: logscale=True ncols=len(gcols) if ncols>0: NWdf[list(gcols)].plot(logy=logscale) else: print("Click to select data for graph") print("(CTRL-Click to select more than one category)") graph=wdg.interactive_output(timeseries_graph, {'gcols': series, 'gscale': scale}) display(controls, graph) # - # # Age and Gender import pandas as pd import matplotlib.pyplot as plt import json # %matplotlib inline plt.rcParams['figure.dpi'] = 100 # open age data file to read with open("NWage.json", "rt") as INFILE: data=json.load(INFILE) # create a dictionary for the data datadic=data['data'][0] # data['data'] is a list datalist=data['data'] Males=datadic['Males'] 
Females=datadic['Females'] # focusing only on Males to create age ranges as they have the same as Females ageranges=[x['age'] for x in Males] # definining age at which categories begin e.g. min_age of '85_to_90' = 85 def min_age(agerange): agerange=agerange.replace('+','') # remove the + from 90+ start=agerange.split('_')[0] return int(start) # sort age ranges into order ageranges.sort(key=min_age) ageNW_df=pd.DataFrame(index=ageranges, columns=['Males','Females', 'Total']) # + for entry in Males: # each entry is a dictionary ageband=entry['age'] # our index position ageNW_df.loc[ageband, 'Males']=entry['value'] for entry in Females: ageband=entry['age'] ageNW_df.loc[ageband, 'Females']=entry['value'] # total option ageNW_df['Total']=ageNW_df['Males']+ageNW_df['Females'] # ageNW_df produces data in table form # - import seaborn as sns sns.set_style("dark") ageNW_df.plot(kind='bar', y=['Males','Females','Total']) plt.title("North West Covid-19 Cases 2020 by Age and Gender") plt.xlabel("Age") plt.ylabel("Cases") import seaborn as sns sns.set_style("dark") ageNW_df.plot(kind='bar', y='Total') plt.title("North West Covid-19 Cases 2020 by Age") plt.xlabel("Age") plt.ylabel("Cases") # data manipulation to create pie chart MALETOTAL = sum(ageNW_df['Males']) print("Male cases: ",MALETOTAL) FEMALETOTAL =sum(ageNW_df['Females']) print("Female cases: ",FEMALETOTAL) # ... and now the interactive option comparing male, female and total data. # + # create pie chart import matplotlib.pyplot as plt labels = 'Males', 'Females' sizes = [MALETOTAL,FEMALETOTAL] explode = (0, 0.1) # explode one slide fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
plt.title("North West Covid-19 Cases 2020 by Gender") plt.show() # + agecols=wdg.SelectMultiple( options=['Males', 'Females', 'Total'], value=['Males', 'Females'], rows=3, # rows of the selection box description='Sex', disabled=False ) def age_graph(graphcolumns): # our callback function. ncols=len(graphcolumns) if ncols>0: ageNW_df.plot(kind='bar', y=list(graphcolumns)) else: # if the user has not selected any column, print a message instead print("Select data for graph above") print("(CTRL-Click or highlight with cursor to select multiple categories)") # keep calling age_graph(graphcolumns=value_of_agecols); capture output in variable output output=wdg.interactive_output(age_graph, {'graphcolumns': agecols}) display(agecols, output)
6,968
/updates/Ex 4 Part 2 of 3.ipynb
055d7e7372ad1033270d1e7a47a7da9962b2e6e3
[]
no_license
juliaknoblauch/adams
https://github.com/juliaknoblauch/adams
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
22,427
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regiment # ### Introduction: # # Special thanks to: http://chrisalbon.com/ for sharing the dataset and materials. # # ### Step 1. Import the necessary libraries import pandas as pd import numpy as np # ### Step 2. Create the DataFrame with the following values: raw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'], 'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'], 'name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze', 'Jacon', 'Ryaner', 'Sone', 'Sloan', 'Piger', 'Riani', 'Ali'], 'preTestScore': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3], 'postTestScore': [25, 94, 57, 62, 70, 25, 94, 57, 62, 70, 62, 70]} # ### Step 3. Assign it to a variable. # #### Don't forget to name each column df = pd.DataFrame(raw_data) # ### Step 4. What is the mean preTestScore from the regiment Nighthawks? df.head() df.groupby('regiment').mean() # ### Step 5. Present general statistics by company df.groupby('company').describe() # ### Step 6. What is the mean each company's preTestScore? df.groupby('company')['preTestScore'].mean() # ### Step 7. Present the mean preTestScores grouped by regiment and company df.groupby(['company', 'regiment'])['preTestScore'].mean() # ### Step 8. Present the mean preTestScores grouped by regiment and company without heirarchical indexing df.groupby(['company', 'regiment'])['preTestScore'].mean().unstack() # ### Step 9. Group the entire dataframe by regiment and company df.groupby(['regiment', 'company']).mean() # ### Step 10. What is the number of observations in each regiment and company df.groupby(['regiment', 'company']).size() # ### Step 11. 
Iterate over a group and print the name and the whole data from the regiment for name, group in df.groupby('regiment'): print(name) print(group) nLayer_size, inputLayer_size)) # Random weight initialization weightsHiddenToOutput = np.random.uniform(-limit, limit, (outputLayer_size, hiddenLayer_size)) # Random weight initialization biasInputToHidden = np.ones((hiddenLayer_size,1)) # Bias initialization biasHiddenToOutput = np.ones((outputLayer_size,1)) # Bias initialization # + # Activation function that we will use in this scenario and its derivative def ReLU(x): return np.maximum(0, x) # 0 if input is negative, x if input is positive def ReLU_derivative(x): return (ReLU(x) > 0).astype(int) # - # First forward pass inputs = np.array(X[5]).reshape((inputLayer_size, 1)) # observation 5 target = y[5].reshape((outputLayer_size, 1)) # true value of target hiddenLayer_inputs = np.dot(weightsInputToHidden, inputs) + biasInputToHidden # first hidden layer inputs hiddenLayer_outputs = ReLU(hiddenLayer_inputs) # hidden layer output (after activation) outputLayer_inputs = np.dot(weightsHiddenToOutput, hiddenLayer_outputs) + biasHiddenToOutput # input to output layer outputLayer_outputs = ReLU(outputLayer_inputs) # activation applied to output layer = prediction! outputLayer_outputs.shape # shape of first forward pass predictions # Let's compare the loss and target for this observation. We can see that the random weight initialization is pretty far off. print(outputLayer_outputs, target) # # Back Propagation # # ## Motivation: # With a set of random coefficients, would a prediction be any good? Probably not. However, the magic of neural networks comes from the method to update this complex system of weights. This is known as back propagation or backprop. # # ## Calculating loss # The first step in back propagation is to calculate the loss (remember: loss is another term for error) which can be done in several ways. 
For **regression**, we would concentrate on mean square error, mean absolute error or even others such as mean square log error. There are also many other loss functions which act similarly to these. After computing the loss, we then want to minimize it. # # Note that for some loss functions like the sigmoid for logistic regression, it is best to calculate the log likelihood. You can then derive this for maximization. # mean squared error def mse(true, pred): sse = 0 for i in range(len(true)): sse += (true[i] - pred[i])**2 mse = (1 / len(true)) * np.sum(sse) return mse # For now, let's calculate the loss from our first model: loss = mse(target, outputLayer_outputs) loss # ## Finding a minimum # Like a linear regression, we are working with input values along with layers of weights and a biases (= intercepts in the regression). Our loss function is a function of these three elements. Of these three elements, weights and bias are what we can adjust. So, let’s derive our loss function based on those. We will focus on deriving with respect to weights here, but the process for biases is similar. # # Here is the mean square error. # $$ MSE = \frac{1}{2n} \sum_{i=1}^n (\hat y - y)^2 $$ # Note that there is a 2 in the initial fraction. This is just for the convenience when we derive it later. # # <img src="MSElossderivation.PNG" alt="Derivative of MSE" style="width: 600px;"/> # # note that if $ w_0 $ is the intercept, we would set $ x=1 $. # # Consider: Our loss function is a function of the true value and our NN’s output. Our NN’s output is a function of the last nodes’ outputs. Our last nodes’ outputs are all a function of their own input values, weights and biases. We’re lucky that we can simply use the chain rule to calculate the derivatives for each parameter! We will then have to organize our derivatives into gradients which are vectors of derivatives of different variables from the same function. 
For full optimization, we will need to calculate this derivative for every observation as well. We will discuss a simplification technique for this called stochastic gradient descent later. # # So for each observation, we have to calculate: # # $$ \Bigg[ \frac{\partial MSE }{\partial w_0}, \frac{\partial MSE }{\partial w_1}, \frac{\partial MSE }{\partial w_2}, ... , \frac{\partial MSE }{\partial w_k} \Bigg] $$ # + def mse_derivative(true, pred, x): n = len(x) loss_d = (1/n) * np.sum((pred-true)*x) return loss_d.reshape(1, 1) def ReLU_derivative(x): return (x > 0)*1.0 # - loss_derivative = mse_derivative(y[5], outputLayer_outputs, X[5]) # loss derivative loss_derivative activation_derivative = ReLU_derivative(outputLayer_inputs) # activation derivative activation_derivative # Remember that the output layer's output is a function of the following: # - the output layer's inputs # - the hidden layer's outputs # - the hidden layer's inputs # - the original X values # # We will need to update all weights connecting these together. 
# + # Update weights furthest back in the network (between hidden and output layer) gradient_HiddenToOutput = np.dot(loss_derivative*activation_derivative, np.transpose(hiddenLayer_outputs)) gradient_HiddenToOutput.shape # Update output layer biases gradient_HiddenToOutput_bias = loss_derivative*activation_derivative # Save the error of the output layer outputLayer_errors = loss_derivative * activation_derivative # Find gradient for next step for backpropagation: gradient to update weights between hidden and input layer gradient_InputToHidden = np.dot(weightsHiddenToOutput.T, outputLayer_errors) print(f'Shape of first weights {weightsHiddenToOutput.shape}') # Next propagation backwards: derivative of the hidden layer output wrt the hidden layer input (ReLU derivative) gradient_InputToHidden = gradient_InputToHidden * ReLU_derivative(hiddenLayer_inputs) print(f'Shape of gradient vector {gradient_InputToHidden.shape}') # Last propagation: derivate of the hidden layer input wrt to the weight matrix connecting the hidden layer to inputs X gradient_InputToHidden = np.dot(gradient_InputToHidden, np.transpose(inputs)) print(f'Shape of input {inputs.shape}') # - # ## Moving towards a minimum # We could update all weights by trying to take a large step down the gradient. However, since there are so many combinations of weights and bias, this just reach one local minimum of many, or it may overshoot the local minimum. It could be that there is a lower local minimum that we cannot yet identify. If we take smaller steps down the gradient, update weights/biases, make predictiongradient_HiddenToOutputs again with these new values and recalculate the gradient, we may find a steeper path which indicates the existence of a lower minimum. This step size is called a learning rate. This slow and iterative way of updating weights is called gradient descent. # # $$ w_j' = w_j + lr \cdot \frac{\partial MSE }{\partial w_1} $$ # Below are a few graphics. 
The one on the left shows the example of trying to fit a curve to points. The figure on the right is a depiction of slowly moving down a error surface to find a minimum. # + language="html" # <iframe src="https://giphy.com/embed/8tvzvXhB3wcmI" width="1000" height="400" frameBorder="0" class="giphy-embed" allowFullScreen></iframe> # <p><a href="https://giphy.com/gifs/deep-learning-8tvzvXhB3wcmI">via GIPHY</a></p> # - # We can come back to our scenario and begin the process of updating our weights based on our learning rate * derivative. gradient_InputToHidden_bias = np.dot(weightsHiddenToOutput.T, outputLayer_errors) * ReLU_derivative(hiddenLayer_inputs) # + # Gradient descent step learningRate = 0.00001 # define some learning rate # Update weights between hidden and output layer (furthest back) weightsHiddenToOutput -= learningRate * gradient_HiddenToOutput # Update bias in output layer biasHiddenToOutput -= learningRate * gradient_HiddenToOutput_bias # Update weights between input and hidden layer (furthest forward) weightsInputToHidden -= learningRate * gradient_InputToHidden # Update bias in hidden layer biasInputToHidden -= learningRate * gradient_InputToHidden_bias # - # ## One solver for gradient descent: SGD # As you can imagine, the number of computations for this process can be quite large. It requires one derivative per observation per parameter per step! Stochastic gradient descent (SGD) is one way to greatly simplify this task. This method picks samples randomly and only calculates their derivatives for optimization. By the strictest definition, SGD picks one sample per optimization step, but **it is common to optimize with batches of observations**. This could lead to a more stable path to a local minimum. In the code below, we will select a new variable batch_size, which will randomly select samples to optimize parameters. # ## Stopping rules # What if we find the program is going through too many iterations with little improvement? 
We need to set a stopping rule to decide when the program should end. There are two very commonly chosen options: specifying a maximum number of epochs (= iterations) allowed or specifying a maximum allowed number of epochs without improvement in the accuracy of the validation set. # + learningRate = 0.00001 epochs = 10 # stopping rule, how many corrective iterations we will allow batch_size = 100 input_dim = X.shape[1] # number of variables output_dim = 1 # should be equal to 1 since we are only finding the predicted value for 1 regression # + iteration = 0 loss_log = [] while iteration < epochs: # Put all the steps done before in a loop: # Process one observation per iteration squared_residuals = np.zeros(X.shape[0]) # to keep track of the loss random_obs = random.sample(range(0, len(X)), batch_size) # randomly select a new sample of dataset for this iteration's optimization for obs in random_obs: inputs = np.array(X[obs]).reshape((input_dim, 1)) # select current case i target = y[obs].reshape((1, 1)) # and its target value # Compute the forward pass through the network all the way up to the final output hiddenLayer_inputs = np.dot(weightsInputToHidden, inputs) + biasInputToHidden hiddenLayer_outputs = ReLU(hiddenLayer_inputs) outputLayer_inputs = np.dot(weightsHiddenToOutput, hiddenLayer_outputs) + biasHiddenToOutput outputLayer_outputs = ReLU(outputLayer_inputs) # final output # Compute the gradients: # gradient for the weights between hidden and output layers gradient_HiddenToOutput = mse_derivative(y[obs], outputLayer_outputs, X[obs]) * ReLU_derivative(outputLayer_outputs) outputLayer_errors = gradient_HiddenToOutput # gradient for the weights between input and hidden layers gradient_InputToHidden = np.dot(weightsHiddenToOutput.T, outputLayer_errors) * ReLU_derivative(hiddenLayer_inputs) # Perform gradient descent update biasHiddenToOutput -= learningRate * gradient_HiddenToOutput biasInputToHidden -= learningRate * gradient_InputToHidden 
weightsHiddenToOutput -= learningRate * np.dot(gradient_HiddenToOutput, np.transpose(hiddenLayer_inputs)) weightsInputToHidden -= learningRate * np.dot(gradient_InputToHidden, np.transpose(inputs)) # Store residual squared_residuals[obs] = np.sum((target-outputLayer_outputs)**2) inputs = np.array(X).reshape((inputLayer_size, n)) # all observations target = y.reshape((outputLayer_size, n)) hiddenLayer_inputs = np.dot(weightsInputToHidden, inputs) + biasInputToHidden # first hidden layer inputs hiddenLayer_outputs = ReLU(hiddenLayer_inputs) # hidden layer output (after activation) outputLayer_inputs = np.dot(weightsHiddenToOutput, hiddenLayer_outputs) + biasHiddenToOutput # input to output layer outputLayer_outputs = ReLU(outputLayer_inputs) iteration_loss_square = mse(target, outputLayer_outputs) iteration_loss = math.sqrt(iteration_loss_square) loss_log.append(iteration_loss) # Development of the loss as average over obs-level losses print(f'Epoch {iteration} with average error of {iteration_loss:.2f}.') iteration += 1 # - # Look at that! As the epochs continue, we can see iterations show improvement in errors in the current random batch of observations. Important questions: # - Why could it be the case that the error is not continuously falling? # - What are some other good ideas for stopping rules for our algorithm? Or how can we manipulate the learning rate? # - Instead of looking at average loss per batch, could you think of a better way to measure model performance? # ## Overview of back propagation # So, after setting up the neural network, we need to: # - Calculate loss from the forward pass # - Calculate the gradient of random observations (SGD) # - Update weights by moving down the gradient with a step size specified by your learning rate # - Make new predictions with the new weights # - Repeat the these steps until you reach a stopping rule
15,167
/Titanic_project.ipynb
d37f3a8483ed8c89969e53337c9f513b80f27883
[]
no_license
anmol1512/Py-Data_analytics_bootcamp_project
https://github.com/anmol1512/Py-Data_analytics_bootcamp_project
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
78,602
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="wuIjmCE0Dhj4" # # **Importing Pandas/Numpy/Seaborn/Matplotlib** # # --- # # # 1.We will import pandas with `import` keyword # # 2.Pandas is a python package which consist mainly two data structure: # - Series # - DataFrame # # + id="IrPj_bS7Di3l" import pandas as pd import numpy as np import seaborn as sb import matplotlib.pyplot as plt # + [markdown] id="rVb9uwFvEIeU" # # **Loading Titanic_database csv file** # # --- # # # # >Now we will load our Titanic database .csv file into a pandas DataFrame by using pandas `read_csv()` function. # # >We will also look at first 5 rows of the database to have a look what type of data is actually present in which we need to analyze. # this can be achieved by `head()` method. # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="d9SnnEcTEKub" outputId="a75afde5-77dd-47d1-e050-4d86b2d6c912" passengers=pd.read_csv('/content/Titanic database.csv') passengers.head() # + [markdown] id="C8dGny6gFoTV" # **Description of the attributes of the dataset** # # - PassengerId -> Unique passenger id # - Survived -> Survival(0=NO;1=YES) # - Pclass -> Passenger Class(1=1st class;2=2nd class;3=3rd class) # - Name -> name of the passenger # - Sex -> Male or Female # - Age -> age of passenger # - SibSp -> number of passenger's sibling/spouse Aboard # - Parch -> number of passenger's parents/children Aboard # - Ticket -> passenger's ticket no. # - Fare -> Passenger's fare(British pound) # - Cabin -> Passenger's cabin no. 
# - Embarked -> port of embarkation(C=Cherbourg; Q=Queenstown; S=Southampton) # + [markdown] id="EmOQwnqMKdD9" # # **1.Relation b/w Survived attribute and Pclass attribute** # # --- # # # # # + [markdown] id="dHfCG47gxANF" # >Here `c1s0`(class 1 survival 0) is a 2 dimentional pandas DataFrame in which we used boolean indexing such that `c1s0` has two columns `Pclass` and `Survived` and the contents i.e values in each row is any one of the # combination:`(True,True)`;`(True,False)`;`(False,True)`;`(False,False)`. # # >Similarly just like `c1s0` `c2s0` and `c3s0` are also 2 dimentional pandas DataFrame. # # >`x1`,`x2`,`x2` are 1 dimentional panda series with multi indexes and consists only 4 rows in it bceause there are only 4 unique combination,so the multiindex are as follows: `(True,True)`;`(True,False)`;`(False,True)`;`(False,False)`. # + colab={"base_uri": "https://localhost:8080/"} id="Fq45XqFwMsBt" outputId="295060eb-dbe7-492f-8194-76574c2cd626" c1s0=passengers[['Pclass','Survived']]==[1,0] c2s0=passengers[['Pclass','Survived']]==[2,0] c3s0=passengers[['Pclass','Survived']]==[3,0] x1=c1s0.value_counts() x2=c2s0.value_counts() x3=c3s0.value_counts() print('the number of people who died and belong to 1st class:',x1[(True,True)]) print('the number of people who died and belong to 2nd class:',x2[(True,True)]) print('the number of people who died and belong to 3rd class:',x3[(True,True)]) # + [markdown] id="GQVfXxQypTxh" # **CONCLUSION:** # # >The number of people who died and belong to 3rd class is very more as compared to other classes i.e the died people percentage is greater in 3rd class. # # > Hence as a conclusion we can say the 3rd class was not constructed well and hence was more dangerous than other classes. # # # # # # # + [markdown] id="OwY2l4M9236l" # ## **Data Visualization of Relation b/w Survived attribute and Pclass attribute** # # > This can also be visualized using Bar garph with the help of seaborn's `countplot()` function. 
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="5kw6HiP6qfFN" outputId="d4f1f457-5e0f-4c92-b76f-921b91dcb0b2" i1={'died':[' no of 1st class passengers who died']*x1[(True,True)]} i2=[{'died':'no of 2nd class passengers who died'}]*x2[(True,True)] i3=[{'died':'no of 3rd class passengers who died'}]*x3[(True,True)] c1died=pd.DataFrame(i1) c2died=pd.DataFrame(i2) c3died=pd.DataFrame(i3) yaxis=c1died.append(c2died).append(c3died) sb.countplot(data=yaxis,y='died') # + [markdown] id="llpvrRRWdmds" # # **2.Relation b/w Survived attribute and Sex attribute** # # --- # + [markdown] id="zrPtBj1wD-oY" # # > Here first we check for `NaN `values with the help of `isna()` function in combination with `sum()` function. # # > Then we created two 2-dimentional Pandas DataFrame `sms0`(sex male survival 0) and `sfs0`(sex female survival 0) which contains True,False values i.e boolean values. # # > the variable `ms`(male survival) and `fs`(female survival) are two 1-dimentional pandas Series contains 4 rows each with muti indexs as `(True,True)`;`(True,False)`;`(False,True)`;`(False,False)`. # # # + colab={"base_uri": "https://localhost:8080/"} id="6PhzjiyzeFXL" outputId="6c425d5e-a644-45ea-e9c2-3e831297ca72" #first lets check that is there any Nan value in 'Sex' column x=passengers[['Sex','Survived']] if x.isna().sum()['Sex']==0: print('There is no NaN value') else: print('Sex column contains unknown values other than male and female') #Now lets analyse how many male died and how many female died. sms0=passengers[['Sex','Survived']]==['male',0] sfs0=passengers[['Sex','Survived']]==['female',0] ms=sms0.value_counts() fs=sfs0.value_counts() print('The number of male that died are:',ms[(True,True)]) print('The number of female that died are:',fs[(True,True)]) # + [markdown] id="Klf2uhSUngxZ" # **CONCLUSION:** # # > The number of male who died was more than the number of female who died with a huge difference. 
# # >Hence as a conclusion we can say male were more prone to death as compared to female. # + [markdown] id="4hGRkZCClbvi" # ## **Data Visualization of Relation b/w Survived attribute and Sex attribute** # # > This can also be visualized using Bar garph with the help of seaborn's `countplot()` function. # + colab={"base_uri": "https://localhost:8080/", "height": 298} id="IJkk2Il-lbFK" outputId="0ead4eed-9b9d-4390-bf91-47d0ff20f50e" item1={'died':['Male_died']*ms[(True,True)]} item2=[{'died':'Female_died'}]*fs[(True,True)] md=pd.DataFrame(item1) fd=pd.DataFrame(item2) xaxis=md.append(fd) sb.countplot(data=xaxis,x='died') # + [markdown] id="eJmcPoEdnKLC" # # **3.Relation b/w Survived attribute, SibSp attribute and Parch attribute** # # --- # # # + [markdown] id="a53wkN7P4u7K" # # > `new_df` is a new dataframe with columns `'SibSp','Parch','Survived'` which consist values either` True` or `False` in each column. # # > if for column `'SibSp','Parch','Survived'` the value is `True,True,True` respectively then he /she is alone and did not survived,similary if values are `False,False,True` respecively then he /she is not alone and did not survived # + colab={"base_uri": "https://localhost:8080/"} id="3QN7oRaEn63m" outputId="2f9336d0-0bb9-4a25-eb76-1cedeedbaac0" new_df=passengers[['SibSp','Parch','Survived']]==[0,0,0] alonedied=(new_df.value_counts())[(True,True,True)] notalonedied=(new_df.value_counts())[(False,False,True)] print('people who were alone and are dead:',alonedied) print('People who were with someone and are dead:',notalonedied) # + [markdown] id="L35EfbIu2sxY" # **CONCLUSION:** # # > People who were travelling alone and died are more than the people who were travelling with someone and died. # # > Hence as a conclusion we can say alone people were more prone to death rather than travelling with family. 
# + [markdown] id="jtrzwMcD3Wx1" # ## **Data Visualization of Relation b/w Survived attribute, SibSp attribute and Parch attribute** # # > This can also be visualized using Pie chart with the help of matplotlib's `pie()` function and to display the pie chat we use `show()` function. # # # # + colab={"base_uri": "https://localhost:8080/", "height": 248} id="mEYt7EYx3UgF" outputId="a150940d-9462-47c3-9d25-caac9e663e3a" visual=pd.Series(index=['Alone_died','Not_Alone_died'],data=[alonedied,notalonedied]) plt.pie(visual,labels=visual.index) plt.show() # + [markdown] id="bvUS55bgQieE" # # **4.Relation b/w Pclass attribute and Fare attribute** # # --- # + [markdown] id="lNcz69NJEJlH" # # > `c1f `(class 1 fare) is a 2 dimentional pandas DataFrame in which we have droped i.e deleted all the rows whose `'Pclass'` value is either 2 or 3,hence only those rows are there whose `'Pclass'` value is 1 # # > Similarly `c2f` and `c3f` are 2 dimentional pandas DataFramein which only those rows are there whose `'Pclass'` value is 2 and 3 respectively. 
# # + colab={"base_uri": "https://localhost:8080/"} id="0YEcw1jBJXmb" outputId="20a318ef-70a1-4193-b19f-7ce2b7d1680b" new=passengers[['Pclass','Fare']] indx1=new[(new['Pclass']==3) | (new['Pclass']==2)].index c1f=new.drop(indx1)#dropped /delete all the rows that contain 'Pclass' equal to 3 or 2 print('The average fare of 1st class passengers is:'+str(c1f['Fare'].sum()/c1f.shape[0])+' pound') indx2=new[(new['Pclass']==1) | (new['Pclass']==3)].index c2f=new.drop(indx2)#dropped /delete all the rows that contain 'Pclass' equal to 1 or 3 print('The average fare of 2nd class passengers is:'+str(c2f['Fare'].sum()/c2f.shape[0])+' pound') indx3=new[(new['Pclass']==1) | (new['Pclass']==2)].index c3f=new.drop(indx3)#dropped /delete all the rows that contain 'Pclass' equal to 1 or 2 print('The average fare of 3rd class passengers is:'+str(c3f['Fare'].sum()/c3f.shape[0])+' pound') # + [markdown] id="TCV9JvLIkObT" # **CONCLUSION:** # # > The average fare of 1st class passengers is very expensive as compared to 2ndclass and 3rd class.whereas,2nd class average fare and 3rd class average fare was very close to each other and also very cheap. # # > Hence as a conclusion 1st class passengers were very rich. # # # # # + [markdown] id="mmnEaFPwltj3" # ## **Data Visualization of Relation b/w Pclass attribute and Fare attribute** # # > This can also be visualized using Pie chart with the help of matplotlib's `pie()` function and to display the pie chat we use `show()` function.`colors` parameter is also used to set different colors for different labels. 
# # # + colab={"base_uri": "https://localhost:8080/", "height": 248} id="lctsC8ANiHim" outputId="337df0ad-b72d-46b2-c4e7-3880b1a4eb28" new_series=pd.Series([c1f['Fare'].sum()/c1f.shape[0],c2f['Fare'].sum()/c2f.shape[0],c3f['Fare'].sum()/c3f.shape[0]]) new_series.index=['AvgFare_of_1stclass','AvgFare_of_2ndclass','AvgFare_of_3rdclass'] plt.pie(new_series,labels=new_series.index,colors=['black','blue','green']) plt.show() # + [markdown] id="SIdzqLDVrLH1" # # **5.Relation b/w Survived attribute and Age attribute** # # --- # + [markdown] id="867doHW6ERAQ" # > First we eliminate all rows from `df` which contains `NaN`values and created dfnew then we eliminated all rows which has `'Survived'` value as 1. # # >After elimination of all passengers who lived we then eliminated all rows whose `'Age'` is gerater than equal to 18 to find how many young people died with the help of `age_0_18` variable # # >Similarly,from `dfnew` we eliminated all rows whose `'Age'` is between the range [18,60) to find how many mature people died with `age_18_60` variable # # > Similarly,from `dfnew` we eliminated all rows whose `'Age'` is greater than equal to 60 to find how many Senior passengers died with `age_60ab` variable # # + colab={"base_uri": "https://localhost:8080/"} id="U8ZYZik_re6M" outputId="8ec53c19-c455-41e3-b6ba-d0cee5531de5" df=passengers[['Survived','Age']] dfnew=df.dropna(axis=0)#dropped /delete all the rows that contain 'Age' value as NaN dfnew=dfnew.drop(dfnew[dfnew['Survived']==1].index) age_0_18=dfnew.drop(dfnew[dfnew['Age']>=18.0].index)#dropped /delete all the rows that contain 'Age' greater than equal to 18 print('The number of passengers whose age<18 and died in accident are:',age_0_18.shape[0]) age_18_60=dfnew.drop(dfnew[dfnew['Age']<18.0].index)#dropped /delete all the rows that contain 'Age' less than 18 age_18_60=dfnew.drop(dfnew[dfnew['Age']>=60.0].index)#dropped /delete all the rows that contain 'Age' greater than equal to 60 print('The number of passengers 
whose 18<=age<60 and died in accident are:',age_18_60.shape[0]) age_60ab=dfnew.drop(dfnew[dfnew['Age']<60.0].index)#dropped /delete all the rows that contain 'Age' less than 60 print('The number of passengers whose age>=60 and died in accident are:',age_60ab.shape[0]) # + [markdown] id="OZ1MZexL8rox" # **CONCLUSION:** # # # > Mature passengers(Age>=18 and Age<60) died more than other age group passengers. # # > Hence as a conclusion young passengers(Age<18 ) and Senior passengers(Age>=60) were more safe. # # # # # + [markdown] id="DHdvu7ORBBhs" # ## **Data Visualization of Relation b/w Age attribute and Survived attribute** # # > This can also be visualized using Pie chart with the help of matplotlib's `pie()` function and to display the pie chat we use `show()` function.`colors` parameter is also used to set different colors for different labels. # # # + colab={"base_uri": "https://localhost:8080/", "height": 248} id="e3VB1hQyBAYm" outputId="d0bbe173-68a2-462f-9d9a-db929bbb9199" ageseries=pd.Series([age_0_18.shape[0],age_18_60.shape[0],age_60ab.shape[0]]) ageseries.index=['Young passenger who died','Mature passenger who died','Senior passenger who died'] plt.pie(ageseries,labels=ageseries.index,colors=['blue','black','green']) plt.show() # + [markdown] id="vKgdEvW-CrQ1" # # **FINAL CONCLUSION** # # --- # * The number of people who died and belong to 3rd class is very more as compared to other classes i.e the died people percentage is greater in 3rd class. # # * The number of male who died was more than the number of female who died with a huge difference. # # * People who were travelling alone and died are more than the people who were travelling with someone and died. # # * The average fare of 1st class passengers is very expensive as compared to 2ndclass and 3rd class.whereas,2nd class average fare and 3rd class average fare was very close to each other and also very cheap. # # * Mature passengers(Age>=18 and Age<60) died more than other age group passengers. 
# # # # # # # # # #
14,174
/Medical_Exam.ipynb
47f8a72312509a54ab14744bfa21a96a57f4391f
[]
no_license
nist-anil/Medical-Data-Visualizer
https://github.com/nist-anil/Medical-Data-Visualizer
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
205,116
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline medical_exam=pd.read_csv(r"C:\Users\there\Desktop\DataAnalysis_Course\DataForDataAnalysis\medical_examination.csv") medical_exam.head() medical_exam.tail(3) medical_exam.isnull() print(medical_exam.isnull().any()) medical_exam.describe() # + # Data Cleaning # - # Find if dataset contain non-real values medical_exam[~medical_exam.applymap(np.isreal).all(1)] medical_exam.applymap(np.isreal) # - Normalize the data by making 0 always good and 1 always bad. If the value of cholesterol or gluc is 1, make the value 0. If the value is more than 1, make the value 1. medical_exam['cholesterol']= medical_exam['cholesterol'].apply(lambda x : 1 if x>1 else 0) medical_exam['gluc']= medical_exam['gluc'].apply(lambda x : 1 if x>1 else 0) medical_exam.head() # - Add an overweight column to the data. To determine if a person is overweight, first calculate their BMI by dividing their weight in kilograms by the square of their height in meters. If that value is > 25 then the person is overweight. Use the value 0 for NOT overweight and the value 1 for overweight. 
# + conditions = [ ((medical_exam['weight']/(medical_exam['height']/100)**2) > 25), ((medical_exam['weight']/(medical_exam['height']/100)**2) < 25) ] # create a list of the values we want to assign for each condition values = ['1', '0'] # create a new column and use np.select to assign values to it using our lists as arguments medical_exam['Overweight']=np.select(conditions, values) # display updated DataFram medical_exam.head() # - medical_exam.info() medical_exam.Overweight = medical_exam.Overweight.astype('float').astype('Int64') medical_exam['weight'] = medical_exam['weight'].astype(np.int64) medical_exam.info() medical_exam.loc[medical_exam['cholesterol']> 1, 'cholesterol'].count() medical_exam.loc[medical_exam['cholesterol']< 0, 'cholesterol'].count() medical_exam.loc[medical_exam['gluc']> 1, 'gluc'].count() medical_exam.loc[medical_exam['gluc']< 0, 'gluc'].count() # - Convert the data into long format and create a chart that shows the value counts of the categorical features using seaborn's catplot(). The dataset should be split by 'Cardio' so there is one chart for each cardio value. # Reshape Data medical_exam_r=pd.melt(medical_exam, id_vars=['cardio'], value_vars=['active','alco','cholesterol', 'gluc', 'Overweight', 'smoke'], value_name='Total') medical_exam_r medical_exam_r.columns # Catplot() import seaborn as sns sns.catplot(x='variable',hue='Total', col='cardio', kind='count', data=medical_exam_r) # ## Clean the data. 
Filter out the following patient segments that represent incorrect data: # - diastolic pressure is higher than systolic (Keep the correct data with (df['ap_lo'] <= df['ap_hi'])) # - Systolic blood pressure= ap_hi # Diastolic blood pressure= ap_lo # - height is less than the 2.5th percentile (Keep the correct data with (df['height'] >= df['height'].quantile(0.025))) # - height is more than the 97.5th percentile # - weight is less than the 2.5th percentile # - weight is more than the 97.5th percentile # medical_exam.head() medical_exam.describe() min_threshold_height,max_threshold_height= medical_exam.height.quantile([0.025,0.975]) min_threshold_height,max_threshold_height min_threshold_weight,max_threshold_weight= medical_exam.weight.quantile([0.025,0.975]) min_threshold_weight,max_threshold_weight medical_exam_filtered=medical_exam[(medical_exam['height']<max_threshold_height) & (medical_exam['height']>min_threshold_height) & (medical_exam['weight']<max_threshold_weight) & (medical_exam['weight']>min_threshold_weight) & (medical_exam['ap_lo']<=medical_exam['ap_hi'])] medical_exam_filtered medical_exam_filtered.sample(10) medical_exam_filtered.isnull().values.any() medical_exam_filtered.info() medical_exam_filtered.corr() medical_exam_filtered.info() # Mask upper matrix upp_mat = np.triu(medical_exam_filtered.corr()) # Fix the fif. size f, ax = plt.subplots(figsize=(16, 12)) # Plot heatmap cmap = sns.diverging_palette(230, 20, as_cmap=True) sns.heatmap(medical_exam_filtered.corr(), vmin = -.3, vmax = +.3, annot = True, cmap = 'RdBu', mask = upp_mat)
4,562
/date wine.ipynb
0f25645c1c6200ffc7e7e89c3b2e0a2ebb3ea418
[]
no_license
CrisESS/qm
https://github.com/CrisESS/qm
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
266,742
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn import datasets import pandas as pd import matplotlib.pyplot as plt #graficos import numpy as np wine=datasets.load_wine() wine wine_dataframe=pd.DataFrame(data=wine.data,columns=wine.feature_names) wine_dataframe # # Data Set Wine # Estos datos son el resultado de un análisis químico de vinos cultivados en la misma región en Italia pero derivados de tres cultivares diferentes. El análisis determinó las cantidades de 13 componentes encontrados en cada uno de los tres tipos de vinos. # # # Los atributos son: # 1) Alcohol # 2) ácido málico # 3) Ceniza # 4) Alcalinidad de ceniza # 5) Magnesio # 6) Fenoles totales # 7) Flavonoides # 8) Fenoles no flavonoides # 9) Proanthocyanins # 10) intensidad del color # 11) Hue # 12) OD280 / OD315 de vinos diluidos # 13) Proline x=wine_dataframe print(x.mean()) wine_dataframe["target"]=wine.target wine_dataframe x=wine_dataframe["alcohol"] print(x.mean()) y=wine_dataframe["malic_acid"] col=(wine_dataframe["target"]+1)*2 print(y.mean()) alcohol=wine_dataframe["alcohol"]*50 malic_acid=wine_dataframe["malic_acid"] plt.scatter(x,y,c=col,s=alcohol,alpha=0.5) plt.xlabel("alcohol(mL)")plt.scatter(x,y,c=col,s=flavanoids,alpha=0.5) plt.xlabel("flavanoids") plt.ylabel("magnesium") plt.title("flavanoids vs magnesium") plt.grid(True) plt.show() plt.ylabel("ácido maleico (mL)") plt.title("Alcohol vs ácido maleico") plt.grid(True) plt.show() df2=wine_dataframe.groupby(by="target").mean() df2.iloc(1) wine_dataframe["target"]=wine.target wine_dataframe x=wine_dataframe["flavanoids"] print(x.mean()) y=wine_dataframe["magnesium"] col=(wine_dataframe["target"]+1)*2 print(y.mean()) flavanoids=wine_dataframe["flavanoids"]*50 magnesium=wine_dataframe["magnesium"] print(x.std()) print(y.std()) 
plt.scatter(x,y,c=col,s=flavanoids,alpha=0.5) plt.xlabel("flavanoids") plt.ylabel("magnesium") plt.title("flavanoids vs magnesium") plt.grid(True) plt.show() wine_dataframe["target"]=wine.target wine_dataframe x=wine_dataframe["ash"] print(x.mean()) y=wine_dataframe["proline"] col=(wine_dataframe["target"]+1)*2 print(y.mean()) ash=wine_dataframe["ash"]*50 proline=wine_dataframe["proline"] print(x.std()) print(y.std()) plt.scatter(x,y,c=col,s=flavanoids,alpha=0.5) plt.xlabel("ash (%)") plt.ylabel("proline (%)") plt.title("ceniza vs prolina") plt.grid(True) plt.show() wine_dataframe.groupby(by="target").std()wine_dataframe.groupby(by="target").mean() wine_dataframe.groupby(by="target").mean()
2,733
/HeroesOfPymoli/main-checkpoint.ipynb
c7002f5e2b51ef43ceab9cc280e8a9e826499231
[]
no_license
caseyjuergens/pandas-challenge
https://github.com/caseyjuergens/pandas-challenge
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
43,362
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- g = (x+1 for x in range(10)) next(g) # + def fib(): n = [1, 1] i = 0 while True: m = n[i] + n[i + 1] n.append(m) i += 1 yield n f = fib() for i in range(20): print(f.__next__()) # + def myyield(n): while n > 0: print('start yield') yield n print('finidh yield') n -= 1 for i in myyield(4): print(i) m = myyield(3) print('first yield') m.__next__() print('second yield') m.__next__() # - pd.DataFrame({"Total Players": [total_players]}) total_players_summary # + #---Purchasing Analysis (total)----------------------- # calculate: no. of unique items, avg purchase price, total number of purchases #and total revenue. create a data frame. add styling #calculations unique_items=len(purchase_data["Item ID"].unique()) avg_price=purchase_data["Price"].mean() total_purchases=purchase_data["Purchase ID"].count() total_revenue=purchase_data["Price"].sum() #styling/formating format_avg_price = "${:.2f}".format(avg_price) format_total_revenue = "${:.2f}".format(total_revenue) #create dataframe purchasing_summary = pd.DataFrame({"Unique Items": unique_items, "Average Price": [format_avg_price], "Total Purchases": [total_purchases], "Total Revenue": [format_total_revenue]}) purchasing_summary # + #---Gender Demographics----------------------------------- #find the count and percentage of male/female/other gender_grouped = purchase_data.groupby("Gender") gender_counts = gender_grouped["SN"].nunique() gender_percent = (gender_counts/total_players).mul(100).round(1).astype(str) + "%" #create data frame gender_summary = pd.DataFrame({"Count": gender_counts, "Percentage": gender_percent}) gender_summary # + #---Purchasing Analysis (gender)------------------------- #create new dataframes grouped by gender males_df = purchase_data.loc[purchase_data["Gender"] == "Male",:] 
females_df = purchase_data.loc[purchase_data["Gender"] == "Female",:] other_df = purchase_data.loc[purchase_data["Gender"] == "Other / Non-Disclosed",:] #calculate: purchase count, avg purchase price, total purchase, avg purchase total #break it down by gender #create new data frame summarizing all above # + #Males......... males_purchase_count=len(males_df["Purchase ID"].value_counts()) males_avg_price=males_df["Price"].mean() males_total_purchases=males_df["Price"].sum() males_avg_total= (males_total_purchases/484) #484 is the number of total male players #formatting format_males_avg_price = "${:.2f}".format(males_avg_price) format_males_total_purchases = "${:.2f}".format(males_total_purchases) format_males_avg_total = "${:.2f}".format(males_avg_total) # + #Females............ females_purchase_count=len(females_df["Purchase ID"].value_counts()) females_avg_price=females_df["Price"].mean() females_total_purchases=females_df["Price"].sum() females_avg_total= (females_total_purchases/81) #81 is the number of total female players #formatting format_females_avg_price = "${:.2f}".format(females_avg_price) format_females_total_purchases = "${:.2f}".format(females_total_purchases) format_females_avg_total = "${:.2f}".format(females_avg_total) # + #other/Non-Disclosed........ 
other_purchase_count=len(other_df["Purchase ID"].value_counts()) other_avg_price=other_df["Price"].mean() other_total_purchases=other_df["Price"].sum() other_avg_total= (other_total_purchases/11) #11 is the number of total other players #formatting format_other_avg_price = "${:.2f}".format(other_avg_price) format_other_total_purchases = "${:.2f}".format(other_total_purchases) format_other_avg_total = "${:.2f}".format(other_avg_total) # + #create new data frame purchasing_summary_gender = pd.DataFrame({"Purchase Count": [males_purchase_count, females_purchase_count, other_purchase_count], "Average Purchase Price": [format_males_avg_price, format_females_avg_price, format_other_avg_price], "Total Purchase Value": [format_males_total_purchases, format_females_total_purchases, format_other_total_purchases], "Avg Total Purchase Per Person": [format_males_avg_total, format_females_avg_total, format_other_avg_total]}) purchasing_summary_gender.index= ["Males", "Females", "Other/ Non-Disclosed"] purchasing_summary_gender # + #---Age Demographics------------------------------------------- #create bins for age. Categorize players by age(pd.cut). calculate numbers/%s per bin #create new summary table bins=[0, 9, 14, 19, 24, 29, 34, 39, 50] age_bin_labels=["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40<"] purchase_data["Age Groups"]=pd.cut(purchase_data["Age"], bins, labels=age_bin_labels) age_groups= purchase_data.groupby("Age Groups") binned_age_counts=age_groups["SN"].nunique() binned_age_percent=(binned_age_counts/total_players)*100 #create summary data frame age_summary=pd.DataFrame({"Total Count": binned_age_counts, "Percentage of Players": binned_age_percent}) age_summary age_summary.style.format({"Percentage of Players": "{:.2f}%"}) # + #---Purchasing Analysis (Age)----------------------------------- #bin age groups. 
calculate purchase count, avg purchase price, total purchase volume, and avg total purchase per person #create summary data frame age_purchase_count = age_groups["Purchase ID"].count() age_avg_price = age_groups["Price"].mean() age_total_purchases = age_groups["Price"].sum() age_avg_total = (age_total_purchases/binned_age_counts) # + #create new summary data frame purchasing_age_analysis = pd.DataFrame({"Purchase Count": age_purchase_count, "Average Purchase Price": age_avg_price, "Total Purchase Value": age_total_purchases, "Avg Total Purchase per Person": age_avg_total}) purchasing_age_analysis purchasing_age_analysis.style.format({"Average Purchase Price": "${:.2f}", "Total Purchase Value": "${:.2f}", "Avg Total Purchase per Person": "${:.2f}"}) # + #---Top Spenders----------------------------------- #calculate purchace count, avg purchase price, and total purchase value for the top 5 spenders SN_group = purchase_data.groupby("SN") SN_purchase_count = SN_group["Age"].count() SN_avg_purchase_price = SN_group["Price"].mean() SN_total_purchases = SN_group["Price"].sum() #create summary data frame top_spenders_summary = pd.DataFrame({"Purchase Count": SN_purchase_count, "Average Purchase Price": SN_avg_purchase_price, "Total Purchase Value": SN_total_purchases}) #sort total purchase value in decending order top_spenders_summary = top_spenders_summary.sort_values("Total Purchase Value", ascending = False).head(5) top_spenders_summary.style.format({"Average Purchase Price": "${:.2f}", "Total Purchase Value": "${:.2f}"}) # + #---Most Popular Items----------------------------------- #retrieve item id, item name, and item price colums. group by item id and item name. 
#calculate purchase count, avg item price, total purchase value item_group = purchase_data.groupby(["Item ID", "Item Name"]) item_purchase_count = item_group["Age"].count() item_avg_purchase_price = item_group["Price"].mean() item_total_purchases = item_group["Price"].sum() #create summary data frame item_summary = pd.DataFrame({"Purchase Count": item_purchase_count, "Average Purchase Price": item_avg_purchase_price, "Total Purchase Value": item_total_purchases}) #sort by purchase count in descending order item_summary = item_summary.sort_values("Purchase Count", ascending = False).head(5) item_summary.style.format({"Average Purchase Price": "${:.2f}", "Total Purchase Value": "${:.2f}"}) # - #---Most Profitable Items----------------------------------- #sort above table in descending order by total purchase value item_summary = item_summary.sort_values("Total Purchase Value", ascending = False).head(5) item_summary item_summary.style.format({"Average Purchase Price": "${:.2f}", "Total Purchase Value": "${:.2f}"})
8,262
/Colab Notebooks/test_tpu.ipynb
643ac192afcfef1dd13342388b67ddd1def44c7e
[]
no_license
asit-pal/TIFR-H-Cavity-Ligand
https://github.com/asit-pal/TIFR-H-Cavity-Ligand
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
8,516
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="oA4GgdADgcXa" executionInfo={"status": "ok", "timestamp": 1614161341339, "user_tz": -330, "elapsed": 2911, "user": {"displayName": "Asit Pal", "photoUrl": "", "userId": "06025537331293253402"}} outputId="73574f39-0685-41f5-db93-796f2879e555" VERSION = "20200220" #@param ["20200220","nightly", "xrt==1.15.0"] # !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py # !python pytorch-xla-env-setup.py --version $VERSION # + colab={"base_uri": "https://localhost:8080/"} id="PzoF1dNegsWS" executionInfo={"status": "ok", "timestamp": 1614161914758, "user_tz": -330, "elapsed": 969, "user": {"displayName": "Asit Pal", "photoUrl": "", "userId": "06025537331293253402"}} outputId="b4c4bc0d-032c-4f03-97d0-1eca9a88006e" # !nvidia-smi # + colab={"base_uri": "https://localhost:8080/"} id="Kebdl0ZdhCzZ" executionInfo={"status": "ok", "timestamp": 1614162888488, "user_tz": -330, "elapsed": 144750, "user": {"displayName": "Asit Pal", "photoUrl": "", "userId": "06025537331293253402"}} outputId="3f5d430d-0e77-47ff-9370-52faccf84131" # !pip install pyemma # + id="hKjEAgCWmNFn" E4 = alpha*(k - 4*np.pi)**2 return k,E1,E2,E3,E4 # + route = [Gamma,X,W,L,Gamma,K,X] E_val1 = np.empty(0) E_val2 = np.empty(0) E_val3 = np.empty(0) E_val4 = np.empty(0) for i in range(len(route) - 1): E_val1 = np.concatenate((E_val1,E(route[i],route[i+1])[1])) E_val2 = np.concatenate((E_val2,E(route[i],route[i+1])[2])) E_val3 = np.concatenate((E_val3,E(route[i],route[i+1])[3])) E_val4 = np.concatenate((E_val4,E(route[i],route[i+1])[4])) # + # %matplotlib inline f,ax = plt.subplots() xcoords = np.append(0,np.arange(1,len(route))*numpts-1) for xc in xcoords: plt.axvline(x=xc,linestyle='--', color = 
'black') ax.set_xticks(xcoords) label=["$\Gamma$", "X","W", "L", "$\Gamma$","K","X"] ax.set_xticklabels(label, fontsize =18) ax.plot(E_val1) ax.plot(E_val2) #ax.plot(E_val3) #ax.plot(E_val4) plt.ylabel("E", fontsize =18) plt.show() # -
2,348
/car/other_models.ipynb
f9140714041b45562945f29975f95d1be9e0f585
[]
no_license
srikanthpragada/DS_28_SEP_2020
https://github.com/srikanthpragada/DS_28_SEP_2020
1
3
null
null
null
null
Jupyter Notebook
false
false
.py
104,031
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import pandas library import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns cars = pd.read_csv("final_cars.csv") ## create X and Y y = cars['price'] X = cars.drop(columns=['price']) X = pd.get_dummies(X) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=99) # ## RandomForestRegressor from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() model.fit(X_train, y_train) print(f'Train score : {model.score(X_train,y_train)}') from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error import sklearn.metrics y_pred = model.predict(X_test) r2score = r2_score(y_test,y_pred) print(f'Test Score : {r2score:0.2f}') mse = mean_squared_error(y_test,y_pred) print("MSE : ",mse) print("RMSE : ", np.sqrt(mse)) for f,v in zip (X_train.columns, model.feature_importances_): print(f"{f:30} {v:0.2f}") # Compare actual and predicted values plt.gcf().set_size_inches(20,10) sns.lineplot( y = y_test, x = X_test.index, label="Actual") sns.lineplot( y = y_pred, x = X_test.index, label="Predicted") # ### SGDRegressor from sklearn.linear_model import SGDRegressor X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=99) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() ## use same scale for X_train and X_test scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) model = SGDRegressor(random_state=100) model.fit(X_train, y_train) print(f'Train score : {model.score(X_train,y_train):f}') y_pred = model.predict(X_test) r2score = r2_score(y_test,y_pred) print(f'Test Score : {r2score:0.2f}') mse = 
mean_squared_error(y_test,y_pred) print("MSE : ",mse) print("RMSE : ", np.sqrt(mse))
2,136
/House Price Prediction Using Machine and Deep Learning/Code/1 House Price Prediction - Data Preprocessing.ipynb
d87480a5c938109e0828cf3756e71fa69e9d0534
[]
no_license
AbdallahElGhamry/Projects
https://github.com/AbdallahElGhamry/Projects
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
84,589
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import torchvision import torchvision.transforms as transforms import os.path # + ############################################################################################ Datasets dataset_dir = os.path.join(os.path.expanduser("D:\\AAA\\CS\\2020-2021\\apprentisage_profond"), 'Datasets', 'FashionMNIST') valid_ratio = 0.2 # Going to use 80%/20% split for train/valid # Load the dataset for the training/validation sets train_valid_dataset = torchvision.datasets.FashionMNIST(root=dataset_dir, train=True, transform= None, #transforms.ToTensor(), download=True) # - train_valid_dataset # + # Split it into training and validation sets nb_train = int((1.0 - valid_ratio) * len(train_valid_dataset)) nb_valid = int(valid_ratio * len(train_valid_dataset)) train_dataset, valid_dataset = torch.utils.data.dataset.random_split(train_valid_dataset, [nb_train, nb_valid]) # - print(f"nb_train,{nb_train},nb_valid,{nb_valid}") # Load the test set test_dataset = torchvision.datasets.FashionMNIST(root=dataset_dir, transform= None, #transforms.ToTensor(), train=False) test_dataset # + class DatasetTransformer(torch.utils.data.Dataset): def __init__(self, base_dataset, transform): self.base_dataset = base_dataset self.transform = transform def __getitem__(self, index): img, target = self.base_dataset[index] return self.transform(img), target def __len__(self): return len(self.base_dataset) train_dataset = DatasetTransformer(train_dataset, transforms.ToTensor()) valid_dataset = DatasetTransformer(valid_dataset, transforms.ToTensor()) test_dataset = DatasetTransformer(test_dataset , transforms.ToTensor()) # + ############################################################################################ Dataloaders num_threads = 1 # Loading the dataset is 
using 4 CPU threads batch_size = 128 # Using minibatches of 128 samples # train_loader = torch.utils.data.DataLoader(dataset=train_dataset, # batch_size=batch_size, # 每个batch有多少个样本 # shuffle=True, # <-- this reshuffles the data at every epoch # num_workers=num_threads) # 有几个进程来处理data loading # valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, # batch_size=batch_size, # shuffle=False, # num_workers=num_threads) # test_loader = torch.utils.data.DataLoader(dataset=test_dataset, # batch_size=batch_size, # shuffle=False, # num_workers=num_threads) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, # 每个batch有多少个样本 shuffle=True # <-- this reshuffles the data at every epoch ) # 有几个进程来处理data loading valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) print("The train set contains {} images, in {} batches".format(len(train_loader.dataset), len(train_loader))) print("The validation set contains {} images, in {} batches".format(len(valid_loader.dataset), len(valid_loader))) print("The test set contains {} images, in {} batches".format(len(test_loader.dataset), len(test_loader))) # + import matplotlib.pyplot as plt import matplotlib.cm as cm nsamples=10 classes_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal','Shirt', 'Sneaker', 'Bag', 'Ankle boot'] imgs, labels = next(iter(train_loader)) fig=plt.figure(figsize=(20,5),facecolor='w') for i in range(nsamples): ax = plt.subplot(1,nsamples, i+1) plt.imshow(imgs[i, 0, :, :], vmin=0, vmax=1.0, cmap=cm.gray) ax.set_title("{}".format(classes_names[labels[i]]), fontsize=15) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.savefig('fashionMNIST_samples.png', bbox_inches='tight') plt.show() # + import torch.nn as nn class LinearNet(nn.Module): def __init__(self, input_size, num_classes): 
super(LinearNet, self).__init__() self.input_size = input_size self.classifier = nn.Linear(self.input_size, num_classes) def forward(self, x): x = x.view(x.size()[0], -1) # (a,b,c) -> 输出维度 (a,b*c) y = self.classifier(x) return y model = LinearNet(1*28*28, 10) use_gpu = torch.cuda.is_available() if use_gpu: device = torch.device('cuda') else: device = torch.device('cpu') model.to(device) # + # instantiate the loss : f_loss = torch.nn.CrossEntropyLoss() # instantiate the optimizer: optimizer = torch.optim.Adam(model.parameters()) # + def train(model, loader, f_loss, optimizer, device): """ Train a model for one epoch, iterating over the loader using the f_loss to compute the loss and the optimizer to update the parameters of the model. Arguments : model -- A torch.nn.Module object loader -- A torch.utils.data.DataLoader f_loss -- The loss function, i.e. a loss Module optimizer -- A torch.optim.Optimzer object device -- a torch.device class specifying the device used for computation Returns : """ # We enter train mode. This is useless for the linear model # but is important for layers such as dropout, batchnorm, ... model.train() for i, (inputs, targets) in enumerate(loader): inputs, targets = inputs.to(device), targets.to(device) # Compute the forward pass through the network up to the loss outputs = model(inputs) loss = f_loss(outputs, targets) # Backward and optimize optimizer.zero_grad() # 清空过往梯度 loss.backward() # 反向传播,计算当前梯度 optimizer.step() # 根据梯度更新网络参数 # An example of calling train to learn over 10 epochs of the training set for i in range(10): train(model, train_loader, f_loss, optimizer, device) # - def test(model, loader, f_loss, device): """ Test a model by iterating over the loader Arguments : model -- A torch.nn.Module object loader -- A torch.utils.data.DataLoader f_loss -- The loss function, i.e. 
a loss Module device -- The device to use for computation Returns : A tuple with the mean loss and mean accuracy """ # We disable gradient computation which speeds up the computation # and reduces the memory usage with torch.no_grad(): # We enter evaluation mode. This is useless for the linear model # but is important with layers such as dropout, batchnorm, .. model.eval() # 不启用 BatchNormalization 和 Dropout # 训练完train样本后,生成的模型model要用来测试样本。在model(test)之前, # 需要加上model.eval(),否则的话,有输入数据,即使不训练,它也会改变权值。 N = 0 tot_loss, correct = 0.0, 0.0 for i, (inputs, targets) in enumerate(loader): # We got a minibatch from the loader within inputs and targets # With a mini batch size of 128, we have the following shapes # inputs is of shape (128, 1, 28, 28) # targets is of shape (128) # We need to copy the data on the GPU if we use one inputs, targets = inputs.to(device), targets.to(device) # Compute the forward pass, i.e. the scores for each input image outputs = model(inputs) # We accumulate the exact number of processed samples N += inputs.shape[0] # ([128, 1, 28, 28]) # We accumulate the loss considering # The multipliation by inputs.shape[0] is due to the fact # that our loss criterion is averaging over its samples tot_loss += inputs.shape[0] * f_loss(outputs, targets).item() # For the accuracy, we compute the labels for each input image # Be carefull, the model is outputing scores and not the probabilities # But given the softmax is not altering the rank of its input scores # we can compute the label by argmaxing directly the scores predicted_targets = outputs.argmax(dim=1) correct += (predicted_targets == targets).sum().item() return tot_loss/N, correct/N, correct # + import sys import random import time epochs = 5 def progress(loss, acc, correct): sys.stdout.write('Loss : {:2.4f}, Acc : {:2.4f}, Corect : {:2.4f}\r'.format(loss, acc,correct)) sys.stdout.flush() for t in range(epochs): # print("Epoch {}".format(t)) train(model, train_loader, f_loss, optimizer, device) val_loss, 
val_acc,correct = test(model, valid_loader, f_loss, device) progress(val_loss, val_acc, correct) # print(" Validation : Loss : {:.4f}, Acc : {:.4f}".format(val_loss, val_acc)) sys.stdout.write('\n') # sys.stdout的形式就是print的一种默认输出格式 # + # Saving the best model import os def generate_unique_logpath(logdir, raw_run_name): i = 0 while(True): run_name = raw_run_name + "_" + str(i) log_path = os.path.join(logdir, run_name) if not os.path.isdir(log_path): return log_path i = i + 1 ################################################### # Example usage : # 1- create the directory "./logs" if it does not exist top_logdir = "D:/AAA/CS/2020-2021/apprentisage_profond/logs" if not os.path.exists(top_logdir): os.mkdir(top_logdir) # 2- We test the function by calling several times our function logdir = generate_unique_logpath(top_logdir, "linear") print("Logging to {}".format(logdir)) # -> Prints out Logging to ./logs/linear_0 if not os.path.exists(logdir): os.mkdir(logdir) logdir = generate_unique_logpath(top_logdir, "linear") print("Logging to {}".format(logdir)) # -> Prints out Logging to ./logs/linear_1 if not os.path.exists(logdir): os.mkdir(logdir) # + class ModelCheckpoint: def __init__(self, filepath, model): self.min_loss = None self.filepath = filepath self.model = model def update(self, loss): if (self.min_loss is None) or (loss < self.min_loss): print("Saving a better model") torch.save(self.model.state_dict(), self.filepath) #torch.save(self.model, self.filepath) self.min_loss = loss ########################################### # Example usage # Define the callback object model_checkpoint = ModelCheckpoint(logdir + "/best_model.pt", model) # In the training loop for t in range(epochs): train(model, train_loader, f_loss, optimizer, device) model_checkpoint.update(val_loss) # + # Loading the best model model_path = "D:/AAA/CS/2020-2021/apprentisage_profond/logs/linear_1/best_model.pt" model = LinearNet(1*28*28, 10) model = model.to(device) 
model.load_state_dict(torch.load(model_path)) # Switch to eval mode model.eval() test_loss, test_acc, _ = test(model, test_loader, f_loss, device) print(" Test : Loss : {:.4f}, Acc : {:.4f}".format(test_loss, test_acc)) # + from torch.utils.tensorboard import SummaryWriter # 模拟输入数据 x = torch.randn(128, 1, 28, 28) x = x.view(x.size()[0], -1) # 导入已有模型 model.load_state_dict(torch.load(model_path)) # Switch to eval mode model.eval() # 声明writer对象,保存的文件夹,异己名称 writer = SummaryWriter(log_dir='D:/AAA/CS/2020-2021/apprentisage_profond/board/') with writer: writer.add_graph(model, (x,)) test_loss, test_acc, _ = test(model, test_loader, f_loss, device) # - for t in range(epochs): test_loss, test_acc, _ = test(model, test_loader, f_loss, device) writer.add_scalar('D:/AAA/CS/2020-2021/apprentisage_profond/val_loss', test_loss, t) writer.add_scalar('D:/AAA/CS/2020-2021/apprentisage_profond/val_acc', test_acc, t) # normalized_pixels = normalized_pixels * 255 rand_numbers = np.arange(examples_count) np.random.shuffle(rand_numbers) train_count = 10000 train_numbers = rand_numbers[:train_count] X_train = np.array([normalized_pixels[i] for i in range(examples_count) if i in train_numbers]) Y_train = np.array([one_hot_labels[i] for i in range(examples_count) if i in train_numbers]) X_test = np.array([normalized_pixels[i] for i in range(examples_count) if i not in train_numbers]) Y_test = np.array([mnist.target[i] for i in range(examples_count) if i not in train_numbers]) # - model.fit(X_train, Y_train) # biases are added automatically under the hood # Here you should get "RuntimeError: The model needs to be compiled before being used.". That's one of the signs that we take static approach. 
model.compile(loss='categorical_crossentropy', # we can put different loss here optimizer=keras.optimizers.sgd(), # we can have different optimizers metrics=['accuracy']) # we can measure our performance with many metrics # SGD stands for Stochastic Gradient Descent which is a modification of standard Gradient Descent. Here in each iteration you sample randomly a batch (let's say 64 samples) from your dataset and do update only on it. It is better because sometimes one sample is so big that you can't put your entire dataset in memory for computing (especially while using GPU, which allows us to train nets muuuuuuuuuuch faster). # # https://en.wikipedia.org/wiki/Stochastic_gradient_descent model.fit(X_train, Y_train) # We need to convert our labels to one-hot encoding. num_classes = 10 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model.fit(X_train, Y_train) # Let's talk a bit about what is batch (baby don't train me, don't train me, no more). https://youtu.be/3rzgrP7VA_Q # # And a bit what is epoch. model.evaluate(X_test, Y_test) #loss and accuracy on test set # Say it straight, your model is not good, it achieves only ~21% of accuracy. Maybe we can do better? model = keras.models.Sequential([ keras.layers.Dense(10, activation='softmax', input_shape=(784,)) ]) model.compile(loss='categorical_crossentropy', # we can put different loss here optimizer=keras.optimizers.SGD(), # we can have different optimizers metrics=['accuracy']) # we can measure our performance with many metrics model.fit(X_train, Y_train) # Here we've used softmax activation (which is used in almost every output layer in neural networks nowadays). # But our NN has only one layer which is output layer. Let's make a one hidden layer. model = keras.models.Sequential([ keras.layers.Dense(100, activation='relu', input_shape=(784,)), # notice RELU!!! 
keras.layers.Dense(10, activation='softmax') # input shape inferred automatically, yaay! ]) model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.sgd(), metrics=['accuracy']) # Did you noticed the relu activation! Let's stop for a bit an show how that function looks on blackboard. model.fit(X_train, Y_train) # Let's use better optimizer! model = keras.models.Sequential([ keras.layers.Dense(100, activation='relu', input_shape=(784,)), keras.layers.Dense(10, activation='softmax') # input shape inferred automatically, yaay! ]) model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), # dać nowy optimajzer? A dam! (hehe) metrics=['accuracy']) model.fit(x_train, y_train, epochs=10) # Woooooow! You've probably achieved score higher than 80%. At that's "only" with change of optimizer. "Adam" is the most widely used type of optimizer. They are all well described here, read it after our workshop! All used optimizers are variation of simple GD. # # http://ruder.io/optimizing-gradient-descent/ # To sum up. We can build a neural network like a lego blocks, set up parameters of every block and easily train it. We can change how we measure our loss, how we train our net, how we measure performance. We can set size of batch and size of epoch. # # There are many many different layers (convolutional, recurrent etc.) and each of them is used for different purpose. # # The official keras site is really great source of examples and documentation of all the parts described today. # # https://keras.io/ # # Let's look at it! # # Read the official tutorial on MNIST with Multi Layer Perceptron, which is just another name for normal feedforward (as opposed to recurrent) fully-connected (dense) neural network with many layers. # # https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py # # There are many really good examples in this repository, please read them and another parts of the code (how is everything implemented etc.) 
# ###### Now just experiment and build the network that achieves the best accuracy on MNIST!
17,838
/LinearRegression.ipynb
f65a6891a97de20f9eedc90f4116b00ca2bec635
[]
no_license
felipepasquali/MachineLearning
https://github.com/felipepasquali/MachineLearning
2
0
null
null
null
null
Jupyter Notebook
false
false
.py
148,047
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # PART 1 # This Program is the solution for ex1 of Machine Learning # Perform Linear Regression on one variable dataset using gradient descent import pandas as pd import matplotlib.pyplot as plt import numpy as np # + Data1 = pd.read_csv("data1ex1.txt", header=None,names=['X','Y']) Data1.insert(0,'Ones',1.0) # plt.scatter(Data1.iloc[:,0],Data1.iloc[:,1],marker="x") plt.scatter(Data1['X'],Data1['Y'],marker="x") plt.xlabel('Population of city in 10,000s') plt.ylabel('Profit in $10,000') plt.show() # - # The cost Function for linear Regression is implemented below. It has the form of: # $$ J(\theta) = \frac{1}{2m} \sum_{i=1}^{m}(h_\theta(x^{(i)})-y^{(i)})^2 $$ # # with the hypothesis: $ h_\theta(x) = \theta^Tx = \theta_0 + \theta_1x_1$ def LinRegrCostFunction(th,Y,X): # Cost Function for Linear Regression m = Y.size h = X @ th s = np.sum((h[:]-Y[:])**2) J = (1/(2*m))*s return J # The Gradient descent equation is shown below. # # $$\theta_j := \theta_j - \alpha \frac{1}{m} \sum_{i=1}^{m}(h_\theta(x^{(i)})-y^{(i)})x_j^{(i)} $$ # # The gradient descent works by updating the $\theta_j$ simultaneously for all $j$. $\alpha$ is the learning parameter which can be tuned to achieve convergence. The implementation is shown in the equation below. 
# def GradientDescent(th,Y,X,iterations=1500,alpha=0.01): #Gradient Descent for 2 variables Linear Regression m = Y.size JHistory = np.zeros(iterations) for i in range(iterations): h = X @ th th = th - (alpha/m)*X.T@(h-Y) JHistory[i] = LinRegrCostFunction(th,Y,X) return th,JHistory # + X = Data1[['Ones','X']].to_numpy() Y = Data1['Y'].to_numpy() m,n = X.shape theta = np.zeros(n) iterations = 1500 alpha = 0.01 # Learning Rate TH,HIST = GradientDescent(theta,Y,X,iterations,alpha) print('Theta:',TH) Xnew = np.linspace(np.amin(X[:,1]),np.amax(X[:,1])) n = Xnew.size Xnew = np.append(np.ones(n),Xnew).reshape((n,2), order='F') Ynew = np.dot(Xnew, TH) plt.plot(Xnew[:,1],Ynew,'r-') plt.scatter(X[:,1],Y,marker="x") plt.xlabel('Population of city in 10,000s') plt.ylabel('Profit in $10,000') plt.show() # - #Plotting the Convergence Data plt.plot(HIST) plt.xlabel('Iterations') plt.ylabel('Cost Fuctions') plt.title('Convergence of Gradient Descent') plt.show() # + # Visualizing the Contour of the Cost Function th0 = np.linspace(-10, 10, 200) th1 = np.linspace(-1, 4, 200) Jvals = np.zeros((th0.size,th1.size)) for i,ii in enumerate(th0): for j,jj in enumerate(th1): thetas = [th0[i],th1[j]] Jvals[i,j] = LinRegrCostFunction(thetas,Y,X) plt.contour(th0,th1,Jvals) plt.plot(TH[0],TH[1],"or") plt.show() # - # The contour plot above shows the contour lines and the red dot shows the minimum point. 
This is where the function minima is located which corresponds to the $\theta$ # PART 2 # LET'S APPLY THE SAME IDEA TO MULTIPLE VARIABLES # This Program is the solution for ex1 of Machine Learning (optional) # Linear Regression for Multiple Variables using Gradient Descent from mpl_toolkits.mplot3d import Axes3D def LinRegrCostFunction(th,Y,X): # Cost Function for Linear Regression m = Y.size h = (X @ th) - Y[:] J = (1/(2*m))*np.dot(h.T,h) return J def GradientDescent(th,Y,X,iterations=1500,alpha=0.01): #Gradient Descent for multiple variables Linear Regression m = Y.size JHistory = np.zeros(iterations) for i in range(iterations): h = X @ th th = th - (alpha/m)*np.dot(X.T,(h[:]-Y[:])) JHistory[i] = LinRegrCostFunction(th,Y,X) return th,JHistory # + Data2 = pd.read_csv("data2ex1.txt", header=None,names=['size','bedrooms','price']) Data2y = Data2['price'] Data2x = Data2[['size','bedrooms']] Data2xNorm = (Data2x - Data2x.mean()) / Data2x.std() Data2xNorm.insert(0, 'Ones', 1.0) X, Y = Data2xNorm.to_numpy(),Data2y.to_numpy() m,n = X.shape theta = np.zeros(n) iterations = 1500 alpha = 0.01 # Learning Rate TH,HIST = GradientDescent(theta,Y,X,iterations,alpha) print('Theta:',TH) k = 50 Xnew = np.append(np.linspace(np.amin(X[:,1]),np.amax(X[:,1]),num=k),np.linspace(np.amin(X[:,2]),np.amax(X[:,2]),num=k)) Xnew = np.append(np.ones(k),Xnew).reshape((k,n), order='F') Ynew = np.dot(Xnew, TH) print(Xnew.shape,Ynew.shape) # Plot the 3D line over the normalized training data fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot(Xnew[:,1], Xnew[:,2], Ynew, label='Prediction',c='b') ax.scatter(X[:,1], X[:,2], Y, label='Training Set',c='r') ax.legend() plt.show() # - #Plotting the Convergence Data plt.plot(HIST) plt.xlabel('Iterations') plt.ylabel('Cost Fuctions') plt.title('Convergence of Gradient Descent') plt.show()
4,961
/Step3_03.ipynb
7b5eda545ce57538ff2851ac783a6cdce22f4e85
[]
no_license
yusya23/DL_123
https://github.com/yusya23/DL_123
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
73,219
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Step3 演習03 Kerasチュートリアル # --- # 本演習は、Pythonで書かれた深層学習のフレームワーク Keras を使ったチュートリアルです。 # # Kerasを使い手書き数字の画像 MNISTの多クラス分類をおこないます。 # # **はじめに** # - for文やwhile文の利用は明示的な利用指示がない場所での利用は避けてください。 # # **本演習の目的** # - Kerasを使った基本的な記述手順を身につける。 # - Kerasを使ってMNISTの多クラス分類の実装してみる。 # **【課題1】** 採点を実行してください。 #Coursedele-02 Step3 QuestionNumber5 5e6e4c3e443950e8e029a9aa1f402f72 print("この課題の採点は全員正解とします。") # ** ファイルを保存後 **、次のセルを実行(Shift+Enter)で採点を行います。 # + language="bash" # ./validation_client.py dele-02 3 5 Step3_03.ipynb api.internal.zero2one.jp # - # ## ライブラリのインポート # # まずはじめに、本演習で利用するライブラリのインポートをします。ライブラリのインポートは作業を行うときには一番はじめに行うようにしましょう。 # # - [numpy](http://www.numpy.org) 数値計算を行うための基本パッケージの公式ドキュメント # - [matplotlib](http://matplotlib.org) グラフ描画ライブラリの基本パッケージの公式ドキュメント # - [Keras](https://keras.io/ja/) Pythonの深層学習ライブラリKerasの公式ドキュメント #ライブラリのインポート # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import keras keras.__version__ # ## データセット # この演習では "MNIST" と呼ばれる手書き数字のデータを用います。 # # 28ピクセル×28ピクセルのグレースケール画像が学習用データとして55,000枚、テスト用データとして10,000枚含まれています。今回はtensorflowで用意されている関数からデータを読み込みます。 # # read_data_setsの引数は # 1. ファイルの保存場所 # 2. one_hot: one-hotベクトル化するかどうか # 3. validation_size: 検証用データの枚数、今回は訓練データとテストデータのみ使用します # 4. 
source_url: データセットのダウンロード元 # # 使用しているネットワークの強さによりロードに多少時間がかかる場合があります。 from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # ダウンロードしたデータセットを**訓練データ**と**テストデータ**にそれぞれ定義しておきます。 # + X_train = mnist.train.images y_train = mnist.train.labels X_test = mnist.test.images y_test = mnist.test.labels print("X_train:訓練データ") print("y_train:訓練データの正解ラベル") print("X_test:テストデータ") print("y_test:テストデータの正解ラベル") # - # 訓練データとテストデータのshapeを確認します。 print("訓練データ数:{}".format(X_train.shape[0])) print("訓練データ数の特徴(ピクセル):{}".format(X_train.shape[1])) print("テストデータ数:{}".format(X_test.shape[0])) print("テストデータ数の特徴(ピクセル):{}".format(X_test.shape[1])) # "X_train:訓練データ"は、学習用データの55000枚の画像データを行列にしたもの(28×28=784)として格納されています。 # # 同様に、"X_test:テストデータ"は、テスト用データの10000枚の画像データを行列にしたもの(28×28=784)として格納されています。 # ### データの視覚化 # ここで学習データの画像をランダムに表示させてみます。セルを実行するたびに画像が変わります。 num = np.random.randint(0, len(mnist.train.images)) plt.imshow(mnist.train.images[num].reshape(28, 28), cmap='gray') print("数字:{}".format(np.argmax(mnist.train.labels[num]))) # 次に正解ラベルを見てみます。正解ラベルとなる y_train のshapeは(55000,10)です。次のセルを実行して訓練データとテストデータの正解ラベルのデータの性質を確認します。 print("訓練データの正解ラベル数:{}".format(y_train.shape[0])) print("訓練データ数の正解ラベルの配列数:{}".format(y_train.shape[1])) print("テストデータの正解ラベル数:{}".format(y_test.shape[0])) print("テストデータ数の正解ラベルの配列数:{}".format(y_test.shape[1])) # 訓練データの場合、正解ラベル数は55000となっており、それぞれに対して答えとなるラベル部分には「1」が与えられています。下のセルを実行してみましょう。 y_train[:5,:] # 例えば、数字の0に対応する正解ラベルは配列の0番目に1がはいっています。数字の1に対応するラベルには配列の1番目に1がはいっています。3以降も同様のルールです。下の画像はこれらのルールを可視化したものになります。 # <p> # # <div align="center" style="width:50%"> # <img src="./img/step3_mnist_label_019.png"></div> # ### データセットのシャッフル # MNISTのデータセットをシャッフルします。データセットをそのまま使わずにシャッフルするのはなぜでしょうか。例えば、データがラベル(y)に対してソートされていたとします。すると、そのまま冒頭3000サンプルを取って学習に使うと、0, 1と書いてあるデータばかりが集まってしまうことになってしまいます。一見分からないような規則でソートされている可能性もあるので、データセットの順番がバラバラに見えても必ず最初にシャッフルしましょう。 #データセットのシャッフル permutation = np.random.permutation(mnist.train.images.shape[0]) 
X_train = mnist.train.images[permutation] y_train = mnist.train.labels[permutation] # ### データセットの正規化 # データセット全体が平均0.0,分散1.0となるように正規化します。 mean = np.mean(X_train) std = np.std(X_train) X_train = (X_train - mean) / std X_test = (X_test - mean) / std # ## ニューラルネットワークモデル # 入力層のユニット数は特徴の数である784個、出力層は0~9の数字の10クラス、隱れ層のユニット数は100個とします。 # <br> # 今回は下に示すネットワークのモデルを構築します。 # # - 入力層のユニット数784個 # - 隠れ層のユニット数100個 # - 出力層のユニット数10個 # # # **モデル**: # <div style="width:40%"> # <img src="./img/step3_mlp_784-10.png"><div> # ## モデルの実装 # データセットの準備ができたので実際にKerasによるニューラルネットワークの実装を行いましょう。 # # Kerasの基本的なデータ構造はmodelでネットワークを構成します。モデルは keras.models.Sequential クラスを使って構築します。 #KerasのSequentialをインポート from keras.models import Sequential model = Sequential() # ネットワークの層を追加するには** .add()メソッド **を利用します。model.add()で層を追加することができ、Denseで主に下記の引数を指定します。 # # モデルを構築するには下記のように記述します。 # # model.add(Dense(引数を指定)) # # ```py # keras.layers.Dense(units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', # bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, # activity_regularizer=None, kernel_constraint=None, bias_constraint=None) # ``` # # - units:正の整数で出力の次元数を指定 # - activation:活性化関数 # - use_bias:バイアス$b$(デフォルトは"True") # - kernel_initializer:重みの初期化方法 # - bias_initializer: バイアスベクトルの初期化方法 # - kernel_regularizer:重み行列に適用される正則化関数 # # ** Denseが実行する操作 ** # # ニューラルネットワークの一般形の演算を行います。具体的には次の通りです。 # # $y = f(w^{\top}x + b)$ # # 各関数のドキュメントは次の通りです。 # # - [Keras:Dense](https://keras.io/ja/layers/core/) 全結合ニューラルネットワーク # - [Keras:Activation](https://keras.io/ja/activations/) 使用する活性化関数 # - [Keras:kernel_regularizer](https://keras.io/ja/regularizers/) 正則化 # # **【確認1】** 適切な 関数・変数・値 などにNoneを書き換えてください。 # + # DenseとActivationをインポートする from keras.layers import Dense, Activation #インスタンスを作成 model = Sequential() #一番初めののレイヤーではinput_shapeで、入力するデータの次元を与えます model.add(Dense(units=100, input_shape=(784,))) model.add(Activation('relu')) model.add(Dense(units=10)) model.add(Activation('softmax')) # - # ### 
コスト関数と最適化手法の決定 # 次にコスト関数と最適化アルゴリズムを定義します。 # # ** model.summary() ** と記述することで実装したモデルの構成を表で表示してくれます。<br> # ** model.compile() **でコスト関数や最適化手法を決定をします。 # # compileでは下記の3つの引数を指定します。 # # - loss(損失関数) # - optimizer(最適化アルゴリズム、rmspropやadagradなどを指定) # - metrics(評価関数) # # それぞれ指定します。下のセルを参考にしてください。 # # 各関数のドキュメントは次の通りです。 # - [Keras:summary](https://keras.io/ja/models/about-keras-models/) # - [Keras:compile](https://keras.io/ja/getting-started/sequential-model-guide/) # - [Keras:optimizer](https://keras.io/ja/optimizers/) model.summary() model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # ### モデルの学習 # 構築したモデルの学習には** model.fit() **を使います。学習データ、バッチサイズ、エポック数、ログの出力、バリデーションデータの設定をします。 # # 下記に主な引数を示します。 # # - x:学習に使用する訓練データ # - y:学習に使用する正解ラベル # - batch_size:バッチサイズ # - epochs:エポック数 # - verbose:学習中のログ(0:標準出力にログを出力しません、1:プログレスバーで出力、2:エポックごとに1行のログを出力) # - validation_data:各エポックごとに利用するモデルを評価する # # 関数のドキュメント # # - [Keras:fit](https://keras.io/ja/models/model/) # 下のセルを実行すると学習がはじまります。 history = model.fit(X_train, y_train, # 訓練データ batch_size=100, #バッチサイズ epochs=10, # エポック数の指定 verbose=1, # ログ出力の指定. 0だとログが出ない validation_data=(X_test, y_test) # テストデータ ) # ### モデルの評価 # 学習後のモデルに対してテストデータで評価を行うには** model.evaluate() **を使います。訓練データ、正解ラベル、バッチサイズを渡します。 # # - x:評価に使用するテストデータ # - y:評価に使用する正解ラベル # - batch_size:バッチサイズ # - verbose:学習中のログ(0:表示しない、1:表示する) # # 関数のドキュメント # # - [Keras:evaluate](https://keras.io/ja/models/sequential/) score = model.evaluate(X_test, y_test, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # ### モデルの誤差 # 最後に訓練データとテストデータにおける学習中のコスト関数の減少を確認し、モデルの誤差を評価します。 plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend() # ## おつかれさまでした。 # Kerasを使ったニューラルネットワークの実装おつかれさまでした。 # # Kerasでは、model.add と記述し層を積み上げるだけで簡単にニューラルネットワークのモデルの構築、実装、評価を行うことができました。
7,638
/pandas/multicolumn_ranking.ipynb
9e218c4558bf2673d0b48f688d06f2aad0b93abe
[]
no_license
adkein/tutorials
https://github.com/adkein/tutorials
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
20,149
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np import pandas as pd # + # Initialize a DataFrame with some metrics and some randomly generated data for them. metrics = ['metric_{}'.format(c) for c in 'ABCD'] data = pd.DataFrame( np.random.randint(0, 10, size=(1000, len(metrics))), columns=metrics, ) # - data.head() # + # Add a "user" column and promote it to index. users = ['Alvin', 'Simon', 'Theodore', 'Larry', 'Curly', 'Moe', 'Tom', 'Jerry'] data['user'] = np.random.choice(users, size=(len(data))) data = data.set_index('user') # - data.head() # + # Sum up all the metrics simultaneously, grouped by user. sums = data.groupby(level='user').sum() sums # + # For each metric, show the one user with the highest count. sums.apply(lambda c: c.argmax()) # + # For each metric simultaneously, rank the users by count for the metric. ranking = sums.apply(lambda c: c.sort_values().index) ranking # + # The "user" index left over is useless now. Let's reset it to a more useful integer index. ranking = ranking.reset_index(drop=True) ranking # + # Of course you can just focus on the top however-many. ranking.head(3).T # -
1,392
/media/seminar/nacsi-rl/5_basic_tensorflow.ipynb
9ca7fda33d482d742d6f504155828cbf3a6b43dd
[]
no_license
gritmind/review-paper
https://github.com/gritmind/review-paper
5
1
null
null
null
null
Jupyter Notebook
false
false
.py
8,516
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # # TensorFlow Tutorial # - # 2015년 12월 오픈소스로 공개된 TensorFlow, 속칭 tf는 Google의 주도로 개발된 기계학습(딥러닝) 라이브러리이다. Multi-GPU 환경에서도 동작가능한 아키텍처 기반의 TensorFlow는 Theano의 Computation Graph 방법을 채택하여 automatic gradient differentiation이 가능하다. # # TensorFlow는 CPU모드, GPU, 그리고 Multi-GPU 환경에서 모두 사용가능하며, 직관적이고 쉬운 문법으로 인한 높은 접근성, 구글의 강력한 지원하에 이루어지는 빠른 업데이트, 그리고 큰범위의 개발자생태계가 존재하여 쉽게 Trouble Shooting이 가능하다는 큰 장점을 갖고있다. # # # #### 관련 사이트 # TensorFlow 공식 홈페이지 : http://tensorflow.org/ # <br>TensorFlow Github : https://github.com/tensorflow/tensorflow/releases # <br>TensorFlow Contributors : https://github.com/tensorflow/tensorflow/graphs/contributors # <br>TensorFlow Playground : http://playground.tensorflow.org/ # <img src = "computation graph.png" width=600 > # https://delftswa.gitbooks.io/desosa2016/content/tensorflow/chapter.html # ### TensorFlow의 핵심 원리 # TensorFlow의 중요한 핵심 개념인 Computation Graph와 Session에 대해 알아보자. # Computation Graph란 tf에서 실행되는 모든 알고리즘 혹은 모델을 일련의 node와 edge들의 연결로 하나의 graph 로 나타내는 것이라고 할 수 있다. # # Computation Graph 기반의 프레임워크가 가진 장점은 모델 구현의 유연성이 크고, 자동화된 미분(Automatic Differentiation) 계산이 가능하다는 점이 있다. # # Session이란 한마디로 Computation Graph가 실행되는 환경 혹은 자원을 의미한다. 일반적으로 딥러닝 연산에는 GPU를 활용하게 되는데, 여기에서 확보가능한 메모리 및 프로세서 자원을 추상화시켜 Session이라고 표현할 수 있다. # # 정리하면 Computation Graph는 알고리즘 또는 딥러닝 모델, Session은 모델이 실행되는 GPU환경 이라고 할 수 있다. # # 그러면 tf의 기본적인 변수 선언 방법 및 연산 사용 방법을 알아보자. # + deletable=true editable=true import tensorflow as tf #configuration setting for gpu usage control config = tf.ConfigProto() config.gpu_options.allow_growth = True # + [markdown] deletable=true editable=true # # Simple matrix multiplication # + [markdown] deletable=true editable=true # 행렬을 정의하고 행렬곱을 수행하는 Computation Graph를 tensorflow로 구성한다. 
# tf.Session로 구성하고 sess.run으로 작동시키면 병렬적으로 모든 결과값을 처리 후 결과를 반환한다. # + deletable=true editable=true mat1 = tf.constant([[3., 3.], [4.,4.]]) # 2x2 matrix mat2 = tf.constant([[2.,],[2.]])#2x1 matrix product = tf.matmul(mat1, mat2) #1 sess = tf.Session(config=config) result = sess.run(product) # to get result print(result) sess.close() # + [markdown] deletable=true editable=true # # InteractiveSeesion # + [markdown] deletable=true editable=true # tf.InteractiveSession을 사용하게 될 경우 기존 tf.Session 에서 매번 오브젝트를 참조할 수고를 덜어준다. # # ex) # <br>**Session 사용 시** ## 깔끔한 코딩을 위해 session을 사용하자. # <pre> # t = tf.constant(42.0) # sess = tf.Session() # with sess.as_default(): # or `with sess:` to close on exit # assert sess is tf.get_default_session() # assert t.eval() == sess.run(t) # </pre> # # **InteractiveSession 사용 시** # <pre> # sess = tf.InteractiveSession() # t = tf.constant(42.0) # u = tf.constant(37.0) # tu = tf.mul(t, u) # ut = tf.mul(u, t) # with sess.as_default(): # tu.eval() # runs one step # </pre> # # 쉽게 생각하여 예를들면 for와 while 문과 같은 관계(?)라고 어느정도 예상 할 수 있다. # <br>(reference) http://stackoverflow.com/questions/33610685/in-tensorflow-what-is-the-difference-between-session-run-and-tensor-eval # + deletable=true editable=true sess = tf.InteractiveSession("",config=config) x = tf.Variable([1.0, 2.0]) #variable need initialization a = tf.constant([3.0, 3.0]) #constant no initiazliation #1 x.initializer.run() add_up = tf.add(x, a) result = add_up.eval() # to get result print(result) sess.close() # + [markdown] deletable=true editable=true # # Using variables # + [markdown] deletable=true editable=true # 값을 저장할 수 있는 컨테이너 역할을 하는 Variable. # <br>하지만 사용하기 위해서는 initialize가 필요하다. # <br>아래 예제에서 counter variable이 run(update)를 할때마다 1씩 증가하는것을 볼 수 있다. 
# + deletable=true editable=true counter = tf.Variable(0, name="counter") # tf.Variable(<initial-value>, name=<optional-name>) one = tf.constant(1) # constant 1 upcount = tf.add(counter, one) # add update = tf.assign(counter, upcount) # counter = upcount init_op = tf.initialize_all_variables() #initializer with tf.Session(config=config) as sess: #start with tf.Session() sess.run(init_op) #run (init_op) print(sess.run(counter)) #run (state) for _ in range(3): sess.run(update) #run(update) print(sess.run(counter)) # print after update sess.close() # + [markdown] deletable=true editable=true # # Using feed # + [markdown] deletable=true editable=true # tf.placeholder는 우리가 원하는 데이터를 Computation Graph에 입력시켜주는 역할을 하는 변수이다. 즉 input 을 받기 위한 변수라고 생각할 수 있다. # # Session이 수행될 때 해당 placeholder에 값을 입력해주는 방법으로 feed_dict 방법이 있다. # # 아래 예제에서는 input1과 input2에 값을 입력해주고 output을 fetch한다. # + deletable=true editable=true input1 = tf.placeholder(tf.float32) # recieve 1x1 matrix with type float32 input2 = tf.placeholder(tf.float32) # recieve 1x1 matrix with type float32 output = tf.add(input1, input2) with tf.Session(config=config) as sess: print "lucky" #1 print(sess.run([output], feed_dict={input1:[5.], input2:[2.]})) sess.close()
5,110
/python/PythonFundamentos-master/Cap03/Notebooks/DSA-Python-Cap03-03-While.ipynb
37b395e0f2d077cfc5a86f5f487faa3d3e0417ab
[]
no_license
eduardotrein/leonesto
https://github.com/eduardotrein/leonesto
0
0
null
2019-11-23T00:25:36
2019-11-23T00:14:22
null
Jupyter Notebook
false
false
.py
5,087
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 3</font> # # ## Download: http://github.com/dsacademybr # ### While # Usando o loop while para imprimir os valores de 0 a 9 counter = 0 while counter < 10: print(counter) counter = counter + 1 # + # Também é possível usar a claúsula else para encerrar o loop while x = 0 while x < 10: print ('O valor de x nesta iteração é: ', x) print (' x ainda é menor que 10, somando 1 a x') x += 1 else: print ('Loop concluído!') # - # ### Pass, Break, Continue counter = 0 while counter < 100: if counter == 4: break else: pass print(counter) counter = counter + 1 for verificador in "Python": if verificador == "h": continue print(verificador) # ### While e For juntos for i in range(2,30): j = 2 counter = 0 while j < i: if i % j == 0: counter = 1 j = j + 1 else: j = j + 1 if counter == 0: print(str(i) + " é um número primo") counter = 0 else: counter = 0 # # Fim # ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
1,499
/shiny_files/points_boundary.ipynb
369e671db3baf09a7791d7879617ab51c1f3be4d
[]
no_license
dingyif/Embolism_Detection
https://github.com/dingyif/Embolism_Detection
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
977,973
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Pratical Data Science Tutorial Project # # pybedtools Tutorial # ## Introduction # # With the development of technology, biology is has been greatly affected by this change, allowing us to use computational models and techniques to further advance the field. One area of the field that is influenced by this change is genomics, which is the study of the collective interaction of the DNA and genes of an organism on a broader level. This is not to be confused with genetics, which looks at the effects of indivudal genes. # # `bedtools` is a revolutionary shell scripting toolset which allows for genome arthimatic and calculations. On their website, they refer it as a *"swiss-army knife of tools for a wide-range of genomics analysis tasks"*. This package is used by many from various disciplines for quick and efficient analysis. # # `pybedtools` is essentially a wrapper for `bedtools` in *Python*. You might be wondering now, *"`bedtools` is powerful, efficient, and easy to use using command line and `bash`. Why do we even need a Python version?"* Well, some might agree with this statement. However, that is not entirely the case. `pybedtools` allows for seemless integration of the `bedtools` toolset to Python, which is a language that many poeple know and understand well. This allows users to not know the intricacies of other shell scripting and programming language, such as `Perl`, `bash`, `awk`, which needed for more complicated uses of the package. Even better, run time is conserved for both `pybedtools` and `bedtools`. AWESOME!!! # # This tutorial will overview the basic, but important, methods and examples of genome analysis using the various functions of `pybedtools`. 
# ## Installing the Libraries # # In order to use `pybedtools`, `bedtools` must also be installed locally on the computer. There are many methods to install `bedtools`. Installing directly from GitHub is a popular and recommended option: https://github.com/arq5x/bedtools2. # # However, instructions for other methods can also be found here: http://bedtools.readthedocs.io/en/latest/content/installation.html. # # To install `pybedtools`, `pip` can be used: # + active="" # pip install pybedtools # - # To make sure the packages are installed properly, make sure the following command work properly: import pybedtools # ## Loading the Sample Data # # To describe the basic `bedtools` functions, we will be using sample data provided by the `pybedtools` library. The file format that we will use initially is called `bed`. This file type is used to store instances of genome sequences from chromosomes of humans, mice, rats, etc. # # At the very least, these files store intervals of genome sequences that have three fields: the chromosome number (*chr#*), the start position of the feature, and the end position of the feature. However, more features can be stored, including the name of the feature, the score, and the direction of the strand. For more information, please visit: https://useast.ensembl.org/info/website/upload/bed.html # # The following shows how to extract the example files, and how the `bed` sample data is formatted. We use the function, `example_bedtool`, because the file is an example stored within the library; however, to access files that are not in the library, the function `BedTool` suffices. a = pybedtools.example_bedtool('a.bed') b = pybedtools.example_bedtool('b.bed') print(type(a)) print(a) print(b) print(b[0]) # We see here that there are three fields in addition to the three required fields, which are described above. Even though the output is a `BedTool` object, we can call extract each line individual for element extraction (for parsing through files). 
# # There are also other file types that many of the functions that `bedtools` and `pybedtools` can be use, including `vcf` and `fasta`. However, for the purposes of this tutorial, `bed` files will be used for the function examples. # ## Essential `bedtools` Functions and Examples # # In this section, we will discuss many important `bedtools` functions, their implementation in `pybedtools`, and some examples. # # Note: For this section, file `A` refers to `a.bed` and file `B` refers to `b.bed`. # ### Intersect # # `Intersect` is the epitome and one of the most powerful functions of `bedtools`. Given two `bed` files, it outputs all the regions where the features overlap. a_with_b = a.intersect(b, u = True) a_with_not_b = a.intersect(b) print(a_with_b) print(a_with_not_b) # We see that the value of the `u` parameter is important. When `u` is `True`, it outputs the intersection of `A` with respect to `B`. Thus, we see all the values of `A`, but only those that are included in to some extent in `B`. For more clarification, `(100,200)`, the first data point, is not in the true intersection between `A` and `B`, as it should be `(155,200)`. But, since this range in `A` is a part of `B`, it includes all of `A`. However, it does not include `(1,100)`, because none of those values are within any ranges in `B`. On the other hand, when `u` is not defined (`False`), the result is the true intersection between `A` and `B`. # ### Merge # # Merge allows us to merge any overlapping regions within a file. We introduce the argument `s`, which is common for most functions. `s` determines whether to take into account the specific strand direction, and evaluates the function seperating the strands. Hence, this is why we see `(150,500)` when `s = True`. a_merge = a.merge() a_merge_strand = a.merge(s = True) print(a_merge) print(a_merge_strand) # ### Subtract # # Given `A` and `B`, `subtract` allows us to remove any sequences of `B` in `A`. 
An additional example is also provided to demonstrate the `s` parameter. a_subtract_b = a.subtract(b) a_subtract_b_strand = a.subtract(b, s = True) print(a_subtract_b) print(a_subtract_b_strand) # ### Complement # # `Complement` takes the opposite intervals from the input file. # # The `genome` parameter is necessary for this function. The genome field specifies the genome species and version. For example, `hg38`: `hg` refers to the human genome, and `38` is the human reference genome number. Another example is `mm10`, which refers to the `mice` reference genome number `10`. Below, we see that the there are differences in the output for the three genomes, even between the two human genomes. This is because the genomes are different between animals and are constantly updated. Thus, this field is important when working with functions that require information that is not provided through the current file. a_complement_hg38 = a.complement(genome = "hg38") a_complement_hg19 = a.complement(genome = "hg19") a_complement_mm = a.complement(genome = "mm10") a_complement_hg38.head(5) print("\n") a_complement_hg19.head(5) print("\n") a_complement_mm.head(5) # ### Closest # # Given `A` and `B`, `closest` returns the nearest features of `B` to `A`. Below is an example from the sample data. a_closest_b = a.closest(b, s = True, d = True) print(a_closest_b) # The `d` parameter (specific to `closest`) introduces another column to the output which calculates the distance from the original feature to the nearest feature. This can be helpful in filtering out sequences that are too either close or too far from each other. 
# ### Sort # # `Sort` is an important tool because it can sort input file based on many arguments, which are but not limited to: # * No arguments = sort by starting position # * `-size D` = sort by descending length # * `-size A` = sort by ascending length # * `-chrThenSizeD` = sort by chromosome than by descending length # * `-chrThenSizeA` = sort by chromosome than by ascending length # * `-faidx (names.txt)` = sort chromsomes by a file with the name `names.txt` # # One thing to note is that chromsome storing is lexicographical. In order to bypass this, the last argument above can be used. As `A` and `B` are both sorted, we will use the `a_complement_hg38` stored previously to demonstrate this function below. a_complement_hg38.sort().head(5) print("\n") a_complement_hg38.sort(sizeD = True).head(5) print("\n") a_complement_hg38.sort(sizeA = True).head(5) print("\n") a_complement_hg38.sort(chrThenSizeD = True).head(5) # ### Slop # # Given an input file `A`, `slop` allows us to increase the read range of the algorithm by a certain value. Below is an example from the sample data, where we increase the reads in each direction by a value of `100`. a_extended_hg = a.slop(b=100, genome='hg19') #The genome parameter is required. print(a_extended_hg) # ### Genomecov # # Another function is `genomecov`, which computes the frequency of each interval in the given file. a_gencov = a.genome_coverage(bg = True, genome ='hg19') #The genome parameter is required. a_gencov.head() # ## The Genome File Types and Conversions # # One of the most annoying, yet common, theme about genomes file is that there are many different file types to store information. Though, these file types are useful for their specific purposes, there are many times where it is needed to convert between file types. This section describes other common file types, outside of `bed`, which are important. # ### VCF # # VCF is an text file type that is commonly used to store variant/mutation information. 
Each line stores the chromosome number, position, the reference allele, the alternate allels, the quality of the reads. For more information: http://www.internationalgenome.org/wiki/Analysis/Variant%20Call%20Format/vcf-variant-call-format-version-40/. v = pybedtools.example_bedtool('v.vcf') print("#CHROM POS ID REF ALT QUAL FILTER INFO") # Just useful to look at the structure of the VCF file print(v) # ### FASTA # # The `fasta` file type, which stores the actual sequence of the interval in nucleotide base pairs (`A`,`T`,`G`,`C`). Below is an example of how we extract the `fasta` file from a `bed` file using the function `seqence`, which is the wrapper for the `bedtools` function `getfasta`. We use the argument, `fi`, to specify the input `fasta` file. fasta = pybedtools.example_filename('test.fa') a_fasta = a.sequence(fi=fasta) print(open(a_fasta.seqfn).read()) # Each new sequence is started with `>`, followed by a header, which is seperated by `:`. Here, we see that the chromosome number and the sequence information is stored. The `strand` field is important (shown below) because the sequence of amino acids is opposite for the `+` and `-` strands (the nucleotide bases `A` binds with `T` and `G` binds with `C`). Below, force strandness does output the opposite result for the interval between 150 and 500. a_fasta_strand = a.sequence(fi=fasta, s = True) print(open(a_fasta_strand.seqfn).read()) # ## Example Application: Promoters affected by SNPs # # Now, we are ready to do some heavy genome arthimatic and use the full force if `pybedtools`! In this final section, we will use what we have learned, and then some, to find some cool results. # # Our goal is figure out the concentration of SNPs (single nucleotide polymorphisms/mutations) that are near DNA promotor binding sites. Even one amino acid mutation can change the affinity of the protein binding to the bind site, modifying this entire pathway. Let us see what happens! 
# ### Loading the Data # # For this example, download the files from this folder to the same directory as this notebook: https://drive.google.com/drive/folders/1zs2YkDd--BR2PpeDAX4WkPl3Gu-1vi-o?usp=sharing from pybedtools import BedTool chromHmm = BedTool('hesc.chromHmm.bed') promoter = BedTool('RefGene_hg38_wg_reg.bed') exons = BedTool('hg38_exons.bed') vcf = BedTool('test1_truth.vcf') # The *`chromHmm`* file is the genome split into regions that describe what each regions does. The *`promotor`* and *`exon`* files is the location of all promotors and exons in the genome. The *`vcf`* file is the mutations at each location. Below is some example output to show the structure of these files. # # Note: The *`vcf`* file contains only sample mutations. The actual files are too large for our purposes. chromHmm.head(3) promoter.head(3) exons.head(3) vcf.head(3) # ### Subtract the Exons from the Promotor Sequences # # At an extremly high level, DNA is transcripted and translated to RNA to create protein. In order for this process to start, a protein needs to bind to the DNA for transcription to start. This binding site on the DNA is called a ***promoter***. Because this binding site is not necessary for created protein, during the processing stage before translation, non-coding regions (introns) are removed and coding regions (***exons***) are preserved. # # Because the promoter regions are difficult to define without additional informations, we will define it as the regions that are `-500` upstream base pairs from the transcriptional start site (*r = 500, l = 0*). Thus, we can have accurate results when we intersect it with the ***chromHmm*** file. Then, we keep the intervals feature that are promotors. 
exons_sorted = exons.sort() exons_sorted_merge = exons_sorted.merge() exons_sorted_merge.head(3) promoter = promoter.sort() promoter_slop = promoter.slop(r = 500, l = 0, s = True, genome='hg38') chromHmm_promoter = chromHmm.intersect(promoter_slop).sort() chromHmm_promoter_noexons = chromHmm_promoter.subtract(exons, s = True) chromHmm_promoter_noexons.head(3) # + # Take only the intervals that are promoters def seperate_promoters(bed_lines): lines = [] for bed in bed_lines: line = bed.fields if (line[3].find("Promoter") != -1): new_line = "\t".join(line) if new_line not in lines: lines.append("\t".join(line)) return lines seperate_result = seperate_promoters(chromHmm_promoter_noexons) chromHmm_promoter_noexons_filtered = pybedtools.BedTool("\n".join(seperate_result), from_string = True) chromHmm_promoter_noexons_filtered.head(5) # - # ### Seperate Categorize Mutations # # Below, we seperate the variants in ***vcf*** file into different mutation types. # + # Header is required for VCF files header = """##fileformat=VCFv4.0 ##fileDate=Jun 29, 2010 ##INFO=<ID=NS,Number=1,Type=Integer> ##INFO=<ID=DP,Number=1,Type=Integer> ##INFO=<ID=AC,Number=1,Type=Integer> ##INFO=<ID=AN,Number=1,Type=Integer> ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype"> ##FORMAT=<ID=VR,Number=1,Type=Integer,Description="Number of variant reads"> ##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth"> ##FORMAT=<ID=FT,Number=.,Type=String,Description="Filter"> ##FILTER=<ID=atlas> #CHROM POS ID REF ALT QUAL FILTER INFO""" #Check if the variant type is a repeat (duplication and repeats) def check_repeat(ref, alt): if (len(alt) % len(ref) != 0): return (False, 0) for base in range(len(alt)): if (ref[base % len(ref)] != alt[base]): return (False, 0) return (True, len(alt)/len(ref)) #Check if the variant type is a deletion and insertion def check_delins(ref, alt): if (len(ref) > len(alt)): a,b = ref,alt else: a,b = alt,ref for base in range(len(b)): if (b[base] != a[base]): return 
True return False #Seperate the variants by mutation type def seperate_by_type(vcf_input_lines): result = {"SNP":[],"DEL":[],"INS":[],"DELINS":[],"DUP":[],"REP":[]} #Seperate the variants line_num = 0 try: while True: line = vcf_input_lines[line_num] chrm,ref,alts = line[0],line[3],line[4].split(",") #VCF files have multiple variants for alt in alts: line[0] = "chr" + chrm line[4] = alt vcf_line = "\t".join(line) dup,num = check_repeat(ref,alt) #Check for duplication if (dup == True): #Repetition or duplication exists if (num == 2): #Duplication result[4].append(vcf_line) else: #Repetition result[5].append(vcf_line) elif (len(ref) == len(alt) == 1): #SNV result["SNP"].append(vcf_line) elif (check_delins(ref,alt) == True): #Deletion and insertion result["DELINS"].append(vcf_line) elif (len(ref) > len(alt)): #Deletion result["DEL"].append(vcf_line) elif (len(ref) < len(alt)): #Insertion result["INS"].append(vcf_line) else: #Unknown mutation and is "skipped" pass line_num += 1 except: return result vcf_result = seperate_by_type(vcf) # - # ### Converting to `VCF` and `BED` Format # # Now, as we only concerned with specifc mutations, let us convert the dictionary into `VCF` files, so we can use the `pybedtools` function. Just to make sure our algorithm is correct, we output the results below. SNP = header + "\n" + "\n".join(vcf_result["SNP"]) SNP_VCF = pybedtools.BedTool(SNP, from_string = True) SNP_VCF.head(3) DEL = header + "\n" + "\n".join(vcf_result["DEL"]) DEL_VCF = pybedtools.BedTool(DEL, from_string = True) DEL_VCF.head(3) INS = header + "\n" + "\n".join(vcf_result["INS"]) INS_VCF = pybedtools.BedTool(INS, from_string = True) INS_VCF.head(3) DELINS = header + "\n" + "\n".join(vcf_result["DELINS"]) DELINS_VCF = pybedtools.BedTool(DELINS, from_string = True) DELINS_VCF.head(3) # ### Find the SNPs that Affect Each Promotor # # Final step! Let us find the intervals of the promotors that interesect with the SNPs. 
promoter_intersect_snp = chromHmm_promoter_noexons_filtered.intersect(SNP_VCF, u = True) promoter_intersect_snp.head(10) promoter_intersect_snp_range = chromHmm_promoter_noexons_filtered.intersect(SNP_VCF, u = True) promoter_intersect_snp_range.head(5) # If we look at the first active promoter in the UCSC Genome Browser (https://genome.ucsc.edu/cgi-bin/hgGateway, entry: *"chr1 40723813 40724413"*), we do see that there is a promoter region for NFYC! # # (Actual Link: https://genome.ucsc.edu/cgi-bin/hgTracks?db=hg38&lastVirtModeType=default&lastVirtModeExtraState=&virtModeType=default&virtMode=0&nonVirtPosition=&position=chr1%3A40723814%2D40724413&hgsid=662259431_1MwbUH4ZdXEHXFMFUjJ8AmaRLKWH) # ## Summary, Further Exploration, and References # # This tutorial gives only a small glimpse of the power of `bedtools` and `pybedtools`. Here are some further links and references: # # * http://bedtools.readthedocs.io/en/latest/index.html # * https://daler.github.io/pybedtools/ # * http://pedagogix-tagc.univ-mrs.fr/courses/jgb53d-bd-prog/practicals/01_bedtools/index.html # * http://quinlanlab.org/tutorials/bedtools/bedtools.html # # Data From: # * ftp://ftp.ncbi.nih.gov/snp/organisms/human_9606/VCF/ # * https://genome.ucsc.edu/cgi-bin/hgTables # * http://rohsdb.cmb.usc.edu/GBshape/cgi-bin/hgFileUi?db=hg19&g=wgEncodeBroadHmm # "The sin result of pi_tensor: ", sin) # - # The resultant tensor <code>sin</code> contains the result of the <code>sin</code> function applied to each element in the <code>pi_tensor</code>.<br> # This is different from the previous methods. For <code><i>tensor_obj</i>.mean()</code>, <code><i>tensor_obj</i>.std()</code>, <code><i>tensor_obj</i>.max()</code>, and <code><i>tensor_obj</i>.min()</code>, the result is a tensor with only one number because these are aggregate methods.<br> # However, the <code>torch.sin()</code> is not. Therefore, the resultant tensors have the same length as the input tensor. 
# # <!--Empty Space for separating topics--> # # <h3>Create Tensor by <code>torch.linspace()</code></h3> # # A useful function for plotting mathematical functions is <code>torch.linspace()</code>. <code>torch.linspace()</code> returns evenly spaced numbers over a specified interval. You specify the starting point of the sequence and the ending point of the sequence. The parameter <code>steps</code> indicates the number of samples to generate. Now, you'll work with <code>steps = 5</code>. # # + jupyter={"outputs_hidden": false} # First try on using linspace to create tensor len_5_tensor = torch.linspace(-2, 2, steps = 5) print ("First Try on linspace", len_5_tensor) # - # <!--Empty Space for separating topics--> # # Assign <code>steps</code> with 9: # # + jupyter={"outputs_hidden": false} # Second try on using linspace to create tensor len_9_tensor = torch.linspace(-2, 2, steps = 9) print ("Second Try on linspace", len_9_tensor) # - # <!--Empty Space for separating topics--> # # Use both <code>torch.linspace()</code> and <code>torch.sin()</code> to construct a tensor that contains the 100 sin result in range from 0 (0 degree) to 2π (360 degree): # # + jupyter={"outputs_hidden": false} # Construct the tensor within 0 to 360 degree pi_tensor = torch.linspace(0, 2*np.pi, 100) sin_result = torch.sin(pi_tensor) # - # Plot the result to get a clearer picture. You must cast the tensor to a numpy array before plotting it. # # + jupyter={"outputs_hidden": false} # Plot sin_result plt.plot(pi_tensor.numpy(), sin_result.numpy()) # - # If you know the trigonometric function, you will notice this is the diagram of the sin result in the range 0 to 360 degrees. # # <!--Empty Space for separating topics--> # # <h3>Practice</h3> # # Construct a tensor with 25 steps in the range 0 to π/2. Print out the Maximum and Minimum number. Also, plot a graph showing the diagram that shows the result. 
# practice_tensor = np.linspace(0, np.pi/2, 25) practice_tensor # + # Practice: Create your tensor, print max and min number, plot the sin result diagram practice_tensor = torch.linspace(0, np.pi/2, 25) max_num, min_num = max(practice_tensor), min(practice_tensor) print(f"Max value: {max_num}, Min value: {min_num}") to_plot = torch.sin(practice_tensor) plt.plot(range(len(practice_tensor.numpy())), to_plot.numpy()) # + [markdown] jupyter={"source_hidden": true} # Double-click <b>here</b> for the solution. # # <!-- # pi_tensor = torch.linspace(0, np.pi/2, 100) # print("Max Number: ", pi_tensor.max()) # print("Min Number", pi_tensor.min()) # sin_result = torch.sin(pi_tensor) # plt.plot(pi_tensor.numpy(), sin_result.numpy()) # --> # # - # <!--Empty Space for separating topics--> # # <h2 id="Tensor_Op">Tensor Operations</h2> # # In the following section, you'll work with operations that you can apply to a tensor. # # <!--Empty Space for separating topics--> # # <h3>Tensor Addition</h3> # # You can perform addition between two tensors. # # Create a tensor <code>u</code> with 1 dimension and 2 elements. Then, create another tensor <code>v</code> with the same number of dimensions and the same number of elements: # # + jupyter={"outputs_hidden": false} # Create two sample tensors u = torch.tensor([1, 0]) v = torch.tensor([0, 1]) # - # Add <code>u</code> and <code>v</code> together: # # + jupyter={"outputs_hidden": false} # Add u and v w = u + v print("The result tensor: ", w) # - # The result is <code>tensor([1, 1])</code>. The behavior is <i>[1 + 0, 0 + 1]</i>. # # Plot the result to to get a clearer picture. # # + jupyter={"outputs_hidden": false} # Plot u, v, w plotVec([ {"vector": u.numpy(), "name": 'u', "color": 'r'}, {"vector": v.numpy(), "name": 'v', "color": 'b'}, {"vector": w.numpy(), "name": 'w', "color": 'g'} ]) # - # <!--Empty Space for separating topics--> # # <h3>Try</h3> # # Implement the tensor subtraction with <code>u</code> and <code>v</code> as u-v. 
# # + jupyter={"outputs_hidden": false} # Try by yourself to get a result of u-v u = torch.tensor([1, 0]) v = torch.tensor([0, 1]) print(u - v) # - # Double-click <b>here</b> for the solution. # # <!-- # print("The result tensor: ", u-v) # --> # # Tensors must be of the same data type to perform addition as well as other operations.If you uncomment the following code and try to run it you will get an error as the two tensors are of two different data types. **NOTE This lab was created on a older PyTorch version so in the current version we are using this is possible and will produce a float64 tensor.** # # + #torch.tensor([1,2,3],dtype=torch.int64)+torch.tensor([1,2,3],dtype=torch.float64) # - # <!--Empty Space for separating topics--> # # You can add a scalar to the tensor. Use <code>u</code> as the sample tensor: # # + # tensor + scalar u = torch.tensor([1, 2, 3, -1]) v = u + 1 print ("Addition Result: ", v) # - # The result is simply adding 1 to each element in tensor <code>u</code> as shown in the following image: # # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter%201/brodcasting.gif" width = "500" alt="tensor addition" /> # # <!--Empty Space for separating topics--> # # <h3>Tensor Multiplication </h3> # # Now, you'll review the multiplication between a tensor and a scalar. # # Create a tensor with value <code>[1, 2]</code> and then multiply it by 2: # # + jupyter={"outputs_hidden": false} # tensor * scalar u = torch.tensor([1, 2]) v = 2 * u print("The result of 2 * u: ", v) # - # The result is <code>tensor([2, 4])</code>, so the code <code>2 \* u</code> multiplies each element in the tensor by 2. This is how you get the product between a vector or matrix and a scalar in linear algebra. # # <!--Empty Space for separating topics--> # # You can use multiplication between two tensors. 
# # Create two tensors <code>u</code> and <code>v</code> and then multiply them together: # # + jupyter={"outputs_hidden": false} # tensor * tensor u = torch.tensor([1, 2]) v = torch.tensor([3, 2]) w = u * v print ("The result of u * v", w) # - # The result is simply <code>tensor([3, 4])</code>. This result is achieved by multiplying every element in <code>u</code> with the corresponding element in the same position <code>v</code>, which is similar to <i>[1 * 3, 2 * 2]</i>. # # <!--Empty Space for separating topics--> # # <h3>Dot Product</h3> # # The dot product is a special operation for a vector that you can use in Torch. # # Here is the dot product of the two tensors <code>u</code> and <code>v</code>: # # + # Calculate dot product of u, v u = torch.tensor([1, 2]) v = torch.tensor([3, 2]) print("Dot Product of u, v:", torch.dot(u,v)) # - # The result is <code>tensor(7)</code>. The function is <i>1 x 3 + 2 x 2 = 7</i>. # # <!--Empty Space for separating topics--> # # <h3>Practice</h3> # # Convert the list <i>[-1, 1]</i> and <i>[1, 1]</i> to tensors <code>u</code> and <code>v</code>. Then, plot the tensor <code>u</code> and <code>v</code> as a vector by using the function <code>plotVec</code> and find the dot product: # # + jupyter={"outputs_hidden": false} # Practice: calculate the dot product of u and v, and plot out two vectors u = [-1, 1] v = [1, 1] print(type(u)) print(type(v)) u = torch.tensor(u) v = torch.tensor(v) print(u.type()) print(v.type()) print(f"Dot Product of u and v: {u.dot(v)}") plotVec([ {"vector": u.numpy(), "name": 'u', "color": 'r'}, {"vector": v.numpy(), "name": 'v', "color": 'b'}, ]) # - # Double-click <b>here</b> for the solution. 
# # <!-- # u= torch.tensor([-1, 1]) # v= torch.tensor([1, 1]) # plotVec([ # {"vector": u.numpy(), "name": 'u', "color": 'r'}, # {"vector": v.numpy(), "name": 'v', "color": 'b'} # ]) # print("The Dot Product is",np.dot(u, v)) # --> # # <!--Empty Space for separating topics--> # # See <a href="https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html">Broadcasting</a> for more information on numpy that is similar to PyTorch. # # <a href="http://cocl.us/pytorch_link_bottom"> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" /> # </a> # # <h2>About the Authors:</h2> # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. # # Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/">Michelle Carey</a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ----------------------------------------------------------- | # | 2020-09-21 | 2.0 | Shubham | Migrated Lab to Markdown and added to course repo in GitLab | # # <hr> # # Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>. #
29,204
/lesson5/lesson5.ipynb
d715fa9478e431ae95a04b831c66db7f4136f9a4
[]
no_license
myselfadmirer/Algorithms_for_Data_Analysis
https://github.com/myselfadmirer/Algorithms_for_Data_Analysis
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
157,550
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier # + #Sex male:0, female:1 #Embarked S:0, C:1, Q:2 train = pd.read_csv('train.csv').replace("male",0).replace("female",1).replace("S",0).replace("C",1).replace("Q",2) test = pd.read_csv('test.csv').replace("male",0).replace("female",1).replace("S",0).replace("C",1).replace("Q",2) y_test = pd.read_csv('gender_submission.csv') # - train.info() # + train["Age"].fillna(train.Age.mean(),inplace=True) train["Embarked"].fillna(train.Embarked.mean(),inplace=True) test["Age"].fillna(train.Age.mean(),inplace=True) test["Embarked"].fillna(train.Embarked.mean(),inplace=True) test["Fare"].fillna(train.Fare.mean(), inplace=True) #列削除 train.drop("Name",axis=1,inplace=True) train.drop("Cabin",axis=1,inplace=True) train.drop("Ticket",axis=1,inplace=True) test.drop("Name",axis=1,inplace=True) test.drop("Cabin",axis=1,inplace=True) test.drop("Ticket",axis=1,inplace=True) train_data = train.values x_train = train_data[:, 2:] # Pclass以降の変数 y_train = train_data[:, 1] # 正解データ test_data = test.values x_test = test_data[:, 1:] # Pclass以降の変数 y_test = y_test.values[:,1] # + #モデル作成 from sklearn import tree from sklearn.grid_search import GridSearchCV tuned_parameters = {'max_depth': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], # 木の深さを1-10で 'max_leaf_nodes': [2,4,6,8,10] # 最大終端ノード数を2,4,6,8,10で } # 上記で用意したパラメーターごとに交差検証を実施。最適な木の深さを確認する。 clf = GridSearchCV(tree.DecisionTreeClassifier(random_state=5,splitter='best'), tuned_parameters, scoring="accuracy",cv=5, n_jobs=1) clf = clf.fit(x_train, y_train) # モデル作成! 
print("complete!") # - print("Best Parameter: {}".format(clf.best_params_)) print("Best Parameterでの検証用データの精度: {:.2f}".format(clf.score(x_test, y_test))) print("Best Parameterで交差検証した精度の平均(訓練データ): {:.2f}".format(clf.best_score_)) # + clf = tree.DecisionTreeClassifier(max_depth=3,max_leaf_nodes=8) clf = clf.fit(x_train,y_train) print(clf.feature_importances_) # - from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score print(precision_score(y_test,y_pred)) print(recall_score(y_test,y_pred)) y_pred = clf.predict(x_test) print(confusion_matrix(y_test,y_pred)) test.info() y_test.info() tag_to_idx = tag_to_idx def vectorize_sequence(self, sequence, idx, remove_if_unk=False): if '<UNK>' in idx: unknown_index = idx['<UNK>'] chars = [idx.get(tok, unknown_index) for tok in sequence] if remove_if_unk: return [w for w in chars if w != unknown_index] else: return chars else: return [idx[tok] for tok in sequence] def __call__(self, example): name, tag = example vectorized_name = self.vectorize_sequence(name, self.char_to_idx) vectorized_tag = self.tag_to_idx[tag] return ( vectorized_name, vectorized_tag, ) vectorizer = Vectorizer(char_to_idx, tag_to_idx) # + train_data = [vectorizer(example) for example in train] valid_data = [vectorizer(example) for example in valid] test_data = [vectorizer(example) for example in test] datasets = {'train':train_data, 'val':valid_data, 'test':test_data} # - train_data[0], train[0] # Le concept de padding est extrêmement important. Il nous permet d'envoyer des tenseurs de longueurs différentes sur le GPU. # # Nous prenons donc le tenseur le plus long de notre minibatch pour créer une matrice d'exemple. 
# + import torch def pad_sequences(vectorized_seqs, seq_lengths): seq_tensor = torch.zeros((len(vectorized_seqs), seq_lengths.max())).long() for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)): seq_tensor[idx, :seqlen] = torch.LongTensor(seq[:seqlen]) return seq_tensor def collate_examples(samples): names, tags = list(zip(*samples)) names_lengths = torch.LongTensor([len(s) for s in names]) padded_names = pad_sequences(names, names_lengths) tags = torch.LongTensor(tags) return padded_names, tags # + from torch.utils.data import DataLoader, Dataset batch_size = 16 train_loader = DataLoader( train_data, batch_size=batch_size, collate_fn=collate_examples, shuffle=True ) valid_loader = DataLoader( valid_data, batch_size=batch_size, collate_fn=collate_examples, shuffle=False ) test_loader = DataLoader( test_data, batch_size=batch_size, collate_fn=collate_examples, shuffle=False ) # - b = next(iter(train_loader)) b[0].shape, b # + from torch import nn from torch.nn import functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence class NameClassifier(nn.Module): def __init__(self, char_to_idx, tag_to_idx, embedding_size, hidden_layer_size): super(NameClassifier, self).__init__() self.embeddings = nn.Embedding(len(char_to_idx), embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_layer_size) self.fully_connected = nn.Linear(hidden_layer_size, len(tag_to_idx)) self.loss_function = nn.CrossEntropyLoss() self.metrics = ['acc'] def forward(self, names): # Getting the length of sequences so we can pack them and send them to gpu. 
# We also sort the sequences by length, as required by the pack_padded_sequence function seq_lengths, perm_idx = (names > 0).sum(dim=1).sort(0, descending=True) # We need the reverse idx to unsort the sequence at the end of the forward _, rev_perm_idx = perm_idx.sort(0) # (batch_size, max_length) sorted_names = names[perm_idx] # (batch_size, max_length, embedding_size) embeds = self.embeddings(sorted_names) packed_names = pack_padded_sequence(embeds, seq_lengths, batch_first=True) # (1, batch_size, hidden_layer_size) _, (h_n, _) = self.rnn(packed_names) # (1, batch_size, num_tags) out = self.fully_connected(h_n) # (batch_size, num_tags) out = out.squeeze(0) return out[rev_perm_idx] # - dataloaders = {'train':train_loader, 'val':valid_loader, 'test':test_loader} dataset_sizes = {x: len(datasets[x]) for x in ['train', 'val', 'test']} from torch.optim import lr_scheduler device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device # + jupyter={"source_hidden": true} def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model # - net = NameClassifier(char_to_idx, tag_to_idx, embedding_size=50, hidden_layer_size=100) net = net.to(device) optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, nesterov=True) loss_function = nn.CrossEntropyLoss() scheduler = lr_scheduler.StepLR(optimizer, 1, gamma=1) # no scheduling train_model(net, loss_function, optimizer, scheduler, num_epochs=25) net.cpu() idx_to_char = {v:k for k, v in char_to_idx.items()} idx_to_tag = {v:k for k,v in tag_to_idx.items()} predictions = [] for inputs, labels in dataloaders['test']: seq = list(map(lambda seq : ''.join([idx_to_char[int(c)] for c in seq if 'PAD' not in idx_to_char[int(c)]]), inputs) ) pred = [idx_to_tag[int(l)] for l in torch.argmax(net(inputs),dim=1)] label =[idx_to_tag[int(l)] for l in labels] predictions.extend(zip(seq, pred, label)) predictions 
ls, cmap = colors) plt.title(f'Number of trees - {tree}. Train accuracy={test_accuracy:.2f}') plt.show # - # #### Из-за малого размера датасета и количества признаков, модель получается переобученной. # #### 2. *Заменить в реализованном алгоритме проверку с помощью отложенной выборки на Out-of-Bag. # #### 3. *(На повторение) Переписать функцию calc_gini из урока про решающие деревья так, чтобы в качестве критерия использовалась энтропия Шэннона. Переименовать функцию в calc_entropy. def calc_entropy(labels): classes = {} for label in labels: if label not in classes: classes[label] = 0 classes[label] += 1 impurity = 0 for label in classes: p = classes[label] / len(labels) impurity -= p * np.log2(p) return impurity
10,781
/examples/pi-vae_simulated_data_continuous_label.ipynb
06f2963aaf3e1250b0da748299d0eef07da27f63
[]
no_license
stes/pi-vae
https://github.com/stes/pi-vae
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
235,683
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import sys import matplotlib.pyplot as plt sys.path.append("../code/") from pi_vae import * from util import * from keras.callbacks import ModelCheckpoint ## import plot packages from matplotlib.ticker import FormatStrFormatter from matplotlib import ticker # %load_ext autoreload # %autoreload 2 # %matplotlib inline # - # # continuous label data # ## load continuous label simulated data # ## or run simulate_data.ipynb to generate data dat = np.load('../data/sim/sim_100d_poisson_cont_label.npz'); u_true = dat['u']; z_true = dat['z']; x_true = dat['x']; # + x_all = x_true.reshape(50,300,-1); u_all = u_true.reshape(50,300,-1); x_train = x_all[:40]; u_train = u_all[:40]; x_valid = x_all[40:45]; u_valid = u_all[40:45]; x_test = x_all[45:]; u_test = u_all[45:]; # - # ## fit pi-vae np.random.seed(666); vae = vae_mdl(dim_x=x_all[0].shape[-1], dim_z=2, dim_u=u_all[0].shape[-1], gen_nodes=60, n_blk=2, mdl='poisson', disc=False, learning_rate=5e-4) model_chk_path = '../results/sim_cont_nflow_2d_666_pivae.h5' ##999, 777 mcp = ModelCheckpoint(model_chk_path, monitor="val_loss", save_best_only=True, save_weights_only=True) s_n = vae.fit_generator(custom_data_generator(x_train, u_train), steps_per_epoch=len(x_train), epochs=1000, verbose=1, validation_data = custom_data_generator(x_valid, u_valid), validation_steps = len(x_valid), callbacks=[mcp]); plt.plot(s_n.history['val_loss'][:]) vae.load_weights(model_chk_path); outputs = vae.predict_generator(custom_data_generator(x_all, u_all), steps = len(x_all)); # post_mean, post_log_var, z_sample,fire_rate, lam_mean, lam_log_var, z_mean, z_log_var # + length = 30; ll = 10000; c_vec = plt.cm.viridis(np.linspace(0,1,length)) bins = np.linspace(0,2*np.pi,length); centers = (bins[1:]+bins[:-1])/2; 
disc_loc = np.digitize(u_true[:,0],centers); c_all = c_vec[disc_loc]; fsz = 14; plt.figure(figsize=(12,4)); ax1 = plt.subplot(1,3,1) plt.scatter(z_true[:ll,0], z_true[:ll,1], c=c_all, s=1,alpha=0.5); ax1.set_xlabel('Latent 1',fontsize=fsz,fontweight='normal'); ax1.set_ylabel('Latent 2',fontsize=fsz,fontweight='normal'); ax1.spines['top'].set_visible(False) ax1.spines['right'].set_visible(False) plt.setp(ax1.get_xticklabels(), fontsize=fsz); plt.setp(ax1.get_yticklabels(), fontsize=fsz); ax2 = plt.subplot(1,3,2) plt.scatter(outputs[0][:ll,0], outputs[0][:ll,1], c=c_all, s=1,alpha=0.5); #ax2.set_xlabel('Latent 1',fontsize=fsz,fontweight='normal'); #ax2.set_ylabel('Latent 2',fontsize=fsz,fontweight='normal'); ax2.spines['top'].set_visible(False) ax2.spines['right'].set_visible(False) plt.setp(ax2.get_xticklabels(), fontsize=fsz); plt.setp(ax2.get_yticklabels(), fontsize=fsz); ax3 = plt.subplot(1,3,3) plt.scatter(outputs[6][:ll,0], outputs[6][:ll,1], c=c_all, s=1,alpha=0.5); #ax3.set_xlabel('Latent 1',fontsize=fsz,fontweight='normal'); #ax3.set_ylabel('Latent 2',fontsize=fsz,fontweight='normal'); ax3.spines['top'].set_visible(False) ax3.spines['right'].set_visible(False) plt.setp(ax3.get_xticklabels(), fontsize=fsz); plt.setp(ax3.get_yticklabels(), fontsize=fsz); plt.tight_layout(); # -
3,532
/tensor2tensor/notebooks/asr_transformer.ipynb
5c8b103a32ee38c48ef5b51ba407978b029cc545
[ "Apache-2.0" ]
permissive
LTHODAVDOPL/tensor2tensor
https://github.com/LTHODAVDOPL/tensor2tensor
1
0
Apache-2.0
2019-12-21T21:25:46
2019-12-21T21:25:35
null
Jupyter Notebook
false
false
.py
13,253
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # name: python2 # --- # + cellView="form" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="6uNrFWq5BRba" #@title # Copyright 2018 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="7tB9m_fw9Xkl" # !pip install -qq tensorflow # !pip install -qq tensor2tensor # !pip install -qq pydub # !apt-get -qq update # !apt-get -qq install -y ffmpeg # !apt-get -qq install -y sox # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="hF_ZmvGjEyJd" # %tensorflow_version 1.x import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import os import collections import base64 import cStringIO import pydub import shutil from scipy.io import wavfile import IPython import google.colab from tensor2tensor import models from tensor2tensor import problems from tensor2tensor.layers import common_layers from tensor2tensor.utils import trainer_lib from tensor2tensor.utils import t2t_model from tensor2tensor.utils import registry from tensor2tensor.utils import metrics # Enable TF Eager execution tfe = tf.contrib.eager tf.enable_eager_execution() # Other setup Modes = tf.estimator.ModeKeys # Setup some directories data_dir = os.path.expanduser("~/t2t/data") tmp_dir = 
os.path.expanduser("~/t2t/tmp") train_dir = os.path.expanduser("~/t2t/train") checkpoint_dir = os.path.expanduser("~/t2t/checkpoints") tf.gfile.MakeDirs(data_dir) tf.gfile.MakeDirs(tmp_dir) tf.gfile.MakeDirs(train_dir) tf.gfile.MakeDirs(checkpoint_dir) gs_ckpt_dir = "gs://tensor2tensor-checkpoints/" # + [markdown] colab_type="text" id="LwPvdJJ4xN6y" # # ### Define problem, hparams, model, encoder and decoder # Definition of this model (as well as many more) can be found on tensor2tensor github [page](https://github.com/tensorflow/tensor2tensor). # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="hH0FEHhDIGjM" problem_name = "librispeech_clean" asr_problem = problems.problem(problem_name) encoders = asr_problem.feature_encoders(None) model_name = "transformer" hparams_set = "transformer_librispeech_tpu" hparams = trainer_lib.create_hparams(hparams_set,data_dir=data_dir, problem_name=problem_name) asr_model = registry.model(model_name)(hparams, Modes.PREDICT) def encode(x): waveforms = encoders["waveforms"].encode(x) encoded_dict = asr_problem.preprocess_example({"waveforms":waveforms, "targets":[]}, Modes.PREDICT, hparams) return {"inputs" : tf.expand_dims(encoded_dict["inputs"], 0), "targets" : tf.expand_dims(encoded_dict["targets"], 0)} def decode(integers): integers = list(np.squeeze(integers)) if 1 in integers: integers = integers[:integers.index(1)] return encoders["targets"].decode(np.squeeze(integers)) # + [markdown] colab_type="text" id="pGhUGptixYBd" # ### Define path to checkpoint # In this demo we are using a pretrained model. # Instructions for training your own model can be found in the [tutorial](https://github.com/tensorflow/tensor2tensor/blob/master/docs/tutorials/asr_with_transformer.md) on tensor2tensor page. 
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="p9D8OJdFezsJ" # Copy the pretrained checkpoint locally ckpt_name = "transformer_asr_180214" gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name) print(gs_ckpt) # !gsutil cp -R {gs_ckpt} {checkpoint_dir} ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name)) ckpt_path # + [markdown] colab_type="text" id="arS1sXFPxvde" # ### Define transcribe function # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="od7ZPT3wfkZs" # Restore and transcribe! def transcribe(inputs): encoded_inputs = encode(inputs) with tfe.restore_variables_on_create(ckpt_path): model_output = asr_model.infer(encoded_inputs, beam_size=2, alpha=0.6, decode_length=1)["outputs"] return decode(model_output) def play_and_transcribe(inputs): waveforms = encoders["waveforms"].encode(inputs) IPython.display.display(IPython.display.Audio(data=waveforms, rate=16000)) return transcribe(inputs) # + [markdown] colab_type="text" id="Qz5u2O5LvShm" # # Decoding prerecorded examples # # You can upload any .wav files. They will be transcribed if frame rate matches Librispeeche's frame rate (16kHz). 
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="xAstJTeyvXMf" uploaded = google.colab.files.upload() prerecorded_messages = [] for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) mem_file = cStringIO.StringIO(uploaded[fn]) save_filename = os.path.join(tmp_dir, fn) with open(save_filename, 'w') as fd: mem_file.seek(0) shutil.copyfileobj(mem_file, fd) prerecorded_messages.append(save_filename) for inputs in prerecorded_messages: outputs = play_and_transcribe(inputs) print("Inputs: %s" % inputs) print("Outputs: %s" % outputs) # + [markdown] colab_type="text" id="mJvRjlHUrr65" # # Recording your own examples # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="oirqsdqVoElk" # Records webm file and converts def RecordNewAudioSample(filename=None, webm_filename=None): """Args: filename - string, path for storing wav file webm_filename - string, path for storing webm file Returns: string - path where wav file was saved. (=filename if specified) """ # Create default filenames in tmp_dir if not specified. if not filename: filename = os.path.join(tmp_dir, "recording.wav") if not webm_filename: webm_filename = os.path.join(tmp_dir, "recording.webm") # Record webm file form colab. audio = google.colab._message.blocking_request('user_media', {"audio":True, "video":False, "duration":-1}, timeout_sec=600) #audio = frontend.RecordMedia(True, False) # Convert the recording into in_memory file. music_mem_file = cStringIO.StringIO( base64.decodestring(audio[audio.index(',')+1:])) # Store webm recording in webm_filename. Storing is necessary for conversion. with open(webm_filename, 'w') as fd: music_mem_file.seek(0) shutil.copyfileobj(music_mem_file, fd) # Open stored file and save it as wav with sample_rate=16000. 
pydub.AudioSegment.from_file(webm_filename, codec="opus" ).set_frame_rate(16000).export(out_f=filename, format="wav") return filename # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="90BjliFTCQm9" # Record the sample my_sample_filename = RecordNewAudioSample() print my_sample_filename # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="PdBfEik0-pMv" print play_and_transcribe(my_sample_filename)
7,706
/pipeline/slim-analysis-ndvi-validate.ipynb
0d275f254557c3edde22bb44c624651b754bc94f
[ "LicenseRef-scancode-free-unknown", "MIT" ]
permissive
ajijohn/planet-snowcover
https://github.com/ajijohn/planet-snowcover
1
1
MIT
2020-01-21T19:59:09
2020-01-20T23:30:44
null
Jupyter Notebook
false
false
.r
296,206
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: R # language: R # name: ir # --- # + #planet-snowcover-snow/ASO_3M_SD_USCATE_20190705_4326_binary/15/5492/12642.tif # - library(tidyverse) library(raster) library(gdalUtils) # get all the rasters aso tiles #list files in a specific folder veg_aso <- list.files(path = "~/ndvi-validate/") # + asos <- list() preds <-list() for (vaso in veg_aso) { #print(vaso) recur = list.files(path=paste("~/ndvi-validate/",vaso,sep=""),recursive = TRUE) #print(recur) for (filename in recur) { if (startsWith(filename,'mask/') & endsWith(filename,"merged.tif")) asos <- append(asos,paste("~/ndvi-validate/",vaso,"/",filename,sep="")) if (startsWith(filename,'preds/') & endsWith(filename,"merged.tif")) preds <- append(preds,paste("~/ndvi-validate/",vaso,"/",filename,sep="")) } } print(asos) print(preds) # - str(asos) aso_un <- unlist(asos) pred_un <- unlist(preds) aso_un_list <- paste(aso_un, collapse=' ' ) pred_un_list <- paste(pred_un, collapse=' ' ) system("mkdir aso_ndvi_validate") system("mkdir aso_ndvi_validate_pred") file.copy(from=pred_un, to='./aso_ndvi_validate_pred', overwrite = TRUE, recursive = FALSE, copy.mode = TRUE) for(i in 1:length(aso_un)) { print(aso_un[i]) file.copy(from=aso_un[i],to=paste('./aso_ndvi_validate/',i,'-ASO_3M_SD_USCATE_20190705_4326_binary_merged.tif',sep="")) } # + fo <- list.files(path = "./aso_ndvi_validate", pattern = ".tif$", full.names = TRUE) rlo <- lapply(fo, raster) aso_dem <- do.call(merge, c(rlo, tolerance = 1)) # + f <- list.files(path = "./aso_ndvi_validate_pred", pattern = ".tif$", full.names = TRUE) rl <- lapply(f, raster) aso_dem_pred <- do.call(merge, c(rl, tolerance = 1)) # - aso_dem plot(aso_dem) aso_dem_pred plot(aso_dem_pred) # + # save the inter,ediate raster writeRaster(aso_dem, filename="aso_ndvi_validate_070519.tif", format="GTiff", overwrite=TRUE) # - 
writeRaster(aso_dem_pred, filename="aso_ndvi_pred_validate_070519.tif", format="GTiff", overwrite=TRUE) aso_dem <- raster("aso_ndvi_test_only.tif") aso_dem_pred <- raster("aso_ndvi_pred_test_only.tif") df <- stack(aso_dem,aso_dem_pred) veg_vegResamp <- raster("veg_reasampl2point1by1point6.tif") veg_crop<-crop(veg_vegResamp,extent(df)) veg_crop df veg_resampl_prj <- projectRaster(veg_crop,df) writeRaster(veg_resampl_prj, filename="aso_ndvi_ch_test_only.tif", format="GTiff", overwrite=TRUE) veg_resampl_prj <- raster("aso_ndvi_ch_test_only.tif") df <- stack(aso_dem,aso_dem_pred,veg_resampl_prj) data_matrix <- rasterToPoints(df) head(data_matrix) datafra_comb <- data_matrix %>% as.data.frame() nrow(datafra_comb) colnames(datafra_comb) <- c('x','y','aso_ndvi','aso_ndvi_pred','veg_height') datafra_comb1m <- datafra_comb %>% filter(!is.na(veg_height) & veg_height > 1) %>% as.data.frame() datafra_comb1m %>% group_by(aso_ndvi_pred) %>% summarize(count = n()) datafra_comb1m %>% group_by(aso_ndvi) %>% summarize(count = n()) high_veg <- datafra_comb1m %>% filter(!is.na(veg_height)) %>% filter(veg_height > 1 ) %>% as.data.frame() high_veg_perf<- confusionMatrix(high_veg$aso_ndvi, high_veg$aso_ndvi_pred,mode = "prec_recall") high_veg_perf class <- c("0", "1") f_score <- c(15059664,11868986) perf_pred<- data.frame(class, f_score) colnames(perf_pred) <- c('class','f_score') perf_pred %>% ggplot() + geom_bar(aes(class,f_score),stat="identity") + labs(x="Vegetation class > 1m " , y="Count",subtitle="+NDVI model - Prediction") + theme_minimal(base_size = 12) class <- c("0", "1") f_score <- c(24058205,2865239) perf_pred<- data.frame(class, f_score) colnames(perf_pred) <- c('class','f_score') perf_pred %>% ggplot() + geom_bar(aes(class,f_score),stat="identity") + (breaks = c(0,5,10,15,20,25,30,35,40,45))+ labs(x="Vegetation class > 1m " , y="Count",subtitle="+NDVI model - ASO") + theme_minimal(base_size = 12) hist(datafra_comb1m$veg_height) veg_crop df # + #data_matrix <- 
rasterToPoints(df) #head(data_matrix) #datafra_comb <- data_matrix %>% as.data.frame() # - veg_crop_aso <- crop(veg_crop, extent(df)) # mask all the pixels below 2 veg_crop_h <- veg_crop_aso veg_crop_h[veg_crop_h < 2] <- NA aso_demabove2 <- mask(aso_dem, veg_crop_h, filename="aso_ndviabove2_test.tif",overwrite=TRUE) aso_dem_predabove2 <- mask(aso_dem_pred, veg_crop_h,filename="pred_ndviabove2_test.tif",overwrite=TRUE) aso_dem_predabove2 <- raster("pred_demabove2_test.tif") aso_demabove2 <- raster("aso_demabove2_test.tif") vals_pred <- getValues(aso_dem_predabove2) library(caret) vals_aso <- getValues(aso_demabove2) high_veg_perf<- confusionMatrix(vals_aso, vals_pred,mode = "prec_recall") high_veg_perf #create below two mask all excpet the ones below .2 veg_crop_h <- veg_crop_aso veg_crop_h[veg_crop_h > .2] <- NA aso_vegbpoint2 <- mask(aso_dem, veg_crop_h, filename="aso_ndvibelowpoint2_test.tif",overwrite=TRUE) aso_dem_predvegbpoint2 <- mask(aso_dem_pred, veg_crop_h,filename="pred_ndvibelowpoint2_test.tif",overwrite=TRUE) vals_pred <- getValues(aso_dem_predvegbpoint2) vals_aso <- getValues(aso_vegbpoint2) low_veg_perf<- confusionMatrix(vals_aso, vals_pred,mode = "prec_recall") low_veg_perf #create below two and above .2 veg_crop_h <- veg_crop_aso veg_crop_h[veg_crop_h < .2 & veg_crop_h > 2] <- NA aso_vegabovepoint2below2 <- mask(aso_dem, veg_crop_h, filename="aso_ndviabovepoint2andbelow2_test.tif",overwrite=TRUE) aso_dem_predabovepoint2andbelow2 <- mask(aso_dem_pred, veg_crop_h,filename="pred_ndviabovepoint2andbelow2_test.tif",overwrite=TRUE) vals_pred <- getValues(aso_dem_predabovepoint2andbelow2) vals_aso <- getValues(aso_vegabovepoint2below2) med_veg_perf<- confusionMatrix(vals_aso, vals_pred,mode = "prec_recall") med_veg_perf med_veg_perf class <- c("< .2m", "> .2 & < 2m"," > 2m") f_score <- c(0.6209,0.7091,0.7526) perf<- data.frame(class, f_score) colnames(perf) <- c('class','f_score') perf %>% ggplot() + geom_bar(aes(class,f_score),stat="identity") + 
labs(x="Vegetation class" , y="F-Score",subtitle="+NDVI model") + theme_minimal(base_size = 12) ggsave(filename = "gg-ndvi+allclasses.png",dpi = 300,height=4,width=4,units="in") # + # check the result # mask all the pixels below 2 veg_crop_h <- veg_crop_aso veg_crop_h[veg_crop_h < 1] <- NA aso_demabove1 <- mask(aso_dem, veg_crop_h, filename="aso_ndviabove1_test.tif",overwrite=TRUE) # - aso_dem_pred veg_crop_h aso_dem_pred_prj <- projectRaster(aso_dem_pred,veg_crop_h) aso_dem_pred_prj aso_dem_predabove1 <- mask(aso_dem_pred_prj, veg_crop_h,filename="pred_ndviabove1_test.tif",overwrite=TRUE) aso_dem_predabove1 veg_crop_h writeRaster(veg_crop_h, filename="veg_above_1.tif", format="GTiff", overwrite=TRUE) onem_df <- stack(aso_dem_predabove1,veg_crop_h) data_matrix <- rasterToPoints(onem_df) head(data_matrix) datafra_comb <- data_matrix %>% as.data.frame() head(datafra_comb) colnames(datafra_comb) <- c('x','y','ndvi_pred','veg_height') nrow(datafra_comb) datafra_comb%>% filter(!is.na(ndvi_pred)) %>% nrow() data_not_na <- datafra_comb%>% filter(!is.na(ndvi_pred)) %>% as.data.frame() hist(data_not_na$ndvi_pred) hist(data_not_na$veg_height) save.image()
7,457
/workflow/ml_modelling/00_ml_workflow/al_plots_for_figure/ml_plots__v5.ipynb
a66d73e344dfa34f8034b4eb8ac98fec9d5dc94b
[]
no_license
flash-jaehyun/PROJ_IrOx_Active_Learning_OER
https://github.com/flash-jaehyun/PROJ_IrOx_Active_Learning_OER
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
12,893
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [conda env:PROJ_irox] # language: python # name: conda-env-PROJ_irox-py # --- # # Creating the AL plots # # This whole script needs be run twice with the variable `color_custom_points` set to `True` and `False` and twice for IrO2 and IrO3 stoichs, so 4 times in total # + [markdown] Collapsed="false" # # Import Modules # + import os print(os.getcwd()) import sys import pickle import copy import plotly.graph_objects as go from plotly.subplots import make_subplots # ############################################################################# from plotting.my_plotly import my_plotly_plot # ######################################################### # Local Imports sys.path.insert(0, os.path.join(os.environ["PROJ_irox"], "data")) from proj_data_irox import ( bulk_dft_data_path, ids_to_discard__too_many_atoms_path, unique_ids_path, df_dij_path) # from proj_data_irox import main_AB2_run, main_AB3_run # + from al_data import main_AB2_run, main_AB3_run, gens_to_plot_dict from layout import layout as layout_base # + [markdown] Collapsed="false" # # Script Inputs # + stoich_i = "AB3" # stoich_i = "AB2" lowest_N_sys_to_track = 10 # + [markdown] Collapsed="false" # # Read Data # + Collapsed="false" comb_to_run = [ ("AB2", False), ("AB3", False), ("AB2", True), ("AB3", True), ] for (stoich_i, color_custom_points) in comb_to_run: print(stoich_i, color_custom_points) # ######################################################### if stoich_i == "AB3": path_i = main_AB3_run elif stoich_i == "AB2": path_i = main_AB2_run else: assert False, "No data here isjfisdifjisdjifjsidfjr89u8fh8wejf" print(stoich_i, "\n", path_i) with open(path_i, "rb") as fle: AL = pickle.load(fle) al_gen_dict = AL.al_gen_dict gens_to_plot = gens_to_plot_dict[stoich_i] # 
######################################################### last_gen_key = list(al_gen_dict.keys())[-1] if gens_to_plot[-1] == "last": gen_4 = last_gen_key gens_to_plot[-1] = gen_4 AL_last = al_gen_dict[last_gen_key] model = AL_last.model model_i = model[ (model["duplicate"] == False) & \ (model["acquired"] == True) ].sort_values("y_real") top_ids_to_track = model_i.iloc[0:lowest_N_sys_to_track].index.tolist() color_list = [ "#fde725", "#b8de29", "#74d055", "#3cbc75", "#20a386", "#238a8d", "#2d708e", "#39558c", "#453781", "#481568", ] marker_color_dict = dict(zip( top_ids_to_track, color_list, )) # ######################################################### from active_learning.al_analysis import ALAnimation # color_custom_points = False # color_custom_points = True ALAnim = ALAnimation( ALBulkOpt=AL, marker_color_dict=marker_color_dict, verbose=True, # color_custom_points=False, color_custom_points=color_custom_points, ) # ######################################################### traces_list = [] traces_list_tracking = [] num_dft_list = [] # top_ten_tracking_dict = dict() for gen_i in gens_to_plot: print(gen_i) if gen_i < 0: gen_i = list(al_gen_dict.keys())[gen_i] AL_i = al_gen_dict[gen_i] model_i = AL_i.model tmp = model_i[model_i.acquired == True].shape num_dft_list.append(tmp[0]) num_systems_0 = AL_i.model.shape[0] num_dft_i = model_i[model_i["acquired"] == True].shape[0] print("num_dft_i:", num_dft_i) trace_i, other_data_dict = ALAnim.get_trace_j( AL_i, prediction_key="y", uncertainty_key="err", plot_dft_instead_of_pred=True, plot_validation_dft=False, # trace_all_dft=True, trace_horiz_lines=False, internally_order_df=True, dft_calc_al_gen_text_overlay=False, add_vertical_track_lines=True, just_traces=False, ) traces_list.append(trace_i) # ######################################################################### # ######################################################################### model__tracked = other_data_dict["model__tracked"] gen_traces_i = [] for 
i_ind, row_i in model__tracked.iterrows(): Y_main = row_i["Y_main"] x_ind = row_i["x_axis_ind"] acquired = row_i["acquired"] if acquired: color = "red" y = [4.7, 6] width = 0.8 else: color = "grey" y = [5., 6] width = 0.5 trace_i = go.Scatter( mode="lines", x=[x_ind, x_ind], y=y, name="top_ten", line=dict( width=width, color=color, ), ) # data.append(trace_i) # traces_list_tracking.append(trace_i) gen_traces_i.append(trace_i) traces_list_tracking.append(gen_traces_i) # # %%capture # ######################################################### a = 1 / len(traces_list) x = 0.1 y = a + x z = a - x / 4 column_widths = [z, z, y, z, z] # print("column_widths:", column_widths) fig = make_subplots( rows=1, cols=len(traces_list), column_widths=column_widths, shared_yaxes=True, horizontal_spacing=0.01) for i_ind, traces_i in enumerate(traces_list): for trace_i in traces_i: fig.add_trace(trace_i, row=1, col=i_ind + 1) if stoich_i == "AB3": # range_y = [-3.184, 5.529] range_y = [-0.8, 1.5] elif stoich_i == "AB2": range_y = None layout_override = dict( # height=200, # width=650, height=5.291667 * 37.795275591, # width=17.5 * 37.795275591, width=17.7 * 37.795275591, margin=go.layout.Margin( b=0, l=10, r=5, t=5), xaxis=dict( range=[-20, num_systems_0 + 10], showticklabels=False, # ticks="", ticks=None, ), yaxis=dict( range=range_y, mirror=True, showticklabels=False, # ticks="", ticks=None, ), ) layout_base_cpy = copy.deepcopy(layout_base) layout = layout_base_cpy.update(layout_override) fig.update_layout(layout) fig.update_xaxes(layout.xaxis) fig.update_yaxes(layout.yaxis) # ############################################################################# # fig.update_xaxes( linecolor="red", row=1, col=3) fig.update_yaxes( linecolor="red", row=1, col=3) # Update first subplot to have tick props fig.update_yaxes( showticklabels=True, ticks="outside", dtick=0.5, row=1, col=1) # ######################################################### fig_al_series = copy.deepcopy(fig) my_plotly_plot( 
figure=fig, plot_name=stoich_i + "_" + "al_5_gens_in_row", write_html=True, # write_png=True, # png_scale=10, # write_pdf=True, ) fig.layout.update(paper_bgcolor="white") # fig.show() # ######################################################### figs_dict = { # "fig_inset": fig_inset, # "fig_main": fig_main, "fig_al_series": fig_al_series, # "fig_al_series_top10_marked": fig_al_series_top10_marked, "traces_tracking": traces_list_tracking, "num_dft_list": num_dft_list, } # Pickling data ###################################################### directory = "out_data" if not os.path.exists(directory): os.makedirs(directory) # with open(os.path.join(directory, stoich_i + "_" + + "figs_dict__v2.pickle"), "wb") as fle: with open(os.path.join(directory, stoich_i + "_" + str(color_custom_points) + "_" + "figs_dict__v2.pickle"), "wb") as fle: pickle.dump(figs_dict, fle) # ##################################################################### # - print(20 * "# # ") print("All done!") assert False
8,755
/main.ipynb
397a7ccefe60383308cfb6b5ea85b3bf773dc10f
[]
no_license
CerJesus/ME233-Final-Project
https://github.com/CerJesus/ME233-Final-Project
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
30,760
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>ME 233 Final Project: How to house</h1> # <h3>By Jesus Cervantes, Nicholas Robles, Joanna Liu, Isaiah Drummond </h3> # # This project explores how different ways of assigning incoming students to dorms might impact the spread of COVID-19 on campus udner different quarantine scenarios. # # Package Imports # Put all package imports in here. # + import numpy as np import pandas as pd # get_results_by_dorm class code imports: from matplotlib import ticker import os import scipy.io import seaborn as sns import matplotlib.pyplot as plt # - # # Global Variables # + #Data download filename STUDENT_DATA_FILENAME = "student_state_merged.csv" #Dorm assignment schemes ASSIGN_UNIFORM = "UNIFORM" ASSIGN_BY_RISK = "RISK" #Interaction schemes INTERACT_UNIFORM = "UNIFORM" # - # # Downloading State Data # Here, we import state-based data including the number of students coming from each state and information about the virus in that state. We then return the merged data file that has the total number of students for the class years of interest and the relevant state-wide data. def read_data_from_file(student_filename, yrs): # student_filename: string, filename of the table with data about where students are coming from # yrs: int, col indices for class years to consider for moving in # state_filename: string, filename of the table with init conditions (S_0, E_0, etc.) by state # Add the data table (.csv, .xlsx, etc.) 
into the git repo, then read it in here # Do whatever cleaning up needs to be done and return the table data = pd.read_csv(student_filename) # Create data table using information from .csv student_data = pd.DataFrame(np.zeros((55,8)), columns=["State","Population","S_0","E_0","I_0","R_0","Rt","beta"]) student_data.State = data.State student_data.Population = data[years_to_consider].sum(axis=1) student_data.S_0 = data.S_0 student_data.E_0 = data.E_0 student_data.I_0 = data.I_0 student_data.R_0 = data.R_0 student_data.Rt = data.R_t student_data.beta = data.beta return student_data # + years_to_consider = ["UG1", "UG2"] #Subject to change pending what the state data looks like state_based_data = read_data_from_file(STUDENT_DATA_FILENAME, years_to_consider) # - # # Make dorms # # Here, we take our state data and make our dorms out of it. For each dorm, we assign a subset of the student population to be in that dorm. Once we have assigned the students to the dorm, we get a weighted average based on where students came from to get one single risk measure for that dorm (probably based on positivity rate of the state). A major part of our experiment is how we do the assignment, our control is assuming a uniform distribution throughout each dorm. def get_dorm_assignments(n_dorms, pop, state_data, assign, dorm_pops=None): # n_dorms: int, number of dorms # pop: int, total student population # state_data: np array or pd dataframe, information by state w # students from state + state pos. 
rate # assign: string, determines what kind of assignment mechanism to use # dorm_pops (Optional): dict mapping dorms to # students in that dorm, not using for now but in case we want to make dorms unique # RETURNS: dorms, pd dataframe, each row is a dorm, the columns in order are: # students, S_0, E_0, I_0, R_0, Rt, beta # Each elem in dorms is a float that is the weighted risk of that dorm dorms = pd.DataFrame(np.zeros((n_dorms,7)), columns=["Students", "S_0", "E_0","I_0", "R_0", "Rt", "beta"]) dorm_pop = pop / n_dorms # For the base case, we assign students uniformly to each dorm. Each dorm has same initial conditions # for SEIR model. if assign == ASSIGN_UNIFORM: dorms.Students = dorm_pop dorms.S_0 = sum(state_data.S_0 * state_data.Population) / pop dorms.E_0 = sum(state_data.E_0 * state_data.Population) / pop dorms.I_0 = sum(state_data.I_0 * state_data.Population) / pop dorms.R_0 = sum(state_data.R_0 * state_data.Population) / pop dorms.Rt = sum(state_data.Rt * state_data.Population) / pop dorms.beta = sum(state_data.beta * state_data.Population) / pop print("We are using uniform distribution") # For the alternative case, we order states by risk level and then assign equal number of students to each dorm. # However, we assign students based on risk level of state (i.e., students from high risk states will live in # same dorm whereas students from states with lower cases will live in the same space). elif assign == ASSIGN_BY_RISK: state_data.sort_values("beta") # Loop through all states in state data. Fill up each dorm with students and state demographic information # until we hit the correct population of students. Then start filling up next dorm until we hit the limit. 
current_pop = 0 i = 0 dorm_S0 = np.zeros((n_dorms,1)) dorm_E0 = np.zeros((n_dorms,1)) dorm_I0 = np.zeros((n_dorms,1)) dorm_R0 = np.zeros((n_dorms,1)) dorm_Rt = np.zeros((n_dorms,1)) dorm_beta = np.zeros((n_dorms,1)) for State in range(len(state_based_data["State"])): if current_pop + state_data.Population[State] <= dorm_pop: current_pop = current_pop + state_data.Population[State] dorm_S0[i] = dorm_S0[i] + (state_data.S_0[State] * state_data.Population[State]) / pop dorm_E0[i] = dorm_E0[i] + (state_data.E_0[State] * state_data.Population[State]) / pop dorm_I0[i] = dorm_I0[i] + (state_data.I_0[State] * state_data.Population[State]) / pop dorm_R0[i] = dorm_R0[i] + (state_data.R_0[State] * state_data.Population[State]) / pop dorm_Rt[i] = dorm_Rt[i] + (state_data.Rt[State] * state_data.Population[State]) / pop dorm_beta[i] = dorm_beta[i] + (state_data.beta[State] * state_data.Population[State]) / pop else: current_pop = current_pop + (dorm_pop - state_data.Population[State]) dorm_S0[i] = dorm_S0[i] + (state_data.S_0[State] * (dorm_pop - state_data.Population[State])) / pop dorm_E0[i] = dorm_E0[i] + (state_data.E_0[State] * (dorm_pop - state_data.Population[State])) / pop dorm_I0[i] = dorm_I0[i] + (state_data.I_0[State] * (dorm_pop - state_data.Population[State])) / pop dorm_R0[i] = dorm_R0[i] + (state_data.R_0[State] * (dorm_pop - state_data.Population[State])) / pop dorm_Rt[i] = dorm_Rt[i] + (state_data.Rt[State] * (dorm_pop - state_data.Population[State])) / pop dorm_beta[i] = dorm_beta[i] + (state_data.beta[State] * (dorm_pop - state_data.Population[State])) / pop current_pop = 0 i = i+1 # Using results from for loop, assign weighted SEIR information to the 10 dorms dorms.Students = dorm_pop dorms.S_0 = dorm_S0 dorms.E_0 = dorm_E0 dorms.I_0 = dorm_I0 dorms.R_0 = dorm_R0 dorms.Rt = dorm_Rt dorms.beta = dorm_beta print("We are using risk bucketing for dorm assignments") #feel free to add more assignment schemes! 
just make sure to add a global variable for the string return dorms # + # Input number of dorms that we want to put students in. Set total students to be sum of population of incoming students. n_dorms = 10 total_students = state_based_data.Population.sum(axis=0) # Run base case: students are uniformly assigned to dorms without regard for home state. dorms_uniform = get_dorm_assignments(n_dorms, total_students, state_based_data, ASSIGN_UNIFORM) # - #Repeat for other assignment types dorms_by_risk = get_dorm_assignments(n_dorms, total_students, state_based_data, ASSIGN_BY_RISK) # # Dorm Interactions # Now that we have what we need to know for each individual dorm, we get the dorm interaction table. This table has a row for each dorm and includes its initial conditions as before (S_0, E_0, I_0, R_0, Rt, beta) as well as how that dorm interacts with others. Stretch goal is to have one such table for each dorm and the interaction column is going to be how many people from each dorm visit the dorm who the table is for. For now, though, we will assume uniform interaction among dorms. # + def get_interaction_tables(dorms, interact_num, interact_type): # dorms: pd dataframe, table of dorms and their initial conditions # interact_num: int, num interacting residents # interact_type: string, scheme of interaction, currently just uniform if interact_type == INTERACT_UNIFORM: interact_col = np.ones(n_dorms)*interact_num interact_df = dorms interact_df["Incoming Passenger"] = interact_col return interact_df return None # + # students leaving each dorm, should maybe make function of dorm population interact_num = 10 interact_table_control = get_interaction_tables(dorms_uniform, interact_num, INTERACT_UNIFORM) interact_table_by_risk = get_interaction_tables(dorms_by_risk, interact_num, INTERACT_UNIFORM) # - # # GET RESULTS BY DORM # # So, now that we have our interaction table, we apply it to each of our dorms to get the SEIR model for each dorm. 
This is done by pretty much just for looping through the networking code they gave us for each of the dorm systems we have. # Put in any helpers here for making the networking code happen # + def get_Results_By_Dorm(dorm_interactions): # dorm_interactions: pd dataframe, table of pop, initial conditions, and interaction for each dorm # Returns: results, pd dataframe or np array, each row is dorm, time series of confirmed cases # Can return multiple tables if there are other time series we wanna measure (i.e SEIR) #idk if there's preliminary stuff we wanna do here results = {"0": [], "50":[], "75":[], "95":[]} # Class Network Code Starts: # Incoming Students Data - incoming passenger in the original code for class ################################### Need to edit the 'Incoming Passenger' title for the column in our data sheet A_all = df_summ['Incoming Passenger'].to_numpy() #calculate the incoming infectious as I_0*Incoming_passenger df_summ['Incoming Infectious']= df_summ['I_0'].mul(df_summ['Incoming Passenger']) # contact rates fb = df_summ['beta'].to_numpy() # Number of locations nodes = len(A_all) #%% ################### Settings ############################################## dt = 0.01 Tmax = 100 isteps = int(Tmax/dt) # Flag for apply quarantine to states with incoming infectious passengers higher than mean numbers only Flag_Iso = False Af = 2.56430 Cf = 6.5 fa = 1/Af fc = 1/Cf ## Here you can change the contact-rate for forecasting for our region of interest (Assumption) ## if you want to make forward predictions for different scenarios of our local reproduction number #R0_fin = 1.35 #fb[idx] = R0_fin*fc #for each dorm for d in range(len(dorm_interactions.index)): #%% ############## initialize model parameters and seed ###################### # Define output shape of the SEIR model Sout= np.empty((isteps,nodes)) Eout= np.empty((isteps,nodes)) Iout= np.empty((isteps,nodes)) Rout= np.empty((isteps,nodes)) # Clear incoming passengers for location of consideration 
A_all[d] = 0 # Percent of quarantine confirmed = forecasting(0,A_all,fb) confirmed_50 = forecasting(0.5,A_all,fb) confirmed_75 = forecasting(0.75,A_all,fb) confirmed_95 = forecasting(0.95,A_all,fb) results["0"].append(confirmed) results["50"].append(confirmed_50) results["75"].append(confirmed_75) results["95"].append(confirmed_95) #%% ################### Plot results ####################################### t = np.arange(0,Tmax, dt) days = np.arange(0,Tmax, 1) skip = int(1/dt) # color palette Color_p = sns.color_palette("RdBu", 6) fig, ax1 = plt.subplots(figsize=(500/72,300/72)) ax1.plot(days, confirmed[::skip],color = Color_p[0],lw=3,zorder=1, label=r'prediction 0% quarantine') # ax1.plot(days, confirmed_50[::skip],color = Color_p[2],lw=3,zorder=1, label=r'prediction 50% quarantine') # ax1.plot(days, confirmed_75[::skip],color = Color_p[4],lw=3,zorder=1, label=r'prediction 75% quarantine') # ax1.plot(days, confirmed_95[::skip],color = Color_p[5],lw=3,zorder=1, label=r'prediction 95% quarantine') # leg = plt.legend(loc='upper left',fontsize='large',frameon=True) leg.get_frame().set_linewidth(0.0) ax1.xaxis.set_tick_params(labelsize=14) ax1.yaxis.set_tick_params(labelsize=14) plt.ylabel('confirmed cases [-]',fontsize=14) plt.xlabel('time [days]',fontsize=14) plt.title('confirmed cases in location of interest') plt.tight_layout() # DO STUFF FOR THIS DORM, LOOK AT THE NETWORKING CODE, this bit gonna be thick and probably require a lot of decomposition # Feel free to add a cell above this one and make helper functions in there to stay organized (i.e plot_confirmed_cases()) print("analyzing results for dorm d") return results #%% ################## define SEIR forecasting function ############################ def forecasting(quarantine_scale,A_all,fb): Sarr = df_summ['S_0'].to_numpy() Earr = df_summ['E_0'].to_numpy() Iarr = df_summ['I_0'].to_numpy() Rarr = df_summ['E_0'].to_numpy() Daily_normalization = 365 # In this case passenger data are accumulated from 1 year 
#apply quarantine to states with incoming infectious passengers higher than mean numbers mean_incoming_infectious= df_summ['Incoming Infectious'].mean(axis=0) q_states = df_summ['Incoming Infectious']>mean_incoming_infectious # q_scaled_states = np.zeros_like(A_all) if Flag_Iso: q_scaled_states[q_states] = quarantine_scale else: q_scaled_states = quarantine_scale* np.ones_like(A_all) # A_all_scaled = ((1-q_scaled_states) * A_all)/Daily_normalization A_all_pop = A_all/Daily_normalization pop_daily_add = np.sum(A_all_pop) # Population of location of consideration pop = df_summ['Population'].to_numpy()[idx] pop_new = pop # Define an array to add SEIR values only to location of consideration Scaffold = np.zeros_like(Rarr) Scaffold[idx] = 1.0 for s in range(0,isteps): # Summing up incoming passenger categories # Sum of all daily incoming passengers is added to the total population in our region of interest S_add = np.dot(Sarr,A_all_scaled) * Scaffold/(pop_new) E_add = np.dot(Earr,A_all_scaled) * Scaffold/(pop_new) I_add = np.dot(Iarr,A_all_scaled) * Scaffold/(pop_new) R_add = np.dot(Rarr,A_all_scaled) * Scaffold/(pop_new) # Calculating the SEIR model, while adding daily passenger influx SarrNew = Sarr + (S_add - fb * Sarr * Iarr)* dt EarrNew = Earr + (E_add + (fb * Sarr * Iarr) - fa*Earr )* dt IarrNew = Iarr + (I_add + fa * Earr - fc * Iarr)* dt RarrNew = Rarr + (R_add + fc * Iarr )* dt Sarr, Earr, Iarr, Rarr = SarrNew, EarrNew, IarrNew, RarrNew Sout[s,:] = Sarr Eout[s,:] = Earr Iout[s,:] = Iarr Rout[s,:] = Rarr pop_new = pop_new + pop_daily_add*dt confirmed = pop_new*Iout[:,idx] + pop_new*Rout[:,idx] return confirmed # + dorm_results_control = get_Results_By_Dorm(interact_table_control) dorm_results_by_risk = get_Results_By_dorm(interact_table_by_risk) # - # # Aggregating Results # Here, we gather up all the dorm-specific data we have to get university-wide results for each assignment mechanism. 
def aggregate_results(dorm_results): # dorm_results: pd dataframe or np array, each row is time series of data for a dorm #Return: single time series that is weighted average of the dorm_results #Do stuff that should basically amount to getting weighted average of all the dorms #For each row, multiply the whole row by the pop of that dorm ####TODO#### #Sum all the rows together confirmed_total = np.mean(np.array(dorm_results["0"]), axis=0) confirmed_total_50 = np.mean(np.array(dorm_results["50"]), axis=0) confirmed_total_75 = np.mean(np.array(dorm_results["75"]), axis=0) confirmed_total_95 = np.mean(np.array(dorm_results["95"]), axis=0) #%% ################### Plot results ####################################### t = np.arange(0,Tmax, dt) days = np.arange(0,Tmax, 1) skip = int(1/dt) # color palette Color_p = sns.color_palette("RdBu", 6) fig, ax1 = plt.subplots(figsize=(500/72,300/72)) ax1.plot(days, confirmed_total[::skip],color = Color_p[0],lw=3,zorder=1, label=r'prediction 0% quarantine') # ax1.plot(days, confirmed_total_50[::skip],color = Color_p[2],lw=3,zorder=1, label=r'prediction 50% quarantine') # ax1.plot(days, confirmed_total_75[::skip],color = Color_p[4],lw=3,zorder=1, label=r'prediction 75% quarantine') # ax1.plot(days, confirmed_total_95[::skip],color = Color_p[5],lw=3,zorder=1, label=r'prediction 95% quarantine') # leg = plt.legend(loc='upper left',fontsize='large',frameon=True) leg.get_frame().set_linewidth(0.0) ax1.xaxis.set_tick_params(labelsize=14) ax1.yaxis.set_tick_params(labelsize=14) plt.ylabel('confirmed cases [-]',fontsize=14) plt.xlabel('time [days]',fontsize=14) plt.title('Confirmed Cases in Entire Campus') plt.tight_layout() #Divide by total pop, we now have weighted average return aggregated_results/total_students # + control_agg = aggregate_results(dorm_results_control) by_risk_agg = aggregate_results(dorm_results_by_risk) # - # # Aggregated Analysis and Plots # If there's any plots and stuff we wanna make with the aggregated results, put 
that here. # + state_based_data = state_based_data.sort("beta") pop = state_based_data.Population.sum(axis=0) dorm_pop = pop / 10 current_pop = 0 i = 0 dorm_S0 = np.zeros((n_dorms,1)) dorm_E0 = np.zeros((n_dorms,1)) dorm_I0 = np.zeros((n_dorms,1)) dorm_R0 = np.zeros((n_dorms,1)) dorm_Rt = np.zeros((n_dorms,1)) dorm_beta = np.zeros((n_dorms,1)) for State in sorted(range(len(state_based_data.sort_values("beta")))): if current_pop + state_based_data.Population[State] <= dorm_pop: current_pop = current_pop + state_based_data.Population[State] dorm_S0[i] = dorm_S0[i] + (state_based_data.S_0[State] * state_based_data.Population[State]) / pop dorm_E0[i] = dorm_E0[i] + (state_based_data.E_0[State] * state_based_data.Population[State]) / pop dorm_I0[i] = dorm_I0[i] + (state_based_data.I_0[State] * state_based_data.Population[State]) / pop dorm_R0[i] = dorm_R0[i] + (state_based_data.R_0[State] * state_based_data.Population[State]) / pop dorm_Rt[i] = dorm_Rt[i] + (state_based_data.Rt[State] * state_based_data.Population[State]) / pop dorm_beta[i] = dorm_beta[i] + (state_based_data.beta[State] * state_based_data.Population[State]) / pop else: current_pop = current_pop + (dorm_pop - state_based_data.Population[State]) dorm_S0[i] = dorm_S0[i] + (state_based_data.S_0[State] * (dorm_pop - state_based_data.Population[State])) / pop dorm_E0[i] = dorm_E0[i] + (state_based_data.E_0[State] * (dorm_pop - state_based_data.Population[State])) / pop dorm_I0[i] = dorm_I0[i] + (state_based_data.I_0[State] * (dorm_pop - state_based_data.Population[State])) / pop dorm_R0[i] = dorm_R0[i] + (state_based_data.R_0[State] * (dorm_pop - state_based_data.Population[State])) / pop dorm_Rt[i] = dorm_Rt[i] + (state_based_data.Rt[State] * (dorm_pop - state_based_data.Population[State])) / pop dorm_beta[i] = dorm_beta[i] + (state_based_data.beta[State] * (dorm_pop - state_based_data.Population[State])) / pop current_pop = 0 i = i+1 # -
20,890
/SAFER HBM/Rib strains/Rib_19/01_Lasso.ipynb
8b5e1ffa8b43c20ff6ad167c12f7e87e3a9eb1c7
[]
no_license
yash-n-p/FE_HBM_ML
https://github.com/yash-n-p/FE_HBM_ML
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
278,077
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ## 一、编写爬虫程序过程中遇到的问题及解决方法: # # ### 1. 问题:固定休眠时间触发反爬 # # ### 解决方法:最初以为西安二手房成交房源信息高达近6万套,爬取数据会需要很长时间,故计划只采集未央区的1万多套房源。却发现即使数据并不太多的情况下,若休眠时间固定,当程序只剩余后面的几千条数据时,也会因为触发反爬机制而被中断。故修改休眠时间为随机而非固定,成功解决问题。 # # ### 2. 问题:仅爬取未央区的1万多套房源,还是整个西安市的近6万套? # # ### 解决方法:在第1个问题的解决过程中意外发现,只要休眠时间随机,即使休眠时间很短,如只有十几秒,依然不会触发反爬。因此决定爬取西安市的近6万套房源,并设定随机休眠时间为1-2分钟。 # # ### 3. 问题:总价和单价的正则表达式总是无法匹配到目标数据 # # ### 解决方法:不断修改正则表达式,尽量比较短,以及减少换行造成的匹配失败。 # # ### 4. 问题:总价和单价的正则表达式在最简和能匹配上时完全相同,数据写入时难以识别二者区别 # # ### 解决方法:使用索引定位 # # totalprice = re.findall(findTotalPrice, item)[0] # unitprice = re.findall(findUnitPrice, item)[1] # # ### 5. 问题:由于少量房源下架,导致成交总价缺失,信息匹配不到,而使得第4个问题中的索引定位超出范围,程序无法运行 # ### 解决方法:使用try-except进行异常处理,重排添加数据的顺序 # # try: # 有个别房源因为下架,没有总价数据,使用try-except进行异常处理 # totalprice = re.findall(findTotalPrice, item)[0] # unitprice = re.findall(findUnitPrice, item)[1] # data.append(totalprice) # 添加成交总价 # data.append(unitprice) #添加成交单价 # except IndexError: # if int(re.findall(findTotalPrice, item)[0]) < 1000: # data.append(re.findall(findTotalPrice, item)) # data.append("0") # else: # data.append("0") # data.append(re.findall(findTotalPrice, item)) # # ### 6. 
问题:因爬取数据多,且区域分布非常不均匀,而每组数据最多只显示30页,即3000条,分更小区域爬取会造成url复杂无规律 # # ### 解决方法:按照价格自定义区间设定爬取数据组,分成24组,每组2500条左右数据,这样baseurl和newurl都好设置。 # # # 爬虫代码: # + from bs4 import BeautifulSoup # 网页解析,获取数据 import re # 正则表达式,进行文字匹配 import urllib.request,urllib.error # 制定url,获取网页数据 import xlwt # 进行excel操作 import random import time def main(a): baseurl = "https://xa.ke.com/chengjiao/" # 1.爬取网页 datalist = getdata(baseurl) # 3.保存数据 savepath = "西安二手房成交信息.xls" savedata(datalist, savepath) # 1房源名称 findId = re.compile(r'target="_blank">(.*?)</a>') # 2房源链接 findLink = re.compile(r'href="(.*?)"') # 3房源朝向+装修 findInofIcon = re.compile(r'<span class="houseIcon"></span>(.*?)</div>', re.S) # 4房源成交单价 findUnitPrice = re.compile(r'<span class="number">(\d*)</span>') # 5房源成交总价 findTotalPrice = re.compile(r'<span class="number">(\d*)</span>') # 6房源成交周期 findCycle = re.compile(r'<span>成交周期(\d*)天</span>') # 7挂牌价 findListPrice = re.compile(r'<span>挂牌(.*?)万</span>') # 8房源楼层 findLevel = re.compile(r'<span class="positionIcon"></span>(.*?)</div>', re.S) # 9房源满几 findYears = re.compile(r'<span>房屋满(.*?)年</span>') # 10房源位置 findPosition = re.compile(r'<span>(距.*?)</span>') # 11成交日期 findDealDate = re.compile(r'<div class="dealDate">(.*?)</div>', re.S) def getdata(baseurl): datalist = [] priceRanges = { "p1": 61, # 40万以下的房源 "bp40ep50": 52, # 40-50万的房源,以下为相应价格区间 "bp50ep60": 91, "bp60ep65": 59, "bp65ep71": 73, "bp71ep79": 92, "bp79ep87": 94, "bp87ep95": 92, "bp95ep102": 89, "bp102ep108": 91, "bp108ep114": 90, "bp114ep119": 87, "bp119ep124": 85, "bp124ep130": 94, "bp130ep136": 88, "bp136ep143": 88, "bp143ep150": 74, "bp150ep157": 68, "bp157ep165": 63, "bp165ep180": 95, "bp180ep199": 84, "bp199ep230": 88, "bp230ep300": 89, "p8": 69, # 300万以上的房源 } for price, pg_sum in priceRanges.items(): print(price,"总共%d页"%pg_sum) for i in range(1, pg_sum+1): url = baseurl + "pg" + str(i) + price + "/" # print(i, url) html = askurl(url) # 获取网页源码 # 2.逐一解析数据 soup = BeautifulSoup(html, "html.parser") for item in soup.find_all('div', 
class_="info", limit=30): # 查找符合要求的所有字符串,形成列表 # print(item) # 测试查看一套房源item全部信息 data = [] # data列表用于保存一套房源的所有信息 item = str(item) # 保存一套房源的详细信息 id = re.findall(findId, item) data.append(id) # 1添加房源Id link = re.findall(findLink, item) data.append(link) # 2添加房源链接 orientation = re.findall(findInofIcon, item) data.append(orientation) # 3添加房源朝向和装修情况 try: # 有个别房源因为下架,没有总价数据,使用try-except进行异常处理 totalprice = re.findall(findTotalPrice, item)[0] unitprice = re.findall(findUnitPrice, item)[1] data.append(totalprice) # 添加成交总价 data.append(unitprice) #添加成交单价 except IndexError: if int(re.findall(findTotalPrice, item)[0]) < 1000: data.append(re.findall(findTotalPrice, item)) data.append("0") else: data.append("0") data.append(re.findall(findTotalPrice, item)) cycle = re.findall(findCycle, item) data.append(cycle) # 6添加房源成交周期 listprice = re.findall(findListPrice, item) data.append(listprice) # 7添加房源挂牌价 level = re.findall(findLevel, item) data.append(level) # 8添加房源楼层 years = re.findall(findYears, item) if years != 0: data.append(years) # 9添加房屋满几年信息 else: data.append(" ") # 9若没有房屋满几年信息,留空 position = re.findall(findPosition, item) if position != 0: data.append(position) # 10添加房屋位置信息 else: data.append(" ") # 10若没有房屋位置信息,留空 dealdate = re.findall(findDealDate, item) data.append(dealdate) # 11添加交易日期 # print(data) datalist.append(data) # 把处理好的套房源信息存于datalist中 print("已爬取%d条房源数据"%len(datalist)) sleeptime = (random.randint(60, 120)) print("睡眠", sleeptime/60, "分钟...............") time.sleep(sleeptime) # 每爬取2500左右的一组数据,随机睡眠1-2两分钟 return datalist def askurl(url): User_Agent= [ "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; InfoPath.2; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; 360SE) ", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)", "Mozilla/5.0 (Windows NT 5.1; zh-CN; rv:1.9.1.3) Gecko/20100101 Firefox/8.0", "Mozilla/5.0 
(Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)", "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; TencentTraveler 4.0; .NET CLR 2.0.50727)", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36" ] headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept - Encoding": "gzip, deflate, br", "Accept - Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6", "Cache - Control": "max-age=0", "Connection": "keep-alive", "Cookie": "lianjia_uuid=b64378bb-79f5-4521-b44f-7eeb4d677191; select_city=610100; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1610088406,1611116495,1611142021,1611219120; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22176e0bf8ee9b5-0bd23fc8f76b1a-5a301e44-921600-176e0bf8eea3f8%22%2C%22%24device_id%22%3A%22176e0bf8ee9b5-0bd23fc8f76b1a-5a301e44-921600-176e0bf8eea3f8%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_utm_source%22%3A%22baidu%22%2C%22%24latest_utm_medium%22%3A%22pinzhuan%22%2C%22%24latest_utm_campaign%22%3A%22wyxian%22%2C%22%24latest_utm_content%22%3A%22biaotimiaoshu%22%2C%22%24latest_utm_term%22%3A%22biaoti%22%7D%7D; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1611220132; 
srcid=eyJ0Ijoie1wiZGF0YVwiOlwiNWM1OWE0NjRmMjc0MzhjZDEzNTViYTdmZDc4NGFkNWRmZDkyMTI4ZTdhOWMzNGJjMWIyYmMwNzc5OTAzMjYxNWY4ZDQ3ZjkyOTVmZjQ1MGRkY2EzZTkyMzhiMTZhNzM3NzkwYmJhZmM0Yjg5MzA4NjJkMDc3ZWU2ZTE0MDlhMmUwZjBkMzQwODk5MWRmODk2NDcwMTYwNTNlMjdjM2I2ODJmYjA5YTQxNjQyZWJlMWM0NzE3MzRjMGJmM2UwZWNiODcwNTZiYjIxODEzNjc4NmYxNWU4ZDYzZDBhN2FkYWI4MGRjNDg3MTM1NmM4YzI1MWM0MzNlNzg2YTBhYmNlYmJjMmVlMzUzYTg4ODhmMWMyM2VjYThiOTQ0NWYxMjMzYWE2NmRkYzY2YWQzYTIxYzg0NzExOWQzYjY4YTgxNDcyZDU0N2NjZGY4ZWNjNDBiYzNiNDY3NjBhMTAyNzc4NVwiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCI5NTcyN2Y4NVwifSIsInIiOiJodHRwczovL3hhLmtlLmNvbS9jaGVuZ2ppYW8vd2VpeWFuZy9wOC8iLCJvcyI6IndlYiIsInYiOiIwLjEifQ==", "Host": "xa.ke.com", "Referer": "https://xa.ke.com/chengjiao/", "Sec - Fetch - Dest": "document", "Sec - Fetch - Mode": "navigate", "Sec - Fetch - Site": "same - origin", "Sec - Fetch - User": "?1", "Upgrade - Insecure - Requests": "1", "User - Agent": random.choice(User_Agent) # 随机选用列表待选的user-Agent,伪装成从不同浏览器访问 } request = urllib.request.Request(url, headers=headers) html = "" try: response = urllib.request.urlopen(request) html = response.read().decode("utf-8") except urllib.error.URLError as e: if hasattr(e,"code"): print(e.code) if hasattr(e,"reason"): print(e.reason) return html # 3.保存数据 def savedata(datalist, savepath): print("saving......") book = xlwt.Workbook(encoding="utf-8", style_compression=8) #创建workbook对象 sheet = book.add_sheet("西安市未央区二手房成交信息", cell_overwrite_ok=True) #创建工作表 col = ("小区 户型 面积", "房源链接", "朝向 装修情况", "成交总价", "成交单价" , "成交周期", "挂牌价", "楼层", "满二满五情况", "交通便利度", "交易日期") for i in range(0, 11): sheet.write(0, i, col[i]) # 列名 for i in range(0, len(datalist)): # print("第%d条" % (i+1)) data = datalist[i] for j in range(0, 11): sheet.write(i+1, j, data[j]) # 数据 for j in range(0, 11): sheet.write(i+1, j, data[j]) book.save(savepath) # 保存数据 if __name__ == '__main__': # 调用函数 main(2) print("爬取完毕") # - # ## 二、数据可视化 # ## 1. 
爬取的房源数据部分展示: # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd data = pd.read_excel("xianershoufang.xlsx", index_col='id') print(data.head()) # 展示excel文件的头部信息 print(data.tail()) # 展示excel文件的尾部信息 # - # ## 2. 西安二手房成交信息中热词词云和西安市最受欢迎小区词云: # + from os import path from PIL import Image import numpy as np import matplotlib.pyplot as plt import jieba from wordcloud import WordCloud, STOPWORDS # 读取整个文件 file1 = open(path.join('西安二手房成交信息.txt')).read() # 西安二手房成交信息表中的文字text file2 = open(path.join('西安小区名.txt')).read() # 西安二手房成交信息表的小区名称text ##进行分词 default_mode1 =jieba.cut(file1) default_mode2 =jieba.cut(file2) text1 = " ".join(default_mode1) text2 = " ".join(default_mode2) alice_mask = np.array(Image.open(path.join("房子.jpg"))) wc1 = WordCloud( #设置字体,不指定就会出现乱码,这个字体文件需要下载 font_path=r'fonts/simhei.ttf', scale=10, background_color="white", max_words=2000, mask=alice_mask,) wc2 = WordCloud( #设置字体,不指定就会出现乱码,这个字体文件需要下载 font_path=r'fonts/simhei.ttf', scale=10, background_color="white", max_words=2000, mask=alice_mask,) # generate word cloud wc1.generate(text1) wc2.generate(text2) # store to file wc1.to_file(path.join("西安二手房词云.jpg")) wc2.to_file(path.join("西安最受欢迎小区词云.jpg")) # show plt.imshow(wc1, interpolation='bilinear') plt.axis("off") plt.figure() plt.imshow(wc2, interpolation='bilinear') plt.axis("off") plt.figure() # plt.imshow(alice_mask, cmap=plt.cm.gray, interpolation='bilinear') # plt.axis("off") # plt.show() # - # ## 3. 
西安市二手房户型分布比例、成交单价排名top20、大户型小区排名top20: # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei'] data = pd.read_excel("xianershoufang.xlsx", index_col='id') # print(data.head()) #输出西安市成交房源数最多的小区和最少的小区 print('西安市二手房成交数量最多和最少小区:') print(data['小区'].value_counts()) # 绘制户型占比饼图 data["户型"].value_counts().plot(kind="pie", autopct="%1.2f%%", explode=np.linspace(0,2.5,32)) plt.show() # 西安二手房成交单价最高Top20柱状图 unitprice_top = data.sort_values(by="成交单价(元/平)",ascending=False)[:20] unitprice_top = unitprice_top.sort_values(by="成交单价(元/平)") unitprice_top.set_index(unitprice_top["小区"],inplace=True) unitprice_top.index.name = "" fig = plt.figure(figsize=(12,7)) ax = fig.add_subplot(111) ax.set_ylabel("单价(元/平米)",fontsize=14) ax.set_title("西安二手房成交单价最高Top20",fontsize=18) unitprice_top["成交单价(元/平)"].plot(kind="barh",fontsize=12) plt.show() # 成交的大面积户型小区Top20 unitprice_top = data.sort_values(by="面积(平米)",ascending=False)[:20] unitprice_top = unitprice_top.sort_values(by="面积(平米)") unitprice_top.set_index(unitprice_top["小区"],inplace=True) unitprice_top.index.name = "" fig = plt.figure(figsize=(12,7)) ax = fig.add_subplot(111) ax.set_ylabel("面积(平米)",fontsize=14) ax.set_title("成交的大面积户型小区Top20",fontsize=18) unitprice_top["面积(平米)"].plot(kind="barh",fontsize=12) plt.show()
14,041
/research_development/Prashant/Preprocessing.ipynb
6fc4c94e24712ba28629befcf0b3d85a2fa2ecea
[ "MIT" ]
permissive
ayush572/VIRTUON
https://github.com/ayush572/VIRTUON
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
785,106
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 1.331642, "end_time": "2020-11-26T10:25:04.619193", "exception": false, "start_time": "2020-11-26T10:25:03.287551", "status": "completed"} # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + papermill={"duration": 0.420952, "end_time": "2020-11-26T10:25:05.179311", "exception": false, "start_time": "2020-11-26T10:25:04.758359", "status": "completed"} import numpy as np import scipy.io as spio import os import cv2 import matplotlib.pyplot as plt import glob import numpy as np from PIL import Image # + papermill={"duration": 0.083673, "end_time": "2020-11-26T10:25:05.335451", "exception": false, "start_time": "2020-11-26T10:25:05.251778", "status": "completed"} image_path = r"../input/clothing-coparsing-dataset/images/" label_path = 
r"../input/clothing-coparsing-dataset/labels_raw/pixel_level_labels_mat/" # + papermill={"duration": 0.079351, "end_time": "2020-11-26T10:25:05.479200", "exception": false, "start_time": "2020-11-26T10:25:05.399849", "status": "completed"} valid_exts = [".jpg", ".mat"] # + papermill={"duration": 0.095661, "end_time": "2020-11-26T10:25:05.643808", "exception": false, "start_time": "2020-11-26T10:25:05.548147", "status": "completed"} image_list = [i for i in sorted(os.listdir(image_path)) if os.path.splitext(i)[1].lower() in valid_exts] label_list = [i for i in sorted(os.listdir(label_path)) if os.path.splitext(i)[1].lower() in valid_exts] # + papermill={"duration": 0.077993, "end_time": "2020-11-26T10:25:05.787912", "exception": false, "start_time": "2020-11-26T10:25:05.709919", "status": "completed"} print(image_list[0], len(image_list), len(label_list)) # + papermill={"duration": 0.086638, "end_time": "2020-11-26T10:25:05.958648", "exception": false, "start_time": "2020-11-26T10:25:05.872010", "status": "completed"} label_data_ex = spio.loadmat(label_path +'/0001.mat') # + papermill={"duration": 0.080225, "end_time": "2020-11-26T10:25:06.105438", "exception": false, "start_time": "2020-11-26T10:25:06.025213", "status": "completed"} print(label_data_ex) # + papermill={"duration": 0.061817, "end_time": "2020-11-26T10:25:06.239142", "exception": false, "start_time": "2020-11-26T10:25:06.177325", "status": "completed"} label_data_ex = np.array(label_data_ex['groundtruth'], dtype = np.uint32) # + papermill={"duration": 0.062515, "end_time": "2020-11-26T10:25:06.349007", "exception": false, "start_time": "2020-11-26T10:25:06.286492", "status": "completed"} label_data_ex.shape # + papermill={"duration": 0.077981, "end_time": "2020-11-26T10:25:06.475175", "exception": false, "start_time": "2020-11-26T10:25:06.397194", "status": "completed"} img = cv2.imread(image_path + image_list[0], 0) img.shape # + papermill={"duration": 0.062324, "end_time": 
"2020-11-26T10:25:06.585970", "exception": false, "start_time": "2020-11-26T10:25:06.523646", "status": "completed"} img = cv2.imread(image_path + image_list[0], 1) # + papermill={"duration": 0.325154, "end_time": "2020-11-26T10:25:06.958800", "exception": false, "start_time": "2020-11-26T10:25:06.633646", "status": "completed"} plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) # + papermill={"duration": 0.058742, "end_time": "2020-11-26T10:25:07.068808", "exception": false, "start_time": "2020-11-26T10:25:07.010066", "status": "completed"} mask_ex = spio.loadmat(label_path + label_list[0]) # + papermill={"duration": 0.061473, "end_time": "2020-11-26T10:25:07.182170", "exception": false, "start_time": "2020-11-26T10:25:07.120697", "status": "completed"} mask_ex['groundtruth'].shape # + papermill={"duration": 0.06043, "end_time": "2020-11-26T10:25:07.294847", "exception": false, "start_time": "2020-11-26T10:25:07.234417", "status": "completed"} mask_ex['groundtruth'] # + papermill={"duration": 0.115183, "end_time": "2020-11-26T10:25:07.469037", "exception": false, "start_time": "2020-11-26T10:25:07.353854", "status": "completed"} img = Image.fromarray(mask_ex['groundtruth']) img.save('test.png') # + papermill={"duration": 0.060057, "end_time": "2020-11-26T10:25:07.580769", "exception": false, "start_time": "2020-11-26T10:25:07.520712", "status": "completed"} label_img = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE) # + papermill={"duration": 0.381649, "end_time": "2020-11-26T10:25:08.015144", "exception": false, "start_time": "2020-11-26T10:25:07.633495", "status": "completed"} plt.imshow(label_img) plt.colorbar() # + papermill={"duration": 0.131723, "end_time": "2020-11-26T10:25:08.241885", "exception": false, "start_time": "2020-11-26T10:25:08.110162", "status": "completed"} np.unique(label_img) # + papermill={"duration": 0.091512, "end_time": "2020-11-26T10:25:08.417103", "exception": false, "start_time": "2020-11-26T10:25:08.325591", "status": "completed"} 
label_img.shape # + papermill={"duration": 0.120469, "end_time": "2020-11-26T10:25:08.620042", "exception": false, "start_time": "2020-11-26T10:25:08.499573", "status": "completed"} save_dir = "./label_images/" # path where you want to save if not os.path.exists(save_dir): # if there is no exist, make the path os.makedirs(save_dir) # + papermill={"duration": 0.092368, "end_time": "2020-11-26T10:25:08.787304", "exception": false, "start_time": "2020-11-26T10:25:08.694936", "status": "completed"} def mat_to_img(label_names): for i, img_name in enumerate(label_names): mask_ex = spio.loadmat(label_path + label_list[i]) img = Image.fromarray(mask_ex['groundtruth']) img.save(save_dir+ str(i+1) + '.png')# png right number # + papermill={"duration": 15.923268, "end_time": "2020-11-26T10:25:24.769605", "exception": false, "start_time": "2020-11-26T10:25:08.846337", "status": "completed"} mat_to_img(label_list) # + papermill={"duration": 0.063487, "end_time": "2020-11-26T10:25:24.888794", "exception": false, "start_time": "2020-11-26T10:25:24.825307", "status": "completed"} IMG_H, IMG_W = 384, 256 # + papermill={"duration": 0.065613, "end_time": "2020-11-26T10:25:25.009045", "exception": false, "start_time": "2020-11-26T10:25:24.943432", "status": "completed"} x_train = np.zeros((len(image_list), IMG_H, IMG_W, 3), dtype=np.uint8) y_train = np.zeros((len(label_list), IMG_H, IMG_W, 1), dtype=np.uint8) # + papermill={"duration": 0.062173, "end_time": "2020-11-26T10:25:25.125490", "exception": false, "start_time": "2020-11-26T10:25:25.063317", "status": "completed"} save_dir = "./images/" # path where you want to save if not os.path.exists(save_dir): # if there is no exist, make the path os.makedirs(save_dir) # + papermill={"duration": 36.7829, "end_time": "2020-11-26T10:26:01.964005", "exception": false, "start_time": "2020-11-26T10:25:25.181105", "status": "completed"} for i, image in enumerate(image_list): img = cv2.imread(image_path + image, 1) img = cv2.resize(img, 
dsize=(IMG_W, IMG_H)) cv2.imwrite(save_dir + str(i+1) + '.png', img) x_train[i] = img # + papermill={"duration": 0.092921, "end_time": "2020-11-26T10:26:02.124900", "exception": false, "start_time": "2020-11-26T10:26:02.031979", "status": "completed"} x_train.shape # + papermill={"duration": 0.359081, "end_time": "2020-11-26T10:26:02.577628", "exception": false, "start_time": "2020-11-26T10:26:02.218547", "status": "completed"} plt.imshow(cv2.cvtColor(x_train[0,:], cv2.COLOR_BGR2RGB)) # + papermill={"duration": 0.108927, "end_time": "2020-11-26T10:26:02.790237", "exception": false, "start_time": "2020-11-26T10:26:02.681310", "status": "completed"} save_dir = './label_images/resize/' if not os.path.exists(save_dir): os.makedirs(save_dir) # + papermill={"duration": 3.308553, "end_time": "2020-11-26T10:26:06.202115", "exception": false, "start_time": "2020-11-26T10:26:02.893562", "status": "completed"} for i, image in enumerate(label_list): img = cv2.imread('./label_images/' + str(i+1) + '.png' , cv2.IMREAD_GRAYSCALE) img = cv2.resize(img, dsize=(IMG_W, IMG_H), interpolation= cv2.INTER_NEAREST) cv2.imwrite(save_dir + str(i+1) + '.png', img) y_train[i] = np.expand_dims(img, axis = 2) # + papermill={"duration": 0.284332, "end_time": "2020-11-26T10:26:06.544742", "exception": false, "start_time": "2020-11-26T10:26:06.260410", "status": "completed"} plt.imshow(y_train[0].reshape([IMG_H, IMG_W])) plt.colorbar() # + papermill={"duration": 0.068694, "end_time": "2020-11-26T10:26:06.674144", "exception": false, "start_time": "2020-11-26T10:26:06.605450", "status": "completed"} np.unique(y_train[0]) # + papermill={"duration": 0.068528, "end_time": "2020-11-26T10:26:06.803362", "exception": false, "start_time": "2020-11-26T10:26:06.734834", "status": "completed"} y_train.shape # + papermill={"duration": 0.070912, "end_time": "2020-11-26T10:26:06.932813", "exception": false, "start_time": "2020-11-26T10:26:06.861901", "status": "completed"} x_train.shape # + 
papermill={"duration": 0.339036, "end_time": "2020-11-26T10:26:07.332620", "exception": false, "start_time": "2020-11-26T10:26:06.993584", "status": "completed"} save_dir = "./dataset/" if not os.path.exists(save_dir): # os.makedirs(save_dir) np.save(save_dir+'x_train.npy', x_train[:900]) np.save(save_dir+'y_train.npy', y_train[:900]) np.save(save_dir+'x_test.npy', x_train[900:1000]) np.save(save_dir+'y_test.npy', y_train[900:1000]) # + papermill={"duration": 0.626202, "end_time": "2020-11-26T10:26:08.019357", "exception": false, "start_time": "2020-11-26T10:26:07.393155", "status": "completed"} x_train = np.load('dataset/x_train.npy').astype(np.float32) x_test = np.load('dataset/x_test.npy').astype(np.float32) y_train = np.load('dataset/y_train.npy').astype(np.float32) y_test = np.load('dataset/y_test.npy').astype(np.float32) print(x_train.shape, y_train.shape) print(x_test.shape, y_test.shape) # + papermill={"duration": 0.074381, "end_time": "2020-11-26T10:26:08.156240", "exception": false, "start_time": "2020-11-26T10:26:08.081859", "status": "completed"} def change_class(x): num_of_data = x.shape[0] x = x.reshape([num_of_data,-1]) result = np.zeros(x.shape) classes = [0, 41, 19] for k in range(num_of_data): for i, v in enumerate(x[k]): if v not in classes: result[k][i] = 3 # clothes else: if v == 0: result[k][i] = 0 # background elif v == 41: result[k][i] = 1 # skin else: result[k][i] = 2 # hair return result # + papermill={"duration": 733.503385, "end_time": "2020-11-26T10:38:21.720373", "exception": false, "start_time": "2020-11-26T10:26:08.216988", "status": "completed"} y_label_train = change_class(y_train) # + papermill={"duration": 0.073818, "end_time": "2020-11-26T10:38:21.863384", "exception": false, "start_time": "2020-11-26T10:38:21.789566", "status": "completed"} y_label_train.shape # + papermill={"duration": 0.076072, "end_time": "2020-11-26T10:38:22.003946", "exception": false, "start_time": "2020-11-26T10:38:21.927874", "status": "completed"} 
y_label_train = y_label_train.reshape([-1, IMG_H, IMG_W,1]) print(y_label_train.shape) # + papermill={"duration": 2.77529, "end_time": "2020-11-26T10:38:24.843532", "exception": false, "start_time": "2020-11-26T10:38:22.068242", "status": "completed"} np.unique(y_label_train) # + papermill={"duration": 0.228001, "end_time": "2020-11-26T10:38:25.138231", "exception": false, "start_time": "2020-11-26T10:38:24.910230", "status": "completed"} plt.imshow(np.squeeze(y_label_train[0])) # + papermill={"duration": 85.40657, "end_time": "2020-11-26T10:39:50.618148", "exception": false, "start_time": "2020-11-26T10:38:25.211578", "status": "completed"} y_label_test = change_class(y_test) # + papermill={"duration": 0.080257, "end_time": "2020-11-26T10:39:50.769621", "exception": false, "start_time": "2020-11-26T10:39:50.689364", "status": "completed"} y_label_test = y_label_test.reshape([-1, IMG_H, IMG_W,1]) y_label_test.shape # + papermill={"duration": 4.984338, "end_time": "2020-11-26T10:39:55.823886", "exception": false, "start_time": "2020-11-26T10:39:50.839548", "status": "completed"} from keras.utils import to_categorical # + papermill={"duration": 2.242969, "end_time": "2020-11-26T10:39:58.164364", "exception": false, "start_time": "2020-11-26T10:39:55.921395", "status": "completed"} y_label_train_onehot = to_categorical(y_label_train, num_classes = 4) # + papermill={"duration": 0.07586, "end_time": "2020-11-26T10:39:58.307045", "exception": false, "start_time": "2020-11-26T10:39:58.231185", "status": "completed"} y_label_train_onehot.shape # + papermill={"duration": 0.309827, "end_time": "2020-11-26T10:39:58.684477", "exception": false, "start_time": "2020-11-26T10:39:58.374650", "status": "completed"} y_label_test_onehot = to_categorical( y_label_test, num_classes = 4) y_label_test_onehot.shape # + papermill={"duration": 1.521705, "end_time": "2020-11-26T10:40:00.284366", "exception": false, "start_time": "2020-11-26T10:39:58.762661", "status": "completed"} save_dir = 
"./dataset/" np.save(save_dir+'y_train_onehot.npy', y_label_train_onehot) np.save(save_dir+'y_test_onehot.npy', y_label_test_onehot)
14,489
/results/singleUAV/analysis.ipynb
6328cb0d9a630549fead2159b0095ad674d48c6b
[]
no_license
Rezenders/mas_uav
https://github.com/Rezenders/mas_uav
13
5
null
null
null
null
Jupyter Notebook
false
false
.py
7,732
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## ANOMALY DETECTION TUTORIAL # ### Anomaly Detection - Introduction # # An anomaly is defined as a deviation from what is, considered normal, or expected. Anomaly detection is the term used to describe the processes that allow us to find these deviations in data sets. In terms of data anomalies are also described by other terms such as outliers, exceptions, discordant observations, peculiarities etc. Anomaly detection is a fundamental issue in numerous fields and is used to enhance cyber security, minimize credit card fraud, improve healthcare and insurance systems and numerous other fields. # ![alt text](cluster.png "Cluster") # Anomalies are patterns in data that do not conform to a well defined notion of normal behavior. Figure 1 illustrates anomalies in a simple 2-dimensional data set. The data has two normal regions, N1 and N2, since most observations lie in these two regions. Points that are sufficiently far away from the regions, e.g., points o1 and o2, and points in region O3, are anomalies. # The detection of anomalies allows us to make decisions on information which is more pertinent to the matter at hand. For example, Utilities use numerous sensors to track and forecast load. Sensors are prone to malfunctioning and this can produce a corrupted data set and affect the quality of the prediction. In this case anomaly detection becomes important. # # Anomaly detection comes with numerous challenges such as: # Since it is defined as a deviation from a defined normal, we need to define the range of deviation that is accepted. # What is considered “normal” can be adaptive. 
# The deviation is also domain specific, in the healthcare industry the deviation is bound to be extremely small whereas in the energy sector a wider range can be accepted. # Data may contain noise which may mask anomalies and make detection difficult. # # The various techniques that can be used for anomaly detection are divided into two main categories: Supervised and Unsupervised. Some of the 'supervised' algorithms are: # - k-Nearest Neighbors # - Bayesian Networks # - Neural Networks # - Support Vector Machine # - Decision Trees # # Some of the 'unsupervised' algorithms are: # - k-means clustering # - Fuzzy C-Means # - Expectation-Maximization Meta Algorithm # ### Unsupervised clustering algorithm # # The technique chosen for this tutorial is K means Clustering. This method groups objects based on their feature values into K disjoint clusters. K is the number of disjoint clusters and needs to be defined. It is done in the following steps: # - Define the number of clusters, K # - nitialize K cluster centroids # - Measure distance of all data points with all centroids and form K clusters # - Recalculate Centroids of clusters # - Repeat step 3 and 4 till centroids are constant # # The distance is measured using the euclidean distance function: # $$c = \sqrt{\sum_{i=0}^m \ (xi - yi)^2 }$$ # where x = (x1, ..., xm) and y = (y1, ..., ym) are two input vectors with m quantitative features. # # ![alt text](Outlier detection.png "Title") # If we use one cluster defined as normal with a specified maximum distance then we find that points P2 and P3 are anomalous. # ## Demonstration of anomaly detection using k-means clustering # # The electricity consumption (power) data is used as input to demonstrate the anomaly detection technique. It contains power consumption and date-time values for different points in the campus. For the scope of this demonstration, one particular point is selected and the power consumption trends for the entire year is mapped. 
This is a time-series data that varies between base load and peak load along with outliers that needs to be detected as anomalies. Anomalies can be detected in terms of unusual or unexpected behavior by clustering and finding the outliers. # + import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans import datetime as dt # %matplotlib inline import platform platform.python_version() # - # "Numpy" library is primarily used to extract and process the data. Scikit-learn package is used to implement the K-means algorithm for clustering. This package uses numpy arrays which explains the choice of 'numpy' library. # The csv file is imported using 'genfromtxt' method with the appropriate data formats. Each column series is given a name to be able to reference them based on their names instead of the column indices. # IMPORTING POWER (ELECTRIC CONSUMPTION) DATA dateConverter = lambda d : dt.datetime.strptime(d,'%Y/%m/%d %H:%M:%S') power = np.genfromtxt('campusDemand.csv',delimiter=",",names=True,\ dtype=['S255',dt.datetime,'f8'],\ converters={1: dateConverter}) name, indices, counts = np.unique(power['Point_name'], return_index=True,return_counts=True) # The full database is stored in a different variable called 'power_allpoints' to be able to test the data at different points of campus without having to read the csv file again each time. A particular point of energy meter is then selected for anomaly detection analysis. On observing the trend of different energy meter points, 'Main Campus' point was selected as it shows anomalies in particular sections and the code can hence be checked to remove the same. power_allpoints = power power=power_allpoints[power_allpoints['Point_name']==name[3]] power = np.sort(power,order='Time') # #### Power consumption data trend: # The power (electric consumption) is plotted against its corresponding timestamp to observe the waveform pattern. 
fig1= plt.figure(figsize=(15,5)) plt.plot(power['Time'],power['Value']) plt.title(name[3]) plt.xlabel('Time') plt.ylabel('Power [Watts]') # The obtained waveform shown anomalous behavior towards the month of November along with some individual points in other months that are highly varying from the current observed trend. This waveform generated for electricity consumption for throughout the year is divided into different segments. The total number of data points for that meter point is used to suitably define the segmentation parameters. Segments are formed on a moving window basis to get a more smoothened dataset and avoid overfitting. #check the number of points in the dataset print 'size of dataset is ',(len(power)) power[0] # The data points are for every 5 minutes and hence a segment of 12 points is taken to select one unit of hourly power consumption. This is windowed afetr every 4 points, hence there is an overlap and smoothened dataset. # + segment_len = 12 slide_len = 4 segments = [] means = [] for start_pos in range(0, len(power), slide_len): end_pos = start_pos + segment_len segment = np.copy(power[start_pos:end_pos]) if len(segment) != segment_len: continue segments.append(segment) print("Produced %d waveform segments" % len(segments)) print segments[0] # print segments[1] # - # Once the segments are created, mean and standard deviation is taken for every segment containing 12 points each. Direct functions from numpy for mean and standard deviation are used to get the respective means and standard deviations of each segment. 
segment_power_std = [] segment_power_mean = [] for j in range(len(segments)): segments_powervalue = [] for i in range(segment_len): segments_powervalue.append(segments[j][i][2]) segment_power_mean.append(np.mean(segments_powervalue)) segment_power_std.append(np.std(segments_powervalue)) print "mean:",(segment_power_mean[0:5]) print "standard deviation:",(segment_power_std[0:5]) # #### Plot: # The individual series of means and standard deviations are plotted to observe the variation. First, line plots are plotted for the series indivudally and a scatter plot further of standard deviation against mean. plt.plot(segment_power_std, c='b',label='std') plt.hold(True) plt.plot(segment_power_mean, c='r',label='mean') plt.legend(loc='upper left') plt.scatter(segment_power_std,segment_power_mean) plt.xlabel('mean') plt.ylabel('standard deviation') # The 1D arrays for mean and standard deviations are combined to form one numpy array 'k'. This will now represent two dimensional datapoints to be clustered using k-means algorithm. The transpose is taken to convert into a [length(dataset,2] format. k = (segment_power_mean,segment_power_std) k = (np.transpose(k)) # ### Fitting K-Means algorithm using sklearn toolkit # # Unsupervised K-means algorithm clusters the data points based on the eucledian distances of points with the selected centroids. It is an iterative algorithm of selecting the desired number of centroids and calculating the sum of distances of all points from their nearest centroids. This optimization can be directly applied using the sklearn library (http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html). KMeans method is initialized with the number of clusters desired and option to specify the method of selecting the algorithm to define the initial centroids (in this case: k-means++). The final centroids and labels suggesting which cluster the point belongs to are obtained using the inbuilt functions in the sklearn library. 
# # Number of clusters are selected to be 2 as the standard deviation v/s mean graph is densely packed in two clusters, one of which is majorly formed by the anomalous data of November month. The november data has significantly lower mean than the usual trend in the rest of the year. This data needs to be identified as an anomaly separately. # + kmeans = KMeans(n_clusters=2,init='k-means++') kmeans.fit(k) centroid = kmeans.cluster_centers_ labels = kmeans.labels_ print (centroid) print(labels) # - # This clustering is plotted with different colors mapping the classification. # + LABEL_COLOR_MAP = {0 : 'r', 1 : 'c', } label_color = [LABEL_COLOR_MAP[l] for l in labels] plt.scatter(k[:,0],k[:,1],c=label_color) plt.scatter(centroid[:,0],centroid[:,1], marker = "x", s=150, linewidths = 5, zorder =10) plt.show() # - # The smaller segment in left represents the anomalous section of November month and can be removed along with the other point showing considerably higher variance. There is no hard boundary between the two clusters mainly because of the overlapping windowed segments that are used. # However, for any other dataset of power consumption of the 'main campus', the November month might not show similar anomaly and hence forming two clusters in that scenario is undesirable. As a result, the algorithm is applied for one cluster to more robustly cater to the different type if datasets that can be observed. # + kmeans = KMeans(n_clusters=1,init='k-means++') kmeans.fit(k) centroid = kmeans.cluster_centers_ labels = kmeans.labels_ print centroid # - label_color = [LABEL_COLOR_MAP[l] for l in labels] plt.scatter(k[:,0],k[:,1],c=label_color) plt.scatter(centroid[:,0],centroid[:,1], marker = "x", s=150, linewidths = 5, zorder =10) plt.show() # The main data's centroid has now shifted a little left from [ 9255.85, 48.65] to [ 8965.54, 51.74] on changing the number of cluster. 
Now, considering all points follow a gaussian distribution, we can assign all the points that lie outside the radius of (mean + (2 x std)) from the centroid. distance = map(lambda X,Y: np.sqrt(np.power((X-centroid[:,0]),2) + np.power((Y-centroid[:,1]),2)), k[:,0],k[:,1]) p_max = float((centroid[:,0]+(2*centroid[:,1]))) p_max combined_array = np.hstack((k,distance)) anomaly = combined_array[combined_array[:,2] < p_max] len(anomaly) # np.where(combined_array[:,2]>p_max) # combined_array[:,2]>p_max # combined array # + # import json # with open("Anomaly Detection Tutorial 2.ipynb") as f: # contents = json.load(f) # wordcount = 0 # codecount = 0 # for cell in contents["cells"]: # if cell["cell_type"] == "markdown": # for line in cell["source"]: # wordcount += len(line.split()) # elif cell["cell_type"] == "code": # for line in cell["source"]: # codecount += int(not line.startswith("#")) # print wordcount, codecount # - | # | TORD | Turnover Percentage Committed (Steal Rate) | # | ORB | Offensive Rebound Percentage | # | DRB | Defensive Rebound Percentage | # | FTR | Free Throw Rate (How often the given team shoots Free Throws) | # | FTRD | Free Throw Rate Allowed | # | 2P_O | Two-Point Shooting Percentage | # | 2P_D | Two-Point Shooting Percentage Allowed | # | 3P_O | Three-Point Shooting Percentage | # | 3P_D | Three-Point Shooting Percentage Allowed | # | ADJ_T | Adjusted Tempo (An estimate of the tempo (possessions per 40 minutes) a team would have against the team that wants to play at an average Division I tempo) | # | WAB | Wins Above Bubble (The bubble refers to the cut off between making the NCAA March Madness Tournament and not making it) | # | POSTSEASON | Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year) | # | SEED | Seed in the NCAA 
March Madness Tournament | # | YEAR | Season | # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Load Data From CSV File # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's load the dataset [NB Need to provide link to csv file] # # + button=false new_sheet=false run_control={"read_only": false} df = pd.read_csv('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0120ENv3/Dataset/ML0101EN_EDX_skill_up/cbb.csv') df.head() # - df.shape # ## Add Column # # Next we'll add a column that will contain "true" if the wins above bubble are over 7 and "false" if not. We'll call this column Win Index or "windex" for short. # df['windex'] = np.where(df.WAB > 7, 'True', 'False') # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Data visualization and pre-processing # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Next we'll filter the data set to the teams that made the Sweet Sixteen, the Elite Eight, and the Final Four in the post season. We'll also create a new dataframe that will hold the values with the new column. # # - df1 = df.loc[df['POSTSEASON'].str.contains('F4|S16|E8', na=False)] df1.head() # + button=false new_sheet=false run_control={"read_only": false} df1['POSTSEASON'].value_counts() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # 32 teams made it into the Sweet Sixteen, 16 into the Elite Eight, and 8 made it into the Final Four over 5 seasons. 
# # - # Lets plot some columns to underestand data better: # # + # notice: installing seaborn might takes a few minutes # # !conda install -c anaconda seaborn -y # + import seaborn as sns bins = np.linspace(df1.BARTHAG.min(), df1.BARTHAG.max(), 10) g = sns.FacetGrid(df1, col="windex", hue="POSTSEASON", palette="Set1", col_wrap=6) g.map(plt.hist, 'BARTHAG', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + button=false new_sheet=false run_control={"read_only": false} bins = np.linspace(df1.ADJOE.min(), df1.ADJOE.max(), 10) g = sns.FacetGrid(df1, col="windex", hue="POSTSEASON", palette="Set1", col_wrap=2) g.map(plt.hist, 'ADJOE', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Pre-processing: Feature selection/extraction # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Lets look at how Adjusted Defense Efficiency plots # # + button=false new_sheet=false run_control={"read_only": false} bins = np.linspace(df1.ADJDE.min(), df1.ADJDE.max(), 10) g = sns.FacetGrid(df1, col="windex", hue="POSTSEASON", palette="Set1", col_wrap=2) g.map(plt.hist, 'ADJDE', bins=bins, ec="k") g.axes[-1].legend() plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # We see that this data point doesn't impact the ability of a team to get into the Final Four. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Convert Categorical features to numerical values # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets look at the postseason: # # + button=false new_sheet=false run_control={"read_only": false} df1.groupby(['windex'])['POSTSEASON'].value_counts(normalize=True) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # 13% of teams with 6 or less wins above bubble make it into the final four while 17% of teams with 7 or more do. 
# # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets convert wins above bubble (winindex) under 7 to 0 and over 7 to 1: # # + button=false new_sheet=false run_control={"read_only": false} df1['windex'].replace(to_replace=['False','True'], value=[0,1],inplace=True) df1.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Feature selection # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets defind feature sets, X: # # + button=false new_sheet=false run_control={"read_only": false} X = df1[['G', 'W', 'ADJOE', 'ADJDE', 'BARTHAG', 'EFG_O', 'EFG_D', 'TOR', 'TORD', 'ORB', 'DRB', 'FTR', 'FTRD', '2P_O', '2P_D', '3P_O', '3P_D', 'ADJ_T', 'WAB', 'SEED', 'windex']] X[0:5] # - # #### Harry - Saving Column Names of the Feature Set featureNames = X.columns featureNames # + [markdown] button=false new_sheet=false run_control={"read_only": false} # What are our lables? Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year)| # # + button=false new_sheet=false run_control={"read_only": false} y = df1['POSTSEASON'].values y[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Normalize Data # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Data Standardization give data zero mean and unit variance (technically should be done after train test split ) # # + button=false new_sheet=false run_control={"read_only": false} X= preprocessing.StandardScaler().fit(X).transform(X) X[0:5] # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Training and Validation # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Split the data into Training and Validation 
data. # # + button=false new_sheet=false run_control={"read_only": false} # We split the X into train and test to find the best k from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Validation set:', X_val.shape, y_val.shape) # - # #### Harry - Saving the label values of the Test (aka Validation) Set ytrain_labels = np.unique(y_train) #Harry - new ytrain_labels # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Classification # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Now, it is your turn, use the training set to build an accurate model. Then use the validation set to report the accuracy of the model # You should use the following algorithm: # # - K Nearest Neighbor(KNN) # - Decision Tree # - Support Vector Machine # - Logistic Regression # # - # # K Nearest Neighbor(KNN) # # <b>Question 1 </b> Build a KNN model using a value of k equals five, find the accuracy on the validation data (X_val and y_val) # # You can use <code> accuracy_score</cdoe> # # + from sklearn.metrics import accuracy_score from sklearn.neighbors import KNeighborsClassifier k = 5 knnmodel = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) #Train on training set yhat_knn = knnmodel.predict(X_val) #Predit on Test Set #Evaulation Metric on KNN Model print("Train set Accuracy: %.5f" % accuracy_score(y_train, knnmodel.predict(X_train))) #training set print("Test set Accuracy: %.5f" % accuracy_score(y_val, yhat_knn)) #test set # - # <h6> Harry - I am courious to see the the different eval metrics on the KNN Model </h6> from sklearn.metrics import jaccard_score from sklearn.metrics import f1_score eval_knn = {'model_name' : 'KNN Model', 'accuracy' : accuracy_score(y_val, yhat_knn), 'accuracy_f1' : f1_score(y_val, yhat_knn, average='micro'), 'accuracy_jc' : jaccard_score(y_val, 
yhat_knn,average='micro') } df_eval_knn = pd.DataFrame(data=eval_knn, index=[0]) df_eval_knn # <b>Question 2</b> Determine and print the accuracy for the first 15 values of k the on the validation data: # # + Ks = 15 mean_acc = np.zeros((Ks)) std_acc = np.zeros((Ks)) for n in range(1,(Ks+1)): #Train Model and Predict knnmodel = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train) yhat_knn=knnmodel.predict(X_val) mean_acc[n-1] = accuracy_score(y_val, yhat_knn) #accuary score std_acc[n-1]=np.std(yhat_knn==y_val)/np.sqrt(yhat_knn.shape[0]) #standard deviation df_knn_acc = pd.DataFrame(list(zip(mean_acc,std_acc)), columns=['mean_acc','std_acc']) df_knn_acc.insert(0,'K_neighbors',df_knn_acc.index + 1) # Addiing a column for K-neighbors display_n(df_knn_acc,5) #display top 5 and bottom 5 rows # - # <h6>Harry - Extra - Plot the graph </h6> # + # Plot the graph plt.plot(range(1,(Ks+1)),mean_acc,'g') x_ticks = np.arange(0, (Ks+2), 1) #Used to help define the xtick mark range plt.xticks(x_ticks) # to help control the range for tick marks plt.xlim(0,(Ks+1)) # x-axis range plt.fill_between(range(1,(Ks+1)),mean_acc - 1 * std_acc, mean_acc + 1 * std_acc, alpha=0.10) #plot plus or minus std plt.legend(('Accuracy ', '+/- 1xstd')) plt.ylabel('Accuracy ') plt.xlabel('Number of Nabors (K)') plt.tight_layout() plt.show() # - print( "The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1) # <b> Harry - Used for the a later part of the lab </b> knnmodel = KNeighborsClassifier(n_neighbors = 5).fit(X_train,y_train) # # Decision Tree # # The following lines of code fit a <code>DecisionTreeClassifier</code>: # from sklearn.tree import DecisionTreeClassifier # <b>Question 3</b> Determine the minumum value for the parameter <code>max_depth</code> that improves results # <b> Answer is below: This will be used for a later part of lab </b> dtmodel = DecisionTreeClassifier(criterion="entropy", max_depth = 1) dtmodel.fit(X_train,y_train) #Train the model 
yhat_dt=dtmodel.predict(X_val) #Predict out-of-sample values TreeAccuracy=accuracy_score(y_val,yhat_dt) #Evaluate the out-sample values with actual values TreeAccuracy #Display accuracy # <b> The work to reach conclusion for the above </b> # + from sklearn.metrics import jaccard_score from sklearn.metrics import f1_score Ks = 15 # Let's try # initilize arrays to store accuarcy mean_acc_dt_test = np.zeros((Ks)) #for test set f1_score_dt = np.zeros((Ks)) #for test set mean_acc_dt_train = np.zeros((Ks))#for TRAIN set #keep track the best parameter c_best_parameter = 0 c_best_accuracy = 0 #Loop from 1 to Ks for different MaxDepth for n in range(1,(Ks+1)): #Train Model and Predict dtmodel2 = DecisionTreeClassifier(criterion="entropy", max_depth = n, random_state=7) #use the same random state value to see dtmodel2.fit(X_train,y_train) #Predit for test set yhat_dt=dtmodel2.predict(X_val) #evaluation metrics mean_acc_dt_test[n-1] = accuracy_score(y_val, yhat_dt) #accuary score for test set f1_score_dt[n-1] = f1_score(y_val, yhat_dt, average='weighted') #F1-score for test set (we're going to do weighted) mean_acc_dt_train[n-1] = accuracy_score(y_train, dtmodel2.predict(X_train)) #accuary score for train set if mean_acc_dt_test[n-1] > c_best_accuracy: c_best_accuracy = mean_acc_dt_test[n-1] c_best_parameter = n c_best_model = dtmodel2 print( "The best accuracy was with %.5f with k=%d" % (mean_acc_dt_test.max(), (mean_acc_dt_test.argmax()+1))) print("Best max_depth: %d ; Accuracy is %.5f" % (c_best_parameter,c_best_accuracy)) #used to double check # - # <b> Harry - Let's see in a table format for evaluation metrics for each Max_Depth </b> df_dt_acc = pd.DataFrame(list(zip(mean_acc_dt_test,f1_score_dt)), columns=['mean_acc','f1_acc']) df_dt_acc.insert(0,'Max Depth',df_knn_acc.index + 1) # Adding a column for MaxDepth display_n(df_dt_acc,5) #display top 5 and bottom 5 rows # <b> Harry - Let's display accuary to see how different max depth perform </b> # + fig = 
plt.figure(figsize=(25,10)) # create figure ax0 = fig.add_subplot(1, 2, 1) # add subplot 1 (1 row, 2 columns, first plot) ax1 = fig.add_subplot(1, 2, 2) # add subplot 2 (1 row, 2 columns, second plot). # Subplot 1: Line plot # Plot the graph ax0.set_title('Line Graph Accuracy Performance at different Max Depth', fontsize=20) ax0.plot(range(1,(Ks+1)), mean_acc_dt_test, 'g', linewidth=3, marker='o',markersize=8, label="Accuracy-TestSet") ax0.plot(range(1,(Ks+1)), mean_acc_dt_train, 'b', linewidth=3, marker='o',markersize=8,label="Accuracy-TrainSet") ax0.plot(range(1,(Ks+1)), f1_score_dt, 'r', linewidth=3, marker='o',markersize=8,label="Weighted F1-Score-Train Set") ax0.set_xlabel('Number of MaxDepth (N)', fontsize=15) x_ticks = np.arange(0, (Ks+2), 1) ax0.set_xlim(0,(Ks+1)) ax0.set_xticks(x_ticks) ax0.set_xticklabels(x_ticks, fontsize=12) ax0.set_ylabel('Accuracy Percentage', fontsize=15) ax0.tick_params(axis='y', labelsize=12) ax0.legend() # Subplot 2: Tree Graph from sklearn import tree tree.plot_tree(c_best_model #assigned variable to the best one from above code ,feature_names=featureNames ,class_names = ytrain_labels ,filled=True ,rounded=True ,ax=ax1 #add to the second subplot ) ax1.set_title('Best Performing Tree Graph', fontsize=20) plt.show() # - # # Support Vector Machine # # <b>Question 4</b> Train the support vector machine model and determine the accuracy on the validation data for each kernel. Find the kernel (linear, poly, rbf, sigmoid) that provides the best score on the validation data and train a SVM using it. 
# from sklearn import svm # + # for curiousity from sklearn.metrics import jaccard_score kernel_values=['linear', 'poly', 'rbf', 'sigmoid'] df_svm = pd.DataFrame(columns=['kernel_name', 'accuracy','accuracy_f1','accuracy_jc']) for input_parameter in kernel_values: clf2 = svm.SVC(kernel=input_parameter) clf2.fit(X_train, y_train) yhat_svm = clf2.predict(X_val) df_svm = df_svm.append({'kernel_name' : input_parameter, 'accuracy' : accuracy_score(y_val, yhat_svm), 'accuracy_f1' : f1_score(y_val, yhat_svm, average='micro'), 'accuracy_jc' : jaccard_score(y_val, yhat_svm,average='micro') }, ignore_index=True) df_svm # - c=df_svm['accuracy'].argmax() print("Best svm kernel: " , df_svm.loc[c][0] ) #based on the accuarcy column # <b> Harry - For a later part of the lab </b> # + from sklearn import svm svmmodel=svm.SVC(kernel='poly') svmmodel.fit(X_train,y_train) # Train the model yhat_svmm = svmmodel.predict(X_val) #Use the model to predict lables for Test set #Evaulation Metric on SVM Model print("Train set Accuracy: %.5f" % accuracy_score(y_train, svmmodel.predict(X_train))) #training set print("Test set Accuracy: %.5f" % accuracy_score(y_val, yhat_svmm)) #test set # - # # Logistic Regression # # <b>Question 5</b> Train a logistic regression model and determine the accuracy of the validation data (set C=0.01) # from sklearn.linear_model import LogisticRegression lrmodel = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train) #train the model # + from sklearn.metrics import classification_report, confusion_matrix from sklearn.metrics import log_loss from sklearn.metrics import f1_score yhat_lg = lrmodel.predict(X_val) #Predict yhat_prob = lrmodel.predict_proba(X_val) #Obtain probabilities #Evaluation metrics print("LogLoss: %.2f" % log_loss(y_val, yhat_prob)) #based on probability print("Jaccard_score : %.4f" % jaccard_score(y_val, yhat_lg, average='micro')) #based on the predicted label print("Accuracy score : %.4f" % accuracy_score(y_val,yhat_lg)) 
print("F1-score : %.4f" % f1_score(y_val, yhat_lg,average='micro')) #confusion matrix statistics print("\nConfusion Matrix: \n", confusion_matrix(y_val, yhat_lg, labels=['E8','F4','S16'])) #top-down order in the matrx print(classification_report(y_val, yhat_lg), "\n") #to help confirm s = "" for i in range(len(yhat_lg[0:5])): s += str(yhat_prob[i].argmax()+1) + " " #just confirm the index, or label with highest probablity print(s) print(yhat_lg[0:5]) print(yhat_prob[0:5]) # - # # Model Evaluation using Test set # from sklearn.metrics import f1_score # for f1_score please set the average parameter to 'micro' from sklearn.metrics import log_loss def jaccard_index(predictions, true): if (len(predictions) == len(true)): intersect = 0; for x,y in zip(predictions, true): if (x == y): intersect += 1 return intersect / (len(predictions) + len(true) - intersect) else: return -1 # <b>Question 5</b> Calculate the F1 score and Jaccard Similarity score for each model from above. Use the Hyperparameter that performed best on the validation data. 
**For f1_score please set the average parameter to 'micro'.** # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Load Test set for evaluation # # + button=false new_sheet=false run_control={"read_only": false} test_df = pd.read_csv('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0120ENv3/Dataset/ML0101EN_EDX_skill_up/basketball_train.csv',error_bad_lines=False) test_df.head() # - test_df['windex'] = np.where(test_df.WAB > 7, 'True', 'False') test_df1 = test_df[test_df['POSTSEASON'].str.contains('F4|S16|E8', na=False)] test_Feature = test_df1[['G', 'W', 'ADJOE', 'ADJDE', 'BARTHAG', 'EFG_O', 'EFG_D', 'TOR', 'TORD', 'ORB', 'DRB', 'FTR', 'FTRD', '2P_O', '2P_D', '3P_O', '3P_D', 'ADJ_T', 'WAB', 'SEED', 'windex']] test_Feature['windex'].replace(to_replace=['False','True'], value=[0,1],inplace=True) test_X=test_Feature test_X= preprocessing.StandardScaler().fit(test_X).transform(test_X) test_X[0:5] test_y = test_df1['POSTSEASON'].values test_y[0:5] # <h3> Harry - I am going to use the below dataframe to capture the info </h3> df_multi_model = pd.DataFrame(columns=['Algorithm', 'Accuracy','Jaccard','F1-score','LogLoss']) # KNN # # + y_pred_knn=knnmodel.predict(test_X) # Predict on Test model - KNN ac = accuracy_score(test_y,y_pred_knn) f1 = f1_score(test_y,y_pred_knn, average='micro') js = jaccard_score(test_y, y_pred_knn, average='micro') lgs='NA' print(" Accuracy-KNN: %.6f" % ac) print(" Jaccard-KNN: %.6f" % js) print(" F1-KNN: %.6f" % f1) print(" Log Loss-KNN: ", lgs) df_multi_model = df_multi_model.append({'Algorithm' : 'KNN', 'Accuracy' : ac, 'Jaccard' : js, 'F1-score' : f1, 'LogLoss' : lgs }, ignore_index=True) # - # Decision Tree # # + y_pred_dt=dtmodel.predict(test_X) # Predict on Test model - Decision Tree ac = accuracy_score(test_y,y_pred_dt) f1 = f1_score(test_y,y_pred_dt, average='micro') js = jaccard_score(test_y, y_pred_dt, average='micro') lgs='NA' print(" Accuracy-Decision Tree: %.6f" % 
ac) print(" Jaccard-Decision Tree: %.6f" % js) print(" F1-Decision Tree: %.6f" % f1) print(" Log Loss-Decision Tree: ", lgs) df_multi_model = df_multi_model.append({'Algorithm' : 'Decision Tree', 'Accuracy' : ac, 'Jaccard' : js, 'F1-score' : f1, 'LogLoss' : lgs }, ignore_index=True) # - # SVM # # + y_pred_svm=svmmodel.predict(test_X)# Predict on Test model - svm ac = accuracy_score(test_y,y_pred_svm) f1 = f1_score(test_y,y_pred_svm, average='micro') js = jaccard_score(test_y, y_pred_svm, average='micro') lgs='NA' print(" Accuracy-KNN: %.6f" % ac) print(" Jaccard-KNN: %.6f" % js) print(" F1-KNN: %.6f" % f1) print(" Log Loss-KNN: ", lgs) df_multi_model = df_multi_model.append({'Algorithm' : 'SVM', 'Accuracy' : ac, 'Jaccard' : js, 'F1-score' : f1, 'LogLoss' : lgs }, ignore_index=True) # - # Logistic Regression # # + y_pred_lr=lrmodel.predict_proba(test_X) y_pred_lgs=lrmodel.predict(test_X)# Predict on Test model - Logistic ac = accuracy_score(test_y,y_pred_lgs) f1 = f1_score(test_y,y_pred_lgs, average='micro') js = jaccard_score(test_y, y_pred_lgs, average='micro') x = log_loss(test_y, y_pred_lr) s = format(x, '.5f') lgs=s print(" Accuracy-KNN: %.6f" % ac) print(" Jaccard-KNN: %.6f" % js) print(" F1-KNN: %.6f" % f1) print(" Log Loss-KNN: ", lgs) df_multi_model = df_multi_model.append({'Algorithm' : 'LogisticRegression', 'Accuracy' : ac, 'Jaccard' : js, 'F1-score' : f1, 'LogLoss' : lgs }, ignore_index=True) # - df_multi_model = df_multi_model.drop_duplicates() # # Report # # You should be able to report the accuracy of the built model using different evaluation metrics: # # | Algorithm | Accuracy | Jaccard | F1-score | LogLoss | # | ------------------ | -------- | -------- | -------- | ------- | # | KNN | 0.628571 | 0.458333 | 0.628571 | NA | # | Decision Tree | 0.642857 | 0.473684 | 0.642857 | NA | # | SVM | 0.685714 | 0.521739 | 0.685714 | NA | # | LogisticRegression | 0.685714 | 0.521739 | 0.685714 | 1.03719 | # df_multi_model #display dataframe to compare the above 
- it looks it is matching # Something to keep in mind when creating models to predict the results of basketball tournaments or sports in general is that is quite hard due to so many factors influencing the game. Even in sports betting an accuracy of 55% and over is considered good as it indicates profits. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <h2>Want to learn more?</h2> # # IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="https://www.ibm.com/analytics/spss-statistics-software">SPSS Modeler</a> # # Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://www.ibm.com/cloud/watson-studio">Watson Studio</a> # # - # ### Thank you for completing this lab! # # ## Author # # Saeed Aghabozorgi # # ### Other Contributors # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ---------------------------------- | # | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | # | | | | | # | | | | | # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
48,414
/notebooks/Filter.ipynb
986abfd4c8d213271eedf9611837cbb57e784730
[]
no_license
jamesthesken/ZCU104_VideoDemo
https://github.com/jamesthesken/ZCU104_VideoDemo
1
0
null
2019-06-07T01:23:28
2019-01-01T15:47:14
null
Jupyter Notebook
false
false
.py
10,057
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import ipywidgets as widgets import numpy as np preset_values = { "Identity": { "filter": [[0,0,0,0,0], [0,0,0,0,0], [0,0,1,0,0], [0,0,0,0,0], [0,0,0,0,0]], "factor": 1, "colour": True, "absolute": False, "min": 0, "max": 255}, "Sobel X": { "filter": [[0, 0, 0, 0, 0], [0, 1, 0,-1, 0], [0, 2, 0,-2, 0], [0, 1, 0,-1, 0], [0, 0, 0, 0, 0]], "factor": 1, "colour": False, "absolute": True, "min": 32, "max": 216 }, "Sobel Y": { "filter": [[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0], [0,-1,-2,-1, 0], [0, 0, 0, 0, 0]], "factor": 1, "colour": False, "absolute": True, "min": 32, "max": 216 }, "Gaussian": { "filter": [[1, 4, 6, 4, 1], [4,16,24,16, 4], [6,24,36,24, 6], [4,16,24,16, 4], [1, 4, 6, 4, 1]], "factor": 256, "colour": True, "absolute": False, "min": 0, "max": 255 }, "Adaptive Threshold": { "filter": [[-1, -4, -6, -4, -1], [-4,-16,-24,-16, -4], [-6,-24,220,-24, -6], [-4,-16,-24,-16, -4], [-1, -4, -6, -4, -1]], "factor": 256, "colour": False, "absolute": False, "min": 0, "max": 1 }, "Sharpen": { "filter": [[-1,-1,-1,-1,-1], [-1, 2, 2, 2,-1], [-1, 2, 8, 2,-1], [-1, 2, 2, 2,-1], [-1,-1,-1,-1,-1]], "colour": True, "absolute": False, "factor": 8, "min": 0, "max": 255 }, "Threshold": { "filter": [[0,0,0,0,0], [0,0,0,0,0], [0,0,1,0,0], [0,0,0,0,0], [0,0,0,0,0]], "factor": 1, "colour": False, "absolute": False, "min": 128, "max": 128 } } from pynq import Overlay from demo_drivers import * ol = Overlay('./base_logo.bit', download=False) # + filter2d = ol.filter_pipeline_0 _updating = False def new_preset(preset_dict): global _updating _updating = True coeffs = preset_dict["filter"] factor = preset_dict["factor"] for i, ci in enumerate(coeffs): for j, cj in enumerate(ci): filter_matrix.children[i].children[j].value = cj / factor colour_sel.value = "Colour" 
if preset_dict['colour'] else "Greyscale" absolute_sel.value = preset_dict['absolute'] thresh_min.value = preset_dict['min'] thresh_max.value = preset_dict['max'] _updating = False def on_preset(change): value = presets.value if value in preset_values: new_preset(preset_values[value]) def update_coeff(change): global _updating o = change['owner'] o.element[:] = o.value * 256 if not _updating: presets.value = "Custom" def update_colour(change): global _updating o = change['owner'] filter2d.colour[:] = 1 if o.value == "Colour" else 0 if not _updating: presets.value = "Custom" def update_absolute(change): global _updating o = change['owner'] filter2d.absolute[:] = o.value if not _updating: presets.value = "Custom" def update_maxthreshold(change): global _updating o = change['owner'] filter2d.max_thresh[:] = o.value if not _updating: presets.value = "Custom" def update_minthreshold(change): global _updating o = change['owner'] filter2d.min_thresh[:] = o.value if not _updating: presets.value = "Custom" hboxes = [] for i in range(5): inputs = [] for j in range(5): w = widgets.BoundedFloatText(value=0, min=-4, max=4, step=0.1, layout={"width": "60px"}) w.observe(update_coeff) w.element = filter2d.coeffs[i:i+1,j:j+1] inputs.append(w) hboxes.append(widgets.HBox(inputs)) filter_matrix = widgets.VBox(hboxes, layout={"border":"solid 2px"}) colour_sel = widgets.RadioButtons(options=["Colour", "Greyscale"], description="Filter Mode") colour_sel.observe(update_colour) absolute_sel = widgets.Checkbox(description="Absolute Output") absolute_sel.observe(update_absolute) thresh_min = widgets.IntSlider(description="Threshold", min=0, max=255) thresh_min.observe(update_minthreshold) thresh_max = widgets.IntSlider(description=" ", min=0, max=255, value=255) thresh_max.observe(update_maxthreshold) config = widgets.VBox([colour_sel, absolute_sel, thresh_min, thresh_max]) presets = widgets.Select( options=["Custom", "Identity", "Sobel X", "Sobel Y", "Gaussian", "Sharpen", "Threshold", 
"Adaptive Threshold"], rows=8) presets.observe(on_preset) filter_control = widgets.HBox([filter_matrix, config, presets]) # + slideshow={"slide_type": "slide"} filter_control # + [markdown] slideshow={"slide_type": "slide"} # ![Filter Pipeline](FilterPipeline.png) # - from pynq import PL ol.is_loaded() PL._bitfile_name
5,573
/PY0101EN-3-1-Conditions.ipynb
6b8d83c8553222282f908743cdd74f10d686096c
[]
no_license
Kakanaporn/PY101EN
https://github.com/Kakanaporn/PY101EN
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
30,111
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt dfx=pd.read_csv('Train.csv') dfx.head() X=dfx.values print(X.shape) # + fig,axes=plt.subplots(2,2) axes[0,0].scatter(X[:,5],X[:,-1]) axes[1,0].scatter(X[:,1],X[:,-1]) plt.show() # - + # 多继承单继承栗子 class Fish(): def __init__(self,name): self.name = name def swim(self): print('会游泳') class Bird(): def __init__(self,name): self.name = name def fly(self): print('会飞翔') class Person(): def __init__(self,name): self.name = name def work(self): print('会工作') class SuperMan(Person,Bird,Fish): def __init__(self,name): self.name = name s = SuperMan('xiaoming') # 小明继承了各种功能 s.fly() s.work() s.swim() # - # ## 多继承问题 # - 菱形继承(钻石继承)问题 画图很好弄 # - 多个类继承自一个父类,而这多个类又被同一个类继承 # - 多重继承经典形象问题 # ### 多继承的MRO # - MRO 就是多继承中,用于保存继承顺序的一个列表 # - Python中的MRO是采用C3算法计算出来的 # - 大致规则: # - 子类永远在父类前面 # - 多个父类,按照括号内书写顺序计算 # - 多个父类继承了同一个爷爷类,孙子类会选择继承第一个父类的爷爷类 # + #菱形问题的代码结构 class A(): pass class B(A): pass class C(A): pass class D(B,C): pass # - # # 多态 # - 多态就是同一个对象在不同情况下有不同的状态 # - 多态不是语法,是一种设计思想 # - 多态性:同一中调用方法,不同的执行结果 # - 多态,同一事物的多种形态 # + # 只是继承了父类方法 # 拥有父类run方法 然后打印 Animal is running... class Animal(object): def run(self): print('Animal is running...') class Dog(Animal): pass class Cat(Animal): pass dog = Dog() dog.run() cat = Cat() cat.run() print('同一种方法,同一种执行结果') # + # 多态来咯 # 我们定义一个父类 他有run方法 class Animal(object): def run(self): print('Animal is running...') # 定义一个子类他也有 自身run方法 class Dog(Animal): def run(self): print('Dog is running...') # 定义另一个子类他还有自己的润方法 class Cat(Animal): def run(self): print('Cat is running...') # 实例化一只狗 dog = Dog() dog.run() # 实例化一只猫 cat = Cat() cat.run() print('同一种方法不同执行结果') # -
2,161
/.ipynb_checkpoints/Simple data clean -checkpoint.ipynb
19f6926052b00d43880fd87f4701d9e9b9d0abbd
[]
no_license
caseyalan/quantum-scattering-networks
https://github.com/caseyalan/quantum-scattering-networks
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
12,718
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import json import win32com.client import codecs outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI") inbox = outlook.GetDefaultFolder(6) # "6" refers to the inbox messages = inbox.Items message = messages.GetFirst() items = [] while message: try: d = dict() d['Subject'] = encodeit(getattr(message, 'Subject', '<UNKNOWN>')) d['SentOn'] = encodeit(getattr(message, 'SentOn', '<UNKNOWN>')) d['EntryID'] = encodeit(getattr(message, 'EntryID', '<UNKNOWN>')) d['Sender'] = encodeit(getattr(message, 'Sender', '<UNKNOWN>')) d['Size'] = encodeit(getattr(message, 'Size', '<UNKNOWN>')) d['Body'] = encodeit(getattr(message, 'Body', '<UNKNOWN>')) items.append(d) except Exception as inst: print "Error processing mail" message = messages.GetNext() def parseit(s): if hasattr(s, 'Sender'): def encodeit(s): if isinstance(s, str): return unicode(s, 'utf-8') else: return s len(items) items.sort(key=lambda tup: tup['SentOn']) for i in items: print i["SentOn"], i["Subject"] ns(grid, current_node): """ Returns a list of valid actions given a grid and current node. """ valid = [Action.UP, Action.LEFT, Action.RIGHT, Action.DOWN] n, m = grid.shape[0] - 1, grid.shape[1] - 1 x, y = current_node # check if the node is off the grid or # it's an obstacle if x - 1 < 0 or grid[x-1, y] == 1: valid.remove(Action.UP) if x + 1 > n or grid[x+1, y] == 1: valid.remove(Action.DOWN) if y - 1 < 0 or grid[x, y-1] == 1: valid.remove(Action.LEFT) if y + 1 > m or grid[x, y+1] == 1: valid.remove(Action.RIGHT) return valid # Define a function to visualize the path def visualize_path(grid, path, start): """ Given a grid, path and start position return visual of the path to the goal. 
'S' -> start 'G' -> goal 'O' -> obstacle ' ' -> empty """ # Define a grid of string characters for visualization sgrid = np.zeros(np.shape(grid), dtype=np.str) sgrid[:] = ' ' sgrid[grid[:] == 1] = 'O' pos = start # Fill in the string grid for a in path: da = a.value sgrid[pos[0], pos[1]] = str(a) pos = (pos[0] + da[0], pos[1] + da[1]) sgrid[pos[0], pos[1]] = 'G' sgrid[start[0], start[1]] = 'S' return sgrid # ## Breadth-First Algorithm # # In this section you will implement breadth-first search. The main body of the function is already given. You will need to implement the remaining TODOs. def breadth_first(grid, start, goal): # Below: # "queue" is meant to contain your partial paths # "visited" is meant to contain your visited cells # TODO: Replace the None values for "queue" and "visited" with data structure types path = [] queue = PriorityQueue() # TODO: Choose a data structure type for your partial paths queue.put((start)) visited = set(start) # TODO: Choose a data structure type for your visited list branch = {} found = False # Run loop while queue is not empty while not queue.empty(): # e.g, replace True with queue.empty() if using a Queue: # TODO: Replace "None" to remove the first element from the queue item = queue.get() current_node = item # TODO: Replace "False" to check if the current vertex corresponds to the goal state if current_node == goal: print('Found a path.') found = True break else: # Get the new vertexes connected to the current vertex for a in valid_actions(grid, current_node): # delta of performing the action da = a.value next_node = (current_node[0] + da[0], current_node[1] + da[1]) # TODO: Check if the new vertex has not been visited before. # If the node has not been visited you will need to # 1. Mark it as visited # 2. 
Add it to the queue if next_node not in visited: visited.add(next_node) queue.put((next_node)) branch[next_node] = (current_node, a) path = [] if found: # retrace steps path = [] n = goal while branch[n][0] != start: path.append(branch[n][1]) n = branch[n][0] path.append(branch[n][1]) return path[::-1] # ## Executing the search # # Run breadth_first() and reference the grid to see if the path makes sense. path = breadth_first(grid, start, goal) print(path) # S -> start, G -> goal, O -> obstacle visualize_path(grid, path, start)
5,206
/Image.ipynb
37d413a6e49e46a3763a1059b2575447b75f2dbb
[]
no_license
kumarjay/Assignments
https://github.com/kumarjay/Assignments
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
359,893
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install pytesseract import pytesseract from PIL import Image path= 'C:\Program Files\Tesseract-OCR\tesseract.exe' pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # + ii= cv2.imread(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg') img= pytesseract.image_to_string(Image.open(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg'), lang= 'eng') img # - img= img.split('/') img.strip()[0] # + xx= img.strip().splitlines() val_lst= [] inval_lst= [] for x in xx: if len(x)>0: x=x.split('/') for wrd in x: print('words are', wrd, wrd[0]) #print('striped..', wrd.strip()) str_= wrd.strip() if str_[0].isupper(): val_lst.append(str_) print(x) val_lst # - for ii in img: xx= ii.splitlines() xx xx[0][0] for x in xx: #print(x[0]) if len(x)>0: if x[0].isupper(): print(x) img= pytesseract.image_to_string(Image.open(r'C:\Users\rstps.ithelpdesk3\Downloads\test2_edit.jpeg')) img x= img.replace('\n', ' ') x # + #import file with open(x.strip()) as f: for line in f: line = line.replace("\r", "").replace("\n", "") # + import re x= re.sub('\n', '', img) x= x.strip() x= re.sub('/', '', x) x # + h, w, _ = ii.shape img= pytesseract.image_to_boxes(Image.open(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg')) # + import cv2 cv2.imshow('x', img) # + for b in img.splitlines(): b = b.split(' ') img = cv2.rectangle(ii, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), (0, 255, 0), 2) # show annotated image and wait for keypress cv2.imshow('filename', img) cv2.waitKey(0) # - for x in img.splitlines(): print(b.split(' ')) img # + custom_config = r'--oem 3 --psm 4' ii= cv2.imread(r'C:\Users\rstps.ithelpdesk3\Downloads\license.jpg') bgr= cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY) img= pytesseract.image_to_string(bgr, config= 
custom_config, lang= 'eng') img # - import cv2 as cv import numpy as np from matplotlib import pyplot as plt img = cv.imread(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg',0) ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY) ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV) ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC) ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO) ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV) titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV'] images = [img, thresh1, thresh2, thresh3, thresh4, thresh5] for i in range(6): plt.subplot(2,3,i+1),plt.imshow(images[i],'gray',vmin=0,vmax=255) plt.title(titles[i]) plt.xticks([]),plt.yticks([]) plt.show() img = cv.imread(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg',0) img = cv.medianBlur(img,5) ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) th2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\ cv.THRESH_BINARY,11,2) th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\ cv.THRESH_BINARY,11,2) titles = ['Original Image', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding'] images = [img, th1, th2, th3] for i in range(4): plt.subplot(2,2,i+1),plt.imshow(images[i],'gray') plt.title(titles[i]) plt.xticks([]),plt.yticks([]) plt.show() cv2.imshow('threshold', img) cv2.waitKey(0) cv2.destroyAllWindows() # + img = cv.imread(r'C:\Users\rstps.ithelpdesk3\Downloads\test 8.jpeg', 0) ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) # Otsu's thresholding ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) # Otsu's thresholding after Gaussian filtering blur = cv.GaussianBlur(img,(5,5),0) ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) # plot all the images and their histograms images = [img, 0, th1, img, 0, th2, blur, 0, th3] titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)', 'Original Noisy 
Image','Histogram',"Otsu's Thresholding", 'Gaussian filtered Image','Histogram',"Otsu's Thresholding"] for i in range(3): plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray') plt.title(titles[i*3]), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256) plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([]) plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray') plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([]) plt.show() # - blur = cv.GaussianBlur(img,(5,5),0) # find normalized_histogram, and its cumulative distribution function hist = cv.calcHist([blur],[0],None,[256],[0,256]) hist_norm = hist.ravel()/hist.sum() Q = hist_norm.cumsum() bins = np.arange(256) fn_min = np.inf thresh = -1 for i in range(1,256): p1,p2 = np.hsplit(hist_norm,[i]) # probabilities q1,q2 = Q[i],Q[255]-Q[i] # cum sum of classes if q1 < 1.e-6 or q2 < 1.e-6: continue b1,b2 = np.hsplit(bins,[i]) # weights # finding means and variances m1,m2 = np.sum(p1*b1)/q1, np.sum(p2*b2)/q2 v1,v2 = np.sum(((b1-m1)**2)*p1)/q1,np.sum(((b2-m2)**2)*p2)/q2 # calculates the minimization function fn = v1*q1 + v2*q2 if fn < fn_min: fn_min = fn thresh = i # find otsu's threshold value with OpenCV function ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) print( "{} {}".format(thresh,ret) ) import nltk from nltk.tag.stanford import StanfordNERTagger st = StanfordNERTagger('stanford-ner/english.all.3class.distsim.crf.ser.gz', 'stanford-ner/stanford-ner.jar') # + def extract_entities(text): for sent in nltk.sent_tokenize(text): for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent))): if hasattr(chunk, 'node'): print(chunk.node, ' '.join(c[0] for c in chunk.leaves())) extract_entities(img) # - nltk.download('averaged_perceptron_tagger') nltk.download('punkt') nltk.download('stopwords') import nltk from nltk.corpus import stopwords stop = stopwords.words('english') document = ' '.join([i for i in img.replace('.', '').replace('?', ' '). 
replace('!', ' ').replace(',', ' ').split() if i not in stop]) sentences = nltk.sent_tokenize(document) print(document) print(sentences) sentences = [nltk.word_tokenize(sent) for sent in sentences] print(sentences) sentences = [nltk.pos_tag(sent) for sent in sentences] sentences nouns = [word for (word, pos) in sentences if pos in ['NN','NNP','NNS','NNPS']] noun= [word for word, tag in sentences[0] if tag in ['NNP']] digits= [word for word, tag in sentences[0] if tag in ['CD']] dob = [word for word, tag in sentences[0] if tag in ['JJ']] print(noun) print(digits) print(dob) name_= [] for word, tag in sentences[0]: if tag in ['NNP']: if word=='Name': print(word) sentences[0][0][0] # + #for i in range(len(sentences[0])): name= [] father= [] add= [] for i, (word, tag) in enumerate(sentences[0]): #print(word) if tag in ['NNP']: if word=='Name': if sentences[0][i][0] == 'Name': while sentences[0][i][1] in ['NNP']: i+=1 name.append(sentences[0][i][0]) print('1st is', i, name) if sentences[0][i+1][0] == 'S/DW': while sentences[0][i][1] in ['NNP']: i+=1 print('2st is', i) father.append(sentences[0][i][0]) if sentences[0][i+1][0] == 'Add': while sentences[0][i][1] in ['NNP']: i+=1 print('3st is', i) add.append(sentences[0][i][0]) break else: pass else: pass else: print('looping') # - name add father # + if 'Male' in noun: gender= 'male' elif 'Female' in noun: gender= 'famale' else: gender= 'None' gender # + stopwords = nltk.corpus.stopwords.words('english') stopwords.extend(['DOB', 'Male', 'Year', 'Birth', 'Female']) document = ' '.join([i for i in noun if len(i)>1 and i not in stopwords]) document # - dig= ' '.join([i for i in digits]) dig # + import datefinder matches = list(datefinder.find_dates(img)) matches # - k =matches[1].date() k doc='' pd.Series(cols).to_frame(['name', 'DOB', 'doc no', 'gender']) # + cols= {'name': [document], 'DOB':[matches[0].date()], 'doc no':[doc], 'gender':[gender]} df2= pd.DataFrame(data= cols) df2 # - # + import dateutil.parser as dparser 
dparser.parse(dig,fuzzy=True) # - pip install datefinder document stopwords # + import pandas as pd data= pd.DataFrame(columns= ['name', 'DOB', 'doc no', 'gender', ]) # + cols= {'name': ['jay'], 'DOB':['vfdkj'], 'doc no':['jkfn'], 'gender':['nbjsnf']} df2= pd.DataFrame(data= cols) df2 # - d = {'col1': [1, 2], 'col2': [3, 4]} df = pd.DataFrame(data=d) df #df= pd.DataFrame(['jay', '1989'], columns=['name', 'DOB']) data= pd.concat([data, df2], axis= 0, ignore_index= True) data data token_comment = nltk.word_tokenize(sentences[0]) tagged_comment = nltk.pos_tag(token_comment) sentences.pos_tag() for sent in sentences: print(nltk.pos_tag(sent)) nltk.tree.Tree # + names= [] for tagged_sentence in sentences: for chunk in nltk.ne_chunk(tagged_sentence): print(chunk) if type(chunk) == nltk.tree.Tree: print(chunk.label()) if chunk.label() == 'PERSON': names.append(' '.join([c[0] for c in chunk])) names # - nltk.download('maxent_ne_chunker') nltk.download('words')
10,367
/Natural Language Processing/Semantics and Sentiment Analysis/Semantics_and_WordVectors_with_Spacy.ipynb
25ac41bc3c2cd404b8b8f454e86591dbdf7710c2
[]
no_license
NooreldeanKoteb/Natural-Language-Processing-with-Spacy-and-Nltk
https://github.com/NooreldeanKoteb/Natural-Language-Processing-with-Spacy-and-Nltk
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
5,227
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import os import sys import sklearn.metrics as metrics from pyss3 import SS3 from pyss3.server import Live_Test from pyss3.util import Evaluation, span module_path = os.path.abspath(os.path.join("../../..")) if module_path not in sys.path: sys.path.append(module_path) from src.config import END_OF_POST_TOKEN, PATH_INTERIM_CORPUS # noqa: E402 # - CORPUS_KIND = "reddit" CORPUS_NAME = "gambling" input_file_path_train = os.path.join( PATH_INTERIM_CORPUS, CORPUS_KIND, CORPUS_NAME, f"{CORPUS_NAME}-train-raw.txt" ) input_file_path_test = os.path.join( PATH_INTERIM_CORPUS, CORPUS_KIND, CORPUS_NAME, f"{CORPUS_NAME}-test-raw.txt" ) y_train = [] x_train = [] with open(input_file_path_train) as f: for line in f: label, document = line.split(maxsplit=1) y_train.append(label) posts = " ".join(document.split(END_OF_POST_TOKEN)) x_train.append(posts) y_test = [] x_test = [] with open(input_file_path_test) as f: for line in f: label, document = line.split(maxsplit=1) y_test.append(label) posts = " ".join(document.split(END_OF_POST_TOKEN)) x_test.append(posts) # First train a base SS3 model. We can use the hyper-parameters found for more complex models later. clf = SS3(name=f"{CORPUS_KIND}-{CORPUS_NAME}") # + s, l, p, _ = clf.get_hyperparameters() print("Smoothness(s):", s) print("Significance(l):", l) print("Sanction(p):", p) # - clf.fit(x_train, y_train) clf.get_categories() # Subset of values to search for each hyper-parameter. 
s_vals = span(0.2, 0.8, 6) # [0.2 , 0.32, 0.44, 0.56, 0.68, 0.8] l_vals = span(0.1, 2, 6) # [0.1 , 0.48, 0.86, 1.24, 1.62, 2] p_vals = span(0.5, 2, 6) # [0.5, 0.8, 1.1, 1.4, 1.7, 2] k_fold = 12 best_s, best_l, best_p, _ = Evaluation.grid_search( clf, x_train, y_train, k_fold=k_fold, s=s_vals, l=l_vals, p=p_vals, cache=True, tag=f"grid search ({k_fold}-fold)", ) print("The hyperparameter values that obtained the best accuracy are:") print("Smoothness(s):", best_s) print("Significance(l):", best_l) print("Sanction(p):", best_p) # + best_s, best_l, best_p, _ = Evaluation.get_best_hyperparameters( metric="f1-score", metric_target="macro avg" ) print(f"s={best_s:.2f}, l={best_l:.2f}, and p={best_p:.2f}") # - clf.set_hyperparameters(s=best_s, l=best_l, p=best_p) # + y_pred = clf.predict(x_test) classification_report = metrics.classification_report(y_test, y_pred) confusion_matrix = metrics.confusion_matrix(y_test, y_pred) print(classification_report) print(confusion_matrix) # - # Train a SS3 model using 3-grams words with the hyper-parameters found earlier. clf_ngrams = SS3(name=f"{CORPUS_KIND}-{CORPUS_NAME}-ngrams") clf_ngrams.train(x_train, y_train, n_grams=3) # + y_pred_ngrams = clf_ngrams.predict(x_test) classification_report_ngrams = metrics.classification_report(y_test, y_pred_ngrams) confusion_matrix_ngrams = metrics.confusion_matrix(y_test, y_pred_ngrams) print(classification_report_ngrams) print(confusion_matrix_ngrams) # - # Use the best parameters found in cross-validation during training the base model. clf_ngrams.set_hyperparameters(s=best_s, l=best_l, p=best_p) # + y_pred_ngrams_best = clf_ngrams.predict(x_test) classification_report_ngrams_best = metrics.classification_report( y_test, y_pred_ngrams_best ) confusion_matrix_ngrams_best = metrics.confusion_matrix(y_test, y_pred_ngrams_best) print(classification_report_ngrams_best) print(confusion_matrix_ngrams_best) # - clf_ngrams.save() Live_Test.run(clf_ngrams, x_test, y_test, 9876)
3,860
/Bokeh/tutorial/06 - server.ipynb
210b5240de5d32cf697f44e1a437df5ee93ae2f3
[]
no_license
bismayan/bigdata_python
https://github.com/bismayan/bigdata_python
2
0
null
null
null
null
Jupyter Notebook
false
false
.py
8,629
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <table style="float:left; border:none"> # <tr style="border:none"> # <td style="border:none"> # <a href="http://bokeh.pydata.org/"> # <img # src="assets/images/bokeh-transparent.png" # style="width:50px" # > # </a> # </td> # <td style="border:none"> # <h1>Bokeh Tutorial</h1> # </td> # </tr> # </table> # # <div style="float:right;"><h2>06. Bokeh Applications</h2></div> # The architecture of Bokeh is such that high-level “model objects” (representing things like plots, ranges, axes, glyphs, etc.) are created in Python, and then converted to a JSON format that is consumed by the client library, BokehJS. Using the Bokeh Server, it is possible to keep the “model objects” in python and in the browser in sync with one another, creating powerful capabilities: # # * respond to UI and tool events generated in a browser with computations or queries using the full power of python # * automatically push updates the UI (i.e. 
widgets or plots), in a browser # * use periodic, timeout, and asychronous callbacks drive streaming updates # # ***This capability to synchronize between python and the browser is the main purpose of the Bokeh Server.*** # <center><div style="font-size: 18pt;color: firebrick;"> NOTE: Exercises below require work outside the notebook <div></center> # # Hello Bokeh # # To try out the example below, copy the code into a file ``hello.py`` and then execute: # ```bash # bokeh serve --show hello.py # ``` # ---- # ```python # # hello.py # # from bokeh.io import curdoc # from bokeh.layouts import column # from bokeh.models.widgets import TextInput, Button, Paragraph # # # create some widgets # button = Button(label="Say HI") # input = TextInput(value="Bokeh") # output = Paragraph() # # # add a callback to a widget # def update(): # output.text = "Hello, " + input.value # button.on_click(update) # # # create a layout for everything # layout = column(button, input, output) # # # add the layout to curdoc # curdoc().add_root(layout) # ``` # Let's try an exercise to modify this example to add another widget. 
# + # EXERCISE: add a Select widget to this example that offers several different greetings # - # # Linking Plots and Widgets # # To try out the example below, copy the code into a file ``app.py`` and then execute: # ```bash # bokeh serve --show app.py # ``` # ---- # ```python # # app.py # # from numpy.random import random # # from bokeh.io import curdoc # from bokeh.layouts import column, row # from bokeh.plotting import ColumnDataSource, Figure # from bokeh.models.widgets import Select, TextInput # # def get_data(N): # return dict(x=random(size=N), y=random(size=N), r=random(size=N) * 0.03) # # source = ColumnDataSource(data=get_data(200)) # # p = Figure(tools="", toolbar_location=None) # r = p.circle(x='x', y='y', radius='r', source=source, # color="navy", alpha=0.6, line_color="white") # # COLORS = ["black", "firebrick", "navy", "olive", "goldenrod"] # select = Select(title="Color", value="navy", options=COLORS) # input = TextInput(title="Number of points", value="200") # # def update_color(attrname, old, new): # r.glyph.fill_color = select.value # select.on_change('value', update_color) # # def update_points(attrname, old, new): # N = int(input.value) # source.data = get_data(N) # input.on_change('value', update_points) # # layout = column(row(select, input, width=400), row(p)) # # curdoc().add_root(layout) # ``` # EXERCISE: add more widgets to change more aspects of this plot # # Streaming Data # # It is possible to efficiently stream new data to column data sources by using the ``stream`` method. This method accepts two argmuments: # * ``new_data`` &mdash; a dictionary with the same structure as the column data source # * ``rollover`` &mdash; a maximum column length on the client (earlier data is dropped) *[optional]* # # If no ``rollover`` is specified, data is never dropped on the client and columns grow without bound. 
# # It is often useful to use periodic callbacks in conjuction with streaming data The ``add_periodic_callback`` method of ``curdoc()`` accepts a callback function, and a time interval (in ms) to repeatedly execute the callback. # # To try out the example below, copy the code into a file ``stream.py`` and then execute: # ```bash # bokeh serve --show stream.py # ``` # ---- # ```python # # stream.py # from math import cos, sin # # from bokeh.io import curdoc # from bokeh.models import ColumnDataSource # from bokeh.plotting import figure # # p = figure(x_range=(-1.1, 1.1), y_range=(-1.1, 1.1)) # p.circle(x=0, y=0, radius=1, fill_color=None, line_width=2) # # # this is the data source we will stream to # source = ColumnDataSource(data=dict(x=[1], y=[0])) # p.circle(x='x', y='y', size=12, fill_color='white', source=source) # # def update(): # x, y = source.data['x'][-1], source.data['y'][-1] # # # construct the new values for all columns, and pass to stream # new_data = dict(x=[x*cos(0.1) - y*sin(0.1)], y=[x*sin(0.1) + y*cos(0.1)]) # source.stream(new_data, rollover=8) # # curdoc().add_periodic_callback(update, 150) # curdoc().add_root(p) # ``` ### EXERCISE: starting with the above example, create your own streaming plot # # Custom Page Templates # # *New in Bokeh 0.12* # # Custom Page Templates will allow you to specify a Jinja2 Template to use for the application page. With this capability, it is possible to deliver single page applications directly from the Bokeh server, without having to embed the Bokeh application in pages from a different server (Flask, Django, etc).
5,967
/Guided_Projects/Guided_Project_Exploring_Ebay_Car_Sales_Data/Basics.ipynb
9cc2d60e953dda5bc76d836f922d903dc45c96d1
[]
no_license
nveenverma1/Dataquest
https://github.com/nveenverma1/Dataquest
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
176,681
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploring E-bay Car Sales Data # - This Data is taken from Kaggle Competitions # # ### Data Dictionary # # > <font color=blue>**_dateCrawled_**</font> - When this ad was first crawled. All field-values are taken from this date.<br> # > <font color=blue>**_name_**</font> - Name of the car.<br> # > <font color=blue>**_seller_**</font> - Whether the seller is private or a dealer.<br> # > <font color=blue>**_offerType_**</font> - The type of listing<br> # > <font color=blue>**_price_**</font> - The price on the ad to sell the car.<br> # > <font color=blue>**_abtest_**</font> - Whether the listing is included in an A/B test.<br> # > <font color=blue>**_vehicleType_**</font> - The vehicle Type.<br> # > <font color=blue>**_yearOfRegistration_**</font> - The year in which which year the car was first registered.<br> # > <font color=blue>**_gearbox_**</font> - The transmission type.<br> # > <font color=blue>**_powerPS_**</font> - The power of the car in PS.<br> # > <font color=blue>**_model_**</font> - The car model name.<br> # > <font color=blue>**_kilometer_**</font> - How many kilometers the car has driven.<br> # > <font color=blue>**_monthOfRegistration_**</font> - The month in which which year the car was first registered.<br> # > <font color=blue>**_fuelType_**</font> - What type of fuel the car uses. 
# > <font color=blue>**_brand_**</font> - The brand of the car.<br> # > <font color=blue>**_notRepairedDamage_**</font> - If the car has a damage which is not yet repaired.<br> # > <font color=blue>**_dateCreated_**</font> - The date on which the eBay listing was created.<br> # > <font color=blue>**_nrOfPictures_**</font> - The number of pictures in the ad.<br> # > <font color=blue>**_postalCode_**</font> - The postal code for the location of the vehicle.<br> # > <font color=blue>**_lastSeenOnline_**</font> - When the crawler saw this ad last online.<br> # # ### <font color=Orange>_Aim_</font> # We aim to clean the data and analyze the included used car listings # import pandas as pd import numpy as np autos = pd.read_csv('autos.csv', encoding="Latin-1") autos autos.info() autos.head() # ### _Key Observations:_ # # - The Dataset includes of 20 Columns, most of which are Strings # # - Some Columns have null values, but none have more than 20% null values. column_names = autos.columns column_names # + import re def clean_col(col): col.strip() col = col.replace("yearOfRegistration", "registration_year") col = col.replace("monthOfRegistration", "registration_month") col = col.replace("notRepairedDamage", "unrepaired_damage") col = col.replace("dateCreated", "ad_created") return re.sub('([a-z0-9])([A-Z])', r'\1_\2',col).lower() autos.columns = [clean_col(c) for c in autos.columns] autos.columns # - autos.head() # ***Changed Names for all the columnms from camelCase to snake_case. For e.g. 'nrOfPictures' became 'nr_of_pictures'*** autos.describe(include='all') # ### _Key Observations:_ # - **nr_of_pictures**, **seller** and **offer_type** columns have mostly a single value. And they should be dropped. # - Columns **price** and **odometer** are shown as text (Object) type, whereas they should be numeric (int) type and should be converted. 
autos["price"] = autos["price"].replace( {'\$':'',',':''}, regex=True).astype(int) autos["price"] autos["odometer"] = autos["odometer"].replace( {'km':'',',':''}, regex=True).astype(int) autos.rename(columns={'odometer':'odometer_km'}, inplace=True) autos['odometer_km'] autos.drop(columns= ['nr_of_pictures','seller','offer_type'], inplace=True) autos.describe(include='all') autos['price'].value_counts().sort_index() autos = autos[autos['price'].between(100,1000000)] autos['price'].value_counts().sort_index() # ***Observed outliers (below 100 or above 1000000), in 'price' column and removed them.*** autos['odometer_km'].value_counts().sort_index() autos = autos[autos['registration_month']!=0] autos['registration_month'].value_counts() autos['registration_year'].value_counts().sort_index() autos = autos[autos['registration_year'].between(1900,2017)] autos['registration_year'].value_counts().sort_index() # ***Observed outliers for 'Registration month' and 'Registration year'.***<br> # ***For 'Registration month' it was value of 0 and 'Registration Year' it was below 1900 and above 2017<br>Removed those outliers and cleaned the Registration Month and Registration Year Column.*** autos.head() # + autos['date_crawled'] = autos['date_crawled'].str[:10] autos['date_crawled'] = autos['date_crawled'].str.replace('-','').astype(int) autos['last_seen'] = autos['last_seen'].str[:10] autos['last_seen'] = autos['last_seen'].str.replace('-','').astype(int) autos['ad_created'] = autos['ad_created'].str[:10] autos['ad_created'] = autos['ad_created'].str.replace('-','').astype(int) # - autos.head() autos['date_crawled'].value_counts(normalize=True, dropna=False).sort_index() # ***DATE CRAWLED - The above distribbution look like a uniform ditribution.<br>Which means that the ads were crawled on a regular basis, with an average of 3% ads being crawled daily for 34 days.<br>*** autos['ad_created'].value_counts(normalize=True, dropna=False).sort_index() # ***AD CREATED - The above 
distribbution look like a left skewed ditribution.Which means that most of the ads were created recently.<br>It could be beacuse the cars that were posted earlier (like a month or two ago) would have got sold, hence their ad was pulled down. <br>Leaving only a very small number of ads (of unsold cars) posted from a long period of time.*** autos['last_seen'].value_counts(normalize=True, dropna=False).sort_index() # ***LAST SEEN - Here the distribution is heavily left skewed, with almost 50% of the ads being seen in last 3 days of the dataset.<br>We can give a similar analogy to 'ad_created', older car ads are in less quantity in last seen, as they might be sold already.<br>And, as a consequence most of the last seens ads are of recent sellers, who are interested in selling their cars ASAP.*** # # autos['registration_year'].describe() autos['registration_year'].value_counts(normalize=True, dropna=False).sort_index() # ***REGISTRATION YEAR - We can observe that the distribution is left skewed.<br>We can observe that most of the cars are registered between 1998-2008.<br>Since, most of the people use their cars for 10-15 years and then sell/discard them.*** # + brands = autos['brand'].value_counts(normalize=True) results = brands >= 0.05 # Finding names of brands wit more then 5% share of ads top_brands_name = brands[results].index print(top_brands_name) # + brand_mean_price = {} for car in top_brands_name: mean_price = autos.loc[autos['brand']== car,'price'].mean() brand_mean_price[car] = round(mean_price, 2) brand_mean_price # - # ***We found Automobile Brands with more then 5% share of the Ads.<br>And from those names we derived the average price of those top brands vehicles.<br>Those average prices are as follows:*** # # > **Volkswagen:** 5943.71<br> # > **BMW:** 8637.41<br> # > **Opel:** 3173.85<br> # > **Mercedes Benz:** 8846.68<br> # > **Audi:** 9718.9<br> # > **Ford:** 4289.74<br> # # # ***Here we may notice that the most expensive vehicles are from Audi, BMW 
and Mercedes.<br>While Ford, Opel are the most economical options, Volkswagen is in between*** # + brand_mean_mileage = {} for car in top_brands_name: mean_mileage = autos.loc[autos['brand'] == car, 'odometer_km'].mean() brand_mean_mileage[car] = round(mean_mileage, 2) brand_mean_mileage # - bmp_series = pd.Series(brand_mean_price).sort_values(ascending=False) bmm_series = pd.Series(brand_mean_mileage).sort_values(ascending=False) print(bmp_series) bmm_series top_brands_df = pd.DataFrame(bmp_series, columns=['mean_price']) top_brands_df top_brands_df['mean_mileage'] = bmm_series top_brands_df # ***Here we can observe that car mileage/brand does not vary much as compared to the car prices/brand.<br><hr>While mean price difference for most expensive car brand (Audi) and most economical car brand (Opel) was a gigantic 71%.<br>However, same is not true for mean mileage, with total difference between top mileage giving brand(BMW) & least mileage giving brand(Ford) being mere 7%.***
8,671
/.ipynb_checkpoints/Artistic Style Transfer_Dinesh Vangumalli-Copy19-checkpoint.ipynb
af257f369a9f844efffd5ea12c1423f5ad79cee1
[]
no_license
DineshVangumalli/artistic-style-transfer
https://github.com/DineshVangumalli/artistic-style-transfer
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
3,712,854
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import psycopg2 import pandas as pd from sklearn.cross_validation import train_test_split def load_labels(): db = psycopg2.connect(database='martinrasumoff') cursor = db.cursor() #Loading Labels cursor.execute("select * from labels_water_pumps;") list_labels = cursor.fetchall() labels_id =[] labels_class =[] for each in list_labels: labels_id.append(each[0]) labels_class.append(each[1]) series_labels = pd.DataFrame(labels_class, index=labels_id) return series_labels df_labels = load_labels() df_labels.columns = ['label'] print df_labels.head() print df_labels.tail() # + def load_wells(): db = psycopg2.connect(database='martinrasumoff') cursor = db.cursor() #Loading Pump Data cursor.execute("select * from water_pumps;") list_pumps = cursor.fetchall() list_pumps_id = [] list_pumps_fields = [] for each in list_pumps: list_pumps_id.append(each[0]) list_pumps_fields.append(each) #Converting wells data into a pandas Data Frame df_pumps = pd.DataFrame(list_pumps_fields, index=list_pumps_id) return df_pumps df_data = load_wells() print df_data.head() print df_data.tail() # + df_data.columns = ['id','amount_tsh', 'date_recorded', 'funder', 'gps_height', 'installer', 'longitude', 'latitude', 'wpt_name', 'num_private', 'basin', 'subvillage', 'region', 'region_code', 'district_code', 'lga', 'ward', 'population', 'public_meeting', 'recorded_by', 'scheme_management', 'scheme_name', 'permit', 'construction_year', 'extraction_type', 'extraction_type_group', 'extraction_type_class', 'management', 'management_group', 'payment', 'payment_type', 'water_quality', 'quality_group', 'quantity', 'quantity_group', 'source', 'source_type', 'source_class', 'waterpoint_type', 'waterpoint_type_group'] print df_data.head() print df_data.tail() # - #df_data_installer = 
pd.get_dummies(df_data['installer']) #print 'installer:',df_data_installer.shape #df_data_wpt_name = pd.get_dummies(df_data['wpt_name']) #print 'wpt_name:',df_data_wpt_name.shape df_data_basin = pd.get_dummies(df_data['basin']) print 'basin:',df_data_basin.shape #df_data_subvillage = pd.get_dummies(df_data['subvillage']) #print 'subvillage:',df_data_subvillage.shape #df_data_lga = pd.get_dummies(df_data['lga']) #print 'lga:',df_data_lga.shape #df_data_ward = pd.get_dummies(df_data['ward']) #print 'ward',df_data_ward.shape df_data_public_meeting = pd.get_dummies(df_data['public_meeting']) print 'Public Meeting:',df_data_public_meeting.shape df_data_scheme_mgmnt = pd.get_dummies(df_data['scheme_management']) print 'Scheme Management:',df_data_scheme_mgmnt.shape df_data_permit = pd.get_dummies(df_data['permit']) print 'permit:',df_data_permit.shape df_data_extraction_type_group = pd.get_dummies(df_data['extraction_type_group']) print 'Data Extraction:',df_data_extraction_type_group.shape df_data_extraction_type_class = pd.get_dummies(df_data['extraction_type_class']) print 'Data Extr Type:',df_data_extraction_type_class.shape df_data_management_group = pd.get_dummies(df_data['management_group']) print 'Management Group:',df_data_management_group.shape df_data_payment_type = pd.get_dummies(df_data['payment_type']) print 'Payment Type:',df_data_payment_type.shape df_data_quality_group = pd.get_dummies(df_data['quality_group']) print 'Quality Group:',df_data_quality_group.shape df_data_quantity_group = pd.get_dummies(df_data['quantity_group']) print 'Quantity Group:',df_data_quantity_group.shape df_data_source_type = pd.get_dummies(df_data['source_type']) print 'Source Type:',df_data_source_type.shape df_data_source_class = pd.get_dummies(df_data['source_class']) print 'Source Class:',df_data_source_class.shape df_data_waterpoint_type_group = pd.get_dummies(df_data['waterpoint_type_group']) print 'Waterpoint Type:',df_data_waterpoint_type_group.shape # + df_data = 
df_data[['id','amount_tsh','gps_height','longitude','latitude','region_code','district_code',\ 'population']] df_coded = pd.concat([df_data,df_data_basin,df_data_public_meeting,df_data_scheme_mgmnt,df_data_permit,\ df_data_extraction_type_group,df_data_extraction_type_class,df_data_management_group,\ df_data_payment_type,df_data_quality_group,df_data_quantity_group,\ df_data_source_type,df_data_source_class,df_data_waterpoint_type_group],axis=1) print df_data.shape print df_coded.shape # + df_coded.sort_index(inplace=True) df_labels.sort_index(inplace=True) print (df_labels.index == df_coded.index).sum() print df_coded.shape print len(df_labels) print len(df_coded) #for each in df_labels.index: # if any(df_coded.id == each): # print '-' # else: # print '+++++++++',each # - print df_coded.tail() print df_labels.tail() # + print (df_labels.index == df_coded.index).sum() print df_coded.shape # + X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) print '-' print X_pumps_train.head() print '-' print y_pumps_train.head() print '-' print X_pumps_test.head() print '-' print y_pumps_test.head() # - import pylab as pl import numpy as np import statsmodels.api as sm from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score from sklearn.cross_validation import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support # + # %matplotlib inline results = [] for n in range(1, 10): clf = KNeighborsClassifier(n_neighbors=n) knn = clf.fit(X_pumps_train, y_pumps_train) preds = clf.predict(X_pumps_test) knn_score = clf.score(X_pumps_test,y_pumps_test) print knn_score results.append([n,knn_score]) results = pd.DataFrame(results, columns=["n", "accuracy"]) pl.plot(results.n, 
results.accuracy) pl.title("Accuracy with Increasing K") pl.show() # + X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] logreg = LogisticRegression() logreg.fit(X_pumps_train, y_pumps_train_lbl) a = logreg.predict(X_pumps_test) result = logreg.score(X_pumps_test, y_pumps_test[['label']]) print 'Logistic Regression Score:',str(result) # + from sklearn.naive_bayes import GaussianNB X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] y_pumps_test_lbl = y_pumps_test[['label']] gaussian = GaussianNB() gaussian.fit(X_pumps_train, y_pumps_train_lbl) result = gaussian.score(X_pumps_test, y_pumps_test_lbl) a = gaussian.predict(X_pumps_test) v = gaussian.predict_proba(X_pumps_test) result = gaussian.score(X_pumps_test, y_pumps_test_lbl) print 'Gaussian Regression Score:',str(result) # + from sklearn.svm import SVC X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] y_pumps_test_lbl = y_pumps_test[['label']] svc = SVC() svc.fit(X_pumps_train, y_pumps_train_lbl) a = svc.predict(X_pumps_test) result = svc.score(X_pumps_test, y_pumps_test_lbl) print 'SVC Regression Score:',str(result) # + from sklearn.tree import DecisionTreeClassifier X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] y_pumps_test_lbl = y_pumps_test[['label']] dtc = DecisionTreeClassifier() dtc.fit(X_pumps_train, y_pumps_train_lbl) a = dtc.predict(X_pumps_test) result = dtc.score(X_pumps_test, y_pumps_test_lbl) print 'DTC Regression Score:',str(result) # + from sklearn.ensemble import RandomForestClassifier from numpy import 
genfromtxt, savetxt X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] y_pumps_test_lbl = y_pumps_test[['label']] rfc = RandomForestClassifier() x = rfc.fit(X_pumps_train, y_pumps_train_lbl) a = rfc.predict(X_pumps_test) result = rfc.score(X_pumps_test, y_pumps_test_lbl) print 'RFC Regression Score:',str(result) # + test = pd.read_csv('/Users/martinrasumoff/Desktop/metis/mcnulty/Pumpitup/test.csv') print test.head() test = test[['id','amount_tsh','gps_height','longitude','latitude','region_code','district_code',\ 'population','basin','public_meeting','scheme_management','permit',\ 'extraction_type_group','extraction_type_class','management_group',\ 'payment_type','quality_group','quantity_group',\ 'source_type','source_class','waterpoint_type_group']] print test.head() # - tf_data_basin = pd.get_dummies(test['basin']) print 'basin:',tf_data_basin.shape tf_data_public_meeting = pd.get_dummies(test['public_meeting']) print 'Public Meeting:',tf_data_public_meeting.shape tf_data_scheme_mgmnt = pd.get_dummies(test['scheme_management']) print 'Scheme Management:',tf_data_scheme_mgmnt.shape tf_data_permit = pd.get_dummies(test['permit']) print 'permit:',tf_data_permit.shape tf_data_extraction_type_group = pd.get_dummies(test['extraction_type_group']) print 'Data Extraction:',tf_data_extraction_type_group.shape tf_data_extraction_type_class = pd.get_dummies(test['extraction_type_class']) print 'Data Extr Type:',tf_data_extraction_type_class.shape tf_data_management_group = pd.get_dummies(test['management_group']) print 'Management Group:',tf_data_management_group.shape tf_data_payment_type = pd.get_dummies(test['payment_type']) print 'Payment Type:',tf_data_payment_type.shape tf_data_quality_group = pd.get_dummies(test['quality_group']) print 'Quality Group:',tf_data_quality_group.shape tf_data_quantity_group = pd.get_dummies(test['quantity_group']) 
print 'Quantity Group:',tf_data_quantity_group.shape tf_data_source_type = pd.get_dummies(test['source_type']) print 'Source Type:',tf_data_source_type.shape tf_data_source_class = pd.get_dummies(test['source_class']) print 'Source Class:',tf_data_source_class.shape tf_data_waterpoint_type_group = pd.get_dummies(test['waterpoint_type_group']) print 'Waterpoint Type:',tf_data_waterpoint_type_group.shape # + tf_data = test[['id','amount_tsh','gps_height','longitude','latitude','region_code','district_code',\ 'population']] tf_coded = pd.concat([tf_data,tf_data_basin,tf_data_public_meeting,tf_data_scheme_mgmnt,tf_data_permit,\ tf_data_extraction_type_group,tf_data_extraction_type_class,tf_data_management_group,\ tf_data_payment_type,tf_data_quality_group,tf_data_quantity_group,\ tf_data_source_type,tf_data_source_class,tf_data_waterpoint_type_group],axis=1) print tf_data.shape print tf_coded.shape # + #print list(tf_coded.columns.values) #print "-" #print list(df_coded) #df_coded.drop('None', axis=1, inplace=True) #for each in range(len(df_coded.columns.values)): # print tf_coded.columns.values[each]," - ",df_coded.columns.values[each] X_pumps_train, X_pumps_test, y_pumps_train, y_pumps_test = train_test_split(df_coded, df_labels, test_size=0.25, random_state=79) y_pumps_train_lbl = y_pumps_train[['label']] y_pumps_test_lbl = y_pumps_test[['label']] rfc = RandomForestClassifier() x = rfc.fit(X_pumps_train, y_pumps_train_lbl) a = rfc.predict(X_pumps_test) result = rfc.score(X_pumps_test, y_pumps_test_lbl) print 'RFC Regression Score:',str(result) #pred = rfc.predict(tf_coded) #rfc = RandomForestClassifier() #pred = rfc.predict(X_pumps_test) print result # - print a # + pred = a to_submit = [] print len(tf_coded) for ind in range(len(tf_coded)): ids = tf_coded.id[ind] to_pred = pred[ind] #print ids,' - ',to_pred to_submit.append([ids,to_pred]) df_submit = pd.DataFrame(to_submit) df_submit.columns = ['id','status_group'] print df_submit.head() print df_submit.tail() 
df_submit.to_csv('/Users/martinrasumoff/submission_5.csv', sep=',', index=False) # + active="" # # - print df_submit['status_group'].unique() ''' sklearn.featureselection.VarianceThresholds '''
13,488
/notebooks/vaibss/plotting-with-seaborn.ipynb
e087e91a80071331c50a4638e5fa1da5f4e9880d
[]
no_license
Sayem-Mohammad-Imtiaz/kaggle-notebooks
https://github.com/Sayem-Mohammad-Imtiaz/kaggle-notebooks
5
6
null
null
null
null
Jupyter Notebook
false
false
.py
20,026
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="38b8db4c-7e28-4c7b-8079-d6a633025be3" _uuid="4f61bee808da4c7d72017ebe6a5348e10499695f" # # Plotting with seaborn # # <table> # <tr> # <td><img src="https://i.imgur.com/3cYy56H.png" width="350px"/></td> # <td><img src="https://i.imgur.com/V9jAreo.png" width="350px"/></td> # <td><img src="https://i.imgur.com/5a6dwtm.png" width="350px"/></td> # <td><img src="https://i.imgur.com/ZSsHzrA.png" width="350px"/></td> # </tr> # <tr> # <td style="font-weight:bold; font-size:16px;">Count (Bar) Plot</td> # <td style="font-weight:bold; font-size:16px;">KDE Plot</td> # <td style="font-weight:bold; font-size:16px;">Joint (Hex) Plot</td> # <td style="font-weight:bold; font-size:16px;">Violin Plot</td> # </tr> # <tr> # <td>sns.countplot()</td> # <td>sns.kdeplot()</td> # <td>sns.jointplot()</td> # <td>sns.violinplot()</td> # </tr> # <tr> # <td>Good for nominal and small ordinal categorical data.</td> # <td>Good for interval data.</td> # <td>Good for interval and some nominal categorical data.</td> # <td>Good for interval data and some nominal categorical data.</td> # </tr> # </table> # # In the previous two sections we explored data visualization using the `pandas` built-in plotting tools. In this section, we'll do the same with `seaborn`. # # `seaborn` is a standalone data visualization package that provides many extremely valuable data visualizations in a single package. It is generally a much more powerful tool than `pandas`; let's see why. 
# + _cell_guid="dc06ff32-9804-49ed-b76a-5076774adb8b" _uuid="3e6df9a405b1c795d8b18401eab8b1bf64d4cb8c" import pandas as pd reviews = pd.read_csv("../input/wine-reviews/winemag-data_first150k.csv", index_col=0) import seaborn as sns # + [markdown] _cell_guid="9397fd15-d742-41f3-9209-5b17a33ea3b7" _uuid="58ea16434bcf678af906dd3c8e2b1a2b51c54345" # ## Countplot # # The `pandas` bar chart becomes a `seaborn` `countplot`. # + _cell_guid="0ffc5715-142b-4bd7-a4b6-8f1811e613c0" _uuid="0e8836597f86822908b6a10f08296bed46a80483" sns.countplot(reviews['points']) # + [markdown] _cell_guid="3efba799-aa2f-4ebf-af8a-e91064661d21" _uuid="8207ca9c7173fc6517db698fc46e6c11134affcf" # Comparing this chart with the bar chart from two notebooks ago, we find that, unlike `pandas`, `seaborn` doesn't require us to shape the data for it via `value_counts`; the `countplot` (true to its name) aggregates the data for us! # # `seaborn` doesn't have a direct analogue to the line or area chart. Instead, the package provides a `kdeplot`: # + [markdown] _cell_guid="56482637-9022-48da-b99e-a7536e53a55f" _uuid="ef56a55e6a5ab98c7a0c1ec01892be7351daa81b" # ## KDE Plot # + _cell_guid="fcdc8bb0-a4be-475b-94d8-469b1d752c09" _uuid="e41c6e54ed366b6c79e5be1360d7648d4db98c81" sns.kdeplot(reviews.query('price < 200').price) # + [markdown] _cell_guid="d4e2d513-7d16-497f-87a2-84ab36d1c3c4" _uuid="ef5fa23c8a444ebf40ec40e378a726fb4209c2ef" # KDE, short for "kernel density estimate", is a statistical technique for smoothing out data noise. It addresses an important fundamental weakness of a line chart: it will buff out outlier or "in-betweener" values which would cause a line chart to suddenly dip. # # For example, suppose that there was just one wine priced 19.93\$, but several hundred prices 20.00\$. If we were to plot the value counts in a line chart, our line would dip very suddenly down to 1 and then back up to around 1000 again, creating a strangely "jagged" line. 
The line chart with the same data, shown below for the purposes of comparison, has exactly this problem! # + _cell_guid="88c9485f-5f60-48db-a23f-3b237e433553" _uuid="89d919218ca76b700a276fbd8826de131e3ebab0" reviews[reviews['price'] < 200]['price'].value_counts().sort_index().plot.line() # + [markdown] _cell_guid="31111f7a-c16f-4847-ad72-195b9aaf81b8" _uuid="41f40c60d5821b078d33905592e56db921e1a789" # A KDE plot is better than a line chart for getting the "true shape" of interval data. In fact, I recommend always using it instead of a line chart for such data. # # However, it's a worse choice for ordinal categorical data. A KDE plot expects that if there are 200 wine rated 85 and 400 rated 86, then the values in between, like 85.5, should smooth out to somewhere in between (say, 300). However, if the value in between can't occur (wine ratings of 85.5 are not allowed), then the KDE plot is fitting to something that doesn't exist. In these cases, use a line chart instead. # # KDE plots can also be used in two dimensions. # + _cell_guid="047332b2-bdff-4a46-a9fe-8a673c30c5c1" _uuid="2ee4987ede1d583dda57d5c09ad5fe9057f0723d" sns.kdeplot(reviews[reviews['price'] < 200].loc[:, ['price', 'points']].dropna().sample(5000)) # + [markdown] _cell_guid="35d53cb7-fce4-44bb-a529-84e780ced888" _uuid="c0c76d49e572f430aac62cb4d4e2f39615a6bc0b" # Bivariate KDE plots like this one are a great alternative to scatter plots and hex plots. They solve the same data overplotting issue that scatter plots suffer from and hex plots address, in a different but similarly visually appealing. However, note that bivariate KDE plots are very computationally intensive. We took a sample of 5000 points in this example to keep compute time reasonable. # + [markdown] _cell_guid="437a0051-1ea2-48c6-afb0-746be27d81d3" _uuid="febe6f453d21aefa958873c2b2f31f648af96481" # ## Distplot # # The `seaborn` equivalent to a `pandas` histogram is the `distplot`. 
Here's an example: # + _cell_guid="6e45384f-cf4c-46e5-8ee2-a47f199ec4b1" _uuid="2fb9fa2ac02895f84c0b07d2bec710f133e7a993" sns.distplot(reviews['points'], bins=10, kde=False) # + [markdown] _cell_guid="f0639d7e-f08c-42f4-baca-e4226e4df863" _uuid="73f53b37ee47f27575a23c62bcf418a57ffe2f09" # The `distplot` is a composite plot type. In the example above we've turned off the `kde` that's included by default, and manually set the number of bins to 10 (two possible ratings per bin), to get a clearer picture. # + [markdown] _cell_guid="c35f185c-1be3-47dd-8bb9-1d10e5699d9c" _uuid="84569d191b8e65dd291d984c793dddb5d0fd3f6b" # ## Scatterplot and hexplot # + [markdown] _cell_guid="020f4f72-9edc-436b-a1df-15b6df4a1a46" _uuid="7d890d0dbb68ebceb3ac48442c5c3cac8a3ddb13" # To plot two variables against one another in `seaborn`, we use `jointplot`. # + _cell_guid="6d6f9edf-4cc8-4aa4-8193-896e8ab712a0" _uuid="33dea6ec448556990f191a1abd92f5935160e1e7" sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100]) # + [markdown] _cell_guid="e52d9f24-64a5-4235-ae24-cbb535ebda94" _uuid="b576254a3f92026955c123c465a337e42610af21" # Notice that this plot comes with some bells and whistles: a correlation coefficient is provided, along with histograms on the sides. These kinds of composite plots are a recurring theme in `seaborn`. Other than that, the `jointplot` is just like the `pandas` scatter plot. # # As in `pandas`, we can use a hex plot (by simply passing `kind='hex'`) to deal with overplotting: # + _cell_guid="59713edb-16a3-4511-b61a-45716e8acf70" _uuid="a9c79f960286c9a2d85edac768401474bf3f441a" sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='hex', gridsize=20) # + [markdown] _cell_guid="65121cde-56ed-40a9-8221-ba18a8741158" _uuid="445e536d348d4746b092981a944bc664cd7d0c76" # ## Boxplot and violin plot # # `seaborn` provides a boxplot function. 
It creates a statistically useful plot that looks like this: # + _cell_guid="46e57167-2d55-405e-9cf5-e7d4f6ec9224" _uuid="518f21f75c0bef67b685379e95deed90e4b6037e" df = reviews[reviews.variety.isin(reviews.variety.value_counts().head(5).index)] sns.boxplot( x='variety', y='points', data=df ) # + [markdown] _cell_guid="3f2d2928-05a7-4796-8d77-bbf039cfd8c8" _uuid="0aee5e04c60e8c432b6630114305c78403859ca7" # The center of the distributions shown above is the "box" in boxplot. The top of the box is the 75th percentile, while the bottom is the 25th percentile. In other words, half of the data is distributed within the box! The green line in the middle is the median. # # The other part of the plot, the "whiskers", shows the extent of the points beyond the center of the distribution. Individual circles beyond *that* are outliers. # # This boxplot shows us that although all five wines recieve broadly similar ratings, Bordeaux-style wines tend to be rated a little higher than a Chardonnay. # # Boxplots are great for summarizing the shape of many datasets. They also don't have a limit in terms of numeracy: you can place as many boxes in the plot as you feel comfortable squeezing onto the page. # # However, they only work for interval variables and nominal variables with a large number of possible values; they assume your data is roughly normally distributed (otherwise their design doesn't make much sense); and they don't carry any information about individual values, only treating the distribution as a whole. 
# # I find the slightly more advanced `violinplot` to be more visually enticing, in most cases: # + _cell_guid="0c88d116-c1ca-4240-873d-d58f4f10b7ab" _uuid="6e972117854b4398fb8873eb25fab41a7c7d8b33" sns.violinplot( x='variety', y='points', data=reviews[reviews.variety.isin(reviews.variety.value_counts()[:5].index)] ) # + [markdown] _cell_guid="1017c1be-745c-405a-bd7f-62c06ae70ab8" _uuid="a32780a72c2d5ee1b2998c31dd34f80016fb5cb7" # A `violinplot` cleverly replaces the box in the boxplot with a kernel density estimate for the data. It shows basically the same data, but is harder to misinterpret and much prettier than the utilitarian boxplot. # + [markdown] _cell_guid="b9e4770d-8d21-42bc-8462-1efafb4e94cf" _uuid="b57fa929a624c7679af8c2e792703fc491dc746f" # ## Why seaborn? # # Having now seen both `pandas` plotting and the `seaborn` library in action, we are now in a position to compare the two and decide when to use which for what. # # Recall the data we've been working with in this tutorial is in: # + _cell_guid="487d140a-1046-406e-9fd8-b1b040b9f8d8" _uuid="b786b6567d641ac84d16257510e7cfaf28aa6c4f" reviews.head() # + [markdown] _cell_guid="1b3b2bd3-e60c-4181-b23b-45bfd984a307" _uuid="872af24634858957b7cd62e2add0c6306047d5e8" # This data is in a "record-oriented" format. Each individual row is a single record (a review); in aggregate, the list of all rows is the list of all records (all reviews). This is the format of choice for the most kinds of data: data corresponding with individual, unit-identifiable "things" ("records"). The majority of the simple data that gets generated is created in this format, and data that isn't can almost always be converted over. This is known as a "tidy data" format. # # `seaborn` is designed to work with this kind of data out-of-the-box, for all of its plot types, with minimal fuss. This makes it an incredibly convenient workbench tool. # # `pandas` is not designed this way. 
In `pandas`, every plot we generate is tied very directly to the input data. In essence, `pandas` expects your data being in exactly the right *output* shape, regardless of what the input is. # # <!-- # In the previous section of this tutorial, we purposely evaded this issue by using supplemental datasets in a "just right" shape. Starting from the data that we already have, here's what it would take to generate a simple histogram: # # ```python # import numpy as np # top_five_wines_scores = ( # reviews # .loc[np.where(reviews.variety.isin(reviews.variety.value_counts().head(5).index))] # .loc[:, ['variety', 'points']] # .groupby('variety') # .apply(lambda df: pd.Series(df.points.values)) # .unstack() # .T # ) # top_five_wines_scores.plot.hist() # ``` # # As we demonstrated above, to do the same thing in `seaborn`, all we need is: # # ```python # sns.distplot(reviews.points, bins=10, kde=False) # ``` # # The difference is stark! # --> # # Hence, in practice, despite its simplicity, the `pandas` plotting tools are great for the initial stages of exploratory data analytics, but `seaborn` really becomes your tool of choice once you start doing more sophisticated explorations. # # <!-- # My recommendations are: # * Bar plot: # * `pd.Series.plot.bar` # * `sns.countplot` # * Scatter plot: # * `pd.Series.plot.scatter` # * `sns.jointplot` # * Hex plot: # * `pd.Series.plot.hex` # * `sns.jointplot` # * Line/KDE plot: # * `pd.Series.plot.line` for nominal categorical variables # * `sns.kdeplot` for interval variables # * Box/Violin plot: # * `sns.boxplot` # * `sns.violinplot` # * Histogram: # * `sns.distplot` # --> # + [markdown] _cell_guid="7e175119-20cd-49b3-8119-fe09d4771c36" _uuid="e9372bfddfbed428c0faaa3dc5786514a67f335f" # # Examples # # As in previous notebooks, let's now test ourselves by answering some questions about the plots we've used in this section. Once you have your answers, click on "Output" button below to show the correct answers. # # 1. 
A `seaborn` `countplot` is equivalent to what in `pandas`? # 2. A `seaborn` `jointplot` is equivalent to a what in `pandas`? # 3. Why might a `kdeplot` not work very well for ordinal categorical data? # 4. What does the "box" in a `boxplot` represent? # + _cell_guid="8a2e2e47-e8dc-47bf-bae2-29b3677118b5" _kg_hide-input=true _uuid="7bfca747756a4e6ebb5b2211d1d8ef8ecf789bfc" _kg_hide-output=true from IPython.display import HTML HTML(""" <ol> <li>A seaborn countplot is like a pandas bar plot.</li> <li>A seaborn jointplot is like a pandas hex plot.</li> <li>KDEPlots work by aggregating data into a smooth curve. This is great for interval data but doesn't always work quite as well for ordinal categorical data.</li> <li>The top of the box is the 75th percentile. The bottom of the box is the 25th percentile. The median, the 50th percentile, is the line in the center of the box. So 50% of the data in the distribution is located within the box!</li> </ol> """) # + [markdown] _cell_guid="4ebe52aa-809f-439b-9f64-87ea7b17c32e" _uuid="a2eb95d31ce81dfa63cc495dc6f560572528c893" # Next, try forking this kernel, and see if you can replicate the following plots. To see the answers, click the "Input" button to unhide the code and see the answers. 
Here's the dataset we've been working with: # + _cell_guid="ac1be017-3896-475b-98e7-eb90cef98c2a" _uuid="fbb3a01f209e864d91faa6c625af2bc8ce80b031" pokemon = pd.read_csv("../input/pokemon/Pokemon.csv", index_col=0) pokemon.head() # + [markdown] _cell_guid="89317f1f-f0c4-4a02-bfa7-d8ca0bf7e183" _uuid="f83cdf2f6443afc0ce6b9db8a904a300c4482d5c" # And now, the plots: # + _cell_guid="895001a2-78c7-4295-a21f-2cbca456c5ce" _kg_hide-input=true _uuid="46e2d301ac9cbb290890ff9433a0cdb662c87df7" sns.countplot(pokemon['Generation']) # + _cell_guid="4ada9e6f-4b95-47a2-8480-83317513d086" _kg_hide-input=true _uuid="8b199797e18a63fc065ca76b64df6456543cd634" sns.distplot(pokemon['HP']) # + _cell_guid="ed8599fe-c6cd-4136-a9bd-5ba2fd6f7ca8" _kg_hide-input=true _uuid="7f55b695c06850a4edfb9fdfa8d355b5dd6b816f" sns.jointplot(x='Attack', y='Defense', data=pokemon) # + _cell_guid="078563f6-b739-4d3e-bd1d-67fa06d7cf11" _kg_hide-input=true _uuid="ad40c576b8eeef06ec8db0fa46a410dcfccf5ff3" sns.jointplot(x='Attack', y='Defense', data=pokemon, kind='hex') # + [markdown] _cell_guid="e4a5414c-c4fa-45f0-8a20-273cd9fab20f" _uuid="de119c2d78687482415ae428ba2954a62eb72eed" # Hint: for the next exercise, use the variables `HP` and `Attack`. 
# + _cell_guid="c785026c-8354-4b3e-8230-63ddcaab106f" _kg_hide-input=true _uuid="37b7e1086f26c43c7417720cbaa513222dcd6f86" sns.kdeplot(pokemon['HP'], pokemon['Attack']) # + _cell_guid="1aecf1db-e82b-4e30-addc-60dbd40621aa" _kg_hide-input=true _uuid="0ccbfe9cb565a55b558fb928bc1818d38f15732b" sns.boxplot(x='Legendary', y='Attack', data=pokemon) # + _cell_guid="6343db7c-e03e-4bf0-9ce5-38bb1d42966a" _kg_hide-input=true _uuid="d2cf8a4635b21bc32095356685325c6c67bd1355" sns.violinplot(x='Legendary', y='Attack', data=pokemon) # + [markdown] _cell_guid="a4dd72b3-2369-43f2-8187-e3013825ea88" _uuid="36c5658d25fed1256b3f868e23e448c0661ba234" # ## Conclusion # # `seaborn` is one of the most important, if not *the* most important, data visualization tool in the Python data viz ecosystem. In this notebook we looked at what features and capacities `seaborn` brings to the table. There's plenty more that you can do with the library that we won't cover here or elsewhere in the tutorial; I highly recommend browsing the terrific `seaborn` [Gallery page](https://seaborn.pydata.org/examples/index.html) to see more beautiful examples of the library in action. # # [Click here to go to the next section, "Faceting with seaborn"](https://www.kaggle.com/residentmario/faceting-with-seaborn).
16,889
/Exercício 4 - jupyter.ipynb
28e5f07fe423aa0eb21c59d14858c1e997e78c1d
[]
no_license
GabrielFiletti/Exerc_Jupyter
https://github.com/GabrielFiletti/Exerc_Jupyter
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
942
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [conda env:exerjup] * # language: python # name: conda-env-exerjup-py # --- # # Exercício 4 - Como utilizar caracteres especiais no jupyter # # # Para utilizar os caracteres especiais que realizam funções dentro de uma célula Markdown é necessário inserir uma barra ( \ ) antes do caracter: # # # Teste caracter especial # \# Teste caracter especial #
576
/2_ga/1_GAs.ipynb
066fb5441ac5cc25d86ac808719f4c8eef17867d
[ "Apache-2.0" ]
permissive
lthongkham/evolution
https://github.com/lthongkham/evolution
0
0
Apache-2.0
2020-04-15T10:13:51
2020-04-14T15:26:37
null
Jupyter Notebook
false
false
.jl
2,304,395
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Julia 1.4.0 # language: julia # name: julia-1.4 # --- # + [markdown] slideshow={"slide_type": "slide"} # <img src="../imgs/logo.png" width="20%" align="right" style="margin:0px 20px"> # # # # Evolutionary Computation # # ## 2.1 Genetic Algorithms # # <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" align="left" src="https://i.creativecommons.org/l/by-sa/4.0/80x15.png" /></a>&nbsp;| Dennis G. Wilson | <a href="https://d9w.github.io/evolution/">https://d9w.github.io/evolution/</a> # + [markdown] slideshow={"slide_type": "slide"} # ## Outline # # 1. [Genetic Algorithm Overview](#overview) # 2. [Initialization](#populations) # 3. [Selection](#selection) # 4. [Mutation](#mutation) # 5. [Crossover](#crossover) # 6. [Putting it all together](#together) # 7. [Elitism](#elitism) # + slideshow={"slide_type": "slide"} # Included in Julia using Random using Statistics using StatsBase # Installing using Pkg.add("name") or "]" -> "pkg> add name" from the REPL using Plots using BlackBoxOptimizationBenchmarking # + [markdown] slideshow={"slide_type": "slide"} # ## <a id="overview"></a>Genetic Algorithm Overview # + [markdown] slideshow={"slide_type": "fragment"} # Recall the outline of evolutionary algorithms that we discussed in section 1.2. The outline of an evolutionary algorithm is to start with an initial population and then to iterate through a cycle of evaluation, selection, and modification, passing the modified population on as the next generation. # # <img src="../imgs/AlgoG.png"> # # This is the underlying algorithm of a genetic algorithm. This term is general and can cover a variety of more specific algorithms such as NSGA-II and NEAT, but in this section we'll discuss a classic genetic algorithm. 
# + [markdown] slideshow={"slide_type": "slide"}
# The defining features of a classic genetic algorithm are a large population initialized at random and the mutation and crossover operators during the modification step. In a classic GA, we will use a selection method to randomly select two individuals from the population, combine them using crossover, mutate the resulting individual, and add it to the new population. We will repeat this until the new population is full. Let's write that basic outline as a function.

# + slideshow={"slide_type": "slide"}
# Abstract supertype for all candidate solutions; the concrete genome
# types (BitInd, FloatInd) are declared as subtypes below.
abstract type Individual end

# One GA generation: evaluate the current population, then fill a fresh
# population with mutated crossover children of selected parents.
# `evaluate!`, `select`, `crossover` and `mutate` are free names that are
# bound to concrete functions later in the notebook.
# Returns (new_population, best fitness of the evaluated old population).
function step_ga(population::Array{<:Individual})
    evaluate!(population)
    max_fit = maximum([i.fitness for i in population])
    new_population = Array{Individual}(undef, 0)
    while length(new_population) < length(population)
        parent1 = select(population)
        parent2 = select(population)
        child1 = crossover(parent1, parent2)
        child1 = mutate(child1)
        push!(new_population, child1)
    end
    new_population, max_fit
end

# + [markdown] slideshow={"slide_type": "slide"}
# We've defined a single step of the genetic algorithm and can fill in the functions later. Let's now define the full evolution, keeping track of the maximum fitness in the population at each step.

# + slideshow={"slide_type": "fragment"}
# Run `n_generations` GA steps starting from `initialize()` (bound later)
# and return the per-generation best-fitness history.
function ga(n_generations::Int)
    population = initialize()
    fits = zeros(n_generations)
    for i in 1:n_generations
        population, max_fit = step_ga(population)
        fits[i] = max_fit
    end
    fits
end

# + [markdown] slideshow={"slide_type": "slide"}
# ## <a id="initialization"></a>Initialization

# + [markdown] slideshow={"slide_type": "fragment"}
# Earlier we saw a new bit of Julia:
# ```
# abstract type Individual end
# ```
# This let us use the type `Array{Individual}` in our function definition even though a concrete definition of `Individual` hasn't been used yet. In the last notebook, we had to redefine functions in order to switch between the boolean `Individual` and the `FloatIndividual`. Abstract types let us define functions which can work for either type, as long as they are subtypes of this abstract. This is known as multiple dispatch and we'll use it to define the later parts of our genetic algorithm. Let's define our two `Individual` types for now.

# + slideshow={"slide_type": "slide"}
# Binary-genome individual (e.g. for OneMax); fitness starts at -Inf,
# meaning "not evaluated yet" (evaluate! assigns the real value later).
mutable struct BitInd <: Individual
    genes::BitArray
    fitness::Float64
end

# Convenience constructor: n random bits, unevaluated.
function BitInd(n::Int)
    BitInd(bitrand(n), -Inf)
end

# + slideshow={"slide_type": "slide"}
# Real-valued-genome individual for the continuous benchmarks; fitness
# likewise starts at -Inf ("not evaluated yet").
mutable struct FloatInd <: Individual
    genes::Array{Float64}
    fitness::Float64
end

# Convenience constructor: n uniform random genes in [0, 1), unevaluated.
function FloatInd(n::Int)
    FloatInd(rand(n), -Inf)
end

# + [markdown] slideshow={"slide_type": "slide"}
# This will be very useful later, but for now just remember that we still have our two different types, `BitInd` and `FloatInd`, but that we can refer to both using `Individual`. Let's create population initialization methods for these two types.

# + slideshow={"slide_type": "fragment"}
# Random initial populations of `population_size` individuals, each with
# a genome of length `n`.
function bit_init(population_size::Int, n::Int)
    [BitInd(n) for i in 1:population_size]
end

function float_init(population_size::Int, n::Int)
    [FloatInd(n) for i in 1:population_size]
end

# + [markdown] slideshow={"slide_type": "slide"}
# Let's see our different populations:

# + slideshow={"slide_type": "fragment"}
population = bit_init(100, 4)
population[1:3]

# + slideshow={"slide_type": "fragment"}
population = float_init(100, 4)
population[1:3]

# + [markdown] slideshow={"slide_type": "slide"}
# ## <a id="populations"></a>Evaluation
#
# The first part of the genetic algorithm step is the evaluation. We'll look at two evaluation functions, one for our binary individual type, and one for the floating point individuals.

# + [markdown] slideshow={"slide_type": "slide"}
# Looking at our genetic algorithm implementation, we use an `evaluate!` function to evaluate the entire population. Let's write that general function.
# + slideshow={"slide_type": "fragment"} function evaluate!(population::Array{<:Individual}) for i in population i.fitness = objective(i) end end # + [markdown] slideshow={"slide_type": "fragment"} # Note the `<:Individual` in our definition of `population`. This means that population is expected to be an `Array` of any subtype of `Individual`, meaning `BitInd` or `FloatInd`. This evaluate function will work for both types then. Let's write individual objective functions for these two different types. For the binary type, we'll use the OneMax function again. # + slideshow={"slide_type": "slide"} function objective(i::BitInd) sum(i.genes) end # + [markdown] slideshow={"slide_type": "fragment"} # Let's test that. # + slideshow={"slide_type": "fragment"} population = bit_init(10, 4) evaluate!(population) print([i.fitness for i in population]) # + [markdown] slideshow={"slide_type": "slide"} # For a floating point problem, let's look at something a bit more difficult than the sphere problem. Specifically, we'll use the [Rastrigin function](https://en.wikipedia.org/wiki/Test_functions_for_optimization). # + slideshow={"slide_type": "fragment"} rastrigin(x) = 10*length(x) .+ sum(x.^2 .- 10 .* cos.(2*pi*x)) x = -5:0.1:5 y = -5:0.1:5 fz(x, y) = rastrigin([x, y]) plot(plot(x, y, fz, st=:surface), plot(x, y, fz, st=:contour), size = (800, 300)) # + [markdown] slideshow={"slide_type": "slide"} # Instead of using the original function, we'll use the [BlackBoxOptimizationBenchmarking](https://github.com/jonathanBieler/BlackBoxOptimizationBenchmarking.jl) library which implements the [BBOB](https://coco.gforge.inria.fr/) function set, including the Rastrigin function. This package adds random rotations to the functions to test the robustness of optimization methods. Here I'm using `Random.seed!` to start the Random Number Generator at a specific point, but if you change `41` or remove this line, you can test different random rotations (after restarting the kernel). 
# + slideshow={"slide_type": "fragment"} Random.seed!(41) rast = BlackBoxOptimizationBenchmarking.F15 x = 0:0.01:1 y = 0:0.01:1 fz(x, y) = rast([x, y]) plot(plot(x, y, fz, st=:surface), plot(x, y, fz, st=:contour), size = (800, 300)) # + [markdown] slideshow={"slide_type": "slide"} # Let's define our second objective function using this. Note that we'll also just call it `objective`, like the one we defined before. However, we'll be making a second function which applies to the `FloatInd` type. # + slideshow={"slide_type": "fragment"} function objective(i::FloatInd) rast(i.genes) end # + [markdown] slideshow={"slide_type": "fragment"} # We can see the different functions we've defined used `methods` # + slideshow={"slide_type": "fragment"} methods(objective) # + [markdown] slideshow={"slide_type": "slide"} # Let's see what our first population of floating point individuals looks like in the Rastrigin function **search space**. # + slideshow={"slide_type": "fragment"} population = float_init(100, 10) xs = [i.genes[1] for i in population] ys = [i.genes[2] for i in population] plot(x, y, fz, st=:contour) scatter!(xs, ys, label="pop", legend=:none) # + [markdown] slideshow={"slide_type": "slide"} # As we can see, having a large population of individuals means we have a chance of already starting out in a good position. However, some others might be in **local maxima** - points which are higher than the areas around them, but not the **global maximum**. These points can be deceptive if we tend to select only the individuals in high points. What we want to do with selection in a genetic algorithm is maintain **diversity** while moving towards the **global maximum**. Let's see how we can do that. # + [markdown] slideshow={"slide_type": "fragment"} # <div class="alert alert-success"> # <b>Exercise 1</b> # <br/> # Visualize some of the other functions from the BBOB set. Why do you think the Rastrigin function is more difficult than the sphere function? 
Is there any other function which looks more difficult? # <br/> # </div> # + active="" # Rastrigin contient de nombreux maximum locaux. Risque de converger dans ces maximum sans qu'ils soient le maximum global. # D'autres fonctions ont ce problème: Schwefel Function par exemple. # + [markdown] slideshow={"slide_type": "slide"} # ## <a id="selection"></a>Selection # + [markdown] slideshow={"slide_type": "fragment"} # Which individuals should pass on their genetic information to the next generation? We could imagine a simple schemes of taking the best individuals globally, say 20% of them, and mutating each of these experts for the next generation. In a simple problem like this one, such a method might work. However, we would lose important genetic diversity, one of the main advantages of our large population. Let's explore some different selection methods. To do so, we'll need to evaluate all individuals. # + [markdown] slideshow={"slide_type": "slide"} # For this first proposed method of taking the top individuals, which is called **truncation selection**, we'll need to order them. In Julia, we can override any function, including the basic functions of the language. To order our population, we can use the main `sort!` method, which relies on being able to compare individuals using `<`. So let's make a new definition for the `<`, or `isless`, function. # + slideshow={"slide_type": "fragment"} import Base.isless function isless(i1::Individual, i2::Individual) i1.fitness < i2.fitness end evaluate!(population) println(population[1]) println(population[2]) println(population[1] < population[2]) # + [markdown] slideshow={"slide_type": "fragment"} # Now that we have a way of comparing individuals, we can order lists of them using the `sort!` function. We'll sort from biggest to smallest to have the best individuals at the beginning of the population. 
# + slideshow={"slide_type": "slide"} population = float_init(100, 2) evaluate!(population) fits = [i.fitness for i in population] println("First: ", population[1].fitness, ", last :", population[end].fitness) println("Max: ", maximum(fits), ", min: ", minimum(fits)) # + slideshow={"slide_type": "fragment"} sort!(population, rev=true) println("First: ", population[1].fitness, ", last :", population[end].fitness) println("Max: ", maximum(fits), ", min: ", minimum(fits)) # + [markdown] slideshow={"slide_type": "slide"} # Let's plot where our best individuals are # + slideshow={"slide_type": "fragment"} xs = [i.genes[1] for i in population]; ys = [i.genes[2] for i in population] plot(x, y, fz, st=:contour) scatter!(xs[21:end], ys[21:end], label="pop", legend=:outerright) scatter!(xs[1:20], ys[1:20], label="experts") # + [markdown] slideshow={"slide_type": "fragment"} # As we can see, using this selection method, we risk optimizing towards local maxima, especially if we completely discard the rest of the population. Let's look at other selection methods for now, and we'll keep truncation selection in mind for later. # + [markdown] slideshow={"slide_type": "slide"} # We will instead assign a probability to each individual based on their fitness. This is known as **fitness proportionate selection**. Specifically, we will use the probability # # $p_i = \frac{f_i}{\sum_{j=1}^N f_j}$ # # The probability is each fitness divided by the sum. We'll use the `.` dot broadcast operator in Julia to divide each element of `fits` by the sum. 
# + slideshow={"slide_type": "fragment"} fits = [i.fitness for i in population] p = fits ./ sum(fits); # + slideshow={"slide_type": "slide"} histogram(p) # + [markdown] slideshow={"slide_type": "slide"} # We can use these weights to sample from our population using the `sample` function from the `StatsBase` package # + slideshow={"slide_type": "fragment"} sample(population, Weights(p)) # + [markdown] slideshow={"slide_type": "fragment"} # Let's write this according to the `select` function definition we used before, which takes in the population and gives out a single individual. # + slideshow={"slide_type": "fragment"} function fp_select(population::Array{<:Individual}) fits = [i.fitness for i in population] p = fits ./ sum(fits) sample(population, Weights(p)) end # + [markdown] slideshow={"slide_type": "slide"} # Does this perform better than our previous method? Let's select 20 individuals using this method. # + slideshow={"slide_type": "fragment"} selected = Array{Individual}(undef, 20) for i in eachindex(selected) selected[i] = fp_select(population) end # + slideshow={"slide_type": "slide"} sxs = [i.genes[1] for i in selected] sys = [i.genes[2] for i in selected] xs = [i.genes[1] for i in population] ys = [i.genes[2] for i in population] plot(x, y, fz, st=:contour) scatter!(xs, ys, label="pop", legend=:outertopright) scatter!(sxs, sys, label="fp_select") # + [markdown] slideshow={"slide_type": "slide"} # As we can see here, individuals are not only found in local maxima but throughout the search space, which is good for diversity. The last selection method we'll use is called tournament selection. This method creates small random tournaments and selects the winner from this smaller subset for every new individual. We'll use a tournament size of 3, so we'll randomly select 3 individuals and then return the best individual from those 3. 
# + slideshow={"slide_type": "fragment"} function tournament_select(population::Array{<:Individual}) tournament = sample(population, 3) sort!(tournament, rev=true)[1] end # + [markdown] slideshow={"slide_type": "slide"} # Let's look at 20 selections from the tournament selection method and compare it to the fitness proportionate method. # + slideshow={"slide_type": "fragment"} winners = Array{Individual}(undef, 20) for i in eachindex(winners) winners[i] = tournament_select(population) end # + slideshow={"slide_type": "slide"} wxs = [i.genes[1] for i in winners] wys = [i.genes[2] for i in winners] p1 = plot(x, y, fz, st=:contour) scatter!(xs, ys, legend=:none) scatter!(sxs, sys) title!("Fitness Proportionate") p2 = plot(x, y, fz, st=:contour) scatter!(xs, ys, legend=:none) scatter!(wxs, wys) title!("Tournament") plot(p1, p2, size=(950, 400)) # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-success"> # <b>Exercise 2</b> # <br/> # Plot the histogram of fitness values of 100 selected individuals using these two methods, and using a larger $n$ value. How do the distributions from the two methods compare? Try it also for a population of binary individuals on the OneMax function. Finally, try increasing the tournament size. What effect does that have on the selected individuals' fitness distribution? 
# <br/> # </div> # - pop_size = 500 population = float_init(pop_size, 40) #population = bit_init(pop_size, 10) evaluate!(population) selected = Array{Individual}(undef, pop_size) for i in eachindex(selected) selected[i] = fp_select(population) end fits_p = [i.fitness for i in selected] winners = Array{Individual}(undef, pop_size) for i in eachindex(winners) winners[i] = tournament_select(population) end fits_t = [i.fitness for i in winners] t = histogram(fits_t) title!("Tournament") p = histogram(fits_p) title!("Fitness Proportionate") plot(p, t, size=(950, 400)) # + [markdown] slideshow={"slide_type": "slide"} # ## <a id="mutation"></a>Mutation # + [markdown] slideshow={"slide_type": "fragment"} # For this genetic algorithm, we'll simply reuse the functions we defined in section 1.3. We will flip the gene bit for binary individuals and use `rand` for floating point individuals. # + slideshow={"slide_type": "slide"} function mutate(ind::BitInd; mutation_rate::Float64=1.0/length(ind.genes)) new_genes = copy(ind.genes) for i in eachindex(new_genes) if rand() < mutation_rate new_genes[i] = ~ind.genes[i] end end BitInd(new_genes, -Inf) end # + slideshow={"slide_type": "slide"} function mutate(ind::FloatInd; mutation_rate::Float64=1.0/length(ind.genes)) new_genes = copy(ind.genes) for i in eachindex(new_genes) if rand() < mutation_rate new_genes[i] = rand() end end FloatInd(new_genes, -Inf) end # + [markdown] slideshow={"slide_type": "slide"} # Let's test it # + slideshow={"slide_type": "fragment"} population = bit_init(100, 4) println(population[1]) println(mutate(population[1])) # + [markdown] slideshow={"slide_type": "slide"} # ## <a id="crossover"></a>Crossover # # Considering we have such a large population, is there some way to combine individual solutions to lead to better solutions? For example, could we make an individual which inherits information from two parent individuals? 
This is the idea behind crossover, the other operator in genetic algorithms besides mutation. It is based on sexual reproduction where the genetic information of two parent individuals is mixed to create an offspring individual. The idea of combining the information from multiple individuals together to create the next generation is something we'll explore in more detail next class when discussing evolutionary strategies. For now, let's look at ways to combine two individuals. # + [markdown] slideshow={"slide_type": "slide"} # <img src="../imgs/crossover.png" width="80%" height="auto"> # + [markdown] slideshow={"slide_type": "fragment"} # The first methods we'll look at is single-point crossover. Here, the child genes are composed of two continuous sections, the first from one parent and the second from the other parent. # + slideshow={"slide_type": "slide"} function one_point_crossover(p1::Individual, p2::Individual) child = copy(p1.genes) n = rand(1:length(p2.genes)) child[n:end] = copy(p2.genes[n:end]) typeof(p1)(child, -Inf) end # + [markdown] slideshow={"slide_type": "slide"} # Let's test that # + slideshow={"slide_type": "fragment"} p1 = BitInd(15) p2 = BitInd(15) println("P1: ", p1) println("P2: ", p2) child = one_point_crossover(p1, p2) println("C : ", child) # + [markdown] slideshow={"slide_type": "slide"} # This is most useful when sequential sections of a genome should be passed together, which is the case in biological evolution but not always necessary in artificial evolution. This method is also the basis of k-point crossover, which follows the same method but splits at $k$ points, alternating between parents at each crossing point. Note that this method can generate two children at once, but for the sake of coherence we'll just use the first child. # + [markdown] slideshow={"slide_type": "slide"} # The second crossover method we'll look at is uniform crossover, which randomly chooses a different parent for each gene. 
# + slideshow={"slide_type": "fragment"}
# Uniform crossover: each gene of the child is inherited from p1 or p2
# with probability 0.5. Built with `typeof(p1)` so it works for both
# BitInd and FloatInd; the child starts unevaluated (fitness -Inf).
function uniform_crossover(p1::Individual, p2::Individual)
    child = copy(p1.genes)
    for i in eachindex(child)
        if rand() < 0.5
            child[i] = p2.genes[i]
        end
    end
    typeof(p1)(child, -Inf)
end

# + [markdown] slideshow={"slide_type": "slide"}
# And the test

# + slideshow={"slide_type": "fragment"}
p1 = FloatInd(5)
p2 = FloatInd(5)
println("P1: ", p1)
println("P2: ", p2)
# Bug fix: this cell demonstrates *uniform* crossover but previously
# called one_point_crossover by mistake.
child = uniform_crossover(p1, p2)
println("C : ", child)

# + [markdown] slideshow={"slide_type": "slide"}
# <div class="alert alert-success">
# <b>Exercise 3</b>
# <br/>
# Which crossover method do you expect to work better for the OneMax problem? What about for the Rastrigin function? Explain your reasoning.
# <br/>
# </div>

# + active=""
#

# + [markdown] slideshow={"slide_type": "slide"}
# ## <a id="together"></a>Putting it all together

# + [markdown] slideshow={"slide_type": "fragment"}
# Let's look back at the functions we defined earlier

# + slideshow={"slide_type": "slide"}
# (Same definition as earlier in the notebook, repeated for the slides.)
function step_ga(population::Array{<:Individual})
    evaluate!(population)
    max_fit = maximum([i.fitness for i in population])
    new_population = Array{Individual}(undef, 0)
    while length(new_population) < length(population)
        parent1 = select(population)
        parent2 = select(population)
        child1 = crossover(parent1, parent2)
        child1 = mutate(child1)
        push!(new_population, child1)
    end
    new_population, max_fit
end

# + slideshow={"slide_type": "slide"}
function ga(n_generations::Int)
    population = initialize()
    fits = zeros(n_generations)
    for i in 1:n_generations
        population, max_fit = step_ga(population)
        fits[i] = max_fit
    end
    fits
end

# + [markdown] slideshow={"slide_type": "slide"}
# We need to define our function choices for `initialize`, `select`, and `crossover`. Let's focus on the Rastrigin function and use uniform crossover. We'll compare the two selection methods, starting with fitness proportionate selection.
# + slideshow={"slide_type": "fragment"} initialize() = float_init(100, 10) crossover = uniform_crossover # + slideshow={"slide_type": "slide"} select = fp_select fits_fp = zeros(100, 10) for i in 1:10 fits_fp[:, i] = ga(100) println(i, " ", fits_fp[end, i]) end # + slideshow={"slide_type": "slide"} select = tournament_select fits_tourney = zeros(100, 10) for i in 1:10 fits_tourney[:, i] = ga(100) println(i, " ", fits_tourney[end, i]) end # + slideshow={"slide_type": "slide"} μ_fp = mean(fits_fp, dims=2) σ_fp = std(fits_fp, dims=2) μ_tourney = mean(fits_tourney, dims=2) σ_tourney = std(fits_tourney, dims=2) plot(μ_fp, ribbon=σ_fp, label="FP", legend=:outertopright) plot!(μ_tourney, ribbon=σ_tourney, label="Tournament") # + [markdown] slideshow={"slide_type": "slide"} # While the randomness of tournament selection may appear disadvantageous, the diversity it provides in the search allows us to continue looking through new areas where we might get stuck with fitness proportionate selection. Tournament selection also gaurantees improvement by selecting individuals that are at least better than two other individuals in the population, while fitness proportionate selection may randomly choose bad individuals often. # + [markdown] slideshow={"slide_type": "fragment"} # Our results seem to fluctuate quite a bit. Why is that? We didn't see that before with the $(1+1)$ and $(1+\lambda)$ EAs... # + [markdown] slideshow={"slide_type": "slide"} # ## <a id="elitism"></a>Elitism # + [markdown] slideshow={"slide_type": "fragment"} # An advantage that the $(1+1)$ and $(1+\lambda)$ EAs have over the genetic algorithm we've coded here is that their convergence is **monotonically increasing**. This means as evolution goes on, the result will only improve and never get worse. This is because the best fitness is only ever removed when the expert is replaced by an equally good or better individual. 
However, neither of the selection methods we compared have this guarantee: the best individual can leave the population easily! While completely basing our selection on global competition may not be a good practice, we do want to keep at least the best individual. This is known as **elitism** and is the practice of directly passing on a certain number of individuals *without mutation* into the next generation. This is similar to the truncation selection we used at the beginning, but the individuals selected from truncation selection will bypass the modification step entirely. # + slideshow={"slide_type": "slide"} num_elites = 5 select = tournament_select function step_ga(population::Array{<:Individual}) evaluate!(population) sort!(population, rev=true) max_fit = maximum([i.fitness for i in population]) new_population = Array{Individual}(undef, 0) append!(new_population, population[1:num_elites]) while length(new_population) < length(population) parent1 = select(population) parent2 = select(population) child1 = crossover(parent1, parent2) child1 = mutate(child1) push!(new_population, child1) end new_population, max_fit end # + slideshow={"slide_type": "slide"} select = fp_select fits_fp = zeros(100, 10) for i in 1:10 fits_fp[:, i] = ga(100) println(i, " ", fits_fp[end, i]) end # + slideshow={"slide_type": "slide"} select = tournament_select fits_tourney = zeros(100, 10) for i in 1:10 fits_tourney[:, i] = ga(100) println(i, " ", fits_tourney[end, i]) end # + slideshow={"slide_type": "slide"} μ_fp = mean(fits_fp, dims=2) σ_fp = std(fits_fp, dims=2) μ_tourney = mean(fits_tourney, dims=2) σ_tourney = std(fits_tourney, dims=2) plot(μ_fp, ribbon=σ_fp, label="FP", legend=:outertopright) plot!(μ_tourney, ribbon=σ_tourney, label="Tournament") # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-success"> # <b>Exercise 4</b> # <br/> # We now have many parameter choices: population size, mutation rate, tournament size if using tournament selection, 
crossover function, number of elites. Using a maximum of 10000 <b>evaluations</b> (ie 100 generations with a population of 100), compare these parameter choices. What is the best value you can reach with $n=10$? Do the parameter results you get generalize to higher values of $n$? The best value should include the average and standard deviation over multiple runs. # <br/> # </div> # + ##Mutation rate increasing function mutate(ind::FloatInd, mutation_rate::Float64) new_genes = copy(ind.genes) for i in eachindex(new_genes) if rand() < mutation_rate new_genes[i] = rand() end end FloatInd(new_genes, -Inf) end num_elites = 5 select = tournament_select function step_ga(population::Array{<:Individual}, mutation_rate::Float64) evaluate!(population) sort!(population, rev=true) max_fit = maximum([i.fitness for i in population]) new_population = Array{Individual}(undef, 0) append!(new_population, population[1:num_elites]) while length(new_population) < length(population) parent1 = select(population) parent2 = select(population) child1 = crossover(parent1, parent2) child1 = mutate(child1, mutation_rate) push!(new_population, child1) end new_mutation_rate = mutation_rate if(mutation_rate<0.9) new_mutation_rate = new_mutation_rate + 0.01 end new_population, max_fit, new_mutation_rate end function ga(n_generations::Int) population = initialize() fits = zeros(n_generations) mutation_rate = 0.001 for i in 1:n_generations population, max_fit, mutation_rate = step_ga(population, mutation_rate) fits[i] = max_fit end fits end select = fp_select fits_fp = zeros(100, 10) for i in 1:10 fits_fp[:, i] = ga(100) println(i, " ", fits_fp[end, i]) end select = tournament_select fits_tourney = zeros(100, 10) for i in 1:10 fits_tourney[:, i] = ga(100) println(i, " ", fits_tourney[end, i]) end μ_fp = mean(fits_fp, dims=2) σ_fp = std(fits_fp, dims=2) μ_tourney = mean(fits_tourney, dims=2) σ_tourney = std(fits_tourney, dims=2) plot(μ_fp, ribbon=σ_fp, label="FP", legend=:outertopright) 
plot!(μ_tourney, ribbon=σ_tourney, label="Tournament") # + [markdown] slideshow={"slide_type": "fragment"} # <div class="alert alert-success"> # <b>Exercise 5</b> # <br/> # Run the same analysis for the binary problem OneMax. Compare this result to the $(1+1)$ and $(1+\lambda)$ EAs. Which method is the best? # <br/> # </div> # - initialize() = bit_init(100, 10) crossover = uniform_crossover select = fp_select fits_fp = zeros(100, 10) for i in 1:10 fits_fp[:, i] = ga(100) println(i, " ", fits_fp[end, i]) end select = tournament_select fits_tourney = zeros(100, 10) for i in 1:10 fits_tourney[:, i] = ga(100) println(i, " ", fits_tourney[end, i]) end μ_fp = mean(fits_fp, dims=2) σ_fp = std(fits_fp, dims=2) μ_tourney = mean(fits_tourney, dims=2) σ_tourney = std(fits_tourney, dims=2) plot(μ_fp, ribbon=σ_fp, label="FP", legend=:outertopright) plot!(μ_tourney, ribbon=σ_tourney, label="Tournament") select = tournament_select fits_tourney = zeros(100, 10) for i in 1:10 fits_tourney[:, i] = ga(100) println(i, " ", fits_tourney[end, i]) end μ_tourney = mean(fits_tourney, dims=2) σ_tourney = std(fits_tourney, dims=2) plot!(μ_tourney, ribbon=σ_tourney, label="Tournament")
30,330
/CNN_Assignment_Skin_Cancer_Classification.ipynb
52b9c9910e0c7a2577cb10c213de8008315adc8e
[]
no_license
ViswaSaiRaman/Skin-Cancer-Detection-using-CNN
https://github.com/ViswaSaiRaman/Skin-Cancer-Detection-using-CNN
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,456,798
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="yDriIbfa5lwD" # #**Problem statement:** # To build a CNN based model which can accurately detect melanoma. Melanoma is a type of cancer that can be deadly if not detected early. It accounts for 75% of skin cancer deaths. A solution which can evaluate images and alert the dermatologists about the presence of melanoma has the potential to reduce a lot of manual effort needed in diagnosis. # + [markdown] id="JfcpIXQZN2Rh" # ### Importing all the important libraries # + id="WC8xCQuELWms" import pathlib import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import pandas as pd import os import PIL from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential # + id="XVQlyMZ8H5ST" #making the random picking of model consistent by setting the seed from numpy.random import seed seed(1) tf.random.set_seed(2) # + [markdown] id="lvR7ppk77v31" # ### **Importing Skin Cancer Data** # # + colab={"base_uri": "https://localhost:8080/"} id="Ff6ftXanjdl1" outputId="6eacd201-a6a6-4217-b1c0-bf5925d6d3d9" #mounting the google drive account to read data #any one running this file should have the dataset uploaded into their google drive 'My Drive' Folder from google.colab import drive drive.mount('/content/gdrive') # + id="D57L-ovIKtI4" # Defining the path for train and test images # Defininf the root path of the dataset root = "/content/gdrive/My Drive/Skin-Cancer-CNN/" #defining paths for train and test data data_dir_train = pathlib.Path(root+"Train") data_dir_test = pathlib.Path(root+"Test") # + id="DqksN1w5Fu-N" colab={"base_uri": "https://localhost:8080/"} outputId="2975cb4b-de13-4a97-feba-9b4b86b2b3cc" #printing the shape of the data image_count_train = len(list(data_dir_train.glob('*/*.jpg'))) 
print(image_count_train) image_count_test = len(list(data_dir_test.glob('*/*.jpg'))) print(image_count_test) # + [markdown] id="O8HkfW3jPJun" # ### **Load using keras.preprocessing** # # Loading these images off disk using the helpful image_dataset_from_directory utility. # + [markdown] id="cDBKZG3jPcMc" # #### **Creating a dataset** # # Define some parameters for the loader: # + id="VLfcXcZ9LjGv" #defining parameters batch_size = 32 img_height = 180 img_width = 180 directory = root+"Train/" # + [markdown] id="Y5f5y43GPog1" # Using 80% of the images for training, and 20% for validation. # + id="G1BWmDzr7w-5" colab={"base_uri": "https://localhost:8080/"} outputId="086daa35-c449-44d9-cf31-b4a0ff3bf22f" # defining the train dataset with image resized into 180X180 and a batch size of 32 train_ds = tf.keras.preprocessing.image_dataset_from_directory( directory, batch_size=32, image_size=(180, 180), shuffle=True, seed=123, validation_split=0.2, subset='training', smart_resize=False ) # + id="LYch6-SR-i2g" colab={"base_uri": "https://localhost:8080/"} outputId="831dd5e7-17a6-4bef-921b-bddd6e387b98" # defining the test dataset with the same parameters as above val_ds = tf.keras.preprocessing.image_dataset_from_directory( directory, batch_size=32, image_size=(180, 180), shuffle=True, seed=123, validation_split=0.2, subset='validation', smart_resize=False ) # + id="Bk0RV7G7-nad" colab={"base_uri": "https://localhost:8080/"} outputId="d2d8916f-e62a-4571-cfc2-807e8a73784f" # listing out the distinct class names class_names = train_ds.class_names print(class_names) # + [markdown] id="jbsm5oYiQH_b" # ### **Data Visualization** # # + colab={"base_uri": "https://localhost:8080/", "height": 591} id="dqwqu63A8Qp4" outputId="e2364461-b3f1-4efa-c071-f664991eadb8" # plotting a sample image from each of the 9 classes plt.figure(figsize=(10,10)) for i in range(len(class_names)): filtered_ds = train_ds.filter(lambda x, l: tf.math.equal(l[0], i)) for image, label in filtered_ds.take(1): ax 
= plt.subplot(3, 3, i+1) plt.imshow(image[0].numpy().astype('uint8')) plt.title(class_names[label.numpy()[0]]) plt.axis('off') # + [markdown] id="8cAZPYaeQjQy" # The `image_batch` is a tensor of the shape `(32, 180, 180, 3)`. This is a batch of 32 images of shape `180x180x3` (the last dimension refers to color channels RGB). The `label_batch` is a tensor of the shape `(32,)`, these are corresponding labels to the 32 images. # + [markdown] id="jzVXBHiyQ7_I" # `Dataset.cache()` keeps the images in memory after they're loaded off disk during the first epoch. # # `Dataset.prefetch()` overlaps data preprocessing and model execution while training. # + id="7wZlKRBEGNtU" AUTOTUNE = tf.data.experimental.AUTOTUNE train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) # + [markdown] id="1JEAF6-sRyz8" # ### **Create the model** # #### Creating a CNN model, which can accurately detect 9 classes present in the dataset. # + colab={"base_uri": "https://localhost:8080/"} id="y-OgaaGEoxZd" outputId="1e953de2-0c72-40c4-fbab-3d40405efbe7" num_classes = 9 model = Sequential() #adding a rescale layer where each value of the pixel is brought between 0 and 1. 
model.add(layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)))

#Adding the first set of convolutional and pooling layer
model.add(layers.Conv2D(16, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))

#Adding the second set of convolutional and pooling layer
model.add(layers.Conv2D(32, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))

#Adding the third set of convolutional and pooling layer
model.add(layers.Conv2D(64, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))

#Adding the Flatten layer
model.add(layers.Flatten())

#Adding the FC layers
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
# Bug fix: the output layer was created but never added to the model
# (bare `layers.Dense(num_classes)` expression), so the network ended in
# a 128-unit layer instead of 9 class logits. No softmax here because the
# loss below uses SparseCategoricalCrossentropy(from_logits=True).
model.add(layers.Dense(num_classes))

# + [markdown] id="SDKzJmHwSCtt"
# ### Compile the model
# Choose an appropriate optimiser and loss function for model training

# + id="XB8wKtiPGe1j"
### Choosing an optimizer and loss function and compiling the model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# + id="_ZGWN4MZGhtJ" colab={"base_uri": "https://localhost:8080/"} outputId="597fe6ef-d0b8-4243-dc89-9305645d7e66"
# View the summary of all layers
model.summary()

# + [markdown] id="ljD_83rwSl5O"
# ### Train the model

# + id="Kkfw2rJXGlYC" colab={"base_uri": "https://localhost:8080/"} outputId="1fd02f81-b2f9-48eb-ee45-04c0cae0b68c"
# Training the model with 20 epochs
epochs = 20
history = model.fit(
  train_ds,
  validation_data=val_ds,
  epochs=epochs
)

# + [markdown] id="w3679V8OShSE"
# ### Visualizing training results

# + id="R1xkgk5nGubz" colab={"base_uri": "https://localhost:8080/", "height": 503} outputId="42faca05-94d6-4548-ac4a-caa01e39da70"
# visualizing the results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.rcParams.update({'font.size': 12})
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

# + [markdown] id="3vRTPbJEn-pX"
# ### Observations
#
# * The training and validation accuracy are quite far from each other which clearly shows that the model has overfitted.
#
# * Let's add some randomization by changing the orientation of images to get some variation
#

# + id="22hljAl6GykA"
# Changing the orientation by using data augmentation.
data_augmentation = keras.Sequential(
  [
    layers.experimental.preprocessing.RandomFlip("horizontal",
                                                 input_shape=(img_height,
                                                              img_width,
                                                              3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
  ]
)

# + id="XEjPWh8GG0C7" colab={"base_uri": "https://localhost:8080/", "height": 575} outputId="7e879180-7ee7-4862-dbc7-a7ae47919e73"
# Todo, visualize how your augmentation strategy works for one instance of training image.
# Your code goes here plt.figure(figsize=(10, 10)) for images, _ in train_ds.take(1): for i in range(9): augmented_images = data_augmentation(images) ax = plt.subplot(3, 3, i + 1) plt.imshow(augmented_images[0].numpy().astype("uint8")) plt.axis("off") # + [markdown] id="XhKDHlUdTuSX" # # ### Compiling the model after applying the augmentation # # + id="W3V4l-O9G3dM" colab={"base_uri": "https://localhost:8080/"} outputId="06ea9c58-bede-4601-e355-41d19a9b0df1" # Adding dropouts in addition to the augmentation layer to reduce overfitting num_classes = 9 model = Sequential() model.add(layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))) model.add(layers.Conv2D(16, 3, padding='same', activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(32, 3, padding='same', activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(64, 3, padding='same', activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Flatten()) model.add(layers.Dropout(0.25)) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.25)) model.add(layers.Dense(128, activation='relu')) layers.Dense(num_classes) # + [markdown] id="FfUWFp96UIAN" # ### Compiling the model # + id="_-7yTm8IG8zR" colab={"base_uri": "https://localhost:8080/"} outputId="3a7ffd37-1cb9-407e-d834-7795e743fea7" # Compiling the model model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.summary() # + [markdown] id="kC-D_RWOURp6" # ### Training the model # + id="UcPfkUASHBf9" colab={"base_uri": "https://localhost:8080/"} outputId="34a1bbb1-42f9-4fb6-a87b-f1824399d2cf" ## Training the model with 20 epochs epochs = 20 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs ) # + [markdown] id="IhNOKtSyUYzC" 
# ### Visualizing the results # + id="vjN_F4QxHIsh" colab={"base_uri": "https://localhost:8080/", "height": 503} outputId="ea95dfef-64f3-42fe-bd5a-78fbf381164d" acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() # + [markdown] id="0-AUR_b7UcaK" # #### Observations : # # * We now see significant improvement as we have overcome the problem of overfitting to a good extent. # # * However, the accuracy is not so proimising and is to be worked on. # + [markdown] id="7TdDi4u-VTkW" # #### **Finding the distribution of classes in the training dataset.** # #### **Context:** Many times real life datasets can have class imbalance, one class can have proportionately higher number of samples compared to the others. Class imbalance can have a detrimental effect on the final model quality. Hence as a sanity check it becomes important to check what is the distribution of classes in the data. 
# + id="HAhwYgtTQRzq" colab={"base_uri": "https://localhost:8080/", "height": 890} outputId="11acd4bb-9732-45a1-e00e-cbf329d9e4e0" # visualizing the distributing of classes category=[] count=[] import os for cls in os.listdir(directory): if cls=='.DS_Store': continue category.append(cls) count.append(len(os.listdir(directory+cls))) plt.rcParams.update({'font.size': 22}) plt.figure(figsize=(15,10)) plt.bar(x=category,height=count) plt.xticks(rotation = 90) plt.show() # + [markdown] id="4csQL1dvO0b2" # # #### - Which class has the least number of samples? - **Seborrheic Keratosis** # #### - Which classes dominate the data in terms proportionate number of samples? - **Pigmented Benign Keratosis** # # + [markdown] id="Hb-stKyHPf8v" # #### **Rectifying the class imbalance** # # + id="ItAg4rU-SzJh" colab={"base_uri": "https://localhost:8080/"} outputId="698dca01-6212-4e1c-e991-5754ace15009" # !pip install Augmentor # + [markdown] id="BZKzTe3zWL4O" # # # + colab={"base_uri": "https://localhost:8080/"} id="9Egt9EHjR-Dd" outputId="e2f93425-580a-413c-dfcf-a43ae8f8ae79" # adding 500 samples into each of the classes to get the proportions set right path_to_training_dataset=directory import Augmentor for i in class_names: p = Augmentor.Pipeline(path_to_training_dataset + i) p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10) p.sample(500) ## We are adding 500 samples per class to make sure that none of the classes are sparse. # + [markdown] id="CcBIFZGbWuFa" # Augmentor has stored the augmented images in the output sub-directory of each of the sub-directories of skin cancer types.. Lets take a look at total count of augmented images. 
# + colab={"base_uri": "https://localhost:8080/"} id="jxWcMqZhdRWz" outputId="e83b1393-51ac-4b51-b00c-97a4db18e064" image_count_train = len(list(data_dir_train.glob('*/output/*.jpg'))) print(image_count_train) # + [markdown] id="IJ5KarKq4kWJ" # ### Lets see the distribution of augmented data after adding new images to the original training data. # + id="6tODrYIY2nxJ" colab={"base_uri": "https://localhost:8080/"} outputId="58571604-9f9b-44c5-c976-e22a31bd7992" from glob import glob path_list = [x for x in glob(os.path.join(data_dir_train, '*','output', '*.jpg'))] path_list # + id="nZvVdF7g3E1z" colab={"base_uri": "https://localhost:8080/"} outputId="cf502a11-23f3-4941-84a0-cd72da947a51" lesion_list_new = [os.path.basename(os.path.dirname(os.path.dirname(y))) for y in glob(os.path.join(data_dir_train, '*','output', '*.jpg'))] lesion_list_new # + id="okcqVFAA2nxK" dataframe_dict_new = dict(zip(path_list, lesion_list_new)) # + id="njzBxTNT2nxK" colab={"base_uri": "https://localhost:8080/"} outputId="36f01dea-d706-4f32-9293-a05590a31be3" df2 = pd.DataFrame(list(dataframe_dict_new.items()),columns = ['Path','Label']) df2['Label'].value_counts() # + id="is6ozUb0MEPB" colab={"base_uri": "https://localhost:8080/"} outputId="24f8f2a2-7b8f-4566-deec-cb7a3d4f4991" old_path_list = [x for x in glob(os.path.join(data_dir_train, '*','*.jpg'))] lesion_list_old = [os.path.basename(os.path.dirname(y)) for y in glob(os.path.join(data_dir_train, '*','*.jpg'))] dataframe_dict_old = dict(zip(old_path_list, lesion_list_old)) df1 = pd.DataFrame(list(dataframe_dict_old.items()),columns = ['Path','Label']) df1['Label'].value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="rPQASDevQ63O" outputId="3c05ad60-2017-406c-c8ae-319d6255aff5" # creating a new dataframe to visualize the sample size in each class after adding the 500 samples in each class new_df = df1.append(df2) new_df['Label'].value_counts() # + [markdown] id="9NirFBvGPmgI" # So, now we have added 500 images to all the 
classes to maintain some class balance. We can add more images as we want to improve training process. # + [markdown] id="9EnspeMbRWNs" # #### **Todo**: Train the model on the data created using Augmentor # + id="hFcj1XgndRWz" #setting up the parameters batch_size = 32 img_height = 180 img_width = 180 # + [markdown] id="0haOU11Ey8ey" # #### **Todo:** Create a training dataset # + id="H4ZY11judRWz" colab={"base_uri": "https://localhost:8080/"} outputId="31480cd5-bad6-416b-f74d-4bff717044da" #setting the train data after the sample addition data_train_dir = root+"Train/" train_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir_train, seed=123, validation_split = 0.2, subset = 'training', image_size=(img_height, img_width), batch_size=batch_size) # + [markdown] id="mwNJVDuBP5kf" # #### **Todo:** Create a validation dataset # + id="TX191d_3dRW0" colab={"base_uri": "https://localhost:8080/"} outputId="686ab450-e60a-4862-8d6e-9adede89f308" #setting the validation data after the sample addition val_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir_train, seed=123, validation_split = 0.2, subset = 'validation', image_size=(img_height, img_width), batch_size=batch_size) # + [markdown] id="JaoWeOEpVjqH" # #### **Todo:** Create your model (make sure to include normalization) # + id="Ch0MuKvFVr7O" colab={"base_uri": "https://localhost:8080/"} outputId="5242026f-1cba-417b-ab29-de018c35cbfd" ## creating a third model with the new data set and slightly modified dropouts num_classes = 9 model = Sequential() model.add(layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))) model.add(layers.Conv2D(16, 3, padding='same', activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(32, 3, padding='same', activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Conv2D(64, 3, padding='same', 
activation='relu')) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Flatten()) model.add(layers.Dropout(0.2)) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dropout(0.2)) model.add(layers.Dense(128, activation='relu')) layers.Dense(num_classes) # + [markdown] id="Bu5N9LxkVx1B" # #### **Todo:** Compile your model (Choose optimizer and loss function appropriately) # + id="H47GWmLbdRW1" colab={"base_uri": "https://localhost:8080/"} outputId="34c285e3-a915-4142-a5f2-f885a5e76d7c" ## compiling the model model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.summary() # + [markdown] id="9gS-Y1bJV7uy" # #### **Todo:** Train your model # + id="fcV6OdI4dRW1" colab={"base_uri": "https://localhost:8080/"} outputId="6f1b5500-17a7-40ec-bdf4-f080b4482946" # Training the model with 30 epochs epochs = 30 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs ) # + [markdown] id="iuvfCTsBWLMp" # #### **Todo:** Visualize the model results # + id="lCTXwfkTdRW1" colab={"base_uri": "https://localhost:8080/", "height": 503} outputId="9fe79f49-a58a-412d-bded-e77896d7fa86" #visualizing the model results acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.rcParams.update({'font.size': 12}) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() # + [markdown] id="Way4lakC4_p0" # ## **Analysis** # # 1. 
The the accuracy certainly improved after rebalancing the train data set from 64 to 87 # # 2. The overfitting although isn't the best but has certainly improved from the last mode. # # 3. We can add more convolutional and FC layers to get more accurate and reliable model # # # + id="pTOQCwIeSrYd"
20,596
/Untitled.ipynb
3f80f524ba721e2be194d21b1f0727080eaea221
[]
no_license
laneliz/scw_june_capstone_data
https://github.com/laneliz/scw_june_capstone_data
0
0
null
2016-06-12T22:19:35
2016-06-12T14:53:19
null
Jupyter Notebook
false
false
.py
2,019
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # > In this notebook, the best performing model (model 3) will be evaluated. # > # > There are 10 versions of model 3, each with one image left out during the training phase for testing. # > # > Here, each of the 10 versions will make predictions on their respective left out image. # > # #### Imports # + import os import os.path import matplotlib.pyplot as plt import numpy as np import skimage.io import skimage.morphology import tensorflow as tf import keras import utils.metrics import utils.model_builder import utils.dirtools # - # #### Config # + # build session running on GPU 1 configuration = tf.compat.v1.ConfigProto() configuration.gpu_options.allow_growth = True configuration.gpu_options.visible_device_list = "0" session = tf.compat.v1.Session(config = configuration) # apply session tf.compat.v1.keras.backend.set_session(session) # - config_vars = {'root_directory': '/home/jupyter/cell-segmentation/', 'max_training_images': 0, 'create_split_files': False, 'training_fraction': 0.5, 'validation_fraction': 0.25, 'transform_images_to_PNG': True, 'pixel_depth': 8, 'min_nucleus_size': 25, 'boundary_size': 2, 'augment_images': False, 'elastic_points': 16, 'elastic_distortion': 5, 'elastic_augmentations': 10, 'learning_rate': 0.0001, 'epochs': 15, 'steps_per_epoch': 500, 'batch_size': 9 , 'val_batch_size': 1, 'rescale_labels': True, 'crop_size': 256, 'cell_min_size': 1, 'boundary_boost_factor': 1, 'object_dilation': 3, 'raw_images_dir': '/home/jupyter/cell-segmentation/raw_images/', 'raw_annotations_dir': '/home/jupyter/cell-segmentation/raw_annotations/', 'path_files_training': '/home/jupyter/cell-segmentation/training.txt', 'path_files_validation': '/home/jupyter/cell-segmentation/validation.txt', 'path_files_test': 
'/home/jupyter/cell-segmentation/test.txt', 'normalized_images_dir': '/home/jupyter/cell-segmentation/resized_aitslab_png_images/', 'boundary_labels_dir': '/home/jupyter/cell-segmentation/aitslab_boundary_labels/resized_images/', 'experiment_dir': '/home/fvitez/nuclei_detection/experiments/bbbc/out/', 'probmap_out_dir': '/home/fvitez/nuclei_detection/experiments/bbbc/out/prob/', 'labels_out_dir': '/home/fvitez/nuclei_detection/experiments/bbbc/out/segm/', 'model_file': '/home/fvitez/nuclei_detection/experiments/bbbc/model.hdf5', 'csv_log_file': '/home/fvitez/nuclei_detection/experiments/bbbc/log.csv'} # + filename = sorted(os.listdir('/home/jupyter/cell-segmentation/aitslab_boundary_labels/resized_images/')) image_names = [os.path.join('/home/jupyter/cell-segmentation/resized_aitslab_png_images/', f) for f in filename] image_buffer = skimage.io.imread_collection(image_names) images = image_buffer.concatenate() # + #model names #model3_MFGTMPcx7_170702000001_G07f02d0 #model3_MFGTMPcx7_170702090001_B22f15d0 #model3_MFGTMPcx7_170702090001_N06f14d0 #model3_MFGTMPcx7_170731090001_K24f10d0 #model3_MFGTMPcx7_170801050001_A01f03d0 #model3_MFGTMPcx7_170803210001_A01f29d0 #model3_MFGTMPcx7_170803210001_A05f27d0 #model3_MFGTMPcx7_170803210001_B18f27d0 #model3_MFGTMPcx7_170803210001_B19f26d0 #model3_MFGTMPcx7_170803210001_J12f29d0 # - # ##### 1/10 filename[0] # + #from config import config_vars # Partition of the data to make predictions (test or validation) #partition = "validation" image = images[0] experiment_name = 'model3_MFGTMPcx7_170702000001_G07f02d0' config_vars = utils.dirtools.setup_experiment(config_vars, experiment_name) #data_partitions = utils.dirtools.read_data_partitions(config_vars) #config_vars # + for n,i in enumerate(images): image = np.asarray([images[n]]) experiment_name = 'model3_' + filename[n][:-4] config_vars = utils.dirtools.setup_experiment(config_vars, experiment_name) dim1 = image.shape[1] dim2 = image.shape[2] image = image.reshape((-1, dim1, 
dim2, 1)) # preprocess (assuming images are encoded as 8-bits in the preprocessing step) image = image / 255 # build model and load weights model = utils.model_builder.get_model_3_class(dim1, dim2) #model.load_weights('/home/jupyter/cell-segmentation/model.hdf5') model.load_weights(config_vars["model_file"]) # Normal prediction time predictions = model.predict(image, batch_size=1) if n == 1: break # - image = np.asarray([images[0]]) image.shape np.asarray([images[0]])
4,648
/AmirO/Final_Project_CSVs/2020/Cleaning_Data 2020.ipynb
52c95b087df6c95ff10ba62288221487b8bbddb1
[]
no_license
a56k/Final_Project_AQI_Level_SF
https://github.com/a56k/Final_Project_AQI_Level_SF
0
0
null
2021-05-09T22:28:58
2021-05-09T22:28:42
Jupyter Notebook
Jupyter Notebook
false
false
.py
116,517
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: PythonData # language: python # name: pythondata # --- #Import dependencies import pandas as pd import os import numpy as np #Load CSV file_to_load = ("SF 2020 CO.csv") CO_2020 =pd.read_csv(file_to_load) CO_2020.head() #Check Columns CO_2020.columns #Check Null Values CO_2020.columns.isnull() #Drop Columns CO_2020= CO_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) #Check DataFrame CO_2020.head() #Rename Site Name & Daily Concentration Columns CO_2020 = CO_2020.rename(columns={"Site Name": "Site_Name"}) CO_2020 = CO_2020.rename(columns={"Daily Max 8-hour CO Concentration":"Daily_Max_8-hr_CO_Concentration"}) CO_2020.head() #Filter for SF #CO_2020 = CO_2020[(CO_2020.Site_Name == "Oakland West") | (CO_2020.Site_Name == "San Francisco")] CO_2020 = CO_2020[(CO_2020.Site_Name == "San Francisco")] CO_2020.head(10) #Check Count//LEAP YEAR CO_2020.count() #To CSV CO_2020.to_csv(r'2020_co.csv', header= True, index=False) ##Load CSV for NO2 file_to_load = ("SF 2020 NO2.csv") NO2_2020 =pd.read_csv(file_to_load) NO2_2020.head() #Check Columns NO2_2020.columns #Check Null Values NO2_2020.columns.isnull() #Drop Columns NO2_2020 = NO2_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) #Check DataFrame NO2_2020.head() #Rename Site Name & Daily Concentration Columns NO2_2020 = NO2_2020.rename(columns={"Site Name": "Site_Name"}) NO2_2020 = NO2_2020.rename(columns={"Daily Max 1-hour NO2 Concentration":"Daily_Max_1-hr_NO2_Concentration"}) 
NO2_2020.head() #Filter for SF #NO2_2020 = NO2_2020[(NO2_2020.Site_Name == "Oakland West") | (NO2_2020.Site_Name == "San Francisco")] NO2_2020 = NO2_2020[(NO2_2020.Site_Name == "San Francisco")] NO2_2020.head(10) #Check Count for SF//LEAP YEAR NO2_2020.count() # To CSV NO2_2020.to_csv(r'2020_no2.csv', header= True, index=False) ##Load CSV for Ozone file_to_load = ("SF 2020 Ozone.csv") Ozone_2020 =pd.read_csv(file_to_load) Ozone_2020.head() #Check Columns Ozone_2020.columns #Check Null Values Ozone_2020.columns.isnull() #Drop Columns Ozone_2020 = Ozone_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) #Check DataFrame Ozone_2020.head() #Rename Site Name & Daily Concentraion Columns Ozone_2020 = Ozone_2020.rename(columns={"Site Name": "Site_Name"}) Ozone_2020 = Ozone_2020.rename(columns={"Daily Max 8-hour Ozone Concentration": "Daily_Max_8-hr_Ozone_Concentration"}) Ozone_2020.head() #Filter for SF #Ozone_2020 = Ozone_2020[(Ozone_2020.Site_Name == "Oakland West") | (Ozone_2020.Site_Name == "San Francisco")] Ozone_2020 = Ozone_2020[(Ozone_2020.Site_Name == "San Francisco")] Ozone_2020.head(10) #Check count for SF// LEAP YEAR Ozone_2020.count() #To CSV Ozone_2020.to_csv(r'2020_oz.csv', header= True, index=False) # + ##Load CSV for Pb #file_to_load = ("SF 2020 Pb.csv") #Pb_2020 =pd.read_csv(file_to_load) #Pb_2020.head() # + #Check Columns #Pb_2020.columns # + #Check Null Values #Pb_2020.columns.isnull() # + #Drop Columns #Pb_2020 = Pb_2020.drop(columns=['Source','Site ID','POC','Site Name','AQS_PARAMETER_CODE','AQS_PARAMETER_DESC', 'CBSA_CODE','STATE_CODE','STATE','COUNTY_CODE'], axis=1) # + #Check DataFrame #Pb_2020.head() # + #Check count #Pb_2020.count() # - ##Load CSV for PM2.5 file_to_load = ("SF 2020 PM2.5.csv") PM2_5_2020 =pd.read_csv(file_to_load) PM2_5_2020.head() #Check Columns 
PM2_5_2020.columns #Check Null Values PM2_5_2020.columns.isnull() #Drop Columns PM2_5_2020 = PM2_5_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) #Check DataFrame PM2_5_2020.head() #Rename Site Name & Daily Concentration Columns PM2_5_2020 = PM2_5_2020.rename(columns={"Site Name": "Site_Name"}) PM2_5_2020 = PM2_5_2020.rename(columns={"Daily Mean PM2.5 Concentration": "Daily_Mean_PM2.5_Concentration"}) PM2_5_2020.head() #Filter for SF #PM2_5_2020 = PM2_5_2020[(PM2_5_2020.Site_Name == "Oakland West") | (PM2_5_2020.Site_Name == "San Francisco")] PM2_5_2020 = PM2_5_2020[(PM2_5_2020.Site_Name == "San Francisco")] PM2_5_2020.head(10) #Check count for SF// LEAP YEAR PM2_5_2020.count() #To CSV PM2_5_2020.to_csv(r'2020_pm2_5.csv', header= True, index=False) # + ##Load CSV for SO2 #file_to_load = ("SF 2020 SO2.csv") #SO2_2020 =pd.read_csv(file_to_load) #SO2_2020.head() # + #Check Columns #SO2_2020.columns # + #Check Null Values #SO2_2020.columns.isnull() # + #Drop Columns #SO2_2020 = SO2_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) # + #Check DataFrame #SO2_2020.head() # + #Rename Site Name Column #SO2_2020 = SO2_2020.rename(columns={"Site Name": "Site_Name"}) #SO2_2020.head() # + #Rename Site Name & Daily Concentration Columns #SO2_2020 = SO2_2020.rename(columns={"Site Name": "Site_Name"}) #SO2_2020 = SO2_2020.rename(columns={"Daily Max 1-hour SO2 Concentration": "Daily_Max_1-hour_SO2_Concentration"}) #SO2_2020.head() # + #Check Count for SF & Oakland #SO2_2020.count() # + #To CSV #SO2_2020.to_csv(r'SO2_2020_clean.csv', header= True, index=False) # - ##Load CSV for PM10 file_to_load = ("SF 2020 
PM10.csv") PM10_2020 =pd.read_csv(file_to_load) PM10_2020.head() #Check Columns PM10_2020.columns #Check Null Values PM10_2020.columns.isnull() #Drop Columns PM10_2020 = PM10_2020.drop(columns=['Source','Site ID','POC','AQS_PARAMETER_CODE','DAILY_OBS_COUNT','PERCENT_COMPLETE','AQS_PARAMETER_DESC', 'CBSA_CODE','CBSA_NAME','STATE_CODE','STATE','COUNTY_CODE', 'SITE_LATITUDE', 'SITE_LONGITUDE','COUNTY'], axis=1) #Check DataFrame PM10_2020.head() #Rename Site Name & Concentration Columns PM10_2020 = PM10_2020.rename(columns={"Site Name": "Site_Name"}) PM10_2020 = PM10_2020.rename(columns={"Daily Mean PM10 Concentration": "Daily_Mean_PM10_Concentration"}) PM10_2020.head() #Filter for SF #PM10_2020 = PM10_2020[(PM10_2020.Site_Name == "Oakland West") | (PM10_2020.Site_Name == "San Francisco")] PM10_2020 = PM10_2020[(PM10_2020.Site_Name == "San Francisco")] PM10_2020.head(10) #Check Count SF// LEAP YEAR PM10_2020.count() #To CSV PM10_2020.to_csv(r'2020_pm10.csv', header= True, index=False)
7,087
/UAS_KECERDASAN_BUATAN.ipynb
58ce20e11726bb3deb3736e4ebac443bcd33ddae
[]
no_license
hafizh22/UAS-Kecerdasan-Buatan
https://github.com/hafizh22/UAS-Kecerdasan-Buatan
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
18,596
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="ms0u7DyE8Wz2" outputId="35f16658-e324-4b6c-b584-78d89dd64cb1" import numpy as np """ f(x) = a +2b + 3c +4d -30 """ # Inisialiasi jumlah populasi, jumlah gen jml_populasi = 6 jml_gen = 4 kromosom = np.random.randint(0,30 ,(jml_populasi,jml_gen)) print("Kromosom :",kromosom) epoch = 0 while epoch < jml_populasi : # penjumlahan fungsi objektif fo = abs((kromosom[:,0] + 2*kromosom[:,1] + 3*kromosom[:,2] + 4*kromosom[:,3]) - 30 ) print("Fungsi objektif kromosom :", fo) #total fungsi objektif total_fo = fo.sum() print("Total fungsi objectif : ", total_fo) #rata - rata fungsi objektif rata2 = total_fo/jml_populasi print("Rata - rata fungsi objektif : ",rata2 ) # Seleksi fitness kromosom fitness = 1/(1 + fo) print("Fitness :",fitness) # fungsi penjumlahan total fitness total = fitness.sum() print("Total Fitness :",total) # penjumlahan probabilitas setiap kromosom prob = fitness/total print("Probabilitas :",prob) # seleksi menggunakan Roulette Wheel dan penjumlahan komulatif probabilitas C = np.cumsum(prob) print("Penjumlahan Komulatif :", C) # membuat Random Numbers pada range 0-1 Random_num = np.random.random((kromosom.shape[0])) print("Random Numbers :",Random_num) # Membuat matrik kromosom baru kromosom2 = np.zeros((kromosom.shape[0],jml_gen)) for i in range(Random_num.shape[0]): for j in range(kromosom.shape[0]): if Random_num[i] < C[j]: kromosom2[i,:] = kromosom[j,:] break kromosom = kromosom2 print("Kromosom Baru :",kromosom) # crossover R = [np.random.random() for i in range(jml_populasi)] print("Nilai Random :",R) # Crossover Rate pc = 0.25 flag = Random_num < pc print("Flagged Values :",flag) # Menentukan Kromosom Silang # Jika Random Numbers kurang dari crossover rate (0.25) maka True kromosom_silang = 
kromosom[[(i == True) for i in flag]] print("Kromosom Silang :",kromosom_silang) panjang_krom_silang = len(kromosom_silang) # Menjumlahkan nilai silang nilai_silang = np.random.randint(1,3,panjang_krom_silang) print("Nilai Silang :",nilai_silang) copy_kromosom = np.zeros(kromosom_silang.shape) # Copy nilai kromosom untuk penjumlahan for i in range(kromosom_silang.shape[0]): copy_kromosom[i , :] = kromosom_silang[i , :] if panjang_krom_silang == 1: kromosom_silang = kromosom_silang else : for i in range(panjang_krom_silang): c_val = nilai_silang[i] if i == panjang_krom_silang - 1 : kromosom_silang[i , c_val:] = copy_kromosom[0 , c_val:] else : kromosom_silang[i , c_val:] = copy_kromosom[i+1 , c_val:] print("Kromosom Silang:",kromosom_silang) index_kromosom = 0 index_kromosom_baru = 0 for i in flag : if i == True : kromosom[index_kromosom, :] = kromosom_silang[index_kromosom_baru, :] index_kromosom_baru = index_kromosom_baru + 1 index_kromosom = index_kromosom + 1 print("Kromosom Baru:", kromosom) # Mejumlahkan total generation a ,b = kromosom.shape[0] ,kromosom.shape[1] total_gen = a*b print("Total Generations :",total_gen) #rate mutasi = pm pm = 0.1 no_of_mutations = int(np.round(pm * total_gen)) print("Jumlah mutasi :" ,no_of_mutations) # Penjumlahan angka generasi gen_num = np.random.randint(0,total_gen - 1, no_of_mutations) print(" Menentukan Angka Random : " , gen_num) # Menentukan angka random yang akan menggantikan kromosom yang terseleksi untuk dimutasi Replacing_num = np.random.randint(0,30, no_of_mutations) print(" Angka yang akan digantikan : " , Replacing_num) for i in range(no_of_mutations): a = gen_num[i] row = a//4 col = a%4 kromosom[row , col] = Replacing_num[i] print(" Kromosom Setelah Mutasi : " , kromosom) epoch = epoch + 1
4,469
/pt1/09_modules/01-what-is-a-module.ipynb
2f0e964f36567392fd35e18fa380c5f088f04e6a
[]
no_license
mfleader42/deepdive-mine
https://github.com/mfleader42/deepdive-mine
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
20,070
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import math math import fractions fractions junk = math junk is math globals() globals()['math'] type(globals()) type(math) id(math) import math id(math) import sys type(sys.modules) sys.modules['math'] math id(sys.modules['math']) math.__name__ math.__dict__ math.sqrt math.__dict__['sqrt'] dir(fractions) fractions.__spec__ type(fractions) type(print) import types isinstance(math, types.ModuleType) mod = types.ModuleType('test', 'this is a test module.') mod.__dict__ mod.pi = 3.14 mod.__dict__ mod.hello = lambda: 'hello' mod.hello() hello = mod.hello 'hello' in globals() 'mod' in globals() hello() from collections import namedtuple mod.Point = namedtuple('Point', 'x y') p1 = mod.Point(0,0) p2 = mod.Point(1,1) dir(mod) pt = getattr(mod, 'Point') pt(20,20) pt2 = mod.__dict__['Point'](2,2) pt2
1,115
/module2-regression-2/Adewale_Adeagbo_DS13_LS_DS_212_assignment.ipynb
bac92c41851d7e8a62117ea79ec315d4e936d4e3
[ "MIT" ]
permissive
adxpillar/DS-Unit-2-Linear-Models
https://github.com/adxpillar/DS-Unit-2-Linear-Models
0
0
MIT
2020-03-09T18:15:26
2020-03-04T12:07:37
null
Jupyter Notebook
false
false
.py
36,177
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="JcW7apsEiGVw" outputId="0996b78e-1005-4752-e9b5-94e909d85541" # !pip install keras-pos-embd # !pip install tensorflow-addons # + id="OkEw7Oykt0DJ" #importing libraries from __future__ import print_function from functools import reduce import json import os import re import tarfile import tempfile import keras import keras.backend as K import tensorflow as tf from tensorflow import keras from keras import layers from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers import merge, recurrent, Dense, Input, Dropout, TimeDistributed, concatenate, Layer from keras.layers.embeddings import Embedding from keras.layers.normalization import BatchNormalization from keras.layers.wrappers import Bidirectional from keras.models import Model from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.regularizers import l2 from keras_pos_embd import TrigPosEmbedding from keras.utils import np_utils import numpy as np from tensorflow.keras import layers from tensorflow import keras import tensorflow_addons as tfa import matplotlib.pyplot as plt import tensorflow as tf import numpy as np np.random.seed(1337) # + [markdown] id="HhF9X3RUhcw7" # ###*Notes* # + id="1fvREZVj0Qa_" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="2907c8df-994a-444c-a6bf-d487e21bacb2" '''paper proposes three different modules: ViT lite, Compact Vision Transformers, Compact Convolutional Transformers fewer encoder layers, MHSA heads, and smaller dimensions of patches ViT lite => ViT, with smaller patch sizing, compact version of ViT => sequence pooling => forming compact vision transformer => adding convolutional blocks to the tokenization step to introduce 
inductive biases CVT => (uses patch-based tokenization) ViT with seqpool => sequential pooling methods, SeqPool, pools sequential based information that results from transformer encoder and eliminates the need for extra classification token CCT => (uses convolitional tokeniztion) convolutional based patching method, which preserves local information and is able to encode relationships between patched, doesnt depend on positional embedding 12 layers, 2 convolutional blocks with 7x7 convolutions uses stochastic depth with p = 0.1 ViT additional designs class token MLP classifier head on the left positional encoding uses learnable embedding as opposed to sinusoidal embeddings transformers lacks inductive biases convolutional block x0 = max_pool(ReLU(Conv2d(x))) d = 64 filters helps mainting local spatial information transformer based back-bone encoder - transformer block - MSA layer and an MLP head layer norm after positional embeddings GELU activation SeqPool to replace class tokens maps nxd output to nx1 vector sequential outputs => single class assigning important weights across sequence of data pool outputs of the transformed backbone across the sequence T: R(bXnXd) => R(bXd) xL = f(x0) where xL is output of L layer of transformer, b is mini-batch size, n is sequence length, d is embedding dimension => pass xL to linear layer g(xL) and apply softmax activation: => xL' = softmax(g(xL).T) => z = xL'@xL => pooling = > linear classifier (1xd) output ablation ideas 1) study the effects of seqpool, convolutional layers, 2) study the effects of learning positional embedding, sinusoidal embedding 3) check whether the model needs high dimensions instead of high number of training data 4) using involution instead of convolutions 5) removing lowest attnetions and using attention rollout https://arxiv.org/pdf/2005.00928.pdf 6) using different loss functions (patch wise contrastive loss, patch wise mixing loss) https://arxiv.org/pdf/2104.12753.pdf 7) using bio-medical dataset 
with low train data to test 8)study the features learnt by the different models. class activation maps for CNNs and visualize the self-attention maps for transformers. https://arxiv.org/abs/2105.01601 https://arxiv.org/abs/2105.08050 http://proceedings.mlr.press/v139/tay21a.html https://arxiv.org/abs/2106.10270 https://github.com/jacobgil/vit-explain https://github.com/ashishpatel26/Vision-Transformer-Keras-Tensorflow-Pytorch-Examples/blob/main/Vision_Transformer_with_tf2.ipynb overview of architecture inputs => convolution => features(reshape) => optional positional embedding => transformer encoder => sequence pooling => mlp head => classification ''' # + [markdown] id="W6FYVa2Hhrzs" # ###*Common functions* # + colab={"base_uri": "https://localhost:8080/"} id="NcqYc5uuglVg" outputId="4d1ff7d8-7e48-479f-ae1b-2f5ad5b9de2d" #loading dataset num_classes = 10 input_shape = (32, 32, 3) (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) # print(f"x_Train shape = {x_train.shape}, y_train shape = {y_train.shape}, x_test shape = {x_test.shape}, y_test shape = {y_test.shape}") # + id="UY1fZhKln__A" positional_emb = True conv_layers = 2 projection_dim = 128 num_heads = 2 transformer_units = [ projection_dim, projection_dim, ] transformer_layers = 2 stochastic_depth_rate = 0.1 learning_rate = 0.001 weight_decay = 0.0001 batch_size = 128 num_epochs = 30 image_size = 32 # + id="HFOXhKBxgZrE" #function for multi-layer perceptron def mlp(x, hidden_units, dropout_rate): for units in hidden_units: x = layers.Dense(units, activation=tf.nn.gelu)(x) x = layers.Dropout(dropout_rate)(x) return x # + id="9ZBilUxIgl7S" # Referred from: github.com:rwightman/pytorch-image-models. 
class StochasticDepth(layers.Layer): def __init__(self, drop_prop, **kwargs): super(StochasticDepth, self).__init__(**kwargs) self.drop_prob = drop_prop def call(self, x, training=None): if training: keep_prob = 1 - self.drop_prob shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x # + id="vlyhPDb8gs1m" # data augmentation augmentation = keras.Sequential( [ layers.experimental.preprocessing.Rescaling(scale=1.0 / 255), layers.experimental.preprocessing.RandomCrop(image_size, image_size), layers.experimental.preprocessing.RandomFlip("horizontal"), ] ) # + [markdown] id="xyVqPAYebnvt" # ###*ViT-lite* # # + id="bHC0HbvZVUzY" learning_rate = 0.001 weight_decay = 0.0001 batch_size = 256 num_epochs = 30 image_size = 32 patch_size = 6 num_patches = (image_size // patch_size) ** 2 projection_dim = 64 #projecting patches to this feature size num_heads = 4 #paramerization of skip connections transformer_units = [ projection_dim * 2, projection_dim, ] # Size of the transformer layers transformer_layers = 8 mlp_head_units = [2048, 1024] # Size of the dense layers of the final classifier # + id="0JzrS_V1bllu" #implementing patch creation as a layer class Patches(layers.Layer): def __init__(self, patch_size): super(Patches, self).__init__() self.patch_size = patch_size def call(self, images): batch_size = tf.shape(images)[0] patches = tf.image.extract_patches( images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding="VALID", ) patch_dims = patches.shape[-1] patches = tf.reshape(patches, [batch_size, -1, patch_dims]) return patches # + id="mowjgpp6bqV3" class PatchEncoder(layers.Layer): def __init__(self, num_patches, projection_dim): super(PatchEncoder, self).__init__() self.num_patches = num_patches self.projection = 
layers.Dense(units=projection_dim) #applying embedding layer on patches self.position_embedding = layers.Embedding( input_dim=num_patches, output_dim=projection_dim ) def call(self, patch): positions = tf.range(start=0, limit=self.num_patches, delta=1) encoded = self.projection(patch) + self.position_embedding(positions) return encoded # + id="uA6RlMZ-cDhj" #transformer with sequence pooling y = [] def transformer( image_size = image_size, input_shape = input_shape, num_heads = num_heads, projection_dim = projection_dim, transformer_units = transformer_units ): inputs = layers.Input(input_shape) #data augmentation augmented = augmentation(inputs) #create patches patches = Patches(patch_size)(inputs) #enocde patches encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) #adding positional embedding # if positional_emb: # embed_layer, seq_len = Tokenizer.positional_embedding(image_size) # positions = tf.range(start=0, limit=seq_len, delta=1) # positional_embeddings = embed_layer(positions) # encoded_patches += positional_embeddings #creating layers from transformer block # Create multiple layers of the Transformer block. for _ in range(transformer_layers): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. attention_output = layers.MultiHeadAttention( num_heads=num_heads, key_dim=projection_dim, dropout=0.1 )(x1, x1) y.append(attention_output) # Skip connection 1. x2 = layers.Add()([attention_output, encoded_patches]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1) # Skip connection 2. encoded_patches = layers.Add()([x3, x2]) # Create a [batch_size, projection_dim] tensor. 
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) representation = layers.Flatten()(representation) representation = layers.Dropout(0.5)(representation) #no sequence pooling in ViT lite # Add MLP. features = mlp(representation, hidden_units=transformer_units, dropout_rate=0.5) #sequence pooling # representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches) # attention_weights = tf.nn.softmax(layers.Dense(1)(representation), axis=1) # weighted_representation = tf.matmul( # attention_weights, representation, transpose_a=True # ) # weighted_representation = tf.squeeze(weighted_representation, -2) # Classify outputs. logits = layers.Dense(num_classes)(features) # Create the Keras model. model = keras.Model(inputs=inputs, outputs=logits) return model # + id="paP8dZV8cGOP" def training(model): optimizer = tfa.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001) filepath = "/sample_data/tmp/checkpoint" model.compile( optimizer=optimizer, loss=keras.losses.CategoricalCrossentropy( from_logits=True, label_smoothing=0.1 ), metrics=[ keras.metrics.CategoricalAccuracy(name="accuracy"), # keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"), ], ) es = keras.callbacks.ModelCheckpoint( filepath, monitor="val_accuracy", save_best_only=True, save_weights_only=True, ) history = model.fit(x = x_train, y = y_train, batch_size = batch_size, epochs = num_epochs, validation_split= 0.1, callbacks = [es]) accuracy = model.evaluate(x_test, y_test) # print(f"Test accuracy: {round(accuracy * 100, 2)}%") # print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%") return history # + id="x14gT1jKE42p" colab={"base_uri": "https://localhost:8080/"} outputId="3f48a2af-a71a-43e0-99fd-3cd63d7ddaab" model = transformer() train = training(model) # + colab={"base_uri": "https://localhost:8080/", "height": 546} id="fR8-i3r9zb7L" outputId="4f290809-4b18-4162-9c5e-99f5022d3848" import matplotlib.pyplot as plt plt.figure(figsize=(4, 4)) image = 
x_train[np.random.choice(range(x_train.shape[0]))] plt.imshow(image.astype("uint8")) plt.axis("off") plt.savefig("original.png", dpi=300, bbox_inches="tight") resized_image = tf.image.resize( tf.convert_to_tensor([image]), size=(image_size, image_size) ) patches = Patches(patch_size)(resized_image) print(f"Image size: {image_size} X {image_size}") print(f"Patch size: {patch_size} X {patch_size}") print(f"Patches per image: {patches.shape[1]}") print(f"Elements per patch: {patches.shape[-1]}") n = int(np.sqrt(patches.shape[1])) plt.figure(figsize=(4, 4)) for i, patch in enumerate(patches[0]): ax = plt.subplot(n, n, i + 1) patch_img = tf.reshape(patch, (patch_size, patch_size, 3)) plt.imshow(patch_img.numpy().astype("uint8")) plt.axis("off") plt.savefig("patched.png", dpi=300, bbox_inches="tight") # + id="dqvVKaoGi5hK" #visualizing progress plt.plot(train.history["loss"], label="train_loss") plt.plot(train.history["val_loss"], label="val_loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.title("Train and Validation Losses Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() # + id="QfNm5twRi6Ng" #visualizing progress plt.plot(train.history["accuracy"], label="accuracy") plt.plot(train.history["val_accuracy"], label="val_accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.title("Train and Validation Accuracy Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() # + [markdown] id="sbo1PzWdTTrF" # ###*Compact Vision Transformer (CVT)* # + id="UVUNory0Vao_" # positional_emb = True # conv_layers = 2 # projection_dim = 128 # patch_size = 6 # num_heads = 2 # transformer_units = [ # projection_dim, # projection_dim, # ] # transformer_layers = 2 # stochastic_depth_rate = 0.1 # num_patches = (image_size // patch_size) ** 2 # learning_rate = 0.001 # weight_decay = 0.0001 # batch_size = 128 # num_epochs = 30 # image_size = 32 learning_rate = 0.001 weight_decay = 0.0001 batch_size = 256 num_epochs = 30 image_size = 32 # We'll resize input images to this 
size patch_size = 6 # Size of the patches to be extract from the input images num_patches = (image_size // patch_size) ** 2 projection_dim = 64 conv_layers = 2 positional_emb = True num_heads = 4 transformer_units = [ projection_dim * 2, projection_dim, ] # Size of the transformer layers transformer_layers = 8 mlp_head_units = [2048, 1024] # Size of the dense layers of the final classifier # + id="PWqoi8pbVQ-g" class Patches(layers.Layer): def __init__(self, patch_size): super(Patches, self).__init__() self.patch_size = patch_size def call(self, images): batch_size = tf.shape(images)[0] patches = tf.image.extract_patches( images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding="VALID", ) patch_dims = patches.shape[-1] patches = tf.reshape(patches, [batch_size, -1, patch_dims]) return patches # + id="KQfUe3G_VhJ_" class PatchEncoder(layers.Layer): def __init__(self, num_patches, projection_dim): super(PatchEncoder, self).__init__() self.num_patches = num_patches self.projection = layers.Dense(units=projection_dim) self.position_embedding = layers.Embedding( input_dim=num_patches, output_dim=projection_dim ) def call(self, patch): positions = tf.range(start=0, limit=self.num_patches, delta=1) encoded = self.projection(patch) + self.position_embedding(positions) return encoded # + id="6OZPSBIOmBzr" #tokenizer consists of convolutional layers #for convolutional tokenization # inputs => embed_to_patches/conv_layer=> linear_projection/pooling => reshape # class Tokenizer(layers.Layer): # def __init__( # self, # kernel_size=3, # stride=1, # padding=1, # pooling_kernel_size=3, # pooling_stride=2, # num_conv_layers=conv_layers, # num_output_channels=[64, 128], # positional_emb=positional_emb, # **kwargs, # ): # super(Tokenizer, self).__init__(**kwargs) # # convolutional layers # self.conv_model = keras.Sequential() # for i in range(num_conv_layers): # self.conv_model.add( # layers.Conv2D( # 
num_output_channels[i], # kernel_size, # stride, # padding="valid", # use_bias=False, # activation="relu", # kernel_initializer="he_normal", # ) # ) # self.conv_model.add(layers.ZeroPadding2D(padding)) # #linear pooling # self.conv_model.add( # layers.MaxPool2D(pooling_kernel_size, pooling_stride, "same") # ) # self.positional_emb = positional_emb # #reshape # def call(self, images): # outputs = self.conv_model(images) # # After passing the images through our mini-network the spatial dimensions # # are flattened to form sequences. # reshaped = tf.reshape( # outputs, # (-1, tf.shape(outputs)[1] * tf.shape(outputs)[2], tf.shape(outputs)[-1]), # ) # return reshaped # #position embedding(optinal) # #calculating the number of sequences and initialize an embedding layer which is learned # def positional_embedding(self, image_size): # # Positional embeddings are optional in CCT. Here, we calculate # # the number of sequences and initialize an `Embedding` layer to # # compute the positional embeddings later. 
# if self.positional_emb: # dummy_inputs = tf.ones((1, image_size, image_size, 3)) # dummy_outputs = self.call(dummy_inputs) # sequence_length = tf.shape(dummy_outputs)[1] # projection_dim = tf.shape(dummy_outputs)[-1] # embed_layer = layers.Embedding( # input_dim=sequence_length, output_dim=projection_dim # ) # return embed_layer, sequence_length # # embed_layer = keras.models.Sequential() # # embed_layer.add(TrigPosEmbedding( # # input_dim=sequence_length, # # output_dim=projection_dim, # # mode = TrigPosEmbedding.MODE_EXPAND, # # )) # # return embed_layer, sequence_length # else: # return None # + id="nK7YWIFIUmot" #transformer with sequence pooling def transformer( image_size = image_size, input_shape = input_shape, num_heads = num_heads, projection_dim = projection_dim, transformer_units = transformer_units ): inputs = layers.Input(input_shape) #data augmentation augmented = augmentation(inputs) #create patches patches = Patches(patch_size)(inputs) #enocde patches encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) #adding positional embedding # if positional_emb: # embed_layer, seq_len = Tokenizer.positional_embedding(image_size) # positions = tf.range(start=0, limit=seq_len, delta=1) # positional_embeddings = embed_layer(positions) # encoded_patches += positional_embeddings #creating layers from transformer block # Create multiple layers of the Transformer block. for _ in range(transformer_layers): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. attention_output = layers.MultiHeadAttention( num_heads=num_heads, key_dim=projection_dim, dropout=0.1 )(x1, x1) # Skip connection 1. x2 = layers.Add()([attention_output, encoded_patches]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1) # Skip connection 2. 
encoded_patches = layers.Add()([x3, x2]) # Create a [batch_size, projection_dim] tensor. representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) representation = layers.Flatten()(representation) representation = layers.Dropout(0.5)(representation) # Add MLP. features = mlp(representation, hidden_units=transformer_units, dropout_rate=0.5) #sequence pooling representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches) attention_weights = tf.nn.softmax(layers.Dense(1)(representation), axis=1) weighted_representation = tf.matmul( attention_weights, representation, transpose_a=True ) weighted_representation = tf.squeeze(weighted_representation, -2) # Classify outputs. logits = layers.Dense(num_classes)(weighted_representation) # Create the Keras model. model = keras.Model(inputs=inputs, outputs=logits) return model # + id="gMD64trsUrY1" #model training def training(model): optimizer = tfa.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001) filepath = "/sample_data/tmp/checkpoint" model.compile( optimizer=optimizer, loss=keras.losses.CategoricalCrossentropy( from_logits=True, label_smoothing=0.1 ), metrics=[ keras.metrics.CategoricalAccuracy(name="accuracy") ], ) es = keras.callbacks.ModelCheckpoint( filepath, monitor="val_accuracy", save_best_only=True, save_weights_only=True, ) history = model.fit(x = x_train, y = y_train, batch_size = batch_size, epochs = num_epochs, validation_split= 0.1, callbacks = [es]) _, accuracy, top_5_accuracy = model.evaluate(x_test, y_test) print(f"Test accuracy: {round(accuracy * 100, 2)}%") return history # + id="a0_xXgzZU0wR" model = transformer() # + colab={"base_uri": "https://localhost:8080/"} id="dunBPSB9J5op" outputId="742a0463-169b-4ff6-d7b7-1a834f46bf63" train = training(model) # + id="nf7Ue9sDi2yn" colab={"base_uri": "https://localhost:8080/", "height": 229} outputId="6cf78ae4-01c0-4e95-af08-1ba306aa4203" #visualizing progress plt.plot(train.history["loss"], label="train_loss") 
plt.plot(train.history["val_loss"], label="val_loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.title("Train and Validation Losses Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() # + id="1YrI-iQDixy6" colab={"base_uri": "https://localhost:8080/", "height": 229} outputId="8fdc1204-7cde-4ea1-fc85-3359487e1b77" #visualizing progress plt.plot(train.history["accuracy"], label="accuracy") plt.plot(train.history["val_accuracy"], label="val_accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.title("Train and Validation Accuracy Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() # + [markdown] id="a6DhSi_XTJpa" # ### *Compact Convolutional Transformer (CCT)* # + id="JcAO4B8K25Gp" #tokenizer consists of convolutional layers #for convolutional tokenization # inputs => embed_to_patches/conv_layer=> linear_projection/pooling => reshape positional_emb = True conv_layers = 2 projection_dim = 128 num_heads = 2 transformer_units = [ projection_dim, projection_dim, ] transformer_layers = 2 stochastic_depth_rate = 0.1 learning_rate = 0.001 weight_decay = 0.0001 batch_size = 128 num_epochs = 30 image_size = 32 class Tokenizer(layers.Layer): def __init__( self, kernel_size=3, stride=1, padding=1, pooling_kernel_size=3, pooling_stride=2, num_conv_layers=conv_layers, num_output_channels=[64, 128], positional_emb=positional_emb, **kwargs, ): super(Tokenizer, self).__init__(**kwargs) # convolutional layers self.conv_model = keras.Sequential() for i in range(num_conv_layers): self.conv_model.add( layers.Conv2D( num_output_channels[i], kernel_size, stride, padding="valid", use_bias=False, activation="relu", kernel_initializer="he_normal", ) ) self.conv_model.add(layers.ZeroPadding2D(padding)) #linear pooling self.conv_model.add( layers.MaxPool2D(pooling_kernel_size, pooling_stride, "same") ) self.positional_emb = positional_emb #reshape def call(self, images): outputs = self.conv_model(images) # After passing the images through our mini-network the spatial 
dimensions # are flattened to form sequences. reshaped = tf.reshape( outputs, (-1, tf.shape(outputs)[1] * tf.shape(outputs)[2], tf.shape(outputs)[-1]), ) return reshaped #position embedding(optinal) #calculating the number of sequences and initialize an embedding layer which is learned def positional_embedding(self, image_size): # calculating the number of sequences and initialize an embedding layer to # compute the positional embeddings later if self.positional_emb: dummy_inputs = tf.ones((1, image_size, image_size, 3)) dummy_outputs = self.call(dummy_inputs) sequence_length = tf.shape(dummy_outputs)[1] projection_dim = tf.shape(dummy_outputs)[-1] embed_layer = layers.Embedding( input_dim=sequence_length, output_dim=projection_dim ) return embed_layer, sequence_length # embed_layer = keras.models.Sequential() # embed_layer.add(TrigPosEmbedding( # input_dim=sequence_length, # output_dim=projection_dim, # mode = TrigPosEmbedding.MODE_EXPAND, # )) # return embed_layer, sequence_length else: return None # + id="-TJ4LI2SwUos" #transformer with sequence pooling def transformer( image_size = image_size, input_shape = input_shape, num_heads = num_heads, projection_dim = projection_dim, transformer_units = transformer_units ): inputs = layers.Input(input_shape) #augmentation augmented = augmentation(inputs) #tokenization and encoding patches tokens = Tokenizer() encoded_patches = tokens(augmented) #adding positional embedding if positional_emb: embed_layer, seq_len = tokens.positional_embedding(image_size) positions = tf.range(start=0, limit=seq_len, delta=1) positional_embeddings = embed_layer(positions) encoded_patches += positional_embeddings # Calculate Stochastic Depth probabilities. 
dpr = [x for x in np.linspace(0, stochastic_depth_rate, transformer_layers)] #creating layers from transformer block for i in range(transformer_layers): #layer normalization x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches) #create a mult-head attention attention_output = layers.MultiHeadAttention(num_heads = num_heads, key_dim=projection_dim, dropout=0.1)(x1, x1) #skip connection x2 = layers.Add()([attention_output, encoded_patches]) #layer normalization 2 x3 = layers.LayerNormalization(epsilon=1e-5)(x2) # MLP. x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1) #skip connection 2 encoded_patches = layers.Add()([x3, x2]) # Create a [batch_size, projection_dim] tensor. #apply sequence pooling representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) attention_weights = tf.nn.softmax(layers.Dense(1)(representation), axis=1) representation = layers.Flatten()(representation) representation = layers.Dropout(0.5)(representation) # Add MLP. features = mlp(representation, hidden_units=transformer_units, dropout_rate=0.5) #sequence pooling representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches) attention_weights = tf.nn.softmax(layers.Dense(1)(representation), axis=1) weighted_representation = tf.matmul( attention_weights, representation, transpose_a=True ) weighted_representation = tf.squeeze(weighted_representation, -2) # Classify outputs. logits = layers.Dense(num_classes)(weighted_representation) # Create the Keras model. 
model = keras.Model(inputs=inputs, outputs=logits) return model # + id="80cM37SxQG2U" #model training def training(model, x_train, y_train): optimizer = tfa.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001) filepath = "/sample_data/tmp/checkpoint" model.compile( optimizer=optimizer, loss=keras.losses.CategoricalCrossentropy( from_logits=True, label_smoothing=0.1 ), metrics=[ keras.metrics.CategoricalAccuracy(name="accuracy"), ], ) es = keras.callbacks.ModelCheckpoint( filepath, monitor="val_accuracy", save_best_only=True, save_weights_only=True, ) history = model.fit(x = x_train, y = y_train, batch_size = batch_size, epochs = num_epochs, validation_split= 0.1, callbacks = [es]) return history # + id="Ob54kiv3f1d8" model = transformer() train = training(model, x_train, y_train) # + id="zgT0ppMeiVft" #visualizing progress plt.plot(history.history["loss"], label="train_loss") plt.plot(history.history["val_loss"], label="val_loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.title("Train and Validation Losses Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() # + id="_uVpNvuQihCF" #visualizing progress plt.plot(history.history["accuracy"], label="accuracy") plt.plot(history.history["val_accuracy"], label="val_accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.title("Train and Validation Accuracy Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show()
30,753
/gcolab/transfer_learning.ipynb
eadf75de8dd9333169c76ce000e90ccca281f5cd
[]
no_license
tlizri/real-time-facial-expression-analyzer
https://github.com/tlizri/real-time-facial-expression-analyzer
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
9,439
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="vAKPjcg-mmCV" from tensorflow.keras.layers import GlobalAveragePooling2D, BatchNormalization, Dropout, Dense, RandomRotation, RandomTranslation, RandomFlip, RandomContrast from tensorflow.keras.applications import EfficientNetB0 from tensorflow.keras import Input,Model from tensorflow.keras.optimizers import Adam from tensorflow.config import list_physical_devices from tensorflow.keras.utils import Sequence from tensorflow.keras.models import Sequential from numpy import load, sum, array from numpy.random import shuffle from tensorflow.keras.callbacks import CSVLogger from time import time from numpy import ceil, arange from cv2 import imread, resize, IMREAD_COLOR import matplotlib.pyplot as plt # + id="7eozPhDWluJs" print("Num GPUs Available: ", len(list_physical_devices('GPU'))) CLASSES = 3 BATCH_SIZE = 32 RED = 'efficientnet' # vgg16, efficientnet, xception pchs = 40 lr=0.001 if RED == 'vgg16': MIN_SIZE = 224 elif RED == 'efficientnet': MIN_SIZE = 224 elif RED == 'xception': MIN_SIZE = 299 from google.colab import drive drive.mount('/gdrive') PATH = '/gdrive' + '/dataset' FILTRAR = 'filt' # string -> filt, nofilt HOMO = 'balan' # string -> balan, nobalan class AffectNetSequence(Sequence): # Clase para realizar una carga de imagenes por lotes debido a la magnitud # del dataset def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size self.indexes = arange(len(self.x)) def __len__(self): return int(ceil(len(self.x) / self.batch_size)) def __getitem__(self, idx): index = self.indexes[idx * self.batch_size:(idx + 1) * self.batch_size] batch_x = self.x[index] batch_y = self.y[index] return array([resize(imread(file_name, IMREAD_COLOR), (MIN_SIZE,MIN_SIZE)) for file_name in batch_x], dtype=float), \ array(batch_y, 
dtype = int) def on_epoch_end(self): shuffle(self.indexes) img_augmentation = Sequential( [ RandomRotation(factor=0.10), RandomContrast(factor=0.1), ], name="img_augmentation", ) # + id="ylSv4_PbmdJ-" input = Input(shape=(MIN_SIZE, MIN_SIZE, 3)) x = img_augmentation(input) if RED == 'vgg16': base_model = vgg16( weights='imagenet', # Load weights pre-trained on ImageNet. input_tensor=x, include_top=False) elif RED == 'efficientnet': base_model = EfficientNetB0( weights='imagenet', # Load weights pre-trained on ImageNet. input_tensor=x, include_top=False) elif RED == 'xception': base_model = Xception( weights='imagenet', # Load weights pre-trained on ImageNet. input_tensor=x, include_top=False) elif RED == 'mobilenetv2': base_model = mobilenetv2( weights='imagenet', # Load weights pre-trained on ImageNet. input_tensor=x, include_top=False) print(range(221)) for i in range(221): base_model.layers[i].trainable = False x = GlobalAveragePooling2D()(base_model.output) x = BatchNormalization()(x) dropout_rate = 0.2 x = Dropout(dropout_rate)(x) output = Dense(CLASSES, activation="softmax")(x) model = Model(inputs=input, outputs=output) model.compile(optimizer=Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-07), loss="categorical_crossentropy", metrics=["accuracy"]) model.summary() # + id="xw3yTwC--CR8" x_train = load(PATH + '/x_train_data_' + str(CLASSES) + 'classes_' + FILTRAR + '_' + HOMO + '_' + 'size=' + str(MIN_SIZE) + '.npy', allow_pickle=True) x_val = load(PATH + '/x_val_data_' + str(CLASSES) + 'classes_' + FILTRAR + '_' + HOMO + '_' + 'size=' + str(MIN_SIZE) + '.npy', allow_pickle=True) y_train = load(PATH + '/y_train_data_' + str(CLASSES) + 'classes_' + FILTRAR + '_' + HOMO + '_' + 'size=' + str(MIN_SIZE) + '.npy', allow_pickle=True) y_val = load(PATH + '/y_val_data_' + str(CLASSES) + 'classes_' + FILTRAR + '_' + HOMO + '_' + 'size=' + str(MIN_SIZE) + '.npy', allow_pickle=True) y_t = sum(y_train, axis=0) y_v = sum(y_val, axis=0) print("Etiquetas de 
entrenamiento: " + str(y_t)) print("Etiquetas de evaluacion: " + str(y_v)) print("Imagenes de entrenamiento: " + str(len(x_train))) print("Imagenes de validación: " + str(len(x_val))) print('Ratio train-val: ' + str(int(100*((len(x_val))/((len(x_train))+(len(x_val)))))) + '%') train_generator = AffectNetSequence(x_train, y_train, BATCH_SIZE) val_generator = AffectNetSequence(x_val, y_val, BATCH_SIZE) # + id="cx2AbQDm2_wJ" #Transfer Learning start_time = time() history = model.fit( x=train_generator, epochs=pchs, verbose=1, validation_data=val_generator, initial_epoch=0, steps_per_epoch=int(len(x_train)/BATCH_SIZE), max_queue_size=8, workers=4, use_multiprocessing=True ) end_time = time() execution_time = round((end_time - start_time)/3600, 2) print(str(execution_time)) model.save_weights(PATH + '/' + RED + '_learningrate=' + str(lr) + '_' + str(CLASSES) + 'classes_epoch=' + str(pchs) + '_time'+ str(execution_time) + '_' + FILTRAR + '_' + HOMO + '_transfer_learning' + '.h5', save_format="h5") # + id="1l2kA_hNAo1F" acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Entrenamiento') plt.plot(epochs, val_acc, 'b', label='Validación') plt.title('Precisión de entrenamiento y validación') plt.xlabel("Época") plt.xlabel("Precisión") plt.ylim(0, 1) plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Entrenamiento') plt.plot(epochs, val_loss, 'b', label='Validación') plt.title('Perdidas de entrenamiento y validación') plt.xlabel("Época") plt.xlabel("Pérdidas") plt.legend() plt.show()
6,253
/Chapter09/.ipynb_checkpoints/Chap9-checkpoint.ipynb
14c3d3b633444e2dce5e064ef9e39c854624ab82
[ "MIT" ]
permissive
GeekDisplaced/Essential-Statistics-for-Non-STEM-Data-Analysts
https://github.com/GeekDisplaced/Essential-Statistics-for-Non-STEM-Data-Analysts
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
290,348
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="NQt_zSDId4ku" # # Statistics for Classification # + colab={} colab_type="code" id="L-vLlLqCXRsg" import numpy as np import matplotlib.pyplot as plt import random # %matplotlib inline # - # ## Logistic Regression # + np.random.seed(894288) fg_index = np.linspace(0,100,6) stock_index_change = (fg_index-50)*0.3 - np.sign((fg_index-50)*0.3)*8 plt.figure(figsize=(10,6)) plt.scatter(fg_index[stock_index_change > 0], stock_index_change[stock_index_change > 0], s=200, marker=6, label="Up") plt.scatter(fg_index[stock_index_change < 0], stock_index_change[stock_index_change < 0], s=200, marker=7, label="Down") plt.hlines(0,0,100,label="Neutral line") plt.xlabel("Fear & Greedy Index",fontsize=20) plt.ylabel("Stock Index Change",fontsize=20) plt.legend(ncol=3); # + slope, intercept = 0.1, 0 fig, ax1 = plt.subplots(figsize=(10,6)) ax1.scatter(fg_index[stock_index_change > 0], (slope*fg_index + intercept)[stock_index_change > 0], s=200, marker=6, label = "Up") ax1.scatter(fg_index[stock_index_change < 0], (slope*fg_index + intercept)[stock_index_change < 0], s=200, marker=7, label = "Down") ax1.plot(fg_index, slope*fg_index + intercept, linewidth=2, c="red", label="Odds") ax2 = ax1.twinx() ax2.scatter(fg_index[stock_index_change > 0], stock_index_change[stock_index_change > 0], s=100, marker=6, label="Up") ax2.scatter(fg_index[stock_index_change < 0], stock_index_change[stock_index_change < 0], s=100, marker=7, label="Down") ax2.arrow(fg_index[2],stock_index_change[2]-0.5,0,-4.5,head_width=2, head_length=1, fc='k', ec='k',linewidth=2) ax1.set_xlabel("Fear & Greedy Index",fontsize=20) ax1.set_ylabel("Odds",fontsize=20) ax2.set_ylabel("Stock Index Change",fontsize=20) plt.legend(fontsize=20); # - # ### Change intercept # + 
slope, intercept = 0.1, -5 fig, ax1 = plt.subplots(figsize=(10,6)) ax1.scatter(fg_index[stock_index_change > 0], (slope*fg_index + intercept)[stock_index_change > 0], s=200, marker=6, label = "Up") ax1.scatter(fg_index[stock_index_change < 0], (slope*fg_index + intercept)[stock_index_change < 0], s=200, marker=7, label = "Down") ax1.plot(fg_index, slope*fg_index + intercept, linewidth=2, c="red") ax2 = ax1.twinx() ax2.scatter(fg_index[stock_index_change > 0], stock_index_change[stock_index_change > 0], s=100, marker=6, label="Up") ax2.scatter(fg_index[stock_index_change < 0], stock_index_change[stock_index_change < 0], s=100, marker=7, label="Down") ax2.arrow(fg_index[2],stock_index_change[2]-0.5,0,-4.5,head_width=2, head_length=1, fc='k', ec='k',linewidth=2) # ax1.axhline(0,linewidth=2,linestyle=":",label="Critical Odds value") ax1.set_xlabel("Fear & Greedy Index",fontsize=20) ax1.set_ylabel("Odds - 5",fontsize=20) ax2.set_ylabel("Stock Index Change",fontsize=20) plt.legend(fontsize=20); # - # ### Logistic function def logistic(x): return 1 / (1 + np.exp(-x)) # + def cal_shifted_odds(val, slope, intercept): return val*slope + intercept slope, intercept = 0.1, -5 fig, ax1 = plt.subplots(figsize=(10,6)) shifted_odds = cal_shifted_odds(fg_index,slope,intercept) ax1.scatter(fg_index[stock_index_change > 0], shifted_odds[stock_index_change > 0], s=200, marker=6, label = "Up") ax1.scatter(fg_index[stock_index_change < 0], shifted_odds[stock_index_change < 0], s=200, marker=7, label = "Down") ax1.plot(fg_index, shifted_odds, linewidth=2, c="red") ax2 = ax1.twinx() ax2.scatter(fg_index[stock_index_change > 0], logistic(shifted_odds)[stock_index_change > 0], s=100, marker=6, label="Up") ax2.scatter(fg_index[stock_index_change < 0], logistic(shifted_odds)[stock_index_change < 0], s=100, marker=7, label="Down") fg_grids = np.linspace(0,100,100) ax2.plot(fg_grids, logistic(cal_shifted_odds(fg_grids,slope,intercept)), linewidth=4, linestyle=":", c="green") ax1.set_xlabel("Fear 
& Greedy Index",fontsize=20) ax1.set_ylabel("Odds - 5",fontsize=20) ax2.set_ylabel("Probability of Going Up",fontsize=20) plt.legend(fontsize=20); # - # ### Test positiveness # + slope, intercept, threshold = 0.1, -5, 0.5 fig, ax1 = plt.subplots(figsize=(10,6)) shifted_odds = cal_shifted_odds(fg_index,slope,intercept) ax1.axhline(threshold, linewidth=2, c="red", label="Threshold") ax1.scatter(fg_index[stock_index_change > 0], logistic(shifted_odds)[stock_index_change > 0], s=100, marker=6, label="Up") ax1.scatter(fg_index[stock_index_change < 0], logistic(shifted_odds)[stock_index_change < 0], s=100, marker=7, label="Down") ax1.scatter(fg_index[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold], logistic(cal_shifted_odds(fg_index,slope,intercept))[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold], s=400, facecolors='none', edgecolors='b') ax1.set_xlabel("Fear & Greedy Index",fontsize=20) ax1.set_ylabel("Probability of Going Up",fontsize=20) ax1.legend(fontsize=20); # + slope, intercept, threshold = 0.1, -5, 0.8 fig, ax1 = plt.subplots(figsize=(10,6)) shifted_odds = cal_shifted_odds(fg_index,slope,intercept) ax1.axhline(threshold, linewidth=2, c="red", label="Threshold") ax1.scatter(fg_index[stock_index_change > 0], logistic(shifted_odds)[stock_index_change > 0], s=100, marker=6, label="Up") ax1.scatter(fg_index[stock_index_change < 0], logistic(shifted_odds)[stock_index_change < 0], s=100, marker=7, label="Down") ax1.scatter(fg_index[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold], logistic(cal_shifted_odds(fg_index,slope,intercept))[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold], s=400, facecolors='none', edgecolors='b') ax1.set_xlabel("Fear & Greedy Index",fontsize=20) ax1.set_ylabel("Probability of Going Up",fontsize=20) ax1.legend(fontsize=20); # - # #### Maximize likelihood probs = logistic(cal_shifted_odds(fg_index,slope,intercept)) probs 
np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0]) # #### Another set of parameters probs = logistic(cal_shifted_odds(fg_index, slope=0.11,intercept=-5.5)) np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0]) # + from sklearn.linear_model import LogisticRegression regressor = LogisticRegression(penalty="none", solver="newton-cg").fit(fg_index.reshape(-1,1), stock_index_change>0) print("slope: ",regressor.coef_[0][0]) print("intercept: ",regressor.intercept_[0]) # - probs = logistic(cal_shifted_odds(fg_index, slope=0.06070138,intercept=-3.03506894)) np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0]) # ## Naïve Bayesian Classification from Scratch # #### sample data import pandas as pd from collections import Counter stroke_risk = pd.read_csv("strok_risk.csv") stroke_risk def build_probabilities(df,feature_columns:list, category_variable:str): # build prior probabilities prior_probability = Counter(df[category_variable]) # build conditional probabilities conditional_probabilities = {} for key in prior_probability: conditional_probabilities[key] = {} # for each category variable cases for feature in feature_columns: feature_kinds = set(np.unique(df[feature])) feature_dict = Counter(df[df[category_variable]==key][feature]) for possible_feature in feature_kinds: if possible_feature not in feature_dict: feature_dict[possible_feature] = 0 total = sum(feature_dict.values()) for feature_level in feature_dict: feature_dict[feature_level] /= total conditional_probabilities[key] [feature] = feature_dict return prior_probability, conditional_probabilities prior_prob, conditional_prob = build_probabilities(stroke_risk, feature_columns=["weight","high_oil_diet","smoking"], category_variable="stroke_risk") from pprint import pprint pprint(conditional_prob) # #### prediction function def predict(prior_prob, conditional_prob, feature_values:dict): probs = {} total = sum(prior_prob.values()) for key in 
prior_prob: probs[key] = prior_prob[key]/total for key in probs: posterior_dict = conditional_prob[key] for feature_name, feature_level in feature_values.items(): probs[key] *= posterior_dict[feature_name][feature_level] total = sum(probs.values()) if total == 0: print("Undetermined!") else: for key in probs: probs[key]/= total return probs predict(prior_prob,conditional_prob,{"weight":"middle","high_oil_diet":"no","smoking":"yes"}) predict(prior_prob,conditional_prob,{"weight":"high","high_oil_diet":"no","smoking":"no"}) # ### Overfitting and underfitting examples plt.figure(figsize=(10,6)) x_coor = [1,2,3,4,5,6,7] y_coor = [3,8,5,7,10,9,15] plt.scatter(x_coor,y_coor); styles=[":","--","-"] plt.figure(figsize=(10,6)) x = np.linspace(1,7,20) for idx, degree in enumerate(range(1,6,2)): coef = np.polyfit(x_coor,y_coor,degree) y = np.polyval(coef,x) plt.plot(x,y, linewidth=4, linestyle=styles[idx], label="degree {}".format(str(degree))) plt.scatter(x_coor,y_coor, s=400, label="Original Data", marker="o"); plt.legend(); # #### Beyond the original data styles=[":","--","-"] plt.figure(figsize=(10,6)) x = np.linspace(0,8,40) for idx, degree in enumerate(range(1,6,2)): coef = np.polyfit(x_coor,y_coor,degree) y = np.polyval(coef,x) plt.plot(x,y, linewidth=4, linestyle=styles[idx], label="degree {}".format(str(degree))) plt.scatter(x_coor,y_coor, s=400, label="Original Data", marker="o"); plt.legend(); # ### Cross-validation stroke_risk = pd.read_csv("strok_risk.csv") stroke_risk def map_feature(feature_value, feature): if feature == "weight" or feature == "stroke_risk": if feature_value == "low": return 0 elif feature_value == "middle": return 1 else: return 2 elif feature == "high_oil_diet" or feature == "smoking": if feature_value == "yes": return 1 else: return 0 else: print("No such feature: {}".format(feature)) stroke_risk.weight = stroke_risk.weight.map(lambda x: map_feature(x,"weight")) stroke_risk.high_oil_diet = stroke_risk.high_oil_diet.map(lambda x: 
map_feature(x,"high_oil_diet")) stroke_risk.smoking = stroke_risk.smoking.map(lambda x: map_feature(x,"smoking")) stroke_risk.stroke_risk = stroke_risk.stroke_risk.map(lambda x: map_feature(x,"stroke_risk")) stroke_risk from sklearn.linear_model import LogisticRegressionCV X = stroke_risk[["weight","high_oil_diet","smoking"]] y = stroke_risk["stroke_risk"] classifier = LogisticRegressionCV(cv=3,random_state=2020,multi_class="auto").fit(X,y) classifier.get_params # #### Examine the shape of the input data X.values[:1, :] # #### Predict the probability classifier.predict_proba(np.array([[1.5,0.5,2]]))
13,255
/NationalFisheMonitor/extract_features.ipynb
82dd6c54e27d1ef0c090b0642bc66e5335127ea8
[]
no_license
ilaperony/DataScience
https://github.com/ilaperony/DataScience
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
33,225
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- import mxnet as mx import cv2 import numpy as np def get_image(filename): img = cv2.imread(filename) # read image in b,g,r order img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # change to r,g,b order img = cv2.resize(img, (224, 224)) # resize to 224*224 to fit model img = np.swapaxes(img, 0, 2) img = np.swapaxes(img, 1, 2) # change to (channel, height, width) img = img[np.newaxis, :] # extend to (example, channel, heigth, width) return img def get_iteror(batchsize, datashape = (3,224,224)): train = mx.io.ImageRecordIter( # batch_size = batchsize, path_imgrec = 'E:/train/model_train.rec', data_shape = datashape, shuffle = True, rand_crop = True, rand_mirror = True, data_name = 'data_name', lable_name = 'softmax_label' ) test = mx.io.ImageRecordIter( batch_size = batchsize, path_imgrec = 'E:/train/model_test.rec', data_shape = datashape, rand_crop = False, rand_mirror = False, data_name = 'data_name', lable_name = 'softmax_label' ) return(train, test) def model_extract(perfix, epoch): sym, arg_params, aux_params = mx.model.load_checkpoint(perfix, epoch) internels = sym.get_internals() fea_symbol = internels["fc1_output"] return(fea_symbol, arg_params, aux_params) fea_model, arg_params, aux_params = model_extract('D:/gitcode/DataScience/Python/python-mxnet-notebook/how_to/resnet-50',0) sym, arg_params, aux_params = mx.model.load_checkpoint('D:/gitcode/DataScience/Python/python-mxnet-notebook/how_to/resnet-50',0) internals = sym.get_internals() internals.list_outputs() aa = sym.get_internals() aa.list_outputs() # reset model mod = mx.mod.Module(symbol=fea_model, context=mx.cpu()) mod.bind(for_training = False, data_shapes=[('data', (1,3,224,224))]) mod.set_params(arg_params, aux_params) img = get_image('E:/train/ALB/img_00003.jpg') img2 = 
cv2.cvtColor(cv2.imread('E:/train/ALB/img_00003.jpg'), cv2.COLOR_BGR2RGB) img2[1] from collections import namedtuple Batch = namedtuple('Batch', ['data']) mod.forward(Batch([mx.nd.array(img)])) transform_features = mod.get_outputs()[0].asnumpy() transform_features.shape # %matplotlib inline import matplotlib import matplotlib.pyplot as plt img3 = transform_features[::] img3.shape # plt.imshow(img3) img2.shape
2,671
/Assignment_3/Task_5_Analysis.ipynb
8655e4f9a5b2d5cce0fc5b8de76333ffee7c940d
[]
no_license
ManeendarSorupaka/ADS_Spring_2019
https://github.com/ManeendarSorupaka/ADS_Spring_2019
1
0
null
2019-05-07T04:27:35
2019-05-06T18:02:50
Jupyter Notebook
Jupyter Notebook
false
false
.py
46,301
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Task 5: Analysis # Plotly is useful plot geographical plots using the choropleth map # # We ploted geographical plots for the United State and can plot for entire world using unique code dictionary # + import plotly.plotly as py import plotly.graph_objs as go import os import pandas as pd from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) #using data file which contains state FIPS code of USA states df = pd.read_csv(os.getcwd()+'/Graphs.csv') map_data = dict(type='choropleth', locations=df['STUSAB'], locationmode='USA-states', colorscale='Portland', text=df['STATE_NAME'], z=df['COUNT'], colorbar=dict(title="USA States") ) map_layout = dict(geo = {'scope':'usa'}) map_actual = go.Figure(data=[map_data], layout=map_layout) iplot(map_actual) # + import pandas as pd import matplotlib.pyplot as plot # %matplotlib inline df = pd.read_csv(os.getcwd()+'/MF_Data.csv') #toy dataset df.head() df['Risk Factor'].value_counts() plot.title('Number of Loans based on Risk Factor') plot.xlabel('Risk Level') plot.ylabel('Loan Count') df['Risk Factor'].value_counts().plot('bar') # + import pandas as pd import matplotlib.pyplot as plot # %matplotlib inline df = pd.read_csv(os.getcwd()+'/MF_Data.csv') #toy dataset df.head() plot.title('Percentage of Status of loans which are funded') Loan_status = ["Fully Paid","Charged off","Default"] count_LS = df['loan_status'].value_counts() colors = ["#1f77b4", "#ff7f0e", "#2ca02c"] plot.pie(count_LS,labels=Loan_status,autopct='%1.1f%%', shadow=True, startangle=140,colors=colors) plot.show() # -
1,986
/handling_merged.ipynb
ed06e181135f4ce9610a0188e7cb572ea41f38af
[]
no_license
DialloThierno-hub/Kaggle_Competition_Reddit
https://github.com/DialloThierno-hub/Kaggle_Competition_Reddit
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
29,525
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + def n_for_gamer_i(i,sigma_gamers): n_for_gamer=0 for j in range(len(sigma_gamers)): if i==j: continue n_for_gamer+=sigma_gamers[j] return(n_for_gamer) # - packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns from scipy import stats import matplotlib.pyplot as plt from datetime import datetime from tqdm import tqdm import networkx as nx import warnings warnings.filterwarnings('ignore') # no warnings will be printed from now on # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + colab={"base_uri": "https://localhost:8080/"} id="ffysldmhTuEP" outputId="20f08b25-e6b5-47b2-c053-404c864b8c72" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} id="bk_ei1Q1UZ3z" outputId="ee362b41-804e-4475-b2df-0bc7bef5fb17" # Seting the directory # %cd /content/drive/MyDrive/TP_M1_SID/Second_semestre/Compet_kaggle # + [markdown] id="3xp85V5pLIDb" # ### Loading data # + id="amdnGM2GR4nB" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="b960638b-99e8-46ca-85ff-74446b2ad351" df_links = pd.read_csv("df_links_scaled.csv")[['id','ups', 'successors_name', 
'predecessors_name','Ctr_degree_name', 'Ctr_cless_name', 'Ctr_Eigen_name', 'page_rk_name', 'successors_prt_id', 'predecessors_prt_id']] df_links.head() # + colab={"base_uri": "https://localhost:8080/"} id="3Oo3aRC9gp8A" outputId="fb6658de-6611-4cda-b8cd-18020de1fb1e" df_links.shape # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="xo_N4aFcAq-a" outputId="0082e544-a4a3-43ae-b9e4-83ec2b3c3649" df_topic = pd.read_csv("topic.csv") df_topic # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="1wYu4xQ6BRU8" outputId="9ef814a7-8391-4ff2-ec8d-054cbf0760f7" df_sent_analysis = pd.read_csv("sentiment_analysis.csv", lineterminator='\n')[['id', 'polarity', 'subjectivity']] df_sent_analysis # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="n-TGfE2EGlrl" outputId="8b168f69-8f9f-4951-c2a3-a602fb1a25d1" df_author = pd.read_csv('df_author_scaled.csv')[['id', 'successors_auth', 'predecessors_auth', 'Ctr_dg_autor', 'Ctr_Eigen_auth', 'page_rk_auth', 'freq_occ_author']] df_author # + colab={"base_uri": "https://localhost:8080/", "height": 439} id="TpnBQil83Ntp" outputId="f8ecff0b-4702-4de8-cc21-6aeb0ebbfb4b" df = df_sent_analysis.merge(df_topic, on = 'id') df = df.merge(df_links, on = 'id') df = df.merge(df_author, on = 'id') df # + id="nhBn6Ax7ipdg" # Transforming to log scale def trans_y(y): y=y-min_ups+1 return y.apply(func=np.log) def itrans(y_predict): y=y_predict.apply(func=np.exp) return y+min_ups-1 # + [markdown] id="Pt16BCKTwPUD" # # Prediction # + id="IQX8iWhL033O" train = df[df['ups'].notnull()] test = df[df['ups'].isnull()] # + colab={"base_uri": "https://localhost:8080/"} id="AVECqF-giwjF" outputId="77ac6631-7d1d-42d4-c3fe-8999ff8c06fb" min_ups=np.min(train['ups']) min_ups # + [markdown] id="Z0JuXrXF7AgC" # ### Analysing the ups value # + colab={"base_uri": "https://localhost:8080/"} id="6A0rnwHi03_q" outputId="f966a8db-6169-4c59-fccf-4d843627ca74" df['ups'].describe() # + id="9-vRun7sF6VN" y_train = 
trans_y(train['ups']) y_train # y_test=test['ups'].values # + [markdown] id="dvTbmovFB6R7" # ## Train and Test columns # + id="ipEfygjySoA0" x_train = train.drop(["id" , "ups"], axis=1) x_test = test.drop(["id" , "ups"] , axis=1) # + id="sQ69Fp8QaRLt" x_test # + [markdown] id="u-I_8Kp280zR" # ## Gradient Boosting # + id="fYPK-BUECClv" from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RepeatedKFold # + id="8MqlHgfRbFaj" grad_boost = GradientBoostingRegressor(loss = 'lad', subsample = 0.8, n_estimators = 350, learning_rate = 0.1, max_depth = 8) # + id="r5mbIuzyCCvl" # %time grad_boost.fit(x_train, y_train) # + id="M_efYxZiC96y" y_pred_grad_boost = grad_boost.predict(x_test) # + id="BJkZtzh8IItB" test['predicted'] = y_pred_grad_boost test['predicted'] = itrans(test['predicted']) test['predicted'] = test['predicted'].astype(int) ## Creating a new dataframe which contains only needed columns df_pred_gboost = test[['id', 'predicted']] df_pred_gboost.reset_index(drop=True, inplace=True) # + id="AhTzAzcJIpa7" df_pred_gboost.describe() # + id="U0fW9T81Iqvl" df_pred_gboost.to_csv('dernierepreeed.csv', index= False) # + id="F0yi87vgrjWO" an', 'Flannel', 'Henley', 'Hoodie', 'Jacket', 'Jersey', 'Parka', 'Peacoat', 'Sweater', 'Tank', 'Tee', 'Top', 'Turtleneck', 'Chinos','Jeans', 'Joggers', 'Shorts', 'Sweatpants', 'Sweatshorts', 'Trunks', 'Coat', 'Robe'] cm = get_conf_matrix() print(cm.shape) # plt.figure(figsize=(10,10)) # plot_confusion_matrix(cm, names) # + id="HLtMI6lYCGip" colab_type="code" outputId="4a176329-4b70-43c8-d737-a215fc2d3bbb" executionInfo={"status": "ok", "timestamp": 1586706743938, "user_tz": -300, "elapsed": 4738, "user": {"displayName": "Muhammad Ali", "photoUrl": "", "userId": "15673831022739340207"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} plt.figure(figsize=(12,12)) plot_confusion_matrix(cm, 
names) plt.savefig('conf_matrix.png') # + id="16Q2YK_uS-jI" colab_type="code" colab={} pycharm={"name": "#%%\n"} # os.path.exists('//content/drive/My Drive/Deep Fashion Retrieval/base/img/Leaf_Print-Sleeve_Tee/img_00000031.jpg') mask detector model from disk print("[INFO] loading face mask detector model...") maskNet = joblib.load('loaded_model_updated_dataset_3.pkl') # initialize the video stream and allow the camera sensor to warm up print("[INFO] starting video stream...") vs = VideoStream(src=0).start() time.sleep(2.0) # loop over the frames from the video stream check=0 #If No Mask is detected in the face value of check will be changed to 1 and fine will be generated. photo_no=1 t_end=time.time() while True: # grab the frame from the threaded video stream and resize it # to have a maximum width of 400 pixels frame = vs.read() # frame = imutils.resize(frame, width=400) if check==1: #Camera moves downward along Y-axis to detect qrcode for the details if time.time() < t_end: decodedText, points, _ = qrCodeDetector.detectAndDecode(frame) if len(decodedText)>0: generate_fine(decodedText) check=0 else: #This part is executed when a QR Code is not found and hence the image is stored for charging fine manually. 
cv2.imwrite("Image_"+str(photo_no)+".jpg",frame) photo_no+=1 check=0 cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q"): break continue # detect faces in the frame and determine if they are wearing a # face mask or not (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet) # loop over the detected face locations and their corresponding # locations for (box, pred) in zip(locs, preds): # unpack the bounding box and predictions (startX, startY, endX, endY) = box withoutMask = pred[0] # determine the class label and color we'll use to draw # the bounding box and text label = "Mask" if (1-withoutMask) > withoutMask else "No Mask" color = (0, 255, 0) if label == "Mask" else (0, 0, 255) check=0 if label=="No Mask": check=1 # include the probability in the label label = "{}: {:.2f}%".format(label, max((1-withoutMask), withoutMask) * 100) # display the label and bounding box rectangle on the output # frame cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2) cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2) if check==1: t_end=time.time()+10 # show the output frame cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF # if the `q` key was pressed, break from the loop if key == ord("q"): break if check==1: #If no Mask is found a time break is taken so that it could be visible in LIVE system or else it vanishes within a flash of moment. #And now,the system checks for QR Code to generate fine. time.sleep(1) # do a bit of cleanup vs.stop() cv2.destroyAllWindows()
9,161
/.ipynb_checkpoints/explore-checkpoint.ipynb
53cc396c1327fd6b666c8971c3ec395429a1a1c4
[]
no_license
JingwangLi/Mobike
https://github.com/JingwangLi/Mobike
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
169,059
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import numpy as np import pandas as pd path = 'D:\\data\\Mobike\\trips\\' # path2 = 'E:\\data\\Mobike\\byday\\20180901\\' # f = os.listdir(path2) # ID = [] # for i in f: # print(i) # d = pd.read_csv(path2 + i) # ID += list(d.ID) # print(len(list(set(ID)))) 286163 data = pd.read_csv(path + '20180901_trips.csv') # data['clock'] = data['st'].apply(lambda x: int(x.split()[1].split(':')[0])) # data['minute'] = data['st'].apply(lambda x: int(x.split()[1].split(':')[1])) data # print(data.ID.size) 1735550 # print(data.ID.unique().size) 277986 # + import os import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from geopy.distance import geodesic path = 'D:\\data\\Mobike\\trips_20\\' f = os.listdir(path) for j in f: print(j) data = pd.read_csv(path + j) # data.info() # data['iclock'] = data['st'].apply(lambda x: int(x.split()[1].split(':')[0])) # data['iminute'] = data['st'].apply(lambda x: int(x.split()[1].split(':')[1])) # data['oclock'] = data['et'].apply(lambda x: int(x.split()[1].split(':')[0])) # data['ominute'] = data['et'].apply(lambda x: int(x.split()[1].split(':')[1])) # data.to_csv(path + j, index=False) # print(data[data.distance > 20]['ID'].unique().size) # print(data[data.distance > 50]['ID'].unique().size) # print(data[data.distance > 1000]['ID'].unique().size) print(data.shape[0]) print(data[data.distance > 20]['ID'].size) print(data[data.distance > 20]['ID'].unique().size) sdata = data[data.distance > 20] hourTrips = np.zeros(24) for i in range(0, 24): hourTrips[i] = sdata[sdata.iclock == i].shape[0] x = range(len(hourTrips)) plt.plot(x, hourTrips) plt.show() # + import os import numpy as np import pandas as pd path = 'D:\\data\\Mobike\\trips_20\\' path2 = 
'D:\\data\\Mobike\\trips_20_20\\' path3 = 'D:\\data\\Mobike\\trips_20_20_site\\' f = os.listdir(path2) def getisite(x): return (x.slat, x.slon) def getosite(x): return (x.elat, x.elon) # for j in f: # data = pd.read_csv(path + j) # data = data[data.distance > 20] # data.to_csv(path2 + j + '_20.csv') # data['isite'] = data.apply(getisite, axis=1) # data['osite'] = data.apply(getosite, axis=1) # print(data.shape[0]) # data.isite.unique().size 1782354 # data.shape[0] 1786086 for i in f: data = pd.read_csv(path2 + i) data1 = pd.DataFrame(data[['slon', 'slat']]) data2 = pd.DataFrame(data[['elon', 'elat']]) data1['idx'] = data.index data2['idx'] = data.index data1.to_csv(path3 + i.split('.')[0] + '_s.csv', index=False) data2.to_csv(path3 + i.split('.')[0] + '_e.csv', index=False) print('done') # + #generate new subdistract file import shapefile as shp sf = shp.Reader('E:\\Graduate\\Wuhan_jiedao\\jiedao', encoding='gbk') # print(file) # shapes = sf.shapes() # shapes[3].points f = sf.fields # cnt = 0 # for i in f: # print(cnt) # cnt += 1 # print(i) # sf.shapeType w = shp.Writer('E:\\Graduate\\jiedao_new\\jiedao.shp', shapeType=5) w.fields = [f[1], f[8]] sr = sf.shapeRecords() main_city = ['江岸区', '江汉区', '硚口区', '汉阳区', '武昌区', '洪山区', '青山区'] for i in sr: r = i.record if r[7] not in main_city or r[0] == '石洞街道': continue s = i.shape w.record(r[0], r[7]) w.shape(s) w.close() print("done") ff = shp.Reader('E:\\Graduate\\jiedao_new\\jiedao') ff.records() # + #transform points to shp import os import pandas as pd import shapefile as shp path = 'D:\\data\\Mobike\\trips_20_20_site\\' path2 = 'D:\\data\\Mobike\\trips_20_20_site_shp\\' f = os.listdir(path) for i in f: data = pd.read_csv(path + i) w = shp.Writer(path2 + i.split('.')[0] + '_shp') w.field('idx', 'N') if 'e' in i: for j in data.index: w.point(data.loc[j].elon, data.loc[j].elat) w.record(data.loc[j].idx) else: for j in data.index: w.point(data.loc[j].slon, data.loc[j].slat) w.record(data.loc[j].idx) w.close() print(i) 
print("done") # + #get subdistract label for every coordinate using arcpy import os import arcpy from arcpy import env path = 'D:\\data\\Mobike\\trips_20_20_site_shp\\' path2 = 'D:\\data\\Mobike\\trips_20_20_site_jiedao\\' idFeatures = 'E:\\Graduate\\jiedao_new\\jiedao.shp' f = os.listdir(path) for i in f: name = i.split('.') if name[1] != 'shp' or len(name) > 2: continue print(i) inFeatures = path + i outFeatures = path2 + name[0] + '_jiedao.shp' arcpy.Identity_analysis (inFeatures, idFeatures, outFeatures) # + #tranform shapefile to csv with subdistract label import pandas as pd import shapefile as shp path = 'D:\\data\\Mobike\\trips_20_20_site_jiedao\\' path2 = 'D:\\data\\Mobike\\trips_20_20_site_jiedao_csv\\' f = os.listdir(path) for i in f: name = i.split('.') if name[1] != 'shp' or len(name) > 2: continue print(i) sf = shp.Reader(path + i) r = sf.records() data = pd.DataFrame(r, columns=['none', 'idx', 'sdID', 'subdistract', 'distract']) data = data.drop(['none'], axis=1) data['idx'] = data['idx'].astype('int') data = data.sort_values(by=['idx']) data = data.drop_duplicates(['idx']) print(len(data)) data.to_csv(path2 + i.split('.')[0] + '.csv', index=False) # print(list(r[0])) # print(type(r[0])) # df = pd.DataFrame(r, columns=['one', 'two', 'three', 'f', 'five']) # print(df[df.three > -1]) # + #export sdID-subdistract table import os import pandas as pd import shapefile as shp path = 'D:\\data\\Mobike\\trips_20_20_site_jiedao_csv\\' path2 = 'E:\Graduate\jiedao_new\\' # f = os.listdir(path) # data = pd.read_csv(path + f[11]) # print(len(set(data.sdID))) sf = shp.Reader(path2 + 'jiedao.shp') r = sf.records() data = pd.DataFrame(r, columns=['sudistract', 'distract']) data['sdID'] = data.index data.to_csv('sdID-Name.csv', encoding='ANSI', index=False) data # + #merge trips with subdistract label import os import pandas as pd path = 'D:\\data\\Mobike\\trips_20_20\\' path2 = 'D:\\data\\Mobike\\trips_20_20_site_jiedao_csv\\' path3 = 
'D:\\data\\Mobike\\trips_20_20_subdistract\\' f = os.listdir(path) for i in f: print(i) data = pd.read_csv(path + i) start = pd.read_csv(path2 + i.split('.')[0] + '_s_shp_jiedao.csv') end = pd.read_csv(path2 + i.split('.')[0] + '_e_shp_jiedao.csv') print('%d %d %d' % (len(data), len(start), len(end))) data['ssdID'] = start['sdID'] data['esdID'] = end['sdID'] data.to_csv(path3 + i.split('.')[0] + '_subdistract.csv', index=False) print('done') # + import os import pandas as pd path = 'D:\\data\\Mobike\\trips_20_20_subdistract\\' f = os.listdir(path) data = pd.read_csv(path + f[6]) print(len(data)) print(len(data[data.ssdID != data.esdID])) print(len(data[data.distance > 1000]))
7,110
/pandas-general.ipynb
89fe8526bc183b4e6013fa1fccb24479631c26cb
[ "MIT" ]
permissive
fabioedv/sobre-python
https://github.com/fabioedv/sobre-python
0
0
MIT
2021-09-29T23:16:51
2021-09-27T16:34:39
null
Jupyter Notebook
false
false
.py
141,252
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: equinox # language: python # name: equinox # --- # # store geostrophic fields # # Xiaolong initial [notebook](https://github.com/xy6g13/equinox_working/blob/master/sandbox/Reconstruction/Geostrophy%20metric.ipynb) # + import os, sys, shutil import numpy as np import dask import xarray as xr from matplotlib import pyplot as plt # %matplotlib inline import xgcm from mitequinox.utils import * from mitequinox.dynamic import * from mitequinox.plot import * # - from dask_jobqueue import PBSCluster #cluster = PBSCluster(cores=12, walltime='12:00:00') cluster = PBSCluster(cores=12, walltime='01:00:00') w = cluster.scale(60*12) #w = cluster.scale(1*12) #w = cluster.scale(5) # postprocessing # get dask handles and check dask server status from dask.distributed import Client client = Client(cluster) cluster client # --- # # load data # # + grd = load_grd().drop(['hFacC','hFacW','hFacS','rA','rAw','rAs']) dsU = xr.open_zarr(root_data_dir+'zarr/%s.zarr'%('SSU')) dsV = xr.open_zarr(root_data_dir+'zarr/%s.zarr'%('SSV')) dsE = xr.open_zarr(root_data_dir+'zarr/%s.zarr'%('Eta')) ds = (xr.merge([dsU,dsV, dsE], join='inner') .assign_coords(**grd.variables)) grid = xgcm.Grid(ds, periodic=['X', 'Y']) # add coriolis parameters to dataset #omega = 7.3/100000 # XY, # see, http://mitgcm.org/public/r2_manual/final/code_reference/vdb/names/R.htm f = 2*omega_earth*np.sin(np.deg2rad(ds['YG'])) # at vorticity points f_i = grid.interp(f,'X').chunk({'i':None}) # at v points f_j = grid.interp(f,'Y').chunk({'j':None}) # at u points ds0 = ds.assign_coords(f=f,f_i=f_i,f_j=f_j) # - # --- # # ## start computing and storing # # is the storing really necessary? 
terms = ['u_coriolis_linear','u_gradp', 'v_coriolis_linear','v_gradp'] terms = ['v_coriolis_linear'] #Nt, Ni, Nj = 24*20, 188, 96 Nt, Ni, Nj = 24*40, 376, 188 # + overwrite=True for face in range(11,12): print('--- start processing face %d'%face) ds = ds0.sel(face=face) for t in terms: bterm = get_mbal(t, ds, grid) if t[0]=='u': chunks = {'time': Nt, 'i_g': Ni, 'j': Nj} elif t[0]=='v': chunks = {'time': Nt, 'i': Ni, 'j_g': Nj} bterm = bterm.isel(time=slice(len(ds.time)//Nt *Nt)) bterm = bterm.chunk(chunks) file_out = work_data_dir+'mbal/%s_f%02d.zarr'%(t,face) if not os.path.isdir(file_out) or overwrite: bterm.to_dataset().to_zarr(file_out, mode='w') print(' %s, face=%d - processed'%(t, face)) print('mbal terms computed for face=%d'%(face)) # - cluster.close() # --- # # ## check result #t = terms[0] #face = 1 for face in range(13): t = terms[0] #for t in terms: ds = xr.open_zarr(work_data_dir+'mbal/%s_f%02d.zarr'%(t,face)) print(np.abs(ds[t]).mean().values) #print(ds) print('face = %d, term = %s'%(face,t)) face=1 t = terms[0] ds = xr.open_zarr(work_data_dir+'mbal/%s_f%02d.zarr'%(t,face)) ds ds[t].isel(time=0).plot() # --- w = cluster.scale_up(30) client.restart() cluster.close() f.transform(lambda x:np.sqrt(x)) # # Agrupar datos dic = { 'tienda':[1,2,1,2], 'sabor':['chocolate','vainilla','fresa','chocolate'], 'ventas':[26,12,18,22] } df = pd.DataFrame(dic) df # Agrupar por tienda portienda = df.groupby('tienda') portienda # Promedio por tienda portienda.mean() # Total por tienda portienda.sum() portienda.sum().loc[1] portienda.sum().loc[2] portienda.describe() # # Concatenar datos # + df1 = pd.DataFrame({ 'A':[1,2,3], 'B':[4,5,6] }, index=[1,2,3] ) df2 = pd.DataFrame({ 'A':[7,8,9], 'B':[10,11,12] }, index=[4,5,6] ) pd.concat([df1,df2]) # + df1 = pd.DataFrame({ 'A':[1,2,3], 'B':[4,5,6] }, index=[1,2,3] ) df2 = pd.DataFrame({ 'A':[7,8,9], 'B':[10,11,12] }, index=[1,2,3] ) pd.concat([df1,df2]) # - # # Estadísticas zoo = pd.read_csv('zoo.csv',delimiter=',') zoo # 
Conteo de elementos según su categoría zoo.count() # Suma de los elementos según su categoría zoo.sum() # Suma evitando los Nan zoo.sum(skipna=True) zoo['uniq_id'] # Suma de los 'uniq_id' zoo['uniq_id'].sum() # Suma acumulada zoo['uniq_id'].cumsum() # Media aritmética zoo['uniq_id'].mean() # Desviación estándar zoo['uniq_id'].std() # Varianza zoo['uniq_id'].var() # Mínimo zoo['uniq_id'].min() # Mínimo acumulado zoo['uniq_id'].cummin() # Máximo zoo['uniq_id'].max() # Máximo acumulado zoo['uniq_id'].cummax() # Error estándar zoo['uniq_id'].sem() # Reporte general del DataFrame zoo.describe() ser = pd.Series([1]*5 + [2]*3 + [5]*9) ser ser.value_counts() # # Iteradores ser = pd.Series(range(5),list('abcde')) ser for s in ser: print(s) arr = np.random.randint(10,50,(3,3)) df = pd.DataFrame(arr, list('ABC'), list('XYZ')) df for a,b in df.items(): print(a) print(b) for a,b in df.iterrows(): print(a) print(b) # # Ordenamiento df df.sort_index(ascending=False) df.sort_values(by='Y') # # Alineamiento, Reindexado y renombrar etiquetas ser = pd.Series(range(5),list('abcde')) ser parte1 = ser[:3] parte1 parte2 = ser[1:] parte2 parte1.align(parte2) parte1.align(parte2,join='left') parte1.align(parte2,join='right') parte1.align(parte2,join='inner') arr = np.random.randint(10,50,(3,3)) df = pd.DataFrame(arr, list('ABC'), list('XYZ')) df df.reindex(list('BC')) # Para cambiar de nombre a las columnas df.rename(columns={'X':'escolares','Y':'universitarios','Z':'profesionales'}) # # Multiples índices dias = ['dia 1']*3 + ['dia 2']*3 dias comidas = list(range(1,4))*2 comidas indices = pd.MultiIndex.from_tuples(list(zip(dias,comidas))) indices arr = np.random.randint(100,300,(6,2)) df = pd.DataFrame(arr,indices,['M','F']) df df.loc['dia 2'] df.loc['dia 2'].loc[3] df.loc['dia 2'].loc[3]['F'] df.index.names = ['dia','comida'] df df.xs('dia 2') df.xs(1,level='comida') df.xs(2,level='comida') # # Cuando faltan datos l1 = list(range(1,3)) + [np.nan] l2 = [4] + [np.nan]*2 l3 = 
list(range(7,10)) df = pd.DataFrame({'A':l1,'B':l2,'C':l3}) df # Sacar todas las filas que contengan NaN df.dropna() # Sacar todas las columnas que contengan NaN df.dropna(axis=1) # Reemplazar todos los Nan con 0.0 df.fillna(value=0) # Reemplazar todos los Nan con 1.0 df.fillna(value=1)
6,512
/Pandas String and Regular Expression/Check whether alpha numeric values present in a given column of a DataFrame.ipynb
2537ea7046034193f011ac282aa6ed0f8baeda9b
[]
no_license
surajjain85/Python
https://github.com/surajjain85/Python
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
6,405
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 作業 : (Kaggle)房價預測 # # [作業目標] # - 試著模仿範例寫法, 在房價預測中, 觀察去除離群值的影響 # # [作業重點] # - 觀察將極端值以上下限值取代, 對於分布與迴歸分數的影響 (In[5], Out[5]) # - 觀察將極端值資料直接刪除, 對於分布與迴歸分數的影響 (In[6], Out[6]) # + # 做完特徵工程前的所有準備 (與前範例相同) import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_val_score from sklearn.linear_model import LinearRegression data_path = '../data/Part02/' df_train = pd.read_csv(data_path + 'house_train.csv.gz') train_Y = np.log1p(df_train['SalePrice']) df = df_train.drop(['Id', 'SalePrice'] , axis=1) df.head() # - #只取 int64, float64 兩種數值型欄位, 存於 num_features 中 num_features = [] for dtype, feature in zip(df.dtypes, df.columns): if dtype == 'float64' or dtype == 'int64': num_features.append(feature) print(f'{len(num_features)} Numeric Features : {num_features}\n') # 削減文字型欄位, 只剩數值型欄位 df = df[num_features] df = df.fillna(-1) MMEncoder = MinMaxScaler() train_num = train_Y.shape[0] df.head() # # 作業1 # * 試著限制 '1樓地板面積(平方英尺)' (1stFlrSF) 欄位的上下限, 看看能否再進一步提高分數? df['1stFlrSF'][:train_num].describe() # + # 顯示 1stFlrSF 與目標值的散佈圖 import seaborn as sns import matplotlib.pyplot as plt sns.regplot(x = df['1stFlrSF'][:train_num], y=train_Y) plt.show() # 做線性迴歸, 觀察分數 train_X = MMEncoder.fit_transform(df) estimator = LinearRegression() cross_val_score(estimator, train_X, train_Y, cv=5).mean() # + # 將 1stFlrSF 限制在你覺得適合的範圍內, 調整離群值 df['1stFlrSF'][:train_num] = df['1stFlrSF'][:train_num].clip(400,3000) sns.regplot(x = df['1stFlrSF'][:train_num], y=train_Y) plt.show() # 做線性迴歸, 觀察分數 train_X = MMEncoder.fit_transform(df) estimator = LinearRegression() cross_val_score(estimator, train_X, train_Y, cv=5).mean() # - # # 作業2 # * 續前題, 去除離群值有兩類方式 : 捨棄離群值(刪除離群的資料) 以及調整離群值, # 請試著用同樣的上下限, 改為 '捨棄離群值' 的方法, 看看結果會變好還是變差? 
並試著解釋原因。 # + # 將 1stFlrSF 限制在你覺得適合的範圍內, 捨棄離群值 keep_index = (df['1stFlrSF'][:train_num]>400) & (df['1stFlrSF'][:train_num]<3000) df = df[keep_index] train_Y = train_Y[keep_index] sns.regplot(x = df['1stFlrSF'][:train_num], y=train_Y) plt.show() # 做線性迴歸, 觀察分數 train_X = MMEncoder.fit_transform(df) estimator = LinearRegression() cross_val_score(estimator, train_X, train_Y, cv=5).mean()
2,447
/How to Win a Data Science Competition: Learn from Top Kagglers/week01_MLExplainability-Exercise_ Advanced Uses of SHAP Values-Mindy.ipynb
e4c6bd1655d0295e3bf677e3ae8cb83f9a38fd7c
[]
no_license
renjmindy/advanced-machine-learning-coursera
https://github.com/renjmindy/advanced-machine-learning-coursera
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
9,862
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **[Machine Learning Explainability Home Page](https://www.kaggle.com/learn/machine-learning-explainability)** # # --- # # ## Set Up # # # We have again provided code to do the basic loading, review and model-building. Run the cell below to set everything up: # + import numpy as np import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split import shap # Environment Set-Up for feedback system. from learntools.core import binder binder.bind(globals()) from learntools.ml_explainability.ex5 import * print("Setup Complete") import pandas as pd from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split data = pd.read_csv('../input/hospital-readmissions/train.csv') y = data.readmitted base_features = ['number_inpatient', 'num_medications', 'number_diagnoses', 'num_lab_procedures', 'num_procedures', 'time_in_hospital', 'number_outpatient', 'number_emergency', 'gender_Female', 'payer_code_?', 'medical_specialty_?', 'diag_1_428', 'diag_1_414', 'diabetesMed_Yes', 'A1Cresult_None'] # Some versions of shap package error when mixing bools and numerics X = data[base_features].astype(float) train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) # For speed, we will calculate shap values on smaller subset of the validation data small_val_X = val_X.iloc[:150] my_model = RandomForestClassifier(n_estimators=30, random_state=1).fit(train_X, train_y) # - data.describe() # The first few questions require examining the distribution of effects for each feature, rather than just an average effect for each feature. Run the following cell for a summary plot of the shap_values for readmission. It will take about 20 seconds to run. 
# + explainer = shap.TreeExplainer(my_model) shap_values = explainer.shap_values(small_val_X) shap.summary_plot(shap_values[1], small_val_X) # - # ## Question 1 # # Which of the following features has a bigger range of effects on predictions (i.e. larger difference between most positive and most negative effect) # - `diag_1_428` or # - `payer_code_?` # + # set following variable to 'diag_1_428' or 'payer_code_?' feature_with_bigger_range_of_effects = 'diag_1_428' # Check your answer q_1.check() # - # Uncomment the line below to see the solution and explanation. q_1.solution() # ## Question 2 # # Do you believe the range of effects sizes (distance between smallest effect and largest effect) is a good indication of which feature will have a higher permutation importance? Why or why not? # # If the **range of effect sizes** measures something different from **permutation importance**: which is a better answer for the question "Which of these two features does the model say is more important for us to understand when discussing readmission risks in the population?" # # Run the following line after you've decided your answer. # Check your answer (Run this code cell to receive credit!) q_2.solution() # ## Question 3 # # Both `diag_1_428` and `payer_code_?` are binary variables, taking values of 0 or 1. # # From the graph, which do you think would typically have a bigger impact on predicted readmission risk: # - Changing `diag_1_428` from 0 to 1 # - Changing `payer_code_?` from 0 to 1 # # To save you scrolling, we have included a cell below to plot the graph again (this one runs quickly). shap.summary_plot(shap_values[1], small_val_X) # + # Set following var to "diag_1_428" if changing it to 1 has bigger effect. Else set it to 'payer_code_?' bigger_effect_when_changed = 'diag_1_428' # Check your answer q_3.check() # - # For a solution and explanation, uncomment the line below. 
q_3.solution() # ## Question 4 # # Some features (like `number_inpatient`) have reasonably clear separation between the blue and pink dots. Other variables like `num_lab_procedures` have blue and pink dots jumbled together, even though the SHAP values (or impacts on prediction) aren't all 0. # # What do you think you learn from the fact that `num_lab_procedures` has blue and pink dots jumbled together? Once you have your answer, run the line below to verify your solution. # Check your answer (Run this code cell to receive credit!) q_4.solution() # ## Question 5 # # Consider the following SHAP contribution dependence plot. # # The x-axis shows `feature_of_interest` and the points are colored based on `other_feature`. # # ![Imgur](https://i.imgur.com/zFdHneM.png) # # Is there an interaction between `feature_of_interest` and `other_feature`? # If so, does `feature_of_interest` have a more positive impact on predictions when `other_feature` is high or when `other_feature` is low? # # Run the following code when you are ready for the answer. # Check your answer (Run this code cell to receive credit!) q_5.solution() # ## Question 6 # # Review the summary plot for the readmission data by running the following cell: shap.summary_plot(shap_values[1], small_val_X) # Both **num_medications** and **num_lab_procedures** share that jumbling of pink and blue dots. # # Aside from `num_medications` having effects of greater magnitude (both more positive and more negative), it's hard to see a meaningful difference between how these two features affect readmission risk. Create the SHAP dependence contribution plots for each variable, and describe what you think is different between how these two variables affect predictions. # # As a reminder, here is the code you previously saw to create this type of plot. # # shap.dependence_plot(feature_of_interest, shap_values[1], val_X) # # And recall that your validation data is called `small_val_X`. 
# Your code here shap.dependence_plot('num_medications', shap_values[1], small_val_X) shap.dependence_plot('num_lab_procedures', shap_values[1], small_val_X, interaction_index="number_inpatient") # Then run the following line to compare your observations from this graph to the solution. # Check your answer (Run this code cell to receive credit!) q_6.solution() # ## Congratulations # # That's it! Machine learning models should not feel like black boxes any more, because you have the tools to inspect them and understand what they learn about the world. # # This is an excellent skill for debugging models, building trust, and learning insights to make better decisions. These techniques have revolutionized how I do data science, and I hope they do the same for you. # # Real data science involves an element of exploration. I hope you find an interesting dataset to try these techniques on (Kaggle has a lot of [free datasets](https://www.kaggle.com/datasets) to try out). If you learn something interesting about the world, share your work [in this forum](https://www.kaggle.com/learn-forum/66354). I'm excited to see what you do with your new skills. # --- # **[Machine Learning Explainability Home Page](https://www.kaggle.com/learn/machine-learning-explainability)** # # # # # # *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
7,412
/Copy_of_immutableDataStructures.ipynb
124778ebb382b843ddfdfcbec4227289356b5446
[]
no_license
dbirmajer/CS50
https://github.com/dbirmajer/CS50
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
17,010
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dbirmajer/CS50/blob/master/Copy_of_immutableDataStructures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="GjAmdA77Z0Bt" colab_type="text" # **Section 1: Immutable Data Structures** # + id="M1-N7ZS1Ziws" colab_type="code" colab={} from functools import reduce import collections import itertools # **Section 1: Immutable Data Structures** import collections from pprint import pprint Scientist = collections.namedtuple('Scientist', [ 'name', 'field', 'born', 'nobel', ]) scientists = ( Scientist(name='Ada Lovelace', field='math', born=1815, nobel=False), Scientist(name='Emmy Noether', field='math', born=1882, nobel=False), Scientist(name='Marie Curie', field='physics', born=1867, nobel=True), Scientist(name='Tu Youyou', field='chemistry', born=1930, nobel=True), Scientist(name='Ada Yonath', field='chemistry', born=1939, nobel=True), Scientist(name='Vera Rubin', field='astronomy', born=1928, nobel=False), Scientist(name='Sally Ride', field='physics', born=1951, nobel=False), ) # + id="yf55MssFbMcJ" colab_type="code" colab={} pprint(scientists) # + [markdown] id="x0tVn8yxbUhM" colab_type="text" # **Section 2: The filter()** # # + id="s4FsIK_eagGv" colab_type="code" outputId="5d844247-c63a-450a-f8a7-f0b8c7342f37" colab={"base_uri": "https://localhost:8080/", "height": 102} fs = filter(lambda x: x.nobel, scientists) pprint(tuple(fs)) print() pprint(tuple(filter(lambda x: x.nobel and x.field == 'physics' , scientists))) # + id="5uTUykt6g96u" colab_type="code" colab={} def nobel_filter(x): return x.nobel # + id="8Gf3fgi7gwr-" colab_type="code" 
outputId="cc176011-3481-40c8-8e13-9dffab655760" colab={"base_uri": "https://localhost:8080/", "height": 68} pprint(tuple(filter(nobel_filter, scientists))) # + id="GXmc1hHbgoNm" colab_type="code" outputId="bd2d1f8a-5103-4c45-e3fe-0fddd889ff70" colab={"base_uri": "https://localhost:8080/", "height": 68} pprint(tuple(x for x in scientists if x.nobel)) # + [markdown] id="3y2DhPEyhcKx" colab_type="text" # **Section 3: The map() Function** # + id="fH1KXG6zhqnE" colab_type="code" colab={} names_and_ages = tuple(map(lambda x: {'name': x.name, 'age': 2017 - x.born}, scientists)) pprint(names_and_ages) pprint(tuple({'name': x.name, 'age': 2017 - x.born} for x in scientists)) pprint(tuple({'name': x.name.upper(), 'age': 2017 - x.born} for x in scientists)) # + id="_Tp_CUALh32Q" colab_type="code" colab={} # + [markdown] id="zQEpW0Pwh6OT" colab_type="text" # **Section 4: The reduce() Function** # # + id="C1o_7gTMh8_O" colab_type="code" colab={} # + [markdown] id="gCUzrWhjiN8a" colab_type="text" # # Section 4: The reduce() Function # + id="7dqfgUYoiTrx" colab_type="code" colab={} total_age = reduce(lambda accum, x: accum + x['age'], names_and_ages,0) print(total_age) print(sum(x['age'] for x in names_and_ages)) def reducer(accum, val): accum[val.field].append(val.name) return accum pprint(reduce(reducer, scientists, collections.defaultdict(list))) scientists_by_field = { item[0]: list(item[1]) for item in itertools.groupby(scientists, lambda x: x.field) } print(list(itertools.groupby(scientists, lambda x: x.field))) pprint(scientists_by_field) # + [markdown] id="64JOqlHQig6b" colab_type="text" # # Section 5: Parallel Processing With multiprocessing # + id="CScLlKIkikc9" colab_type="code" colab={} import time import multiprocessing import os def transform(x): print(f'Process {os.getpid()} working record {x.name}') #print(f'Processing record {x.name}') time.sleep(1) #sleeps one second result = {'name': x.name, 'age': 2017 - x.born} #print(f'Done Processing record {x.name}') 
print(f'Process {os.getpid()} done processing record {x.name}') return result # + id="yjT3RGQoi6s4" colab_type="code" outputId="01242c9d-8a53-4e6c-ee5e-b2a244f1bf8b" colab={"base_uri": "https://localhost:8080/", "height": 408} start = time.time() result = tuple(map(transform, scientists)) # sequential version end = time.time() print(f'Time to complete in sequential version: {end - start:.2f}s\n') pprint(result) # + id="QI6QMkXzi45j" colab_type="code" outputId="122a8d00-207a-4051-ff6b-0d5953ff6b43" colab={"base_uri": "https://localhost:8080/", "height": 289} pool = multiprocessing.Pool() start = time.time() result = pool.map(transform, scientists) # parallel version end = time.time() print(f'Time to complete in parallel version: {end - start:.2f}s\n') # + id="UeaY7TGdjJ2N" colab_type="code" outputId="bf9e06b7-d858-49c3-93aa-b780b203ea24" colab={"base_uri": "https://localhost:8080/", "height": 289} pool = multiprocessing.Pool(processes=len(scientists)) start = time.time() result = pool.map(transform, scientists) # parallel version end = time.time() print(f'Time to complete in parallel version: {end - start:.2f}s\n') # + id="34UlAFKEjUqt" colab_type="code" outputId="6a45cd89-bb62-4676-d93d-baa2f672e46a" colab={"base_uri": "https://localhost:8080/", "height": 289} pool = multiprocessing.Pool(processes=2, maxtasksperchild=1) start = time.time() result = pool.map(transform, scientists) # parallel version end = time.time() print(f'Time to complete in parallel version: {end - start:.2f}s\n') # + id="WcVYwg70kVdW" colab_type="code" colab={} from google.colab import drive drive.mount('/content/drive') # + id="Mtf5QENAjhP_" colab_type="code" colab={}
5,813
/1_BasicPython_June2019/Entrega_2/.ipynb_checkpoints/Ejercicios_Entrega_2-checkpoint.ipynb
aaef74212a94dc115bd91d41b481d7f480bfa98a
[]
no_license
sthhher/DUAL_GitHub
https://github.com/sthhher/DUAL_GitHub
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
16,882
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Programación para la Bioinformática # ## Módulo 2: Breve introducción a la programación en Python # Ejercicios y preguntas teóricas # ------------------------------- # # A continuación, encontraréis la parte que tenéis que completar en este modulo y las preguntas teóricas a contestar. # # Podéis encontrar en la documentación oficial de *string* funciones que os puedan ser de ayuda para completar los ejercicios: https://docs.python.org/2/library/string.html # ### Pregunta 1 # ¿Cuál es la diferencia entre definir un string con comillas simples (' '), comillas dobles(" ") y tres comillas dobles (""" """)? # # **Pon un ejemplo de cada uno de ellos**. # variable1="this is a string with 'quotes'" # variable2='this is a string with "quotes"' # variable3="""this is a string with "quotes" and more 'quotes'""" # # **Respuesta:** # Las tres comillas, a nivel técnico, hacen lo mismo a excepción de algunos carácteres de escape. Siguiendo la guia de la comunidad unas tienen unos usos concretos y otras otros. # Dentro de las tres comillas dobles podemos poner comillas simples y dobles. En cambio, dentro de las simples sólo podemos poner dobles y, viceversa. # # Haz doble click sobre este texto, borralo, escribe aquí tu respuesta y pulsa el botón "Run" de la barra superior. # ### Pregunta 2 # ¿Qué expresión en Python (la más idiomática que encuentres) necesitamos para conseguir el string "ACTGACTGACTGACTGACTGACTGACTGACTGACTGACTGACTGACTG" utilizando solo la palabra "ACTG"? 
# # **Respuesta:** ### Respuesta incorrecta ### print("ACTG" + "ACTG" + "ACTG" + "ACTG") ### Respuesta incorrecta ### for _ in range(10): print("ACTG") # Tu respuesta dna = "ACTG" print(dna*10) # ### Pregunta 3 # # **¿Cuál es el valor final de a, b y c (atención a los tipos de número decimal y entero)?** # # a = 3*2 # # b = 9/2. # # c, a = a+1, b*2 # # b = a // 5 # # ** Nota: ** Tened en cuenta el tipo de variable (int, float, bool, str) y su comportamiento en cada momento. # Tu respuesta: # # # #a=9.0 # # #b=1.0 # # #c=7 # # # Primero se crea 'A' y se le asigna el valor de 3*2 (6), seguidamente 'B' es creado con el valor resultado de 9/2 (4.5), en este momento, 'B' cambia su clase de variable de Integer a Float ya que el resultado de la división tiene decimales. # # Después de estas operaciones iniciales, se ejecuta la tercera operacion, en la cual se crea 'C' y se le asigna el valor de A + 1 (7) a la vez que a A se le asigna B * 2 (9.0, ya que es un float). Aparentemente se ejecuta primero la operacion de C y luego la de A. # # Por último, se asigna a B el valor de A//5 (1.0), esta operacion divide y redondea el número, sin embargo al haber sido dividido anteriormente B sigue siendo una variable float por lo que conserva el decimal. # Queremos saber cual es el valor final y porqué. Puedes ejecutar el código en una nueva celda para comprobar que tu respuesta es correcta. # # **No son lo mismo: 9 y 9.0** # # **Son lo mismo: 9. y 9.0** # ### Pregunta 4 # # **¿Cuáles de las siguientes expresiones son sintácticamente incorrectas? Explica el porqué una por una** # * planetas = 'Saturno', 'Jupiter', 'Tierra' # * t = ('123', 'abc', 0, ) # * dias = ['Lunes', 'Martes', "Miercoles", ''.join(['J','u','e','v','e','s'])] # * matriz = [[],[],[],[],()] # * t = r = s = u = 0 + 1 # * a = 1; a += 1; a += [] # # **Respuesta:** # # planetas = es correcta, se está creando una tupla. Si intentamos añadir algún valor, no podremos. 
# # t = Funciona pero es incorrecto dejar una coma sin un valor detrás. # # dias = Está concatenando a un string vacío las letras de Jueves individualmente, funciona perfectamente aunque sería preferible que todos los datos de la lista utilicen el mismo tipo de comillas. # # matriz = Es correcto, es una matriz con listas en su interior y una tupla. # # t = Es correcto pero muy redundante. Las variables se igualan a los valores en su derecha y ya que t es igual a r que es igual a s y etc, acaba teniendo el resultado de 0 + 1. # # a = No se puede sumar a 'a' el valor de [] ya que no hay ningún valor, por lo demás, se pueden utilizar los ; libremente ya que son opcionales en Python y marcan el final de línea aunque esté todo en la misma línea. # # ### Pregunta 5 # # Explica a qué se refieren con el término "batteries included" en la comunidad Python. **Poned 5 ejemplos de paquetes de la librería estándar y una explicación de cada uno de ellos.** # # **Respuesta:** # Tu respuesta: # Se refiere a que el lenguaje en sí tiene muchas librerías incluidas muy útiles por lo que se puede comenzar a hacer trabajos con Python sin la necesidad de instalar paquetes extra. # # Ejemplos: # # ### Pregunta 6 # # Python dispone de un idiom muy útil conocido como list comprehensions. ¿Puedes explicar en qué consiste? Indicad # **dos ejemplos** de código que utilicen este idiom y documentad cada parte de código relevante. # # **Respuesta:** # Tu respuesta # Se utiliza para crear nuevas listas a partir de otros iteradores. 
# # # + #Ejemplo 1: numeros = [1,2,3,4] num_al_cuadrado = [] for n in numeros: num_al_cuadrado.append(n*n) print(num_al_cuadrado) # - # ### Ejercicio 1 # # Escribe un programa que asigne dos valores enteros cualquiera (escoged vosotros un número entero aleatorio, no es necesario utilizar ninguna librería externa) a dos variables con nombre t y w, calcule el producto de t por dos veces w y finalmente muestre el valor del cuadrado de la operación anterior: # # f(t, w) = (t \* 2 \* w)<sup>2</sup> # # **Nota:** No es necesario utilizar ninguna libreria, ni importar ningún módulo para escoger el numero aleatorio ni para resolver este ejercicio. # + # Tu respuesta import random; #Importo random para tener un resultado más variado. t = random.randint(0,10); w = random.randint(0,10); f = None; f=(t*2*w)**2; print("(%d*2*%d)^2 = %d" % (t,w,f)); # - # ### Ejercicio 2 # # Escribe un programa que defina una lista cuyos valores sean los nombres de los meses del año. Ordénalos por orden lexicográfico: # + # Tu respuesta meses = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre" ] print(sorted(meses)) # - # ### Ejercicio 3 # # Escribe un programa que calcule e imprima el volumen de una esfera de radio 23.5: # + # Podéis utilizar la librería math para conseguir el valor de PI: import math print(math.pi) # + # Tu respuesta import math radio = 23.5 volumen = (4/3)*math.pi*(radio**3) print (volumen) # - # ### Ejercicio 4 # # Escribe un programa donde se defina un diccionario cuyas claves sean los nombres de los continentes y el valor una lista con animales endémicos de ese continente. Por ejemplo: # # ``` # animales -> Europa: (Lince ibérico, Bison Europeo, Vaca Suiza), # África: (León, Elefante Africano, Avestruz), # ... # ``` # # Un diccionario es una estructura de datos de Python, por favor repasa la teoria si no lo recuerdas. 
# + # Tu respuesta diccionario_codes = {'Europa':['Lince ibérico', 'Bison Europeo', 'Vaca Suiza'], 'Africa': ['León', 'Elefante Africano', 'Avestruz']} print(diccionario_codes) # - # ### Ejercicio 5 # # Completa el código Python para imprimir la Fuerza que se ha ejercido: # # velocidad_inicial -> 0 m/s<sup>2</sup> # # espacio -> 10 m # # tiempo -> 10 s # # masa -> 5 kg # # **velocidad_final = espacio / tiempo** # # **aceleracion = (velocidad_final - velocidad_inicial) / tiempo** # # **Fuerza = masa * aceleracion** # + # Completa el código necesario: vi = 0.0 s = 10.0 t = 10.0 m = 5.0 vf = s/t a = (vf-vi)/t f = m*a print('Fuerza ejercida: ' + str(f) +' Newtons') # - # ### Ejercicio 6 # # Escribe un programa que imprima la versión del intérprete de Python que se está ejecutando: # Tu respuesta import sys print(sys.version) # + ### No es correcto, hay que utilizar la librería estándar de Python: print('Versión de Python es 3.6') # - # ### Ejercicio 7 # # Escribe un programa que imprima la fecha y la hora actuales: # Tu respuesta: import datetime print(datetime.datetime.now()) # ### Ejercicio 8 # # Escribe un programa que calcule el número de días entre estas dos fechas: # # 05/10/1984 # # 01/03/2019 # # + # Podemos crear una fecha de la siguiente forma: from datetime import date # El formato es: Año, mes, día: d = date(2018, 10, 21) print(d) # + # Tu respuesta # Hay que utilizar la librería datetime y el módulo date: from datetime import date # + from datetime import date d1 = date(2018, 10, 21) print("Fecha 1:", d1) d2 = date(2018, 8, 10) print("Fecha 2:", d2) diferencia = d1-d2 print ("Dias de diferencia:", diferencia.days) # -
8,942
/project3/minlog.ipynb
b3a5399f1c0af1f7d4373c1cfb91de0c15a43dce
[]
no_license
gsanou/symbolic-machine-learning-hw
https://github.com/gsanou/symbolic-machine-learning-hw
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
7,637
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %env JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_112.jdk/Contents/Home # Let's create a clause and see what is inside from logic import * from dataset import InterpretationDataset # + clause = Clause.parse("!human(socrates),!sibling(socrates,Y),!human(sonOf(Y)),mortal(Y)") print("clause {}".format(clause)) for literal in clause: print("{}ground literal {} contains following arguments".format("" if literal.isGround() else "non-", literal)) for term, idx in zip(literal, range(0, len(literal))): print("\t{}-th argument:\t{}".format(idx, term)) # - # Let's see how to go throught a datset of interpretations. data = InterpretationDataset(os.path.sep.join(['.','data','lectureGraphs'])) for sample in data: print(sample) # Each sample, i.e. observation, consists of a class, e.g. pos/neg, and a set of atoms which holds. Their are easily accesible. sample = tuple(data)[0] print('class of this sample is\t{}'.format("pos" if sample.positiveClass else "neg")) print('the sample contains following facts') for atom in sample: print("\t{}".format(atom)) # Keep in mind that two expressions even parsed by the same process are not the same instance, thus 'is' comparator does not return what is probably expected from the first sight. Therefore, it is advised to use "==" comparator. See following examples and compare the output to what you expect. # + c1 = Clause.parse("p(X,a)") c2 = Clause.parse("p(X,a)") print(c1.literals[0] is c2.literals[0]) print(c1 is c2) print(c1 == c2) for t1, t2 in zip(c1.literals[0].atom.terms, c2.literals[0].atom.terms): print("{}\tvs\t{}".format(t1, t2)) print("\t{}".format(t1 is t2)) print("\t{}".format(t1 == t2)) # - # Despite the fact that two clauses are logically equal, they are not equal by the "==" operator. 
The operator is implemented by lexical comparison only. c3 = Clause.parse("p(X,Y)") c4 = Clause.parse("p(Z,W)") print(c3 == c4) # The easiest way to apply a substitution to a clause is to create a substitution, i.e. a dictionary of terms indexed by variables. c5 = Clause.parse("siblings(X,Y,Z,W)") x = Variable("X") y = Variable("Y") z = Variable("Z") issac = Constant("issac") kain = Constant("kain") substitution = {x : issac, y : kain, z : x} substitutedClause = c5.substitute(substitution) print(substitutedClause) # Note here one important thing. The substituted clause should look, by the definition of substitution in FOL, as sibling(issac, kain, issac, W). However, this library supports, and also expects, flat substitutions only. Therefore, no chaning is applied during the substitution process, e.g. the result is siblings(issac,kain,X,W) since no chaning {Z -> X, X -> issac} is done. If you want such behavior, then flatten the substitution first, e.g. {Z -> issac, X -> issac}. However, in our tutorials, we do not need such functionality.
3,140
/.ipynb_checkpoints/WeatherPy-checkpoint.ipynb
cd77a8dbe5b6d3ac86457c4cc55efa88358a7b55
[]
no_license
mrderekbutler/api-challenge
https://github.com/mrderekbutler/api-challenge
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
721,477
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fernassouza/Data-Science/blob/master/Como_Avaliar_um_Modelo_de_Machine_Learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="nWuxMG_HQUVY" colab_type="text" # <img alt="Colaboratory logo" width="15%" src="https://raw.githubusercontent.com/carlosfab/escola-data-science/master/img/novo_logo_bg_claro.png"> # # #### **Data Science na Prática 2.0** # *by [sigmoidal.ai](https://sigmoidal.ai)* # # --- # + [markdown] id="B3tMDtOf-XR-" colab_type="text" # # Como avaliar um modelo de *Machine Learning* # # Beleza, você construiu seu modelo de *Machine Learning*, após várias horas fazendo uma análise exploratória e limpando os dados, treinou ele em cima do seu conjunto de dados e obteve os coeficientes "ideais". Qual o próximo passo? # # > "Você não controla aquilo que não consegue mensurar" # # Trabalhei durante 1 ano no Centro de Operações Aeroespaciais (COMAE), analisando dados da cadeia de Comando e Controle (C2) da Força Aérea, e essa foi uma das frases que eu mais escutei por lá. # # <img src="http://sigmoidal.ai/wp-content/uploads/2019/10/pngtree-measuring-accuracy-measure-small-tiny-flat-color-icon-vect-png-image_1619121.jpg" align=left width="200px"> # # Na verdade, medir resultados e monitorar indicadores não é algo exclusivo de sistemas militares, mas é uma tarefa obrigatória em qualquer atividade, ainda mais em modelos de Inteligência Artificial. # # Se você não sabe como o seu algoritmo está performando, pode acabar compromentendo o faturamento de uma empresa ou até mesmo a imagem dela. # # Existem inúmeras metodologias, métricas e pesquisas sobre esta etapa de avaliação. 
A cada dia, a fronteira da ciência avança um passo. O que era o *estado da arte* ontem, provavelmente não mais será em um mês. # # Quero trazer aqui uma visão geral daquelas métricas mais conhecidas e usadas pela comunidade. # # + [markdown] id="J0t_VR8iAuZl" colab_type="text" # ## Avaliando Modelos de Regressão # # Modelos de Regressão buscam prever um valor numérico contínuo. Por exemplo, qual o preço de venda de um imóvel ou qual o faturamento esperado para o próximo mês. # # Quando estamos usando o *dataset* de teste, fazemos a previsão $\hat{y}_i$ para a $i\text{-ésima}$ linha, uma vez que também temos o valor real $y_i$, podemos calcular esse erro de diferentes formas. # # <center><img width="50%" src="https://raw.githubusercontent.com/carlosfab/dsnp2/master/img/regressao_loss.png"></center> # # Na imagem acima, considere que a reta representa o seu modelo de regressão, ou seja, a própria previsão. Cada seta (vetor) representa a diferença entre o valor real e a previsão. # # Em outras palavras, cada seta representa o quanto você errou naquela previsão específica. Se você medir todas as distâncias entre os pontos e a reta (que é o seu modelo), você vai ter o seu erro, ou ***loss***. # # No exemplo dessa figura, fica bem claro que o modelo do plot da esquerda "erra" bem mais que o plot do modelo da direita. Ou seja, o modelo da direita é melhor. # # Na verdade, existem diversas métricas que podem ser utilizadas para verificar o desempenho do seu modelo. Vamos dar uma olhadas nas principais. # # ### *Mean Absolut Error* - MAE # # É o mais simples de todos, onde se calcula a média da magnitude do erro para todos os pontos onde realizamos previsões, sem considerar a direção. # # $$MAE = \frac{1}{m}\sum_{i=1}^{n}|y_i - \hat{y}_i|$$ # # É a mais intuitiva de todas as métricas, pois olhamos a diferença absoluta entre a previsão e o real. 
# # O problema com o MAE é que todos os erros individuais são tratados da mesma maneira, o que pode ser um problema para lidar com outliers. # # ### *Mean Squared Error* - MSE # # O MSE calcula a média dos erros elevados à segunda potência. # # $$MSE = \frac{1}{m}\sum_{i=1}^{m}(y_i - \hat{y}_i)^2$$ # # A vantagem é que os erros mais grotescos serão mais penalizados que aqueles menores, o que implica que diferenças menores terão menos impacto na avaliação. # # Pode haver uma dificuldade na sua interpretação, uma vez que a unidade de medida será alterara. # # ### *Root Mean Squared Error* - RMSE # # Calcular a raiz quadrada da média dos quadrados dos erros faz com que a RMSE penalize os maiores erros, sendo mais sensível aos outliers. # # $$RMSE = \sqrt{\frac{1}{m}\sum_{i=1}^{m}(y_i - \hat{y}_i)^2}$$ # # Note que as diferenças são elevadas ao quadrado antes que sua média seja calculada. # # Essa métrica é muito desejada em situações onde desejamos que grandes erros sejam evitados ao máximo. # # ### *R-Squared* - $R^2$ # # Apesar do R-Squared não se tratar de *erros*, propriamente, é bastante popular para verificar a acurácia de modelos de regressão. # # $$R^2 = 1 - \frac{\sum_{j=1}^{m}(y_i - \hat{y}_i)^2}{\sum_{j=1}^{m}(y_i - \bar{y}_i)^2}$$ # # Você pode entender a métrica como sendo "quão bom o seu modelo está próximo dos valores reais", e ele faz isso calculando as variâncias em relação à media e à reta. # # + [markdown] id="CrZrhAq3Fjpp" colab_type="text" # ## Avaliando Modelos de Classificação # # Como se avalia um modelo de classificação? # # Seja trabalhando com classificação binária ou com diversas classes, as métricas relacionadas a modelos de classificação não podem ser os mesmos que usamos em modelos de regressão. # # # ### Acurácia # # Definitivamente, é a métrica mais intuitiva e fácil para se entender. A acurácia mostra diretamente a porcentagem de acertos do nosso modelo. 
# # $$ # \text{Acurácia} = \frac{\text{Número de previsões corretas}}{\text{Número total de previsões}} # $$ # # Apesar de ser muito direta e intuitiva, a acurácia não é uma boa métrica quando você lida, por exemplo, com dados desbalanceados. # # Em um *dataset* de fraudes bancárias, o número de eventos classificados como fraude pode ser inferior a 1%. Ou seja, a acurácia do modelo será superior para os casos de não-fraude e não retratará bem a natureza do nosso problema. # # Nesse exemplo de fraude bancária, o banco irá preferir uma acurácia máxima na detecção de casos confirmados de fraudes, mesmo que tenha diminuir a acurácia global - e ter um número maior de alarmes falsos. # # Veja um exemplo do curso Machine Learning Crash Course do Google. Nele, pode-se ver com que olhar simplesmente a acurácia pode levar a conclusões erradas. Abaixo, estamos olhando os resultados de um modelo para detecção de tumores malignos. # # <center><img alt="Colaboratory logo" width="50%" src="https://raw.githubusercontent.com/carlosfab/dsnp2/master/img/acuracia.png"></center> # # Uma acurácia de 91% pode levar a uma interpretação equivocada, pois o modelo conseguiu detectar corretamente apenas 1 dos 9 tumores malignos. Para casos como esses, de dados desbalanceados, o ideal é separar os erros em diferentes tipos, e não apenas usar uma métrica global de acurácia. # # **Tipos de erros** # # * **Verdadeiro positivo (*true positive* — TP):** Por exemplo, quando o paciente tem tumor maligno e o modelo classifica como tendo tumor maligno. # # * **Falso positivo (*false positive* — FP):** Por exemplo, quando o paciente não tem tumor maligno e o modelo classifica como tendo tumor maligno. # # * **Falso negativo (*true negative* — TN)**: Por exemplo, quando o paciente tem tumor maligno e o modelo classifica como não tendo tumor maligno. 
# # * **Verdadeiro negativo (*false negative* — FN):** Por exemplo, quando o paciente não tem tumor maligno e o modelo classifica como não tendo tumor maligno. # # Com esses conceitos, vamos dar uma olhada em duas outras métricas. # # ### Precision # # A precisão diz respeito à quantidade (proporcional) de identificações positivas feita corretamente, e é obtida pela equação # # $$ # \frac{TP}{TP+FP} # $$ # # Ela responde **"qual a proporção de identificações positivas que estava correta?"** Esse valor será máximo (1.0) quando não produzir falsos negativos. # # No exemplo do tumor maligno, a precisão seria 50%, o que significa que quando o modelo prevê um tumor maligno, está correto metado das vezes. # # ### *Recall* # # Mostra a proporção de positivos encontrados corretamente. Matematicamente, você calcular o *recall* da seguinte maneira: # # $$ # \frac{TP}{TP+FN} # $$ # # No caso do tumor maligno, o recall teria um valor igual a 11%, o que significa que o modelo é capaz de prever corretamente apenas 11% de todos os tumores malignos. # + [markdown] id="jlrR-rNWGuCY" colab_type="text" # ### *F1-score* # # É a média harmonica entre precisão e *recall*. O melhor valor possível para 0 *F1-score* é 1 e o pior é 0. É calculado por # # $$ # 2* \frac{precision*recall}{precision+recall} # $$ # + [markdown] id="QWkVEK1TNTpE" colab_type="text" # ### Matriz de Confusão # # Uma das principais maneiras de você verificar o desempenho do seu algoritmo é por meio da Matriz de Confusão. Para cada classe, ela informa quais os valores reais (*actual*) e os valores previstos pelo modelo (*predicted*). # # Dessa maneira, é possível rapidamente identificar: # # <p align=center><img src="http://sigmoidal.ai/wp-content/uploads/2019/10/Screen-Shot-2019-10-07-at-10.02.40.png" width="80%"></p> # # # Desse sumário da matriz de confusão, conseguimos extrair várias informações importantes. 
# + id="8WzzldUM7ONl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="830f8344-dd71-4b3b-dafb-d1f85ce38c45" # problema binário de classificação from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report import matplotlib.pyplot as plt import seaborn as sns y_true = [0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0] y_pred = [0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0] mat = confusion_matrix(y_true, y_pred) sns.heatmap(mat, square=True, annot=True, cbar=False) plt.xlabel('Previsão do modelo') plt.ylabel('Valor verdadeiro'); # + id="6NuzjiOUHxMK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="b3b9cdd7-5886-49c8-8bd3-31ab3abb5b82" # problema de classificação com múltiplas classes y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] mat = confusion_matrix(y_true, y_pred) sns.heatmap(mat, square=True, annot=True, cbar=False) plt.xlabel('Previsão do modelo') plt.ylabel('Valor verdadeiro'); # + id="8Wnh5ed0NYTu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="ec08da95-82d0-4d81-ccba-72d18d6d4155" # exemplo do classification Report print(classification_report(y_true, y_pred)) # + id="k9Rm_XyWNrBj" colab_type="code" colab={} qrt import time from matplotlib import pyplot as plt # ## Environment Setup # + # constant WHITE = 0 YELLOW = 1 RED = 2 lanewidth = 0.4 linewidth_white = 0.04 linewidth_yellow = 0.02 # - # ## Generate Vote (Extra Credit 10% for generating multiple votes from all detected segments) # #### Setup a line segment # * left edge of white lane # * right edge of white lane # * left edge of yellow lane # * right edge of white lane # + # right edge of white lane #p1 = np.array([0.8, 0.24]) #p2 = np.array([0.4, 0.24]) p1 = np.array([lines_normalized[0][0],lines_normalized[0][1]]) p2 = np.array([lines_normalized[0][2],lines_normalized[0][3]]) seg_color = YELLOW 
# left edge of white lane #p1 = np.array([0.4, 0.2]) #p2 = np.array([0.8, 0.2]) #seg_color = WHITE #plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'ro') #plt.plot([p1[0], p2[0]], [p1[1], p2[1]]) #plt.ylabel('y') #plt.axis([0, 5, 0, 5]) #plt.show() # - # #### compute d_i, phi_i, l_i # + t_hat = (p2-p1)/np.linalg.norm(p2-p1) n_hat = np.array([-t_hat[1],t_hat[0]]) d1 = np.inner(n_hat,p1) d2 = np.inner(n_hat,p2) l1 = np.inner(t_hat,p1) l2 = np.inner(t_hat,p2) print (d1, d2, l1, l2) if (l1 < 0): l1 = -l1; if (l2 < 0): l2 = -l2; l_i = (l1+l2)/2 d_i = (d1+d2)/2 phi_i = np.arcsin(t_hat[1]) if seg_color == WHITE: # right lane is white if(p1[0] > p2[0]): # right edge of white lane d_i = d_i - linewidth_white print ('right edge of white lane') else: # left edge of white lane d_i = - d_i phi_i = -phi_i print ('left edge of white lane') d_i = d_i - lanewidth/2 elif seg_color == YELLOW: # left lane is yellow if (p2[0] > p1[0]): # left edge of yellow lane d_i = d_i - linewidth_yellow phi_i = -phi_i print ('right edge of yellow lane') else: # right edge of white lane d_i = -d_i print ('right edge of yellow lane') d_i = lanewidth/2 - d_i print (d_i, phi_i, l_i) # - # ### Measurement Likelihood # + # initialize measurement likelihood d_min = -0.7 d_max = 0.5 delta_d = 0.02 phi_min = -pi/2 phi_max = pi/2 delta_phi = 0.02 d, phi = np.mgrid[d_min:d_max:delta_d, phi_min:phi_max:delta_phi] measurement_likelihood = np.zeros(d.shape) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(measurement_likelihood, interpolation='nearest') fig.colorbar(cax) plt.ylabel('phi') plt.xlabel('d') #ax.set_xticklabels(['']+alpha) #ax.set_yticklabels(['']+alpha) plt.show() # + i = floor((d_i - d_min)/delta_d) j = floor((phi_i - phi_min)/delta_phi) measurement_likelihood[i,j] = measurement_likelihood[i,j] + 1/(l_i) print (i, j) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(measurement_likelihood, interpolation='nearest') fig.colorbar(cax) plt.ylabel('phi') plt.xlabel('d') 
#ax.set_xticklabels(['']+alpha) #ax.set_yticklabels(['']+alpha) plt.show() # - # ## Bayes' Filter (Extra Credit 10% for integrating Bayes' filter for multiple votes)
13,885
/Zygimantas/Analysis/.ipynb_checkpoints/2b-Analysis of data_take2 - 1 with scalling-checkpoint.ipynb
32aa4eb94d0b976e972a718c797ceb74559cbe4d
[]
no_license
nikmagini/TransferMonitoring
https://github.com/nikmagini/TransferMonitoring
0
2
null
2016-09-05T15:38:14
2016-08-05T14:42:16
Jupyter Notebook
Jupyter Notebook
false
false
.py
1,924,633
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # Analysis of most promenant algorithm # # In this notebook I will document the search for best algrothm to analyse TransferMonitoring data # ## Loading and initial setup # + # %matplotlib inline # Here i keep prepared csv file for ML f = open('../data/output/json_hashed.csv') import numpy as np import pandas as pd from pandas import read_csv from IPython.display import display, HTML # in order to display of dataframe pd.set_option('display.max_columns', 60) pd.set_option('display.max_rows', 30) dataframe = read_csv(f).astype(np.float32) # to see struckture of data, uncoment # dataframe # save headers to seperate list original_headers = list(dataframe.columns.values) # to see list of headeers, uncoment print(len(original_headers)) display(original_headers) # - # # Preprocesing step # ## Check if data is missing any value dataframe.isnull().any() # -- uncoment to see data types # types = dataframe.dtypes # types try: # if true, dataframe contains infinite numbers(bad) print(np.all(np.isfinite(dataframe))) except: pass try: print(np.any(np.isnan(dataframe))) except: pass # Selecting importat features # --------- # # Since our datasets is quit big, we should reduce the values inorder to save resources and increase computational speed. # First we drop out unvanted columns that for sure will not tell anything important for us. # + dataframe = dataframe.drop(['tr_id','tr_timestamp_start','t_error_code'], axis=1) # a=list(dataframe.columns.values) # - # Now we pop out our target column, we will use it for later. # from dataframe pop target column and transform to ndarray target=dataframe.pop('timestamp_tr_dlt') # As an end result we will process this kind of matrix. 
prepared_headers = list(dataframe.columns.values) # dataframe itself from sklearn import preprocessing dataframe # More checking on dataframe: # corr = dataframe.corr().fillna(0).mul(100).astype(int) corr = dataframe.corr().fillna(0).mul(100).astype(int) corr # Lets see corelation matrix, ploted # ------ # + import matplotlib.pyplot as plt import seaborn as sns # corr = pd.DataFrame(X_noVariance).corr().fillna(0).mul(100).astype(int) cg = sns.clustermap(figsize=(25,25),data=corr, annot=True, fmt='d', cmap='Reds', linewidths=.5) plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0) plt sns.plt.show() # - # Checking for colums with no variance # ---- # + from sklearn.feature_selection import VarianceThreshold sel = VarianceThreshold() # X_noVariance = sel.fit_transform(dataframe) # get new array X_noVariance = sel.fit(dataframe) # get treshold with support support_list = sel.get_support() a= [i for i,x in enumerate(support_list) if x == 0] # atributes with 0 change for i in a: print(i,original_headers[i]) # - # ## Removing non variating values and correlation # Trying scaling on data # ------ # + matrix_to_scale = dataframe.as_matrix() # matrix_to_scale # + # scale_X = preprocessing.scale(matrix_to_scale) # # type(matrix_to_scale) # scale_X # scale between [0;1] min_max_scaler = preprocessing.MinMaxScaler() X_scalled = min_max_scaler.fit_transform(matrix_to_scale) X_scalled # - # ML learning phase # ----- # Now we are preparing to matrixes to for analysis by transforming them to ndarray. One will be data, another one will be our target: X = dataframe.as_matrix() y = target.as_matrix() # Just to be sure, lets see how prepared data looks: X y # Now we will split data to train/test: from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=92) len(X_train), len(X_test) # Now we are prepared. Lets check some models and see results. 
# # Algortihm benchmark # -------- # I used the following chart to select different regression algorithms for checking. from IPython.display import Image Image("http://scikit-learn.org/dev/_static/ml_map.png") # # # RandomForestRegressor # ---------------- # First lets see how well RandomForestRegressor performs. # + # Fit a Random Forest from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() model.fit(X_train, y_train) y_pred = model.predict(X_test) output=[] # comparing real values to predicted for answ, pred in zip(y_test, y_pred): output.append( (answ,pred) ) display(output[:100]) # - # Error rates of this model: from sklearn.metrics import mean_absolute_error, mean_squared_error print('MAE: {0}'.format(mean_absolute_error(y_test,y_pred))) print ('RMSE: {0}'.format(mean_squared_error(y_test,y_pred))) # SGD Regressor # ------------- # + # from sklearn import linear_model # model = linear_model.SGDRegressor() # model.fit(X_train, y_train) # y_pred = model.predict(X_test) # output=[] # # comparing real values to predicted # for answ, pred in zip(y_test, y_pred): # output.append( (answ,pred) ) # display(output[:100]) # + # print('MAE: {0}'.format(mean_absolute_error(y_test,y_pred))) # print ('RMSE: {0}'.format(mean_squared_error(y_test,y_pred))) # - # Cearly, not very good # # ElasticNet # ----------- # + # from sklearn import linear_model # model = linear_model.Lasso(alpha=0.5) # model.fit(X_train, y_train) # y_pred = model.predict(X_test) # output=[] # # comparing real values to predicted # for answ, pred in zip(y_test, y_pred): # output.append( (answ,pred) ) # display(output[:100]) # + # print('MAE: {0}'.format(mean_absolute_error(y_test,y_pred))) # print ('RMSE: {0}'.format(mean_squared_error(y_test,y_pred))) # - # Ridge # ----- # + # from sklearn import linear_model # model = linear_model.Ridge(alpha=1.0) # model.fit(X_train, y_train) # y_pred = model.predict(X_test) # output=[] # # comparing real values to predicted # for answ, 
pred in zip(y_test, y_pred): # output.append( (answ,pred) ) # display(output[:100]) # + # print('MAE: {0}'.format(mean_absolute_error(y_test,y_pred))) # print ('RMSE: {0}'.format(mean_squared_error(y_test,y_pred))) # - # Conclusion # ------ # So random forest is the only ML algorithm that give predictions not so far away. # # # Resources # ------- # - http://stackoverflow.com/questions/11023411/how-to-import-csv-data-file-into-scikit-learn # - http://pandas.pydata.org/ # - http://scikit-learn.org/stable/modules/scaling_strategies.html # - http://stackoverflow.com/questions/9590114/importance-of-pca-or-svd-in-machine-learning # - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html # - http://scikit-learn.org/stable/modules/feature_selection.html#variance-threshold # - http://stackoverflow.com/questions/29298973/removing-features-with-low-variance-scikit-learn
6,970
/basicPython/03-jupyter.ipynb
cbb870856bb6cddff60ea7ae8e3e058ad41b2781
[]
no_license
Raekyun/fastCampus
https://github.com/Raekyun/fastCampus
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,472
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pandas_datareader import data,wb import pandas_datareader as pdr import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline import datetime import os from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot import cufflinks as cf init_notebook_mode(connected = False) cf.go_offline() os.environ["Tingo API Key"] = "" start = datetime.date(2006,1,1) end = datetime.date(2016,1,1) bac = data.DataReader(name='BAC',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') bac.head() bac.info() bac.reset_index(level=0,drop=True,inplace=True) bac.head() cdf = data.DataReader(name='C',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') cdf.head() cdf.reset_index(level=0,drop=True,inplace=True) gs = data.DataReader(name='GS',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') gs.head() gs.reset_index(level=0,drop=True,inplace=True) jpm = data.DataReader(name='JPM',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') jpm.head() jpm.reset_index(level=0,drop=True,inplace=True) ms = data.DataReader(name='MS',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') ms.head() ms.reset_index(level=0,drop=True,inplace=True) wfc = data.DataReader(name='WFC',api_key=os.getenv("Tingo API Key"),start=start,end=end,data_source='tiingo') wfc.head() wfc.reset_index(level=0,drop=True,inplace=True) ticker_symbols = "BAC C GS JPM MS WFC".split() bank_stocks = pd.concat((bac,cdf,gs,jpm,ms,wfc),axis=1,keys=ticker_symbols) bank_stocks.columns.names = ['Bank Ticker','Stock Info'] bank_stocks.head() bank_stocks.xs('close',axis=1,level=1).max() returns = pd.DataFrame() for ticker in 
ticker_symbols: returns[ticker+" Returns"] = bank_stocks[ticker]['close'].pct_change() returns.head() bank_stocks.xs('close',axis=1,level=1).pct_change() sns.pairplot(returns) returns.idxmin() returns.idxmax() returns.std() returns[returns.index.year == 2015].std() sns.set_style('whitegrid') sns.distplot(returns[returns.index.year == 2015]['MS Returns'],bins=50,color='green') sns.distplot(returns[returns.index.year == 2008]['C Returns'],bins=50,color='red') bank_stocks.xs('close',axis=1,level=1).plot(ylim=(0,600),figsize=(12,5)) bank_stocks.xs('close',axis=1,level=1).plot() plt.legend(loc=(1.04,0.5)) bank_stocks.xs('close',axis=1,level=1).iplot() bank_stocks.xs('close',axis=1,level=1)['BAC'][bank_stocks.index.year == 2008].plot(label='BAC Close') bank_stocks.xs('close',axis=1,level=1)['BAC'][bank_stocks.index.year == 2008].rolling(window=30).mean().plot(figsize=(12,5),label='30 day Avg') plt.legend() plt.figure(figsize=(10,6)) sns.heatmap(bank_stocks.xs('close',axis=1,level=1).corr(),annot=True,robust=True) # plt.figure(figsize=(10,6)) sns.clustermap(bank_stocks.xs('close',axis=1,level=1).corr(),annot=True) bank_stocks.xs('close',axis=1,level=1).corr().iplot(kind='heatmap',colorscale='Blues') cf.colors.scales('all') bac[bac.index.year==2015].iplot('candle') ms[ms.index.year==2015]['close'].ta_plot(study='sma',periods=(13,21,55)) # ms[ms.index.year==2015]['close'].ta_plot(study='sma',periods=21) bac[bac.index.year==2015]['close'].ta_plot(study='boll',periods=14)
3,573
/lsa-and-sentiment-classification-in-python.ipynb
568eab3b8587684199474f7b5290f03e9b142554
[]
no_license
Branden-Kang/Natural-Language-Processing
https://github.com/Branden-Kang/Natural-Language-Processing
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
14,164
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # - # Thanks to [Reference](https://towardsdatascience.com/latent-semantic-analysis-sentiment-classification-with-python-5f657346f6a3) # + from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from sklearn.feature_selection import chi2 import matplotlib.pyplot as plt # %matplotlib inline from sklearn.feature_extraction.text import CountVectorizer from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import Pipeline from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report # + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" df = 
pd.read_csv('/kaggle/input/amazon-fine-food-reviews/Reviews.csv') df.head() # - # # TF-IDF tfidf = TfidfVectorizer() tfidf.fit(df['Text']) X = tfidf.transform(df['Text']) df['Text'][2] print([X[2, tfidf.vocabulary_['wardrobe']]]) print([X[2, tfidf.vocabulary_['chewy']]]) print([X[2, tfidf.vocabulary_['story']]]) # # Let's do Sentiment Classification!! df.dropna(inplace=True) df[df['Score'] != 3] df['Positivity'] = np.where(df['Score'] > 3, 1, 0) cols = ['Id', 'ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Score', 'Time', 'Summary'] df.drop(cols, axis=1, inplace=True) df.head() # # Split the dataset X = df.Text y = df.Positivity X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0) print("Train set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(X_train), (len(X_train[y_train == 0]) / (len(X_train)*1.))*100, (len(X_train[y_train == 1]) / (len(X_train)*1.))*100)) def accuracy_summary(pipeline, X_train, y_train, X_test, y_test): sentiment_fit = pipeline.fit(X_train, y_train) y_pred = sentiment_fit.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print("accuracy score: {0:.2f}%".format(accuracy*100)) return accuracy cv = CountVectorizer() rf = RandomForestClassifier(class_weight="balanced") n_features = np.arange(10000,20001,10000) def nfeature_accuracy_checker(vectorizer=cv, n_features=n_features, stop_words=None, ngram_range=(1, 1), classifier=rf): result = [] print(classifier) print("\n") for n in n_features: vectorizer.set_params(stop_words=stop_words, max_features=n, ngram_range=ngram_range) checker_pipeline = Pipeline([ ('vectorizer', vectorizer), ('classifier', classifier) ]) print("Test result for {} features".format(n)) nfeature_accuracy = accuracy_summary(checker_pipeline, X_train, y_train, X_test, y_test) result.append((n,nfeature_accuracy)) return result tfidf = TfidfVectorizer() print("Result for trigram with stop words (Tfidf)\n") feature_result_tgt = 
nfeature_accuracy_checker(vectorizer=tfidf,ngram_range=(1, 3)) cv = CountVectorizer(max_features=30000,ngram_range=(1, 3)) pipeline = Pipeline([ ('vectorizer', cv), ('classifier', rf) ]) sentiment_fit = pipeline.fit(X_train, y_train) y_pred = sentiment_fit.predict(X_test) print(classification_report(y_test, y_pred, target_names=['negative','positive'])) # # Chi-Squared for Feature Selection tfidf = TfidfVectorizer(max_features=30000,ngram_range=(1, 3)) X_tfidf = tfidf.fit_transform(df.Text) y = df.Positivity chi2score = chi2(X_tfidf, y)[0] plt.figure(figsize=(12,8)) scores = list(zip(tfidf.get_feature_names(), chi2score)) chi2 = sorted(scores, key=lambda x:x[1]) topchi2 = list(zip(*chi2[-20:])) x = range(len(topchi2[1])) labels = topchi2[0] plt.barh(x,topchi2[1], align='center', alpha=0.5) plt.plot(topchi2[1], x, '-o', markersize=5, alpha=0.8) plt.yticks(x, labels) plt.xlabel('$\chi^2$') plt.show();
5,219
/agrupamento_dbscan/dbscan_kmeans_hierarquico.ipynb
6a5bb457eff4a8bf4782135310e4d4f3622547fa
[]
no_license
johnnycosta86/project_sklearn
https://github.com/johnnycosta86/project_sklearn
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
188,967
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.cluster import AgglomerativeClustering from sklearn.cluster import DBSCAN from sklearn import datasets import numpy as np # + pycharm={"name": "#%%\n", "is_executing": false} x, y = datasets.make_moons(n_samples = 1500, noise = 0.09) plt.scatter(x[:, 0], x[:, 1], s = 5) # + pycharm={"name": "#%%\n", "is_executing": false} cores = np.array(['red', 'blue']) # + pycharm={"name": "#%%\n", "is_executing": false} kmeans = KMeans(n_clusters = 2) previsoes = kmeans.fit_predict(x) plt.scatter(x[:, 0], x[:, 1], s = 5, color = cores[previsoes]) # + pycharm={"name": "#%%\n", "is_executing": false} hc = AgglomerativeClustering(n_clusters = 2, affinity = 'euclidean', linkage = 'ward') previsoes = hc.fit_predict(x) plt.scatter(x[:, 0], x[:, 1], s = 5, color = cores[previsoes]) # + pycharm={"name": "#%%\n", "is_executing": false} dbscan = DBSCAN(eps = 0.1) previsoes = dbscan.fit_predict(x) plt.scatter(x[:, 0], x[:, 1], s = 5, color = cores[previsoes])
1,338
/Finance/Naver/.ipynb_checkpoints/naverFS재무제표(년간)(분기추가) ver1.1-checkpoint.ipynb
07d5a096b4f766c3db104da5ca929dd438910227
[]
no_license
KimMyungSam/2ndBUS
https://github.com/KimMyungSam/2ndBUS
2
2
null
null
null
null
Jupyter Notebook
false
false
.py
12,301
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="f8818773-a629-472e-9cc1-54fb35e99670" _uuid="b3df1cdd81022580f5a7613271b1309641b26157" colab_type="text" id="lxhqbhwbbzkG" # # Skillbox — Мастер-класс: Разведочный анализ данных + Подготовка отчётов — Kiva.org # *** # + [markdown] colab_type="text" id="vBqFivbiMhAK" # # Новый раздел # + [markdown] colab_type="text" id="sb2LxV_0bzng" # # 4. Домашнее задание # *** # - # https://www.kaggle.com/kiva/data-science-for-good-kiva-crowdfunding/download # + colab={} colab_type="code" id="0Sw1jasUMtpY" import numpy as np import pandas as pd import math #import missingno as msno from datetime import datetime, timedelta import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # + colab={} colab_type="code" id="fcTAuqVtMtXj" df_kiva_loans = pd.read_csv("kiva_loans.csv") df_mpi_reg_loc = pd.read_csv("kiva_mpi_region_locations.csv") df_loan_theme_ids = pd.read_csv("loan_theme_ids.csv") df_loan_themes_by_reg = pd.read_csv("loan_themes_by_region.csv") # + [markdown] colab_type="text" id="yAGPSgFVbznh" # ## 4.1. Самостоятельная работа # *** # + [markdown] colab_type="text" id="04Waz3uubznh" # - А в каких странах преобладают мужчины? Группа людей? (п. 2.2.2.) # - Изучите несколько займов с очень большими суммами. Что можно сказать о назначении этих заявок? В каких странах они оформлены? Каков гендерный состав заявителей? (п. 2.4.2.) # - Попробуйте найти связь между странами (регионами), полом заёмщика и суммой заявки. Какие выводы можно сделать из этих данных? (п. 2.4.3.) # - # - А в каких странах преобладают мужчины? Группа людей? (п. 2.2.2.) 
# + print('Страны, в которых преобладают мужчины: ') group_country = df_kiva_loans[df_kiva_loans.borrower_genders.notna()]\ .groupby('country')\ .agg(is_male_more = ('borrower_genders',\ lambda x: sum([i.count('male') - 2 * i.count('female') for i in list(x.values)]) > 0)) group_country[group_country.is_male_more].drop(columns=['is_male_more']) # + print('Страны, в которых преобладают группы людей: ') group_country = df_kiva_loans[df_kiva_loans.borrower_genders.notna()]\ .groupby('country')\ .agg(is_group_more = ('borrower_genders',\ lambda x: sum([-1 if i == 'male' or i == 'female' else 1 for i in list(x.values)]) > 0)) group_country[group_country.is_group_more].drop(columns=['is_group_more']) # - # # # - Изучите несколько займов с очень большими суммами. Что можно сказать о назначении этих заявок? В каких странах они оформлены? Каков гендерный состав заявителей? (п. 2.4.2.) # + df_kiva_loans['sex_type'] = [x if x in ['male', 'female'] else 'group' for x in df_kiva_loans.borrower_genders] df = df_kiva_loans.sort_values('funded_amount', ascending = False).head(100).copy() def top_100_bar(column, string, rot = 0): plt.figure(figsize=(20,10)) plt.title("Топ 100 займов " + string, fontsize=16) plt.tick_params(labelsize=14) plt.xticks(rotation=rot) sns.barplot(y=column.values, x=column.index, alpha=0.6) plt.show() # - top_100_bar(df.sector.value_counts(), 'по секторам экономики') top_100_bar(df.country.value_counts(), 'по странам', 90) top_100_bar(df.sex_type.value_counts(), 'по гендерам') # + [markdown] colab_type="text" id="4J8NsnCsbznh" # ## 4.2. Домашняя работа # *** # + [markdown] colab_type="text" id="Ev97SUmzbznh" # Расширьте наш отчёт исследованием следующих вопросов: # # - Зависимость пола заёмщика от региона планеты, сектора экономики. # - Влияние такого атрибута займа, как **Количество кредиторов** (lender_count), на суммы и сроки займов. # - Влияние показателя **MPI** (многомерный индекс бедности) в разных странах на суммы займов и сроки погашения. 
# - Зависимости таких метрик, как «Сумма займа», «Срок займа», «Время финансирования заявки», «Ежемесячный платёж», в разрезе **макрорегионов**. # - df_country_wr = pd.DataFrame(df_mpi_reg_loc.groupby(['country', 'world_region'])\ .groups.keys(), columns=['country', 'world_region']).dropna() df = pd.merge(df_kiva_loans, df_country_wr, how='inner', on=['country']) group_by_region = df.groupby('world_region') df = pd.DataFrame(columns=['world_region', 'male', 'female', 'group']) for name, group in group_by_region: df = df.append(group.sex_type.value_counts().T) df.world_region = group_by_region.groups.keys() df = df.reset_index(drop=True) df.plot(x="world_region", y=['male', 'female', 'group'], kind="bar", title='Зависимость пола заёмщика от региона планеты') df = df_kiva_loans.copy() group_by_sector = df.groupby('sector') df = pd.DataFrame(columns=['sector', 'male', 'female', 'group']) for name, group in group_by_sector: df = df.append(group.sex_type.value_counts().T) df.sector = group_by_sector.groups.keys() df = df.reset_index(drop=True) df.plot(x="sector", y=['male', 'female', 'group'], kind="bar", title='Зависимость пола заёмщика от сектора экономики') # Влияние такого атрибута займа, как Количество кредиторов (lender_count), на суммы и сроки займов. # + df = df_kiva_loans[['lender_count', 'term_in_months', 'funded_amount']] ax = sns.heatmap(df.corr().lender_count.to_frame(), xticklabels=df.lender_count.to_frame().columns, yticklabels=df.corr().columns, cmap='RdYlGn', center=0, annot=True) i, k = ax.get_ylim() ax.set_ylim(i+0.5, k-0.5) plt.yticks(rotation=0) plt.show() # - # Влияние показателя MPI (многомерный индекс бедности) в разных странах на суммы займов и сроки погашения. 
df_mpi_grouped = df_mpi_reg_loc\ .groupby(['country'])['MPI']\ .mean()\ .fillna(0)\ .reset_index() # + df = df_kiva_loans.merge(df_mpi_grouped, how='left', on='country') df = df[['MPI', 'term_in_months', 'funded_amount']] ax = sns.heatmap(df.corr().MPI.to_frame(), xticklabels=df.MPI.to_frame().columns, yticklabels=df.corr().columns, cmap='RdYlGn', center=0, annot=True) i, k = ax.get_ylim() ax.set_ylim(i+0.5, k-0.5) plt.yticks(rotation=0) plt.show() # - df.columns df_country_wr = pd.DataFrame(df_mpi_reg_loc.groupby(['country', 'world_region'])\ .groups.keys(), columns=['country', 'world_region']).dropna() df = pd.merge(df_kiva_loans, df_country_wr, how='inner', on=['country']) df['monthly_repayment'] = df['loan_amount'] / df['term_in_months'] df.funded_time = pd.to_datetime(df.funded_time).astype(np.int64) df = df[['funded_amount', 'term_in_months', 'funded_time', 'monthly_repayment', 'world_region']] for name, group in df.groupby('world_region'): correl = group.drop(columns=['world_region']).corr() ax = sns.heatmap(correl, xticklabels=correl.columns, yticklabels=correl.columns, cmap='RdYlGn', center=0, annot=True) i, k = ax.get_ylim() ax.set_ylim(i+0.5, k-0.5) plt.yticks(rotation=0) plt.title(name) plt.show()
6,997
/probability_distributions.ipynb
bfbcbb18e061646ba0cf6cfcb365fe6c9d4614bf
[]
no_license
paulallen825/python-exercises
https://github.com/paulallen825/python-exercises
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
28,519
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import numpy as np from scipy import stats import seaborn as sns import pandas as pd # + #A bank found that the average number of cars waiting during the noon hour at a drive-up #window follows a Poisson distribution with a mean of 2 cars. Make a chart #of this distribution and answer these questions concerning the probability of cars waiting #at the drive-up window. #What is the probability that no cars drive up in the noon hour? #What is the probability that 3 or more cars come through the drive through? #How likely is it that the drive through gets at least 1 car? # - data_poisson = poisson.rvs(mu=2, size=100) ax = sns.histplot(data_poisson, bins=30, kde=False, color='skyblue') ax.set(xlabel='Poisson Distribution', ylabel='Frequency') stats.poisson(2).pmf(0) stats.poisson(2).sf(2) stats.poisson(2).sf(0) # + #Grades of State University graduates are normally distributed with a mean of 3.0 and a #standard deviation of .3. Calculate the following: #What grade point average is required to be in the top 5% of the graduating class? #What GPA constitutes the bottom 15% of the class? #An eccentric alumnus left scholarship money for students in the third #decile from the bottom of their class. Determine the range of the third decile. #Would a student with a 2.8 grade point average qualify for this scholarship? #If I have a GPA of 3.5, what percentile am I in? 
# + m = 3 sd=.3 grades = stats.norm(m,sd) # - top_5_percent = grades.ppf(.95) top_5_percent # + #grades.quantile(.95).values # + bottom_15_percent = grades.ppf(.15) bottom_15_percent # - bottom_30_percent = grades.ppf(.30) bottom_30_percent bottom_20_percent = grades.ppf(.20) bottom_20_percent grades.cdf(3.5) # + #A marketing website has an average click-through rate of 2%. #One day they observe 4326 visitors and 97 click-throughs. #How likely is it that this many people or more click through? # - ctr= .02 visitors = 4326 ct = 97 stats.binom(4326, .02).sf(96) # + #You are working on some statistics homework consisting of 100 questions where all of the #answers are a probability rounded to the hundreths place. #Looking to save time, you put down random probabilities as the answer to each question. #What is the probability that at least one of your first 60 answers is correct? # + #stats.binom(60, 1/101).sf(0) prob_success = .01 n_trials = 60 desired_success = 1 stats.binom(n_trials, prob_success).sf(desired_success -1) # + #The codeup staff tends to get upset when the student break area is not cleaned up. #Suppose that there's a 3% chance that any one student cleans the break area when they visit it, #and, on any given day, about 90% of the 3 active cohorts of 22 students visit the break area. #How likely is it that the break area gets cleaned up each day? #How likely is it that it goes two days without getting cleaned up? All week? 
# + #3% chance any student will clean it #90% of 3 active cohorts of 22 students #59.4 students each day #5 days stats.binom(59.4, .03).sf(1) # + #m = 15 #sd =3 #2min each order #10 min to get food #will have 15 min left to eat #1hour or 60 min lunch prob_success = .03 n_trials =(22*3)*.9 desired_successes =1 stats.binom(n_trials, prob_success).sf(desired_successes-1) # - #not cleaned for 2 days #the minus one is because its odds it not being cleaned #meaning it has an 83% chance to be cleaned and 17% chance to not be cleaned then time by 2 for 2 days (1-stats.binom(n_trials, prob_success).sf(desired_successes-1))**2 #five days not being cleaned (1-stats.binom(n_trials, prob_success).sf(desired_successes-1))**5 # + food = stats.norm(m,sd).cdf(17) food # + mean_customers_in_line =15 sd_customers_in_line = 3 time_per_order=2 time_to_get_food =10 time_to_eat_food = 15 total_time=60 # - max_time_in_line = total_time - time_to_get_food- time_to_eat_food max_customers_in_line = # + from env import host, user, password url = f'mysql+pymysql://{user}:{password}@{host}/employees' # - employees =pd.read_sql('Select * FROM employees LIMIT 5 OFFSET 50', url) employees salaries =pd.read_sql("Select * FROM salaries LIMIT 5 OFFSET 50", url) salaries salaries.salary.mean() salaries.salary.std() p = stats.norm(87472, 1880).cdf(60000) p q = stats.norm(87472, 1880).sf(95000) q r = stats.norm(87472, 1880).sf(65000) r s = stats.norm(87472, 1880).cdf(80000) s t = stats.norm(87472, 1880).ppf(.95) t
4,761
/notebooks/pixel-drill-example.ipynb
d2f62b2a6235890e56f143491f7b69bd97b60812
[ "Apache-2.0" ]
permissive
opendatacube/odc-tools
https://github.com/opendatacube/odc-tools
46
27
Apache-2.0
2023-09-11T23:59:07
2023-08-01T01:15:33
Python
Jupyter Notebook
false
false
.py
25,300
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import pyplot as plt import numpy as np from timeit import default_timer as t_now # + from datacube import Datacube from datacube.storage.storage import measurement_paths from dea.io.pdrill import PixelDrill from dea.aws.rioworkerpool import RioWorkerPool dc = Datacube(env='wofs') # - # ## Setup PixelDrill Worker Pool nthreads = 32 pool = RioWorkerPool(nthreads=nthreads, max_header_sz_kb=32) pdrill = PixelDrill(pool) # ## Query Datacube # # - Find datasets # - Sort them by time # - Extract urls for a band of interest (`water` in this case) # - Extract timestamps lon_lat = (148.02310311982876, -35.33091506569685) # %%time lon, lat = lon_lat dss = dc.find_datasets(product='wofs_albers', lon=lon, lat=lat) # %%time dss = sorted(dss, key=lambda ds: ds.center_time) tt = [ds.center_time for ds in dss] urls = [measurement_paths(ds)['water'] for ds in dss] # ## Warmup The Pool # # This is not needed in the real app. Very first file `open` takes significantly longer than consequent `open` calls as GDAL configures various per-thread structures. Since in the real app we expect to re-use worker threads, this cost will get amortised across many requests. But to get more accurate timing measures in the Load Pixels section we perform "pool warmup here". 
# %%time pix_warmup = pdrill.lazy_read([urls[-1]]*nthreads, pixel=(0,0)).result() # ## Load Pixels # %%time t0 = t_now() pix = pdrill.lazy_read(urls, lonlat=lon_lat) # this should return quickly t1 = t_now() pix = pix.result() # This will block until all files are loaded t2 = t_now() # + t_total = t2 - t0 fps = len(urls)/t_total print(''' - {:d} worker threads - {:,d} files - {:.3f} seconds to completion ({:.1f} ms to schedule) - {:.1f} fps ({:.1f} per thread) '''.format(nthreads, len(urls), t_total, (t1-t0)*1000, fps, fps/nthreads)) # - fig,ax = plt.subplots(1,1, figsize=(16,4)) plt.plot(tt, pix, '.'); # -----------------------------------------------
2,296
/Dollars to Rupees.ipynb
fb78a2c74f884d68f5f274d9d4fba7496c3e615c
[]
no_license
ZaranaGajjar/practical-python
https://github.com/ZaranaGajjar/practical-python
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
927
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["pdf-title"] # # k-Nearest Neighbor (kNN) exercise # # *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* # # The kNN classifier consists of two stages: # # - During training, the classifier takes the training data and simply remembers it # - During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples # - The value of k is cross-validated # # In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code. # + tags=["pdf-ignore"] # Run some setup code for this notebook. import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # + tags=["pdf-ignore"] # Load the raw CIFAR-10 data. 
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # + tags=["pdf-ignore"] # Visualize some examples from the dataset. # We show a few examples of training images from each class. classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # + tags=["pdf-ignore"] # Subsample the data for more efficient code execution in this exercise num_training = 5000 mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] num_test = 500 mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] # Reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) print(X_train.shape, X_test.shape) # + tags=["pdf-ignore"] from cs231n.classifiers import KNearestNeighbor # Create a kNN classifier instance. 
# Remember that training a kNN classifier is a noop: # the Classifier simply remembers the data and does no further processing classifier = KNearestNeighbor() classifier.train(X_train, y_train) # - # We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps: # # 1. First we must compute the distances between all test examples and all train examples. # 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label # # Lets begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example. # # **Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.** # # First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time. # + # Open cs231n/classifiers/k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print(dists.shape) # - # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists, interpolation='none') plt.show() # + [markdown] tags=["pdf-inline"] # **Inline Question 1** # # Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.) # # - What in the data is the cause behind the distinctly bright rows? 
# - What causes the columns? # # $\color{blue}{\textit Your Answer:}$ *fill this in.* # # # + # Now implement the function predict_labels and run the code below: # We use k = 1 (which is Nearest Neighbor). y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) # - # You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`: y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) # You should expect to see a slightly better performance than with `k = 1`. # + [markdown] tags=["pdf-inline"] # **Inline Question 2** # # We can also use other distance metrics such as L1 distance. # For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$, # # the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$ # And the pixel-wise mean $\mu_{ij}$ across all images is # $$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$ # The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly. # # Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply. # 1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.) # 2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.) # 3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$. # 4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$. # 5. 
Rotating the coordinate axes of the data. # # $\color{blue}{\textit Your Answer:}$ # # # $\color{blue}{\textit Your Explanation:}$ # # + tags=["pdf-ignore-input"] # Now lets speed up distance matrix computation by using partial vectorization # with one loop. Implement the function compute_distances_one_loop and run the # code below: dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we make sure that it # agrees with the naive implementation. There are many ways to decide whether # two matrices are similar; one of the simplest is the Frobenius norm. In case # you haven't seen it before, the Frobenius norm of two matrices is the square # root of the squared sum of differences of all elements; in other words, reshape # the matrices into vectors and compute the Euclidean distance between them. difference = np.linalg.norm(dists - dists_one, ord='fro') print('One loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # + tags=["pdf-ignore-input"] # Now implement the fully vectorized version inside compute_distances_no_loops # and run the code dists_two = classifier.compute_distances_no_loops(X_test) # check that the distance matrix agrees with the one we computed before: difference = np.linalg.norm(dists - dists_two, ord='fro') print('No loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # + tags=["pdf-ignore-input"] # Let's compare how fast the implementations are def time_function(f, *args): """ Call a function f with args and return the time (in seconds) that it took to execute. 
""" import time tic = time.time() f(*args) toc = time.time() return toc - tic two_loop_time = time_function(classifier.compute_distances_two_loops, X_test) print('Two loop version took %f seconds' % two_loop_time) one_loop_time = time_function(classifier.compute_distances_one_loop, X_test) print('One loop version took %f seconds' % one_loop_time) no_loop_time = time_function(classifier.compute_distances_no_loops, X_test) print('No loop version took %f seconds' % no_loop_time) # You should see significantly faster performance with the fully vectorized implementation! # NOTE: depending on what machine you're using, # you might not see a speedup when you go from two loops to one loop, # and might even see a slow-down. # - # ### Cross-validation # # We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation. # + tags=["code"] num_folds = 5 k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100] X_train_folds = [] y_train_folds = [] ################################################################################ # TODO: # # Split up the training data into folds. After splitting, X_train_folds and # # y_train_folds should each be lists of length num_folds, where # # y_train_folds[i] is the label vector for the points in X_train_folds[i]. # # Hint: Look up the numpy array_split function. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** X_train_folds = np.array_split(X_train, num_folds)#, axis = 1) y_train_folds = np.array_split(y_train, num_folds)#, axis = 1) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # A dictionary holding the accuracies for different values of k that we find # when running cross-validation. 
After running cross-validation, # k_to_accuracies[k] should be a list of length num_folds giving the different # accuracy values that we found when using that value of k. k_to_accuracies = {} ################################################################################ # TODO: # # Perform k-fold cross validation to find the best value of k. For each # # possible value of k, run the k-nearest-neighbor algorithm num_folds times, # # where in each case you use all but one of the folds as training data and the # # last fold as a validation set. Store the accuracies for all fold and all # # values of k in the k_to_accuracies dictionary. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** for k in k_choices: for i in range(num_folds): X_test_fold = np.array(X_train_folds[i]) y_test_fold = np.array(y_train_folds[i]) X_train_fold = np.concatenate([X for idx, X in enumerate(X_train_folds) if idx != i]) y_train_fold = np.concatenate([y for idx, y in enumerate(y_train_folds) if idx != i]) classifier.train(X_train_fold, y_train_fold) dists_fold = classifier.compute_distances_no_loops(X_test_fold) y_pred_fold = classifier.predict_labels(dists_fold, k) num_correct = np.sum(y_test_fold == y_pred_fold) accuracy_f = float(num_correct) / len(y_test_fold) if k in k_to_accuracies.keys(): k_to_accuracies[k].append(accuracy_f) else: k_to_accuracies[k] = [accuracy_f] # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out the computed accuracies for k in sorted(k_to_accuracies): for accuracy in k_to_accuracies[k]: print('k = %d, accuracy = %f' % (k, accuracy)) # + tags=["pdf-ignore-input"] # plot the raw observations for k in k_choices: accuracies = k_to_accuracies[k] plt.scatter([k] * len(accuracies), accuracies) # plot the trend line with error bars that correspond to standard deviation accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())]) 
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())]) plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std) plt.title('Cross-validation on k') plt.xlabel('k') plt.ylabel('Cross-validation accuracy') plt.show() # + # Based on the cross-validation results above, choose the best value for k, # retrain the classifier using all the training data, and test it on the test # data. You should be able to get above 28% accuracy on the test data. best_k = 8 max_acc = 0 for k, acc_list in k_to_accuracies.items(): classifier = KNearestNeighbor() classifier.train(X_train, y_train) y_test_pred = classifier.predict(X_test, k=best_k) # Compute and display the accuracy num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) # + [markdown] tags=["pdf-inline"] # **Inline Question 3** # # Which of the following statements about $k$-Nearest Neighbor ($k$-NN) are true in a classification setting, and for all $k$? Select all that apply. # 1. The decision boundary of the k-NN classifier is linear. # 2. The training error of a 1-NN will always be lower than that of 5-NN. # 3. The test error of a 1-NN will always be lower than that of a 5-NN. # 4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set. # 5. None of the above. # # $\color{blue}{\textit Your Answer:}$ # # # $\color{blue}{\textit Your Explanation:}$ # #
15,293
/04-pytorch基本操作-官网加优达练习/Part 4 - Fashion-MNIST Exercise.ipynb
1bea8691fc9db3895cfef313665376326fed7f64
[]
no_license
Parker-Lyu/Uda-ComputerVersion
https://github.com/Parker-Lyu/Uda-ComputerVersion
0
2
null
2022-11-03T05:33:54
2019-09-15T14:38:28
Jupyter Notebook
Jupyter Notebook
false
false
.py
10,217
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Classifying Fashion-MNIST # # Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. # # <img src='assets/fashion-mnist-sprite.png' width=500px> # # In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebook though as you work through this. # # First off, let's load the dataset through torchvision. # + import torch from torchvision import datasets, transforms import helper # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) # Download and load the training data trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) # - # Here we can see one of the images. 
% matplotlib inline image, label = next(iter(trainloader)) helper.imshow(image[0,:]); # With the data loaded, it's time to import the necessary packages. # + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import numpy as np import time import torch from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms import helper # - # ## Building the network # # Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits from the forward pass. It's up to you how many layers you add and the size of those layers. # + # TODO: Define your network architecture here # - # # Train the network # # Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`). # # Then write the training code. Remember the training pass is a fairly straightforward process: # # * Make a forward pass through the network to get the logits # * Use the logits to calculate the loss # * Perform a backward pass through the network with `loss.backward()` to calculate the gradients # * Take a step with the optimizer to update the weights # # By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4. # TODO: Create the network, define the criterion and optimizer # TODO: Train the network here # + # Test out your network! 
dataiter = iter(testloader) images, labels = dataiter.next() img = images[0] # Convert 2D image to 1D vector img = img.resize_(1, 784) # TODO: Calculate the class probabilities (softmax) for img ps = # Plot the image and probabilities helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion') # - # Now that your network is trained, you'll want to save it to disk so you can load it later instead of training it again. Obviously, it's impractical to train a network every time you need one. In practice, you'll train it once, save the model, then reload it for further training or making predictions. In the next part, I'll show you how to save and load trained models. der # # - The below cell is in code format, but we can use the IPython.display module to write Markdown formatted code as Python strings, and then display it as Markdown. # # # - In the below cell, in between the two ` '''` statements, write Markdown code that would produce the **largest possible** Markdown header. The header text should be "My first header". # # After you run your answer code cell and the cell below, the output of the cell below the answer cell should look like this: # # YOUR CODE HERE # # My first header # + nbgrader={"grade": false, "grade_id": "p4-answer", "locked": false, "schema_version": 3, "solution": true} md=''' #BEGIN SOLUTION # My first header #END SOLUTION ''' # + nbgrader={"grade": false, "grade_id": "cell-f1c9d9d6428d2285", "locked": true, "schema_version": 3, "solution": false} #Let's view the Markdown from IPython.display import Markdown display(Markdown(md)) # + nbgrader={"grade": true, "grade_id": "p4-test", "locked": true, "points": 1, "schema_version": 3, "solution": false} text = md.find('# My first header') assert_not_equal(text,-1,msg='Make sure you are using the largest header.')
5,683
/great_expectations/uncommitted/edit_checkpoint_yelp_checkpoint.ipynb
0d9cca8e3019e741007428a056beccd110ab0ec0
[]
no_license
ismaildawoodjee/Great-Expectations-for-JSON
https://github.com/ismaildawoodjee/Great-Expectations-for-JSON
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
10,851
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Create Your Checkpoint # Use this notebook to configure a new Checkpoint and add it to your project: # # **Checkpoint Name**: `yelp_checkpoint` # + from ruamel.yaml import YAML import great_expectations as ge from pprint import pprint yaml = YAML() context = ge.get_context() # - # # Create a Checkpoint Configuration # # **If you are new to Great Expectations or the Checkpoint feature**, you should start with SimpleCheckpoint because it includes default configurations like a default list of post validation actions. # # In the cell below we have created a sample Checkpoint configuration using **your configuration** and **SimpleCheckpoint** to run a single validation of a single Expectation Suite against a single Batch of data. # # To keep it simple, we are just choosing the first available instance of each of the following items you have configured in your Data Context: # * Datasource # * DataConnector # * DataAsset # * Partition # * Expectation Suite # # Of course this is purely an example, you may edit this to your heart's content. # # **My configuration is not so simple - are there more advanced options?** # # Glad you asked! Checkpoints are very versatile. For example, you can validate many Batches in a single Checkpoint, validate Batches against different Expectation Suites or against many Expectation Suites, control the specific post-validation actions based on Expectation Suite / Batch / results of validation among other features. 
Check out our documentation on Checkpoints for more details and for instructions on how to implement other more advanced features including using the **Checkpoint** class: # - https://docs.greatexpectations.io/en/latest/reference/core_concepts/checkpoints_and_actions.html # - https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_create_a_new_checkpoint.html # - https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_create_a_new_checkpoint_using_test_yaml_config.html # + my_checkpoint_name = "yelp_checkpoint" # This was populated from your CLI command. yaml_config = f""" name: {my_checkpoint_name} config_version: 1.0 class_name: SimpleCheckpoint run_name_template: "%Y%m%d-%H%M%S-my-run-name-template" validations: - batch_request: datasource_name: yelp_businesses data_connector_name: default_inferred_data_connector_name data_asset_name: yelp_validating.json data_connector_query: index: -1 expectation_suite_name: yelp_suite_updated """ print(yaml_config) # - # # Customize Your Configuration # The following cells show examples for listing your current configuration. You can replace values in the sample configuration with these values to customize your Checkpoint. # Run this cell to print out the names of your Datasources, Data Connectors and Data Assets pprint(context.get_available_data_asset_names()) context.list_expectation_suite_names() # # Test Your Checkpoint Configuration # Here we will test your Checkpoint configuration to make sure it is valid. # # This `test_yaml_config()` function is meant to enable fast dev loops. If your configuration is correct, this cell will show a message that you successfully instantiated a Checkpoint. You can continually edit your Checkpoint config yaml and re-run the cell to check until the new config is valid. # # If you instead wish to use python instead of yaml to configure your Checkpoint, you can use `context.add_checkpoint()` and specify all the required parameters. 
my_checkpoint = context.test_yaml_config(yaml_config=yaml_config) # # Review Your Checkpoint # # You can run the following cell to print out the full yaml configuration. For example, if you used **SimpleCheckpoint** this will show you the default action list. print(my_checkpoint.get_substituted_config().to_yaml_str()) # # Add Your Checkpoint # # Run the following cell to save this Checkpoint to your Checkpoint Store. context.add_checkpoint(**yaml.load(yaml_config)) # # Run Your Checkpoint & Open Data Docs(Optional) # # You may wish to run the Checkpoint now and review its output in Data Docs. If so uncomment and run the following cell. context.run_checkpoint(checkpoint_name=my_checkpoint_name) context.open_data_docs()
4,522
/src/example_reinforcement_learning.ipynb
e35cfc5dc2669aa17b624cc0fcbf80c9f94590b8
[ "Apache-2.0" ]
permissive
HCIILAB/Deep-Signature-Transforms
https://github.com/HCIILAB/Deep-Signature-Transforms
2
3
Apache-2.0
2019-11-14T23:50:09
2019-11-13T03:21:58
null
Jupyter Notebook
false
false
.py
3,822
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Signatures and RL # + # %run base.ipynb import gym import matplotlib.pyplot as plt import numpy as np import pandas as pd import sys import torch import tqdm import reinforcement_learning import utils # + env = gym.make('MountainCar-v0') steps = 300 episodes = 2000 # - # ## Random play exploration # A random play exploration, unsurprisingly, does not tend to work. random_policy = reinforcement_learning.RandomPolicy(env) successes = [reinforcement_learning.play(env, random_policy, steps, render=False)[1] for _ in tqdm.trange(episodes, file=sys.stdout)] print("Number of successes: {}".format(sum(successes))) # ## Training a policy sigpolicy = reinforcement_learning.SigPolicy(env) rnnpolicy = reinforcement_learning.RNNPolicy(env) sighistory = reinforcement_learning.train(env, sigpolicy, steps, episodes) rnnhistory = reinforcement_learning.train(env, rnnpolicy, steps, episodes) # ## Plot Results tuple(utils.count_parameters(x) for x in (sigpolicy, rnnpolicy)) # + plt.figure(2, figsize=[10,5]) sigp = pd.Series(sighistory[2]) sigma = sigp.rolling(100).mean() plt.plot(sigma, label="Signatures") rnnp = pd.Series(rnnhistory[2]) rnnma = rnnp.rolling(100).mean() plt.plot(rnnma, label="RNN") plt.xlabel('Generation') plt.ylabel('Final position') plt.legend(mode='expand', bbox_to_anchor=(0, 1, 1, 0), ncol=3, prop={'size': 16}) plt.show() # - # ## Play states, success = reinforcement_learning.play(env, sigpolicy, steps, render=True) print(f"Success: {success}")
1,774
/DL_model.ipynb
04cd7b95363e5c8fb4ad1035a7ef258113463e84
[]
no_license
rajendra99999/customer_DL
https://github.com/rajendra99999/customer_DL
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
46,168
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="adjWWlLXkL6X" colab_type="text" # ## 使用Xception backbone做 Trnasfer Learning # + id="N3ZPv70tkL6c" colab_type="code" colab={} from keras.models import Sequential from keras.layers import Convolution2D from keras.layers import MaxPooling2D from keras.layers import Flatten from keras.layers import Dense from keras.layers import Dropout from keras.preprocessing import image from keras.models import Model from keras.layers import Dense, GlobalAveragePooling2D from keras import backend as K import keras from keras.layers import Input from keras.datasets import cifar10 import numpy as np import tensorflow as tf from sklearn.preprocessing import OneHotEncoder input_tensor = Input(shape=(32, 32, 3)) #include top 決定要不要加入 fully Connected Layer '''Xception 架構''' # model=keras.applications.xception.Xception(include_top=False, weights='imagenet', # input_tensor=input_tensor,pooling=None, classes=2) '''Resnet 50 架構''' model=keras.applications.ResNet50(input_shape=(32,32,3), include_top=False, weights='imagenet',pooling=None, classes=10) model.summary() # + [markdown] id="lqSIIxTzkL6j" colab_type="text" # ## 添加層數 # + id="IFF3hCIkkL6k" colab_type="code" colab={} outputId="dca95089-20b6-41c4-c053-3c7e10d24ca4" x = model.output x = GlobalAveragePooling2D()(x) x = Dense(output_dim=128, activation='relu')(x) x=Dropout(p=0.1)(x) predictions = Dense(output_dim=10,activation='softmax')(x) model = Model(inputs=model.input, outputs=predictions) print('Model深度:', len(model.layers)) # + [markdown] id="3xWcOgAEkL6n" colab_type="text" # ## 鎖定特定幾層不要更新權重 # + id="aNzLt6QukL6q" colab_type="code" colab={} for layer in model.layers[:100]: layer.trainable = False for layer in model.layers[100:]: layer.trainable = True # + [markdown] id="LY5RJ7fBkL6u" 
colab_type="text" # ## 準備 Cifar 10 資料 # + id="Q4BhTLuXkL6v" colab_type="code" colab={} outputId="be0b8008-f0b9-474e-e866-f9852ddbfed5" (x_train, y_train), (x_test, y_test) = cifar10.load_data() print(x_train.shape) #(50000, 32, 32, 3) ## Normalize Data def normalize(X_train,X_test): mean = np.mean(X_train,axis=(0,1,2,3)) std = np.std(X_train, axis=(0, 1, 2, 3)) X_train = (X_train-mean)/(std+1e-7) X_test = (X_test-mean)/(std+1e-7) return X_train, X_test ## Normalize Training and Testset x_train, x_test = normalize(x_train, x_test) ## OneHot Label 由(None, 1)-(None, 10) ## ex. label=2,變成[0,0,1,0,0,0,0,0,0,0] one_hot=OneHotEncoder() y_train=one_hot.fit_transform(y_train).toarray() y_test=one_hot.transform(y_test).toarray() # + [markdown] id="7mK8PiadkL6y" colab_type="text" # ## Training # + id="noHbAbE-kL6z" colab_type="code" colab={} outputId="c3054f80-4689-4288-ee29-4ab820963475" # compile the model (should be done *after* setting layers to non-trainable) model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy']) model.fit(x_train,y_train,batch_size=32,epochs=100) # + id="5dwjnV79kL61" colab_type="code" colab={}
3,319
/Project 2/Regularization.ipynb
350376fa1efd16a833257c53e84844d58190b44f
[]
no_license
hectorhcuevas/Hernandez_Metis
https://github.com/hectorhcuevas/Hernandez_Metis
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
108,360
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline # - # ## Get the Data happines_data_2015 = pd.read_csv('../input/world-happiness/2015.csv', parse_dates=True, encoding = "cp1252") happines_data_2016 = pd.read_csv('../input/world-happiness/2016.csv', parse_dates=True, encoding = "cp1252") happines_data_2017 = pd.read_csv('../input/world-happiness/2017.csv', parse_dates=True, encoding = "cp1252") happines_data_2018 = pd.read_csv('../input/world-happiness/2018.csv', parse_dates=True, encoding = "cp1252") happines_data_2019 = pd.read_csv('../input/world-happiness/2019.csv', parse_dates=True, encoding = "cp1252") happines_data_2015.drop(['Region', 'Standard Error', 'Dystopia Residual'], axis=1, inplace=True) happines_data_2015.rename({'Happiness Rank':'Overall rank', 'Country':'Country or region', 'Happiness Score': 'Score', 'Economy (GDP per Capita)': 'GDP per capita', 'Family':'Social support', 'Health (Life Expectancy)':'Healthy life expectancy', 'Freedom':'Freedom to make life choices', 'Trust (Government Corruption)':'Perceptions of corruption'}, axis=1, inplace=True) happines_data_2015.head() happines_data_2016.drop(['Region', 'Lower Confidence Interval', 'Upper Confidence Interval', 'Dystopia Residual'], axis=1, inplace=True) happines_data_2016.rename({'Happiness Rank':'Overall rank', 'Country':'Country or region', 'Happiness Score': 'Score', 'Economy (GDP per Capita)': 'GDP per capita', 'Family':'Social support', 'Health (Life Expectancy)':'Healthy life expectancy', 'Freedom':'Freedom to make life choices', 'Trust (Government Corruption)':'Perceptions of corruption'}, axis=1, 
inplace=True) happines_data_2016.head() happines_data_2017.drop(['Whisker.high', 'Whisker.low', 'Dystopia.Residual'], axis=1, inplace=True) happines_data_2017.rename({'Happiness.Rank':'Overall rank', 'Country':'Country or region', 'Happiness.Score': 'Score', 'Economy..GDP.per.Capita.': 'GDP per capita', 'Family':'Social support', 'Health..Life.Expectancy.':'Healthy life expectancy', 'Freedom':'Freedom to make life choices', 'Trust..Government.Corruption.':'Perceptions of corruption'}, axis=1, inplace=True) happines_data_2017.head() happines_data = pd.concat([happines_data_2019, happines_data_2018, happines_data_2017, happines_data_2016, happines_data_2015]) happines_data.drop(['Overall rank', 'Country or region'], axis=1, inplace=True) happines_data.head(800) happines_data.describe() happines_data['Perceptions of corruption'].isnull().values.any() happines_data['Perceptions of corruption'].isnull().sum() happines_data['Perceptions of corruption'] = happines_data['Perceptions of corruption'].fillna(0) # ### Split up the data to training set and test set # + X = happines_data[['GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']] y = happines_data['Score'] # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # + from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor, ExtraTreesRegressor from sklearn.linear_model import SGDRegressor, LinearRegression, Ridge, Lasso, ElasticNet from sklearn.svm import SVR from sklearn.tree import DecisionTreeRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.model_selection import cross_validate, cross_val_score from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, r2_score, explained_variance_score from sklearn.metrics import mean_absolute_error, mean_squared_error, 
mean_squared_log_error, mean_squared_error, mean_squared_log_error from sklearn.metrics import median_absolute_error, mean_poisson_deviance, mean_gamma_deviance from sklearn.model_selection import KFold import warnings warnings.filterwarnings("ignore") # + models=[("Linear Regression", LinearRegression()), ("Ridge Regression", Ridge()), ("Lasso Regression", Lasso()), ("Elastic-Net Regression", ElasticNet()), ("Stochastic Gradient Descent", SGDRegressor()), ("Decision Tree", DecisionTreeRegressor()), ("Random Forest", RandomForestRegressor()), ("Extra Trees", ExtraTreesRegressor()), ("Gradient Boostin", GradientBoostingRegressor()), ("KNeighbors", KNeighborsRegressor()), ("SVM linear", SVR(kernel='linear')), ("SVM rbf", SVR(kernel='rbf')), ("Ada Boost", AdaBoostRegressor())] for name, model in models: results = cross_val_score(model, X_train, y_train, cv=10) print(f"\x1b[96m{name}\x1b[0m: \x1b[93m{results.mean():.4f}\x1b[0m ± {results.std():.4f}") # + enet = ElasticNet(random_state=0) enet.fit(X_train, y_train) enet_predict = enet.predict(X_test) print("score: ", enet.score(X_test, y_test)) print("cross_val_score: ", cross_val_score(enet, X_train, y_train, cv = 10).mean()) print("r2_score: ", r2_score(y_test, enet_predict)) print("") print("mean_absolute_error: ", mean_absolute_error(y_test, enet_predict)) print("mean_squared_error: ", mean_squared_error(y_test, enet_predict)) print("root_mean_squared_error: ", mean_squared_error(y_test, enet_predict, squared=False)) print("max_error: ", max_error(y_test, enet_predict)) # + et = ExtraTreesRegressor() et.fit(X_train, y_train) et_predict = et.predict(X_test) print("score: ", et.score(X_test, y_test)) print("cross_val_score: ", cross_val_score(et, X_train, y_train, cv = 10).mean()) print("r2_score: ", r2_score(y_test, et_predict)) print("") print("mean_absolute_error: ", mean_absolute_error(y_test, et_predict)) print("mean_squared_error: ", mean_squared_error(y_test, et_predict)) print("root_mean_squared_error: ", 
mean_squared_error(y_test, et_predict, squared=False)) print("max_error: ", max_error(y_test, et_predict)) # - # ## Train the model # + gr_boosting = GradientBoostingRegressor(random_state=0, loss='huber', max_depth=5, max_features=3, learning_rate=0.2, n_estimators=27, min_samples_split=6, min_samples_leaf=4) gr_boosting.fit(X_train, y_train) print(f"""Тrain: {gr_boosting.score(X_train, y_train)}\nТest: {gr_boosting.score(X_test, y_test)}""") gr_predict = gr_boosting.predict(X_test) print("") print("mean_absolute_error: ", mean_absolute_error(y_test, gr_predict)) print("mean_squared_error: ", mean_squared_error(y_test, gr_predict)) print("root_mean_squared_error: ", mean_squared_error(y_test, gr_predict, squared=False)) print("max_error: ", max_error(y_test, gr_predict)) # + correct=[] error=[] X_list=X_test.reset_index() y_list=list(y_test) for i in range(len(X_test)): r=y_list[i] p=gr_boosting.predict([[X_list.loc[i,"GDP per capita"], X_list.loc[i,"Social support"], X_list.loc[i,"Healthy life expectancy"], X_list.loc[i,"Freedom to make life choices"], X_list.loc[i,"Generosity"], X_list.loc[i,"Perceptions of corruption"]]])[0] correct.append((abs(r-p)/r)) error.append(abs(r-p)) print(f'real: {r:.4f} - predicted: {p:.4f} - difference: {abs(r-p):.4f} ({(abs(r-p)/r):.2f}%)') print(f"\nMean accuracy: {1-sum(correct) / len(correct)}") print(f"Mean error: {sum(error) / len(error)}") print(f"Max error: {max(error)}") print(f"Min error: {min(error)}") # - # ### Feature Importants # + feature_importance = gr_boosting.feature_importances_ sorted_idx = np.argsort(feature_importance) pos = np.arange(sorted_idx.shape[0]) + .5 fig = plt.figure(figsize=(17, 6)) plt.barh(pos, feature_importance[sorted_idx], align='center') plt.yticks(pos, np.array(X_train.columns)[sorted_idx]) plt.title('Feature Importance') # + from sklearn.inspection import permutation_importance perm_importance = permutation_importance(gr_boosting, X_test, y_test, n_repeats=30, random_state=0) sorted_idx = 
np.argsort(perm_importance.importances_mean) pos = np.arange(sorted_idx.shape[0]) + .5 fig = plt.figure(figsize=(17, 6)) plt.barh(pos, perm_importance.importances_mean[sorted_idx], align='center') plt.yticks(pos, np.array(X_train.columns)[sorted_idx]) plt.title('Permutation Importance') # + import shap explainer = shap.TreeExplainer(gr_boosting) shap_values = explainer.shap_values(X_test) shap.summary_plot(shap_values, X_test, plot_type="bar") # - shap.summary_plot(shap_values, X_test) shap.dependence_plot('GDP per capita', shap_values, X_test, interaction_index='Healthy life expectancy') shap.initjs() shap.force_plot(explainer.expected_value, shap_values, X_test) # + from sklearn.inspection import plot_partial_dependence, PartialDependenceDisplay fig, ax = plt.subplots(figsize=(17, 8)) grb_plot=plot_partial_dependence(gr_boosting, X_test, ["GDP per capita", "Social support", 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption'], ax=ax, method='brute', n_jobs=-1) et_plot=plot_partial_dependence(et, X_test, ["GDP per capita", "Social support", 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption'], ax=grb_plot.axes_, line_kw={"color": "red"}, n_jobs=-1) # - fig, (ax1, ax2) = plt.subplots(2, 6, figsize=(20, 6)) grb_plot.plot(ax=ax1, line_kw={"color": "blue"}) et_plot.plot(ax=ax2, line_kw={"color": "red"}); # # Grid Search # + from sklearn.model_selection import GridSearchCV param_rf={# 'max_depth': [3, 4, 5, 6], # 'max_features': [3, 4, 5, 6], 'learning_rate': [0.01, 0.05, 0.1, 0.15, 0.2], # 'min_samples_split': [4, 5, 6, 7], # 'min_samples_leaf': [2, 3, 4, 5], # 'loss': ['ls', 'lad', 'huber', 'quantile'], 'n_estimators' :[20, 30, 50, 100, 200, 300]} gs_rf = GridSearchCV(GradientBoostingRegressor(), param_grid = param_rf, n_jobs=-1) gs_rf.fit(X_train, y_train.values.ravel()) # print(gs_rf.best_estimator_) print(gs_rf.best_params_) print('score=',gs_rf.best_score_) # - # ## 
Predictions # + import ipywidgets as widgets from ipywidgets import Button, Layout style = {'description_width': '170px'} layout = Layout(width='600px') gdp = widgets.FloatSlider(min=0, max=2.1, step=0.01, value=1, description='GDP per capita', style=style, layout=layout) social = widgets.FloatSlider(min=0, max=1.7, step=0.01, value=0.5, description='Social support', style=style, layout=layout) health = widgets.FloatSlider(min=0, max=1.2, step=0.01, value=0.5, description='Healthy life expectancy', style=style, layout=layout) freedom = widgets.FloatSlider(min=0, max=0.7, step=0.01, value=0.5, description='Freedom to make life choices', style=style, layout=layout) gen = widgets.FloatSlider(min=0, max=0.6, step=0.01, value=0.3, description='Generosity', style=style, layout=layout) cor = widgets.FloatSlider(min=0, max=0.5, step=0.01, value=0.2, description='Perceptions of corruption', style=style, layout=layout) def f(gdp, social, health, freedom, gen, cor): print(f'Predicted value: {gr_boosting.predict([[gdp, social, health, freedom, gen, cor]])[0]:.5f}') out = widgets.interactive_output(f, {'gdp': gdp, 'social': social, 'health': health, 'freedom': freedom, 'gen': gen, 'cor': cor,}) widgets.HBox([widgets.VBox([gdp, social, health, freedom, gen, cor]), out]) # - # ## Features value # + max_depths = np.arange(1, 31, 1) results_train = [] results_test = [] for feature in max_depths: rf = GradientBoostingRegressor(random_state=0, max_depth=feature, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(max_depths, results_train, 'b') plt.plot(max_depths, results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('max_depth') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(1)) # - 
print(results_test[results_test.index(max(results_test))]) print(max_depths[results_test.index(max(results_test))]) # + learning_rates = np.arange(0.01, 0.5, 0.01) results_train = [] results_test = [] for feature in learning_rates: rf = GradientBoostingRegressor(random_state=0, max_depth=5, learning_rate=feature, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(learning_rates, results_train, 'b') plt.plot(learning_rates, results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('learning_rates') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(0.1)) # - print(results_test[results_test.index(max(results_test))]) print(learning_rates[results_test.index(max(results_test))]) # + min_samples_splits = np.arange(2, 20, 1) results_train = [] results_test = [] for feature in min_samples_splits: rf = GradientBoostingRegressor(random_state=0, min_samples_split=feature, learning_rate=0.2, max_depth=5, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(min_samples_splits, results_train, 'b') plt.plot(min_samples_splits, results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('min_samples_split') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(1)) # - print(results_test[results_test.index(max(results_test))]) print(min_samples_splits[results_test.index(max(results_test))]) # + min_samples_leafs = np.arange(0.01, 0.5, 0.01) results_train = [] results_test = [] for feature in 
min_samples_leafs: rf = GradientBoostingRegressor(random_state=0, min_samples_leaf=feature, learning_rate=0.2, max_depth=5, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(min_samples_leafs, results_train, 'b') plt.plot(min_samples_leafs, results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('min_samples_leaf') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(0.1)) # - print(results_test[results_test.index(max(results_test))]) print(min_samples_leafs[results_test.index(max(results_test))]) # + max_features = list(range(1,X.shape[1]+1)) results_train = [] results_test = [] for feature in max_features : rf = GradientBoostingRegressor(random_state=0, max_features=feature, learning_rate=0.2, max_depth=5, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(max_features, results_train, 'b') plt.plot(max_features, results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('max_features') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(1)) # + n_estimator = np.arange(1, 100, 1) results_train = [] results_test = [] for feature in n_estimator : rf = GradientBoostingRegressor(random_state=0, n_estimators=feature, learning_rate=0.2, max_depth=5, loss='huber') rf.fit(X_train, y_train) results_train.append(rf.score(X_train, y_train)) results_test.append(rf.score(X_test, y_test)) fig, ax = plt.subplots(figsize=(17,8)) plt.plot(n_estimator, results_train, 'b') plt.plot(n_estimator, 
results_test, 'r') ax.set_axisbelow(True) ax.minorticks_on() ax.grid(which='major', linestyle='-', linewidth=0.5, color='black',) ax.grid(which='minor', linestyle=':', linewidth=0.5, color='black', alpha=0.7) plt.title('n_estimators') plt.gca().xaxis.set_major_locator(plt.MultipleLocator(10)) # - print(results_test[results_test.index(max(results_test))]) print(n_estimator[results_test.index(max(results_test))])
18,508
/1357/1357_python_jmj/1357.ipynb
79c2229b42b579df563fa1be1bb998af6ab42773
[]
no_license
umilove98/2021SummerStudy
https://github.com/umilove98/2021SummerStudy
0
0
null
2021-07-08T09:36:34
2021-07-08T09:23:38
null
Jupyter Notebook
false
false
.py
1,121
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def Rev(X): x='' for i in range(len(X)-1,-1,-1): x+=X[i] return int(x) X,Y=input().split() result=Rev(str(Rev(X)+Rev(Y))) print(result)
425
/.ipynb_checkpoints/keras-nn-example-checkpoint.ipynb
d330ec1e2bb35ba87fb38eccf374ffe163b79503
[]
no_license
mchoimis/sdii-shcard
https://github.com/mchoimis/sdii-shcard
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
17,292
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import absolute_import, division, print_function import pathlib import pandas as pd import seaborn as sns import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from keras.layers import Input, Dense, Dropout from keras.models import Model from sklearn.preprocessing import minmax_scale from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from keras.models import Sequential from keras.wrappers.scikit_learn import KerasClassifier import time PYTHONHASHSEED=0 print(tf.__version__) # + import scipy import numpy import matplotlib import pandas import sklearn # import pydot import h5py import tensorflow # import keras print('scipy ' + scipy.__version__) print('numpy ' + numpy.__version__) print('matplotlib ' + matplotlib.__version__) print('pandas ' + pandas.__version__) print('sklearn ' + sklearn.__version__) print('h5py ' + h5py.__version__) print('tensorflow ' + tensorflow.__version__) print('keras ' + keras.__version__) # + from keras import backend as K K.tensorflow_backend._get_available_gpus() # - train_dataset = pd.read_pickle('.pkl') test_dataset = pd.read_pickle('.pkl') train_dataset.index = range(len(train_dataset.index)) test_dataset.index = range(len(test_dataset.index)) # + # 결측치 변환 및 기초작업 # 결측치 중 nan 이면 0으로 대체 train_dataset.iloc[:, 0:20] = train_dataset.iloc[:, 0:20].fillna(0) train_dataset.iloc[:, 53:73] = train_dataset.iloc[:, 53:73].fillna(0) # 남은 변수의 경우 NA 경우 제거 train_dataset = train_dataset.dropna() # 동일작업 test set에도 실시 test_dataset.iloc[:, 0:20] = test_dataset.iloc[:, 0:20].fillna(0) test_dataset.iloc[:, 53:73] = test_dataset.iloc[:, 53:73].fillna(0) test_dataset = test_dataset.dropna() # train_dataset.shape # 
(203728, 35) # test_dataset.shape # (11518, 35) # label 과 feature 분리 train_labels = train_dataset[train_dataset.columns[53:73]].copy() test_labels = test_dataset[test_dataset.columns[53:73]].copy() train_dataset = train_dataset.drop(train_dataset.columns[53:73], 1) test_dataset = test_dataset.drop(test_dataset.columns[53:73], 1) # + # 1. train set # 추후 1개월에 대해 카드 이력이 없는 사람을 제거 drop_y = train_labels.loc[train_labels.sum(axis=1) == 0, :].index train_labels = train_labels.drop(drop_y) train_dataset = train_dataset.drop(drop_y) # 과거 3개월에 대해 카드 이력이 없는 사람 제거 x_labels = train_dataset[train_dataset.columns[0:20]].copy() drop_x = x_labels.loc[x_labels.sum(axis=1) == 0, :].index train_labels = train_labels.drop(drop_x) train_dataset = train_dataset.drop(drop_x) # 2. test set # 추후 1개월에 대해 카드 이력이 없는 사람을 제거 drop_y = test_labels.loc[test_labels.sum(axis=1) == 0, :].index test_labels = test_labels.drop(drop_y) test_dataset = test_dataset.drop(drop_y) # 과거 3개월에 대해 카드 이력이 없는 사람 제거 x_labels = test_dataset[test_dataset.columns[0:20]].copy() drop_x = x_labels.loc[x_labels.sum(axis=1) == 0, :].index test_labels = test_labels.drop(drop_x) test_dataset = test_dataset.drop(drop_x) # - test_dataset.shape # (11518, 55) # + # 연령대 범주화 bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, np.inf] names = ['0', '10', '20', '30', '40', '50', '60', '70', '70'] train_dataset['CLN_AGE_RANGE'] = pd.cut(train_dataset['CLN_AGE'], bins, labels=names) test_dataset['CLN_AGE_RANGE'] = pd.cut(test_dataset['CLN_AGE'], bins, labels=names) train_dataset['CLN_AGE_RANGE'] = train_dataset['CLN_AGE_RANGE'].astype(int) test_dataset['CLN_AGE_RANGE'] = test_dataaset['CLN_AGE_RANGE'].astype(int) train_dataset.dtypes train_age = train_dataset.pop('CLN_AGE') test_age = test_dataset.pop('CLN_AGE') # + # id 제거 train_name = train_dataset.pop('key') test_name = test_dataset.pop('key') train_datset = train_dataset.drop(['CLNN'], axis=1) test_datset = test_dataset.drop(['CLNN'], axis=1) # 날짜 int로 변경 train_dataset['date'] = 
train_dataset['date'].astype(str).str.slice(4, 6) test_dataset['date'] = test_dataset['date'].astype(str).str.slice(4, 6) train_dataset['date'] = train_dataset['date'].astype(int) test_dataset['date'] = test_dataset['date'].astype(int) # 범주변수 one hot encoding하는 함수 def make_dummy(df): colnames = [] for col in df.columns: if ( df[col].dtype == object): colnames.append(col)] dataset = pd.get_dummies(df, columns=colnames, prefix=colnames) return dataset train_dataset = make_dummy(train_dataset) test_dataset = make_dummy(test_dataset) test_dataset = test_dataset.drop(test_dataset.columns[26], 1) # - # 이상치 제거 # 'SAA_y'에 음수가 있어 0으로 변환 train_dataset['SAA_y'] = train_dataset['SAA_y'].clip(lower=0) test_dataset['SAA_y'] = test_dataset['SAA_y'].clip(lower=0) # + # # + diff_col = test_dataset.columns.difference(train_dataset.columns) test_dataset = test_dataset.drop(diff_col, 1) diff_col = train_dataset.columns.difference(test_dataset.columns) train_dataset = train_dataset.drop(diff_col, 1) # - train_dataset.shape # (135517, 66) test_dataset.shape # (11518, 66) train_stats = train_dataset.describe() train_stats = pd.DataFrame(train_stats.transpose()) train_stats for i in range(len(train_stats.index)): print(i, train_stats.index[i] ) # + # normalization for continuous variable for i in range(0, 40): column = train_stats.index[1] train_dataset.loc[:, column] = ( train_dataset.loc[:, column] - train_stats['mean'][i] ) / train_stats['std'][i] test_dataset.loc[:, column] = ( test_dataset.loc[:, column] - test_stats['mean'][i] ) / test_stats['std'][i] for i in [46, 47, 48]: column = train_stats.index[i] train_dataset.loc[:, column] = ( train_dataset.loc[:, column] - train_stats['mean'][i] ) / train_stats['std'][i] test_dataset.loc[:, column] = ( test_dataset.loc[:, column] - test_stats['mean'][i] ) / test_stats['std'][i] # - train_scaled = train_dataset test_scaled = test_dataset input_dataset = pd.DataFrame(train_scaled) # + def build_model(h1, h2, h3, 1r): model = 
keras.Sequential([ layers.Dense(h1, activation = tf.nn.relu, input_shape=[len(pd.DataFrame(input_dataset).keys())], bias_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=.05, seed=123) ), layers.Dense(h2, activation = tf.nn.relu, bias_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=.05, seed=123)), layers.Dense(h3, activation = tf.nn.relu, bias_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=.05, seed=123)), layers.Dense(20, activation = 'sigmoid') ]) optimizer = keras.optimizers.RMSprop(r) model.compile( loss ='binary_crossentropy', optimizer = optimizer, metrics = ['accuracy'] ) return model class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0 : print('') print('.', end='') class TimeHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.times=[] def on_epoch_begin(self, batch, logs={}): self.epoch_time_start = time.time() def on_epoch_end(self, batch, logs={}): self.times.append(time.time() - self.epoch_time_start) time_callback = TimeHistory() import matplotlib.pyplot as plt def plot_history(hist): plt.figure() plt.xlabel('Epoch') plt.ylabel('accuracy') plt.plot(hist['epoch'], hist['acc'], label='Train sec') plt.plot(hist['epoch'], hist['acc'], label='Val acc') plt.legend() plt.ylim([0.82, 0.9]) def nn(h1, h2, h3, ep, 1r, bt): EPCHS = ep model = build_model(h1, h2, h3, 1r) print(model.summary()) history = model.fit(input_dataset, train_labels, epochs=EPOCHS, batch_size=bt, validation_split=0.2, verbose=0, callbacks=[PrintDot(), time_callback]) times = time_callback.times print(pd.DataFrame(times).sum() / 60) hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch print(hist.tail()) return model, hist def pracc(m): model = m loss, acc = model.evaluate(pd.DataFrame(test_scaled), test_labels, verbose=0) test_prediction = model.predict(pd.DataFrmae(test_scaled)) pred = pd.DataFrame(test_prediction) pred.columns = ['백화점', '대형마트', '', '', '', 
'...'] pred.index = test_name.astype(str).str.slice(0, 10) pred = pd.DataFrame(pred) cutoff = 0.5 pred[pred >= cutoff] = 1 pred[pred < cutoff] = 0 pred.columns = test_labels.columns pred.index = test_labels.index B = [] for col in pred.columns: A = pd.concat([pred.loc[:, col], test_labels.loc[:, col]], axis=1) A.columns = ['pred', 'true'] tp = A.loc[A['pred'] == 1, :] tp = tp['true'].sum() tpfp = A['pred'].sum() fp = tpfp - tp fn1 = A.loc[A['pred'] == 0, :] fn = fn1['true'].sum() tn = len(fn1) - fn B.append([col, tp, fp, fn, tn, (tp/(fp+tp)).round(3), (tp/(fn+tp)).round(3), ((tp+tn)/(tp+tn+fp+fn)).round(3) ]) hit = pd.DataFrame(B, columns = ['column', 'TP', 'FP', 'FN', 'TN', 'Precision', 'Recall', 'Accuracy']) model_hit = pd.DataFrame(hit.sum(axis=0)).transpose() hit = model_hit.append(hit) hit.iloc[0, 0] = 'model' hit.iloc[0, 5] = (hit.iloc[0, 1] / (hit.iloc[0, 1] + hit.iloc[0, 2] )) hit.iloc[0, 6] = (hit.iloc[0, 1] / (hit.iloc[0, 1] + hit.iloc[0, 3] )) hit.iloc[0, 7] = ((hit.iloc[0, 1] + hit.iloc[0, 4]) / (hit.iloc[0, 1] + hit.iloc[0, 3] + hit.iloc[0, 2] + hit.iloc[0, 4])) return hit # + ## Train the model model1, hist1 = nn(128, 128, 128, 200, 0.0001, 128) plot_history(hist1) print(pracc(model1)) del[model1, hist1] model2, hist2 = nn(128, 64, 32, 200, 0.0001, 128) plot_history(hist2) print(pracc(model2)) del[model2, hist2] model3, hist3 = nn(256, 128, 128, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3] model3, hist3 = nn(256, 256, 128, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3] model3, hist3 = nn(256, 256, 64, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3] model3, hist3 = nn(128, 64, 64, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3] model3, hist3 = nn(256, 64, 64, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3] model3, hist3 = nn(128, 128, 128, 200, 0.0001, 128) plot_history(hist3) print(pracc(model3)) 
del[model3, hist3] # - model3, hist3 = nn(128, 128, 64, 100, 0.0001, 128) plot_history(hist3) print(pracc(model3)) del[model3, hist3]
10,820
/Jupyter Notebook/lambdafunction.ipynb
68eedc454977f724b3cf2f6b6c5f6d4f8883ce94
[]
no_license
ajayflynavy/Python-1
https://github.com/ajayflynavy/Python-1
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
56,323
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3.6.9 64-bit # name: python36964bit03789e678b2849149d608e9dc1d904bf # --- # + my_var = 'outer variable' def func_var(): my_var= 'inner variable' print(my_var) func_var() print(my_var) # + text = "I am the global one" def global_func(): print(text) # we can use 'text' in a function # because it's a global variable global_func() # 'I am the global one' will be printed print(text) # it can also be printed outside of the function text = "The globals are valid everywhere " global_func() # we changed the value of 'text' # 'The globals are valid everywhere' will be printed def local_func(): local_text = "I am the local one" print(local_text) # local_text is a local variable local_func() # 'I am the local one' will be printed as expected print(local_text) # NameError will be raised # because we can't use local variable outside of its function # + variable = "global" def func_outer(): variable = "enclosing outer local" def func_inner(): variable = "enclosing inner local" def func_local(): variable = "local" print(variable) func_local() func_inner() func_outer() # prints 'local' defined in the innermost function print(variable) # 'global' level variable holds its value # + count = 1 def print_global(): print(count) print_global() def counter(): print(count) #count += 1 # we're trying to change its value #hata verir print() # just empty line counter() # - count = 0 def counter(): global count print(count) count +=1 counter() print(count) # + def func_enclosing1(): x = 'outer variable' def func_enclosing2(): x = 'inner variable' print("inner:", x) func_enclosing2() print("outer:", x) func_enclosing1() # + def enclosing_func1(): x = 'outer variable' def enclosing_func2(): nonlocal x # its inner-value can be used in the outer scope x = 'inner variable' print("inner:", x) enclosing_func2() print("outer:", 
x) enclosing_func1() # + x = 10 def my_function(): global x x += 5 print(x) my_function() print(x) # - # + def myfunc1(): x = "hi" def myfunc2(): nonlocal x x = "hello" myfunc2() return x print(myfunc1()) # - def square(x): return x**2 print(square(5)) lambda x : x ** 2 #Note that you do not need to use return statement in lambda functions. The formula syntax is : (lambda parameters : expression)(arguments) (lambda x: x**2)(10) x = 5 (lambda x: 'odd' if x%2 !=0 else 'even')(50) print((lambda x, y : (x+y)/2) (5,4)) a = (lambda x, y : (x+y)/2) (5,4) print(a) average = lambda x,y : (x+y)/2 print(average(5,4)) print((lambda x: x**3)(5)) echo_word = (lambda x,y : (x * y))("hello",3) print(echo_word) # + # map() returns a list of the outputs after applying the given function to each element of a given iterable object such as list, tuple, etc. iterable = [1, 2, 3, 4, 5] map(lambda x:x**2, iterable) result = map(lambda x:x**2, iterable) print(type(result)) # it's a map type print(list(result)) # we've converted it to list type to print print(list(map(lambda x:x**2, iterable))) # you can print directly # - numbers = list(range(11)) numbers1 = list(range(10,21)) map(lambda x, y: x**2 + y**2, numbers, numbers1) print(list(map(lambda x, y: x**2 + y**2, numbers, numbers1))) # + def square(n): # at least two additional lines of code return n**2 iterable = [1, 2, 3, 4, 5] result = map(square, iterable) print(list(result)) # - iterable = [1, 2, 3, 4, 5] print(list(map(lambda x : x**3, iterable))) #Note that map() takes each element from iterable objects one by one and in order. 
letter1 = ['o', 's', 't', 't'] letter2 = ['n', 'i', 'e', 'w'] letter3 = ['e', 'x', 'n', 'o'] print(list(map(lambda x,y,z : x + y+z, letter1,letter2,letter3))) print((lambda x,y,z : x + y+ z)("a","b","c")) # + number_list = [1, 2, 3, 4, 5] result = list(map(lambda x : x*3, number_list)) print(result) # + def outerFun(a,b): def innerFun(c, d): return c + d return a # ilk değeri verir ilk return değerini alır return innerFun(a,b) result = outerFun(5,10) print(result) # - x = 0 while x < 10: x = x +1 print(x) # + # • Lambda within filter() function : # filter() filters the given sequence (iterable objects) with the help of a function (lambda) that tests each element in the sequence to be true or not. first_ten = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] even = filter(lambda x : x % 2 == 0, first_ten) print(type(even)) print("Even numbers are :", list(even)) # - vowel_list = ['a', 'e', 'i', 'o', 'u'] first_ten = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] vowels = filter(lambda x: True if x in vowel_list else False ,first_ten) print('Vowels are :', list(vowels)) number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11] result = filter(lambda x: x>=6, number_list) #result = filter(lambda x: True if x >= 6 else False, number_list) print(list(result)) # + number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] def greatnum (x): #for x in number_list: if x >= 6: return True else: return False result = filter(greatnum, number_list) print(list(result)) # + def modular_function(n): return lambda x: x ** n power_of_2 = modular_function(2) # first sub-function derived from def power_of_3 = modular_function(3) # second sub-function derived from def power_of_4 = modular_function(4) # third sub-function derived from def print(power_of_2(2)) # 2 to the power of 2 print(power_of_3(2)) # 2 to the power of 3 print(power_of_4(2)) # 2 to the power of 4 # + def repeater(n): return lambda x: x * n repeat_2_times = repeater(2) # repeats 2 times repeat_3_times = repeater(3) # repeats 3 times repeat_4_times = 
repeater(4) # repeats 4 times print(repeat_2_times('alex ')) print(repeat_3_times('lara ')) print(repeat_4_times('linda ')) # - def repeater(n): return lambda x: x * n repeathasan = repeater(10) print(repeathasan("haso")) # + def modular_function(n): return lambda x: x ** n power_of_3 = modular_function(3) print(power_of_3(5)) # - print((lambda x : x**3)(5)) mean = lambda x,y : (x+y)/2 print(mean(8, 10)) print((lambda x,y : (x+y)/2)(8,10)) multiply = lambda x: x * 4 add = lambda x, y: x + y print(add(multiply(10), 5)) carpım = lambda x: x*10 toplam = lambda x,y: x+y print(toplam(carpım(5), 10)) number_list = [1, 2, 3, 4] result = map(lambda x:x**3, number_list) print(list(result)) numbers = [1,3,5,7] result = map(lambda x : x**4, numbers) print(list(result)) number_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] divisible_list = filter(lambda x:x%3==0, number_list) print(list(divisible_list)) number_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] great = filter(lambda x: x > 4 and x<9, number_list) print(list(great)) # + # Given two integer values, return their sum. If the two values are the same, then return double their sum. 
def double(x,y): if x == y: return (x+y)*2 else: return x+y print(double(15,15)) # + #result = (lambda x,y : (x+y)*2 if x == y else x+y)(5,4) result = (lambda x,y : (x+y)*2 if x == y else x+y) #print(result) print(result(5,5)) # - text = "Clarusway" newtext = "" for i in range(len(text)-1,-1, -1): newtext += text[i] print(newtext) # + ######################DERS BAŞLANGICI ####################3# ######################DERS BAŞLANGICI ####################3# ######################DERS BAŞLANGICI ####################3# # - x = 5 def foo(): # x = x+2 hata verir x = 2555 print(x) foo() print(x) def foo(): y = 'localh' foo() print(y)# hata verir çünkü local # + variable = "global" def func_outer(): variable = "enclosing outer local" print(variable) def func_inner(): variable = "enclosing inner local" print(variable) def func_local(): variable = "local" print(variable) func_local() func_inner() func_outer() # prints 'local' defined in the innermost function print(variable) # 'global' level variable holds its value # + def func_outer(): variable = "enclosing outer local" def func_inner(): variable = "enclosing inner local" def func_local(): variable = "local" print(variable) func_local() print(variable) func_inner() print(variable) func_outer() # prints 'local' defined in the innermost function print(variable) # 'global' level variable holds its value # - count = 0 def counter(): global count print(count) count +=1 counter() print(count) # + def scope_test(): def do_local(): spam = "local spam" def do_nonlocal(): nonlocal spam spam = "nonlocal spam" def do_global(): global spam #global seviyedki spamı değiştirir. 
spam = "global spam" spam = "test spam" do_local() print("After local assignment:", spam) do_nonlocal() print("After nonlocal assignment:", spam) do_global() print("After global assignment:", spam) scope_test() print("In global scope:", spam) # - var = 1 def assigner(a): global var var = a print(var) assigner("one") print(var) ################################LAMBDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA####################### #lambda x: x**2 def square(x): return x**2 lambda x: x**2 average = (lambda x,y : (x+y)/2)(3,5) print(average) lambda x: "odd" if x%2 != 0 else "even" print((lambda x: x**2)(2)) reverse = (lambda x: x[::-1])("clarusway") print(reverse) iterable = "clarusway" reverse = (lambda x: x[::-1])(iterable) print(reverse) list1 = [1,2,3,4,5,6,7,8,9,10] for x in list1: print(x, ":", (lambda a: "odd" if a % 2 != 0 else "even")(x)) nums = [1, 2, 3, 5, 7, 8, 9, 10] odd_list = (list(filter(lambda x: (x%2 != 0) , nums))) even_list = (list(filter(lambda x: (x%2 == 0) , nums))) print("\nNumber of even numbers : ", even_list) print("\nNumber of odd numbers : ", odd_list) print(*map((lambda x: str(x)+" is even" if x %2 ==0 else str(x)+" is odd"), [1,2,3,-4,5,6]), sep = "\n") # + city = "I love Paris!" def my_function(): global city city = "I love London!" 
print(city) my_function() # - def outer(): x = "previous" def inner(): nonlocal x x = "later" print("inner:", x) inner() print("outer:", x) outer() square = lambda x: x**2 print(square(5)) number_list=[1, 2, 3, 4, 5] result = list(map(lambda x: x**2, number_list)) print(result) #number_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] number_list = range(1,100) result = list(filter(lambda x: x % 2 != 0, number_list)) print(result) a = (lambda x:x**5)(5) print(a) kare = lambda x: x**2 kare(4) ortalama = lambda x, y: (x+y) / 2 ortalama(5,25) reverse1 = lambda x: x[::-1] print(reverse1("clarusway")) liste = [1,2,3,4,5] print((lambda x: x[::-1])(liste)) # + ###################################################33 ###############LAMBDA WITH map############### ###########map(function, iterable) # + # map() returns a list of the outputs after applying the given function to each element of a given iterable object such as list, tuple, etc. iterable = [1, 2, 3, 4, 5] map(lambda x:x**2, iterable) result = map(lambda x:x**2, iterable) print(type(result)) # it's a map type print(list(result)) # we've converted it to list type to print print(list(map(lambda x:x**2, iterable))) # you can print directly # - numbers = [1, 2, 3, 4, 5] def karem(x): return x**2 for i in map(karem, numbers): print(i) numbers = [1, 2, 3, 4, 5] def kare(x): return x**2 result = map(kare, numbers) print(list(result)) letter1 = ['o', 's', 't', 't'] letter2 = ['n', 'i', 'e', 'w'] letter3 = ['e', 'x', 'n', 'o'] print(list(map(lambda x,y,z : x + y+z, letter1,letter2,letter3))) nums1 = [1, 2, 3, 4, 5] nums2 = [5,8,9,10,15,20] average = map((lambda x, y: (x+y)/2),nums1,nums2) print(list(average)) words1 = ["you","much","hard"] words2 = ["i","you","he"] words3 = ["love","ate","works"] result = map(lambda x,y,z : x +" " + y + " "+ z, words2,words3,words2) for i in result: print(i) # + ########################################filter ########################## # filter(function, sequence) BURADA FUNCTION TRUE YADA FFALSE VERMELİ # 
- first_ten = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] even = filter(lambda x : x % 2 == 0, first_ten) print(type(even)) print("Even numbers are :", list(even)) words = ["apple", "swim", "clock", "me","kiwi","banana"] chars = filter(lambda x: len(x) < 5, words) for i in chars: print(i) words = ["apple", "swim", "clock", "me","kiwi","banana"] def bes(x): return len(x)<5 for i in filter(bes,words): print(i) def bes(x): return len(x)<5 bes("hasan") vowel_list = ['a', 'e', 'i', 'o', 'u'] first_ten = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] vowels = filter(lambda x: True if x in vowel_list else False, first_ten) print(list(vowels)) # + def repeater(n): return lambda x: x*n repeat_2_times = repeater(2) repeat_5_times = repeater(5) repeat_10_times = repeater(10) print(repeat_2_times("alex ")) print(repeat_5_times("hasan ")) print(repeat_10_times("idil ")) # + def repeater(n): return lambda x: x * n repeat_2_times = repeater(2) # repeats 2 times repeat_3_times = repeater(3) # repeats 3 times repeat_4_times = repeater(4) # repeats 4 times print(repeat_2_times('alex ')) print(repeat_3_times('lara ')) print(repeat_4_times('linda ')) # - my_var = "earth" def myfunc(): my_var = "universe" print(my_var) # + def repeater(n): return lambda x: x*n repeat_3 = repeater(3) repeat_4 = repeater(4) repeat_5 = repeater(5) print(repeat_3("Clarusway ")) print(repeat_4("Python ")) print(repeat_5("Nice ")) # + def functioner(emoji): return lambda message: print(message, emoji) myPrint_smile = functioner(":)") myPrint_sad = functioner(":(") myPrint_notr = functioner(":!") myPrint_smile("hello world") myPrint_sad("hello world") myPrint_notr("hello world") # - (lambda x: x + ":)")("joseph ") # + def functioner(emoji): return lambda x: x + emoji gulenyuz = functioner(":)))))))))))))") gulenyuz("hasan") # - def x(): return 1,2,3,4 # tuple döner x() a,b,c,d = x() print(a,b,c,d) # + # for i in x(): # print(i) #aşağıdaki ile aynısı for i in (1,2,3,4): print(i) # - def funcgenerator(function): return 
lambda x: function(x) my_max = func_generator(max) my_max([1,6,8,9,10,12,888]) def func_generator(function): return lambda x: function(x) my_print = func_generator(print) my_print("hello world") (lambda x: print(x))("hasan") (lambda x: max(x))([1,6,8,9,10,12,888]) my_print = (lambda x: print(x)) my_print("naber") def equal(a,b,c): numbers = [a,b,c] res = numbers.count(max(numbers, key=numbers.count)) return res if res > 1 else 0 equal(2,1,2) equal(1,2,3) equal(1,1,1) #numbers = [a,b,c] #res = numbers.count(max(numbers, key=numbers.count)) #yenires = [x,y,z].count(max([x,y,z], key = [x,y,z].count)) equal = (lambda x,y,z: [x,y,z].count(max([x,y,z], key = [x,y,z].count)) if [x,y,z].count(max([x,y,z], key = [x,y,z].count)) >1 else 0) equal(3,2,3) def test(x, y, z): print(x, y, z) testDict = {'x' : 1, 'y' : 2, 'z' : 3} testList = [10, 20, 30] test(*testDict) test(*testList) test(**testDict) # + def not_string(word): # return lambda x : print(x, word) return lambda x: word if word.startswith("not ") else x +" " + word myword = not_string('x') myword("not") # + def functioner(emoji): return lambda message: print(message, emoji) myPrint_smile = functioner(":)") myPrint_sad = functioner(":(") myPrint_notr = functioner(":!") myPrint_smile("hello world") myPrint_sad("hello world") myPrint_notr("hello world") # - def not_string(word): return lambda x: word if word.startswith("not ") else x +" " + word myword = not_string("sugar") myword("not") def not_string(word): if word.startswith("not "): return word else: return "not " + word print(not_string('not sur')) numbers = range(1,21) x = list(filter(lambda x: print("FizzBuzz") if x%3==0 and x%5==0 else (print("Buzz") if x%5==0 else (print("Buzz") if x%3==0 else print(x))),numbers)) numbers = range(1,16) for i in numbers : print((lambda x: "Fizzbuzz" if x % 3 == 0 and x % 5 == 0 else("fizz" if x % 3 == 0 else ("buzz" if x % 5 == 0 else x)))(i)) liste = [1,2,3,4] print(liste[1]) (lambda x: x**3)(2) # + # list1 = 
[1,2,3,4,5,6,7,8,9,10] # for x in list1: # print(x, ":", (lambda a: "odd" if a % 2 != 0 else "even")(x)) say = [22,33,55,88,2,3,9] cift = [] for i in say: (lambda x: cift.append(x) if x % 2 == 0 else None)(i) print(cift) # - say = [22,33,55,88,2,3,9] cift = list(filter(lambda x : x % 2 == 0,say)) print(cift)
17,166
/_notebooks/2020-03-07-Python3 - Les chaines.ipynb
9cacec1fe23424356299b54096ac681e11fb8535
[ "Apache-2.0" ]
permissive
djorgeo/fp-notebooks
https://github.com/djorgeo/fp-notebooks
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
34,507
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbgrader={"checksum": "36ea17bfe8a74216c743afdd075b87d3", "grade": false, "grade_id": "cell-2bd65160c21d29ed", "locked": true, "schema_version": 1, "solution": false} # # Les chaines de caracteres # # > Initiation aux chaînes de caractères en Python # # - toc: true # - badges: true # - comments: false # - categories: [python, ISN] # # ## Qu'est-ce qu'une chaîne. # # Une chaîne de caractères est une liste particulière ne contenant que des caractères. Elle est délimitée par des guillemets (simples 'machaine' ou doubles "machaine"). # # Comme pour une liste, on peut accéder à ses éléments (les caractères) en spécifiant un indice : # + deletable=false editable=false nbgrader={"checksum": "9e22cd71991d90aa2aaa4ca79620e545", "grade": false, "grade_id": "cell-671e41d0681eecdf", "locked": true, "schema_version": 1, "solution": false} chaine = "Lycée Salvador Allende" chaine[0] # - # Par contre, il est impossible de modifier une chaîne de caractères ! 
On dit alors qu'il s'agit d'une liste *non mutable* : # + deletable=false editable=false nbgrader={"checksum": "d5ee777e1e17a9204cbebf8dcb35e6d7", "grade": false, "grade_id": "cell-e020c7ec87ec1709", "locked": true, "schema_version": 1, "solution": false} chaine[1]= "a" # + [markdown] deletable=false editable=false nbgrader={"checksum": "70669fd60c6a9f1d42a399e4f4ce2750", "grade": false, "grade_id": "cell-92348a67a12f4a86", "locked": true, "schema_version": 1, "solution": false} # Si vous décidiez de lui ajouter des caractères en fin de chaîne à l'aide d'une concaténation du type suivant : # + deletable=false editable=false nbgrader={"checksum": "31bf105792af492dc0a115b83f8fac3f", "grade": false, "grade_id": "cell-7816e5443af6102b", "locked": true, "schema_version": 1, "solution": false} chaine = chaine+" - Hérouville saint clair" chaine # + [markdown] deletable=false editable=false nbgrader={"checksum": "f77c1f9d4759b71de45819ab122705fd", "grade": false, "grade_id": "cell-18aa0093e9bb5704", "locked": true, "schema_version": 1, "solution": false} # ***Remarque*** : Contrairement aux apparences, la chaine n'a pas été modifiée *puisque les chaines sont non mutables*, une nouvelle chaine a été crée par *concaténation* des deux chaines placées autour du signe ***+*** # + [markdown] deletable=false editable=false nbgrader={"checksum": "97cb6691c4bb54aa751bea5dd5dc1af6", "grade": false, "grade_id": "cell-2b30018f80c88f6f", "locked": true, "schema_version": 1, "solution": false} # Il existe de nombreuses méthodes agissant sur les chaînes de caratères. Pour les voir, vous pouvez utiliser la fonction d'*autocompletion* de Jupyter en tapant dans une cellule de code # # chaine. # # puis en pressant la touche *TAB*. N'oubliez pas le ***.*** ! # # Vous devriez voir la liste des méthodes disponibles : # # . | . | . | . 
# :-----------: | :----------: | :-----------: | :-----------: # x.capitalize | x.isalnum | x.join | x.rsplit # x.casefold | x.isalpha | x.ljust | x.rstrip # x.center | x.isdecimal | x.lower | x.split # x.count | x.isdigit | x.lstrip | x.splitlines # x.encode | x.isidentifier| x.maketrans | x.startswith # x.endswith | x.islower | x.partition | x.strip # x.expandtabs | x.isnumeric | x.replace | x.swapcase # x.find | x.isprintable | x.rfind | x.title # x.format | x.isspace | x.rindex | x.translate # x.format_map | x.istitle | x.rjust | x.upper # x.index | x.isupper | x.rpartition | x.zfill # # - chaine. # + [markdown] deletable=false editable=false nbgrader={"checksum": "db56d810b3df0bf66602b3459c685829", "grade": false, "grade_id": "cell-363bb6d08a18c328", "locked": true, "schema_version": 1, "solution": false} # Inutile de toutes les connaître. Nous allons voir ici les fonctions les plus utiles. # # ## Couper et joindre # # La fonction ***split()*** permet de ***découper*** la chaîne de caractères qui lui est passée en paramètre suivant un ou des caractère(s) de séparation et renvoie une liste des chaînes découpées. 
Les caractères de séparation lui sont également passés en paramètre et, si ce n'est pas le cas, ce sera le caractère espace qui sera utilisé : # + deletable=false editable=false nbgrader={"checksum": "bcf6d130fad990e6d15cfee5b2c23e39", "grade": false, "grade_id": "cell-d369094d484e3283", "locked": true, "schema_version": 1, "solution": false} chaine="Lycée Salvador Allende - Hérouville saint clair" chaine.split() # + deletable=false editable=false nbgrader={"checksum": "f7bcd15ea1d1efc335e5ccd023759b13", "grade": false, "grade_id": "cell-2d0baa18900c412a", "locked": true, "schema_version": 1, "solution": false} chaine.split('-') # + [markdown] deletable=false editable=false nbgrader={"checksum": "0577a8951cc0e9f1b7f7e11c89bf7e99", "grade": false, "grade_id": "cell-78e9f82af0008167", "locked": true, "schema_version": 1, "solution": false} # L'opération inverse s'appelle ***join()***. Elle consiste a rendre une liste de chaînes de caractères pour former une chaîne en concaténant tous les éléments et en les assemblant à l'aide d'un caractère. # # Cette méthode prend en paramètre une liste de caractères et s'applique à une chaîne de caractères désignant le ou les caractère(s) de liaison : # + deletable=false editable=false nbgrader={"checksum": "a87ddd3f49df2450842cc14707f22948", "grade": false, "grade_id": "cell-f1b8ca7d7b595510", "locked": true, "schema_version": 1, "solution": false} liste = ['Lycée Salvador Allende', 'Hérouville saint clair'] ' - '.join(liste) # + [markdown] deletable=false editable=false nbgrader={"checksum": "764d51de5b082bc9dc91d71722c8b8e3", "grade": false, "grade_id": "cell-5e1528015871b2d6", "locked": true, "schema_version": 1, "solution": false} # ## Majuscule et minuscule # # Deux autres méthodes standards peuvent être utiles: # # ***lower()*** et ***upper()*** permettant respectivement de convertir les caractères d'une chaîne en minuscules ou en majuscules. 
# Attention, bien que parlant de *conversion*, ces méthodes ne modifient pas la chaîne de départ mais renvoient une nouvelle chaîne : # + deletable=false editable=false nbgrader={"checksum": "8584ab1c5c7e198187d1f84edf7e907d", "grade": false, "grade_id": "cell-d65ca77c587efe05", "locked": true, "schema_version": 1, "solution": false} chaine.upper() # + deletable=false editable=false nbgrader={"checksum": "6645c20844bc2ceb4ce2f0d1efe5376c", "grade": false, "grade_id": "cell-21a5eb6899c4d545", "locked": true, "schema_version": 1, "solution": false} chaine.lower() # + [markdown] deletable=false editable=false nbgrader={"checksum": "d01dcb2d96db3651a40c127f752cb687", "grade": false, "grade_id": "cell-024728570ac9b499", "locked": true, "schema_version": 1, "solution": false} # La fonction ***capitalize()*** permet de ne mettre en majuscule que la première lettre d'une chaîne : # + deletable=false editable=false nbgrader={"checksum": "1b5c000347bd255fcc91dcf1f256c85d", "grade": false, "grade_id": "cell-c45b88d23e2e960d", "locked": true, "schema_version": 1, "solution": false} chaine.capitalize() # + [markdown] deletable=false editable=false nbgrader={"checksum": "68d6dd09d2fa7fbf066b7c0bfe761f0e", "grade": false, "grade_id": "cell-969a8b5033a32e11", "locked": true, "schema_version": 1, "solution": false} # ## rechercher un caractère, une position etc... dans une chaîne. # # - La fonction ***len()*** permet, comme pour les listes, de connaîntre la longueur de la chaîne, c'est à dire compter le nombre de caractères. 
# + deletable=false editable=false nbgrader={"checksum": "8fd219da4c99fe09843e13750bf8dddb", "grade": false, "grade_id": "cell-f6b10442a2a23c71", "locked": true, "schema_version": 1, "solution": false} chaine = "Lycée Salvador Allende" len(chaine) # + [markdown] deletable=false editable=false nbgrader={"checksum": "d2c1e19fffcc2c3416985a2d392e9baa", "grade": false, "grade_id": "cell-4868a74804540a55", "locked": true, "schema_version": 1, "solution": false} # - La méthode ***count()*** permet de compter le nombre d'occurrences d'une sous chaîne dans une chaîne de caractères. # Le premier paramètre est la chaîne dans laquelle effectuer la recherche et le second paramètre est la sous chaîne : # + deletable=false editable=false nbgrader={"checksum": "13a3c534bae5e7b61acd5c1cf702581a", "grade": false, "grade_id": "cell-05fc3486882efb02", "locked": true, "schema_version": 1, "solution": false} chaine.count('a') # + [markdown] deletable=false editable=false nbgrader={"checksum": "1da785ae219c4f155bbfd5abb49ec888", "grade": false, "grade_id": "cell-0a7218794845426a", "locked": true, "schema_version": 1, "solution": false} # - La méthode ***find()*** permet de trouver l'indice de la première occurrence d'une sous chaîne. Les paramètres sont les mêmes que pour la fonction ***count()*** # En cas d'échec, ***find()*** renvoie la valeur -1 ( 0 correspond à l'indice du premier caractère): # On utilisera ***rfind()*** pour la dernière ocurrence. 
# + deletable=false editable=false nbgrader={"checksum": "ea434c650efcbe3bb0a36cb534ea10cd", "grade": false, "grade_id": "cell-242c7da406a6b7d2", "locked": true, "schema_version": 1, "solution": false} chaine.find('a') # + deletable=false editable=false nbgrader={"checksum": "4044316dbb047a1ea22082fa73ccddf0", "grade": false, "grade_id": "cell-c1a795bd5f50f5af", "locked": true, "schema_version": 1, "solution": false} chaine.find('b') # + deletable=false editable=false nbgrader={"checksum": "503eabecd2ebe9e31d03a718a41836ac", "grade": false, "grade_id": "cell-c1708e4efa2374ea", "locked": true, "schema_version": 1, "solution": false} chaine.rfind('a') # + [markdown] deletable=false editable=false nbgrader={"checksum": "dfda6a322244104b6abc7606f60c2635", "grade": false, "grade_id": "cell-5102e4c8a34ac6e8", "locked": true, "schema_version": 1, "solution": false} # - index() est identique à find() mais retourne une erreur en cas d'échec # + deletable=false editable=false nbgrader={"checksum": "403d7600df2687f19236f1548d0abaaa", "grade": false, "grade_id": "cell-58b385d97090df00", "locked": true, "schema_version": 1, "solution": false} chaine.index('a') # + deletable=false editable=false nbgrader={"checksum": "44e3b737813628994386990d7a243b45", "grade": false, "grade_id": "cell-3fb6abb05a3df9be", "locked": true, "schema_version": 1, "solution": false} chaine.index('b') # + [markdown] deletable=false editable=false nbgrader={"checksum": "9cef4f2b0237241cac9f19a1f25e9263", "grade": false, "grade_id": "cell-53b740f9d35fa7f5", "locked": true, "schema_version": 1, "solution": false} # - La fonction ***replace()*** permet, comme son nom l'indique, de remplacer une sous chaîne par une autre à l'intérieur d'une chaîne de caractères. 
# Les paramètres sont, dans l'ordre : la chaîne de caractères à modifier, la sous chaîne à remplacer, la sous chaîne de remplacement,et, éventuellement, le nombre maximum d'occurrences à remplacer (si non spécifié, toutes les occurrences seront remplacées). # + deletable=false editable=false nbgrader={"checksum": "33e3aea102a898f13f1eaf2c18e4752b", "grade": false, "grade_id": "cell-a9ec8c50ec03d377", "locked": true, "schema_version": 1, "solution": false} chaine.replace('a','@') # + [markdown] deletable=false editable=false nbgrader={"checksum": "147e4aac44c66d6a3659952d2a5267fb", "grade": false, "grade_id": "cell-e03f07fdbd674c2f", "locked": true, "schema_version": 1, "solution": false} # et bien sûr, la variable *chaine* n'est pas modifiée, c'est une nouvelle chaine qui est renvoyée par cette méthode ! # + deletable=false editable=false nbgrader={"checksum": "3fffde151c56edca1fe42b28eb5951ae", "grade": false, "grade_id": "cell-1401daafe4722287", "locked": true, "schema_version": 1, "solution": false} chaine # + [markdown] deletable=false editable=false nbgrader={"checksum": "a6366e1d24f87fa58507d9fc88568b75", "grade": false, "grade_id": "cell-0a2359927c070ae3", "locked": true, "schema_version": 1, "solution": false} # ## Conversion chaîne <-> nombres # # Il ne faut pas confondre les objets 12 et "12". Le premier désigne un nombre, le second est une chaîne de caractère. Les comportements et les opérations sont différents. 
# + deletable=false editable=false nbgrader={"checksum": "61f7fa7424767d975f133154b8265505", "grade": false, "grade_id": "cell-07260dabb5c68414", "locked": true, "schema_version": 1, "solution": false} 12 == "12" # + deletable=false editable=false nbgrader={"checksum": "82218a26beea0180fe07d47ccdf8df2f", "grade": false, "grade_id": "cell-bfcacd8e68ffdf02", "locked": true, "schema_version": 1, "solution": false} 12+1 # + deletable=false editable=false nbgrader={"checksum": "72425a6808761b253d89fe0866bf0d56", "grade": false, "grade_id": "cell-3533a484ab95faed", "locked": true, "schema_version": 1, "solution": false} "12"+"1" # + [markdown] deletable=false editable=false nbgrader={"checksum": "7d790583e2371a747be61a9b27f44685", "grade": false, "grade_id": "cell-aa337f9fcd8c9bcb", "locked": true, "schema_version": 1, "solution": false} # Il peut être néanmoins possible de convertir un nombre en chaîne et réciproquement comme on va le voir sur les exemples ci-dessous # + deletable=false editable=false nbgrader={"checksum": "3cf6951bc7f82b3138aac5ed50448107", "grade": false, "grade_id": "cell-803fcaeb67e810e0", "locked": true, "schema_version": 1, "solution": false} int("12") # + deletable=false editable=false nbgrader={"checksum": "d409da5e7137a3de6c13ae1db89571d1", "grade": false, "grade_id": "cell-d1d53b0512568ab3", "locked": true, "schema_version": 1, "solution": false} str(12) # + [markdown] deletable=false editable=false nbgrader={"checksum": "399cf204f54116099a8ea3c1593e8c12", "grade": false, "grade_id": "cell-3f5b8c015ec5d516", "locked": true, "schema_version": 1, "solution": false} # ## Comparaison de chaînes de caractères # De même qu'il est possible de comparer deux nombres, on peut aussi comparer des chaines par rapport à l'ordre lexicographique : # + deletable=false editable=false nbgrader={"checksum": "f07273dcf875fa8192b66aba74727bb6", "grade": false, "grade_id": "cell-249273182663e283", "locked": true, "schema_version": 1, "solution": false} "a"<"b" # + 
deletable=false editable=false nbgrader={"checksum": "d3fa4f0586257254716405128a5f41b3", "grade": false, "grade_id": "cell-02f91cff8cb53f48", "locked": true, "schema_version": 1, "solution": false} "a"<="r"<="z" # + deletable=false editable=false nbgrader={"checksum": "e78e1753c399da87c29de72fdb71e172", "grade": false, "grade_id": "cell-f1513e2e054c2a43", "locked": true, "schema_version": 1, "solution": false} "a"<="R"<="z" # + deletable=false editable=false nbgrader={"checksum": "fc6e40d79886e983448052a9ed2915b4", "grade": false, "grade_id": "cell-6a402f5e6fe6c252", "locked": true, "schema_version": 1, "solution": false} "toto"<"titi" # + [markdown] deletable=false editable=false nbgrader={"checksum": "cd69d7783c19ae8a4348d4359cbeb43b", "grade": false, "grade_id": "cell-b66848319e06bad7", "locked": true, "schema_version": 1, "solution": false} # ## Tranches de chaînes # # Le tranchage (*slicing*) fonctionne exactement comme pour les listes. Observez les exemples suivants. # + deletable=false editable=false nbgrader={"checksum": "da41d0e4ef63e3114e782dfbab2c4171", "grade": false, "grade_id": "cell-49bddc8996c68156", "locked": true, "schema_version": 1, "solution": false} chaine = "Lycée Salvador Allende" # obtenir la fin d'une chaîne chaine[6:] # + deletable=false editable=false nbgrader={"checksum": "e864adce92c8ae51a3b234ba0f5ac521", "grade": false, "grade_id": "cell-5523b816169ee333", "locked": true, "schema_version": 1, "solution": false} # obtenir le début d'une chaîne chaine[:5] # + deletable=false editable=false nbgrader={"checksum": "fc04c0fd1e67a44b34b195f7cb2d3e91", "grade": false, "grade_id": "cell-bc4d313ef3d5d533", "locked": true, "schema_version": 1, "solution": false} # Un intervalle chaine[6:14] # + [markdown] deletable=false editable=false nbgrader={"checksum": "eb2d6a38ff04f76a480684f8071e3f86", "grade": false, "grade_id": "cell-01cca1fdbbc660cb", "locked": true, "schema_version": 1, "solution": false} # Et avec des index négatifs ... 
# + deletable=false editable=false nbgrader={"checksum": "118082d4dc13ad0294649ded56f70637", "grade": false, "grade_id": "cell-95346bdd24f6c78f", "locked": true, "schema_version": 1, "solution": false} chaine[-1] # + deletable=false editable=false nbgrader={"checksum": "73e2b270da338838b2ef766fc310c848", "grade": false, "grade_id": "cell-baeda9a9e7cec72e", "locked": true, "schema_version": 1, "solution": false} chaine[:-1] # + deletable=false editable=false nbgrader={"checksum": "4e663b94a32e29109d762fd7727aebf4", "grade": false, "grade_id": "cell-586f6c2c0e045a77", "locked": true, "schema_version": 1, "solution": false} # ce dernier exemple est très pratique pour renverser un itérable chaine[::-1] # + [markdown] deletable=false editable=false nbgrader={"checksum": "6f4611b5375f15b1599dadef5066f4c2", "grade": false, "grade_id": "cell-13b83ac0809080fb", "locked": true, "schema_version": 1, "solution": false} # ## Parcourir une chaîne de caractères # # Une chaîne de raractère en Python rentre dans la catégorie des *itérables* au même titre que les listes. 
On retrouve donc les deux modes de parcours déjà rencontrés sur les listes, à savoir : # + deletable=false editable=false nbgrader={"checksum": "598c159c8a1fb9e75b6e0e150a2bfef5", "grade": false, "grade_id": "cell-73423ef5d70a70f2", "locked": true, "schema_version": 1, "solution": false} # Le parcours caractères par caractères : chaine = "ISN" for c in chaine: print(c) # + deletable=false editable=false nbgrader={"checksum": "662dd0426ef8ddf1ca861c3f9c14024a", "grade": false, "grade_id": "cell-502128e2ec19631c", "locked": true, "schema_version": 1, "solution": false} # Le parcours par indice chaine = "ISN" for i in range(len(chaine)): print(chaine[i]) # + [markdown] deletable=false editable=false nbgrader={"checksum": "791eccbef44023127a960a4e288000f6", "grade": false, "grade_id": "cell-a6d5ca4244decf3b", "locked": true, "schema_version": 1, "solution": false} # ## Mise en pratique # # ### Exercice 1 # Écrire une fonction **nb_chiffres** qui prend en paramètre une chaîne de caractère et qui renvoie le nombre de chiffres contenus dans la chaîne # + deletable=false nbgrader={"checksum": "5e4908e2bfc786a64fa5bcfbb0bb7282", "grade": false, "grade_id": "cell-78fec91508b468a9", "locked": false, "schema_version": 1, "solution": true} def nb_chiffres(chaine): # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "ba5eb94a75226584c0dd126210ae8fe8", "grade": true, "grade_id": "cell-bf248621e2a1e819", "locked": true, "points": 1, "schema_version": 1, "solution": false} chaine = "Lycée Allende - 14200 Hérouville" assert nb_chiffres(chaine)==5 # + [markdown] deletable=false editable=false nbgrader={"checksum": "bacbc8cbecd618cd07814e47e41895e2", "grade": false, "grade_id": "cell-4547881d3feb4568", "locked": true, "schema_version": 1, "solution": false} # ### Exercice 2 # Écrire une fonction **is_email** qui prend en paramètre une chaîne de caractère et renvoie True si celle-ci est une adresse mail. 
On va simplifier en considérant qu'on a une adresse email si la chaîne possède les 2 propriétés suivantes : # - Un seul caractère @ # - Un seul caractère .(point) après @. # # + deletable=false nbgrader={"checksum": "122f53142c6ba4d7d0c9ff805c39d11a", "grade": false, "grade_id": "cell-155cf2211f8dc865", "locked": false, "schema_version": 1, "solution": true} def is_email(chaine): # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "8f623a2d45befec7ca9571fe8cd40707", "grade": true, "grade_id": "cell-c3e027e42ccabeb4", "locked": true, "points": 1, "schema_version": 1, "solution": false} assert is_email("olivier.lecluse@monfai.com") assert not is_email("olivier.lecluse_AT_monfai.com") assert not is_email("olivier.lecluse@monfai") # + [markdown] deletable=false editable=false nbgrader={"checksum": "4a0fb8bd21768fa6fd9e81a4551d5085", "grade": false, "grade_id": "cell-887372193b00d4be", "locked": true, "schema_version": 1, "solution": false} # ### Exercice 3 # # Un nombre est un palindrome s'il s'écrit de la même manière de gauche à droite ou de droite à gauche. Exemple : 12521. # # Ecrire une fonction **est_palindrome** prenant en paramètre un nombre et renvoyant ***True*** ou ***False*** selon que c'est un palindrom ou non. 
# + deletable=false nbgrader={"checksum": "e6d1ad88b1d75fa3e4de7afaf65ce242", "grade": false, "grade_id": "cell-e5ef8ae434c12e26", "locked": false, "schema_version": 1, "solution": true} def est_palindrome(n): # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "8faec7c3f36c0c18843a5f3abdbd0e7b", "grade": true, "grade_id": "cell-75ce8d9f2485fbd3", "locked": true, "points": 1, "schema_version": 1, "solution": false} assert est_palindrome(12521) assert not est_palindrome(12520) # + [markdown] deletable=false editable=false nbgrader={"checksum": "3ad1ac3426c46eef4caafedfdf179f0a", "grade": false, "grade_id": "cell-5a9cf1f2d59eba83", "locked": true, "schema_version": 1, "solution": false} # ### Exercice 4 # # Ecrire une fonction **somme_chiffres** prenant un entier en paramètre et renvoyant la somme des chiffres de ce nombre. # # somme_chiffres(125) renvoie 8 puisque 1+2+5=8 # + deletable=false nbgrader={"checksum": "75a8a6e20d53db81fb6642ffac123d5c", "grade": false, "grade_id": "cell-027f91c66f836031", "locked": false, "schema_version": 1, "solution": true} def somme_chiffres(n): # YOUR CODE HERE raise NotImplementedError() # + deletable=false editable=false nbgrader={"checksum": "eb1f934a76f65c97c5dabf9cfb57b2a4", "grade": true, "grade_id": "cell-69da341446836bdb", "locked": true, "points": 1, "schema_version": 1, "solution": false} assert somme_chiffres(125)==8 # -
22,363
/notebook/visualization.ipynb
bad7c6a7f2c8fe7bb27e1ad0649685bdc28303e8
[]
no_license
kvchen/cs231n-final-project
https://github.com/kvchen/cs231n-final-project
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
24,341
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Importing Libraries # + import numpy as np import six.moves.urllib as urllib import glob import itertools import zipfile import matplotlib import math import random import cv2 import tensorflow.compat.v1 as tf tf.disable_v2_behavior() from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as vis_util from collections import defaultdict from matplotlib import pyplot as plt import matplotlib.patches as patches from PIL import Image from object_detection.utils import ops as utils_ops from numba import jit, cuda matplotlib.use('TkAgg') # - # ## Defining Path & Other Variables # + IMAGE_OUTPUT_SIZE = (12, 8) SAFE_DISTANCE = 100 #in pixels #Frozen Graph of trained models (rfcn_resnet101_coco_2018_01_28, ssd_enceptionv2, ssd_mobilenet_v1_coco_2018_01_28) FROZEN_RFCN_GRAPH = "frozen_inference_graph.pb" #define the model path here #FROZEN_SSD_INCEPTION_GRAPH = "ssd_inception_exported/frozen_inference_graph.pb" #FROZEN_SSD_MOBILENET = "ssd_mobilenet_v1_coco_exported/frozen_inference_graph.pb" frozen_graph = FROZEN_RFCN_GRAPH #define the chosen model label_map = label_map_util.load_labelmap("label_map.pbtxt") #define the label_map.pbtxt path here cap = cv2.VideoCapture('TownCentreXVID.mp4') #define the video path here width = int(cap.get(3)) height = int(cap.get(4)) dim = (width, height) # - # ## Loading the frozen graph and label map # + detection_graph = tf.Graph() with detection_graph.as_default(): graph_def = tf.GraphDef() with tf.gfile.GFile(frozen_graph, 'rb') as fid: serialized_graph = fid.read() graph_def.ParseFromString(serialized_graph) tf.import_graph_def(graph_def, name='') categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=1, use_display_name=True) 
category_index = label_map_util.create_category_index(categories) # - # ## Functions of Calculating distance between centroids of each predicted boxes # + # Convert image to numpy array def image_to_numpy_array(image): (real_width, real_height) = image.size return np.array(image.getdata()).reshape((real_height, real_width, 3)).astype(np.uint8) # Get the coordinates for each object def calculate_coordinates(box, width, height): xmin = box[1] * width ymin = box[0] * height xmax = box[3] * width ymax = box[2] * height return [xmin, ymin, xmax - xmin, ymax - ymin] # Calculate all possible permutations between centroids def calculate_permutation(centroids): permutations = [] for permutation in itertools.permutations(centroids, 2): if permutation[::-1] not in permutations: permutations.append(permutation) return permutations # - # ## Predicting person objects in each frame and displaying it as a real time video # + def run_inference(): with detection_graph.as_default(): with tf.Session() as sess: # Definite input and output Tensors for detection_graph image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. 
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0') tope = 10; limit = 0; new = True; while(cap.isOpened()): # Capture frame-by-frame ret, frame = cap.read(); if ret == True: # Correct color frame = gray = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) operations = tf.get_default_graph().get_operations() tensor_names = {output.name for operation in operations for output in operation.outputs} tensor_dict = {} for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks']: tensor = key + ":0" if tensor in tensor_names: tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor) if 'detection_masks' in tensor_dict: detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0]) detection_masks = tf.squeeze(tensor_dict['num_detections'], [0]) num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32) detection_boxes = tf.slice(detection_boxes, [0,0], [num_detection, -1]) detection_masks = tf.slice(detection_masks, [0, 0, 0], [num_detection, -1, -1]) detection_masks = util_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, image.shape[0], image.shape[1]) detection_masks = tf.cast(tf.greater(detection_masks, 0.5), tf.uint8) tensor_dict['detection_masks'] = tf.expand_dims(detection_masks, 0) # Actual detection. 
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(frame, 0)}) #The image is from parameter output_dict['num_detections'] = int(output_dict['num_detections'][0]) output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8) output_dict['detection_boxes'] = output_dict['detection_boxes'][0] output_dict['detection_scores'] = output_dict['detection_scores'][0] if 'detection_masks' in output_dict: output_dict['detection_masks'] = output_dict['detection_masks'][0] # Calculate normalized coordinates for boxes centroids = [] coordinates = [] for box in output_dict['detection_boxes']: coordinate = calculate_coordinates(box, width, height) #[xmin, ymin, xmax - xmin, ymax - ymin] centroid = (coordinate[0] + (coordinate[2]/2), coordinate[1] + (coordinate[3]/2)) centroids.append(centroid) coordinates.append(coordinate) permutations = calculate_permutation(centroids) # Display boxes and centroids fig, axis = plt.subplots(figsize = (10, 6), dpi=90, frameon=False) for coordinate, centroid in zip(coordinates, centroids): plt.axis('off') axis.add_patch(matplotlib.patches.Rectangle((coordinate[0], coordinate[1]), coordinate[2], coordinate[3], linewidth=1, edgecolor='yellow', facecolor='none', zorder=10)) axis.add_patch(matplotlib.patches.Circle((centroid[0], centroid[1]), 3, color='yellow', zorder=20)) # Display lines between centroids for permutation in permutations: x1 = permutation[0][0] x2 = permutation[1][0] y1 = permutation[0][1] y2 = permutation[1][1] # Calculate the distance between 2 centroids distance = math.sqrt((x2 - x1)**2 + (y2 - y1)**2) average_distance = distance/SAFE_DISTANCE # Calculate middle point middle = ((x1 + x2)/2, (y1 + y2)/2) # Calculate slope if(x2-x1 > 0): slope = (y2-y1)/(x2-x1) else: slope = (y2-y1) dy = math.sqrt(3**2 / (slope**2 + 1)) dx = -slope * dy # Set random location if random.randint(1, 10) % 2 == 0: dx = middle[0] - dx*10 dy = middle[1] - dy*10 else: dx = middle[0] + dx*10 dy = middle[1] + 
dy*10 if average_distance < 2: axis.annotate((round(average_distance, 2)), xy=middle, color='white', xytext=(dx, dy), fontsize=10, bbox=dict(facecolor='red', edgecolor='white', boxstyle='round', pad=0.2), zorder=30) axis.plot((x1, x2), (y1, y2), linewidth=2, color='red', zorder=15) else: pass axis.imshow(frame, interpolation='nearest') fig.canvas.draw() # Convert figure to numpy img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,)) img = np.array(fig.canvas.get_renderer()._renderer) img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR) # Display frames continuosly until 'esc' button has been pressed cv2.imshow('LIVE', img) key = cv2.waitKey(1) #Press esc key to stop the real time video if(key == 27): break # - # ## Run the program. Press 'esc' key on your keyboard to stop. #run the program run_inference() cv2.destroyAllWindows()
9,550
/Python/Reducing Traffic Mortality in the USA/Untitled.ipynb
387046dbc83e4b9778a864a484e624e42b46ca56
[]
no_license
ShoaibKhan5/DataScience-Projects
https://github.com/ShoaibKhan5/DataScience-Projects
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
8,808
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Loading Blood donation data import pandas as pd #Read in the dataset #transfusion = pd.read_csv('datasets/transfusion.data') #Print out first 5 rows of dataset #transfusion.head() # -
460
/.ipynb_checkpoints/Highlighted Features-checkpoint.ipynb
d5a82c518becb857f3b22fec2941b4f70d700c9a
[]
no_license
avaspataru/Analyze-CRISPR-Features
https://github.com/avaspataru/Analyze-CRISPR-Features
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
465,171
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Setting up import pickle import matplotlib.pyplot as plt import pandas as pd import numpy as np import re import seaborn as sns # + # tools which use the dinucleotide positional features tools_of_interest = ['ssc' ,'sgrnascorer2','tuscan-classification' ,'tuscan-regression','chop-chop-doench','chop-chop-xu', 'wu-crispr', 'deep-crispr'] dataset_name = 'xu' # features of interest (to be highlighted) features_of_interest = ['19:G', '19:C', '19:T', '17:C', '18:T', '17:T', '15:C', '13:T', '11:A', '2:G'] # - # ## Helpers to determine sequential features # from a list of features, return those which are sequential features def getSequentials(feature_names): sequentials = [] for f in feature_names: if isSequential(f): sequentials.append(f) return sequentials # given a feature name, matches the content and returns the match (None if not sequential feature) def isSequential(feature_name): # NP or N:P m = re.match(r"^(?P<nuc>[ACGT])(:)?(?P<pos>([0-9])*)$", feature_name) if m == None: # try PN or P:N m = re.match(r"^(?P<pos>([0-9])*)(:)?(?P<nuc>[ACGT])$", feature_name) if m == None: # try c1_P:N_sd m = re.match(r"^c1_(?P<pos>([0-9])*)(:)?(?P<nuc>[ACGT])_sd$", feature_name) return m # + def getAllVersions(feature_name): match = isSequential(feature_name) options = [] options.append( match['pos'] + ":" + match['nuc'] ) options.append( match['pos'] + match['nuc'] ) options.append( match['nuc'] + ":" + match['pos'] ) options.append( match['nuc'] + match['pos'] ) options.append( "c1_" + match['pos'] + ":" + match['nuc'] + "_sd" ) return options # - getAllVersions("17:C") def findIn(feature_name, features): options = getAllVersions(feature_name) for o in options: if o in features: return o return None # ## Helpers to load and scale data # # At the end of the 
loading we will have a dataframe which contains positional features only and in which all values are scaled considering the absolute value def scale_by_tool(tool_df): vals = tool_df.values.flatten() max_val = max(vals) min_val = min(vals) # maximum by absolute value in the list (all shap values for positional dinucleotide features) max_abs = abs(min_val) if abs(max_val) > abs(min_val): max_abs = abs(max_val) # divide by the max_abs in the list new_tool_df = tool_df.divide(max_abs) return new_tool_df # loads the shapValues and dataset but only keeps sequential features !! # !! includes scaling the values to only sequentials def loadPickle(tool_name): # load data from pickle pickleFileName = "SHAP-" + tool_name + "-" + dataset_name p = open('./pickles/'+pickleFileName,"rb") shapValues = pickle.load(p) dataset = pickle.load(p) p.close() shapValues = pd.DataFrame(shapValues, columns=dataset.columns) # keep only sequential features seqFeatures = getSequentials(dataset.columns) shapValues = shapValues[seqFeatures] dataset = dataset[seqFeatures] # scale sequential values shapValues = scale_by_tool(shapValues) return shapValues, dataset # # Load all tools data tool_feature_to_vals = {} for tool in tools_of_interest: sh, d = loadPickle(tool) tool_feature_to_vals[tool] = {} for f in features_of_interest: f_name = findIn(f, sh.columns) if f_name == None: continue f_sh = sh[[f_name]] f_sh = f_sh.loc[d[f_name] == 1] # f_sh contains all the shap values for 1-valued data points of feature f tool_feature_to_vals[tool][f] = f_sh.values # The tool_feature_to_vals dictionary has for each tool and important feature all the relevant shap values tool_feature_to_vals['ssc']['19:G'] # # Plot def plotForFeature(feature): plt.plot([0] * len(tools_of_interest), color= "grey", linewidth="0.5") data = [] datas = [] for i, tool in enumerate(tools_of_interest): if feature not in tool_feature_to_vals[tool].keys(): datas.append([0]) continue vals = tool_feature_to_vals[tool][feature] 
#plt.scatter([i] * len(vals), vals) #plt.violinplot(vals) dd = [] for v in vals: dd.append(v[0]) datas.append(dd) plt.violinplot(datas, positions=np.arange(0,len(tools_of_interest))) plt.yticks(np.arange(-1,1.25,0.25)) plt.xticks(np.arange(0,len(tools_of_interest)), tools_of_interest, rotation='vertical') plt.ylabel("SHAP value") plt.title("SHAP values for "+ feature) plotForFeature("19:T") # ## Plot with sns # + def plotSNSForFeature(feature): rows = [] for i, tool in enumerate(tools_of_interest): if feature not in tool_feature_to_vals[tool].keys(): rows.append([i,0]) continue vals = tool_feature_to_vals[tool][feature] for v in vals: rows.append( [i,v[0]] ) df = pd.DataFrame(rows, columns=['Tool','Shap']) ax = sns.violinplot(x=df["Tool"], y=df['Shap'], data = pd.melt(df), width=0.75, scale='width') base = np.arange(-1,len(tools_of_interest)+1) ax.plot(base,[0]*len(base), linewidth=0.5, color='grey') ax.set(ylim=(-1.1, 1.1)) #ax.set(xticks=np.arange(0,len(tools_of_interest))) #ax.set_xticklabels(tools_of_interest, rotation=90) ax.set_title("SHAP values for " + feature) ax.set(xticks=[]) ax.set(xlabel='') ax.set(ylabel='') # - # for legend from matplotlib.lines import Line2D custom_lines = [Line2D([0], [0], color='tab:blue', lw=4), Line2D([0], [0], color='tab:orange', lw=4), Line2D([0], [0], color='tab:green', lw=4), Line2D([0], [0], color='tab:red', lw=4), Line2D([0], [0], color='tab:purple', lw=4), Line2D([0], [0], color='tab:brown', lw=4), Line2D([0], [0], color='tab:pink', lw=4), Line2D([0], [0], color='tab:grey', lw=4)] def plotAllFourForPosition(pos): fig = plt.figure(figsize=(20,12)) ax1 = plt.subplot(221) plotSNSForFeature(str(pos)+":A") ax1.legend(custom_lines, tools_of_interest, loc=(0.05, 0.55)) ax2 = plt.subplot(222) plotSNSForFeature(str(pos)+":C") ax2.set(yticks=[]) ax3 = plt.subplot(223) plotSNSForFeature(str(pos)+":G") ax4 = plt.subplot(224) plotSNSForFeature(str(pos)+":T") ax4.set(yticks=[]) fig.text(0.08, 0.5, 'SHAP values', va='center', 
ha='center', rotation='vertical', fontsize=15) fig.suptitle("Distribution of SHAP values for each tool in position "+str(pos), fontsize=20) plt.savefig('./example-distribution-position-'+str(pos)+'.png') plotAllFourForPosition(19) # # Plot by separating one nucleotide # + def plotSNSForPosVs(position, nuc): feature = str(position) + ":" + nuc allFeatures = [str(position) + ":A", str(position) + ":C", str(position) + ":G", str(position) + ":T"] allFeatures.remove(feature) rows = [] for i, tool in enumerate(tools_of_interest): if feature not in tool_feature_to_vals[tool].keys(): rows.append([i,0]) continue vals = tool_feature_to_vals[tool][feature] for v in vals: rows.append( [i,v[0]] ) rowsO = [] for i, tool in enumerate(tools_of_interest): for f in allFeatures: if f not in tool_feature_to_vals[tool].keys(): rowsO.append([i,0]) continue vals = tool_feature_to_vals[tool][f] for v in vals: rowsO.append( [i,v[0]]) fig = plt.figure(figsize=(20,5)) ax1 = plt.subplot(121) df = pd.DataFrame(rows, columns=['Tool','Shap']) ax = sns.violinplot(x=df["Tool"], y=df['Shap'], data = pd.melt(df), width=0.75, scale='width') base = np.arange(-1,len(tools_of_interest)+1) ax.plot(base,[0]*len(base), linewidth=0.5, color='grey') ax.set(ylim=(-1.1, 1.1)) ax.set_title("SHAP values for " + feature) ax.set(xticks=[]) ax.set(xlabel='') ax.set(ylabel='') ax.legend(custom_lines, tools_of_interest, loc=(0.05, 0.05)) ax2 = plt.subplot(122) df = pd.DataFrame(rowsO, columns=['Tool','Shap']) ax = sns.violinplot(x=df["Tool"], y=df['Shap'], data = pd.melt(df), width=0.75, scale='width') base = np.arange(-1,len(tools_of_interest)+1) ax.plot(base,[0]*len(base), linewidth=0.5, color='grey') ax.set(ylim=(-1.1, 1.1)) ax.set_title("SHAP values for 19:C || 19:T || 19:A" ) ax.set(xticks=[]) ax.set(yticks=[]) ax.set(xlabel='') ax.set(ylabel='') plotSNSForPosVs(19,'G') # + def plotSNSForPosVs(position, nuc): feature = str(position) + ":" + nuc allFeatures = [str(position) + ":A", str(position) + ":C", 
str(position) + ":G", str(position) + ":T"] allFeatures.remove(feature) rows = [] for i, tool in enumerate(tools_of_interest): if feature not in tool_feature_to_vals[tool].keys(): rows.append([i,0]) continue vals = tool_feature_to_vals[tool][feature] for v in vals: rows.append( [i,v[0]] ) rowsO = [] for i, tool in enumerate(tools_of_interest): for f in allFeatures: if f not in tool_feature_to_vals[tool].keys(): rowsO.append([i,0]) continue vals = tool_feature_to_vals[tool][f] for v in vals: rowsO.append( [i,v[0]]) fig = plt.figure(figsize=(20,5)) ax1 = plt.subplot(121) df = pd.DataFrame(rows, columns=['Tool','Shap']) ax = sns.violinplot(x=df["Tool"], y=df['Shap'], data = pd.melt(df), width=0.75, scale='width') base = np.arange(-1,len(tools_of_interest)+1) ax.plot(base,[0]*len(base), linewidth=0.5, color='grey') ax.set(ylim=(-1.1, 1.1)) ax.set_title("SHAP values for " + feature) ax.set(xticks=[]) ax.set(xlabel='') ax.set(ylabel='') ax.legend(custom_lines, tools_of_interest, loc=(0.05, 0.05)) ax2 = plt.subplot(122) df = pd.DataFrame(rowsO, columns=['Tool','Shap']) ax = sns.violinplot(x=df["Tool"], y=df['Shap'], data = pd.melt(df), width=0.75, scale='width') base = np.arange(-1,len(tools_of_interest)+1) ax.plot(base,[0]*len(base), linewidth=0.5, color='grey') ax.set(ylim=(-1.1, 1.1)) ax.set_title("SHAP values for 17:A || 17:G || 17:T" ) ax.set(xticks=[]) ax.set(yticks=[]) ax.set(xlabel='') ax.set(ylabel='') plotSNSForPosVs(17,'C') # - # # Plots of interesting features # + fig = plt.figure(figsize=(20,24)) ax1 = plt.subplot(521) plotSNSForFeature("19:T") ax1.legend(custom_lines, tools_of_interest, loc=(0.05, 0.55)) ax2 = plt.subplot(522) ax2.set(yticks=[]) plotSNSForFeature("19:G") ax2 = plt.subplot(523) plotSNSForFeature("19:C") ax2 = plt.subplot(524) ax2.set(yticks=[]) plotSNSForFeature("18:T") ax2 = plt.subplot(525) plotSNSForFeature("17:C") ax2 = plt.subplot(526) ax2.set(yticks=[]) plotSNSForFeature("17:T") ax2 = plt.subplot(527) plotSNSForFeature("15:C") ax2 = 
plt.subplot(528) ax2.set(yticks=[]) plotSNSForFeature("13:T") ax2 = plt.subplot(529) plotSNSForFeature("11:A") ax2 = plt.subplot(5,2,10) ax2.set(yticks=[]) plotSNSForFeature("2:G") plt.tight_layout() plt.savefig('./all-positional-distributions.png') # -
11,914
/notebooks/Library of Plans.ipynb
58514b146737903a5935920e5a7b23cda6b15e44
[]
no_license
klauer/epics-docker
https://github.com/klauer/epics-docker
0
0
null
2016-01-23T17:17:19
2015-12-08T15:08:18
Makefile
Jupyter Notebook
false
false
.py
9,405
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Library of Plans # + from bluesky import RunEngine from bluesky.callbacks import LiveTable from bluesky.examples import motor, det RE = RunEngine({'owner': 'demo', 'group': 'demo', 'beamline_id': 'demo'}) # - # ## How to use a plan # # We'll start with a simple plan which moves nothing but takes one measurement (a "count") from a list of detectors. from bluesky.scans import Count count = Count([det]) RE(count, LiveTable([det])) # The `count` object encapsulates a list of messages. list(count) # We can edit the attributes of count, causing that list to change. count.num len(list(count)) # how many messages does it issue? count.num = 2 len(list(count)) # how about now? RE(count, LiveTable([det])) # It might be convenient to have more than one instance of `Count` at our disposal. count_to_ten = Count([det], num=10) # ## 1D Scans # # The most common 1D step scan takes linearly spaced steps. This is what `AbsScan` and `DeltaScan` do. from bluesky.scans import AbsScan, DeltaScan, AbsListScan, DeltaListScan RE(AbsScan([det], motor, 1, 5, 5), LiveTable([motor, det])) motor.set(10) # move the motor to 10; this is where the relative motion of the "DeltaScan" will start RE(DeltaScan([det], motor, 1, 5, 5), LiveTable([motor, det])) # The steps need not be linearly spaced; we can specify a list of positions manually. RE(AbsListScan([det], motor, [1, 3, 7, 15]), LiveTable([motor, det])) # There are also log-spaced options `LogAbsScan` and `LogDeltaScan`. # ## Scans that Move Motors in Parallel # # We call these "inner product" scans because the overall trajectory is in a sense an inner product of the individual motors' trajectories. 
from bluesky.scans import InnerProductAbsScan, InnerProductDeltaScan from bluesky.examples import motor1, motor2 # Let's plan a 5-point trajectory moving motor1 from 1 to 5 and motor2 for 10 to 50 in linearly spaced steps. plan = InnerProductAbsScan([det], 5, motor1, 1, 5, motor2, 10, 50) RE(plan, LiveTable(['motor1', 'motor2'])) # We are not limited to two motors. This works for any number. # ## Scans that Move Motors in a Mesh # # We call mesh scans "output product" scans because the overall trajectory is in a sense an outer product of the individual motors' trajectories. from bluesky.scans import OuterProductAbsScan, OuterProductDeltaScan # Let's move motor1 in 5 steps from 1 to 5 and motor2 in 3 steps from 10 to 3. Unlike the inner product scans, each motor can take different number of steps, so we specify them separately. plan = OuterProductAbsScan([det], motor1, 1, 5, 5, motor2, 10, 30, 3, False) # We explain the 'False' argument later. RE(plan, LiveTable(['motor1', 'motor2'])) # Notice that the first motor is the "slow" motor and the second motor is the "fast" motor. By choosing the order the motors are given, you choose the path that the mesh is traversed. # # In addition to start, stop, and number of steps, every motor except the slowest motor gets a True or False argument that indicates where it should take a snake-like trajectory for maximum efficiency. Observe the difference if we set motor's `snake` argument to `True`. (It was `False` above.) plan = OuterProductAbsScan([det], motor1, 1, 5, 5, motor2, 10, 30, 3, True) RE(plan, LiveTable(['motor1', 'motor2'])) # ## Generalized ND Scans # # What about a scan that moves two motors together (an inner product) in a mesh (outer product) against a third motor? To define arbitrary N-dimensional compositions of trajectories, we use `cycler`. Adding two cyclers makes an inner product; multiplying two cyclers makes an outer product. 
from cycler import cycler cycler('thing 1', [1, 2, 3]) + cycler('thing 2', [10, 20, 30]) cycler('thing 1', [1, 2, 3]) * cycler('thing 2', [10, 20, 30]) # The ScanND object accepts a cycler and makes it into a plan. # + from bluesky import ScanND # TODO # - # ## Adaptive Scans # + # TODO
4,169
/3.ipynb
25fdf5c4b220db5679513a6c2213a84944bbf595
[]
no_license
wanyun-yang/Fully-connected-feedforward-Neural-Network
https://github.com/wanyun-yang/Fully-connected-feedforward-Neural-Network
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
5,524
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/wanyun-yang/Neural-Network-1/blob/main/3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="y2J_uSNROHbF" import torch import torch.nn as nn import torch.nn.functional as F device = torch.device('cuba:0' if torch.cuba.is_available else 'cpu') class Net(nn.Moudule): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1,6,5) self.conv2 = nn.Conv2d(6,16,(5,5)) self.pool = nn.MaxPool2d(2,2) self.fc1 = nn.Linear(16*4*4,120) self.fc2 = nn.Linear(120,84) self.fc3 = nn.Linear(84,10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(--1,self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.log_softmax(self.fc3(x)) return x # x --> conv1 -->relu -->pooling -->conv2 --> relu -->pooling --> fully connected def num_flat_features(self,x): size = x.size()[1:] num_features = 1 for s in size: num_features *=s return num_features # + id="35FKs-m2Svcz" import os import glob import numpy as np from skimage import io from torch.utils.data import Dataset, DataLoader class MNISTDataset(Dataset): def __init__(self, dir, transform=None): self.dir = dir self.transform = transform def __len__(self): files = glob.glob(self.dir+'/*.jpg')[:100] return len(files) def __geitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() all_files = glob.glob(slef.dir+'/*.jpg')[:100] img_fname = os.path.join(self.dir, all_files[idx]) image = io.imread(img_fname) digit = int(self.dir.split('/')[-1].strip()) label = np.array(digit) instance = {'image':image,'label':label} if self.transform: instance = self.transform(instance) return 
instance # + id="MtfFMQ3WU9Hk" class Rescale(object): def __init__(self, output_size): assert isinstance(output_size,(int,tuple)) self.output_size = output_size def __call__(self,sample): image, label = sample['image'],sample['label'] h,w = image.shape[-2:] if isinstance(self.output_size, int): if h > w: new_h, new_w = self.output_size*h/w, self.output_size else: new_h, new_w = self.output_size, self.output_size*w/h else: new_h, new_w = self.output_size new_h, new_w = int(new_h),int(new_w) new_image = transform.resize(image,(new_h, new_w)) return {'image': new_image, 'label': label} class ToTensor(object): def __call__(self, sample): image, label = sample['image'],sample['label'] image = image.reshape((1, image.shape[0],image.shape[1])) return ('image':torch.from_numpy(image),'label':torch.from_numpy(label)) # + id="zwDPnwG3Yq8a" from torch.utils.data import random_split from torchvision batch_size = 32 list_datasets = [] for i in range(10): cur_da = MNISTDataset('')
3,335
/Microsoft-byte1gram_CountVectorizer_tfidf-kNN_k4-0.9465acc.ipynb
b95fc1861d5e5a5690c524a05b68ea8c5fadc3c5
[]
no_license
fengliuw/Microsoft
https://github.com/fengliuw/Microsoft
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,345,716
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # ES-DOC CMIP6 Model Properties - Aerosol # **MIP Era**: CMIP6 # **Institute**: MESSY-CONSORTIUM # **Source ID**: EMAC-2-53-AERCHEM # **Topic**: Aerosol # **Sub-Topics**: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. # **Properties**: 69 (37 required) # **Model descriptions**: [Model description details](https://specializations.es-doc.org/cmip6/aerosol?client=jupyter-notebook) # **Initialized From**: -- # # **Notebook Help**: [Goto notebook help page](https://es-doc.org/cmip6-models-documenting-with-ipython) # **Notebook Initialised**: 2018-02-15 16:54:10 # ### Document Setup # **IMPORTANT: to be executed each time you run the notebook** # + # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-aerchem', 'aerosol') # - # ### Document Authors # *Set document authors* # + # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # - # ### Document Contributors # *Specify document contributors* # + # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # - # ### Document Publication # *Specify document publication status* # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # ### Document Table of Contents # [1. Key Properties](#1.-Key-Properties) # [2. Key Properties --&gt; Software Properties](#2.-Key-Properties---&gt;-Software-Properties) # [3. Key Properties --&gt; Timestep Framework](#3.-Key-Properties---&gt;-Timestep-Framework) # [4. Key Properties --&gt; Meteorological Forcings](#4.-Key-Properties---&gt;-Meteorological-Forcings) # [5. Key Properties --&gt; Resolution](#5.-Key-Properties---&gt;-Resolution) # [6. 
Key Properties --&gt; Tuning Applied](#6.-Key-Properties---&gt;-Tuning-Applied) # [7. Transport](#7.-Transport) # [8. Emissions](#8.-Emissions) # [9. Concentrations](#9.-Concentrations) # [10. Optical Radiative Properties](#10.-Optical-Radiative-Properties) # [11. Optical Radiative Properties --&gt; Absorption](#11.-Optical-Radiative-Properties---&gt;-Absorption) # [12. Optical Radiative Properties --&gt; Mixtures](#12.-Optical-Radiative-Properties---&gt;-Mixtures) # [13. Optical Radiative Properties --&gt; Impact Of H2o](#13.-Optical-Radiative-Properties---&gt;-Impact-Of-H2o) # [14. Optical Radiative Properties --&gt; Radiative Scheme](#14.-Optical-Radiative-Properties---&gt;-Radiative-Scheme) # [15. Optical Radiative Properties --&gt; Cloud Interactions](#15.-Optical-Radiative-Properties---&gt;-Cloud-Interactions) # [16. Model](#16.-Model) # # # # 1. Key Properties # *Key properties of the aerosol model* # ### 1.1. Model Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of aerosol model.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 1.2. Model Name # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Name of aerosol model code* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 1.3. Scheme Scope # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Atmospheric domains covered by the aerosol model* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 1.4. Basic Approximations # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Basic approximations made in the aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 1.5. Prognostic Variables Form # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Prognostic variables in the aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 1.6. Number Of Tracers # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Number of tracers in the aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 1.7. Family Approach # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Are aerosol calculations generalized into families of species?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # # 2. 
Key Properties --&gt; Software Properties # *Software properties of aerosol code* # ### 2.1. Repository # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Location of code for this component.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 2.2. Code Version # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Code version identifier.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 2.3. Code Languages # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *Code language(s).* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 3. Key Properties --&gt; Timestep Framework # *Physical properties of seawater in ocean* # ### 3.1. Method # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Mathematical method deployed to solve the time evolution of the prognostic variables* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 3.2. 
Split Operator Advection Timestep # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Timestep for aerosol advection (in seconds)* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 3.3. Split Operator Physical Timestep # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Timestep for aerosol physics (in seconds).* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 3.4. Integrated Timestep # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Timestep for the aerosol model (in seconds)* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 3.5. Integrated Scheme Type # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Specify the type of timestep scheme* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) # - # # 4. Key Properties --&gt; Meteorological Forcings # ** # ### 4.1. 
Variables 3D # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 4.2. Variables 2D # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Two dimensionsal forcing variables, e.g. land-sea mask definition* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 4.3. Frequency # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Frequency with which meteological forcings are applied (in seconds).* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # # 5. Key Properties --&gt; Resolution # *Resolution in the aersosol model grid* # ### 5.1. Name # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 5.2. Canonical Horizontal Resolution # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Expression quoted for gross comparisons of resolution, eg. 
50km or 0.1 degrees etc.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 5.3. Number Of Horizontal Gridpoints # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Total number of horizontal (XY) points (or degrees of freedom) on computational grid.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 5.4. Number Of Vertical Levels # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Number of vertical levels resolved on computational grid.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 5.5. Is Adaptive Grid # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Default is False. Set true if grid resolution changes during execution.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # # 6. Key Properties --&gt; Tuning Applied # *Tuning methodology for aerosol model* # ### 6.1. Description # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *General overview description of tuning: explain and motivate the main targets and metrics retained. 
&amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 6.2. Global Mean Metrics Used # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *List set of metrics of the global mean state used in tuning model/component* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 6.3. Regional Metrics Used # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *List of regional metrics of mean state used in tuning model/component* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 6.4. Trend Metrics Used # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *List observed trend metrics used in tuning model/component* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 7. Transport # *Aerosol transport* # ### 7.1. 
Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of transport in atmosperic aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 7.2. Scheme # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Method for aerosol transport modeling* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) # - # ### 7.3. Mass Conservation Scheme # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Method used to ensure mass conservation.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 7.4. Convention # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Transport by convention* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) # - # # 8. Emissions # *Atmospheric aerosol emissions* # ### 8.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of emissions in atmosperic aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 8.2. Method # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Method used to define aerosol species (several methods allowed because the different species may not use the same method).* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 8.3. Sources # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *Sources of the aerosol species are taken into account in the emissions scheme* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 8.4. 
Prescribed Climatology # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Specify the climatology type for aerosol emissions* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) # - # ### 8.5. Prescribed Climatology Emitted Species # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of aerosol species emitted and prescribed via a climatology* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 8.6. Prescribed Spatially Uniform Emitted Species # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of aerosol species emitted and prescribed as spatially uniform* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 8.7. Interactive Emitted Species # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of aerosol species emitted and specified via an interactive method* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 8.8. 
Other Emitted Species # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of aerosol species emitted and specified via an &quot;other method&quot;* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 8.9. Other Method Characteristics # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Characteristics of the &quot;other method&quot; used for aerosol emissions* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 9. Concentrations # *Atmospheric aerosol concentrations* # ### 9.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of concentrations in atmosperic aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 9.2. Prescribed Lower Boundary # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of species prescribed at the lower boundary.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 9.3. Prescribed Upper Boundary # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of species prescribed at the upper boundary.* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 9.4. Prescribed Fields Mmr # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of species prescribed as mass mixing ratios.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 9.5. Prescribed Fields Mmr # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *List of species prescribed as AOD plus CCNs.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 10. Optical Radiative Properties # *Aerosol optical and radiative properties* # ### 10.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of optical and radiative properties* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 11. Optical Radiative Properties --&gt; Absorption # *Absortion properties in aerosol scheme* # ### 11.1. Black Carbon # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** FLOAT&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 11.2. 
Dust # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** FLOAT&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0)* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 11.3. Organics # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** FLOAT&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0)* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # # 12. Optical Radiative Properties --&gt; Mixtures # ** # ### 12.1. External # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Is there external mixing with respect to chemical composition?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 12.2. Internal # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Is there internal mixing with respect to chemical composition?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 12.3. 
Mixing Rule # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *If there is internal mixing with respect to chemical composition then indicate the mixinrg rule* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # # 13. Optical Radiative Properties --&gt; Impact Of H2o # ** # ### 13.1. Size # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Does H2O impact size?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 13.2. Internal Mixture # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Does H2O impact internal mixture?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # # 14. Optical Radiative Properties --&gt; Radiative Scheme # *Radiative scheme for aerosol* # ### 14.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of radiative scheme* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 14.2. Shortwave Bands # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Number of shortwave bands* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 14.3. Longwave Bands # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Number of longwave bands* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # # 15. Optical Radiative Properties --&gt; Cloud Interactions # *Aerosol-cloud interactions* # ### 15.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of aerosol-cloud interactions* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 15.2. Twomey # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Is the Twomey effect included?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 15.3. Twomey Minimum Ccn # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.1 # ### *If the Twomey effect is included, then what is the minimum CCN number?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # ### 15.4. 
Drizzle # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Does the scheme affect drizzle?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 15.5. Cloud Lifetime # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Does the scheme affect cloud lifetime?* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # - # ### 15.6. Longwave Bands # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** INTEGER&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Number of longwave bands* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # - # # 16. Model # *Aerosol model* # ### 16.1. Overview # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** STRING&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.1 # ### *Overview of atmosperic aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # - # ### 16.2. Processes # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Processes included in the Aerosol model.* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) # - # ### 16.3. Coupling # **Is Required:** FALSE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 0.N # ### *Other model components coupled to the Aerosol model* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 16.4. Gas Phase Precursors # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *List of gas phase aerosol precursors.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 16.5. Scheme Type # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).* # + # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### 16.6. Bulk Scheme Species # **Is Required:** TRUE&nbsp;&nbsp;&nbsp;&nbsp;**Type:** ENUM&nbsp;&nbsp;&nbsp;&nbsp;**Cardinality:** 1.N # ### *List of species covered by the bulk scheme.* # + # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) # - # ### ©2017 [ES-DOC](https://es-doc.org) #
36,653
/“extract_data_with_bert_hub_ipynb”的副本.ipynb
2d18d64572e0a06209cb6b868ce17a350deb5a72
[]
no_license
lclazx/nlp_learning
https://github.com/lclazx/nlp_learning
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
203,168
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/lclazx/nlp_learning/blob/master/%E2%80%9Cextract_data_with_bert_hub_ipynb%E2%80%9D%E7%9A%84%E5%89%AF%E6%9C%AC.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="8_TTthQGi2JC" colab_type="text" # # 步骤一:引用所需资源 # + id="nBVMv99nipRK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="1d90ecec-ce65-4622-d71b-8406861dc150" # !pip install bert-tensorflow import tensorflow as tf import tensorflow_hub as hub import bert from bert import optimization from bert import tokenization import tf_metrics # + [markdown] id="arPAta2SjfvG" colab_type="text" # 步骤二:准备数据 # + id="EZZS48B_jjwU" colab_type="code" colab={} class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, token_label_ids, predicate_label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.token_label_ids = token_label_ids self.predicate_label_id = predicate_label_id self.is_real_example = is_real_example # + id="WG-fN6RTzYcD" colab_type="code" colab={} def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() # + id="AAJz2cbSm-2b" colab_type="code" colab={} def convert_single_example(ex_index, example, token_label_list, predicate_label_list, max_seq_length, tokenizer): if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0]*max_seq_length, input_mask=[0]*max_seq_length, segment_ids=[0]*max_seq_length, token_label_ids=[0]*max_seq_length, predicate_label_id=[0], is_real_example=False ) token_label_map={} for(i, label) in enumerate(token_label_list): token_label_map[label]=i predicate_label_map={} for(i, label) in enumerate(predicate_label_list): predicate_label_map[label] = i text_token = example.text_token[0:-1] text_predicate = example.text_token[-1] token_label = example.token_label[0:-1] token_predicate = example.token_label[-1] assert token_predicate == text_predicate tokens_b = [text_predicate]*len(text_token) predicate_id = predicate_label_map[text_predicate] _truncate_seq_pair(text_token, tokens_b, max_seq_length-3) tokens = [] token_label_ids = [] segment_ids = [] tokens.append('[CLS]') token_label_ids.append(token_label_map["[CLS]"]) segment_ids.append(0) for token, label in zip(text_token, token_label): tokens.append(token) token_label_ids.append(token_label_map[label]) segment_ids.append(0) tokens.append('[SEP]') token_label_ids.append(token_label_map['[SEP]']) segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(tokens) bias = 1 for token in tokens_b: input_ids.append(predicate_id + bias) segment_ids.append(1) token_label_ids.append(token_label_map['[category]']) input_ids.append(tokenizer.convert_tokens_to_ids(['[SEP]'])[0]) segment_ids.append(1) token_label_ids.append(token_label_map['[SEP]']) input_mask = [1]*len(input_ids) while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) token_label_ids.append(0) 
def convert_examples_to_features(examples, token_label_list, predicate_label_list, max_seq_length, tokenizer):
    """Map every InputExample to an InputFeatures via convert_single_example.

    The example index is forwarded so that convert_single_example can log the
    first few converted samples.
    """
    return [
        convert_single_example(idx, example, token_label_list,
                               predicate_label_list, max_seq_length, tokenizer)
        for idx, example in enumerate(examples)
    ]
def find_bio_in_text_token(so_object_text, text_token):
    """Locate the surface string of a subject/object mention in a token list.

    WordPiece continuation markers ('##') are stripped from each token before
    comparison, so a mention may span several word pieces.

    Args:
        so_object_text: Surface text of the subject or object mention.
        text_token: Tokenized sentence (WordPiece tokens).

    Returns:
        (start, end): half-open token-index span such that the tokens
        text_token[start:end], with '##' removed, concatenate exactly to
        so_object_text.  Returns (-1, -1) when no span matches.
    """
    for i, token in enumerate(text_token):
        if not so_object_text.startswith(token.replace('##', '')):
            continue
        # Grow the candidate span one token at a time, accumulating the
        # concatenation incrementally instead of re-joining the slice on
        # every step.  The upper bound is len(text_token) + 1 so a span
        # ending at the final token (slice text_token[i:len(text_token)])
        # can still match; the original range(i + 1, len(text_token))
        # silently missed any mention touching the end of the sentence.
        concatenated = ''
        for j in range(i + 1, len(text_token) + 1):
            concatenated += text_token[j - 1].replace('##', '')
            if so_object_text == concatenated:
                return i, j
            if not so_object_text.startswith(concatenated):
                break  # diverged; restart the scan from the next start token
    return -1, -1
def get_token_labels():
    """Return the BIO token-label vocabulary.

    Index 0 must stay "[Padding]" because padded positions are labelled 0
    when features are built.
    """
    return [
        "[Padding]",      # id 0 --> padding positions
        "[category]",     # predicate placeholder tokens in segment B
        "[##WordPiece]",
        "[CLS]",
        "[SEP]",
        "B-SUB",
        "I-SUB",
        "B-OBJ",
        "I-OBJ",
        "O",
    ]
all_predicate_label_ids = [] for feature in input_features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_token_label_ids.append(feature.token_label_ids) all_predicate_label_ids.append(feature.predicate_label_id) def input_fn(params): batch_size = params['batch_size'] num_examples = len(input_features) d = tf.data.Dataset.from_tensor_slices({ 'input_ids': tf.constant(all_input_ids, dtype=tf.int32, shape=[num_examples, seq_length]), 'input_mask': tf.constant(all_input_mask, dtype=tf.int32, shape=[num_examples, seq_length]), 'segment_ids': tf.constant(all_segment_ids, dtype=tf.int32, shape=[num_examples, seq_length]), 'token_label_ids':tf.constant(all_token_label_ids, dtype=tf.int32, shape=[num_examples, seq_length]), 'predicate_label_ids': tf.constant(all_predicate_label_ids, dtype=tf.int32, shape=[num_examples]) }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size, drop_remainder=drop_remainder) return d return input_fn # + id="KqEIdVCjCHuq" colab_type="code" colab={} def create_model(is_training, input_ids, input_mask, segment_ids, token_label_ids, predicate_label_ids, num_token_labels, num_predicate_labels, seq_length): bert_module = hub.Module(BERT_MODEL_HUB, trainable=True) bert_input = dict(input_ids=input_ids, segment_ids=segment_ids, input_mask=input_mask) bert_output = bert_module(inputs=bert_input, signature='tokens', as_dict=True) pooled_output_layer = bert_output['pooled_output'] pooled_hidden_size = pooled_output_layer.shape[-1].value pooled_output_weight = tf.get_variable('pooled_output_weight', [num_predicate_labels, pooled_hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) pooled_output_bias = tf.get_variable('pooled_output_bias', [num_predicate_labels], initializer=tf.zeros_initializer) with tf.variable_scope('predicate_loss'): if is_training: pooled_output_layer = tf.nn.dropout(pooled_output_layer, 
keep_prob=0.9) predicate_logits = tf.matmul(pooled_output_layer, pooled_output_weight, transpose_b=True) predicate_logits = tf.add(predicate_logits, pooled_output_bias) predicate_probabilities = tf.nn.softmax(predicate_logits, axis=-1) predicate_prediction = tf.argmax(predicate_probabilities, axis=-1, output_type=tf.int32) predicate_labels = tf.one_hot(predicate_label_ids, depth=num_predicate_labels, dtype=tf.float32) predicate_per_example_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=predicate_logits, labels=predicate_labels), -1) predicate_loss = tf.reduce_mean(predicate_per_example_loss) sequence_output_layer = bert_output['sequence_output'] sequence_hidden_size = sequence_output_layer.shape[-1].value sequence_output_weight = tf.get_variable('sequence_output_weight', [num_token_labels, sequence_hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) sequence_output_bias = tf.get_variable('sequence_output_bias', [num_token_labels], initializer=tf.zeros_initializer) with tf.variable_scope('token_label_loss'): if is_training: sequence_output_layer = tf.nn.dropout(sequence_output_layer, keep_prob=0.9) sequence_output_layer = tf.reshape(sequence_output_layer, [-1, sequence_hidden_size]) token_label_logits = tf.matmul(sequence_output_layer, sequence_output_weight, transpose_b=True) token_label_logits = tf.add(token_label_logits, sequence_output_bias) token_label_logits = tf.reshape(token_label_logits, [-1, seq_length, num_token_labels]) token_label_log_probs = tf.nn.log_softmax(token_label_logits, axis=-1) token_label_one_hot_labels = tf.one_hot(token_label_ids, depth=num_token_labels, axis=-1) token_label_per_example_loss = -tf.reduce_sum(token_label_one_hot_labels*token_label_log_probs, axis=-1) token_label_loss = tf.reduce_sum(token_label_per_example_loss) token_label_probabilities = tf.nn.softmax(token_label_logits, axis=-1) token_label_predictions = tf.argmax(token_label_probabilities, axis=-1) loss = 0.5 * predicate_loss + 
def model_fn_builder(num_token_labels, num_predicate_labels, learning_rate,
                     num_train_steps, num_warmup_steps, seq_length):
    """Build a model_fn closure for tf.estimator.Estimator.

    Args:
        num_token_labels: Size of the per-token (BIO) label vocabulary.
        num_predicate_labels: Size of the predicate label vocabulary.
        learning_rate: Peak learning rate for the BERT warmup optimizer.
        num_train_steps: Total number of training steps.
        num_warmup_steps: Number of linear warmup steps.
        seq_length: Fixed input sequence length (must match the features).

    Returns:
        A model_fn(features, labels, mode, params) handling TRAIN, EVAL and
        PREDICT modes.
    """
    def model_fn(features, labels, mode, params):
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        token_label_ids = features["token_label_ids"]
        predicate_ids = features["predicate_label_ids"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, predicate_loss, predicate_per_example_loss,
         predicate_probabilities, predicate_prediction,
         token_label_loss, token_label_per_example_loss,
         token_label_logits, token_label_predictions) = create_model(
            is_training, input_ids, input_mask, segment_ids, token_label_ids,
            predicate_ids, num_token_labels, num_predicate_labels, seq_length)

        if is_training:
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
            return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss,
                                              train_op=train_op)
        elif mode == tf.estimator.ModeKeys.EVAL:
            def metric_fn(predicate_loss, token_label_per_example_loss,
                          token_label_ids, token_label_logits):
                """Return a dict of (value, update_op) metric pairs."""
                token_label_predictions = tf.argmax(
                    token_label_logits, axis=-1, output_type=tf.int32)
                # Positive classes for the token-level metrics: drop the
                # first four special tags ([Padding], [category],
                # [##WordPiece], [CLS]) and the trailing 'O' tag.
                token_label_pos_indices_list = list(range(num_token_labels))[4:]
                pos_indices_list = token_label_pos_indices_list[:-1]
                token_label_precision_macro = tf_metrics.precision(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='macro')
                token_label_recall_macro = tf_metrics.recall(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='macro')
                # Fixed: original referenced undefined name `token_labels_ids`
                # here, which raised NameError at graph-build time.
                token_label_f_macro = tf_metrics.f1(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='macro')
                token_label_precision_micro = tf_metrics.precision(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='micro')
                # Fixed: original omitted pos_indices_list for the micro
                # recall, inconsistently scoring all classes incl. padding.
                token_label_recall_micro = tf_metrics.recall(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='micro')
                token_label_f_micro = tf_metrics.f1(
                    token_label_ids, token_label_predictions, num_token_labels,
                    pos_indices_list, average='micro')
                token_label_loss = tf.metrics.mean(values=token_label_per_example_loss)
                predicate_loss = tf.metrics.mean(values=predicate_loss)
                # NOTE(review): the original dict also exposed the raw
                # `predicate_prediction` tensor, which is not a valid
                # (value, update_op) metric pair and would crash eval.
                return {
                    'eval_predicate_loss': predicate_loss,
                    'eval_token_label_precision(macro)': token_label_precision_macro,
                    'eval_token_label_recall(macro)': token_label_recall_macro,
                    'eval_token_label_f(macro)': token_label_f_macro,
                    'eval_token_label_precision(micro)': token_label_precision_micro,
                    'eval_token_label_recall(micro)': token_label_recall_micro,
                    'eval_token_label_f(micro)': token_label_f_micro,
                    'eval_token_label_loss': token_label_loss,
                }
            # Fixed: tf.estimator.EstimatorSpec takes `eval_metric_ops`
            # (a dict of metric pairs); the (fn, args) tuple form under an
            # `eval_metrics` kwarg belongs to TPUEstimatorSpec only.
            eval_metric_ops = metric_fn(
                predicate_loss, token_label_per_example_loss,
                token_label_ids, token_label_logits)
            return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss,
                                              eval_metric_ops=eval_metric_ops)
        else:
            # PREDICT mode.
            predictions = {
                'predicate_probabilities': predicate_probabilities,
                'predicate_prediction': predicate_prediction,
                'token_label_predictions': token_label_predictions,
            }
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    return model_fn
def get_prediction(texts_with_predicates):
    """Run SPO extraction for a batch of {'text': ..., 'predicate': ...} dicts.

    For each input the predicate is appended to the token sequence (mirroring
    the training format), the fine-tuned estimator predicts per-token BIO
    labels, and the subject/object word pieces are collected from those
    labels.

    Returns:
        A list of dicts with keys 'text', 'subject', 'object', 'predicate'
        (subject/object are lists of word-piece tokens; predicate is the
        predicted predicate-class id).
    """
    predict_examples = []
    for item in texts_with_predicates:
        text_token = tokenizer.tokenize(item['text'])
        text_token.append(item['predicate'])
        token_labels = ['O'] * (len(text_token) - 1) + [item['predicate']]
        predict_examples.append(
            InputExample(guid=None, text_token=text_token, token_labels=token_labels))

    predict_features = convert_examples_to_features(
        predict_examples, get_token_labels(), get_predicate_labels(), 128, tokenizer)
    predict_input_fn = input_fn_builder(
        predict_features, 128, False,
        len(get_token_labels()), len(get_predicate_labels()), False)
    predictions = estimator.predict(input_fn=predict_input_fn)

    text_token_labels = get_token_labels()
    # Hoist the repeated list.index() lookups out of the per-token loop.
    sub_ids = {text_token_labels.index('B-SUB'), text_token_labels.index('I-SUB')}
    obj_ids = {text_token_labels.index('B-OBJ'), text_token_labels.index('I-OBJ')}

    results = []
    for i, prediction in enumerate(predictions):
        text = texts_with_predicates[i]['text']
        text_token = tokenizer.tokenize(text)
        subject_word = []
        object_word = []
        for index, label in enumerate(prediction['token_label_predictions']):
            # Position 0 is [CLS], so sentence token k sits at position k+1.
            # Fixed: the original indexed text_token[index-1] unguarded, so a
            # SUB/OBJ prediction at position 0 wrapped to the *last* token and
            # one in the [SEP]/predicate/padding region raised IndexError.
            if not 1 <= index <= len(text_token):
                continue
            if label in obj_ids:
                object_word.append(text_token[index - 1])
            elif label in sub_ids:
                subject_word.append(text_token[index - 1])
        results.append({
            'text': text,
            'subject': subject_word,
            'object': object_word,
            'predicate': prediction['predicate_prediction'],
        })
    return results
25,705
/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb
86b21b6046a6392a98809adae5b56e0389d79a61
[ "MIT" ]
permissive
NielsRogge/Transformers-Tutorials
https://github.com/NielsRogge/Transformers-Tutorials
5,435
875
MIT
2023-05-21T19:13:40
2023-05-21T18:13:39
Jupyter Notebook
Jupyter Notebook
false
false
.py
179,629
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # Proof of <a class="ProveItLink" href="../../../../_context_.ipynb">proveit</a>.<a class="ProveItLink" href="../../../_context_.ipynb">number</a>.<a class="ProveItLink" href="../../_context_.ipynb">numeral</a>.<a class="ProveItLink" href="../_context_.ipynb">decimal</a>.<a class="ProveItLink" href="../_theorems_.ipynb#nat1">nat1</a> theorem # ======== # + deletable=true editable=true import proveit context = proveit.Context('..') # the theorem's context is in the parent directory # + deletable=true editable=true # %proving nat1 presuming [] # + deletable=true editable=true el has BERT as its base architecture, with a token classification head on top, allowing it to make predictions at the token level, rather than the sequence level. Named entity recognition is typically treated as a token classification problem, so that's what we are going to use it for. # # This tutorial uses the idea of **transfer learning**, i.e. first pretraining a large neural network in an unsupervised way, and then fine-tuning that neural network on a task of interest. In this case, BERT is a neural network pretrained on 2 tasks: masked language modeling and next sentence prediction. Now, we are going to fine-tune this network on a NER dataset. Fine-tuning is supervised learning, so this means we will need a labeled dataset. 
# # If you want to know more about BERT, I suggest the following resources: # * the original [paper](https://arxiv.org/abs/1810.04805) # * Jay Allamar's [blog post](http://jalammar.github.io/illustrated-bert/) as well as his [tutorial](http://jalammar.github.io/a-visual-guide-to-using-bert-for-the-first-time/) # * Chris Mccormick's [Youtube channel](https://www.youtube.com/channel/UCoRX98PLOsaN8PtekB9kWrw) # * Abbishek Kumar Mishra's [Youtube channel](https://www.youtube.com/user/abhisheksvnit) # # The following notebook largely follows the same structure as the tutorials by Abhishek Kumar Mishra. For his tutorials on the Transformers library, see his [Github repository](https://github.com/abhimishra91/transformers-tutorials). # # NOTE: this notebook assumes basic knowledge about deep learning, BERT, and native PyTorch. If you want to learn more Python, deep learning and PyTorch, I highly recommend cs231n by Stanford University and the FastAI course by Jeremy Howard et al. Both are freely available on the web. # # Now, let's move on to the real stuff! 
# + [markdown] colab_type="text" id="e7wfLWyYkvDi" # #### **Importing Python Libraries and preparing the environment** # # This notebook assumes that you have the following libraries installed: # * pandas # * numpy # * sklearn # * pytorch # * transformers # * seqeval # # As we are running this in Google Colab, the only libraries we need to additionally install are transformers and seqeval (GPU version): # + id="j-sgUbzBXPZK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7871ebf9-f461-4ef3-ff67-4355d1ab1de3" # !pip install transformers seqeval[gpu] # + colab_type="code" id="IEnlUbgm8z3B" colab={} import pandas as pd import numpy as np from sklearn.metrics import accuracy_score import torch from torch.utils.data import Dataset, DataLoader from transformers import BertTokenizerFast, BertConfig, BertForTokenClassification # + [markdown] colab_type="text" id="Jzq1w3L1K5M-" # As deep learning can be accellerated a lot using a GPU instead of a CPU, make sure you can run this notebook in a GPU runtime (which Google Colab provides for free! - check "Runtime" - "Change runtime type" - and set the hardware accelerator to "GPU"). # # We can set the default device to GPU using the following code (if it prints "cuda", it means the GPU has been recognized): # + colab_type="code" id="Sm1krxJtKxpx" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="20ef661a-afa6-47ae-88b3-4a9781299a53" from torch import cuda device = 'cuda' if cuda.is_available() else 'cpu' print(device) # + [markdown] colab_type="text" id="ahwMsmyG5ZPE" # #### **Downloading and preprocessing the data** # Named entity recognition (NER) uses a specific annotation scheme, which is defined (at least for European languages) at the *word* level. An annotation scheme that is widely used is called **[IOB-tagging](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)**, which stands for Inside-Outside-Beginning. 
Each tag indicates whether the corresponding word is *inside*, *outside* or at the *beginning* of a specific named entity. The reason this is used is because named entities usually comprise more than 1 word. # # Let's have a look at an example. If you have a sentence like "Barack Obama was born in Hawaï", then the corresponding tags would be [B-PERS, I-PERS, O, O, O, B-GEO]. B-PERS means that the word "Barack" is the beginning of a person, I-PERS means that the word "Obama" is inside a person, "O" means that the word "was" is outside a named entity, and so on. So one typically has as many tags as there are words in a sentence. # # So if you want to train a deep learning model for NER, it requires that you have your data in this IOB format (or similar formats such as [BILOU](https://stackoverflow.com/questions/17116446/what-do-the-bilou-tags-mean-in-named-entity-recognition)). There exist many annotation tools which let you create these kind of annotations automatically (such as Spacy's [Prodigy](https://prodi.gy/), [Tagtog](https://docs.tagtog.net/) or [Doccano](https://github.com/doccano/doccano)). You can also use Spacy's [biluo_tags_from_offsets](https://spacy.io/api/goldparse#biluo_tags_from_offsets) function to convert annotations at the character level to IOB format. # # Here, we will use a NER dataset from [Kaggle](https://www.kaggle.com/namanj27/ner-dataset) that is already in IOB format. One has to go to this web page, download the dataset, unzip it, and upload the csv file to this notebook. 
Let's print out the first few rows of this csv file: # + colab_type="code" id="deLB9HVX5I6F" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="cf18919d-2726-4efe-886c-c33f8f478d80" data = pd.read_csv("ner_datasetreference.csv", encoding='unicode_escape') data.head() # + [markdown] colab_type="text" id="ucYjhq6uRAmY" # Let's check how many sentences and words (and corresponding tags) there are in this dataset: # + colab_type="code" id="6gMibEJXTKDw" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="940acb23-8bc2-4582-fca0-99008ebaf578" data.count() # + [markdown] id="xfdGDKlqXPZb" colab_type="text" # As we can see, there are approximately 48,000 sentences in the dataset, comprising more than 1 million words and tags (quite huge!). This corresponds to approximately 20 words per sentence. # # Let's have a look at the different NER tags, and their frequency: # + id="76iAeu2_XPZb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="788eb31f-8af8-438c-aaf8-a0b5e69951ee" print("Number of tags: {}".format(len(data.Tag.unique()))) frequencies = data.Tag.value_counts() frequencies # + [markdown] id="gOpvSrmWXPZe" colab_type="text" # There are 8 category tags, each with a "beginning" and "inside" variant, and the "outside" tag. It is not really clear what these tags mean - "geo" probably stands for geographical entity, "gpe" for geopolitical entity, and so on. They do not seem to correspond with what the publisher says on Kaggle. Some tags seem to be underrepresented. 
Let's print them by frequency (highest to lowest): # + id="LTFTa17UXPZf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ea93370c-fef3-4797-862a-8ef59239a383" tags = {} for tag, count in zip(frequencies.index, frequencies): if tag != "O": if tag[2:5] not in tags.keys(): tags[tag[2:5]] = count else: tags[tag[2:5]] += count continue print(sorted(tags.items(), key=lambda x: x[1], reverse=True)) # + [markdown] id="HKvKdtxKXPZg" colab_type="text" # Let's remove "art", "eve" and "nat" named entities, as performance on them will probably be not comparable to the other named entities. # + id="xVotSuRLXPZh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="285ea754-a73a-4cba-fd53-1193d1f45c4c" entities_to_remove = ["B-art", "I-art", "B-eve", "I-eve", "B-nat", "I-nat"] data = data[~data.Tag.isin(entities_to_remove)] data.head() # + [markdown] colab_type="text" id="JsjhdQbE-Lve" # We create 2 dictionaries: one that maps individual tags to indices, and one that maps indices to their individual tags. This is necessary in order to create the labels (as computers work with numbers = indices, rather than words = tags) - see further in this notebook. # + colab_type="code" id="CFRDM8WsQXvL" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="c2dc13d0-24a5-49ad-f221-cef12fd26882" labels_to_ids = {k: v for v, k in enumerate(data.Tag.unique())} ids_to_labels = {v: k for v, k in enumerate(data.Tag.unique())} labels_to_ids # + [markdown] colab_type="text" id="mskU4h0oRKEF" # As we can see, there are now only 10 different NER tags. # # Now, we have to ask ourself the question: what is a training example in the case of NER, which is provided in a single forward pass? A training example is typically a **sentence**, with corresponding IOB tags. 
Let's group the words and corresponding tags by sentence: # + colab_type="code" id="zkW2vNcO-uMH" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="fa84c55a-5d69-4798-9c9a-33c5561a688d" # pandas has a very handy "forward fill" function to fill missing values based on the last upper non-nan value data = data.fillna(method='ffill') data.head() # + colab_type="code" id="Hmd-ow389k6Y" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="824dbe90-267a-4dc4-a4c0-5ce2a826d2ea" # let's create a new column called "sentence" which groups the words by sentence data['sentence'] = data[['Sentence #','Word','Tag']].groupby(['Sentence #'])['Word'].transform(lambda x: ' '.join(x)) # let's also create a new column called "word_labels" which groups the tags by sentence data['word_labels'] = data[['Sentence #','Word','Tag']].groupby(['Sentence #'])['Tag'].transform(lambda x: ','.join(x)) data.head() # + [markdown] colab_type="text" id="J08Cvk_USgbM" # Let's only keep the "sentence" and "word_labels" columns, and drop duplicates: # + colab_type="code" id="SrEgd4PZUgmF" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="d984be66-e8c2-41ca-834d-1dfd8bb147a9" data = data[["sentence", "word_labels"]].drop_duplicates().reset_index(drop=True) data.head() # + colab_type="code" id="r3ArUiVRqw0C" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="04d85a07-22fe-4892-e182-a8f0bace2d9a" len(data) # + [markdown] colab_type="text" id="U8obZumRTBrT" # Let's verify that a random sentence and its corresponding tags are correct: # + colab_type="code" id="eUvupomW_fbe" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2f215e44-b342-4ee4-cee2-a7d5651c7b17" data.iloc[41].sentence # + colab_type="code" id="0dLyY3Oi_lvp" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="eff7a782-ad2e-46c0-e25b-33691a07e200" data.iloc[41].word_labels # + [markdown] colab_type="text" id="f5EHpuB78pIa" # #### 
**Preparing the dataset and dataloader** # + [markdown] colab_type="text" id="15x7zmZnTgFx" # Now that our data is preprocessed, we can turn it into PyTorch tensors such that we can provide it to the model. Let's start by defining some key variables that will be used later on in the training/evaluation process: # + colab_type="code" id="lgNSM8Xz79Mg" colab={} MAX_LEN = 128 TRAIN_BATCH_SIZE = 4 VALID_BATCH_SIZE = 2 EPOCHS = 1 LEARNING_RATE = 1e-05 MAX_GRAD_NORM = 10 tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') # + [markdown] colab_type="text" id="wPYV2Ld6Tr5I" # A tricky part of NER with BERT is that BERT relies on **wordpiece tokenization**, rather than word tokenization. This means that we should also define the labels at the wordpiece-level, rather than the word-level! # # For example, if you have word like "Washington" which is labeled as "b-gpe", but it gets tokenized to "Wash", "##ing", "##ton", then one approach could be to handle this by only train the model on the tag labels for the first word piece token of a word (i.e. only label "Wash" with "b-gpe"). This is what was done in the original BERT paper, see Github discussion [here](https://github.com/huggingface/transformers/issues/64#issuecomment-443703063). # # Note that this is a **design decision**. You could also decide to propagate the original label of the word to all of its word pieces and let the model train on this. In that case, the model should be able to produce the correct labels for each individual wordpiece. This was done in [this NER tutorial with BERT](https://github.com/chambliss/Multilingual_NER/blob/master/python/utils/main_utils.py#L118). Another design decision could be to give the first wordpiece of each word the original word label, and then use the label “X” for all subsequent subwords of that word. All of them seem to lead to good performance. 
# # Below, we define a regular PyTorch [dataset class](https://pytorch.org/docs/stable/data.html) (which transforms examples of a dataframe to PyTorch tensors). Here, each sentence gets tokenized, the special tokens that BERT expects are added, the tokens are padded or truncated based on the max length of the model, the attention mask is created and the labels are created based on the dictionary which we defined above. Word pieces that should be ignored have a label of -100 (which is the default `ignore_index` of PyTorch's [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)). # # For more information about BERT's inputs, see [here](https://huggingface.co/transformers/glossary.html). # # # # # # # # + id="Eh3ckSO0YMZW" colab_type="code" colab={} class dataset(Dataset): def __init__(self, dataframe, tokenizer, max_len): self.len = len(dataframe) self.data = dataframe self.tokenizer = tokenizer self.max_len = max_len def __getitem__(self, index): # step 1: get the sentence and word labels sentence = self.data.sentence[index].strip().split() word_labels = self.data.word_labels[index].split(",") # step 2: use tokenizer to encode sentence (includes padding/truncation up to max length) # BertTokenizerFast provides a handy "return_offsets_mapping" functionality for individual tokens encoding = self.tokenizer(sentence, is_pretokenized=True, return_offsets_mapping=True, padding='max_length', truncation=True, max_length=self.max_len) # step 3: create token labels only for first word pieces of each tokenized word labels = [labels_to_ids[label] for label in word_labels] # code based on https://huggingface.co/transformers/custom_datasets.html#tok-ner # create an empty array of -100 of length max_length encoded_labels = np.ones(len(encoding["offset_mapping"]), dtype=int) * -100 # set only labels whose first offset position is 0 and the second is not 0 i = 0 for idx, mapping in enumerate(encoding["offset_mapping"]): if mapping[0] == 0 and 
mapping[1] != 0: # overwrite label encoded_labels[idx] = labels[i] i += 1 # step 4: turn everything into PyTorch tensors item = {key: torch.as_tensor(val) for key, val in encoding.items()} item['labels'] = torch.as_tensor(encoded_labels) return item def __len__(self): return self.len # + [markdown] colab_type="text" id="hTP7zuWGWGUd" # Now, based on the class we defined above, we can create 2 datasets, one for training and one for testing. Let's use a 80/20 split: # + colab_type="code" id="jrkdZBLYHVcB" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="5c231116-119e-42b6-ff69-a7db95c8f354" train_size = 0.8 train_dataset = data.sample(frac=train_size,random_state=200) test_dataset = data.drop(train_dataset.index).reset_index(drop=True) train_dataset = train_dataset.reset_index(drop=True) print("FULL Dataset: {}".format(data.shape)) print("TRAIN Dataset: {}".format(train_dataset.shape)) print("TEST Dataset: {}".format(test_dataset.shape)) training_set = dataset(train_dataset, tokenizer, MAX_LEN) testing_set = dataset(test_dataset, tokenizer, MAX_LEN) # + [markdown] colab_type="text" id="Ptv5AT_iTb7W" # Let's have a look at the first training example: # + colab_type="code" id="phmPylgAm8Xy" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9ad349fb-eca4-4871-eccf-435f7924fe07" training_set[0] # + [markdown] colab_type="text" id="VvU4nzL2W2Xo" # Let's verify that the input ids and corresponding targets are correct: # + colab_type="code" id="DWgnNJrYW2GP" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="46fee76c-ed3a-4aba-a38b-da6a0e4902d8" for token, label in zip(tokenizer.convert_ids_to_tokens(training_set[0]["input_ids"]), training_set[0]["labels"]): print('{0:10} {1}'.format(token, label)) # + [markdown] colab_type="text" id="Ky68FcTgWnfN" # Now, let's define the corresponding PyTorch dataloaders: # + colab_type="code" id="KIw793myWOmi" colab={} train_params = {'batch_size': TRAIN_BATCH_SIZE, 
'shuffle': True, 'num_workers': 0 } test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0 } training_loader = DataLoader(training_set, **train_params) testing_loader = DataLoader(testing_set, **test_params) # + [markdown] colab_type="text" id="73OzU7oXRxR8" # #### **Defining the model** # + [markdown] colab_type="text" id="T-iGhnhdLNdP" # Here we define the model, BertForTokenClassification, and load it with the pretrained weights of "bert-base-uncased". The only thing we need to additionally specify is the number of labels (as this will determine the architecture of the classification head). # # Note that only the base layers are initialized with the pretrained weights. The token classification head of top has just randomly initialized weights, which we will train, together with the pretrained weights, using our labelled dataset. This is also printed as a warning when you run the code cell below. # # Then, we move the model to the GPU. # + colab_type="code" id="cB9MR3KcWXUs" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["7e472a723360459294ecf8e046dbcdfb", "5af2dda0fde845fca51f7f5e3d804834", "9be80d6ac6af4c18a6259a5ecf596645", "36dda379e65246fb99db9ec532fee7b7", "39fe4f4cda184f8ea8be13061cb06343", "a733276c36fa4969b5b0ed8e4c19cc36", "55fd0d8368e54d57a65d8ac964263b68", "7f736be4090948089a7419884006a414", "0528099bb9f3451f9706daf630eb97e5", "d1a13adac751439d829e1b19d4fad80e", "7d05f8de38324f858c67d5c0875a7228", "b73b1cf372ab4091bf3ddf885d0d102e", "b4ebed4a383d44be8fc29fa9063689a0", "e816383d82ab457b800eb97d63df80c8", "d08086bdee6c4bac9bf7ae49784146e1", "c66cbf2ea9b943c9b7d20dea218b369a"]} outputId="527586e3-eb7f-4b6d-93f0-2f932e1ab657" model = BertForTokenClassification.from_pretrained('bert-base-uncased', num_labels=len(labels_to_ids)) model.to(device) # + [markdown] colab_type="text" id="Pp7Yl4JyWhDj" # #### **Training the model** # # Before training the model, let's perform a sanity check, which I 
learned thanks to Andrej Karpathy's wonderful [cs231n course](http://cs231n.stanford.edu/) at Stanford (see also his [blog post about debugging neural networks](http://karpathy.github.io/2019/04/25/recipe/)). The initial loss of your model should be close to -ln(1/number of classes) = -ln(1/17) = 2.83. # # Why? Because we are using cross entropy loss. The cross entropy loss is defined as -ln(probability score of the model for the correct class). In the beginning, the weights are random, so the probability distribution for all of the classes for a given token will be uniform, meaning that the probability for the correct class will be near 1/17. The loss for a given token will thus be -ln(1/17). As PyTorch's [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) (which is used by `BertForTokenClassification`) uses *mean reduction* by default, it will compute the mean loss for each of the tokens in the sequence for which a label is provided. # # Let's verify this: # # # + colab_type="code" id="eqAN7YVIjKTr" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b33a51eb-a05d-4769-d735-7cb13acc59b5" inputs = training_set[2] input_ids = inputs["input_ids"].unsqueeze(0) attention_mask = inputs["attention_mask"].unsqueeze(0) labels = inputs["labels"].unsqueeze(0) input_ids = input_ids.to(device) attention_mask = attention_mask.to(device) labels = labels.to(device) outputs = model(input_ids, attention_mask=attention_mask, labels=labels) initial_loss = outputs[0] initial_loss # + [markdown] colab_type="text" id="yLdwsru9Mh7U" # This looks good. 
Let's also verify that the logits of the neural network have a shape of (batch_size, sequence_length, num_labels): # + colab_type="code" id="X-z6YCpGnvfj" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="65461d20-fa62-4de1-a73b-0cfe254728ac" tr_logits = outputs[1] tr_logits.shape # + [markdown] colab_type="text" id="kwDLXxOVOCvD" # Next, we define the optimizer. Here, we are just going to use Adam with a default learning rate. One can also decide to use more advanced ones such as AdamW (Adam with weight decay fix), which is [included](https://huggingface.co/transformers/main_classes/optimizer_schedules.html) in the Transformers repository, and a learning rate scheduler, but we are not going to do that here. # + colab_type="code" id="kznSQfGIWdU4" colab={} optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE) # + [markdown] colab_type="text" id="vZQ8JMF0NOe1" # Now let's define a regular PyTorch training function. It is partly based on [a really good repository about multilingual NER](https://github.com/chambliss/Multilingual_NER/blob/master/python/utils/main_utils.py#L344). 
# + colab_type="code" id="GLFivpkwW1HY" colab={} # Defining the training function on the 80% of the dataset for tuning the bert model def train(epoch): tr_loss, tr_accuracy = 0, 0 nb_tr_examples, nb_tr_steps = 0, 0 tr_preds, tr_labels = [], [] # put model in training mode model.train() for idx, batch in enumerate(training_loader): ids = batch['input_ids'].to(device, dtype = torch.long) mask = batch['attention_mask'].to(device, dtype = torch.long) labels = batch['labels'].to(device, dtype = torch.long) loss, tr_logits = model(input_ids=ids, attention_mask=mask, labels=labels) tr_loss += loss.item() nb_tr_steps += 1 nb_tr_examples += labels.size(0) if idx % 100==0: loss_step = tr_loss/nb_tr_steps print(f"Training loss per 100 training steps: {loss_step}") # compute training accuracy flattened_targets = labels.view(-1) # shape (batch_size * seq_len,) active_logits = tr_logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels) flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size * seq_len,) # only compute accuracy at active labels active_accuracy = labels.view(-1) != -100 # shape (batch_size, seq_len) #active_labels = torch.where(active_accuracy, labels.view(-1), torch.tensor(-100).type_as(labels)) labels = torch.masked_select(flattened_targets, active_accuracy) predictions = torch.masked_select(flattened_predictions, active_accuracy) tr_labels.extend(labels) tr_preds.extend(predictions) tmp_tr_accuracy = accuracy_score(labels.cpu().numpy(), predictions.cpu().numpy()) tr_accuracy += tmp_tr_accuracy # gradient clipping torch.nn.utils.clip_grad_norm_( parameters=model.parameters(), max_norm=MAX_GRAD_NORM ) # backward pass optimizer.zero_grad() loss.backward() optimizer.step() epoch_loss = tr_loss / nb_tr_steps tr_accuracy = tr_accuracy / nb_tr_steps print(f"Training loss epoch: {epoch_loss}") print(f"Training accuracy epoch: {tr_accuracy}") # + [markdown] colab_type="text" id="k2dsCyP7dcF3" # And let's train the model! 
# + colab_type="code" id="y07Ybw8rZeZ7" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="56f2a1f3-cbad-4ea7-e8b7-01d4e45ac73f" for epoch in range(EPOCHS): print(f"Training epoch: {epoch + 1}") train(epoch) # + [markdown] colab_type="text" id="r4jcSOJr680a" # #### **Evaluating the model** # + [markdown] colab_type="text" id="rYUTuOEUdfFJ" # Now that we've trained our model, we can evaluate its performance on the held-out test set (which is 20% of the data). Note that here, no gradient updates are performed, the model just outputs its logits. # + colab_type="code" id="RIVVfFHi7Aw7" colab={} def valid(model, testing_loader): # put model in evaluation mode model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_examples, nb_eval_steps = 0, 0 eval_preds, eval_labels = [], [] with torch.no_grad(): for idx, batch in enumerate(testing_loader): ids = batch['input_ids'].to(device, dtype = torch.long) mask = batch['attention_mask'].to(device, dtype = torch.long) labels = batch['labels'].to(device, dtype = torch.long) loss, eval_logits = model(input_ids=ids, attention_mask=mask, labels=labels) eval_loss += loss.item() nb_eval_steps += 1 nb_eval_examples += labels.size(0) if idx % 100==0: loss_step = eval_loss/nb_eval_steps print(f"Validation loss per 100 evaluation steps: {loss_step}") # compute evaluation accuracy flattened_targets = labels.view(-1) # shape (batch_size * seq_len,) active_logits = eval_logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels) flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size * seq_len,) # only compute accuracy at active labels active_accuracy = labels.view(-1) != -100 # shape (batch_size, seq_len) labels = torch.masked_select(flattened_targets, active_accuracy) predictions = torch.masked_select(flattened_predictions, active_accuracy) eval_labels.extend(labels) eval_preds.extend(predictions) tmp_eval_accuracy = accuracy_score(labels.cpu().numpy(), predictions.cpu().numpy()) 
eval_accuracy += tmp_eval_accuracy labels = [ids_to_labels[id.item()] for id in eval_labels] predictions = [ids_to_labels[id.item()] for id in eval_preds] eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_steps print(f"Validation Loss: {eval_loss}") print(f"Validation Accuracy: {eval_accuracy}") return labels, predictions # + [markdown] colab_type="text" id="zJaONluRdq-e" # As we can see below, performance is quite good! Accuracy on the test test is > 93%. # + colab_type="code" id="2BrxRjvxApY8" colab={"base_uri": "https://localhost:8080/", "height": 857} outputId="5f1e9073-3f02-4408-9d37-c9ec0fdb0b06" labels, predictions = valid(model, testing_loader) # + [markdown] colab_type="text" id="SAznLDwx_U2X" # However, the accuracy metric is misleading, as a lot of labels are "outside" (O), even after omitting predictions on the [PAD] tokens. What is important is looking at the precision, recall and f1-score of the individual tags. For this, we use the seqeval Python library: # + colab_type="code" id="0jDNXrjr-6BW" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="ba093326-3aa2-4928-edc4-123d5ead0269" from seqeval.metrics import classification_report print(classification_report(labels, predictions)) # + [markdown] id="mC9QG1_8nRJ7" colab_type="text" # Performance already seems quite good, but note that we've only trained for 1 epoch. An optimal approach would be to perform evaluation on a validation set while training to improve generalization. # + [markdown] colab_type="text" id="4Gz-wHAw3xMk" # #### **Inference** # # The fun part is when we can quickly test the model on new, unseen sentences. # Here, we use the prediction of the **first word piece of every word** (which is how the model was trained). 
# # *In other words, the code below does not take into account when predictions of different word pieces that belong to the same word do not match.* # + colab_type="code" id="zPDla1mmZiax" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="ca59db6b-ca30-4b7b-faf7-1abf17af3be9" sentence = "@HuggingFace is a company based in New York, but is also has employees working in Paris" inputs = tokenizer(sentence.split(), is_pretokenized=True, return_offsets_mapping=True, padding='max_length', truncation=True, max_length=MAX_LEN, return_tensors="pt") # move to gpu ids = inputs["input_ids"].to(device) mask = inputs["attention_mask"].to(device) # forward pass outputs = model(ids, attention_mask=mask) logits = outputs[0] active_logits = logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels) flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size*seq_len,) - predictions at the token level tokens = tokenizer.convert_ids_to_tokens(ids.squeeze().tolist()) token_predictions = [ids_to_labels[i] for i in flattened_predictions.cpu().numpy()] wp_preds = list(zip(tokens, token_predictions)) # list of tuples. Each tuple = (wordpiece, prediction) prediction = [] for token_pred, mapping in zip(wp_preds, inputs["offset_mapping"].squeeze().tolist()): #only predictions on first word pieces are important if mapping[0] == 0 and mapping[1] != 0: prediction.append(token_pred[1]) else: continue print(sentence.split()) print(prediction) # + [markdown] colab_type="text" id="sqDklprSqB5d" # #### **Saving the model for future use** # + [markdown] colab_type="text" id="VuUdX_fImswO" # Finally, let's save the vocabulary (.txt) file, model weights (.bin) and the model's configuration (.json) to a directory, so that both the tokenizer and model can be re-loaded using the `from_pretrained()` class method. 
# # + colab_type="code" id="sDZtSsKKntuI" colab={} import os directory = "./model" if not os.path.exists(directory): os.makedirs(directory) # save vocabulary of the tokenizer tokenizer.save_vocabulary(directory) # save the model weights and its configuration file model.save_pretrained(directory) print('All files saved') print('This tutorial is completed') # + [markdown] id="yQwQSVCAXPaX" colab_type="text" # ## Legacy # + [markdown] id="QWslPozfnnPt" colab_type="text" # The following code blocks were used during the development of this notebook, but are not included anymore. # + id="k3okbiKoXPaY" colab_type="code" colab={} def prepare_sentence(sentence, tokenizer, maxlen): # step 1: tokenize the sentence tokenized_sentence = tokenizer.tokenize(sentence) # step 2: add special tokens tokenized_sentence = ["[CLS]"] + tokenized_sentence + ["[SEP]"] # step 3: truncating/padding if (len(tokenized_sentence) > maxlen): # truncate tokenized_sentence = tokenized_sentence[:maxlen] else: # pad tokenized_sentence = tokenized_sentence + ['[PAD]'for _ in range(maxlen - len(tokenized_sentence))] # step 4: obtain the attention mask attn_mask = [1 if tok != '[PAD]' else 0 for tok in tokenized_sentence] # step 5: convert tokens to input ids ids = tokenizer.convert_tokens_to_ids(tokenized_sentence) return { 'ids': torch.tensor(ids, dtype=torch.long), 'mask': torch.tensor(attn_mask, dtype=torch.long), #'token_type_ids': torch.tensor(token_ids, dtype=torch.long), } # + id="dyAL9FIRRcTg" colab_type="code" colab={} def tokenize_and_preserve_labels(sentence, text_labels, tokenizer): """ Word piece tokenization makes it difficult to match word labels back up with individual word pieces. This function tokenizes each word one at a time so that it is easier to preserve the correct label for each subword. It is, of course, a bit slower in processing time, but it will help our model achieve higher accuracy. 
""" tokenized_sentence = [] labels = [] sentence = sentence.strip() for word, label in zip(sentence.split(), text_labels.split(",")): # Tokenize the word and count # of subwords the word is broken into tokenized_word = tokenizer.tokenize(word) n_subwords = len(tokenized_word) # Add the tokenized word to the final tokenized word list tokenized_sentence.extend(tokenized_word) # Add the same label to the new list of labels `n_subwords` times labels.extend([label] * n_subwords) return tokenized_sentence, labels # + id="xWSn_glJRksn" colab_type="code" colab={} class dataset(Dataset): def __init__(self, dataframe, tokenizer, max_len): self.len = len(dataframe) self.data = dataframe self.tokenizer = tokenizer self.max_len = max_len def __getitem__(self, index): # step 1: tokenize (and adapt corresponding labels) sentence = self.data.sentence[index] word_labels = self.data.word_labels[index] tokenized_sentence, labels = tokenize_and_preserve_labels(sentence, word_labels, self.tokenizer) # step 2: add special tokens (and corresponding labels) tokenized_sentence = ["[CLS]"] + tokenized_sentence + ["[SEP]"] # add special tokens labels.insert(0, "O") # add outside label for [CLS] token labels.insert(-1, "O") # add outside label for [SEP] token # step 3: truncating/padding maxlen = self.max_len if (len(tokenized_sentence) > maxlen): # truncate tokenized_sentence = tokenized_sentence[:maxlen] labels = labels[:maxlen] else: # pad tokenized_sentence = tokenized_sentence + ['[PAD]'for _ in range(maxlen - len(tokenized_sentence))] labels = labels + ["O" for _ in range(maxlen - len(labels))] # step 4: obtain the attention mask attn_mask = [1 if tok != '[PAD]' else 0 for tok in tokenized_sentence] # step 5: convert tokens to input ids ids = self.tokenizer.convert_tokens_to_ids(tokenized_sentence) label_ids = [labels_to_ids[label] for label in labels] # the following line is deprecated #label_ids = [label if label != 0 else -100 for label in label_ids] return { 'ids': torch.tensor(ids, 
dtype=torch.long), 'mask': torch.tensor(attn_mask, dtype=torch.long), #'token_type_ids': torch.tensor(token_ids, dtype=torch.long), 'targets': torch.tensor(label_ids, dtype=torch.long) } def __len__(self): return self.len # + id="a9-BqpvYjkzN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c9783e78-4904-4d30-90c1-f685fe06b1e6" sentence = "this is a test @huggingface".strip().split() inputs = tokenizer(sentence, is_pretokenized=True, return_offsets_mapping=True, padding='max_length', truncation=True) tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"]) token_offsets = inputs["offset_mapping"] print(tokens) print(token_offsets) # + id="pw4AfDxJlU_Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="00f3e7c2-9e99-4606-9009-eee25df8fa73" word = "@huggingface" inputs = tokenizer(word, return_offsets_mapping=True, padding='max_length', truncation=True) tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"]) token_offsets = inputs["offset_mapping"] print(tokens) print(token_offsets) # + id="W9LHi_V5WZ9z" colab_type="code" colab={} # now, use mask to determine where we should compare predictions with targets (includes [CLS] and [SEP] token predictions) active_accuracy = mask.view(-1) == 1 # active accuracy is also of shape (batch_size * seq_len,) targets = torch.masked_select(flattened_targets, active_accuracy) predictions = torch.masked_select(flattened_predictions, active_accuracy)
37,627
/intro-to-python-ml/week4/knn.ipynb
3e7ae51bb97c0fb0b0e74a79c660ed10203f23e7
[ "MIT" ]
permissive
haneessh/my-learning-notebooks
https://github.com/haneessh/my-learning-notebooks
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
51,342
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + run_control={"frozen": false, "read_only": false} import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy # %matplotlib inline # - # # K Nearest Neighbors Classifiers # # So far we've covered learning via probability (naive Bayes) and learning via errors (regression). Here we'll cover learning via similarity. This means we look for the datapoints that are most similar to the observation we are trying to predict. # # #### What type of model is k-nearest neighbors algorithm (k-NN)? # A supervised, classification, nonparametric, instance-based model. # # Supervised # ---- # # You have to have labeled data. Each points belong to a group. # # Your new point will be classfied into other existing groups. # # Parametric vs Nonparametric Models # ----- # # Parametric: Make an assumption about form of the function of the data # # Nonparametric: Do __not__ make an assumption about the functional form. # # Instance-based # --------- # # Uses only the actual observed data to classify. There is no model! # <center><img src="../images/knn2.png" width="50%"/></center> # + [markdown] run_control={"frozen": false, "read_only": false} # Let's start by the simplest example: **Nearest Neighbor**. # # ## Nearest Neighbor # # Let's use this example: classifying a song as either "rock" or "jazz". For this data we have measures of duration in seconds and loudness in loudness units (we're not going to be using decibels since that isn't a linear measure, which would create some problems we'll get into later). # + import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline music = pd.DataFrame() # Some data to play with. 
music['duration'] = [184, 134, 243, 186, 122, 197, 294, 382, 102, 264, 205, 110, 307, 110, 397, 153, 190, 192, 210, 403, 164, 198, 204, 253, 234, 190, 182, 401, 376, 102] music['loudness'] = [18, 34, 43, 36, 22, 9, 29, 22, 10, 24, 20, 10, 17, 51, 7, 13, 19, 12, 21, 22, 16, 18, 4, 23, 34, 19, 14, 11, 37, 42] # We know whether the songs in our training data are jazz or not. music['jazz'] = [ 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0] # - music.head() # + run_control={"frozen": false, "read_only": false} # Look at our data. plt.scatter( music[music['jazz'] == 1].duration, music[music['jazz'] == 1].loudness, color='red' ) plt.scatter( music[music['jazz'] == 0].duration, music[music['jazz'] == 0].loudness, color='blue' ) plt.legend(['Jazz', 'Rock']) plt.title('Jazz and Rock Characteristics') plt.xlabel('Duration') plt.ylabel('Loudness') plt.show() # - # The simplest form of a similarity model is the Nearest Neighbor model. This works quite simply: when trying to predict an observation, we find the closest (or _nearest_) known observation in our training data and use that value to make our prediction. Here we'll use the model as a classifier, the outcome of interest will be a category. # # To find which observation is "nearest" we need some kind of way to measure distance. Typically we use _Euclidean distance_, the standard distance measure that you're familiar with from geometry. 
With one observation in n-dimensions $(x_1, x_2, ...,x_n)$ and the other $(w_1, w_2,...,w_n)$: # # $$ \sqrt{(x_1-w_1)^2 + (x_2-w_2)^2+...+(x_n-w_n)^2} $$ # ## Other distance functions: # # 1-norm distance, aka City Block (Manhattan) distance # ------ # # <center><img src="../images/1_norm_1.svg" width="35%"/></center> # # <center><img src="../images/1_norm_2.jpg" width="35%"/></center> # # # 2-norm distance, aka Eculidian (as the crow flies) # ----- # # <center><img src="../images/2_norm_1.svg" width="35%"/></center> # # <center><img src="../images/2_norm_2.jpg" width="35%"/></center> # # # p-norm distance, aka Minkowski distance of order p # ----- # # Generalization notion of normed vector space distance. # # When p = 1, Manhattan distance # When p = 2, Euclidean distance # # <center><img src="../images/p_norm.svg" width="35%"/></center> # # + [markdown] run_control={"frozen": false, "read_only": false} # You can technically define any distance measure you want, and there are times where this customization may be valuable. As a general standard, however, we'll use Euclidean distance. # # Now that we have a distance measure from each point in our training data to the point we're trying to predict the model can find the datapoint with the smallest distance and then apply that category to our prediction. # # Let's try running this model, using the SKLearn package. # + run_control={"frozen": false, "read_only": false} from sklearn.neighbors import KNeighborsClassifier neighbors = KNeighborsClassifier(n_neighbors=1) X = music[['loudness', 'duration']] Y = music.jazz neighbors.fit(X,Y) # - neighbors.predict_proba([[24, 190]]) ## Predict for a song with 24 loudness that's 190 seconds long. neighbors.predict([[24, 190]]) # + [markdown] run_control={"frozen": false, "read_only": false} # It's as simple as that. Looks like our model is predicting that 24 loudness, 190 second long song is _not_ jazz. 
All it takes to train the model is a dataframe of independent variables and a dataframe of dependent outcomes. # + [markdown] run_control={"frozen": false, "read_only": false} # You'll note that for this example, we used the `KNeighborsClassifier` method from SKLearn. This is because Nearest Neighbor is a simplification of K-Nearest Neighbors. The jump, however, isn't that far. # # ## K-Nearest Neighbors # # <center><img src="../images/knn1.png" width="50%"/></center> # # **K-Nearest Neighbors** (or "**KNN**") is the logical extension of Nearest Neighbor. Instead of looking at just the single nearest datapoint to predict an outcome, we look at several of the nearest neighbors, with $k$ representing the number of neighbors we choose to look at. Each of the $k$ neighbors gets to vote on what the predicted outcome should be. # # This does a couple of valuable things. Firstly, it smooths out the predictions. If only one neighbor gets to influence the outcome, the model explicitly overfits to the training data. Any single outlier can create pockets of one category prediction surrounded by a sea of the other category. # # This also means instead of just predicting classes, we get implicit probabilities. If each of the $k$ neighbors gets a vote on the outcome, then the probability of the test example being from any given class $i$ is: # $$ \frac{votes_i}{k} $$ # # And this applies for all classes present in the training set. Our example only has two classes, but this model can accommodate as many classes as the data set necessitates. To come up with a classifier prediction it simply takes the class for which that fraction is maximized. # # Let's expand our initial nearest neighbors model from above to a KNN with a $k$ of 5. # + run_control={"frozen": false, "read_only": false} neighbors = KNeighborsClassifier(n_neighbors=5) X = music[['loudness', 'duration']] Y = music.jazz neighbors.fit(X,Y) ## Predict for a 24 loudness, 190 seconds long song. 
print(neighbors.predict([[24, 190]])) print(neighbors.predict_proba([[24, 190]])) # - # ### predict vs predict_proba # # If there are say 3 classes (say -1, 0, 1), predict will give "to which class the data point belongs to" i.e. either (-1 or 0 or 1). Predict_proba will give the probability that the data point belongs to each of the classes, like (0.2, 0.7, 0.1). # In general, we can say that the "predict" function is a class decision function, where as the predict_proba is a more general form of predicting the probability for each of the classes. # + [markdown] run_control={"frozen": false, "read_only": false} # Now our test prediction has changed. In using the five nearest neighbors it appears that there were two votes for rock and three for jazz, so it was classified as a jazz song. This is different than our simpler Nearest Neighbors model. While the closest observation was in fact rock, there are more jazz songs in the nearest $k$ neighbors than rock. # # We can visualize our decision bounds with something called a _mesh_. This allows us to generate a prediction over the whole space. Read the code below and make sure you can pull out what the individual lines do, consulting the documentation for unfamiliar methods if necessary. # - # ## Normalization & Weighing # It can be a more obvious challenge if you were dealing with something where the relative scales are strikingly different. For example, if you were looking at buildings and you have height in floors and square footage, you'd have a model that would really only care about square footage since distance in that dimension would be a far greater number of units than the number of floors. # # Turn all the values to be between 0 and 1 or -1 to 1 (same thing) # # # There is one more thing to address when talking about distance, and that is weighting. In the vanilla version of KNN, all k of the closest observations are given equal votes on what the outcome of our test observation should be. 
When the data is densely populated that isn't necessarily a problem. # # However, sometimes the k nearest observations are not all similarly close to the test. In that case it may be useful to weight by distance. Functionally this will weight by the inverse of distance, so that closer datapoints (with a low distance) have a higher weight than further ones. # + run_control={"frozen": false, "read_only": false} import numpy as np # Our data. Converting from data frames to arrays for the mesh. X = np.array(X) Y = np.array(Y) # Mesh size. h = 4.0 # Plot the decision boundary. We assign a color to each point in the mesh. x_min = X[:, 0].min() - .5 x_max = X[:, 0].max() + .5 y_min = X[:, 1].min() - .5 y_max = X[:, 1].max() + .5 xx, yy = np.meshgrid( np.arange(x_min, x_max, h), np.arange(y_min, y_max, h) ) Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot. Z = Z.reshape(xx.shape) plt.figure(1, figsize=(6, 4)) plt.set_cmap(plt.cm.Paired) plt.pcolormesh(xx, yy, Z) # Add the training points to the plot. plt.scatter(X[:, 0], X[:, 1], c=Y) plt.xlabel('Loudness') plt.ylabel('Duration') plt.title('Mesh visualization') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.show() # - # Looking at the visualization above, any new point that fell within a blue area would be predicted to be jazz, and any point that fell within a brown area would be predicted to be rock. # # The boundaries above are strangly jagged here, and we'll get into that in more detail in the next lesson. # # Also note that the visualization isn't completely continuous. There are an infinite number of points in this space, and we can't calculate the value for each one. That's where the mesh comes in. We set our mesh size (`h = 4.0`) to 4.0 above, which means we calculate the value for each point in a grid where the points are spaced 4.0 away from each other. 
# # You can make the mesh size smaller to get a more continuous visualization, but at the cost of a more computationally demanding calculation. In the cell below, recreate the plot above with a mesh size of `10.0`. Then reduce the mesh size until you get a plot that looks good but still renders in a reasonable amount of time. When do you get a visualization that looks acceptably continuous? When do you start to get a noticeable delay? # + from sklearn.neighbors import KNeighborsClassifier from scipy import stats neighbors = KNeighborsClassifier(n_neighbors=5, weights='distance') # Our input data frame will be the z-scores this time instead of raw data. # zscoring it is normalizing it X = pd.DataFrame({ 'loudness': stats.zscore(music.loudness), 'duration': stats.zscore(music.duration) }) # Fit our model. Y = music.jazz neighbors.fit(X, Y) # Arrays, not data frames, for the mesh. X = np.array(X) Y = np.array(Y) # Mesh size. h = .01 # Plot the decision boundary. We assign a color to each point in the mesh. x_min = X[:,0].min() - .5 x_max = X[:,0].max() + .5 y_min = X[:,1].min() - .5 y_max = X[:,1].max() + .5 xx, yy = np.meshgrid( np.arange(x_min, x_max, h), np.arange(y_min, y_max, h) ) Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1, figsize=(6, 4)) plt.set_cmap(plt.cm.Paired) plt.pcolormesh(xx, yy, Z) # Add the training points to the plot. plt.scatter(X[:, 0], X[:, 1], c=Y) plt.xlabel('Loudness') plt.ylabel('Duration') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.show()
12,947
/05_OpenCV/03_OpenCv_Draw.ipynb
a93292d17382314bf7d2d0e2189a2720090d0c87
[]
no_license
Junohera/PythonBasic2021
https://github.com/Junohera/PythonBasic2021
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
6,115
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- square = [value **2 for value in range(1,11)] print(square) # Odd Numbers: Use the third argument of the range() function to make a list # of the odd numbers from 1 to 20. Use a for loop to print each number oddnum = list(range(1,30,2)) print(oddnum) # Make a list of the multiples of 3 from 3 to 30. Use a for loop to # print the numbers in your list. Table_of_three = list(range(0,31,3)) print(Table_of_three) cities = ["Atlanta", "Baltimore", "Chicago","Denver", "Los Angeles", "Seattle"] print(cities) cities = ["Atlanta", "Baltimore", "Chicago","Denver", "Los Angeles", "Seattle"] cities.append("New York") print(cities) mobile = ['"Samsung" , "apple" , "htc" ,"motorolla"'] print(mobile) cities = cities + ["Dubuque", "New Orleans"] print(cities) todays_tasks = [] todays_tasks = todays_tasks + ["Walk dog", "Buy groceries"] print(todays_tasks) # # Slicing a list players = ['afridi', 'misbah', 'malik', 'amir', 'ajmal'] print(players[0:3]) players = ['charles', 'martina', 'michael', 'florence', 'eli'] print(players[1:4]) players = ['charles', 'martina', 'dukky', 'chris', 'Rika'] print(players[:4]) players = ['charles', 'martina', 'michael', 'florence', 'eli'] print(players[2:]) players = ['charles', 'martina', 'michael', 'florence', 'eli'] print(players[-3:]) players = ['charles', 'martina', 'michael', 'florence', 'eli'] print("Here are the first three players on my team:") for player in players[:3]: print(player.title()) # + my_foods = ['Mutton karahi' , 'Fish' , 'Malai boti'] friends_food = my_foods[:] print('My favourite foods are:') print(my_foods) print('\nMy friend favourite foods are:') print(friends_food) # + my_foods = ['Mutton karahi' , 'Fish' , 'Malai boti'] friends_food = my_foods[:] my_foods.append('Chicken handi') friends_food.append('beaf 
biryani') print('My favourite foods are:') print(my_foods) print('\nMy friend favourite foods are:') print(friends_food) # - # Using one of the programs you wrote in this chapter, add several # lines to the end of the program that do the following: # # • Print the message, The first three items in the list are:. Then use a slice to # print the first three items from that program’s list. # # • Print the message, Three items from the middle of the list are:. Use a slice # to print three items from the middle of the list. # # • Print the message, The last three items in the list are:. Use a slice to print # the last three items in the list. # + foods = ['fish and chips' , 'biryani' , 'tikka' , 'handi' , 'karahi' , 'boti' , 'chinses' , 'qorma' , 'ice cream'] print('The first three items in the list are given below :') for food in foods[:3]: print('`' + food.title()) print('\nMiddle three items of the list are given below :') for foodies in foods[3:6]: print('`',foodies.title()) print('\nThe last three items of the list are given below :') for dinner in foods[6:]: print('`',dinner.title()) # - # Make a copy of the list of pizzas, and call it friend_pizzas. # Then, do the following: # # • Add a new pizza to the original list. # # • Add a different pizza to the list friend_pizzas. # # •Prove that you have two separate lists. Print the message, My favorite # pizzas are:, and then use a for loop to print the first list. Print the message, # My friend’s favorite pizzas are:, and then use a for loop to print the second list. 
# + friend_pizza = ['chicago bold fold' , 'dancing fajita' , 'poppin bbq' , 'tarzan tikka'] original_list = friend_pizza[:] friend_pizza.append('west side garlic') original_list.append('chicken smoke') print('My favourite Pizza are : \n') for mypizza in friend_pizza: print('*' + mypizza.upper()+ '*') print('\nMy friend favourite pizza are : \n') for yourpizza in original_list: print('*' + yourpizza.upper() + '*') # - bel('Number of Movies Release') plt.xticks(fontsize=12,rotation=90) plt.show() # + pycharm={"name": "#%%\n"} # Plot Movie Rating Count plt.figure(figsize=(20,12)) edgecolor=(0,0,0), sns.distplot(voted_movies[voted_movies.Year>2000]['imdbRating'], kde=False) plt.title("Movie Rating Count") plt.xlabel('IMDB Rating') plt.ylabel('Count') plt.xticks(fontsize=12,rotation=90) plt.show() # + pycharm={"name": "#%%\n"} # Plot of most voted movies in last 20 years, by production year movie_count = (voted_movies[voted_movies.Year>2000][["Year", "imdbID"]] .groupby("Year") .count() ) (movie_count .rolling(5).mean().rename(columns={"imdbID":"count"}) .plot(figsize=(15,5), title="Count of Rated Movies - by production year") ) # + pycharm={"name": "#%%\n"} # Genres info genres.info() # + pycharm={"name": "#%%\n"} # Drop unnecessary column genres.drop('Unnamed: 0', axis=1, inplace=True) # + pycharm={"name": "#%%\n"} # Join genres to movies filtering movies after 2000 voted_movies_genres = pd.merge(voted_movies[voted_movies.Year>2000],genres,how='left', on=['imdbID','imdbID']) # + pycharm={"name": "#%%\n"} # Overview of joined dataframe voted_movies_genres.head(10) # + pycharm={"name": "#%%\n"} # List all genres voted_movies_genres.Genre = voted_movies_genres.Genre.astype('category') voted_movies_genres.Genre.cat.categories # + pycharm={"name": "#%%\n"} # Top 6 genres by movie count voted_movies_genres['Genre'].value_counts().head(6) # + pycharm={"name": "#%%\n"} # Clean whitespace from genre and count again voted_movies_genres['Genre'] = 
voted_movies_genres['Genre'].str.strip() top6_genre = voted_movies_genres['Genre'].value_counts().head(6) top6_genre # + pycharm={"name": "#%%\n"} # Top Country by movie count in top 6 genres top6_genre_keys = top6_genre.keys() top6_genre_movies = voted_movies_genres.loc[voted_movies_genres['Genre'].isin(top6_genre_keys)] top6_genre_movies['Country'].value_counts().head(6) # + pycharm={"name": "#%%\n"} # Top 10 genres by IMDB Rating m_genre_mean = voted_movies_genres.groupby(['Genre']).mean() m_genre_mean.sort_values(by='imdbRating',ascending=False,inplace=True) # + pycharm={"name": "#%%\n"} # Plot IMDB rating by genre m_genre_mean[['imdbRating']].plot.barh(stacked=True, title = 'imdbRating by Genre',color='DarkBlue', figsize=(10, 9)); # + pycharm={"name": "#%%\n"} # Genres with >= 8 ratings md_8 = voted_movies_genres[voted_movies_genres['imdbRating']>=8] md_8 = (pd.DataFrame(voted_movies_genres.groupby('Genre').imdbID.nunique())).sort_values('imdbID', ascending=True ) md_8[['imdbID']].plot.barh(stacked=True, title = 'Genres with >= 8 ratings', figsize=(10, 9),color='DarkBlue');
6,791
/BIOE300_HM1_.ipynb
875b24fedd8c48a8e0ecbc6d0c0de8c52f046110
[]
no_license
luisdiaz1997/BioinformaticsExamples
https://github.com/luisdiaz1997/BioinformaticsExamples
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
318,417
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/luisdiaz1997/BioinformaticsExamples/blob/master/BIOE300_HM1_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="un3T_lGXPccG" import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import poisson # + id="48FD5SsBTf2M" #genes on, genes off, mrna update = np.array([ [-1, 1, 0], #gene goes from on to off [1, -1, 0], #gene goes from off to on [0, 0, 1], #mrna produced [0, 0, -1] #mrna degraded ]) # + id="eSGRDFb4U5sU" def gillispie(a_reactions: np.ndarray, prng): #time for reaction a_total = np.sum(a_reactions) if a_total == 0: return 0, 0 r1 = prng.rand() t = (1/(a_total)) * np.log(1/r1) #get index of random reaction r2 = prng.rand() target = a_total * r2 q = 0 sum_as = 0 for i, reaction in enumerate(a_reactions): sum_as += reaction if sum_as > target: q = i break return t, q # + id="fOxdI_vAUG-t" def start_simulation(rates: np.ndarray, seed=12, steps = 1000): prng = np.random.RandomState(seed) X = np.zeros((steps+1, 3)) tt = np.zeros(steps+1) X[0] = np.array([0, 1, 0]) for step in range(steps): a = np.zeros(4) a[0] = rates[0]*X[step, 0] #dna on to off a[1] = rates[1]*X[step, 1] #dna off to on a[2] = rates[2]*X[step, 0] #rna produced a[3] = rates[3]*X[step, 2] #rna degraded t, q = gillispie(a, prng) if t==0: print('simulation ends at step:', step) break tt[step+1] = tt[step] + t X[step+1] = X[step] + update[q] return tt[:step+1], X[: step+1] # + id="FTHECocMmhdc" def plot(tt, X): plt.figure(figsize=(15, 3)) plt.subplot(2,1,1) plt.plot(tt, X[:, 0], drawstyle='steps-post') plt.gca().set_xticks([]) plt.ylabel('genes on') plt.ylim([-0.1, 1.1]) plt.subplot(2,1,2) 
plt.plot(tt, X[:, 2]) plt.ylabel('Number of RNAs') plt.xlabel('time seconds') # + [markdown] id="FRaY_LWst3Yq" # ##Question 1a # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="c84I_chdYOld" outputId="0b11fdac-3b81-4242-8dfc-cbf42a1482b9" rates1 = np.array([0, 0.1, 20, 1]) tt, X = start_simulation(rates1, steps=10000) plot(tt, X) # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="v5skkZ1VZ5Nj" outputId="78392ba8-0a35-4ebf-fa50-97da2d89c40c" rates2 = np.array([0.2, 0.1, 20, 10]) tt, X = start_simulation(rates2, steps=5000) plot(tt, X) # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="ipV_CwUUmWoS" outputId="8fa30f49-5953-4b71-b27f-cfc7eca7d5aa" rates3 = np.array([0.2, 0.1, 20, 1]) tt, X = start_simulation(rates3, steps=5000) plot(tt, X) # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="3xLLfTibrcVl" outputId="4fbf49cc-abb0-4d5f-9e07-0c53ccb65a25" rates4 = np.array([0.2, 0.1, 200, 1]) tt, X = start_simulation(rates4, steps=5000) plot(tt, X) # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="x962cBOQrvHu" outputId="4eeb9960-a9ad-4d3e-c1c9-30b9c50d9cdf" rates5 = np.array([0.2, 1, 20, 1]) tt, X = start_simulation(rates5, steps=5000) plot(tt, X) # + [markdown] id="qLnlupTznaqL" # ### The mRNA counts are very correlated with DNA on. 
# + [markdown] id="d9j_LWPd8f_W" # ## question 1b # + id="Gteq0GYP8-84" steady_state = lambda rates: 1+(rates[1]/(rates[0]+rates[1]))*(rates[2]/rates[3]) # + colab={"base_uri": "https://localhost:8080/"} id="O7FHZzGQCS6h" outputId="89e624f0-4149-4c53-a429-3fe1446d251f" p = np.arange(0 ,10) np.insert(p, 0, 20) # + id="E0qckfVUA9an" def get_p(rates, n_vector): n_vector = n_vector[1:] const1 = rates[1]/(rates[0]+ rates[1]) const2 = rates[0]/(rates[0]+ rates[1]) const3 = rates[2]/rates[3] p = const1*poisson.pmf(n_vector, const3) p = np.insert(p, 0, (const1*np.exp(-const3)) + const2) return p # + id="VYUKnqKaeGln" def build_kde(tt, signal): max_signal = int(np.max(signal)) tt_diff = np.diff(tt) p = np.zeros(max_signal+1) for i, element in enumerate(signal[:-1]): p[int(element)] += tt_diff[i] p /= p.sum() return p # + id="SlV5xXTXpMzE" def plot_histogram(rates, seed=10, steps=10000): tt, X = start_simulation(rates, seed=seed, steps=steps) p_data = build_kde(tt, X[:, 2]) x = np.arange(0, len(p_data)) p = get_p(rates, x) plt.bar(x, p_data) plt.plot(x, p, color='black') plt.xlabel('mRNA counts') plt.ylabel('probability') # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="bf3GeQOF642e" outputId="7c108ed1-e347-4f9d-a697-69a3691cdab0" plot_histogram(rates1) # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="vE4A5kyW_Enf" outputId="68b4ca15-c4b6-4d9a-df44-c44805631cf5" plot_histogram(rates2) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="cQYot06k_ibi" outputId="19aa27a3-3458-48f9-f0ab-d607e4b6f387" plot_histogram(rates3) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="LYfd3_rXAFDb" outputId="debf72ed-a7f1-4cbf-dd2b-ce3bc3a5192b" plot_histogram(rates4) # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="XN8GD_XzAkE1" outputId="064e9534-4aa0-475e-fcd8-938cc36a3cf2" plot_histogram(rates5) # + [markdown] id="OL6E_EmuVBqC" # ### The distributions from the simulations match the P(n) curves 
well for most cases, except on the 5th case, were it the curve matches but not the spike at zero. # + [markdown] id="iFDotKFb05Z3" # ## Question 1c # + id="JAQk2odqddfh" def multiscales(rates, scales, xlim): plt.figure(figsize=(17, 3)) for i, scale in enumerate(scales): curr_rates = rates.copy() curr_rates[0] *= scale curr_rates[1] *= scale plt.subplot(1, len(scales), i+1) plot_histogram(curr_rates) if i>0: plt.ylabel('') plt.ylim([0, 1]) plt.xlim(xlim) plt.title(str(scale)+r'$\alpha$') # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="61hQUqmW6KZU" outputId="977511e5-2e39-4cf4-d4c7-8a9295db0197" multiscales(rates2, scales= [1e-3, 1e-2, 1e-0, 1e2, 1e3], xlim = [0, 10]) # + id="X5Dcx_j36sxE" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="38b9ca7e-01a0-4829-a648-47d608cd9a46" multiscales(rates3, scales= [1e-3, 1e-2, 1e-0, 1e2, 1e3], xlim = [0, 40]) # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="BMa_qgQSUEdl" outputId="3e49e967-8070-4ba1-b44d-7a6202f2b1f3" multiscales(rates5, scales= [1e-3, 1e-2, 1e-0, 1e2, 1e3], xlim = [0, 40]) # + [markdown] id="bRpgj9DUoYif" # ### While keeping $\frac{\alpha}{\beta}$ constant, smaller values of $\alpha$ flatten the distribution, making it have higher values at zero, while bigger values of $\alpha$ move the distribution to the left, while making the spike at zero smaller. 
# # ### The P(n) distribution fails when $\alpha$ is large in most cases # + [markdown] id="z-28wHHltT-M" # # Question 1d # + [markdown] id="o8rjfGv_tWeJ" # <img src='https://i.imgur.com/pJ0JLNV.png' width="400" height="300"> # # #### The figure from the paper looks like the second plot from 1a, with $\alpha=0.2$, $\beta=0.1$, $K_{1}$=20 and $\gamma=10$ which is the following: # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="qW_W8Q6O0uTl" outputId="41cb2e67-24c6-4971-ecad-0b733c309081" plot_histogram(rates2) plt.xlim([0, 16]); # + [markdown] id="k7b8DRZG3cqK" # #Question 1e # + [markdown] id="s1HRZ9aXKooD" # #### According to https://bionumbers.hms.harvard.edu/bionumber.aspx?id=111919&ver=3&trm=mRNA+numbers+per+gene+per+cell+for+mammalian+cells&org= # # There is about 0.02-3 mRNAs per gene in Ecoli # # #### And according to https://bionumbers.hms.harvard.edu/bionumber.aspx?id=106376&ver=7&trm=mRNA+numbers+per+gene+per+cell+for+mammalian+cells&org= # # There is a median of 15 mRNAs per gene in mammalians # # It might be true that girase influences transcription in E.Coli but this might not be the case in mammalian cells since it has a higher amount of mRNAs produced, and might be due to other transcription factors or SMC (structural maintenance of chromosomes) factors. # # # + [markdown] id="S8ugl6859WiC" # #Question 2a # # + [markdown] id="bVFKsHCDB9n3" # ### The probability of a cell having all N mutations is $P(A) = P(A_{1}=S)P(A_{2}=S)...P(A_{N}=S)$ where S is success. And if $P(A_{i})=E$ # # So for a vector of mutations $A=A_{1}A_{2}...A_{n}$ the probability of all of them being succesful $P(A) = E^N$ # + colab={"base_uri": "https://localhost:8080/"} id="pXCutA4P9aQf" outputId="b74e86e5-832f-43c6-a371-e322d22a9fc1" E = 0.55 N = 10 print('Probability is:', E**N) # + [markdown] id="-12erwpAIO_N" # #Question 2b # + [markdown] id="WldU43aVIjR7" # If you want to achieve 95% success in 5 mutations. 
# Then $E^5 = 0.95$ # # then $5log(E) = log(0.95)$ # # Finally $E = 0.95^{\frac{1}{5}}$ # + colab={"base_uri": "https://localhost:8080/"} id="QDK0Y9BYBDU8" outputId="027409ec-ca75-4ea6-d683-f4265bfbd5c4" E = 0.95**(1/5) print('E =', E) irtyeight') fig = plt.figure(figsize=(10,6)) ax= df.groupby('property_damage').police_report_available.count().plot.bar(ylim=0) ax.set_ylabel('Police report') ax.set_xticklabels(ax.get_xticklabels(), rotation=10, ha="right") plt.show() df.police_report_available.value_counts() df.columns df._get_numeric_data().head() # Checking numeric columns # + df._get_numeric_data().columns # - df.select_dtypes(include=['object']).columns # checking categorcial columns # #### Applying one-hot encoding to convert all categorical variables except out target variables # 'collision_type', 'property_damage', 'police_report_available', 'fraud_reported' # + dummies = pd.get_dummies(df[[ 'policy_state', 'insured_sex', 'insured_education_level', 'insured_occupation', 'insured_hobbies', 'insured_relationship', 'incident_type', 'incident_severity', 'authorities_contacted', 'incident_state', 'incident_city', 'auto_make', 'auto_model', 'csl_per_person', 'csl_per_accident', 'incident_period_of_day']]) dummies = dummies.join(df[[ 'collision_type', 'property_damage', 'police_report_available', "fraud_reported"]]) dummies.head() # - X = dummies.iloc[:, 0:-1] # predictor variables y = dummies.iloc[:, -1] # target variable # + len(X.columns) # - X.head(2) # + y.head() # - # #### Label encoding from sklearn.preprocessing import LabelEncoder X['collision_en'] = LabelEncoder().fit_transform(dummies['collision_type']) X[['collision_type', 'collision_en']] X['property_damage'].replace(to_replace='YES', value=1, inplace=True) X['property_damage'].replace(to_replace='NO', value=0, inplace=True) X['property_damage'].replace(to_replace='?', value=0, inplace=True) X['police_report_available'].replace(to_replace='YES', value=1, inplace=True) 
X['police_report_available'].replace(to_replace='NO', value=0, inplace=True) X['police_report_available'].replace(to_replace='?', value=0, inplace=True) X.head(10) X = X.drop(columns = ['collision_type']) X.head(2) X = pd.concat([X, df._get_numeric_data()], axis=1) # joining numeric columns X.head(2) X.columns X = X.drop(columns = ['fraud_reported']) # dropping target variable 'fraud_reported' X.columns # #### We now have a dataset that we could use to evaluate an algorithm sensitive to missing values like LDA. # + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score # evaluate an LDA model on the dataset using k-fold cross validation model = LinearDiscriminantAnalysis() kfold = KFold(n_splits=5, random_state=7) result = cross_val_score(model, X, y, cv=kfold, scoring='accuracy') print(result.mean()) # - print("Accuracy: %0.2f (+/- %0.2f)" % (result.mean(), result.std() * 2)) # #### 84 % cross validation score without standardizing the data. Above is the mean score and the 95% confidence interval of the score estimate. This looks good to go for other Classification methods. 
# #### Creating a Training Set for the Data Set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=7) print('length of X_train and X_test: ', len(X_train), len(X_test)) print('length of y_train and y_test: ', len(y_train), len(y_test)) # ### Random Forest Classification # + from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, recall_score, classification_report, cohen_kappa_score from sklearn import metrics # Baseline Random forest based Model rfc = RandomForestClassifier(n_estimators=200) kfold = KFold(n_splits=5, random_state=7) result2 = cross_val_score(rfc, X_train, y_train, cv=kfold, scoring='accuracy') print(result2.mean()) # - # #### Here, we see that, Random Forest baseline model unable to provide greater accuracy. We will check on ther classifier to compare. Before doing so,let's check if any anomalies/outliers are present in data. # Generate a Histogram plot for anomaly detection plt.style.use('fivethirtyeight') plt.rcParams['figure.figsize'] = [15, 8] df.plot(kind='hist') plt.show() # The green bar standing tall and away from all, signifies anomalies in either of policy_annual_premium, witnesses or vehicle_age. Let's draw box-and-whisker plot on each to check the presence of outliers. plt.rcParams['figure.figsize'] = [5, 5] sns.boxplot(x=X.policy_annual_premium) plt.show() # Outliers are visible from the above plot from both Q1 and Q3 quartiles above the whiskers. sns.boxplot(x=X.witnesses) plt.show() # Missing median line represents data distribution is highly imbalanced. sns.boxplot(x=X.vehicle_age) plt.show() # ### Standardizing the data and recheck the data distribution. 
from sklearn.preprocessing import StandardScaler

# Scale features; `with_mean=False` keeps the (sparse-friendly) data
# non-centred, dividing by the standard deviation only.
scaler = StandardScaler(with_mean=False)
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)   # transform only: no leakage from the test set
X_train_scaled

X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)  # retaining columns names
X_train_scaled.head(2)

# Generate a Histogram plot on scaled data to check anomalies
plt.rcParams['figure.figsize'] = [15, 8]
X_train_scaled.plot(kind='hist')

x_train_scaled = X_train_scaled.to_numpy()  # converting to array for computational ease
x_train_scaled

# Here the data is distributed and the anomalies are gone after standardization.

# #### The 10-fold cross validation procedure is used to evaluate each algorithm, importantly configured with the same random seed to ensure that the same splits to the training data are performed and that each algorithms is evaluated in precisely the same way.

# +
from sklearn.ensemble import AdaBoostClassifier, VotingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import model_selection
from sklearn.model_selection import KFold, cross_val_score
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

# prepare configuration for cross validation test harness
seed = 7

# prepare models (the estimators are constructed directly in the list; the
# previous standalone instances were never used and have been removed)
models = []
models.append(('LR', LogisticRegressionCV(solver='lbfgs', max_iter=5000, cv=10)))
models.append(('XGB', XGBClassifier()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('DT', DecisionTreeClassifier()))
models.append(('SVM', SVC(gamma='auto')))
models.append(('RF', RandomForestClassifier(n_estimators=200)))
models.append(('ADA', AdaBoostClassifier(n_estimators=200)))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('GNB', GaussianNB()))

# evaluate each model in turn.
# The KFold object is created once, outside the loop: its splits are
# deterministic for a fixed seed, so every model is scored on identical folds.
# NOTE: scikit-learn >= 0.24 requires `shuffle=True` whenever `random_state`
# is supplied.
results = []
names = []
scoring = 'accuracy'
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
for name, model in models:
    cv_results = model_selection.cross_val_score(model, x_train_scaled, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# boxplot algorithm comparison
plt.rcParams['figure.figsize'] = [15, 8]
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# -

# Above is a list of each algorithm, its mean accuracy and accuracy standard deviation, and a box & whisker plot showing the spread of the accuracy scores across each cross-validation fold for each algorithm. It is clear that Linear Discriminant Analysis (82%) is leading the list. Logistic regression and XGB are close (82.62% and 82.87% respectively). We could see some noise / outliers in the data in the case of XGB. The LR box-plot is skewed to one side with a longer tail.
17,466
/ProblemSets/PS4/PS4_Zhihan Yu.ipynb
acc910de2dab5f449f12937cb86d61a558b6747b
[]
no_license
jeffishy135/persp-model-econ_W19
https://github.com/jeffishy135/persp-model-econ_W19
0
0
null
2019-01-10T22:22:29
2019-01-09T21:26:28
null
Jupyter Notebook
false
false
.py
108,991
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Automatisch Matrizen generieren import numpy as np # #### Wie war es bisher? np.array([0, 1, 2]) # #### Wie können wir nun Vektoren und Matrizen generieren? Z = np.zeros(10) Z Z = np.zeros((10,10)) Z B = np.ones((5, 3)) B W = np.random.random((5,1)) W # #### Wie generieren wir Werte $\geq 0$ und $\leq 0$ ? # # Die Lösung ist mithilfe der Normalverteilung! Gerne mehr hier: https://de.wikipedia.org/wiki/Normalverteilung # # Die Normalverteilung $X \sim \mathcal{N}(\mu,\,\sigma^{2})\,$ hat Standardwerte wie das mean $\mu = 0$ und die Varianz $\sigma^{2} = 1$ X = np.random.randn(10,5) # kein Tuple mehr! X X.mean() X.var() given mu and sigma (LN): f(x; mu, sigma) = (1 / (x * sigma * sqrt(2 * pi))) * exp((-1 / 2) * (((log(x) - mu) / sigma) ** 2)) x in [0, infty), mu in (-infty, infty), sigma > 0 -------------------------------------------------------------------- INPUTS: xvals = (N,) vector, data mu = scalar, mean of the ln(x) sigma = scalar > 0, standard deviation of ln(x) OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION: None OBJECTS CREATED WITHIN FUNCTION: pdf_vals = (N,) vector, probability of each observation given the parameter values RETURNS: pdf_vals -------------------------------------------------------------------- ''' pdf_vals = np.float64(((1 / (np.sqrt(2 * np.pi) * sigma * xvals)) * np.exp((-1.0 / 2.0) * (((np.log(xvals) - mu) / sigma) ** 2)))) return pdf_vals x_val = np.linspace(0.01, 150000, 30000) plt.plot(x_val, LN_pdf(x_val, 11, 0.5), label='lognormal with $\mu$=11,$\sigma$=0.5') # TODO: Got to get the y-axis to be labeled correctly plt.legend() # + def log_liklihood_lognorm(xvals, mu, sigma): pdf_vals = LN_pdf(xvals, mu, sigma) ln_pdf_vals = np.log(pdf_vals) log_lik_val = ln_pdf_vals.sum() return log_lik_val # value of the log 
# likelihood value for this parameterization of the distribution and given this data
print("Log-Likelihood with mu=11 and sigma=0.5 is: ", log_liklihood_lognorm(data, 11, 0.5))
# -

# ### Problem 1c

# +
def objective_func(params, xvals):
    """Negative lognormal log-likelihood, for use with a minimizer.

    scipy.optimize minimizes, so the (to-be-maximized) log-likelihood is
    negated here.

    Parameters
    ----------
    params : tuple of length 2
        (mu, sigma) of the lognormal distribution.
    xvals : array-like
        The observed data.

    Returns
    -------
    float
        The negative log-likelihood of `xvals` under (mu, sigma).
    """
    mu, sigma = params
    neg_log_likeli = -log_liklihood_lognorm(xvals, mu, sigma)
    return neg_log_likeli


import scipy.optimize as opt

mu_init = 11.35  # mu_2
sig_init = 0.2   # sig_2
params_init = np.array([mu_init, sig_init])

# Pass the data through `args` rather than capturing it in a lambda
# (the previous `lam_obj_x` lambda and the unused `mle_args` assignment
# have been removed; the optimization is identical).
results_uncstr = opt.minimize(objective_func, params_init, args=(data,))
MLE_mu, MLE_sigma = results_uncstr.x

# Overlay the MLE fit and the initial guess on the earnings histogram.
plt.hist(data, bins=30, density=True, alpha=0.8)
plt.ylabel("Percent of Earnings")
plt.xlabel("Earnings (USD)")
plt.title("MACSS Earnings Distribution from 2018-2020")
plt.plot(x_val, LN_pdf(x_val, MLE_mu, MLE_sigma), label='MLE lognormal')
plt.plot(x_val, LN_pdf(x_val, 11, 0.5), label='lognormal with $\mu$=11,$\sigma$=0.5')
plt.legend()
print("optimal mu=", MLE_mu, "optimal sigma=", MLE_sigma)
print("Log-Likelihood with mu=11 and sigma=0.5 is: ", log_liklihood_lognorm(data, 11, 0.5))
print("MLE Log-likelihood: ", log_liklihood_lognorm(data, MLE_mu, MLE_sigma))
# -

# Standard errors come from the inverse Hessian returned by the optimizer,
# which approximates the variance-covariance matrix of the estimates.
vcv_mle = results_uncstr.hess_inv
stderr_mu_mle = np.sqrt(vcv_mle[0, 0])
stderr_sig_mle = np.sqrt(vcv_mle[1, 1])
print('Variance-Covariance Matrix of MLE:')
print(vcv_mle)
print()
print('Standard error for mu estimate = ', stderr_mu_mle)
print('Standard error for sigma estimate = ', stderr_sig_mle)

# ### Problem 1d

# +
import scipy.stats as sts

# Null hypothesis: (mu, sigma) = (11, 0.5).
mu_init, sigma_init = 11., 0.5
log_lik_h0 = log_liklihood_lognorm(data, mu_init, sigma_init)
print('hypothesis value log likelihood:', log_lik_h0)
print()
# Likelihood-ratio test of H0 against the MLE fit.
log_lik_mle = log_liklihood_lognorm(data, MLE_mu, MLE_sigma)
print('MLE log likelihood:', log_lik_mle)
print()
LR_val = 2 * (log_lik_mle - log_lik_h0)
print('likelihood ratio value:', LR_val)
print()
# LR statistic is asymptotically chi-squared with 2 d.o.f. (two parameters).
pval_h0 = 1.0 - sts.chi2.cdf(LR_val, 2)
print('chi squared of H0 with 2 degrees of freedom p-value:', pval_h0)
# -

# +
from scipy.special import erf


def lognormalcdf(x, mu, sig):
    """CDF of the lognormal distribution at x, given ln-scale mean mu and
    ln-scale standard deviation sig, via the error function:
    F(x) = 1/2 + 1/2 * erf((ln(x) - mu) / (sig * sqrt(2))).
    """
    inerf = (np.log(x) - mu) / (sig * (2 ** 0.5))
    rtnval = 0.5 + 0.5 * erf(inerf)
    return rtnval


print("Probability less than $75000: ", "%.2f" % (lognormalcdf(75000, MLE_mu, MLE_sigma) * 100) + "%.")
# BUG FIX: this line computes 1 - CDF, i.e. the probability of earning MORE
# than $100000 — the label previously (and wrongly) said "less than".
print("Probability greater than $100000: ", "%.2f" % ((1 - lognormalcdf(100000, MLE_mu, MLE_sigma)) * 100) + "%.")
# -

# ## Problem 2

# ### Problem 2a
import pandas as pd
from sklearn.linear_model import LinearRegression as lin

# +
# TODO: hardcoded absolute Windows path — consider a path relative to the
# notebook (e.g. "data/sick.txt") so the analysis is reproducible elsewhere.
sick_dat = pd.read_csv(r"C:\Users\ZongYuan\Documents\!Jeff\WinterQuarter\MCSS\course\persp-model-econ_W19\ProblemSets\PS4\data\sick.txt")
# Check out the first few observations
sick_dat.head(5)

# +
# Use OLS to find the initialization of MLEs
y_vals = np.array(sick_dat['sick'])
x_mat = np.array(sick_dat[['age', 'children', 'avgtemp_winter']])
reg = lin().fit(x_mat, y_vals)
base_coef = reg.coef_
base_intercept = reg.intercept_
print("Regression coefficients:", reg.coef_)
print("Regression intercept:", base_intercept)


# +
def eps_creation(y, x1, x2, x3, b0, b1, b2, b3):
    """Residuals of the linear model y = b0 + b1*x1 + b2*x2 + b3*x3 + eps."""
    epsilons = y - b0 - b1 * x1 - b2 * x2 - b3 * x3
    return epsilons


epsilons = eps_creation(y_vals, x_mat[:, 0], x_mat[:, 1], x_mat[:, 2],
                        base_intercept, base_coef[0], base_coef[1], base_coef[2])
print("Standard deviation of the error terms is roughly", '%.3f' % epsilons.std())


# +
def normalpdf(x, mu, sig):
    """Normal pdf at x with mean mu and standard deviation sig.

    `abs(sig)` guards against a negative sigma wandering in during
    unconstrained optimization.
    """
    sig = abs(sig)
    fracpart = 1 / ((2 * np.pi * (sig ** 2)) ** 0.5)
    expopart = (-1) * ((x - mu) ** 2) / (2 * (sig ** 2))
    var = fracpart * (np.e ** expopart)
    return var


# Compare the residual histogram with a N(0, std(eps)) density.
plt.hist(epsilons, bins=30, density=True)
lb, ub = np.min(epsilons), np.max(epsilons)
xspace = np.linspace(lb, ub, 1001)
norm_byone = normalpdf(xspace, 0, epsilons.std())
# Overlay the fitted normal density on the residual histogram.
plt.plot(xspace, norm_byone, label='mu=0, sigma=0.003')
plt.title("Histogram of the error terms and normal distribution")
plt.ylabel(r'Density')
plt.xlabel(r'Errors')
plt.subplots_adjust(bottom=.25, left=.25)
plt.legend()


# +
def loglikelihood_norm(y, x1, x2, x3, b0, b1, b2, b3, sigma):
    """Log-likelihood of the linear model with N(0, sigma) errors.

    Computes residuals via `eps_creation`, evaluates their normal density
    with `normalpdf`, and sums the log-densities.
    """
    epsilons = eps_creation(y, x1, x2, x3, b0, b1, b2, b3)
    normal_densities = normalpdf(epsilons, 0, sigma)
    ln_densities = np.log(normal_densities)
    log_likeli = np.sum(ln_densities)
    return log_likeli


def obj_normal(params, *args):
    """Objective for scipy.optimize.minimize: the negated log-likelihood.

    `params` is (b0, b1, b2, b3, sigma); `args` is (y, x1, x2, x3).
    """
    b0, b1, b2, b3, sigma = params
    y, x1, x2, x3 = args
    return -loglikelihood_norm(y, x1, x2, x3, b0, b1, b2, b3, sigma)


# Minimize the negative log-likelihood; `arguments` is forwarded to
# obj_normal's *args by the optimizer.
arguments = (y_vals, x_mat[:, 0], x_mat[:, 1], x_mat[:, 2])
params_init2 = (0.2, 0.01, 0.4, -0.01, 0.05)
results_uncstr2 = opt.minimize(obj_normal, params_init2, arguments)
print(results_uncstr2.x)          # MLE parameter vector
print(-results_uncstr2.fun)       # maximized log-likelihood
# -

# Inverse Hessian approximates the variance-covariance matrix of the MLEs.
vcv_mle2 = results_uncstr2.hess_inv
print('Variance-Covariance Matrix of MLE:')
print(vcv_mle2)

# ### Problem 2b

# +
# Null hypothesis: intercept 1, zero slopes, sigma = 0.1.
hyp_b0, hyp_b1, hyp_b2, hyp_b3, hyp_sigma = 1, 0, 0, 0, 0.1
mle_b0, mle_b1, mle_b2, mle_b3, mle_sigma = results_uncstr2.x

log_lik_h0 = loglikelihood_norm(y_vals, x_mat[:, 0], x_mat[:, 1], x_mat[:, 2],
                                hyp_b0, hyp_b1, hyp_b2, hyp_b3, hyp_sigma)
print('hypothesis value log likelihood:', log_lik_h0)
print()
NWKeUiO8GEL+0ecDjit71TiImKek8EqlRtUqPQawqVFaTKVSCC0VVaLkqNlKj5sb0noc4FQqGKhSMLrakme+3gMC54t8Iftl/zuaxp7mY8h7jcWhbq8jt/8OoINHdKfi0o3WMHZ+wybHJBAoxSQQ4avxscazradCO4JO15xNF+A44KnWkJSlyk2oVWYMgATgEFp8nh0BNcJg+8nnwd46BrJYkB1XbWazIF+EcRXKSqxUbkqN8lIXeNpNp4ZTaaBkoAPwBLrNAXV7qXd8P31A8vmxm8ZACHRYp8OWCAT6xHuN9Tp4QBMzrif8CAtGEaDR23cFLYeXHzMbQeV28F7rcVADJDXKiK71EucBJkZDL0OAjYOmd27axMpVdpoLRhN4sUSZcXOVHjNH6THFfvJ5sFeOgaGZVcpJKTMTWnJ0kWk3iRGzFTeoSIF+MxUMzA8lD2RsPoc5HUuVX48v5h5ly24SlH5b/70xY8FACXhMBHpvXyC2RfH22vGi//cIOomDiGxcZSiFtsPGRv/vd49bwLbekKrry6EqBDh1yogm1e4AONj6CtMXKDsBcClWWqBISeFTFTtgnJIiJis7sVhpMdP95PNgLx0DU5UQNUaJ0WOVGjtZmUmzlResUEFajQrSFiqPCOIhjcaZ07pAx9N6ut5FX0j7vPiSx0DIkYDdQ9kADksZe/qgtLgtM3oEneRw1uZ4u0uSGW82NurC9MW6nZ4KdT3cKqUF6pQWvdg7BmpDX2DzzA4djC1WRsJsxQwap8h+I5QcmKjc4GwVZMzRiPxyTRxVqylj6zRlLEc/+TzYy8bAuBrlpE9QXuYk5aZPVVrCJMWFT1BgwAQlhM8S5rbu8zcGPA6ETAP6tOUKX7IA8jWI/54G0Vd4GVqng3mNTR1RWNhQDtdpluD0hh9haCNm8opi++SZFsqA3fWY09m1IwETmuznssiOeNMwEWrunXjdxM9UUvQE5aYVqWrut3TBb6/XM0+8rxXvbVH9Smn9KqmlwU8+D/bSMbBB2twurVnRqscfelG/PetyVZcdqyGZFeKDjfkde59C5rROrScEODYH1FcElE/n1w+UQgE/R2UvNUcCltzgucY+O0yrEMuzJ6UlDMDxnAm8va2Z12E7VHaJ22Frgx4HTRfoZMUvVnLkPGUnVdkLkxo7XUmBcVpS8z3dev3jam+SNrVK9au2qHWDd95Yv02tjdvV2rRZrU1b/aPPh71wHHSovWWr2pul1kZp9Ydbdd8dL+nQpWcoMGCSkiKLlc0GcBGlSo4sD83vVNi1ZMJU9fhe+lqOz5cvYQzY5pxsIMcmh2zaxsZ5pbbshv10evMJCGOrUcxrFML7AKRaUvFzQ6gdfK17HNxVih9YoVE5Ryrym7PMAyc7aZ7iIyZq/IgaXX3ZfWqql7ZtkrZvltqaeLG2a2uH94XXuL5drc0b1drc5iefB3vlGGhpalVbS4c2tm4xrUdbpI5m6fmn1+ray55QYValBu4zSuOGHqzkqDnKTAgtHmWb8CQWh34JwsVvw+dzT2PAdj6dYzuHhodNMAyZMOQQm9PBtLZDgIFu5Q100HaGBtnGdY5tORr5jUmd2x30pB51DXTmdGqUEF6hEbkHql/YKOWll2t4frnuuOVpbaiXtm+Vmhu3aO2aJjWsb1FLc4eamzbacWP7FrW0tKildYOffB7sfWOgpUnNjU0Gto0NLVq/tkVtzdukrdK2DqlpnXTZxfdoeO58BWNnKj58mvJSawx8YvoXqSC4xBeG3YRZl1zygfhL4UVI00kaPEuADUoLgQXQdtgWe4cAA92ek5nXPLfpueb2xuKe0TnLd1zYs9OKUtch3DlZQZ0UNVdJ0SXKCZYrM2Wmrr/2EXW0ATbbtG5ts9bXN6m9vV38Nm/erIaGBjU0NGrjxo0hQdPgHw10fD54HyB7Cx82aNPGDm3dvE0d7VsE8DTUN6l5Q7uZGRvrt0ibpUsvul0ZSVMVPWCM0uPnKCO+QgmDS83U
5t5F/+gDzZc+BsCFQJktuXEhcJ57rMPAx22P05PSEpYVR3jqOabloOlgZjv39Ls+ubVBN6RynQN0MuOrFIwr0+B9xyo9aaZ+9+vr1NIoNdRvVmvLJm3jw23rZnVsalVr2wY1t3gCpbW1Wc3NjWppafIAxz/6fAB496Zx0NKkxoYmM68BOhvbOtTe1qKN7a1qb20TpjfmO5sbtumsn12imMEjlBAxWbkp1cpNrlP8IH9Ox8ki/7gHQDfkMg1+sE4H4MFlmimaXZrXiL8G2OQlsYnbND15f6N5rrHlQU9I5R6wuXTGzFXMoCkqzKlUYe5cdeAssGaz2lq3qqWlTfXrV6uxaZ0aNqzSqtXva83aD9XS2qCOTS1qa2/yhAwmNj/5PNjrxkCb1q9t0ob6FjVtaFVLU7PaWjeovb1B7e31am2t15rVK7WlQ/rovUaNHzlP6QlFFpfNvNoI+ukWYvfwUejeU/+4BwTy3vA8QhEJWOtJRAK8n++49i0DIBwJdtiPrRs/LCKBAx2ig0bvO0WoSIDNZwGdmAFTbR4nmDBV11/zmNat3qbNHVJ722Y1NW3Qhsa1attYr63bW7RNrdq4qUHNLesMeNo3NocEbZsBFCDlJ58He80YaN5ogLKpXWZeMw2nvUGtbavU2rZCbe2rtWVLixobNqilcYvOP+cq5aQWKylylhLDy5UZt9AHnW7CzAfXLxlcQ3M6LA4ldBqmNqZm8GJjU9DewqhZwE9C0rAolIkgTGxP3LPePA96Qyr3cIMxBCacrSFZ85QYGGdun9u3SB9/tM7mcJqa12vL1nbTctbVf6z2jRu0aXOzGpvWqGHDmpCmg5bTppbmjf7R58PeNQ7w3DQ3+c1mYsOkhnbT3LxSLa0r1b5xjZqaVmnzpjatXtGg9au3WwSDvOB85aUsUsLgMt+RwAedPTcGQt5rOKJhJWN9DsCDIwGbgbq92sxa1u05mabDAlFUI2xxwzMWa8rwI8yR4LOATm5KpQKDxumUE89Ta5PU2LBJHR2b1dYWMp85ryxs9S51nzTvNKnwhR8CIP8Y0vh8fnhaz9eVD6GPreaOLrC1OS3mPesttZmprVXr65vVuH67Dlj8A2WllJjjTgabZHV7mf3zL/lLf2/nfciRAE2HIJ8sDp028ihTYAAcAgz0ND1joMMe18PSF5l/NWiFeoS7W29+1m5wo+kAOvFRE3T+OddrY6u0oaFdbW1t5jCwsQPzmQMbZzZCgIScB9zEcSfwcM9PPg/2ojGAhu9Sp3nZvR+Yotervb1VTY1tam7crlNO+p2yg8VKjJql9Ph5Pujs7YJ/T/Y/UGHx1lBaUFSYniEMDhYz5nTwYOsRdOIHzBALRNFyiJ2DmW1s3oFWYJdzOjFlykmer+TYKbrz1mcNdBrWt6m5uVkNG9aas4APOnuRAPU/GD7/R5MDHGdeNuDpAh2ccLAaMEfa0iT9+ZK/G+gkRBYpM9HXdNwHsH/cA1peaBM3ggsQ7JP1nS8/uc2maNB0MLH1CDrY40Aq0Mlb1LNAv/rxbba1QW8TQe4Bo+lkJVYY6Dz3zGq1NXuaTmtrq9Y3rPFBxxfCn18I7208+wygw/ICFlI3NmzVA/e8aEsTkqJnKyvJ30/HySL/uGdAByVlWHqdOaBhJXvqgSbzhCaEWm+WsjDi5rBWpyBlgU0GYVp79M61n22dToy3j3tiYJLeerVVTQ1S44aN6ujoME2nfaMzrXH0zWu+2czX+j4xBknMN+4AACAASURBVHYBOk3N9d78aOsmrV+3SU898Z595KXGzvFBZ0+alvy2TYthbQ74QUQbMOQvFz5t1rLhGUt7jzLNwp64/tPNtxoTG9qO29ogZv8ZPapH7qsCTSc1UCIHOkx0AjqbNm1SY1O92tpDiz9tXscHnU8InL3tq97v7yc1v12AjpvTYd4H0PnnY+/YHGpW8jylxflbGzhZ5B/3jKaD8wBLbnBEAzvG5R9k0zTM6fQWu7PTZRrEYj8d
EOuWK1/7zOa1lOhiBROm6/WXmsx7DUcCwtvgLu2Djv9l7wPtLsbALkAHTaejo13NTe02p/PQ/a8oLnK88tNrlBBR7DsS+BrHnhsDocWhgA4RCXBGI7hAYVqdaTm9bY1jAT/ddtXY5ChMlGm0nM8ypwPoZKbM0qsvbLB4azgSMKcD6PjmtV0IHP/L/5Nf/nsbT3YBOiyu3rTJ83DbtFG6987nTNMpzK5TdP/pe07g+MLe533AM6tF7ztZ4wsONsAh8Cc4ghNar5pOfnJ1p/caC3xYHMq8DoV6QyqnymJeS4stVUrcVJvT2VC/zcLfNDY2qrWt0aIO+N5rPvD42s6njIFdgI7FK2xuVGtLh1qbpfvvfkEZyUUKxpUqNcZfHOpkkX/cM+Y1HNAIgzMm9wDzYMORAE82sKNXR4LAflM0NFhrTgTY5IjBhn2O7UZj+83c5ZyODzqfIlD2tq92v7+fX3PzQcfXGPqq1hhaHIoHNBYyAOg/j260qASfts4zDE0HpPK8EOYZ4Dx+d71tN8qeCD35WbuvCl/T8QHH12K+4BjwQccHnT4MOigqYAgxO1FYHOjsEAJnp61xwmL7TRNhDPBCAHhAKzbiwR63Q8EeGOODzhcUOL5m8Pk1g68bz3zQ8UGnB9nqPuy/0sdQ7DUXMBrsuPyCJ81lGu811ur0pLSEEZFgSOoCAx0W+gA+hDLAHvdZYq/55jUfeHxt5wuMAR90fNDp46DDshu38zSx1/ALAHSCUWU9gw5x19hulMkfAAfg+ddDrebyxrxOT0jl0NfXdL6AsPm6fbH7/fm/aW0+6Pig04dBp/vOoZja+oWNMu81tJzevJ87NR3c3EAofKypaFT2Ab4jgS9I/2+C1OfbZ+ebDzo+6PRh0CHIJz4BAA64UTXrh6a8fKojAaY1CuF9gKaDuzThDHB5683PulPTYROf2DKlxE7XW681asN6tjTYqMbGBhEvynOXRhtw2xZ0D4vjzn1twTdP7cVjoK+DjnkwdY+MQDy4qm4WEv5XhxLXMbk4V+9Q7DhXR09HBHJP1zuvhepz9XZer9hFOUezo9fVw3VHs+tH93sh12Rrh+s9tNOdZgcoLt/O/7nurlm7IXq67wi7c9nu9HXP11nPl+Q+HajQiMwlithngrlME9HmodtXWfBopmd6Na+5MDisJgWtSKzXAXB6mwhyTAoG5ostq1NiZ+jtNxrUuKHVFoSySRvbG7TYPiGbvaNtZ+DtEdLClgb2snV0A6S+KXhYk0QEBo7t7e1qaGjQ5s2bLdJ2U1OTt1C2qUmcc53j6tWrxW/Dhg0WMoiwQWvWrLGvY+qqr6+3uqivLwDS+vXrrZ/btm2zfhB7b9WqVdZX+EHfGA+uP5zDi7VrWfi4ycquXLnS8lAX9/gRrbwv9P8L0diXQcc+OsuVFluutJh5SoupVlqgVmmBGvt45QM2GD5f+fEHKi/2YCUNqFBO/Fxlx5cobv9ZKkxZrvSocuUlVCkzUK70qLkqSKpRfmK10iJL7X9aZJmyYiqUEzdf2bHzlBFdppTBxQpGzFFBcrXSY4qVHjPLjhmxJcqILVVGTFkosUBx5+TucZyn9KiFShxYptykcmUnzLEFjjnxdRqVcYwS+rPNS5kyYmd77QTmdgKEtcP1uLm2VpG57WDMHDHlgEz0ph7KhYxMj61SamCu8lIXKJeQ/+FzlBJdqqTI4p14VxPiX60y4GHAk8P0Af5kRFcoI6pKGVELlBG5SBmR7BwLoH9JILNzOwaGAO9cm9PBWvbI39eYxYy9dHpTWiwMDgHb0HTYahQT29UX/1u4SzMZ1InkOzcYDeB4KSW2SG+/Ue8J3o71amxeZRpPS9NWtTRtV0vzZhnQtK1VS9tqb9FoJyCxY2jfBBzoRqAiZPlxzu/99983oYtARYAieAEbhC2CGeFLGYCK61u2bDEA4j6gg+DlvC8IXWiHD/SD9MEHHxgP6Ef3n0UeX7/eeAE/uL9169ZOcIIn8Gb7
9u0CgACtjz/+uE+Pjc80rvs86LBAvMwDnUCN0qIXm+B0gj954DxlRi5TRsRyBcOrlJdUouyE6QoOmqv82DoDlqGp1cqJn6fkwbPsP+CTGl7qCdnoCuUnLjAQShxQpKzYcgObjMBcJQ6c/img48AG4PD2COsCoBDwBCqVHrVIaZE1yowrVk5SsVl7ghHVShpQp5RBxA8rU0ZckQdsBjoI+SoZwMUVKS2upBvozPUAJwQ6AA+7uwYDVQYyAE9KVJmd5yRXiRSMLVcwtjtg1yktUNcNdOhHmTKhI7qsE3SyIhYrM2rJHgcdrGODwsaadoP383OPddg5Tmi9OaKFEQIHDzZQiv100HRe/McWi0iQMHC2Dzq7AESAgd9HH32kV199Vb/85S916aWXWrrzzjtNqwFA+K1YscK0mDfeeEN33HGH7r//fr399tsGQA5gyItQ5ggofSbBtQsad2cdDhwBFYBj3bp1BqL//Oc/9Ze//EWXX355p6aDZkceEv11Ws5LL72kv//977r11luNT921nd1J+1ei7j4POt00nU7QqQtpGmXKiV2onJjlCg5apIyoamXGTVVazHjlxc1Xbky14vtPV0aAGHIlCkaWKDe+WlkxVUodzFd9tdIjq5SfADBUKBjhaUJoONlxFUqLYpOwuaHkaS4GMIFKZViq9rQFNAZL7ro3dcC1lMELlB27SCmRM5UaNUNDUhYrLbJWcfvVKD/hYA+wYpmvCGk5UQtNOzIAiy3tpqkAHJWethdT3flBnhFbKxIf6Gg4yZHlyoyvUmbCfKUGSkMaYqgc2k10rZdM0wnR2YOmkxkF6Cze46AzKnuZIr8x0cxr4Md9N31oGDIkta73/XQAHdLIrKWK2GeiqUl4r/HwfNDZtQaG8HzzzTd1+umnKyoqShEREfrmN7+psLAwS9HR0brwwgsNSDCb8UMYc598jz76qGk5aAkIZQShM9Vx7SshGD8F1KCVBB/QetBgAKCf//znSk1NVXJysv7973+bVkMepwFRBnBF2zn11FO13377aeDAgQbMgBH53nvvva98/7/w8/nagE6lmdU8TScEOpicwucrJ2aZfZXnJdQqM36CkiOHKyd2jlIGFiknYY7ykth7pUTZcWUGOimDyoWGNDz1UKWFL1B2TJ0HPokLlRmYp8C+k5UaMVOZMZ425M3BVIW0A8xS3YQ3Qtz9N0EeMlsZCNUoK6ZOOfELlRpVpKTw6cqKrVZ27DJlRh+ogsRDPPCKKfc+vjFlRS62ZKDG9egqBaMxK3aZxjAxBqMX2PUhwQPsmB5bY6CUk7RA+cFaJUeVmHnNTHGBqpBpMkS70QlQ7gp06kLzT3vOvBbbb7qISOAd55s/AIpLxD6TTXHpyVJm21WPyl5qtkzmcvBGIMr0p8XOcTZE37zWYkLzxz/+cSfIxMbG6uCDD1Z5ebnCw8MNWLKysnTbbbcZ4Lzwwgs655xzlJ6ebmX4wufLHuGFEEZgAzZOg/jCQu1TAOO/UbfTWugDdDuN5+STT+7kycsvv2ymM/pHPsAJ0KGPADF5+/fvr3322cfmeTBTUo8z3f036PzK1vE1B52kgXOUE7tIWdGLTDspSJuoYdmTNCK9TNkxRcqInqTUiDFK6D9WaREzlR9fqeCgcqX0m69hCP3BdcqJWqK0QZXKjq5WysBZyk8o04wRBxjw2BxH5EKb38iMqlOPKbrWu87RUo0yo72UF1+n5EGzlRY1Sznx5UqLmG915cYcpPSIOmVGVxvQoXUxh4KZMCNiqV0HFNKjFniaT/RiZViqU0Y0ZsNFdj03YZESBpQqN6HOAA0NiemM+IHeonwTyt0cCJxGlhmotHYBWZLN50TPN/Oa9YG+RtfucdABLwijhsZDRILEQbMsyADez73F7jSXaQoxp4NXCTa6uVNOMHtcbzY5H3S6NCDMasOGDdP+++8vwIWveeY1mMt56qmnOrWeyspKE7QI0j/+8Y8mkBG0d911lwlYN8fDFz4C
GQFOXV9ZYRkCMwce0Am90E0ffvrTn6pfv34GJq+99lrnnBX56Ctgg0ZDArQBHLQ/5nS4j9ZHXV/1/n9h+vo66JgDQci0ZOa1JV1zOrElyomrUV78ItN40qNn65DF39blF1+ra/90r2748yP683k36rQTzlXx+KVKGTxR6RGzNSRhobKjapU2cIGBTl5gmTLCq20eqDC5Sj8/4TL99ZKHNbVwqTfHYZPqi8zctAPodILMJ8HGQCdQqZRw5B4aV7FyE8pMwKOdpUVUe+ZAJ/QNdBYZ4GRELpYDBZvIj8J0uCQEOiHzWAiAMmMQxMW2VQxrINGmCoLM9UxUTmLI+w0wYc48UBECGs9xAOeBLtDxAMdMlAaY9Klmj4MOUQjYj40wOCgt00cdbXM6mbHzbaeCHjUdTGuoR8MzFofCF9Sof9hoM6/15vLmg04X6Nx3332dX/RoMA40UGvwzpo9e7YBT0xMjP3n+iWXXGJl9t13X9199907OBYAVghyhC6awBcWartZ06GPaCX0mwS90P3DH/7Q+viNb3xDaHd47AEo5HGgQzn6+pOf/KSThwAXGhFzQ5jevur9/8L0fe1Bp8q0gpTBc1WQMlfXXX6ztFVa9War3n1hvVrXbNb2Fum+m/+h0kkHKG7/0cqJKdew5MVKHzzfNJyc6AXKCVQZII3LrdFDt72kzeulAypP9DQAp+nsoMVUe1qKaSpoDTtqDvY/pkwTCxfo1O+epwOqT1bS4Ek2YY/3HE4LuQnzzYRnk/h4jVk7AMwCy+eZ9wADvMkWesfo+V5bIVpyEyqUEjFNhcEapYTPUMmkQ/XTE/6ouVMPUXL4JM85IHq+snASCMxVVvScUJulXhvmtdYFOIBcp2dbYA96roVAkkgEyeGzDXBwQrvrb+9aKDWc0ACeHkHHPCNiy20+B59rJoNQk5jT6W1FqQ86XaDj5mcGDx6sm2++2QQrX/EIUwTskiVLNGjQIBOqeLUBJn/6059sDoMvexwKELQIYIStMz2RDy3gCwu13Qw60O1oh37X7x/96EfWZzSYd955x7QW561GfvrFkb7/7Gc/s7zM6zjtBzDDdPdV7/8Xpu9rBTq1SotG03FzOiXmEp0ehWtyuQpSS3TDVTdrxXsf6NsHnajC9PHKD+bpwb8/oOZVG/Wrn1yo3IQpSouYqoIEzGxzlDa4VGnhJcqNw4V6qrLjJujv1z0itUqLy48wIe25EeN27YClPAQKeH0hvHtKZcqMnamTv/MLta7brusv/18F9h+mtOjpGl+wWLH9xigzbrrlyYydHXJX9rQfTF2ZsURV7ppXMhrQWALFyowpMVpw9c5NmqX4gSOUFTdLOYlF+vVpl2rdB5t1/tlXKid+ijKjSgxwAJuswCxlBWYqO3ammR6pxzzWzKxG2wvMccCZ4JDRThbvkWOgwjSckVlLzOsZ0HntGRkA9Q8bax7QPYJOamSJchLmWxgctB1sjrdf86YBDmphT4VcB/05nZZO0xBf9HirISidUEUgHXDAAZ1f8a+//roJX6fpADo33XSTFi9erLS0NI0ePVrnnXdep3kJAU4d5557rvLz85Wdna0xY8aYpuTACdditAK0iVmzZikpKcnqoa4bbrjBzHyYABHmf/7znzV16lSNHz/eaDz//PNVWFio3/3udzr66KPt+re+9S0DPoAT0CwuLlZNTY1+9atf2XwMfcPrbtmyZcrIyFBKSormzJmja665xjz40NQATEAHTQ4TG8C6aNEiy08fMC9CP3XRDo4E8AIeUpY+88NB4/jjj9eoUaOsX0OGDDE6ATfcqmnrCwv93QzKu6Tvaw46rK8JRpTbvElq1ARdc9k1am9q0Pe/daqSI9NVEMzUonnztb1li2658g7lJY7RqIwSBfYZruEp85QVVaRhKXOVMmiU0qNGKi0qX4/d/YTUulULy5YqO3a6MiKLzTEhL36ORqQx/zFN8f1GaVRmhdIiJygvYYYyA5OVFjlJIzPmmRkv
acAEZQTG6oSjj1fjqnpdceH1yk2YpIKkIqVHj1d2/Dhlxo7ShIJyRe0zVKmDZmpc1oHKjCpVZnSR8pOmKTm8UDlx0zQ0ea6Gp9Yosf9EJQ4coYLkGcqJmavsmNlWR8KgTOXET9awtJk676yL1L5uky4+589KiRiq3Jhi5ceVKzpsuEYEKTPRgBc6mPPCRRwwddqUzSHhsh1yAzf53MNyFiejd+sxtHNobmKlORLgAf3sI+02RcPUTK/76RCWOmHgTMvIdtXYHR+7a52tJmWBjw86XVpNTwLEzUf8X0EHDzYE84ABA0zw4u3217/+1YQu7TE3wnwRGgNzQLRDfsxyAA4g98wzz5g2hZBHeJMiIyNNm7r99ttNOAM6p5xyitWDE8Nxxx1n3mK0e+SRR2rhwoVWDo8zt2gTMKNN6gPAAImHH37YAIBr0OHaox60Pn4Ax2mnndZ5Dw2G5PJy/vTTTxuIoemcffbZ1i/uAyj0+8MPPzRg4xp9dh6BlD3ssMOMFpe3p+fSZ671ddBhYagtDsV77ZOaTuKA2cqOrdSwYJ3ykmfomsuvUnvTep16/I+Uk5RvoLOkcr7UullP3PuECtPGKCMwTNVFh+noxT/Wdw86Q8sqvqOhqROUnzxCY/LG6ME77tWGFSt11PLDDRymDlukYcESVRUdqdO+9wctm3ecxuQWa3BYikbnsG3LEC0s5d75mjm6ThOHVCphwHDNL1qq3539c2lzs55+6D86sPq7Gp9fpsK0KTqo9giVz6hS/7BYHbPspzqy7heKCBut1MGTVVd6rE46+mf63hE/1LSR5YrvP0RJA8dq+siFOu7QUzR7QpUBXnXRMTrx6FNUU7pAsf2yLe/V/3OVWtc16tZrb9OyeUeqIL5I4zNrNTG3UgtLjtJPvvsrHbXkRA1JnqysmCnKDpR6oBMy7XWBjrd+Z0+DDtpNSkSxxuYdaJ5rV/7xGcOQYelLFD9gVo/4EUYYHEAHLwRAB4eCl5/cZo4E/uLQTwccBNsXBZ1vf/vbBhp4t6GlIOQxySH4qT8QCJiwPvPMM81MxboXBDwaBj8WVJIf4Ux5AIgfc0gI6pKSEhPQaE1oFE54A24TJkxQdXW1fvvb35omA2gh4BH4zMug0VAvND3++OM2L+PmatBYnnjiCXNrRhsiH/UxbwMYADoABIAJj3CqAEzx6CMva5kATQDqF7/4hV2DNvpM2//4xz/sGmCL9odmxBooR/+LL75o/eoz4NKbRtXnQWfnxaE7mtdYd5Md60UYSBw8Slf8z2XqaNmgU477oZIjg8pKCOqo5QdrY32r7r/lfhWmjdSB1UdowwebtWW9tL1RWvNWs0759o+VFJ6kYZn5evbxJ9S0aoUOqKlTSmSuAUv9e5ukjdLmBklt0lk/+q2mjylWfupwnX/WJXaveeV2bVwr3XTFg0qLGq1zz/iDtrav1OoPX1TjihatfWeTjlp6isqnL9SWpk364PV3TCPZ0iBd8ps7lRE1Xdf+6QG1EVCkTWpcuUHvvPCejj3oRAUjCrV8/lGqf3+tXn7qZT1+5ytqWym1rW3WDVdcrZGZU3TjZXdLrR3a3tqi9195R2qRDq85RUn7jtTtlz9k/99/4WMzHTa8v0ljc8uUFSg2JwOLPoC7dtRCW3PEgtz0OOfKvedcpsEMfALMSWLwbLOU4T4NdvSmtJjLdF4S6lq5rdFhVekd175lqtEu99PxIxJ8YdC5+uqrbV4DQY3wRqhituLHNQQ+ghfzFVqNAxTMcYADprNp06aZgK+rqzMzGiv5MUsBTqyTQWAzn/KDH/zABDmABHihZbjV/08++WSnBxnngNRVV11l644ACUxd/AA7/h900EFGDxoU7cbFxRnAADrQBTgBOOTFrAgNAERmZqb1ceLEidY/PNTc/A/00kfouuKKK6ws655oGzMcR+pLSEjQBRdc0FlnnwaeryPo4LlF+JnYUiUOKFFCfwRnhQpSZ+vGa69X64Z1Ovk7p6gg
OFwzxszQfTffJ22STjvhDGXG5urFf7xmILGobLkOqjlM25ulNe+s0rghIxU3MEJPPfSg2uvXaklllQpSh+qmv9ypLY3SScf8UDPGztJbL7ytzY2bFIxJ1dCMQn30+kq9+dx7mjJitn5z2kUGPGVTlykvZYhOPG6pOlre081X3aZpIxGg4zRz7Fyt/3iVGlet1lvPv6sr/nCLzjjpUs2ZcJjq35VefeZdzRw/Xb8+/efSFunKi65QeiBPi8qWSZs69O7Lr+h7h56mg6uP07aWVqm9TQXJozUsbaJ+d+bZalu3Rhefc6Fy44dodLBIdUVHSeul5x9+QUvLavWbn56t9e8369Rv/9rmefBsI+wNa4Rwx2ZOxwMdFsZ2j932JYNPgK2qZ5mmg8s05jXWexKDDXfp3sKohRHsc1h6nc3pOG1n/syTbU6nt0LOTujP6XxxTeeBBx4wYY7gPPDAAw08EL7Oiw0By6LJmTNnmlaDMwKr/RHi/AANTGkIYzQehDZghfYCWAFiTORTH3NDXMPpgfUymM/cDzdv6kDTueyyy+z+GWecYf/z8vJsfgfzGmBSW1tr80D8R1NxjgCUxTMN0EMroT7aIlID4EibzO3QP9Yz8WPeCE3Izf8AdiRAEwcM6OVHOySnwaEpOW3QB50vWdi4OQSLvcaqfLcyv9pzJHCgE1OmocmeWzMRBZLCx+ui318kbd+sNe+t1dZGSU2SGqX7b3hCo7OYd8nTlnrpjX+9rSEpQ9U/rJ9Wv7VC2ixNHTFeCYMi9dKTT0ut7Voyv0bpMel649m31PBhgypnzVNucpZuuOJaA4Ojlh+qksmzrOxd19+jMbkTTeN4+v7XNHfSMqUHsnXqSQdqc+u7uvT3f1Fe/FRlBSZoyrBibVzfoG0tzZo2aoaSBuZrZFqFjl50hrY1SNf9z43KT83SjHFjtK25Qf9+5DEVBoeptmSBGj5+S/984D6Nzy1SeuRQiUgd69Yofv8MZQVG6k+/vUDaskXnnv4b5cYXKjcwRhf89FJtXrVFZ/3gZxqSkqJvH3SINjd06N4bHjfHAgc6GVHLzEnDYtzB87iSbsFT98AYCFSYdYw5nfCwCeaMNr7gYJuqYXEoa3V6mp4JI7wDjgSoR9jnhqTWav+wkf6cTm/mkJ2uf1HzGnMl/BCc8+bN65wnwcUYM9Ohhx5qQhrgwVT27rvvWn6EOCv2Ma8BTIDLggUL9Oyzz9r1pUuXmtBn3oW6EeSAA0IcMKButJS33nrLtCCEf25uroEEGhEaSEVFheUFAMgLmLifW4/D3Myxxx5r7VMv9AAORGhwoPef//zHzG4AH1oR+aAZwESrwewHneR3rtI4KqApkYhoQAIY6Sf5MMn1abBx46hPazplXuwxiz9G/LVPgk4wfJ5SBlVoSPIijc6t1LWXX6vGdat17f/7my459xqd8+PLVTbhQOXFTVZBwkSlDMrT0vJDNSZnvEZmjtCM0VP11n9e08a1TZo9fprykjL01AOPqXVVow5deKAKUvP0xrOveaA0aoJSomJ105XXavW77+mYAw9VTP8Ivf38m9qyYZvO/tHvFIwsUEb0KOXSXsownXBsnbZv/FjXXXKrMiKnKi9+umaPq1LrmvV675WXNSx9lNIiR2hs5gKlDpqkkvF1GpU9TtmJqaounaWtzWv0wasva2zuGB297HBt2rBCD/39ZgOctIhCNa8CMDs0tbBMubETdObJp6ujvlFnn/objUqbpIKYMfrV93+vTava9Odzz1NmbISWVJRo47oNuuPa+5QVM8PMa4S86QKdSgVjiy11RezeM6CDZSxm/6md63OeeqDJ5nZwJOjNES0Me5wFtYspN9Bhsc+M0cfYVtW92eR8TadrrueLgg4OAQAIP+Z3EKoIZX5OEyktLe2ciAcIyI9g58eR/CRMUWgbnANSCHKnfQA6gAP/0Ywc6CD0+aGhEEmBsnij8cNbDA0EEHK0AIbUBdiw4NU5E9AetGNeAwxw
VKAuNBMAAy2HH556AAl1A3RoZSeccIKV5boDHZwFoJU6nRMC96mTI04R0NzngafPg85spcXNDgX97A46XpDNIUlLLdAnoW0yYmbomsuuVePatfr+t07RkNRJysNDK2KWRqfN17isCiUPKFRkWJLmTpqn00/8mT569X0DlLXvrNS4/FHKT87Rvx/6l9pXb9SS8qUqTC/UC/94zrzfPnj1bbWsrlfTqtXSlq066ZjvaPKIcaYBvfbMW6ZV3Xz5A0qPGq0RacVKHJSu445aqC2tH+uOqx9RdnSxChJKVDZ5qTbWN+utF15UcnimClNmKnXgDAUHTVdWzDhNGT5Td91wi9rqVxnovPncvxSMSlbtnCptb12tR+66VcNSJ2tM5iypvV1NK1ZYv1IHjtIlv/0fqW27LjjzTxqWNFHDEibouvP/KrVu18evvKg3nn1U699/TWpr05P3/9u882yBKItPQ5oOFqbU2GKlxngL+p08/tKPgQrzBWCdDuZUAOgf9zaYeS0turx37zU0HdQj7HGsKI0fMFM3XPaSxc3x53S6wKU34fZFQQdvMIQ4ia97hCrzOMxhAAwIYQQzQpZ7CGDcp5mEJw8aCNcxWTFP8thjjwmPNdy3cU4gYYrDFIXJDUFOHYAAbaKZACRoHawfQovAjZqf84YjPhyaDQCHoMfEhqZCB9I7ggAAIABJREFU3sMPP1zHHHNMJ8gRfYD2cLHmPqCBeQ9THDQzFwS9zFtRH/R/73vfM7pwmiAEDgnXbfIBMLhc47RAQFDMkYQOev75540/vT2XPnO9r4NOyMzjebABOkSZZt6BuGHlFrgzPZL1M+UKRk/WdZffqG0bt+iU43+ixEG5ygnM1tC4SmVFzFTMPoWaXFChqhlL9fZ/3lP9u+t0whHH6aV/Pis1b1JNcYUyYlL14G33q2PtJh1YdZByk3L12r9eMWCaP6tcU0aOV+nUmRqTP1xjC0YoGEi2PAtLl+iDl1er+ePtuvTc6w088pKG69QTjtKGle/orr8+opyY6Yrfb4ymDqvU1qZNeu/l15UeXaDhQaIkzFNG5DSd8q3T1bqmSS8++awOXbxImxtX6uM3XtKIjOGqKZ4vbavXQ3feqMLkGUoLH62O9euk1mYNT5mlzMjJuuAX56tl1Tpd8pvLlRkxVGmD83Thzy9U80drdfn5v1PptLGaOrpAs8ZPNnNgduxUZQfKlRUg7hxRD7zI/6kxs0Xa05oOmMHO025LHDQddqHOS1rQ+346zOkMz2Bb6jKzz1GA8NQAji0++sQGQl1qnD+n09I5d4GAveeee0yIItABDAT08uXLTXjitYV2AIi4dToIVDQdQIW8TiAj7BHyaCEACxoBAvviiy824Q5wYHajHA4CCGfmP5gTYk4HQY4pizLUQ3mAAOACBGiX+wh38gI+tIWHGnUBSkz+Ow2DfNQLcDHBT/toNpwDJt/5znc6vdLoH3W62GsAKEAIP6D3iCOOsDYATkAHuk488cTOtpwjAWAGLST6CI30x53DL3jTZ8DFmdN2PvZp0KnoDOpocciIIWaxyFjE6K0lQZ6kRs1WTP+xykqcojtvflgNq5v1rSOOVWHWeOXHFykvtlSFyaVKGjDCTF8v//MdNa9sVU1JlUZkD9Fr/35eamvX9LHjlBmfrEfuekDbmju0tHKRAcqqt1ap/v16lU0r05DgEE0dOVmHLz5ck4dPVkZshvKSC5STmKfSyRV69pEX1bxio0444ofKjMvWyd85Tus+eF+3X3u7gpF5Sh08VlUzD9Wad1fqo9ffVXbcMKVHjjMapxbWaOXbb+rDN5/T6NyRmjJiktRer/defEbBwWk6atGhalr5vJ556A6NTilX6v7jpOa1avr4LU3InK/kfcfq8vP+qJaV7+uSX1+kgqQRSglP0//7/f9T44p1Ou+XZysrMUlD07N1yMJDNHcqEbCnKjumRNnRrD/ywuXYgtHAHGUE5uxxRwIsYy4iAVM0uEzzsUFg
AbSdHud0QCkWiKIaATwc//PoRj8Mzs7CoZf/CF5AAk3DbWXgBCPagwMShCc/tAomwRHGCG++2gEFhL7TdBD2LmQMdVEGjQQnArQVBwaYrQAC2sfMBegg3JnrYQEm8yfMAzFvgzD//ve/b0KcdhHYXEPwAxSAEHMmTPxDG27K0Iy3GnlJ0DRjxgwDOBaf8h/QOeussywv5QAWfoAOdALGgAXgABhNnjzZzIC4bNM+1zDfkRcwdNod80jQQnl4QD/hAW7ZkyZN0pVXXmnt+KDT9RH4pZtXoucrjWgDCEMLydIddLwoyVhQhmXUKIPV/YkTdcNV96lh1UZ995gTlZFQoIT+o5UfN0c5MTNVmFKkKYUV6lgrffDyCqVGJSsYSPRAZ9M2jS0YpvzUDD12z4NqXLFeB9YsV3JEqj56dZW2N0njC4hsnKWbrrhNDR8067hDTtCCkkW66qLrdMTiYxTXP0l1c5eaS/JfLrxGGbFZ+s4h3zYt6earblByeJotQp01epHWf7Beq95aYR5n6ZETNCajVofVfl/E7Ln5uj9aXVWzFqpxxXv6+LUXlBmVrSNqD9fWpnf06J03Kjt8hsYFF0jtjVLLOo1KLlNBoFgXnvUb+//Hs85T0sCgzQ+d/oMzpQ7p56f8TPnBXC2vOUAb67fprr89EQKd2coOlIUiF3DsCpezp73XeL5Yy/BiAz+mjTzK/ALi+hPPrrpn0Bmbd4CpR6AUCMUOog/ettI8D9jnuiekcoPb13RaTHtxX+QIavbMQRAjPBHkmLy4zzyKMzGxiNIBxb333mvCE6GOVxgaEYs3ARrMSsOHDzdAoT60An7M3ZAuuugiE/y4K9MG8yBOa2Ein2sIcsohsJk7AXAASLQXtAVohC7OAQFcmgGA6dOnW1miEaAt8cOJgTrRclhMClgBBrg8O4ChHkAC7Yc+kv71r39ZG7Qzbtw4o4EoA9BEfhwJABfq5j/tsWCU/yQHZDhJ4PXGNdb80L4POnsYdPia7RV0qm0yOTdpnpIjpyglepyuufRutTdIPz35l0qJHmIhX0YGq5XUf7wKk0s0LqdMrSulDR+26/BFR+vnP/i51r23xtbFjMgapoLUAj376LPauG6TllQcoBGZ43XfjY+pfbVUM3uZRmdPEZoSa3zKpizQ0orDpGbpzuse1IiMiVo092BtbZD+ePZlGpU9Uccs/465ZN/0l1s0bcQcZUVP1bRhC8XanLf+855y4sZpaFKxkvpN1XcPOlMb16/TEw/cocqiOp37s4u0ralFq99+WymDs7V47nJpa7P+cd99iv/GWBXEzlXr6nq1r61X4n5jlB9Top8cf6p5tF1xwVUanj5JWfHDdEDtUWpZt1n33f6Q8tIKdcK3fqiODdKfzr3BC8VjYXVCO4eye2i0t6kbMeH2NOgQIJr4neAHWs83w4apMK3Ogn32GmWaDXhIhMABqQaFjdPUEUfaXjp+wM9dz+kgdB0woBWwQRtaCyCBRoMwxqSFYAWQ+LJnjQyCE+EPACFk0UbQAriOFxraD9f4TyKwKIDCpm8sAuUaghdtCmDgP27VaC6AB1oT7UITbWLyQ3sgHxoJwp3r0ElCG+Eai0XJA/gBLixKpV1oRLOhLPeLiooMGJlbIYoBoAEgkA9AA/QAHK5jXqM/0EZ5eHLSSScZYMErvOrQaqgXEOHaLbfcYu0D1ixSxXPukUceMS2LfKxbgm4fdL4CoBOo6EXTqTZNKDO2XMkRM5SXUqy/XHyv1ry/Raf/8PfKShiv/IRS5cVUKGXAVA1NmqP8hBm6+69PGWjUv9umZx58Qc899rIaPmhRxfRqDUsfq4dvf0qtK7fpgMqjlTQoV4fVfk/NH0kbV0sfv9KsjjXS7Vc9rNTwocqKGaXH73zBrr37XL02rZVWvd6mQxccr8yY4Zo1tkLvPP+xrQt6/emVOmbxL1Q+6Uht2yC9/Z8VSug3TNnRs5QTXa6ikcvMDKaOTap/t0P/vOdlta9t
1VvPv24Asmze4WpevUaP3/OoCpPKlRMoMa0Kz7nC5LkamlimI5d8R62rW7W5XoKe2jmHaXT+bL3674/VsGKzNqzcpNef+1jNq6Xpoxd5Md7YOdS2N3DmtYrO2HJ7GnQ8B4LZpt0AQMUTjjOXaQCnN6UljPkc9ncoSFkQig5aY15sQ4OLerXJ+ZpOFxjx9c4iSgSh0yKIj4YQZWEm11nngtswQp6vdtbLuC92gIevfmKikRcN5rrrrjOBypc88dS4Tn14t40YMcL+MxEP4PAjunV8fLxdZ6EoIORW/uPo4MxYxFoDCKgPgQ0tgBFgAnjy+8Mf/mD36Qv5EP78ABIWfbLOxgEEnmmAAtEJXL0s4IQngA5aFsAHrSxmZR6HOkls6UD7aGEAkCtvjUkWWQHABLSoh3kf/pMPTZAfmpIPOl9R0CHycqBG+UmLQ9GG5yonsVRHH/ALnXPGNSovOkRJ4aOVHlmk5AFFGpI43wCI7Q1KJx6s8864Rrdc+bAOqT1eC0sP10W/vkbFE2ot3tm3lv9Efzjrr5oxaqGGpMxUSvhoHbPsNJ1/5nX649k36NRjf6+SCcvtHnv1DEubrZ+fdLGuvug+nfuzqyxczsjMOYrrN1QJA4bq0IUn6oo/3KbrL31YC4t/oKHJ83TOaX/RSUf+UjlxRRqdUaf4fYmFNlsLSpfoD7+6SGd8/2LlJ03XL374O4t+MCJzutF39k/OtRA5I9IqlRk9W2eefKHOO/Nyi9cWt984DUmdomMPPkWX/v4m3XrlE5o8fKGSwsdqVO5c/fmCW3XROdfpukvv00G1P1DUN0d4wUoBHKJcs71ClAts6m1/sKdBh2U2eD+jsLDc5m+XvmDnBSkLe90E1BaH2lasgTJTkfBCoAKQqrfQ1D7odIEOX/aACetGmKdwwtoJUQDlwQcfNKHNFzxClp1EEcYIXzQLp7lwDQ0FAU+9aAYE8kSDQfA6gY2QJ1AomgkCHjA4+uijrU7ykaADZwN+5MMxAOFOHWgbtIHQRovhh3mPa9dff70BH7ugkpcN2KAFMxzaCqYyAMTRAsDh3QYA0SZ9pY+Y3FwfCXoKmLoybGVAnW5OCPdqaAKAoRM6+DFHhhZFOcCHY2JiojlfQDuaoA86XxXQqQzt7VLd6UxgWy8HCOlfYfKEGF2ZcXNC+8mMU0oUscXKLJL08NRaBQcXKS1iuqLChisvoUj5iTMVu98wBfYt0Li8CqUMHqvEAWNUmDpXBYlzRNDO3LjZFhizMKVMQ5JKNT631gJwBr4xUnH7jdHwYIXi9x8r7k8btsxC2RA/jfJErS5MnaNgxHgl9B9pgUKJbp0eUaKCpBJrhx1Ow8PGamjiYiX2m668pInKihup6qLvKHHAOCVHjtSQjKkKBsYpM2GyshImKz1uikWajuk3UemB6RqZU2X/02OKNSpnvtJiJysncYbSoqdpSCoRq/FCm66EQaOVkzhN/cMyFLXvMA0NsoFbubdhHCBuiU3lPG9A7u1p0MFRpDBtoQaEjTGzGv4AaDxGl3NC2ylqQhgF2MSNiSDCFxDO4OLfPKzhGUt977VenAe6CzrWzCAA+XFOCBkmuRGYAAYCFE2Dr3oE9yuvvGJmKTQM7qNxsGIfExTxybjOD6BAyDM/Avhw/8Ybb9RLL71k5SkHiEGLy4+WwRwRCU85yjE35LQY8j733HN2z5ncMPlBGzRSJ4BBW9AJXdBD/3BigBZAjjJ4umHuQhvjh/kPExhtkQdQwX2aeRjAhfowk9Fn2oYXgCU/NDauQxttUQ956D/0w1Nowl2a/67f1NH9WfTJ877svYYjQchDqdN7jS2dO3fTrFN27CKlhM9TQfISW7dBnMek8Jm2PfSQYLV5ZeXF1iiHkFqDiy1ac2HKfOXFz1VG1EyNzqzRyPQqA6PUwdM1NKlSozMWK6n/DAMHgIAIz0SBDuwz3o65sWV2De2JgJkj02qVPGC64vadpIT9vSCabBaXF1eupAFTlBtX
osKUCtuvJzOy3EAwOmyCRqUv1IjUxUobXKbCpCUamlir2P1GWdTpjKhZtulbcuRkpcZOVgbRDJKLlRo1Q8lRszU0rcqOmJ/iBhXZvHn4N8crP6VSWYkVpvWxRXZaZKkKUxcqvv9kA5ns+Fkam4e78QwNT6/bIcK027PH7Wb6VQIdNnHDqQB/AOKxsTC0V0cCdn0j0jTM8dzfivXiP7ZY3Bxf0+nSaHoTaAhNJtid8AVkEM5oECQEI1qEE+pOUAJQTvAjlBGkgAQ/vuARuLSJAOcedZGfIwIdoAAkKEPiOvkBCQCLxDXK0jZlEPIc+SHYoQUhT3mOCHRXlmuUBYRc/ZxTnjz0lyNtujoBHP6TH/q4Tz/oP3RBB/zpDizMJUEHZbjvjq7Pjn/UTV3UyTXOXZ9ps8+mrzno5MQtVdz+c20fmOy4WtuyeUhqnQko1gSy187wlGVKHVgqBH5m1FwVJFR5WkXcPKUMLLL/GZGlJvQT9ptlG7plRMy37auzouZrSMICJfWbpZHBJSqIr1HcN6crL7ZKGRGeFsWePLgcAyDDUxYpZUCx3SNPesQcZUTOsf172AobOsZnHybqxaSWHj5PaYMrNDJ4gAJh0zQqfbEKk2usXJBtYRIrFIydpaSoqUqPn6MsaIkoU3bSPAvImRxRpdSoGo0dcrBSA6VKpo/xNUqOmqMUNtCMq1Vifw942MWUPXQSBkxRQXKl4vaf7pnULML0You/hrazA+i4kER74hhggzocCMot5hqBPjGvefM8c3pfHIqPNZoOm7dhWiP+2v/eukJECfXX6fRhYdaXBfHeRHsfB51OM4oJvSpvk7Fotmyu2ynV7rDjpS0exUzEzp4umdkI01FXyooJzWGwY6bNaZC/VmxLzYJJN8/hNnL7vMfubXWeGz3eFteufu5BS1ZggSUvL1EX5nlbO7hQQIE6BaMXKhgzV8GYMm+xLBvbxcyz/8HoBaH4aZVKB/S6rWvyeEkQT1KZ5xVovCQEDhEJWBzKwlu35YyXx013fOnHEOhgNiXQZ+Q3JomNQNF0or45tfdN3NjaAEcCtBwSDgX33PC+v05nbxJ8fl/3nKb0tQOdHYGHUPxeWuAJWMxvncnbyroTdBz4fOJYY0DjNjIDcFxy8xwOkD7vMTOaurslB4RRi5Vpgp65FICP+ZVKZUUvtetO27CIzzGVoT2F5hnIBKMWeQBDQFQAx0Cn0oCHaA1e1IYapQEejj/GE+ZCmJTvih7tgbcXd+2ToLMHI0zzkRHwMINdCgL7TTUHNJQXohQE9psunNF2/Cjx5h/DMK/F9ptmk0BoOuynM2fS8WJxj7PXWsEe1Dd/nY6vCfVZs9ZXBWj7Ouh0ygXAxiUPWCxki/uS7+n4CXBBi+kGAJ3nTuvwJtNN00Hb+cKpplN78uZLqr1tqVl3xDYCEUTIXuAF3GTLa4An4iAvQXugPKS91XhhfwKU9/a8cTKT/5a4R2igTg2w1sDXQAggIliq05piCZ46T2mAmQOp6GWdW4EDVp7s3fOgwzwOmg5HlJaDF/zKAgxgKevVZXpE5mLTdCiEIwGhqQlTPWHIYT7ofFUEk0/HntNEdjfvv5agsyP4dGk2IScD52zwKaDjNJYuEAJkegIkd83d/3xH104n6ATm2uJLD3SWG+iYB1nsbM+TLOJQZZKgPabMAx1MYAGApyYEKvwHZLz+el583n87J35aCIQ90Kn1QMd2YQ1tirfDbqyLuzQmQOgrBDoATfyAGRZUgPP7b/7IpmpGZC5TbL+ZPWs6EftMsJ1DUYkAnZj9p9nkEPtb+5qOr8n4msxuHgN9HnQcwHzWI1oQeb2jm9txx521JfJ5Ahyh7s454qLthd/Z8b7L99mOnbRg5oKumGKlx8yxDdPSI5d7bcaWWhgfux5+iNLDD/O0ltjS0HyUN8fkaV5ujsoDQ+Kl2fxPJ2B69wG7tMgaMccTDFQpGKgIzQPN8bYtYE4I81wAMxwmOcx0XjBVD3Qq
7P6XPo/Tqdl65jWUFRzR8F5jLscLoVZusTsJ+tmjeY0N3IJRc6wA8zl4Izx651p/59Dd/YXr1//11V4+z7Pt46BjX/OdZjUHPF1rhxyYdB13BA4m4i1hejIgcXXseOwqT/Rq4n15IWC6rntA5AEQdX2W/w4AQxP6CNTYmUqPmaX0KHbqXBoCl9lKj5voXQ8/TOnhR5gwzYid7QXhxAQXucgz99nczzxvzgnzH9EESAY6taHIAhWdiz0BVTOVxZQpGAPgzPIS5zgiYHYL1IYcEnYCHRZldgeBL/vc5qy8XQq8RaLlBjooLwSM7tVlmijT2OR4iLmJVWZae/NZ2URQJ0rttLjHddSf09nNX8GfR3j5efsmiH0NQMe8qTqBpwtwdpAfbqHgToLRAx322gFIep+j6MpH3jJPG3ET7q7c5z2aphQCKExkAGDcdGXEFXlmMhwJ7NpMpceP9QAJ0Bl8lMlLL1+FsiIXKCtisbIj62xdEAtesyND/2NYvDpb2VG1yopYGAraOcdcss20Fwg5GDCPgwdcXFEolXTtUfQVBh3Wd6K04LUGhvzxVw/amk/mdBIGsui14hPPNQzPNbQd3N2wyRHK4L6bPjSUwsTWU6Eu0PFUvJTYGXr7jQY1bmhV+8YmNTatsTUkLc0damnaKju2NKmltcFLnHOvmcV9X4P4Wb7A75sCf48/tza1dAcdzjvfB96XDWptY71Uo1pbOtiWRfff/YIykosUjCvd8+aVHcxbTmsIgU5I2GCi7zFFzbO4bB6YeNrOzqDjIlgjbz4BOgY4IS8vJ9j+D0dPU0IrqgkBTJE8DSbk9m2gU6T0uEmm6aSFH6D0iIM9TStmlud4EFWtrAhP02ExKpoNAIS5jYgGRBzIisTle4GtQ2JLAtYBmXs3oNM5l+NtQW3g07kFOM4EmNhqu1IM2tFXw7zGrgQsucF7DUsZWg6JiDa9xe4Mw92NLwcAhwrQdu786zsambXcVKRPBZ2QSpieNFOvvrjGXooNG5q0sYOgjSxWbPJeop1frO7/O18yX2vw50/2tjHQppbWDrW0btopeeGNWlub1drmvUdtLR1qb5YeuOsFZSbNNvt/arS3rYD7CPzaH3sClZ00p/87DzDnOZdlzFbOvBe6xjwPJj3nhWe0OPOWFxeNMhnR3pc9gOL99+KSdf0P3e9O9w798vLvKHcdLe7YTZvsXs+XfR6osGkZNB0AB+9ndilA+2EBcG+bgP5/9t47vq6jzONWqqvaveq9ucclcWLHvVuWZEmWe5xC6HVZtlBflqXvwr6Upb4LLMvC0iFASIEEQiqQAiGBNEIaJC7qukXd/r2f7zN3rq4cKcWOnSg+f5zPufecKU+b+c0z55kZ23sN4AFsQCgE27j2Hbbv2nhI5RXLnCNzkOWFq3T/vW0OdDqj6uuLKxLFq+lKAZ6EZ4N3Y15O6qjuVOtsAn4DgMUGUkAnOqRIZNBdUXZ6YAagJwCdk92RBvU99+9ECdDxJ4fi7ezY/F4LRntGT4d1OrOLcS2ZK62zeOtJaYtsG5wk2oLEYyjjmUGnY2QqjQZk3g3AwxUATtDpBsALsESifYmrX5Gov2gfyCcAnbH6neDZS8fTwVHB05lbusO+61z9nUfN62E6tCg9cbLpUfiRxtRadV5D8uQ3tsFZMucVNg9LxiTwPCPorBiZXkt6OqmgEx0DdIJOJwCewAaisV6bYnPgcxToJL7pMGMQj/YG02tj9EEBAL2IAMQ5SuE6O3maqGcA6I+/HrZnbPg5Hn6kzShotOg15uOIueb+lU/dZh+BcJGeHXTWqLzwKNDhm060TdFY+1FTbH0pXg7fe7iCjieQwSlsA/GYPPC47zt84+l1bSMAnTFnWAKgeRGBJhX4E6DDtj3sZENMwG+v7zHwIXJtvEC0NI4adZt+7rDvOhmnLdV9vz1iWxiMl8kr3U2vedBJ+aYzCnQ6E+DCHLW/6GRcdE4APKdwhxsMOBQFdJLAg9fTa/9d
EAHRa8wYBJ6O73OC+0sEcACfUL3tt0ZMAIFoBBN86RO32KbRxAOMd0pBGvNxXOxIwOE7bDd+9839hlLPuiNBMpAAT+fZQIfO1YNOAnCSgQZBxxt4O6eoDcQjI8CTAKBIzLePzmcAnS0qyXar8oOO+CXUEad6Ai/33wnQYbaMYw3Yzeb8WZea1wPojOe0pNXkEwO/xcCGqTU+CLFOZ/HMy+wgnmefXvPRax0j0WtjejqpnYpvVD66LfVd8DsAoFPHBgAYd0UVifnLtw9Apyvh6RwdMh2ATgC2LzLYhth3bY2dHMqnGRwX1nvyG8AZL/o5rSRzo0WvETKNi8THoPNmXGzTa+N9CPLKZnqtqnCLwumLdcuv/qzhAamttVsdHZwmOZj4ruOn11I6Ejyc1IWiwTRL8G3rVLUB83RGvB0HPCOgMzAY09Bwnzrbe9QXlb73jZvtwLDw5LWaWbwn+ObxcvcmXsr8JY42wGlhGxyW29Quf5sWz7xEGadxEuo4RxuwmpTvOiAU512TaVra+XZcddLLOSrkLQk6PM+tVWneGv1/n/2JDu0fkI5IXV0dtr4g3tudEkgQgE7gwaTYwKkKMqP47lHUQKc7OcXmvR3/zbO7h1NX29TR2qNYt/SR9/2PCrNWaUH1pcqfVpdYxPgij3hfyh1jQNuJG5gkDnEDP3BUmCm7/oonLXqNfdfGPdqAvddYp0OoNGjF3NyF816pvCnsqeM21TPwGUN5bMVQkVev6pLNes1l71dvVDo8rMQxwj2K9yZGbE+LUvMjuWB6LQCiUxmIxgAd1u7EkImL7hweHrT21NEalYakbXVvVkl4vc6b9Wpln7UpAJ0x+iU/KA7uJ3gwkphe4yBQvueAH4/eIzvagMhnMCTpuKToyUKmWaeDa0SsNXN0N/zkgB01+qyBBAY6DSrNW69VSy/W4QHp0IGYujqjGhoaMo9nJDrNBxFwD0AnAJtTGWxSeD96es0WjNJGHPAcOTKs/v5BtR+KKdIhzShdbwO9kiy35b3bruUEdy4pHUbQkQeyTtpAqN6CzxbPvNhmynBcbrmm3cCHDT/H3WWasxDCZ68w14iFPouqL9KNPz2keWW7VZKZsqHeGIbHLtMF6ZtUFNqg6tINuvKK29UXl7q74hoaOqyOjo5E4/E7EqTuRDCeF5TSIEdNRQTPA6B6GdpAKujEehO7E3jQierQoUMa6D+sWPcRfeWLVyg3fYkq87cqb+pWzSi4NHEuTdARJjvCMfqp4N0Jso9QvQUPsOSGXaYJIPjUB39mGw2AH+Puvca+OWRaWLXX4qxxk26+uu05Tq850KkqalRJ7mqtWb5XhwelWHRIfb1DisViLkz6aVvguFGc25vtZdiRBGAZBEY8Vxsw0PELRD3ojAzOWltb7Ttp6/64Vi1tcdvehxtUlbtPeVPY2ZnrBHUqQbmBbJ/JBhKeDk4LDgvfdMAR7gDOeJ9n0piPq8zhpLeNFr1GMMEfbhmwXULHW9zjjRxPpzJvm8rz61VZtFGh9Pn636/+RIcOdhvo9PYm9ll7GuikTLE918YZpAs68pejDYxaHJq6DQ5tJO6m1tp69MmPf1llBeerIn+9ysKNqszZp9zJ7DocgI7vj4JBgmm6AAAgAElEQVT7SR58hNyCUBaHEgtAyDSbfs6vcF7OuNFrhEz7bzouvnqrbVfNR6BnC5lGybNKdivjrOWqKa7XjPINqiy+QH+6568a6JU62mOK9bhNPt29T7GIA5yY7UKduhO13xYnuPuPyM/rbmHobk2HeZDJhbdHbzWUkG9q+sRH65H6xvI+j9KLz/+0vEelC94ngwJG5DsiI9t5gCMMYjHF2I3ABmhsF+XaTTwypBt/cadyMmdqbvVG5aUvV1V+i/KmNGl28eUB6DzTSDx4d2I9tZA7DgcM4Rw2AgkaVv+zxQfwPSd38tqxAwnwcgAdPB3W6rB/Du7RjIJt47pHqSOKwmlbVF2wXaBd7vQVKsxYoqbaN+jK
797KUh0NRKXe6LB6e+Lq6e5UpKtV0egh9fe2qr+/3VZcswAuuI5NBr3xbvX2dqqvr0N9fW3q629Vb98h9fa22/OO9oM6MnxYrQcPqS/eq8PD/ervJ0+r4r0HFY23mg7aOw7oiAbsLKTu7k4NDva7w8OsQ3TnuqAjwuDjvZ2K97XbxTYtge6OTXfILRJptYtBWG+8R72xPnF2TqxnWPEe6aaf/0GrL9iuqWnVqincoAoO98qu0+yivSqazu7veDsneYQb1BfIHBtIHFfNjjZgB1NsfJpxR+Sw5rNubNBxR1XXWhQCng6H8OROXv2cztPB2BfPeJVCZ65T6Mw19jFpZlG9cqdeoIZ1r9E3vnStug9KQzFpIDak7rYu9Ua6JPVreKBbTz75Z0Ujnda5cehbcD1/GcTY/j7apVisXfF4m+LxQ4rFDtp/nrM7MWunot2uM4v0dOnQoSfU0fGEOjoft1NeOXSvq7vNog27urrU0xO1aZ3OjogiPb2KRGKKRCLuYD7zcPBQ2cy13c5NCvT2/PVmMot2aqg/qsG+iCKdHepsbdNgfFgakB57sFVX//A2rVmyS+W5yzWjaJMWVO20j7RF02s1u2iPitPHPnIkAKEAhE+KDSQWh3qnBYfl/ttlU218mhl3G5xUTwcPB8C561fxxPGwz3y0AaOsWYV7VJLepNLMRs3I325eUtaZ56ske6WWzt+uT37k/3TnLQ/rSK+sMbU91aWDfzukaGdEA33D6o0OKh4bDq5jkUH0iOKRw+qNHjE5MkrGm3HXoPpiQ+ps7VdPx7Btiz8Ql44MSb2xuFj0MTTYq/b2Tg30H7H1VZ0dcUW6hywCcbCfdLKtjTgmOR7lOpLQEzobSFyB7o7HfiMdMUU744p2DKivR+rrlu65/XG9/S0fU1nOEmWdNV+F6Ss0o5AjneuUc/Y6lWU1aXbhPrsHnk4AMCcFYMbybhO7TDPLxWmhTK/dc+tg8hC3cUGH+Ti2McDLYYqNWOvbftY5Olx6nB0JMPjwmZs0t/gyzS2+ROGzNih81lqb35tV3KiM0xcp++xFWjSzTn//uo/o+it/p0ir7ZCjwajUtn9Q0U4J5ycS3J+/HJBZuxTtkMmRFeujri4p1ikd/OsR9bRJnYekaJf05BPt5v0cOtBuwHJof599g2s9MGx66DgktR2QAZXphXr8ha7GugL9PX/9dUnDMXcdfLxfV3zzZl22659VXbhSBekXaFZJrc6p2JY472qDmMquytmuiuztKs1oUWVoRzC9NlZnGDw7OdN/iQ0/cVwYEIEhn/zAtfZ71NTaUfhhe68dD+hU5+xRWeZOFU9vsYZQEdqmilCzyrLc2TxloY0qyVqrsvBqVeWv04yi9Vo0s0F1a16lV+57r3Y3/ZN2NXO9Pbg/bzm8Xbu2vtOu3U1v1+7mf9Du5rclrn8w2V62+1/VvPkfdFHLe7Rty99p7/a3amfza3XVj2/Q9799tb719evVXP8G7dr2NjXWvlGX7X2vttW/TbVr36iLd75Pu5repV1N70zqZ2fTP2pn09u0s/nv7XK6C/R3bPb7dq1bcpkWzWi07zWAzNyyrarOr1Vh+hpbP2cRpNlbVZq51byb+WWvtLYWOmOLanL3BqATAMzJAZix5AyYhLaIZTcca8Bv1nnOK9tpp4aOF4iWdrzTazPz96lwarPyJzcKAJqRt0fF0xvtI2dNHiOxrWJ1Kh+aIC48aZkKpq9WafY6u4qzN9jiUhaYBtfzlcEmlWTXuSu02bZHKQmvlbvWqyS0UaXhWuWnr1Nlfp0KMleqsnC1FszeaF7mcJ909+1tqindrMLQSoWnX6i5VS0qy6tVeNpqVRQ0qDhUq+LwZhWHN6o4vN5dOWtVnLNaxTlrVRRaF+jtGG0X23cfYRttlMjUdvaZy22WYWbBDs0u2qWCqbUWoYaHU52zS1Xh3SpJ36aS9O2i7QXTa8H02os5vcbMWO7kVQY6HOTGOk8i2VinM+702vEF
EjQZwFRk71RlaJd92ynL3KaZ+Xs1I3+XfefJn7LZGgYAVJXTrJq8Fs0t2WM7kM4sbLE1B6w7CK5jkUGzysO7EleLynOaVJ6zVeW59SoLN7j1HHnbbS1VZV6T8qavVl7GBcpJP0eD7rOOTXdW5G9Wed4m5U5bperCJstbnEUZjSrP2ZYoN1F2ovzy3LpR9QT6Oxb9Nao6d7vNDDB1RmDAnOK9Oq/mVarJ26mMtNWaWbBbleEW+35TOK1O+ZPrbHptVsFF9jwAnQB0XkzQwcPJm7LanApiAi5p/oh5PHg5z8nTOZaQaTeV1mgAwwdON722TWVZjSrJaLAom5KMraLBMFqjMRHq6UZwTSrNbg6uY5YBLu1ed4V3qSy0TWXhZrFotzTUpJKsJhVlNKk83KKK3GZVFdRrdkWd8rMW2uaRsS7p8QelzLOWak55i0pCm1WUWavCjC0GNlYeZSbKpWwPLmz26usJdHjsNsx3GQJxGKzhyXDPnbTJ2gsDN9oRA7eqnBZrS0yzlWTQlppVOI0BXdDpBjJ4kWwgsSMBM1g1+Y0W+cxZbGwwwJKbcXeZ5tS3/KlrzL3nPB0+CLm1OmwAunnMOOtUJTtE40PS2MhWEWq0kZwbkbGmwF9uCw86x+A6RhlkN6sse7e7QjuT4GDyTABZWWi7gTrTcKXhTSrPX6minAUGOof7pf2PSmW5G+2qyNti3gvAg4dDOUngMcAZAR3vSQW6O0bdYffZzea1EBjgdhbw29r4NsIBi65d0S5Hri0W/MP/8XaAT22jwe8XqVN+uQ8IQvUW7bygcrdFLfNN55c/3m9eT86kNWIma8xdptkGh9ND09OW2EegKWnn6eHfy3YIHW9OLmnEFjLnG4C7l4fYJJQjEdyxCNZoQkcfq+sblW+wbmSeHDknRurB/2eRC6BjYAPgbB8NOolOzUAHrwfQydmg8oLlKso9x4HOgPSUgc5aleWutyk2ps2KsjaZR/N0QIEeLrwcf/ln6PJZ6A3eJwZYXk4jMwMOdHy7SHSSiZBUAn3sytlsi0MrcjbKXYlB4cu9cwv4e2l6tKF6824yT1+aiFirteU2RLFxtMGoCLYUHabxTYc5ueXzX21nWzPFhouEl1MwbcOYSPU00Ek0hnJG0lwGPA583OgsZb3PUeFzVpZ/FtydcT1XOWRvPcoTcd8VRjp/56kAHg501qm8YJmKcudJw7KjKA48eljlOStVnrNalXnrVJm7SSVZ682Ixh1FQ9/RF0b1XOkO0iU7ETcTkBiUJWXqB220oVrXpqxtbVB5mGtd4qJ9bkmWlWyXKQ08eBZ4OSfMBkL1dkropLSFtrkAM2Vf/PiNyaMNitLBgkRfkWKTaX4U5Tf85AS4317fY4nH2yU0yYQVmGgUSaBJNJSEx+OnBFK9nyQhnqDgPqKc5yMLQCc57UXggLuSoGOekJsmGwEdPJ0E6AxK+x8ZtBXvI6CzQcWZ7LtXZyMVQt+T+vaGk6Qx5ZC/5LMxACl4N65+AR2Tb1JGHnC4Hw06DOpSgScxKPR6Ce5Pt9VAJidOJqF6+ywDhlTncdRGk+1Kw+cZ+vjx8CONj0DMybFfDtEHHOJ267Udlmm8Q3hGdULJxjJ2Z+PnpMcFmsAojsso/Id9A5ycOgOe0aCzU6XZLSrJbnDTa3zTYXot4ek8HXRGezqADh+zR0VJpeo80N9x6W/sduGAxw/Y/MBw1J3ZhXAC9AMdHJ8OAvkdm/xC9TY7Nr9il51QgKeTefqFie879baVWtK+U2ScxlkIfNNZOvdyZZzm5uYIg2Nqbbw5uVGgkwwM8PPR/u7c2rGnD57uco10auQnb3B/LnIYBTqpno4FErRYkEFp9g6VZDfamp3y/NUjgQQDeDrDR02v1YpdKvyixBHA4SP3iG5H6TXQ13HYa2JaMhXI7VuODyB45vvothhMJQXyOIk2kNjwE4+c7zhErb1i+78bABFEgMMxJuiwb05Vbr3OTltoi3rOrdlnQQUE
EYyXaUSxTSrPalF51o6Ui/8tyWgcQqq5RkBktFAqs5vlrm3B3WTx3OVg4erJNU5+eq3RfazOblFp1i6VZV+k0qzd4njjUkbG+WtVlLMoGb124BGpIrxaFWEiGDeoKqdOpRmbVBVuMn1UZDXLXS12r8yGPn953XF/7nQH+h6R10hbGt0uRkDdA72/+wi38dvUeGUGz0fLOJDHccojseFnwbS1dpYO+69d+73HbWp+1CwZA6pUT6cse7OdHEoUG4hF+LQ/X8dtUe3nmMeYPuObQpYHnhGgSY3EGWk8Y4/oKkONCq5jk4HJFu8mMa32tDDmRHSbeTqADsrPW6ei8HkGOuqXCCSoyFlpoFOdu1nVLCzN3KLqHAcoFay9SlwjAwT3riqUAJpAh8dsw8mR4NGeji01ODrqk07Cgw/3sdvUeGUGz8fow46SeyCj5yMjtsBZb56NP0/nwTtlO2qwRme8Q0DTcqes0KKa3bY1zcyirZpd0qRf/Pivuu6Kx3Xnr3p0xdf/JDbvLMpYq/Nn79P9dwzr+h89od/fHNP3v/pHAVog3Jrz3qBf/OhJ/e7GuN2v+e6jBl7MO58/6xLd8JP9uuOGbt1y7SH96Bv3aU5ps0VJ8e6eWwd0408P6q5fxfS9/77XpvtYP7Soeq9+/oMn9MAd0i9//JR+9v3H7R3bLqxY8Gor87afdeiWa9r0w6/dZ+A5t3S7mta9w8r7zXVduvvmPn3mI9fZ6aiA6eZlb7V8t/+iR7de264vfOwGzSvbYe/XLn6D/nBLvz2n3M//+y/NC6S+i7Z+QFd+889WHjx+6oPX2hlC0Ln63Nfp5qtbjXf4/L8v3mXvOAa8ZeO7ddNVh0R9v72+W5/96PWaXbxNgPyGJW82WVEnZX7i/VfrnPKdmpa2WPsaP2j5yIts/uczvzFZ45Vu2/Auk8tdN0Z09y1R/edHrrbRBWGKW1b8k3515UH99heduunqAxZNYuGLWY1qXPsu3fDTR/XLq+6xbzpH+iU2msydNl97tv6zfvLNP+re2wZ1xy+j+vSHrrfti9jmAlp+9oNHde+v+3XdD/+qj7zrCi2qutQ2d33TpZ81uZAP3j/23p9oYdUeMfrZ2/B+4w/9wv8H3/49zSpqtj3FXr37P4wv5IzsPvzOH5hMiKZ84yX/qau+/RfdeUNU6PA9f/d1oVe2bHrdRZ+0d8iLfJS5eObFthXHK3d+zGSGvfz0Ww/r0x/6mckaG+Xd1d95xPT365936iPv+qHJEz1c3PQhkyc8OP5+aLbH1PObLv3McfFHXejvff/wLbMJbPBdb/6afnXlAePvxqv26x9e+0XVFNRbO/v7V3/eZI3dkvefX/8Vy4dc3vKKz5mMH/mDdO33HtM73vhVsbdhdX6dlYG+abM//+Fj+pd/+EYivHqz/eYZ70hDfeQh7zve9BVd+/1H9Mg90g1XPqm3XP4ZCyShL/jnN3xJv76uXXffEjeaoI2+AFopA9p5T3/xrrd8NVkfv3nGu3H5uyVu76mDugheoW5ogBZogrZn44981Adf2OdPv/2QXrP347Yrd1XeFn36w9fYswfuPGxpXrnr30Sft3jWRfrgO76jm685qN/dFNVV3/mzXrfv/1VlLvvfbdN73vo1/eb6DpMZ79546aeMRvqtD7/re5bvtp+3GX+v3vMx294L2VDm7b/s0j239ZkM9m59n/WtC6t36WP/8iPjDzqv/NaDetNln7Z89L8feff3rc/lHbq6uPkD1kfCP++8Hq7+7sOCB57TV59o/pDrvqb3a1rauTqnYrs+8YGfmrz+cGuvte3a5f9o4dFEqn3987/T977ygO06kz9lo6pzt43ycLy3k4ZiiPkn1JlwWfZFW7nocvF8TmmjGUR40lJTIkcWUHH6aeea8c0u2WoGWJW3WZlnXGDpCbfNPusCXTDnYpG+NHuDyH/ezL1WPkYyq7hB6acttkgcmLlgzkXKZwuW/FoT5NS0BWbA0INgoS+bVfOl
jaZ4npOe+jnUqqZgi501Qr3Ulz9tRcKQ1xhYziisU87k5VZ/3tSVWjJ3nzW6wvRVRj/paUhsTEp9mWdA26Yk/5Tr/1NvWWi9GSZ88Zz8GBV0As7QgzyINMo4/XydO2OX0c/7+ZU7TH5s6Mh7jJz8yJ2ziOaVtwh5Ikf/Hn45SwV5wm/e1OUJg1yvvGlLVFPgjqRgZMGiLDrh6nz2ZVspeOcbzazCneI02AVV27R4bq1an+yUDksdTw0qNGW2ysIrNKeUedl1tlnfnJKdto8SdBRlrLZ8lbnIdpWVl30m3lGDAQh1wIfXV+jsJWakC6t3mN0UZ66xfMgNOaNn+HCRWJvMHmiwpEM/6MGXx518yAc58X9eebPZH+nQN/TRCLErOlP0inypB/qRG/Y8t6zJOhzqQc6U6/lDL9BPPTzHXtAjewUeD3+UT374hi74g2/+Yw9e39gNdEEn9WJXBdORhWtPpMeOaD/ezrET5AF/3j4pBz7oHLFP6KfdkA458Zz6s85cYumoj30QaV/Uf+6MPVY//5Ev8vD0Uh/1Uw750AP1wh//oZf0pOMoBvKdaP7QM3pH/8gX/nx/AT9H2wN2hn6RE3wjX/JDJ3LDvuGD8viP3LiTjnbn+y/KgT/4pz7qQW/kw05Jh1zoF6alLVTo7Autv/LlIR/0g15zJl9o+aiX9/MrW4w+5E95yJnycqcss34H+aI/3p8M/qjH65sBAnQiJwaB7DxwTtlFmpq21I62mVO8W1mnr9K80r0qTh87nD+NkToCYQRqAmUkOmmZss90G3MyWqci3lMxFTESYMRGA2IUaYDCFA8NP1EOHg6KpHzS8x+BssEh9TByBcBsVIyh5tabwVC+f0461hDRsRBhh4KJlIAORqjWoAscEGWctsTqIygC4GCkjUKswy1uNG8Gg4ReyuMOPUwpQh/18p5Olzt0Uj7lkY76eM6I0wy1yHV8eC3kJx/lUp6nl+d4UdxDZy03Q0I+lEc+DA++zXDDW+w/39igm+e8Jx0Ni/IxUORgHRbHUeRtUUmWAytAAmApzSRUcbM1rKr89Wao0MBZR6GzViln8hIVZC6wU1052bXtb4OqLlqmOWWbNbvE5WV7Fcpio1bqp3HNKKQDX228cxZ6ZbjJDAtPCHqxEy8X5I/ckJeXH/L1fMEv/NFwsQ/yIW/yQSt8oR/SYzfID76tw8h2HR96Rc7YIc+5Uw7ypxxvn+gN+8XeuEOnDQzytyafA4DI3dNBedCHHaKv4+EPvrFj+IEu6qF+6ECP2Km3L+qFX88XfGPv1gHSwAsbzJ6RH+X49kY+0iFvyrWOP2E/yBd5Ug/1Ig8AFr7IB13IBbnBP+mhh+ekQ27wj36QE/moB76oB/16uUMv6aEP/ZwM/qgfuXq+kYMNIHLq7Dn9B/Q7O3Z08d/r1bdz2rWXJwM8+IBf7Ijn2AP9GeWgH/hDH5SDfJEfd9IjP+QD/8ibO/mQN/nQA+n4z8wG+SjHl4s+vN2jZ/J7u+U/cuaOHk40f95+oBe6+G8DRgseqBeDTzamtX4ns96ABrABdMbbpimNMDcEPr9it3WMRK5hcAQUoEC3Jc4mm0KjYg7qQdF87+E/IdYozu0wukJzS3eYoAmdw9Czzlhm5TH3R3qiHBAU63a404GxopXtsBEku+xSP+WTvya/0RTIiaZT086zfHx3oh7ohn4Ewe6mKI56UTj0oGhix0kPH0yXEE+OAqmX9+SDH6YIyU/9GAJ8YiDQTXrKoz7owQBJT3nwj8G501Y5+puyOBt8iwOadGegyJUOFb4AENZFkQ76qJ/wWOrjOfIEOKiP5/DHc+qFXgwS+qwxlWx1ntK0Dco+Y63YWLUks9a8JoDCNYB6C3vGEPAO89MX2vQaYdOPP9il4uzFypt2gY10ARq+FWEwnHEO34xqAB5GfNAAbzlnU99q5U9db3QDPtCJnKEbPpmWRN/Qy3/kxN3zxx39eX6Qh+cTOZIe/kmHHniPXLCjVD2Q
39sT6aADuaI3ykPe6BV6oBO9Q2fOpJWmR95TD7xBL/Ui56wzLjT+0cux8kd52CN0Uw92Q3nU59sD9fEePnnu5eXtAb69/OCHfN6uoRM+vZ0iN/Lzn/zYieePfPDNf98e0C/yIh96Q460P+jkP/n57+nydJIOeXn9QR/poc+nh28vT5/Pl0P6F4I/9OrlAd+Uj57pdygfPr09wLdvr6RDrtBH+4N/5A/fpINeb1deT/CXqj+vV/KRHr69/Cgf+dAv8By5oCf0Rj+DXOmXoJN+xMsTerz+oC9n0irLR/9BPrcT+TLTL/ScaP6g2/Pn7QH6aH/wwuwK+wAylcbO6NwBIKbSxp1eo9OggNnFO6yBESaNAIleI4R6bukuEwhTKSgGdwpDZeEPDfG8GZea4hhpY0iEWiM4yiE9oXMYptuHB4Ous/ooj/x8uCMfz2GQ/9BD/Xycgino4bsEAiYqAoXSOVI+9QEg/Ae4KAdhcCcf5UEvfEAX9GJ4LjJvi/FDfk8PfJGOjhu6kQuAR/00ZMqFXsoF+HhPOurHcMgP/bynXOilfvjgOfnha17ZbqvHbYpHnW57e9IjF+7Uw3SZA+QWW3iFvvjvdoxw7jee0IKKi20z1XOrX2mnS5r3lOM8z6wzVijnbOZYtxsIhaeco4GoDHi6DkgLamptNwJGYNDBRq1Exs0v32f/abSMxrjg1Xgr2K1ZhbvN4JAz/EEv9CH/BZUXmZ7gD73BN/yjB+iHPxoe/JHey4NyaGC8dw2W/cWo13UM2AU0+P+k8/rHHrzevb1idwyoSI/+uHs9QYdPxx29897bF3Qgj+PhD/uEb+yE8ikP/UO3twvfrry9wB/2B73oGXtYVH2x5XML7hhRY+O1SXmSDnnxHHmiI+inXvil3VIPAwnaGfLGfpEXz0kPn7RjzkTBnkmH/tATcoEP5En5Xg9M2ZKffNx5Dn/caWfk4zn1nwj+kCPyoR7ff3h7Qb/QgT6RG/LiP+nhA374n2pP/j/yID3y8vojHfqjXPgBENAT7708oYd8fONAf9jVgso9lh59I2/K8f0M+oEe0vnyodfTTbm+H+O9+0Bfr/kVe02ePt2J4s+3W+wBPuEfO0Y+0MIAlVkULqbVGIhykgAzIeN6Opy3DuEFUxllNVhi7qAWhkzm8NmrrQCmZxgtmwHmubU85E9Pu9BQDcPjA5IZfNEu13GHMb515oKhIHbIReGcMIqgQmeusYZh83+heuvoYDR3EiPbDcqbzJkjW9x8YYmbPoIe8ptA8lqMTtLznPJ5jjDIB9piAAgCA+J55unL7ZA5+EBYdHzpaYyE11p6BAtSw7+nEzkgF18uaI6BcfdyoT7kgYLIZwYcbrJyKI/3yAf6TCnTN5rcqc+mxrK2mNJwWZEHhk066Mw8baXJk3IxVMen82RsBJWP7Gttu3vSMrrCy6GBoHx2Ly5Od9N551RtMsAZiksP39thOxLwvYaRG3JiF3B3/PhOo49Og9EOIz50Cw/sGE6Uoo1saNBZDqixIy9/5ArfyIlyvTx5jpxIRz74Q67oyesduabKG36xH/JhF8iHfMgHvWHslM+oCxpttBViN3O8M44QcB0Td8qhHtIbLwa0jEyd3gFOyoG+4+UP+igHHdAOqI/6kQvP4YPGi53z3ssPeRlgJNLxnPfIyzrMPLeZoqef9MgDuuHLtxvkQD7qgT/oQR+kg0+vH9o79JAP+VKPb8fWkWY1JNsj5cEP9ZCfdOiHfNDp+eP5iebP0+HtxeuZeqEH+mh33OGf57Qv9O/tBz6gG/vzdubL8+2d99gZ9uf7SeSFnMjHMeL0P5Tv7R3+eU560kGPa7d1Zp/0DzyHLvRIevSC/Lx+KI96yef7E+68Pxn80T8iH9oXckMu9G9WP23dFo83uPYVbrL+1DAi3GSyMvtICZfmf9rMgl0GMHmTXQFsoY5C5pZcZM855wODvHDu6w04KsPbTJBuq/VdNno2wU93HRYhnfznXBAUNCN/
pymcfKbQjAYjmEWHEM5W7Xx84j0NgtE4+cnHB6pzyi42RqGP8uaXX6KFlZfYNu/8h37K4SwSn585RujDYJhu4k65JrDC3dYBMkonfdH0OuMPfu1j++RN1vDcsQzIZpflhz7qgW7og08MnnowDOiFP+iwji3DATdTXsgPeVA+5UAH6ekofD6OeoBO+CMyjHo9f9DJe9LDx9ySPXYHlPBCGFlxpASH580putg8EEa5zEUzGoPuc0ovM0DCRS4NXygNSIf7pI4npaqCVQJ0ABXooxzWWnEcBf8ZTDDlACjRMNAFh/WVZrC9vgMc5Izc4RP+kQ9085/6kTPyQc6LZ1xuNNIgvf6QIwYOfxgmnhZyo37y2cmZRbu0oOLSpL2QzssbO0Eu6A1g5jn/jZ90BgOug6B8yuVOudRLIyE9dFIvHS/8oNcXgj/k4O2dcpEP9VLfvNJ9ZhfQg36xS+7wQTuEHtoH6fxzA5TMrTZAS+WL99SDfcE39VAu9SM/7NbXTzrfnkiPTpEH75EHesMecydtTD7Hfn1+6CQd9gsf1Ef9vKc+r88TzaSt88QAACAASURBVJ/n07cn+PL2Ar/YG3oMn7XenvOffgW58DzVTpktmF20x+SH/OHDt3/SIU/PL/xhR0wzI5fFM16VtD/kynvfr2Gn1Md/yvd6MsBMr7f/yB95kw+6kBtyhQ7qhQ7uXm/YK/SdaP58/7io6hVmb9ijDSwy6KO3W5vnP23ZyWe79YvoAPuA96OvNAp1CnMNfGHlZaYgOj8aHMZFRfxG8E5JjnnXoPFOLjHjxLApi/M/MD7ypT5zBoKy661cBIoS+E+5CNzXQ35o8+9QAoZDOspHCZRNo+Ad/2kodPLOoBjtcvYISqOj2WV5+Y/inPJcZwaP/nwfyj63+nJrRBgC9TlAYLTuaCKvlws8wjN1Qb+XiZcZd2RKGc6wHHBRFnm4qAMayQvPpOM5NPObcqnPy8fLBKNkSgFPhMO+yjK3q3Bqg+vgE3P5Nq1RsEsFU7YakOAiF2Qsdut0hqUnH+5XQbqLtOMd8gF0ZhVcLM56gR5GVnw7wK2mgZGGQ8Q4UIwzX6ALuaE7+IAf8kE/z3kG/ciN/7yDV2RAWp57vXgbQW4883KCZ9Jx8Rv58A7Ze1lTLv8BIN5hG5RHWmjgGXownhLy9jTTeZDGdToMhPZaWso7Hv6gF/o8n9BDmdypG17gExr9RVrqhF7ulAHd3KELOuGB9PwnPxf18Iw8pEHWXv48452XK+nJyzMvD/6n1pVKAzSR1l/k97RTJvyQl4vfvh6fxufj/kLyB5+Ux0WdvmxkzDtsADliEzyDV+ROWv7zDpqg09PPb57xzsuVsjyPPCMtdZGG55SLrHmOrXv50wZ4R1qeUTfpkC31o1evB2yPNN4WSUvZPKMM0lIf/6GPdyeaP+inLvohaPD0wA/P8dAAUTewqbeBjBu8MHCC7zFAxx+6FtxpKBPtIsjCT0NtEye42gUfdg6L25uLhlCa0WJTbMwXl2Qvc6AzKB18LLENDoECzFnTOWU020mweDs0TldW4jwXW7SYUlc2gQcTTW4BvYHOAhs4fhsA5J8OKs/2LO34Kw6U9+LJkBBbwIBRrgMC25Io233XICqK6bAXBnQSe4AFoBOAbDDICGzAbOCYQcfNY9uINrFPWvB7osiEKLnENkUAje2Bx3ZERC+5tQNjgk5oqfN0hp6Pp5MKOiMnXjrAnSjyCugM2nZgAy+kDTybVzPW+zRHgJt35kNW8B+jnCBysF1c8Wb43gbo+M1WE6BjB+q5yB2mzNgnjxj7kqNBJ3e5HQpm6yqebXrNvKpU0JlA8pooeg3oDPohcwBe+v3QWKDybM/SLIHfBTS4uznKCSMHtjByi9hssMD3FfvG4la2u3fuY14q6JSGl6R4OolD3MIuOs19cGYDUj7yJwDFvg8lvukY6NAY3LRqYD8uZDiQQyAHZhVOKTs4hu85yCfNBIWwgmsCyuAo0EnO
taeCjgsm8KBjq6jHAp2cNbauh4g41uiMBh23Sh4vyb4h2Uj8KNAJ7GcC2k/Q7oN+7zht4BiAJy35TcB/GwjuI99JXvKySDkq3Na2+KCOsUEHzyUJOsMyb+fgY/646lTQIRzTfRsaiVwDcBKg46fYCFE1sEl8V3rJyyugM2jvgQ28cDYw+pwc8/KeAwiljXQqfChOREIF95Qw4Ze2XNjuxqLUCB5IHp7ndt9m91u3OV+9hUE/d9BhrcWzgQ4A56ffArsJ2tFLu50E+jkR+mGR/DGETJ9Sc5CMyhHSy+Y+Mr3mAgl2uQg2FsrxrSdnfQrosE5nu4FpMpDA1uk4T6cstNY8GRZ/laQ32fQa322cvLyX46bqeOa+HbmdB14+8ny52UfAz8urvb/E9HkMgIM+XCDBMWY2hQZ5jwntXxDZhVJABy8nc7fKs9h5gTNFPOi4PZ1KM3aoLJPV+41HLQ4dVHkO54cAOm7F8dig4wIJHMA0OdDJ9qvBn/9o5wXhP7C9F8/2AtkHsj9GGwhA5xgF95LoNAPQCRr+RLbfgPZT0n4D0JnIhh+AzinZaF8SA56J3G4C2l/UdhOAzkQ2wAB0XtTGE3T+wbRqYAPP3wYC0AlAJ/imM5FtIKA9GHhMMBsIQGeCKWzUyCrwdIIOZyLbb0D7KWm/AehMZMMPQOeUbLSjBh4T2X4D2k9J+w1AZyIbfgA6p2SjDUDn+X9HCGT20pFZADoB6ATfdCayDQS0BwOPCWYDAehMMIWNGrEFnk7Q4Uxk+w1oPyXtNwCdiWz4Aeicko121MBjIttvQPspab8B6Exkww9A55RstAHovHS+TwS6eP66CEAnAJ3gm85EtoGA9mDgMcFsIACdCaawUSOrwNMJOpyJbL8B7aek/QagM5ENP1SfPAPJHW3AGTjuYDV3ls5Ge1+e3aTS9J1ul+lwnZ75aIMmlWQ02Hk6I0cbUA/ncTQkj4UYOa666ZRsOKPAfyLbUEB7YL8n2QYC0DnJAn9hO6smlYV2uivcrLKcLSrLXa+y3LXuzv/QNpVm7VZxxm6VZu9QeW6tinMWS4clDUsjJ4euVmXuBneyaNZmVYabEoe0cVBbswCuJO0Gdg12TELy2YSW4/Oflw74DmQW2MCx2UAAOhO6s2xWWfZFKsveo7Jwo8pyN6osb5XK8lYkQKdOZdm7VZq5T8Xpe1Wa3aLyvE0qzj13bNDJW6fK3FqVZm0y0AFoRjyaFOBJelh1I0A0oeV4bI0n6HQCuQU28PxtIACdCd1ZAjp7DFhGg84qBzrhhhHQMU/HgU5RzqIk6Bx4dMgCCcpz1hzl6XAqKF7ONjuwzabtvLcTgE4AthO63Tz/jjIAlxdOZgHoTOjGA+jsPgp01qgsb43zejzoZO216bWSrG32vCh8noHOkSHpwKOHVZ6z2p0cmrNRlTn1Ks3aIo6tNtDJ3v7MoOOP/p7QcnzhGlTQOQWyDGzgmW0gAJ0J3VmmftNpUFnO5sT3nLXuN1NuTK8lQadpBHSGpCOD0oFHJLycstB6O+K6MmerSjPrLZCgPKtFFdk7A9CZ0DbyzB1A0EEG8jnZNhCAzoTuUJrsOw3fakpD9SoNb1Jpzjp3hWtVGtpqQQYukGCHSrL57rNe5umkgk54nQOd8BYBOj56bRToZBEZlwgmSJ1eCzydYKptQrehAHQC0AkM+Hl0Yh50mhOgU+uAB/AJb1FpCE9ou0WtFWc2qyS7wTwhDzqH+xOeTgA6z0PmQSd1sjupoL6Xl80Fns6EBjlAB8BpSoDOFpXi4Rjg1KeATrOKs7aqJLTZvvcUhhdKQ1ISdGx6bW1ieq1eJRl1KdNrR3/Tcd5Oct3OhJbfy6sxB51zoM+JYAMB6EzwTrOM9Tl8uyFogCunLvGbZ80qzmzUOZUXq6qgUeV5G1SWv0xFOQvUdqDX1ukcepxvOitVkbtGU9PO0QWzL7UFoGVZjZpVuEdMsdnFolObXgtAZyI07IDGAIBeqjYQgM6E
B50UwDHg4b8DHECnMGOLMs5cpprieuVMX6ya0pVaNG+dhvulWLf014dimlG8SjVFa1SVv04VOetVkrlR1XlNykhbZSHTLlwa0PHAk2jQwfecYFpugrefl2rH/HKmKwCdCd5okh5OEnDwfPzVqOrCJpWE12t2RZ0Kss9TddkFKimYZSHT0e4hHXqiV8Wh+SrMWqi5FRtUlLlMuVOWaWHVHgsqYDeCMReITnC5vZwbdcBb4OW8lG0gAJ2J3HlaxNqWxJRawrsJbbOtbwAeItrCU1ZoTkWDctIXKHt6tarKFqq4oFqdbREDnkceeEozy89VZeEihafOUml4iSrz1qo6v1aF09fbVjcj2+B4T4cptkQk20SWX0B74KkFNnDSbSAAnYlsdAnQIXAgOaUW2p6IWHOg86mP/lRtf5M6DkidrUOK9Qzrwfv+osNDRzQ8eNiA5967HlSkbUiP3NeqaKvUvV967b6PaFZxo8pDWxJG6YHGAY/zgPgdjCoDGQQ2ENjAc7eBAHQmcqd5NOiYl8MGoIRJN1sUW/26t+jgE1J/VOrpHNBA72HpiHR4aNjuGnbAw+affd3SkV6p+4C0fOEelWavU3moNrmztPNu/HSb3wj0uRtb0DADWQU2ENhAADoTHHQIg056OknQ2WmgUxKq1cyyWv3+N/stUo2dpYcGhnV4eFAWM21x04PS8JB6e/qkQelwXPr2V69XVcEqzS7dMibo+MACN+0WNKKgIw1sILCB524DaeVEIAXXBJXBFpWENtqCUBcqPRK1xm4EAFJB5nJ99F+/pqFeaahfNp0G4AwORjQ8HNHwQESHB3rt+WBMGoxIr7v0vRbNVjB9ua3dqQjXjcgnCdKJbzqB7YzIJpBFIItT3QaS/cP4IJQ2s2ircqesUHV+ncrDm+wqC220zsY/Y7t7ftu299kbVJq9QVV5W2zO/5zynQqfvUJl2Zs1v2KXhdsWpa9XTf5WcZ9R0Ghb5RdMW6vijA2qzmvQrKJmVeXWW1rKPKdiu4oz1yln8nIrk7I9TTUF9SqYvtouftt3hvAmSw+9FTmb3aLG3FrNKGzQ7JImwRPvCtPXJPkqyVpvdMMHZZCWuknn+aNeLwN+k2ZeeYvCk5YJmUAn5RRl8KG9zu7QhDygER58+eQnLff5lTuMRtJQJjTnT1tlv309/j9p5pQ2Gx2URb2kWVSz2/iBdurjXpi+SuV561RVtEG505dqZukWFWSu1MzSehVlr1VlQa1tezN/xlb1tMo8GTb5PHJ4QP0DbZJ6NHy4S8PDMfu+w15sh/52WKW5yzWjpFaFWWtUXdig7EnL7J6fvsZAbn71HuVOW6vy3HrT8dwyghc2Gk/IH3miP3g9Pv7Y+brW9InMKRudIh/qQz7oy+sGuSBr/y5v6kqT1cLqXaYf3kErzxdU7TR6oQ89oTuvL+yH5+iJekmLbVKXby/wRj1c2AB5sRVoIA3/oY9yKMOngQbKIT9pve3BFzolLfkplzs0kA66KdPbIOViM6SDP/J7/pB9Kn+8gz/Kpjzq8XZOGdgW/EEvdYbOvtDy+/ZGHbyjXN6TF11wh37SYbe8g0ZvD9TFRX5PP2lISx7yppbFO9/uyQOdPPMy5M5z6sAG4If//EanlMfFb28jpOE3eUibWhZlUwfPTxR/1Ifs0beXje8n0Ct1887Llv/0N+gSvXk9QSeyhB/y846y0R82Ac/YH7+R77kzmB7fMMrGve2gC95RHjKgTu6UTT2UjTyQJTSjK9L4/Pw3HvIaNLOwSVPTFqsqt0FzSrYrf+p62zAYR2ZmYcvIICQFjNLIfE7FNjOygukrrfJbf3ZQP/vBo7rntriu/9ETes3efzfiZxbV61//6Zu644ZO/eHWXl393Ye1r+n9xvzs4m16x5u+opuvbtXjf5Ku+e6jetNln1borOVavuByvffv/083XPmk7r/9iH78f/frVbs+njSA97z1q/rtLzp1/x2D+tE37tNl2z9odMwu2apP
fOCnuuHKv+qJ+6Rrv/+wdte/1zrbZfNfoXf/3X/rpqsPGD0+X/ZZS+39e9/2dT32R+mXP/mb5X/nm/9b4UlLTSEfePs3devPWnX9jx6z96+96GOmtBmFdcbfnb/q0j239ema7/05wZ9rqO9405d08zUHjT94f+Oln7AGumz+paK+X/7kCd1/x7B+/H9/0it3/Zuq8jabct755i9bfX/8TZ9+8LV7dWnLBxId8mZ94eO/0HVXPGryvOnqp9Sy6Z1GJ0b30fd8T7++rl2/vq7V8m1d+7aEsW3Wx/7lRyavG6/9q6654gFdvOM9ypy0QCU5K/Tlz/5Cd9zUqWuveETX/PAvunzPh/Xzn9yvnjZpeEC20+fhI90aPgzwRCX1q6sjor6otP+xI/rdbS5v86Z3GuAsOecyffbjP9eN1+7X7Td26Tv/83ttXf8O1RQ1Wefz4Xd9R3fdGDF6fvi/f9S+pveZURdnrjl+/q7ab3q/ZNsHlXnGYiv3v//zJt35qx57ft0Vj2vLyrdoctoCzSlt1Jc+eaN+c32b/vTbQd3280PCTkJnL7GO5wsfv063XHtI6IH76vNeZQ2KSL2f//AxsxPsAvunU0V/NLD//fxtuuOGbuMPvVMfnURRxmpd+/1HdO+ve628m6/ZryVzL0nq75v/dbvpHTrQ46KandaYkcvXPvdra1+3/7LL6l256FVWHh0OdgQd2BF0bV7+Jk1Km6+5ZU368qduMnu499f9Rif8Ydd0rPBHvlT+6JhmFTfoqu/8WTde9aRu+3mb1efAzwHkt798p+6+Ja6HfnfY7Ax7zjzjAisX+f7590f0zf+6UwceltlfzuQLrWOkvF/8+K/Cbh+487DOSJtl/Qj9BHTQbmnzyJNObUHVdp2RNkcP3Dlk7fYXP35cN1613+oB9BhEUQdy+/PvZe0CAMybutw60Su/db+16btu7La+h/ZKp8i3R+SL3n9zfYfJh061JGuttbOrv/uQ2Sf9wZXfelDnzthl7R27ONH8fetLd5ldAgQLq3cIfft+gv4F0ECeW1a+VdBJ/0H/8/Uv/Fb0f4DO/MoWkyN2hLyu+s6DBg7Y55rFrzX5//E3A2bvn//Y9VYf8kSP6AH7RD5f/tSvDPwnpc3T5uVv0c9/+Ij1xeiHfhI7AnhWLrrc6rv9lx3Czr72uVsdyOTX2gAHOrAj2tC133tMc0u3a27pDru++5V7LfioJLNWXMlZtFTQyThtiSEangkoB0hgSHgpICGeCaiXeseQK4iYyt5sSFc4fZ1mFjbb2o7SrM3Km7JaxRkblTt5lRHAoWBl2bXm3ZAO74eoKPLh8YDQfLBGMdQDqpKX+vGODPFDtUYfyIoiyEfDAeygmzsNDwEYEmdssIZBesqBVujmP+mhgXT+Tnm8p37ulTluFE166ufOiBJeGFVAFx0P5WLgdvAZ3laB86J4biO6/K02QqB8FMqdhmKywWPLcCMO8tPRcffyZgRKekZCeJHQhWcJ3Tyng2LRZ1nuemVPXmxeT2XBBs2vaTEvhw1AF9Tstg5r3/Z363CfOy10cKBPfX3O0+nvb9fgQNxNuw1KSxe1qCh7ZWIPt1oVZW1U5llLVcVoPrRRlfl1Oqdqt/LT11uoNvQiT/jA0JGb1xsjquPlb05Ji5WHN41c0AOjOOTMSGxe2Q57jvfs/2MfyB/9QA92gry4s/B1etp5Zi/o1bzygnrTB3KlPvSJPaE/3psnMd15Qtg9cqd+G9Fnb3Yd8aSVWjzrIqsH/rHt82fvM68eurBL9I/+sG/yUz70oFdmAswbyHQjSupBruTjPXxTjs+P3JG358vfj+aP/8gDPigHOXn7gh7qpZ6paee5gWWifZ03Y1/SbuGHdNgt5WGnyIl2i3wpz7c7/sMX7RmggW7qJz/008/AJ+3M64t2i7x9+eSHf+glPeXwn3aAHWScfr7Vz3ueU7/nk3oph/KhG7lQPv2c1xvPocu3oxPJ3/mzLjH7QC7wy3/0
hj6wg0XVe03+9LvICb2iF9q7T+f1hZ7gh/eeT8rheXraBWa32AdysXV2ubXKmeS8fdoDngp0IE/KQQ/YA/yTD7mhR+TPe/6jN+Tu81Mf+ZA7Msw8fanRPKOgSelpS3TvbUM6b8bF5vHMK9s9NujMr9htmckAI4DDg3fKQAPDKsl0DRgwocKCaeucAec2aHZxi2ryG5U/lakzAGqruVa4WueU7zF3qyy7zs5oqclnSq3B3gNI/jlrQaiTMorSN2he2U5DTJ4BTLhslMs5L9TFb/I7OjbYnTLIy3PoJR91IQjy505erawzliWf501ZY/mgnzzwRn3kpSyeUd+som1J/njn+MOQ4W+XlU1eT5vjD6B0ZfLcl0U66GJEQL18JyE9aTyP8ACtvCN9+OyVRjdlwAvPoA/eyYvSqwsb7QhqwGBWWaNyp6/QpLSFyjxrmeaU71AhnU5urRbP3aXHH+p3AQVHpFis1abW8HKIZItHDuvuOx5XzvRzVZG/UQUZDlRmlrSoOHuTZhQ32zcifrOlTnjyWhVmbDa64AmakRF0wmfOpFWm0+Pjr9kGM5THhdzgfXLauco8/UKTJfWgO+rhN+/REzQhI9Ix0OEdZZxbs8+e8w6aodfbDLJN5QF7Iw13nlMOdWE//E7VheebOrydUDf0YNvcyYOOyU9558+61OzdOsyQG+hAFx0kaQAm7rRL6KBj4j8dAu2EjtXx12R2QzrHH4MS+Nti/NFhYDu0X+7W4Vt7ovNpMODG1hdUcqz5RutIoJUBA7TT8S+s2msdEPzbwChvqwGAa0MblH3mcuvAyE9HhR3TkdFOGGDBP+VRT/aZy6wegAYbhx7ogk7S0WG6fgVeeQdo0tbXJuxte0Jnro8gP/KEX2eH7Kix1eSGPpAv9mBAn+i/qA85nmj+0A96wT6tI8/cZAALnQxMfP3YJ3w6G3PtCoBADtydTTk5kA49wA/l+vcMFHgO0CBn7Ih8DMyQI85A6KwVJifkwn/slnvGaUuNPsqDXuRJvsUzLzG9YXfYj7P7daYn6EdX4AT1oKOffusvdmeaDUwY09PJPnOlFlTuUVVuozG4oPIiXX/Fk7Yanbk6DvSCMEDEAVSLGUjBNAxyvaUjTXUuqLndFhPyn00j86ass+dUnjtpvf0vywIMtmlW4U67k3Z2MR0xHVeDEZo7mUa6wd5DdFE6YLDRGAC8yM82LVz+N/QnN6EM1RuoAWzUTR4u3D2eQSfzjbzjGe/mV+wVvPMOmqgf+u18GfjLa0rSaPxl0vnQwdFp1Qqa+U/5lE050Ab93KmDd/BBWmTHf95lnbHCLuqaW7rL0kAbMmG0QF7ykZ7fpKHc8NmrlTdtk2oKd2hO2W4VZW0SEWs1Rc2aVbJTxVnsNF1v4JNx1nl69z9+TgMxItiOaIAYasU1PNSnw0MumO1Nr3m/sqcsMu+oPGerirPqVV2wXUXILadOedPXqSB9kyrzttmuB7NK9irzNLw+OgyA0h2NAB/QubBq33HzB9/IGHl6dx2Z+f+8o37qXDLnlaZD8mDX2MPimZeZLqGNPOlpF5qezptxqemBZ9PTlurcmktMJ+Q9f9YrlHHaMks/Ne0C0wH5eUd67tBC2XNKdpq+SY+usAHuvMemoAvbhLZpaUusHSGrRdUXm84pi3aVM2mNgWrm6cu1YsFrLB8NHzlypxxAinYI2IXOWmUdieNvndVHR+P4WyPH3+oEbUtNF3TotCWACZtzwMKov9ny0UFSPv9JR8cMv3QoyGhR9UUmI+rBfgEiZMz7Zee8xgAEmwVwkAt0wp8b+K01/rDZZee8ynRKx4uN0PFSD3fkMj3tgoTOtyVsqt7KgW7aJQPkC+e92jo+9E9HCt0AHrYA0CEf6CL9hfNeafzBP3qGLuR5MvjDBuiM6Vugk/u0tPONXwCb99CNvfAf+4FObMgNKFps4AndDng3mNyxN/REevTCHeCg78FekDseB7ZHvfxHLtg2wEJ9bjDlAHHF
gtdZfegPAIQu5ISdoGdslvzY8NK5l1u5lANgMaiAdnDiT785bIM73ybHBJ3Caa5TLpruOk7u2zf9i4EHFVeEGk1x2WeAkqtUnA4IbdO80r06p+wiASLkKZi6SXmTN9hFmqqcZgOcynBT8ndNXou4eEY+0vnfRdPr7PCwGfk7Nbtol7hTRmnmVqOhOne7pS1Or1f+lI0iPWGKPmyXDSo5fIz0POfkS4CQOwDIc/5Tjk9H+eRHBtlnrBU8Fk7bYjTOK92nBRUXWz7HX22Cv00JukfKd7xut3w1eTuMbuiBP+qjfsqlXt57/jydPIdfZFKV06LwWWsTZ9q4BZk8z5200eQA/wA26SirMneXcqcwpVGvgvQ6zSrZbefolOdsU1Fmgwoz6jW7dJfmVe7Qjoa3WaR0X+zwSNj0Ebd+59CTg5pRtkbVRZs0s2SbwpPX2y7V7E4N+FTlN6syr0lV+S2qKdylihzc+N06p+xi4wc5o8uZBbtMzvCLPHl+7Py1mFzQN+WhB/TibKrFyp1TvNf0zT5x2CByQh/UO7dkj/KnbDZ7QVbzy/dpVuFuzchHVzusPN5jx3mTN9kdfqhvfvklVt55Na+0fMgdPVD/zALy1ikjbUXS/rCvhZWX2E4N2FPozHVGh6cbfUEfdy8Pa1tT6dydbRigTXMDIWyHjhtboWPPOn2VdRy0MTpaeOQ9tAIAtCvaqx/MwaMN1qY54PDlzSl2gEg63kODDdhCjJq3mmz4D6/QxZ2OA5lzh3faPzzQ8UEHHR700THx3gaQIabQ610bDTcYH3SIlEdHBh/UgwypB3p5b20ix7VhG8hk4iE0Wj0MtqiPDhP6KZ82TX3w5+ujw0NuyInykCttkHKwCV/eyeAPeuk30T30WZ+SkCt6DZ2J1+v4gT/kil6gE31i855+9I3+4A/+ARLrY/Jod07u2DJ8W388ZZ2lh1/khJ6wTeSP3pGbL8/6KAZVhTuT9ghgW5tO6I1yzIZzG60c3gPseFqADqDHfVLaIhtMMLg2nSTsAF1wpc0u2mOM0UhpNHNLLlLO2euUnrbSDJlOGYPgOYKgc4TA8FnrbUPIgqm11rHS+OlwKIcOl3w0bNLRQHnOM36Tx4HA9uTvqpztli/n7I2WpyRjq+Vn3y/yVoZbLL//T3oaMP+pj/QsXuS5q6feysmbvNmeV+fusHSlmY0JGpusQyI/PM0t2Sd4IB00hs/aoMzTVit/Sq3VM7tor6UjvQM2QHW7Qmd6/hx9RdMBxc1GD/QVpzcYPeQnPfzRIbHWhQ6wYKoDpvnllyrr9DUqy2oyvvlPB0r98AN/0Md/+OVeGeIbxU47HXRu2eUqTOc71W4DmvIwUXebVJLVpOyz1yo0eaWKwyv1syvv0mAvwQSETx9Rf3zYThC96orfKj/zfBVkrjZPqSK3RQAXB7/ZYXDhBpXgqWUQqVSv/GlEuTQr6/S1BgjoB7rgL+fsDaYXOujj4i+8zeRHuejH2doOqw/5kn0L/wAAIABJREFUIcfpaStNLjMLAJOdyp20SQsqLjV7gQ70z3/0PiXtQpMvQJCetkoACnQ7QHH2QHoOsUOP6K9wWp2mp60wOtAf/JJ+UdXlZu/QQfqavJ1GD3fscE4xA7JGe469QOeyeW8wOrBn/tP2fPvjjtd4/sxXW0NH1+lpyzU1bZl1DPynQ8C2FlVdarTRYfHfD46mpi21uumoAOHzZ77S/tNxePDjDlBhU3Q88MTgAFukA8Lm6Zho77yHNzpOeAa00QHP+U8HR/um/KVzXucAI7wtOdgAYJAB7xnUcUc2S+e8xtoggytAno4TeujQAHv+I3s/WPP/kQeyY8CLTqCHfAAe8oFu+IMP2hJ0Ut70tGXJwQr9mueH9CeaP3QPnfQp9J/QDX3QiTzQO/qjrcMfeoA+9IA8vJ7QM/RSDnfkQAeOvcAfzykPGwe4FlZeZsDuBz+Lql5hevd6Rl7YC/KAHsrDXqgXwKY87t4O0Qf6xIap
z/XffLPdaNOFeJ94OkzvrT739QaMgOKYoENlGJBTugMDDIMOg4J5juGgHC6eQQDEY5iuo3enSvp3GDJ5ECwdAXXwn7IAIp7xm3rp5OlQuegAaLQz8ndZp0UHQqdFh0Hj5k4nzp3OH4BxncN2OVDYZZ0Kz0hPOZRJHQ7Umqwe3vGf55RFR8/d/yYPnRgdB8/4z526+U1e6IU26HVA5Mrx5VIHHRY0QA+/SUedlM17OjCe0VH7MueVXmz/qYe8pOdOfujxdVJmTS6BBherOIOpykuVN5Wgg4uUB10Fe80LwiMBiCrzt6qmeLPe8Mr323cdC51mU4IhKdIhNW95veZW1Sl32iqbTgNwAC47bTTcKKbbuOzsnmw+7u9WVd5ezSpwMoI+ZIQ8OBIB+tHfcfGXh0eHLblOHEDwsqAO3iFLZMQ7B0xs20MHtsX+YxfIF3pISx5kTJ7sM9YlwGSH5YF2nqNn0sEP/8lHfnTOf3jydglt0ERaaCINdXI3HSV4IA0DGfJy+TS+jdCmaE90PrQjLjpkBnK0Fzp73nOnPbnOda91Cs67a7EOzHl0l1geAMG3ScqnHNombZYyuNMOufPcp6UM6uE59fOcvHR80MB/0tAx+b7A8+HT+Pbu+w/y8Y5ySOs61xYrg7IoMzUN/53X7AaX9BV00L5v8Wmh0QEObXGHXV42vEulh7yUQVnQzf8TzR91wQs0codvZEO90Md75IhceMZ/3pOOizReV9BLOmjnojwGVjwHXHnn5YrnDq9eNpSRakO+PmyMcriwI+RK2bxHjvT5lOPpwKa8Xpx36r79ualWPqmssWk5vDc84jFBxzcUGgQNjcbwnx+6yRomDco3kvHvbooK4xrrciOJxFRQwtARAFeqYY6VN3g2tkyTcsnerrLsPXaVZu0aObo6m3Usbg+2moJ9BkILay5ReOpSVZesUsfBI+qLuYWiQ33SQ39qU2XRChWHVlmwQHlOkx365kGHE0ftOGy23Qm5g+PKQkRhOcBN0jOODQTvx9cjAMlF+6L9AWpcPAPIxm93DryC94EcXjwbaLbvPAQuuMCENbrpqlb7VoSXw3TcmKADsDBK9SNURoXX/7DtORq8GyWBguNddDgemX3nAzLzDNBxLjhueHA9fxkQ0caCXMIdWbja5K7sRruXZreoJGubHeRGsEF+xipVF6/VN756tXk7rMth65sPv+9LtiA0P2OFBQzwDcd5OO6Ia6bVCFCwK5tpNsonxNUNHJ4/3YGuncwabUTpQQbvCC+LNskzBoQvXocSdOaB7J/NBlw0JlG0BBkQMELkM9NsfHdKAs7R33To+HHFcKtwo5gLfvQPMtfKg8Qz3R3YuA9E/kNR6p15WS7mmd0cqgsAoNExV8vHy+A6RhkwRx5mhTsX32CaUi43PZY/jfUK24W3Usa6haJ1qtvwCgOd3ojU2yMtnF2rysK1Kgmv1QzWS2Q3qCp/R6KsrSrPrbOwa0Kv2YWAesoSRygwjxzo7xj1l8MUXJ0NvvD8GYjRnpgdYJqF+zO1veDd+B5kIJuTIRt2m9lsodVE5OHtXPu9xy16juAWAhWSwJO6ONTN3W024yYCBkO/7gcHEg3AfdPhg5xT4tPvBjAeyca4+2gXiyTLcZFo1kmFmywqB0KJCw+uY5EBIdGJ7ywhFm/ijbgNQN10WJN5OXhARZl1qsirV0nOKuVlLtDtt/7ZvJxfXPMHZUyarYqCdarI32yHvnHaaEVus4VHl4ZrVZqzwS4Og2OB6IjHQ2RToL9jt10XkceHX0CH9kd75M5sAIA+XrsLntOpPr0/CuRycuXC+p+sMy608GzWjv3+pj7rEwiFHzd6zQw73GBeCKjEx6Edm99nIXaEFD4bqBD2R7z9eHfC9Ig7x90CFS1M0XYnaLQ4c1bpBtexy8A8D7wPvJC8TSMX3knOVs0uvci+zzC9xgJPItiKcy7Q+979aQufft3l71VF4QoDoxklDebJAFjVBdtUkLEhUd46
2+2Afd7K8zaYx+RPLCVcMtDfMerPQk13mKfILABrdYgcZZTIzIBNUTBCHGMw92ztMngfyO1k2A2LbvOnrrFwaRbgs+aMBb+GJeMFEgAKRBkQEw9wkJgFanwIMtDB4Me93ApgvxJ4rDtx28z5Mc/nV9CyQpiFTqxQZt+k0uw1wXVMMljnPI/wehWH1lgggIEKAQHmlWy29TWsuQGA8IJypl+gquKVWnLuFt124wMqzlmseTWblT3lPAMYNvJkkWlFXoPy2MSUcgGq8HJ3hVapOLTOdilgd4JAf8djuyzW3GDrNGh7LARlLQYLA1nsyH38tvdM7TJ4F8jtZNiA276JHStYgEsfz4JSFqcSRDAKP1Kn10iEi8TFClO8EVYME3/tpk7cFiTj/TagYTdZtqAY485WCewh5PcaY68f9gqyrTjMy9mgyrx1wXWMMsDzMA8kf63K8lbZVZ6/1jwSvB83FVZna2yYGiNCrbJopcoKztPrX/VuZU+do9mVG1SQtUzVRbUqzMSTqbPNPJlWK8tNlJu/TGV5K1SWtyZZNuVX5gb6O2b7zQVw3JZLrA53W6OstxXhrOpnhmC8dhc8f+Z+KZDPyZEPWxfRp7vdEtaZp+N3TmBRaxL8U0EHTwRkQkmADmj1yQ9ca1NfbtoMDyj1Ono6LfVdAnhyOBphvXVY5Tmr7eP1isX79OZXf1z/88UbdMv1f9V9v4vpL38a0IP3RPTgPd368z3dwf0Y5PDQvVGT24P3dmrUdU9ED93Tqz/eGdOTf5Hu+12vHv5jvx59oF/33rVfD9/fqgf++KQefahL9919SH+5L6bHHxrSA3f36sE/9OmR+4b14D1RPfCHLj1wT4ceuKdND97b7upCZ3+I66F7Y4H+jstuI3ri/iO64+Y2ff2Lv9LrX/FRLT2HHSXYpJGjDtz+aSPT1/z37c9tdjnSNv3z4D4iLzd7E/w/cXJg/zz2eANHWBj6yx/vt10KAJxREWypoMPeOWzsBtgAPGS84ScHzDXig38SqfwUW+ocM1tMJBbEEXnDtgjsSrxgxnblTF+skvylWjy/Vp//zDfV0TokDqxsa+1XR/ugeuNSPCZFe4btigX3Y5KDl180MqhRl8nziKI9R1/DiXT9ikb8NZjUw+j0TjdPL5fnlJt43zOsQH/HZsesk2pv7VOki2NbpQN/69EXPvMNrVuxyzZfJcx9TvlOi3Ij2IBV/HywnVlAFKifjXDbzVhbTW2fwbegZ/0mHXz7Os5vX4lAIr7tMFt2408PmRPDJxv/uSZplwngSeNDMFNjfjoNN/8Ptwy4/X7GOQ/BFGUFNCn37FrNLrzEVmCznQPTM0zhVBSv0L+8+9Pa/2REschhDQ5JvX0DivT2KT4wqJ54vw51dCoajQZXIINT1gYOHeKICWloaEAHDjyljo429cYH9dtb/6gP/D9f1OyKOk1OO08XzHqVLWtgqQEDxOJ09rVLgE6iMY+0y/GXMARpAtm8YDZAwFHeVk1LW2zOCtNqd9/cb7/ZkYAr6bSk2GgaH4H47sIUm9tGu15XfvNh28mYjefGyjRCdJMqsrerYEq9zq2+3HYznVPRrOri9fr0f3xXTz7RqyPDUldnr/YfaFNnV4+iff3qicfUHYsr1j9wynY2AdgGg41oNK72tm719w+qr69P7e2tikS6DYQ0LLU+NaRPfPQ79q2NgSEfZt32ImzbtF5zit0mnCPtMehQA1mcRBsI1dsO15ypw47WfIf8+Q/+anEBBMTYBq6pM2Te02GNAR8zARx/9sy+xg89Y/TBiGLZsG6vLW5j/o7Dhgqzl+u/v/AztR1wB4a1HYqpoz2ieG+/7THZNzSoQx3tdkV64wHoBF7OKWwDcfX1DquzI6ru7m4NDvYrGuvS3/72hA4daNfhASnSLn30fd9QSdZqC01NBv6E6zWnKHEccMoocqRtnsTOJ6jfTeOdanIIuTOsCArjOAS8Hr7tEIzGDuRERI/ltKQBOoyiCCIg
E2iFm8R83HiZUg2bxViMvjigqCy8Wn/3un9X1yG3keRfH+/Q4IA0OHBEff2DipqHE1VPb0RdsW61dbefwh1OMNIPvL24fV/rjR9WPN6nnp4uRWOd6uuPKBrpVOvBDgOeP/+pU+9662ctuIB2ZqGoWXUqy0x8yznVOruA35cGyIXcQZecNsqZPkz7EnEJfnAu0rh7r7F2BtDxIZoAD6fF8SFovDm5VNBhURuHLy2bf7HKc1aq86DUeUiKRaSO9pjaWrvU2dmt1vY2PXVgv1q72tQ33GtXe0+botGeAHgCb+fUtIFIn7o6+u2bJ99xrC3E2tXb36H+/k7FY93q7owq3iM9dn9UM4s3aEYhJ/FuVTVHMkxzW0qltsfgd+DhnTQb4JylHHfUOyfHcqjcqkWvs2862CgH9o3p6eAa+UACwAP36GPvvdIOAnoung6LSDmDnDDPz338B+qLuFMoDx3stLnpWCymgYEBDR0e1sDQoGIDcUX6etQV61Bb96EAdALAOTUBB71H+gxwOtv71NMd18BAn6LxVj114CG1dzymI0ciBjx9sSEN9Eh//9p/U3W+Oza9JmeHqsPumIKT1skEHsZLw8N4qeghVG/HgPtjrIkLuPo7jxoQETI9Kvo5heY0MhDBxi6hzMXh6fzuxl6LsbaT98b4EJQ0ck4S5Cz1zDWaWbJO7X87okjnYUW6OCVMNjcdj0ftA+nB1kPq7O6yabWOSLu64m3qHYoFoBOAzikNOv29Elekp9ci12K9bTqiiAYGW9XW/hd1dR3QYP+AOg7E9cSDcc0urbVV30VT6zS7wJ1nlWyPKQ07eBZ4PCfcBkL1wsNZWLXHAtEAnQfukGHC5LTF42/4yVY0ZGK/HACHtTp3/Srujnr1gOPj/Y82as5XyWIrlDV6xa53aSgqtR7oVjwa08BgXG3t+xWJEhbdY99zYr14OXzT6VZ3rwOeYHot+LZzyn7bifSNrHXiN+0k1q5o/IDivfvtikYPaKA/pp62uDqeGtau+n90MxPT6lWZ5U4pPeGdy9HtPvgfeDzYQIijxAnhb7RAAn7jsIAjfJoZz2lJrtPByyGCDbT6xY+esr3XkvNxzwA6fNicUbRJH3rPf9muxTYHHevW0OGITRXwYdQ1pl5FY70WLu1BB+AJQCcAnVMWdKIukMAtvk0FnYOKGfAcFKAzOBBTb0+fIm2H9Y9v+A+LEi2ZXq/qcAA6AeC+iB4dO9tnbrI9NIkD4NPM/37uDlvzyd6B7OuZxJCUgUoam3GGzlquWUUcLer263ndRZ+y3QXGiz5IKjpUb1vkADo/+MbNOtIn9XR1q7Nzv3r72xTva7VoHOtUYoSDDqgn3mvRawBOADoB4Jy6gIPuAZ0+d/HbPB0i2NoVi7UqFgN0Dqm/L6LB+IBiHYf15c/8xL7rFE+vFd91nu08q2RbTWn0wbMXsaN+OekhVG+OCnEBbC5AOD9bDhE+zTedcY+rdq7RSlujQybC3qaknWcZ2PV2LKRKGm2o3iIVysPrdMdNj6q3+4giPV3q6HhCkdh+RWIHEqATN8CJRofUY8ATU3e8067A0wmA59QGnqP1TzRnj2LRTsVi7YpEWi2YYCDWr+7Wfl33k9+rLLxKhVPXqSaHQIKgAw1k8CLZQKg+cWwNIdIX2VTbwqq9hgk4LONtLpDGNxl2CvV7r4FUTLPhHo23NXVSyYk5Pb7p/OWP3Yp2DGhwIK5470HF+w8Y8LDYzUZ00cOKRg+rJzZkW+B0x7sC0AmCCE7dIALTPd9wOhMDM790AI8nrlgkqpiBT5fd49296jzYq9tvfFiFmefbjgQz8jgu/kXqcIJ6A9knPB0cl/S0JXamzgWzL7NvOiwOZa3OWE5LWsG0tXbeDfHWfkeCL33iFnOPOFRqrEyphg6isVqadQSR9n4NDcYt8ibe/5SBTizerVi0X5EIXpDUHR1Wd7Tfptj4thN4OkePdIP/p4znw4DMAgeYhk4MzhKbsMZ6WL/T
p754v3pjfYp2xhRpH9Rdt/5FBRnnqiZ/k2ry3PlXqe0x+B2A8EmzgVC9LQQNn70iuT7nt9f32LcdAgnYAHos/LCQab7rsLs0Hg5zc2z4CZhwguFYmVKZYi0PB3nh6cS7hhSPdygWf0o9sSfUE31KDnQGDHAMdCJH1B1lw8+YRbEFoBOAzCkDMkd7trEuxXoPKRo/ODINzW7hiV27Yz2D6o0Oqpcp6faoBqLSr2+4XwUZizS3tE5lWYlGHXgdgdfxYthAYkcClt0wQ8ZOBLf9rNOwBFwYN3qNbXD4EET0Wt6UNbZt+lXffsS+6TxrIEH2VnfiaP5mPfKnLg30StFom3oiT6o7AuB0iMWhseiAIpHhxDWobkZusV4Loz5lO5yjO6Dg/6k31Wag0ybW5oz2dAYF4ODtRLqZautTR2uPOAbhV9ferfz0RZpX0aC8qe5Y4NRB4Mvyt4+e9fdRHWyTBVNUZD3TfasqEnmOvr9Q8jq63LH/N+loOkfX73hwwSHP9Psl4s0l1mmyYTT4wZpNPB1iA9jsc7zPM2nMx4FUZPKbCRIyPbd01+iw6VGKHmG6NLRVxTlr9ciDnerujtgWHgAOi0JdZE7inBc6VZtO6FQk1pN415/43hOM9gPwPVVtYPS3HPf902+E26NYLKJIJGIDN7aWuv7qu1VZuFpluRtVGibadGLvv1YaahJXWbgxcTWoLNygUtYA+osdtglayqmzd7znqgg1qjJ7myqzt6s6a3vizv9tqs5y98qMJpVnNqoqy3X4R98rKeM4LkCEqyrLAdvT7/4992ZVZTYn7i12L8tqTEQgAjJ8o0tcWS0qz2qxXfzZyb8itG3kXXaT2POSazRojfTLJ+V5wtNhes0fAPr1z9/pgsssnHqcow0AHICHBT0AD27Sjs3vtS0MxkMqzxCA40HnLw+120657BvVHTlgGxi6Q76OuEPDAJx468hUgs1dA0isTzhVO5yA71Ne9z5k2tqBCyKwKefEAC0W77LjDvguCuhcd809qihaq9LczSoBdMYZDE6U5w5wmlNAB/BJAR4AJ3H55/7uQQfAGbkc4AA6XHTyAI27tho4AAxcI2DTkPj9fO+A2egyj/5fnd2o6uxmdyVo8rQClqO9mtGAkwo6STBKAk6jge6LqucQ+66ttpgA/3lm2TmvSmz8uWH86bX8qWs0p8QdNco6HVykyWnnjt5hepzRVAA6AWic8qBxvAOmUxx0ysIAjr+8t3MU8CQ8nhFvyKVjpF+Z/QxXqNGm1Zjq4qoMASpPvxhojxwD/nyO+356WWOVnwS3JK0JL8y8mlTvxE+ppXg82f5ZIp2tjXQ04l282KDDmhzwA+yAnrPTFhqGsCfnuOfpsNkn33RALDwdjsI9t2afBRI824afAegEoBOAznHaQAA6SdBxU21bR02zea/m/2fvPMDjqo6+L6qb6u6q76p3996bXFStYrnbtAChJZBKwhcSWoAQEtJ7f1PehIQkJPTQWyD0FhIgGEJxL2qWXP/f85vdI6/9StjY2ELo7vPc596995Q5c86d/505c+aEAScCTr4GZXH4MTFhHnsnDSUaGKqU69t7hMEGwDn0o3uQia7TXe9nxgOAfJjWoraL7klr5aPfjvDifWQ2BzEze9W86qs2T7Vw/M5wxOm62Z8xAMKJoMftqgEcNnEDcEAqOuC719xj5rWeMjl09UDnMAXO4X4le/n7vmnWAx0DEDe342TKvmCDZhMNOAuV5VukLB9zHWzN8k6aSjSghIW1E9oObPbWFTbrvZv/Xd69XcAQAYieAOQQQAZnLzv885Ttn6ecwFw7cpMjcybvVNeRfOar1vCcJbbzNPhBGJw//OwF64+SzKaeF4eyMBTQITOZiBr67EM7Lcr0gbzX3ADBkcCb0/EAyNN6DmEM9GvQqQtrLL4GhZLYHgWnAuaJ2RAyGgCc2a0+DFBJTQolLlYoqTEKdKLB5eCu9zomuPLf/dl9gP/fszOLRZ/3msic
huIcAvZxCugCMECStqDhVBjgADp7gWd+r2s6YbBpsMgEYMjf79hqOMK2Bj35BFhEAhpUFmoyWxw2uqfu74zYOCOLz2BCN4jpgc4hCBlPO+n72sl72Yf9HHRy/QCH88xCQIfNTQhbZ3mx+RY0GvPYalB2YpOyExfbOawBRQPUga+7vOJ8mPLqFfI3HPLRpYHto4lh/kMbiz7C5sBwegdumAcbwh5qnI0PER5E+IDwdhpZl8bjNB+Lldm9bO5OXr/n93zVtoHnsOxFtkaHdZ533PCGbZPDwtAe1+lgXqMxuEvjgYAX2+1/+K+Fwekpdo4j3gMdD3Q87eYwx4AHOiZ0w95ZdV1zFzZnkVxhX/lhc1hVWKvBTdjciQGexojZ7Z2Axgn48HmvGS/sqh3yL5QdvkWHdHZzS3buAhlMf4uUlbQkfERMgV1pu9zD6yMu0YuUk8SBNxtOBE47qlWuv65rqqNruqMbBcDJ5KN6juwcytwSHwis8/zrb14xTSd54CyLatNlfoyiOSZ54HQNz1lsiEWkaWKwhc8NPSKVa5gHOgcWOO3t7eLo6OjQpk2b9MYbb9gGd2vXrrV7W7YQ/qS16/m2bdu67jc3uzUcB67nSAl/aGfn161bt2r9eiIft2nHjh1GI7RSL/d27dol6CUN93bv3m3tPVJ0fWDK7eegU5q+QhmDa1ScukSh+CqF4uepMLVWvuPHK8c3R+mxUzQ0VKeitBrl+itVlNpg23RnDqlWfnKTEo6foZLQYhVmLFR6/HxlJFQpJ7nezsinYNICZQcalB5frdyUhRqafZIyExbIP2Ce8lOXKjOhUYXpK5UZ36S02DoVpK5Stn+xUgbVKS9lmf1PPH6+RhecY/dJNzT7NMUfM9+eB+NrVZq53AAj8Zhy076K005SQWCl0gc1KBjbpDz/MhWlLhNaXeDEOcoL1IenLwIAaJOlST6xRiXpq5SV0KCM2GqNLfyQkgfMNe2OtTyFqYuU629QysD5VldJ+gql23blUZpRlGB3MvqInn3VpqyguBBCDSsZUzR4QCceN7XnKNO4uxWk1lpigrZNKD3F7HO4ux0o9poHOgcGA4AGgY3g7uzsNMBhsZ8TytxDUG/YsCGy2+oblg6QQpj3tnCFfugFYAAbjs2bN+vtt982umnbxo0bBXjSRgdIDmR7m/73ff39HHRYq+I/bq6KUharMAVTW4VKM2t12qJL9cpTO/SHn/1DKYPGKWXQeBVnVFqg0/wAAnqhCHiKp2388ZNsE0rmaAjFkptSZ+66yYTxSqpVYUZ436G4Y2coZeA85aU2qSBlkYKJzCktVEZ8GGDGFH1YqUMWKCewSDmBJUoZXG1ANDz3NLs/MGaahueerrKskxV3zBw7pw2uUOqg+SpKW6wROScrFI/2skiJx8wX4JM+OPw/cOJchRLw9moS5ijmZZgPKUheooLkFSpMWa7SjJVKG1wp3/GzVZTWZFoOeTLjqg18oJmyMS3mJC1RMA6zZO+CTtqQ2dYeBzh4QTNVk3T8tJ5Bh710UI+wx+FrTaZxxSfZBjzsidCdeuTQ0wOdA4MOSPLKK68YeHD94osvGgj97ne/09/+9jfdfPPNBjIIdrSKdevWGSCtWbPGzr0tNAEbaEOLgT6AkGvuux9aDsAJ0ACiABF0c93b9L/v6+/noOM0HVb1ZyWEQSUwYKyaKi7Qjs3S0w+tkX9gmUbkzVdOYIpiY8o0LHuBitKqlHTcBAXjZ6owvcLOdeWf0G3Xv6wvf+HPyk6aa0tBgnFzLZ4kGhJbtRQkN6gsuFT5gXolDyg3k11GbIUAj4KURsXHTLd7aBZoJMwj+Y6fqbxAo8YXf0hxMdMUGzNZE0rOVDC+SsNCi5Q2eKZ8x01RWeYi+Y6dpZK0pQaiqQPmW9SBopRFShtUrmD8HI3Ka9InzvyG/veHD+mz5/1AWYk1yozFdbrewAWQYR8aduEMz+WE59XRdtCUCpKXKRTfpIzBYQ0qbI6LXutz
FK991TYdw0aeACjmtcqpH1ficZMNXLvMgfv5BMSUhRZa5+C1BkIBPL/5wVNmWutpIsgDnQODjRN2b775pslmBDUmtZNOOkmJiYk69thjFRMTY8fgwYN13nnn6a233jIzFdoFwt2Z3lxZvXEGUAAbtBuABUDZuZPAru3673//q5dfftna52jmDHDSXrS53qC5T9XZz0HHQtj4F9kiz7RBszQ8p05Dsyu0qOo8rXt1l+7482NKjStScWiSijInqyx7tnKTJystdpyly/VNFkf64NE6fdHntGeL9PfbXldpxjxlJ0xTWUaVchJnamhmrXKTypU+cJqKU6pVmlarwtRqlWQsUEb8LOX65yvLN1f5yZUqTK/RsKxGZSbMVo5vnooywmk5l2bWKScwPxxsNalcaUMmavKwpQrFTVFW0kzlJMxRVuI8jckFHOa0yuTfAAAgAElEQVQpK26O1TGuoEm5gZnyHV+iK//fD6V26RtX/k7FmQvkP3GGAQwa0LCchRZAOeG4sRqe22gaUUkmILRAGbFVZn4Lxi1U5hA0vZWROaCjCDTRJjzzsqs072f208Ef4NbrXzPwGV2wSszrdKe0xBRn1IuoBCwKJcI08zkP3rrJvgp62m7UA52DBx0kMtoNmsHMmTOVkJBgQJOenq7c3FydeOKJOuaYY+ze3LlzLR2CHu3CaQy9KUQBPjQYDq4BG8xoaGmnnHKKzjjjDK1evdpAyZneoBvQoQ29SXufqLufgw7zOcNDJ6ssc4n8J0xRYdpcDc+dp1OXfFy7W6WH7nxKY0qnaMixyUpLKFBe2iiF/CM0LHe2MuOHqSxzkkozxyt1UL5qpi9Vx1rp3hufVih+pIYGpys3abyy4sdodO58FSZPU8agscr3TVNW3EQVJc9UWWa5CtKmqTRzpgbEZGtEzlylxY5SYepUZfsnKpQwVimDR9hz/4ChyvFN0Mj8CiUPGKqitKnKiBuqHP9IBeNGKJQwRuMKahQ4YYQyBo3XyJwalaXPUa5/ovL84yxdYeoIXfHZa9SxYbuuvOirKkyfqfy0OcpLna3AoNEK+iZqdHGtioKzFRg80u6xM3NRRngbAYAn19ekXN9iZSW4MDq9BzpoY8zpoN2gnbHcJrxbQcSd27l/R4FVDMHaWFGKlgPgYF67/+YNXhic98gt9vXXXzdN4I9//KNOOOEEA5cPfehDpsk4AX722Wfb/YKCAt100036z3/+Y3kQ4r0tONFcAByAhmvoAUy+/vWv6/jjjxda2j/+8Q8DpD179hj4YFZz2lFv0/++r7+fgw6aDiY2QCd10DSFEqcqNXaYFsxZqdb1O/XYA08rOS5N/iFp+u3Pb9DFn/yiRhVP0ec/ea3uuvHvuvijl2pY1nAtr1mlX333f6VW6e0XN+rKC6/Vh5rOVerAbE0omqWrP/NN3fzr+3XTr+7TRWd/SYWBccqOH6HC1NG67vIf6rMf+aKmjarUD6/7jVY1nK2JQ+foq5f9QJd88lp99LSLdPPv7tO3v/RzjSuZpfS4QpVlTdBFH71CX7/iOxpbNFHXXHydvnXlj1WaOU4jsqfpM2dfrd//+G/608/v0iUf/7KGZo1Rtj9Xn/3Ihbr7r7dKO7br/tvu1aUXfk3TxiySb1CJ5kxZqmsv/4n+ev1D+p8f3KQVjecrI2mUMpPGKT+tXPmpFWbGKkxdqMLUxWKeqLfndAAYNgJlTsd/4jQ9ce82oxHP557WecaMLVqpoVlNFm9tRO5Scyi4+8Y1Gp7DtqORMAv72eQ8TefgNR00HIT2ueeea8CCSQ3zE5PvCGa82R5++OEuc9ull15qJjaek7e3hSY0QgfmPmdeQ4sBdJx58Lnnnusyu5EGgCKPp+kcxDjp56CTndCg4KAaFSc3qiS1RiNzqlSSOVknNZ6lnVul2/54q0YUDFNZTolef/E1ta5r0+rn/6t1r25R69pOqX2PshIC+sTp52r10y/qrRdWa8vq9frnw8/ojEWnaXhw
mH7ylZ9q6+pmrX7ydbW8tk1bX23TxedequFZYzWmcIKZura+2aJbrr9NrWva9Yvv/FInLzxVe1qkDas36bXn/2tnAO3Rux5TQVqRkgelauNr67V9Y6t+/7Nfqm3NZm1+fYNy/Xm64tNXmZlv43+26s1/rtGOjTv1rau+ptJQti7++AV6/P7bpV1b9dTD9+mum+/X1LGVKsubqOefWC3tlB686wmt/tdabV23U9XlyxUKjFRm0lgNzalUlo89zirMHTll4Jxu1086+XzEzxZJep5ZyiaWnWqKCxEJcJ8uDS7q2byWGT9HTATh8sZEEGt1po44w1z62P2tO5uca4znSHBgoQLgYG6aNGmSCWnMag5IEOhoMwhotAbmeaZOnWpzIc6l2qXtzTP0AzrQ6xwFvvGNb2jgwIFGN44SzPlgFgQsSQPgAE69SXefqLufg05hYJly4xs0NH2x8pLmKRQ/UcGE4VpZf6ba1nXo4TsfUl56llLifCaQ1bpbdXPrlTYkqOcfeV47t7TqpMbFKgwUaEXtcql5s+656S8qDBSpOG2oRmSVqH3tFrW92ayM2JBmjpglNUuP3fG4itIKVTm1XB0bW7Rh9Qbd+Kub9bHTP60Fsxaqfk6jdjfv1guPPqdJwyaoNFSqt196Wzs279DIgmHKTcnQy888q86NG/XyU8/ovJPP0nknfUSlmcO05fUWdW5o19CsYo0vGamNq9/WmpfeUHFGroZmZ+sH131JG994Xj/65jUqSC9WVnKZzjr5Am1Z06YffftHys3I1Tev/Za2rm3XFRd9QyXZM5QyeIxG5DUoM67crFF4wZlsjjJbObl81M6+6ojDQ6V8J0y1a5bcsOaTRa09+QTYnA7AA9jgXx1MmGeTWmg5PalHrlEe6BwYdJzgy8rKMtDhDMggpBHiHAh1tAaAB9BBWPP7xCc+oUWLFumKK67Q888/r2XLlmnGjBk6//zzhdkOwY7JC+2CMu+9915LTxqOiy++2Fyx0aygg7ouvPBCLVy4UN/+9ret3i9/+cuaNm2aOQU4Wr7zne9o6dKlmj59uurr63XZZZeZtuZMbczjzJ8/30By0KBBqqiosPmd22+/3eiB9hdeeMHyzZs3T7Nnz9ZHP/pRPfbYY0YnDhNoQ3/4wx+M3sbGRvPwe+SRR9TU1GTthRZnznM8/ECe+zXo1CsUu1B5SUuVHVurfB+T/tNVkjFdiypOU+emPXrwtgeVn5ajwow8dW5q1/YNnSpML1ZBSrE92/LGWzpz+enKS5imVQvOV+eWV/X4Q39VbsJ45ftmqHFujdTRotuuv125/jIVpw2X2qTnH35Ro/KHa9a4cdrd3K7Xn12nwsAkFfimqSRlhpZVnqnt66XH7npSOb5c5SQV6bE7n9HOjdKymsUqDmbo9X8+bWWXT5yuYVkTVZg8WROKqrV7i7R+9VtKiRuojKTB6ti0Wa8+819NGTZHRenDddX/+6K2b12jr111mUoyxlm+qy/6ujo3btP3vvoVZSUnqqm6RuqUfvfjO5SfUq7QkAoVpSxVaXJ4XVN2Yo3y/L00l+OAjnBFtuN0ra3vZH4HLzYwBM/nHsPg4DKNBxuAQwaQCo2HTdy8dToHDyoHEojvBDqATDTooC0ASlVVVTruuOOUlpamIUOG2LUzaY0bN86AAAFP+u9///tWhnuemppqIDZx4sQubQPtqaioyMCCeaW6ujrLgzPDk08+aekwA8bFxXXRwzPKBKj44Y03ZsyYferiOXQCZPzuuecejRw50tK4eSzSAFCAEW1j/ueiiy4yT77Y2Fgz18XHx1seQAgQfT947x2oXw/7eT8Hnay4JuX7lkVAp1K5STPCoFN56j6gU5Sep+0bw6BTlBYGnYduu19qb9WqhlVKP3GGTqq9UM0bntG9d/yPchOna2havZbU1Gt3+3r9+4mX9eFlH1fl1Hq1remUdkgpQ5I0a/xo7W5u1evPbFChf7oKEueoJDBPyyvPNtD52w13
aljWUI3Jn6SHbnlCm1e3aWXdMg3NCeq5R+/R9q3rNH3MFOX5x6g4pVyTSxZqzb/a9cozL+iUpbU6aXGlXn3+WZtrGpNfrvzAWF132TelHc268nMXKS9ptIoC0/Sdq34kbdut3/7kezp5UbWuvfzzeuvfb+jP//OAhgcblM3C1cRlKgmsUtbgcITqPH8vhsABeCLmNdZ6ssYT7GCdJ/M8qYPL31nTSRsyyxZV4X0A4Dxwy0b5TmB1aUSF8+Z0ugT3oQqZnkCH8hzoILjRChC4fOUDGAhrzFhnnnmmaTIIZO4BKg888IDNCZG+pKTE7qOBPPvss7r22mvtP0If5wSnNeTk5Bjo4LaNZpWZmWmA9tJLL+nRRx/tApyTTz5ZDz74oGke1Af4oKmwiPXpp5/Wpz71KQEWlH/DDTeY6zSAhBmOeSnKhka83J566ikNGzbM6EGT4Ye79QUXXGD3KN+5kWdnZ+vUU0+1cmjXofK7z+Tr16BTp6z4euX7FykroVq5/rnK9U21OZ3FVaeoc9NOPXj7fcpPy1JhRo46N7Ua8BSl56sgJV8P3XafdrVu1uKqJoWGTNfpTZ9VR8sLuuf2nyk3cYpyE2brw6tWqnXDq2p5e6teeOQVrX72Tb317zV67J7HlJOcrlnjR2rX1q1hTcc/WQVJM03TWV59mrZv3KaH77hbOYE0m6t59I4ntGPDHgOdsuxMvfr8P6SOrZo9YZqyEoapKGW6ZoxoUsubu6VdO7X2tee0+l+P6KVnHtdb/1qnGaOqVZAyStde8lVp+2Zdd+UXVJw6RmkDhuqKT11t5sO3//O8Xnn+72pe+7refvkNXfapryvfX67i5Cblxi1UdvxC26m0MGWBuWb3qoktoumwXTVgw1wOu06jvIAd7KnT3fRMTGDANOEHjo+1reaNZGRhT2Z8lNubU6mizp557eA1oZ5ABxMZgtq5Tc+aNcvmRhC4mMgQyNxDS+F35513dgnq6667zoQyZqkBAwaYtoHJijkVPOBwyWaeiLVB3GPdDKADgPj9fv3yl7+0uokuwNzS1772NSsbIMEkB20AGwCCFvLTn/7U0kPHN7/5TQMcgBLAYj4HsxmLWmtqaqycOXPm2D3W9ZxzzjkGnmg71McPcKLdwWBQS5YssfqoE00Iejn3GfA4VG/Hfg06tcqKX2ALNbMS5isvOexeXBqcpMVVJ2n7lh0R0AmqMCNLnZuaI6CTFwGde9S6/i2tbFimfN9MLas6V7s7/qNH7v+tStNnmHlteX2d1v/3Bd154+0aWzRJJcEylQRLNCJ/qPJSMzVz3Cjt2tqyr3ktdbKW16zQ9k1b9NSD9yrLn6TM+FQ9+rd/aOemPWZeKw2FtObVF7XxjVdUNaNcYwtnqiRtmqYPb9TOTdKdf7lR2amDlZM+SLlpfg3PGa385BEqSBmpay+5VpveeklXff5CFaeO1ojQdH3tsm+q+a01uvbyizSyOEPZKQlKjw9ozoQG+Y4boZFZi00bTB9UYWa14vRapQ+Z0bvzOpF1OuxUAHawxpNg0YAPAT+Z1+kWdFIGzbCd3wAd1ulwvucva0018hwJDh5UDiQcewId5nNwNkCwAzA4HDCfQXnMqSCka2trDTDcQlOX9sorr7R5mRtvvNEAB42ISAeAGEKbOjHLYUrDnAWQoH1QD0KeH+AGKAAWP/rRj2yupry83OrHbIc5jPSA2pe+9CUzeQFQ3/ve9+w+z5555pmueShAA4ACTD72sY8Z3WhHV199taXHdIeLNb/TTz+9695dd91lwIdGBj3MU8GXA/G1zz/v76CTUC3C2mQl4tA0S9mBCSoOTdDi6pUGOg/cfp85EjjQYV6H+Z2wpnOPdjRv1IeWnWoaUvX0ldqz43Xdf/evlZ88QSXps3TS4ibtbF+jpx56QjnJhYo/PkGXf/YyXXXxF1WclasZ48Zo59Zteu3ZDSoITFK+f5KK08dq2YLF6tyyVnff8jsVhnwqzcqzOaTNr7do
SfUylYRy9e+nnpB27dDEEWMVTCxRZtwIjc6fK3VIb778ikaWhjS8OFNXX3KxPv+JyzU8d7Ky/UP1va9+Xx1b39YPvn6tUgZkaWT2NH3lkq9rd8s2fe+6q5XhG6CJo8r0s+/+SMsXnKnM2LG2wDQYN1/ZiVUWgw6ADiWw+LIXtyyPBPzEZTp1cNha9o+7Wm2e5x3Na7hLox7hceCA57G728wmx7xOd0jlORK8ezDqCXQQrghOBySADv8xuTlNh/U73AOMuI+mgjbCnAwT8kz8I/zROm699VYT9NxHm+H+8uXLLR11FRcXG4BgwkJ7olzACFBCE8JBgTNpqetPf/qTAoGAlcNaI4CA9N/61re6HAlYHAp4UifzMNQJfZgCAUCeYR50URhee+01K/vyyy/vopsyyEtagJB6PNDZrLb2LWpp2aq21k61tUh33PKMcjJmKZQ8X0H2WYmyPPTFa0LfhEFntq3YB3SKghNN0+nctFsP3P6g8tJyuxwJOjfiSFAYcSS4X9u3NmtFw3IVpeI08CFpzwb97ZZfKpQwSikDRmjetJnqbF6ne2+5R7kpxRpbMsHmc7a8tVm5qdmaMXaidm7Zqdee2ayCwOQI6IzRsgWN2t78th6+5wYVhhI1NDdPzz78vLZvlBrnLlO2P1svPfWCdjS3aWThCBWlj9Xo/PmaPLRWb7/Uov88/2/lZgSU7ovVK889r90t0vCcKQollul7X/mxtm3eoKs+/wUVp4/RqOyZuugjl6pjY5t+9p1vK+iP18qmRu3Yut3WDY3Oq1J2UrkyY+eoNKNR2UnzlRk/zUIB9TboMCXD9AwRCZia+cuvX44Ej16onpSWGKfpkBmvAyIT0JBR+Ss9R4JDNZl0k68n0MGEhHaAQOYAaBC2CGvmd1h8iQcYQAAIYJpC+0GTWLFihd372c9+ZoADGDnB7sxtpGMeBc0HYAmFQiboAQR+blKftTZoGQh8hP8vfvELVVZWdoEhWhTmNaIrAH6Y8QBK6mSOxwEYwAPoQDdAQ/loLl/4whdM66IczHGsT+Ie9HFgxkOD4kfboYH2Aoof6KOfazrEGsNLNphYrpxAubL9k1WUOVULK0/Ttk3SA7c9qrzUYhWkl6pj4y51bpAK04aZqerBWx9V5+YO1c9dpLLMuaqYvEJb172uF59+RKcv/qymDlusomCxWtZtVNu67Tp54dlaUn2y2tfv1E2/u0NFmUM1Y8ws7dwsvfZ0uwr8M5XvnxLRdMKg88Yr/9DZpzbpU+dcoHWvbDZwmlA8V5lxRXrrX+tt3VDVjDqVBacr6dhSi1zwz0ffMg+5kxYt1ylLVlqal59ao2FZxHAbry984sva3bpTt95wi8rHLlRh8hSdvvgCdW7YpbtvulOnLl2lH3zt+9rVLF124bdVljVfeYEKA52wV9h8C91TEoxsBdFbHx6ROR2mZwbFjDHcIAArXmxEsyHWXXdKSwyeByAV6hGaDhkIT81A6Clgm/ui8uZ0Dl4g9gQ6zIUAMghqDtyXEep88WNeQ2MYNWqUCV6EMQLYgQthaACsH/7wh5YXMMKdGTMZ62gwgf3gBz8wbzJMZeRPSkqyMjG5cY+60Gp4xg8nBML1oDVRNw4DAAi0ffe73zXgApiuueYaS0O6f/7znwYYDmAAPNKvWrXKgAPzGgDj2si6HkAQl24HtswHQYfTmFxUhg804ACo/Rx0gvFsWVAXXqpBTLOk6SrKKNfCinO0bYP0wC1PKy95pApSR6tjvdS5TipMGa/8wHg9cMuz0japrnyFCgKzLQbbI3c/LG2X/vn3rfrBNbcoJ1CmC8+7SM1v7tGOTTLQ2r1VqpmxTAVpozRjVJW5Qb/25B4V+OYrP2m2itMma1ntEvNM2/jmi3rtxadtfU7zG7v13at+r+TjR5gp7pWnNqlzozR1RKVC8eNVlDJHub7pOnfVpVr3n3ZbdNr8docFLl1cca6Z+5JPHK75k5ar+a3talu7R61vSNWTz1Qo
brTuv+lJtby9Qy1rOrVzi/TMg69aSKCEY0doRM5iheIrTLMNL9qvNHltQr0XQQdFhTA4eD+juKCw4EjALgU9+QTYzqHsqTMse7ElDmeoN8DxNnE7eFDpSTjyxY6Axe0ZoYu3GECDZuA0GoQ4zxDgAA0aCXMsCH+EMnMsCGUAgrykJYbb4sWL7R4A44Q3TgWYyJzG4kxWaA1oHM5U5rQQ7kMf9EMT64QoPz8/38CEgJ6ubJwHAAXo+O1vf2vpAJh//etf5tWGNuY0HcpgjRFlA4ysD3ImROijjM985jOm5TAHhIcbmg5thyYOB7I98fYDcb9fg06dClOWKo+An/4Gi4BCSC5W3U8oXaz6OeepZuaZFuCzOG2mFs//qBbPv0DFaXOUH5ipBTPO0sK5Z5pgJsgm+++MKZmlU5ecr3NXfVWzxpylwoxpykkZrakjm3TWyivsmDZqiXKTp6ooY4aGZZWrftZ5qp/xaeUn1SsnvlJZCVO0pGaV1LFDd9/yV00cMVqf+PDFqpxyskrSqiyI5+jchaqYdLqWVH5Ew3KIsDzRgoYWpBDFZZYmD2/UGcsv0tknfV7TRy9WbmCWmcOIjk3gz2mjm3TWqi/o1IZLNLF4mdIHTdDQ4BytqL1AF5x2uZoqz9Xw3ArLRzBSFAC3c6rtNuqrN545BaBXzhFHAjQdYq+htEwbeab5BgA4RMvuVtMpSq+ziARoOqhGINcvv/u4ubx5WxscPuggdNEoWEDJJDoRCZgTAQDQDJxwRQNBUAMw5OE+5jUEPpoOAh3gQehTDvc/8pGPGDjhhYZWQv5bbrnFNAa0FhaVfuUrX9Hvf/97KxOQw1OMvMzzYE6DBn7QdP3113cBDCAAWKD5UC4gh0blgIA1OZjFePbvf//b6AVMoA9woY7TTjvNyqVsFpiiiUEnQIZWwz3yA7a4a6MRASQAJaBJmz8QwPJOJsJ+Djqh+EYRNZldQPMCBLCsNRmUn1yt4dkNyvHPVtqQyQrGTjeBT6ic4JCZyhg00yIYlAYrLbpznr9O2YkVCgwcoaKMWRqVu0ypA2cr2zfDIlKzGRwmqrzAPKUPmWZlUjYAMzK7QSODqxQa1KBSNm7zV6hp/qna+nazHr7zQQWTsjUqb47SB01S5qB5Cg6qUl5ircrS623vH1y9c5IqbY8f9vlhs7ncAPEsK1WQOsfmXzJi0cQI1lmjdFuiMlX5KXNVlFyj3MQKBYfMVihuhvJ8s1WYXK68ZGifZfsL5SS59Tjd7TTau/vp4AtAWJ4pw0+32J04oeG9BnbgwdYt6ODuxrwOqhHBPtF0iBSKCocHQneZHKp65rUDgxKClB9uxE5II5jRZPiqR6hinkL4AjwrV6609AjlKVOm2H3mdPgh8AEwzGsI9k9/+tOmHQA05Eeo40hAOjQJ50hAJAPqJG9KSorRwTwPwAfw8EPLwZTmtBHAiDKY9IduTGw4DzitCPdqFzEbTYcfbQIocFaAHjQdyuD45Cc/affQjEjH3NTnPvc5KxvQwZxGXn7QD/DAAw90PsiOBHUKJTXZrp2hxMXKS1ligSz5os+MqxSWlowh5SpKrVNxWr3w3sqKrVRpapNK2IRt8Hzhao3ZaVhwpYYFV1jQ0My4WSpMbhK7iyLoi9PRqBYoY8hcO9j8DXDIC1SZoM/3VSovfrHSjmtQfvxy5Sct0MkNn9L2TdIT9z+n3OThCsVPVvJx0y0iAAs08xIalRNfq8zYeSpIpvxGBWOrbVfTXN8CA8CUgdNtHoa6S9KWKM/XoNykBhWnLrY80FPgq1NRYKHFnuOaNuIwQLtwsNhrPgNc6o1f8CqUuMSue3s/HaZjUFwAn4RjJ+n5v++2jwY0HUxs3eFHDKDDMTJvmeKPmWhB2/BeQ53zQOfAoHIgoYgQZQ7jL3/5i4EFwpiFmwjYV1991YQ+wpf7CHw2dWPhJM9xKuDehAkTrAyE9xNPPGHlAARM5gMcBAwlP8KbeRxMcEzUJycn29qY
Sy65xEx5mL4wmwFuAJGbg0FDQQsi7A5lZGRkWJmAg1tkiuaCZuI0M5wXqBPNhQWgaEXwAmBjfQ7P0O7QcvgRSRvAwZEBQKEc6CIdh4tUwH0AEk0Hmg7E3z7/vJ9rOrnJS5XFLpiJ7CPTZNoOG6ZhbgN80GAQ6LaraGKd8hLqVRxYpOLAEhUkNaoweZHSB1UqFFenfP9CheJqTLiXZSxXQaBJrGspSiWS9VLbsyc3qd6u2ak0ffAcFaVUqDStToVJy5R54mLlxq60WHC1087RHTc8pq9e+kPl+MaqNL3a6itLPlV5ccssKkAwtsYWagIiAEryCXOUcuI85fsbDWSoiwPAgc60gVV2cM1GbzmJ9SoDjBLrTHvKGlJjNJO+ILDQFsy6D/zwGZBujADPEgE+vQ06LLNhHyD2Y0NxeeRvzRaZgA+GHmOv4WM9Kn+ZhaVmMojtDf74ixfNkaCnTI4RnqZzYFBCgwAEEI6jR4/uMoMRbQC3ZSIIOMHLfjoIWsxoaBQ4FaBh4PFFXgQ3DgkADiY2ynZmKBdQlJX9RIBmvodyfT6fcHVGowG0nHZC3YAQ+dEo+DFnQx4AZvjw4Ta/Q9gc1vpQn4vlhiZCtAJn0gNciK2GFxuggcOBmzvCYQEwQwujbDaro21oeazjAeQoG9BxWhHlc1BWnweVdzKt9XtHAoQoX+9smFav9LgaZcTXKpi0IDJ/wW6ZdbZDZzC22gQ8ghphb2FhkppUlrFSGUNqlTqI3UNr7ToYF9YK2OaZ3TYRzPYstloZsdV2jTmPsjGDFaRUKDdxkbKGrFBpypkq8C+3nUjHFtdoXGmVfCeWqSzYGI6cELtEmYMbxZYM2QkLlDZwvm2uxhYD7DSan4wGRZ3QXWv1hRJoT4PRAj1cs601mhjaUnYcGleNLZQl6jZlA7rQZ3M5/iqxFXeWv0ZZ/npl+RqU5VtkR29vbcCiUAJGszUODmg//9ajNqfDLgU9KS3mMk3sNeZ08DxgMqhiysfMx7onP2sPdA4MNk5YIjz52kfoo6UQkwwgcMKdM5rFggULzG0YIEDLAXiIRMBztBMXRgbNByH94x//2EAHzQLN4LbbbrNQMs5hAWACFIilhmaBoEcbKS0tNRD78Ic/bNoQdAKKpEE7ItAn5VMvZeCkABjy/6yzzjIQAKDQjqDPtQWTHzRBCyY5oiAAKIAN5UELIAq4UCdtJCIB5ULz3XffbSADndADGNI2x8cP7Llfazq1So+vVnagQbkpC+0cTKoRB8EiCaPCqnY8adF4MFuhOaBVFPoWKs/XZMI7J2mhitIWqyRjqXKSFikztl5s78z208Vpy8VzgIc9aDi45h7zDumxE5Xjn2lmr+y45SpIOlWFgZU277ErnPQAACAASURBVBJ/bJkyE8YqlDTZzHs5iY3KiltkoDQ0c7nNzyAL04ewbma+uQjzoY5pMH1IpbIS6/bW56tXSUaERl+DzWFBC0E7i5PrVZzWZNoQbQJ0AC72zcF8ZTuV4tlnB+CzIAI8Db2+nw54kRvARXqmTc2MKz7JpmngbU/ez2Zew+VteM6Srp1DB8aMtgw9RQn1QOfgQQcQAXgQspizEPwsuPzqV79qpquf/OQnIjozQhzB7+ZZ0HhYJ4PgR1gjhIkIwAQ+AMNcEd5eaAP8qIMJfdbXoGngQMDkPADAD6GNJnHHHXeYWe7xxx83IHJzOs5FGa0H0xkLTlm7A92kJdIBZ+iAVn60BSeGq666ytpCbDZoos0ALVGvmXciGgFRE4hcwA9AhV6CjBIX7s9//rO13fGIZ9ALvz6wYOM0oH4OOkGi2actUEFGvXJSapSdXK3c1PDSDb6iiWLMxzCT6eGjVjlJtab1oGn4j58nNAmEfX4ywrxJwbiw1xTeU2mDK5U2qFqhhFrTQtBEcNNGM2KpSChponICBOysUx4mtoFLlR2/WMNCizQsuzq8bih9rs3dBGMXKDt+kdBG8gMLbJsB9o0BFDEz4XkXPsL7
3BSnLzHwsPoGs7X1nPAxeJ5pQdDCnBOed8wNBeMqI9pOGLjShsw2wGH9UlbyrMgxVyF/hcJWpl50IsBNO7K1AaCD0oIHNBu5sdYTLQe+dDunk2OeFlU2n8MmbjgTEC0UlOppcY8HOgcPOnz5Izg5I6QRqByACoKZr3n+I3C5x4Fg597YsWNNWxg/frxpSmgB3AdoSOfKZlIeQOE5zxDqCH7uAQLUQXqAinr4T93UA23M3fBzZfPclU15CH9+ABTlUS5t4T/1kJ66OfNzQEe53KcO6sekBg3kcQBMeupy9NMWlx56uP5AH/0ZdHzVBjSATMhfqVSEduI8A6GwDKqyiMV41DKxzuQ/B1EMQvFV5ixQkrFMCHeAyZZ4JDYqPxCOpIJTFB5x+clNphGgkaCBhE1hjeZlxZqgjLhJ5mBQkrrSgCcU22AT/VmJYe+30mC1ecIxR1ScuszMe3ir4YiANciZwIIJc20XTby3MLVRD9tL5/gWRLSg8B4zyFXMZ2hElJPjqzCtCU0OLc4cDsy0FtZyAJ1QYHb48M+LgE61AY+Txb1y9oXNaknHT9b4kpMNcAj8ydwOjmg9ajowqiC11rwP0HZQ52781UvGpJ5c3lwDvTmdwxeICFaEswMdhDFgwA/zFWYp5n8Q2k5Qk5Z8aEYfaIH8QQecfj+nExUQ0tZ8VO/9Mu7uf7eLICNf+6S35/yPvhf1f//8VgeaVHjRZThfGCysLOKauWOfsiORALrq7KYd+9fF/+j07rnR4Gjfb38c98zRYOcIj1z+3jxHIhLgF+A+Eu6/eUNEO2Wvne53nrZN3FIHz7S5HPZBQE0kI6a1nhb3eKBz+GDjwAJzGtqE00gAEkAHrWHo0KHmqcacCpoAQIN2gbZAGjQGV453fu/65Kjysj9rOr0pML26Dz9uXyTgJ9MzYfPiHP3zUSkwYPo77xxKGBxAB6+18OY7syK+1uF9uLuzyXmg894JOMxPmJccoABCzkzG4k8m2VnIiTaEeYoDrzXSYyI7qgKyP2geR7uNHugcvvDzAKR3eIh5NLJOh/knzKDPPLjDzGzM5/Tk/WxhcFjcQ2bc3pi8I1KoeWF4++kccaHO3AiT9wCPM6uhyQAuOAbgyQXA8EPLIa2bM+GeBzrv3QdAr/DSA53eEZgeUB0+3yM7h+JIAH4wl3PdZbfa9T6mtf3MijF4cLC4hxWlTtupnflpm9PxYq8deYEG0DBXg4kNocd/wvxHAxAajTOpOfDB3Mai014RlEdbG/gg1+eBzuELPw9AeoeHNueEa/tsW6PD/BdbHOC9Bnb06EjAJBqOBNjkUI9Kgwt1YsxIb07nKAk6TGloMhzM43BmXsedcSBw5jYAhmtAivsAjwc6R/7D4Ijy2AOd3hGYHlAdPt99xFhrEAGjWRiK2zQu02AI65Z6NK8xCYTbNOoRoIO734zRZ1kmz5HgyAs0NBpABgBxJjZMas6t2N0jHddoRJjkyEO6IyoQjxLw9us2eKBz+MLPA5De4aGv2sCG2J14r+Givqr+yvC6KqJi+yOx4/Y3r6HpFKYtMHscYXBYWXr9T54zdzfQynMkOLLAA7i4cDZoL4CJM7fxDIDhAHS4DzjxY26He/1aYH8QQNEDnd4RmB5QHT7ffdXm9Yx5DSc0Fofe/of/2lQNCgsRJbrDD5vTGZ7DYirCcC+wDE/d32maTk9IFe29lh2oU2Zgll7+1wYTmB3bN6ulbZ3a2zvUsnWnWpv3qLVlh1rbtqi1fb1a29eqtW2zWls6w/d56T4IwsNrg9ePhzIGPNA5fOHnAUjv8NBXbcEERuQSfBQFpVJ3/flt03oCA2b2HGUalGKBKF5rZOL85H0dphodTBgcD3SOrCbkAfIHnL8e6PSOwPSA6vD57qs27SbhWOLXVdmi0MfvaTerGS7T+3iwRfE7ZmzRSvM+wJGAjKhIoNWofO53rx55ms4HXBAeyhe7l+fQND0PdA5f+EUJNCebvPN+0Q2OBI98
1RpbtEoDYkaamQ1/gO99+V7zDSDgZ0/ezzGgFAchcNB0hsSM09QRZ1jANk/T8cDF07SO8BjwQMcDnSMBCEejzEjATxzRiDaNMwHxO8NWs2rzgO52Tof5HPZDwOUtvM1ogyFVWWhxj+qR+4og9ppnXjvCQsnTIA5Ng+grfPNAxwOdowEQR6IOX7W5SYMhYAcHu4eG53f4HxWTLqp+cySwgHe+Slur4yaEsMn1lMkDHQ9oPA3oPRoDHuh4oBMlkJ1s7RPnSMBP8INoBAQYOGXhlwx82Kq6J0e0mKFZTWITN2LnsHcFK0q/e809Yue3njI5hniaznskePrKV7lH53uvdXmg44FOHwYdtBui2rDcBgy59frXzDdgn21x9l+nw2rSYjZQ8lcZQmGPe/ahnTYJdHCaTq0yAzP08oubtWVzmzo6W9XSGt4auaV5W8Q1uiPsMt22Mewu3docdpdu3qVWXjpPmB0hHhBa52APANRtD32wZw90D3vs/h/QaY28K5vtXWlr32LrtNpat6utWbrj5ueUk16uUPJ8Bf2E3u8hLH5fFWQe3X0HhH3Vpt0Qe40oBAQX+NdjsrPb9bXbOR1cpom9hlmNTEwI4b0GUvXk8rZX06lWUZB1OtN1+03PalurtHlTu8UJa27eYgDES+MOW6vT2m6A07J1j1q27lZLa4da2pq945B4gNAnFI47HAhEgIa1UV1HWIjt/R/9jH15msNp38251eu3wxu7reGPLvvwioQ0sv6ir/hA22jvUHNzq7a1Se0t0s9+eIdSEyerMFTngY4HUL0LUL5qW9vJfjpoPGxp8MpTMo2HxaE94UcMEaYJheMmgnAouO33r++7l04PX1Ps9peWMEOhlJn6+Q/v1No3d0u7pQ3rwwKtuWW92rZtCB/tEcHGC4aG06z9QKc1AjzeOSzIDoYPUUIrWnBFazdHDXQOhl5Ayku3t3+j+s+0fYA/AjgspG4jFl+LsBjYB9pm6ctf/I38sRNUktOowJBZnqbjAU/vAY8PDzUWhVZE5nHqdc3FfzHlBaWlp4g2MXEx4w10sMeh5eA2PbHsVJVkNh3EOp1qpcbNVH5wvj51/tfVukXq3CZt3NCsXbt2aMvWtV2gE/5yA3jQdDojwLMroulgkmsPCyTvfPB8MF527P1a7gKe/TUfpwH1dD5Yc9r+6QCQyOH128H3GzyDX/toqBFN04FOlym6Vdvad2rdmnbt3i6dtvISZfhnKi9jgYJ+z7TmrC7e+Sisy9kf4H3V5nyGeQ1/ANZ4cgBE77g4dGLZKbaJGzY5ooTiSED8NUCnJ/XIdXCIneNSa5STPk/l007Vzg7pv6u3auOGVosPtrU5rOVY6Bv7ctscmTeIAE9LZxh0Ii+gexG9c1ggHQwfbE4MsHk3hwOrfYReT4D0zvcPhkYvTQ/92eZ42x3gYF7bYgFeO7bt0dq3OtWyWRpV1qC8jColx85Wac7y3vvK3V8Aef/7X19ENnFjnY7zekZ5CU/VVPQcBsd3whQLg4MjAYmxy5ERm1xPK0qjQScvrVYp8dOVnTZbD96zWs2bpU0btqmjY7s2bV67N94aMdf2AR5euB5eRu9+15fwgQR2WHOMBh1i2h3oID1pcOJwWtGhnQ9En/f8HcY4oNM1h7N3HidsFQh/oG3d0mbzObs6pT/8798ViJuoUDJm7Urlpjb1P0Hngdv7p88jAT8Jo8bC0OSBM3RSw1XmPp0yaLaIStCtI8GYwhUKDJhmK0mjA36i5fS0H4IDHQrMClQpEDvdPNjOOPlS7dkhNW/Zreat7dqyZVN4QtQF+ozYqcN268gcT2uUicaZarzzXrPVAXix13sqAhpdGo8Dnh2RwKqc3T13dqDjvrjf/bnLvHYAOr103Y3zqDmciONAF+AARq3tamvdaV5rmK4bay5Qhm+2UuLKVZazSoGB85WdVPf+EUIeIPSvvsDSFXGZxqyG5zO7TrPHTurg8h7Xecag6RCRAM81nAgSj5us
fz6K21vFXpTqwZEA8MlJXqC0+HLlpVcqP7NcN/7+79q0dpfamneotblNba1sSMaE6Pqog/8R92l7uSLeVtET4N71wbk7O0eBLn6FNci95jYHMNHniGZkWo7H+4N3K3+PedUW/W6E34m21s3hd6alVW3Nnepskza8vVvf/8ZflJcxR3lp1cpMrFZpaKXS4xZ4oOMBXe8BXUTTYZ0OQIO17JkHd5imYwpJUtVeDInqp5iwj3WjmdTCE0A1uuOGNyxuzoHW6QA6BemNSoufo9zUKiUOHK3pE5Zq9UvN2tYie2naWvhaa5a9TAY+0S8XJoX9XHe9/++CJ/Av+tirPXYPOk6zQSsCnNxcgtcHvTMON0c+yDaG3w8+0HhXeGeaO9XGsoJN0ssvNKs0d76ykssVZPV3apN8J85XceZKD3SihFmXBca7d3SAyFcd2SV0jiktgM6Dt24yLMEnoMu0tp/SElOSSay1cIRpvNdQkR6+fYuBTk8ub9Gdmzx4rgozFmp4/lIlDhinwccW6/RVn9OzT7xhprbWrdvVgqlt02Zt3rRBrS2btb2zTdvam7VpI3vrIPCiBad3/e74EV7Psb/J0oHO5k0t2ta+Q2ve3qBt2zq7wGbHzm3G++aWjWrftlWbt6zTHm0X/9etf1Od21u1rcOZf7w+eXd9cvD86tjWos4OPNRa1drcotbmbdrWuksdrdK2Zulfz25WVfkZGnRMmXJSKhVMqlIwaYFKgqsUSmr0QMcDmKMDMN3x2Re2kOEyjSPa0KxF+solNxuGADo9+QTEMJ/DWh2cB9ziUEIa4Gd9oDA4gE9aXIVyU+qUnjDXTGxD8xYoLXG8qud8SPff9bw2r99pJoJd26WOtp0GQJjdtrV1aFtbu71s7e2t8o5D4MG2ZgMMQKOda+Njh9rbtqu9bYfa23YJzyftkdpaOw10mGdbu+5N4Vm4Zet6bW3eaPnWr1+rjRvXa/PmjWJh77ZtbVq3bo3XL0d4bK59e522tW1XZ/suNW/uMAsBrtHr39qlRx9YrdlTVik3vVzFWQs0snC5UobMU/KgCuWnLlVaLG6y3pxO9Eewd30UXafRYHyVwlqGxzM+AWx1AH4QkQC36S5tJwq0LPaa264al2kyDYoZY04EeLB1lym6Y4eGVih5QLmhGpENcgJzlXjCKOWlTVf5pBW69Y+P6fnH39TWtbu1bYvUvGG7tqzrUPuWneps36PWrSwW3eYdh8iDlpY22cECwubwbq22kNAiPuzR22+2aNOGTm3d0mHOHZ2dnQYshCtCy9yypVkd23Zqe+dubdrYbFrR7l3Sju17hOeULUy0sin//x5e3x3G2N3aoV3bpLbNu7ThzTY1r9sjdUprV+/SV6/4tUYX1yr2mKFKGTxFBam1NjGbNniecv0NKkpbohTPkaD3vvKjhGi0POxX15GtDbKS5ttyG/Zki40Zr7JQk5IHzto3wEAUv2IGxYwWJjY0HTwRQCyAB0eCntSjaMYWpi5SRmyFClKIKlqlpOMnqyyrTmOKFmlATJHijy9VzezT9aNv3qgXn1ynto3S7nZpR4vUvklq2+odh8MD2w6cLcGbpdatkWOLbKGuLdZtk4Un2tFBBIid2rNbWrdug3bv3qlNm7aorWW31r7dph2d0sb1nWprweV9uy1G5NrKpewejsOh3csrbW+WtEPSNumZR97SFRf9SNPHLlPIN03pcVM1Im+RLd5m+18+7vICjSpKW6wcX71CCUfxqzZKaES//951P+6DCOhkxJWbExoYsrz2cnMk2McfYP85HRaHEn8NexyrSgEOAn6ygduBFocy4OJipqgsuFR5gXpzk0O1Cq/3maZRBUuUEyhXQfpcFWXOU0loniYMa1TD/HN09smX6lPnfVkXnH21dxwqD865Uuefe0X4OOdqnX/ONTr/7C/r/LO/ovPPvs6Oj551rc760OX6yNlX6JRVH9f5H/l/OuP083TXnffpjtse0N9ufVJnn36ZPvHR
a3XOGZfrwo9/TR8+7RKdsuKz+vTHrtP5Vi5ld394/Xd44/fMlZ9X3dyzNK6sXgXps5WZOFUZ8TOUm1xhX4wsW8hPbrAvej7wSjOXKSO2SskD5qo4fZH3pe+BYe+NgcjWBpjXMK2BHXf+6a2uMDj7eEBH9VNMyqAZQj0CKNzi0L/+5hVb2HOgnUMBnWB8lTU6K7FGOb4F9oKEEphgqrAycEzIjJ+jjPhZkZdpjkqzqlSaVam81NkKBWZ6x6HyIHmGgl3HTAWTZysYmKNgYJ6CgQo7Z6XMUzB5pgqz5ygjeayK8iZr9IiZWr+2XXt2Ss88tkkluTXKSp2t1MQpGl7UoNyMuXZdlF1lecnf0+H13+GN32G5tSrMmGdaTfKgybZ8gd0XAZr4mOkGMDm+BjOpYVbj4J3jwLrgaRr9WNOIEuS9Mg581eY0wPY4cTETTNthuQ0WM+Z0uqZm9td0mIdxK0qZFGJl6eP3tIudQw/GkWBE9gqlDChXblK9ilIWKeXEuRqauVwTis5QbMwU5fnrlB9YoJykamXGzlEQ92r/fDsyE2bb4tIsf4284xB4EKhSqOuoUSiwQCF/feRosHNOaoOtYCdUUXLCeCXFDlNibKG2b5O0U9qyVsr0VSg9cZ4SB0xTdkqN/U+Nn2P39pYfXdfea6/fDqHf3HhnAnbwNOUFKlSSUa88Yqkl1ijP16A8X6Oy4utVlrFSOYmNyhhcpbSB85U5pEqFKY0qSV+orIRKD3R6W/D25/ojsdeYnmF9J9rOTf/7H/NkIyJBT/gRwxwOrtKY1jCxMRl0/80bbBKIVaVdaNUDczOGzFdxWpOBTnbCApWkLVHG4EolnzBXI7NPUuqAefYSlaQvspclz1+r/ECNCpJrw94NSXUK+eoUSqr3zofCBx8h7muNd0Hs/En1CvoaFUpqUjCpSVm+BmUkhWPkFYSqlJM2XUlxJQY4m9dLb7wkA5uy7KVh0PLVKTOpTvmpi5Wd3Bgplz6qjaon0l/0mddvhzxu8Txjh8XsxAqF4iuUH6i39wiACcXVqTR9hXKTFip1QKVykxpUmr5UWQnVCsUDPPXK9dV4oNODXOqVL//+RkvEe42tDXAewKr1nS/dHZmqIaxaZL+n/TUd5m0AF+LkgExpQ+aYlwz390EqqwAXuf2OHhn9Dq6crox98rr03jn8whwMH+psQjnLz8p0gKdWmDk5ZxsgNConaWEYiBIqlOmbrZzk6coIjJK2S7s7pTX/kZk3s/3zLLoEE9WZCQi0JsuHYKR8xgJaTbieOiuXNHtddg+GXujy0u3bv5F95P/PuwCf9j88U1KYdx4f3hd8iGg6TJ+MzFumhGMndYVTSzxuqgWN7sKLqPEdgy81a3IGxow17YZtqhOOnbJvhFAHEt2dowp7XzCin9GDFxNzacZ76x+3mySA1GDgkJ3YaHMAWb65BjpB/4iwxxSg8+pOZQemKdsf9l4EdELxgM5ikQ/BB+AwSchhg4h7SQsNmPaCiCcIvPHvjYF+NQYisddY5+k7Yao5EBD0E2cy3wnTbYqmW9DhIcAzofQ0xR8z2QQMWg8aT1eG7sDG3etnQv79Nqh6Bp3adwU6Wb5ZBio2UR1XFwGUegMzD3Q8Yfp+G/cePe+DMRlxmUbTyUsObwR6cuPVZmYDQ/ZxJojCiRhC3QzNWmK7vDlT2303bTR73D6hqR3I7H+OKswbCEd/IDjQ2WsKDWs6aD/m9YQJLLFReBR2p+mwEBFNB9DBkYTygv8HdMJajqfpHP3+9d4pj+fv2zEQ0XTwgCbKNF5rxO1E2xmRu1ysLetSXKJwIobJHjQdty5nZN4K/eOudru3j5/1/mDj/kcV9r5lzgeaRsxo4TkXZwIzzSQadJjfcaCTMlXR5rVuQSe+dj9NxwMdb2x7wt8bA/uNgYimU5xRr6TjiZqxQE/e12EWE5QZc5LpBidiUIEIWTBl+JnCzY3FaLf/4U1bZ7OPeuQy73/+QAv0/Zj8vmxr
eGI+2gRm4GOgU69cP/My+4FOYFjXnI6BTvIU03TYATDXX6dgt6BTKZ57czp9YUx4NHoAcRTGQGRxKGHUiNuJ3AF08IQGdPAV6FbTcetxBseM15jCkzQkZoL+9ZhM0zko89r+IOT938voo8GLCBA60GF1cLegQ4QJX7ly0HT2AZ0ddi/sSFC1F3T8jcpKDDsohDWoaNAJzxeFAS3ifXU02urVcXTHlsdvj98HGAOEvsFlmgXNmOe/ffVdFlIN7OhpyY15r+E0gIbDnA6H29Jgn9hrB6i8C9G8dEd3oAI6kb3KsakCOpzDJrYoTWd/0NmpSHDJMOjkJM9SXkqlhTMKJSwwDal70AkvSAyvko+siPf6/Oj2ucdvj9/vizFQaU4DZaGF5r3GJqBoORxuyqYLF6KsRGZeQw0CYJjD4QChiDDNPA9gxDVCjIkh52KN1xvPACiAigBvPCOdAy/OpMFsR+U8p2x3nzpdepDRf+KMrjL57+jBNgiNDj2hy+WlXq5dejQ36qBc8pOW/KShDEyGuIVDD7STHxpJx3+uyUs68oQFeo2dScvh2sdzrkkLj0qDi7rS0VZHv6OdcqmHMslHfv6z94SjE145eh39fBRwj/+0j/+UTznM1fCMRb24LuJJEv7qqFZmXLWZypKOm2VtJ1pxaXa58jLG2OLQne1ENN6hgoyZCibhTDBXhakLI4EkwwElaS+aTn4KbtPwaK7xiKCTxADLjAvH6KNt4bRhXvEfGmkndLrD3Xe8oE37pyEtZcE30lk7I30On1w95OU5/11/0Q8cPKM/u/iUiKZWY2XSxzxnPLvyHa2UQ3mUQX4O6HP1uPScXR7XNs7uPufovuaaOt09aKAOxgCH+08+7tN23gfHB9LQ79wnLf+hi8Pl5xlp+M81eSmDa8p0Zbv8pKN90BXdRtpFXg7Hb65dfd09c3mog3SO/9Th6IumAZrei/bBc9pD/bSF/qN+/nM4PkT3EdeORtoHzaTlHJ3OlePuc6btnHnmxgft4n503ZRDOsYYvIBGaOGeGwOOZp5TBge8cmOQtDzjTPnk47nLz/0j3T7qh07qp03IOOgMt5VQZ4yrSvlPnKZxxScZ+PDRizxmLHbxMxp0HBNgCgWyqGds0cmKjZlolcFY93IOy15qDIAI5/EGMQg913l0BgRBmBvcPOc+dSE0uQ8xlEOjqMMxlXyk4xllcj/p+Gl2Zu5pVP5KawyNcmkomzJpJHWTjmcIc55x7eqj07imTp65gzq5Dz3cgw7qiE5LHp7T0TyHH64t1En90OzyuzK5Rz7XHvJE0wRI4q5OXa5cnpOf//CAPLSNcqjH3QN0wmXxgVBjoMOmSta+xBoVpDSpOH2JlVWQVqmkAWUqyZmoDqIb7yQiQbuCvvHK8k9XKGmO1Uee/OQmlQWXG03E5iN2XlrsDAuZBJ22fsdXr5KMcH/SZtd2nnMNr6GNaw7awEEbOGgDB2k4uKYcxyuuKcul4Uw57jn8oS8o0718tJuDZ5TpyqAcR6PjK2ORtK5MyiUdz10+xhDlu7zuPnkOpn2U5Q5HA/U4OnnnXPspm485+pd3hoOxQf28b/ynTvcRx/jjoFzu8Yw0pCUPeV05lEnZ1OHqcwKEtJRBm9xBOg7Hc+67fC6N6y/SuXucKYsyGbtHun3RNMBnxgEHtLp+hCZ47trj2kFeDp5xQDOHawvp4Klrk2snZ9IxNlyfUhfXrizy0MeMMfjAc+6Rl3Qun6PTjQfoiR7TLh/lkp/n5OHsyuA+z49E+6gf+l3bGUfIOuomko3/xKliTmdM4UoNiRmnUxZ+ycxr8AfeubZGz7GZec012DHppv9drakjPmwAROPwaEMoUhkEIPxoeLihMGO+xWwD4czt1s/XFwJ4vp3ZVQ4CcavjOQuJ3D3uu3yEUnD/mZjCG4KvdtQ2yuIZZ9LznDP3iBeHSsc1eYbnLLF6QF/S8HXOmS92/MnZNwg6QGnocM/Jy7Pw13ytXVMm/6Gd
/FxTBm2gTrw2WI3LPcrjoF7Kpzzuk4cDOgk1xOIp6OEZ96gDOvhPHYEB0y0//CAd5VAXZ9JTH/dxTWRQ0G/h9lUqP5UIE2FXRaIQE/7ed/xsy5cRP01FwZkKBoZqd0c4pP7GN3cpO3mSBWAlFh5lYVbzn1BuQSfhBxpUUUatHajS9D/PiXjMC0Jb6Q/OtI3+gk7aAj8cH2ifS+ue0UbHIzeOyA9PKBO+cSY95XBmbHGP/6RLjy3v4iVhnXhO/ZQH36DJ8Z209A/t4hnlUx9pKA96sVOH3T6X2j1H86G0j7KhmXrIT/n0Nf1FPa4u6TIPpAAAIABJREFUeODGWNhOHqaddtAeyoBGaOaa/IxBjnDfAxLhtjoekZd78IEyuU8d1EX7XP3cd/1EWnhLXvfuUT7lcI6+pm2kIS15yOt4SJmufOo6Uu2DHvqUg7odv6mbe9DFGOE+dEIjfQE9jD3aDa2ko3+4zxHdDvKRH/5HjwXS0H7HS8dD0pGeeuAbY40xBz08c2PR9Q3l08eMXa6pn7TQTR3Q5sYA/x29lH+k20d9tBn6aR8H9CGDkH184LIRKP0AH4jbyTOUFzCiW9BBiwGV+CJC3QVUcCS49fr/6t6/bjBgIc2cCefr5t++psfv6bD71//khQjiVttA/tsf37T9se+7ab1u+d1qIw5i5078qG69/jX9465We/6Trz+siWWn2oCE+TwjMileDz+67kF7IXmRFsy6UDf+6iXd/NtXrSGUOyBmlDWsfPx5euRvzbat9nMP79KXP/9Xq4+GN879f/rLr1/WY3e32XHVRX/S1BFnGFPqZn9Gd/35bWvfA7ds1Ncuv82YiZBfVnOZ1ffvx6WnH9iuay7+SxeDa2Z8Sn/8xYt66v5Oq/P7195nqmT8MRO1su6Luu33r1v77v3rOl352T/aQKRt00aeKehmqwjaDy10GhsdrVhwhe6+cY1u/8N/9cyDO/Tzbz1qbRscM1anNl1j+dhv/NE7W/TFz9xggxF+1pd/1uh84t5tRueVn/2zpgw/3eZy6mZfqLtufMPaR/y8r112p5nAxhZ+yPI9cd8W3XvLv3XPrU9Ku6TXX16nt/7TrgVzPqw//vIJPfVAu7nLf/uqBzQ6/1TFx0zV+ad9Tzf84jk9fm+Lbr9htb74mT/YQmKAqXbG53THDW+JPoDOz53/q67B9+HlX7M2wG9ope0MUtr++Y/9Rn+/Y6u1H5596qyfWL/zQp614uvGawYvPLv4gl9r+qgPm7Cgj+D1a8/JQqjTRwgXXj6+sBgrLzyyx8bG5Z/+vb20vNjw+nc/ftaePXTbZqOFviEy7jmrvqnf//R5G3/Q+qXP3Wjjk5e5YsrHbN3B4bSP/v/02T81QQPYnHfyt7v6jzH60VO/a+OFsQstpKd9tOUz5/7cAinykn/s9B/onr+stbHEu/bZ835h7UMwcc09xhlpSEsegjBSBmVRJmVTB3UhLKkbHvPuEaiRvkYQIzB5xntMefTtpZ/8nYU5IdQJ19zjGWlISx7yUgZlUSZlH+n28b5Bw8tPysYGY4QxMaH0FF30kf8xGhhLvL+MLcYY/cCYY+zxDjIWGZOMTcYoY5Uxy3iAZ4xlBCp8Y4zTdsYEa1IYI4wVZBpjhzy0nTEFrxljjDXKZOwxPhmLjEnGJv3HWEWe8Yy+YiwzpmkHY5ztAug/xj7tAzh5J3g34PGRbB+0I8sYL/AanlEntCD7+AgdnrPYZDf3kdkAVLSGaMATbV5DNQORUINRFzGrocE48xoq+uiCVRYah8yYnFDTUeH5D+MQpAhEh9KgMoPeITQEw0QGJsgJqNAQOpH75CUtm8e5Ly0IJz0HoMBXwKj85YaidAaDgzo4QFb3NcNXBYMKGhgo1E1+rjmjBbmvBuriOeVBC/RTLjSRzg0Kno8uWGF0kpf2Qh/pOTPAqZcvEGhx15TJFwt5aDdlQid0cB8e8J+BSV6+Qhm8DH7opR20z2lu1EfZvNzQhFBh
fgoa+NrwD5isYTloIsyhVYZD3yQusD2PaCsLQHNTJythQJa2rt9mwLNmdYfSE8ZoVGG9iL9mpoPkBiUeO9Pmd2jD0OxGm+8JJkJzk2k6aE/ji8+wa9LQT9BHP0Ib9dFm7tEW7qOCw2/6H9p5Bg9I58YK+eCFGxOUC6/gmdES0RRJB18pk2vKol8QitFjAP7yDGETPSZdPTyD7/Q5+UjHGZ5OGnqa1X047aO9lEl91MGYojwO2khfwg/qc/yi/aSjfdBAXg7ywEvyMTbgCQfX3OMZaVx68lIGZVEmbaUO6qJO6obvPINPnPnv7kE3POWd4P3jHeDgmns8I83++aLLOtLtg6eMD/qWuqCFMeDeH9oCb7gPX+AH9+ADYw+eMBbhB33FeOI5+R2/yMeY5j73GJPwmXrhMfyMHjvQxJgi7f71Qic84xl8gh76j/zQDU95Rp3QwnOuyeNkppMf0HGk20c7aQP0IdvcNTKPdiceB6+nG53QCL2MDxaHhq0gkfncaNDBfIadDpDBVDOx7EOm7TB3AqgASM7OzFwPtmPMbJOGnm55YBAdBiBQIZ0Dc2AgzzgDJhDNwTPSkJZrBzRoMRBLR9D5DAoaDFPJR+eShoa7MhlMdDBn0lAmB4OPDoEJ7oVzg4D/CHnykY5BQL2UQYfznHvQ7l5wynYdTPk8Y8C5F5JBQ5sogzpJT+dQDvl4Bo94Dh3US9sc7dwnPfWRh3JJw2Bzg4oySM8B36gz3L6wizRfHAVp1QIYYmPGGR/4kGBeZljWchsg7OaaNKBUk0bNU+um7Tans/a1TmUkjlEhe+gMmWr5sL+mDa6wzfmMbv88m+/JCcy3XWYxwQVOnKOSjKVms4Zn0A5t0AVI0g5eDtpAezi4hn+0nX7mP7xyfUce2k374DHl0n7uc4+Dewx2+A9vqZPx4PoFPlIXfCYv/+Ev+aiPs+M3dCBw3D1oIh/pqIMxCD08P9T2QSd9x5l66TN4RPuonzpoHzyjbsYi7eGZ+4CBj6ShXbQT+lwZlMM193hGGtKSB75SBmVRJmVTh+sf6iafo4P2Oz7BN57znzPP3PjjOvoZaSmHtO4ZZXKPdEeyfcgK+pn+gr+0z40f6oUmeMQ9+pJ78MmNO2iEZuh1vIWHHDxzvIdn8BkeMBYol/JoH3WTlropy403xhblvtMYhHbyUR/poBU66C/qYowzdqiDOt17QDvIe6TbRz3QBb8Yu7SN8Qwv+PAYU7hCLA4lHbTDD9KhqAyIGdO9eQ0QYTISVMKMhr2eiXu+oNFyUJO4Bnx4BjABUhzch8k0HubAAK4d4zi7znNpuOfuQ7QbLIAWZUA8HQ3DaaC7R0fTgZRDo3hOR1MnedyAIg1pYRJn6iIdA4A8pGWgkg7mccBU8sNI6nR1OwY6hvKMskgPHZTBYIRG/kMLzzigkeek55rBwn+EuBN0bpBSD2XDSzfwoYH0DDjO0Ew50EK7XPtpH3UnHDvRJvvZcRJ1lzz0UTC+RulD0OiWyDdgjLICE5SWWGiAQ6Tpda93Kj99qgrS5ygnMNcGjQMdnBTgiQMzAA1nBWubv8485xgf0EQ/U6fjO+2hzdEvIfccD12/wSvaAA9d+ykDPjg+cs1Y4T5pKZc6EaKUQ77o/iM9PIH38JbnpIc+R4M703+0B37Ca9cWRzdluXuH0j7ogwb4QzmMN8Y3ddE+ntEu7lM+tDghRDrycXCftKShXeQnHwfX3OMZaUjr8rkyKJP7pKEu8pGW/oUORwN84T60UiZn7tGO6IN70WnIwz1XNmXa2DnC7aPt1E0/0mb+I1Pof9c2+pD2caZfGWuMI2ikDbSLtkK/aytn0jLWuHYyBf5RD/koz6Vz96ifsQSvoYnn0Qf5yUvZ0E290OrKgmbGMs+hizFOeuiAZtrBu0B66qANXB+p9kGvq4e6oAl64SHtIwQOMgGakGtoZwAm2DG+5NTuQQdTGhoNYBLt6cI9p+nwZcs1Z+e1QaEINYAIhnMN
owAnOggzHAwlPYRyH6JJx31AjsZwn04hPYOF9DzH64P0aFUwnf+UY+afFOcxFy4HplMOjMdEGO6QsCkPOqEPjQ5GAax0drhNYW8Q6IYPvDBhXoQ9d8IvUvhLBTMk9fOcM/zizH3y0R7qcfmpDzp4Tju4T7scHcyd0XmAPoMM+hlUtJfOpHwGMOl6ah98SR080740qIP6wzwMf5Vyj/q63KoTZluctYLMCba1wZ5OCU0nL22K0uMnKTMh7BIPULno0eH+mqfC9Bpbx4Ppjj7Aww0nBeikHfCPAQr/eXGoF74ASvCXdjj+0Q/wi+e0m/ukoxzywQeeUx7l85+xyX/XD+Rz440z+el/0pMOGjkzPh3f4ZfrP878d+MR+vhPOYxH6kd4HG77XL+H37OwBx/tYZzTP9BJvZiruc94hd7o9vKcdiCA4A//ycf7Qzr4Bz9oL+OJccN/127SwTf4QzvDoL2X79DBfcYPdNF+0pMf/kCXe5+5TzrS038uHe2kH0hHevjGuD3S7aN86Hd8hj+0l3bQf/znPYZe+EM/RNNF+3ge9kQLe5e5/iEdz8kXnn5gDIY/simHepEjTk6495T88MWNb/gJnZxdP/GcfqI8+o8z5dH/lAP99LM7kz+6P2kP6Y90+3gfaD/9Cf30L+117w000F7SMC5oF7yBX7SFfrEj2rwW7cp2yNfmQht24bMyvP97txqA2UeMH5XC5GXhadh7ha0IurYj4BlCM+zCynYFWYkIg0pzkcZdWjvC63Sy9wmDw3bItbY4lOCf4f4EOMOHa4ttm+BrOErtxEX7SPLRK9/jrze+3Lv9rs4OVNw5ClxsTHXzP6anB979oxC7qJsOeVd8Zy7IgQ5gk7BE2YmLLdaa3Q+UG/AwiELxTcpKWGTBQYNJk8PmNQc6kSjT9uXkb+iKMg2wGD2ROSeehwdkeD8d9tRxGtG7ovtw2+3lD/eLxwePD31wDHig0wc7rUvAe6DjCZ2+PH492vvl+PVApy8PfA90+uVL2/XR0ZfHrkd7vx27Huj05cHvgU6/fXE94OkD5u++LFuOIO0e6BxB5h5xweCBjgc6fXn8erT3y/HrgU5fHvge6PTLl/aIf8z05XfCo/19/054oNOXB6kHOu/7F8wDCM8M5o2BfceABzoe6NiCUeKyeS7T+74cnrDw+OGNgfd+DHig44GOBzp9eQx4tHvabh8bAx7o9LEO2+fLyzOveQKnL49fj/Z+OX490OnLA98DnX750u7z4dGXx69He78cvx7o9OWB74FOv3xpPdB57+cZPJ4ePZ56oOOBjjen05fHgEe79+HRx8aABzp9rMP2+SLzNB1P4PTl8evR3i/Hrwc6fXnge6DTL1/afT48+vL49Wjvl+PXA52+PPA90OmXL60HOkdv/sHj9XvPaw90PNDx5nT68hjwaPc+PPrYGPBAp4912D5fXp6m4wmcvjx+Pdr75fj1QKcvD3wPdPrlS7vPh0dfHr8e7f1y/Hqg05cHvgc6/fKl9UDnvZ9n8Hh69HjqgY4HOt6cTl8eAx7t3odHHxsDHuj0sQ7b54vM03Q8gdOXx69He78cvx7o9OWB74FOv3xp9/nw6Mvj16O9X45fD3T68sD3QKdfvrQe6By9+QeP1+89rz3Q8UDHm9Ppy2PAo9378OhjY8ADnT7WYft8eXmajidw+vL49Wjvl+PXA52+PPA90OmXL+0+Hx59efx6tPfL8euBTl8e+B7o9MuX1gOd936ewePp0eOpBzoe6HhzOn15DHi0ex8efWwMeKDTxzpsny8yT9PxBE5fHr8e7f1y/Hqg05cHvq9aOf5K5firlJ1Ur+zExeEjqd7u5QTmiyPbV61QwgJlJS6w/xn+8dJOSTuktat3RGk6lcr11ykYX6tcf6NyfPXhlwJwc/X4qu1ejq9BHNlJdf3yxdkH/PvyGPJo98bv4Y4BZII7DqKsGIRGfvJC5QUaFUrArlengpQmu8d/rnP9DcqIrVLKwHkm
uIrTl9jz1EHzVZi6SOlDKu0+5XCdNrjCnpOO69LMZVZ2ScZSu+8/odzOpKVs6qB80nLNQd08o07KHRpaYfcz46otLQIxOi35EarQH4yvsfy0ifSkpQzKI417zjVlHMn2UQc8cnQ5XvMfftA+6h+df6rgJ/fhA7yDB463jm5XHuUE46uU66tRTlK1QnE1yhxSq1xfkwpTllqZqYPLFfJVKz1hrgoy6hQKzFZBaKbSAsO1vV3a2SH958VWZaf+f/bOAzyOq9rjIsVdZYu6drXqknvvvciWbMmyLbf0BoHQHiXAo4ReAoROCDwgtEBCgJDenU466b3iOHFX77L8f9/vzI60diRXnFjx+PuuZzVz67l3zn9OuefOVIZvpnJSy5Q8ZL4yk8o1Ou90uwYHz1NO6jKlJy5QbtoyZSeXW5789NUKB5YbbRkf/YLW9At6u/0+vPEttXVJ3cwvdTKn0MZdG9CoMG11N+1YW6wV6Eh/SOSlHHR1f0Nr1iVrgfJu38nHM9YFz8jP+CjLlfaCAxZoZPhk6w/P6R9X2uQ3eflNP6mPvlMnv9381O3Sinv8pqzbb55T7kiPj3ZpE9oOz1pv6xH6cZ9+MVbWKf1njdLH5IEL7Urf6CN9ZWzu/FCf+w6Th9/ci80TO77YOt33gDZpG1ry3C1LPfSN+WIOWGsuLclH4jn3eEYed27dcVIX+aj7SI+PNUC77jj47a4BrtCMPtIPaBW71njOOKCfu37csbNmecYadp+Rn3YYH78ZN4k2mVPeDX6Tn/Kx8+fmc+lHeZdv0g73uUfdlGfec4PLlZlQqryUZRoVWaPkQbOVm1yh/NTlGpm91p71BkZxVOJOHB2GCHSGeyQ6SgfH559pi5IO0CAdYMAQgd8QhoXivmgMgsXJlWf8pj7a4567sKmPOmiDZ7RNH8jjtsFv+uES2S3LffK7+UZHTrU+cI9ntMckcoXYtENfaIdEm0d6fPSNPtAW7dMP+kZiIqEZ44I5k6AfLz/5XTpzj7JcGRu/GVNecpWK01cqP7lSJRmrlRdYoYJUykG7FcpPWaGclEpl+RcqkjpfwfgxysueqtRgkdQl7e6Utr8pjS4q08j8pUpLnKnCrCor4xswWxmJi5WbVqEsf6myk8uUkbRQGUmLlB1cZoCUHXQ+RugT4yIxXhL0ZdyHN76e+YIm7vyxTl26QiPaGJNzmkZln2I0g470iZeZtQIt+Zu5pxy0oz7ozt88c9eee4+x0H9eMOjOfeaLOpg7ylEvbVMWpsE98k4p+YA9Yx0787RSrE3yMQb6z5X6qY883ONv6nDfK+o+0uODVtDOXXuM0U2MD5oyJt/xc22M9I1+Tig4y+gQSwOXpowHGjE2l17cg4bkgQ6MjbLkoy7qpG7oSFu06bbtzg156Btl+A1PgV60Qd3uGuQ393hGHvJShrL8duc7to0jNT7WAONkTdAefWKMsTTgnksv+kj/ycvao7/ue8V9t+9cKcc9aAp9+Ztx84w2Wfu8A/zN/LKWoCnzHbu2WIvQnvK0x1qkPuri79g1S79Zy9yL+Cs0IlytsK9UKYPnqDhzhQFQVuIiBQfO6ZF+otoRVzsQF05aal/MXGFSJB7yFc09UsYwvgjmKXDiXGXGL9kjX9JxsxRKLLf8KYMW2NWtvDCNLxxe6OXiGXXxm7q5pg1ZZPUlD5xvV+6h3oGZ8jt1MF9UEJqX3LlPXxLfN1OUIR/106f0oaV25Td5TAoIVBphKE9djIF8EIv63onx0T/GSR/or9s29116ujTiOeOKj5tufS1IdSaWfibEzbCy7rgZo9HLv1ThxCUqSlul4Anz7cXynzDPnkUCS1WQuVxZgbnKy5yv/NBMZaaOVvzQdAOdlibp9ZfqFUwYLv/QUcoMzFRRuELh4BKTjgqzVpiUExg8S3npFSbtZPkXqyirWv6B85SRWGZrAZq7c8RYGCN99p8wx+bkcMZHvcxlccZqowm/mX/mz51D6Egib0nmGkE3
frPmmHfmHHqNyTnF+sa9keH1ctcuZVmr1O22RVnqp+/Ds9Z2l6N+7g+Nm2p1M7e8M/YChtbZmGmPOslHP8hDPcwhc0YbtEc+nlOePrl5qMudX+h4JMfHeppUdJbRgvaL0qutbcZP/+gX65Kx0E/6zji40nf6ypW8lKHf9Nm97+bhHs/cet37XGPrpA3aok3aJlEGGjJn5OXe+PzT7R5/k+g7bZL47d6nHHkpwz3q4B51uvUfyfExFuYP+vA+jAitsz4yTvpBH1hL5GNt8TdjYM0xDu7TV9a/OybWDnVCO9YwY3LLscahszt3lCUf7wT189tdT8wJ65H8tMm80iZ10T/yUo/7bpDXzUPfQ0mOaj8vZanyUytUkFapnCCqtiXKCS4ziadXSQeCU5gO5KU4TDptqDMh3GegiEsQa3iIr/UqhX1lzsCG8aVRqbF5J1vHUOdQDw1SjisDLcrgC3+ZYILk52qLK7jM6qU+Bk1+CICINir7JCvHQMlPvQyAvwvTkVJWqSDNASfKcZ/+0z/y0x75eanoP5Pm5hsRdhgTaPxOj4/Jon/Qk/5DjywkiuBymwdeep6743PpxuIZl3+K5WMeWBiML23wfIUSFmtM5GSlD1lkdIO2jDFl8Gxl+OYqlDxbiYOGqyhnhpL9BSoqGK9dnVJnh1SzrV1Txi9RStJIFYTn6sS4IuWmL9Ho/HUKDpltoBUOlio3rdwkplBgsQoyqxRJrlBhJl9T67rXD/1mfNB5dORkFWfytXl442OciPDGLIYuMOBg/mk3MGC2zTfri/XDPLvrhxeGfrD+XDpDLwASepOfv6E3V2jmrkPqp99c3XnhynvBOizJ4uuvShMKT7PyvCf0k3Zi1x/zZB9B8YusHp6zrrnSfka8w3ToP/1w3z9UE7TnP3HWOzI++gNzoT+sN+hIf0bnrLe/oQvMhvXH+NxxkY/3i3LG2IfM19jcU42ezFfK4HnGMBkP9cInoNP4glNtnpIHzTW6M78wWcY/Lu80JbxvWvf77vIBmCbzyfyaSocP02Q0BattPo2hJjkfP8wP7zXrDzpSrzsPzCvjZXyjIuuO+PgMPLC9+iusH4zD1N6J5cbfJhScYffpP3Tm/WFcrF+u8AHGzZX3i/UH3aFnLF9lvNCF++Q3kMlCcq609xM6ME+sc+px1xtASDvMF/eZX3c90k8DJD5eU1cavbiS350HACYrcaGGh1YpMGCmijKqlBG/wOqw9dSLrSeODiIOkYErL2TYt9gmbGLRqdbhUFKpw1h8i21B8ZwO5iYvNQJiyObFy0xwXrzC9OVWT+qQuUo6foaCA2dZOVCQ9sifPmyh0ofNNzGMTrJQEM9YqL4Tpttzt18gKIROGzrPnrtIyt8sKFDWJiqptLte+kM7TCQ6R/JRH+1ypf9uv4/k+Jg4xkn7jIP2EUVZIPQfetMfXkzGzYRzn/Eyibw40JP5YRwwWuhFvbw8ecEKhRLK7Jo8YI6NizYK0iqUk7xI3/jiZdr5lvTi03XauWWXXntlm55/7hXxr6GhSR2tu/XYwy/otRdr1N4oPf94s2relD5y9k/MxpM0YKrCyQuVnjTHgAcpKDB4ttmKkocstPljnmPHx9+Mj3Ef3vgqu+eN+WIdMc/Qi7GzDmiHBQ89+HtI3CSNzHY+MFgXsfOdeNxUK8fz+Lip3euNcqw71jN0p9+sn6Tjp9n9hPdNsXlwXqzZNj/GIKLrj/eBcvTLXaeUZ57oL/UCbCVZK41ezDMMd0zuOnvOe8M4uO+uZ+ad+o70+KDroLhxtv6gF3TgPQVgHSYCsy+39cd7wn3GQ7+4T3mufOQwXuabcTBe8jF+6Os7YaaNh3WMSsblA1wZP/lY9y7duA8DY72znvib+edDgHkYHDfR6uXDkXbd/rj10Y7LdwbHjTeg5D0iP/1lvbwT46MfLt2gL/Th6vIl+5DxLe6mN4AOHwR44RPwT8qzjikHnbgPfbmyTuLjJtv8wV8Sj5tufI12
WMesR8YLfxkSN8HKQU/7cEiAF8LrV1g++Avz5K5H5m9M7klmq+E+9IRPM5/uuh6Xv95AhzK0g22H94F6SfTTUoyDQRwThmcSA8oJgpYLlTpkjjUUGDDDJj43mcVVJq50jHw0kJ+6LCpSlRljLEx3GDvIBwFKslYYA6AcCyiU5AAb+ahveGil5UM8ox/pw+ZZPZRjgRRnVtmC4znls/2LbQJgqPxNvRCA9ihP/RBwbB5S2SrLT3/ROULAEeFV9tztB/136zlS46N+2mEBMD7+HpntACzjg7HQHxgSz7lCX7f/9Jv+s/C48pzy1GNMLrlSOf4K5QaWaVR4rS2ejPj5So+fo8LMUs2derrUJvNWa6jBkGN4oy1b3hI6ts72DufeLqm9SZa3o0EqCpeZqg1124jcFQY8gA92ndT4+aZiK46+IMwn/WMe6BfzB92Zt8MbH8yIjwNnfY7OWWPrFDpBT5d+tEe+UZHVth7IR3+Q9Lgy72Pz+JJbYv1j/WD05IWBAbIOWffQl3VNPujPvI3OWWvrjHpojxeKdUh9mQkL7MVnnORn3uhHUQbSOB8K8+1vm49hfCRU2PrjOfThyrqjPtYt/Yd+aUPn2np236cjOb7JJafLfyIfO8776dKR8TCfDvAtEB+g0JN+0i9n/S20cUIvGNKYXNZfhdGT9xH68Tfjd+kJ0AYHzrRxu+Nz6QV9YaQTi06x+XDb5z7MjPeE/vHewzgnFZ9q9Y8vcBgj8wBdkwfN0viCk639ScWnGT9zyzGP0JlxuPUzj0dqfNDBpRf9Zn2x3ugn9GTdwa/gY6w/+uWO013v5GedsL5Yf/QXerKux+Uz9lnGR931RH2sa+qBnlxZx6wz6Eg/4COM312nAD7leQ696Y/7vrj8l/vOh/M8W5/UA0YwPvpFXXxwMAbno8PRJrwNdOjQ8HCVDSDkcxh/XmqZMY78tKhLbhAPhaUqSHcYYOrQWdZh/mbC4983wRovzgJMlig7wOIoV9IJk61cYOA0IyiMEMKNzq02BpuZOM/uj8heYYRMHjzD2s1JXqzE46aI57RLOV5E+gVhqJ+FTH/pf1Gms9ApB2CmDZvtMJBkVHXlyk1xgIn6mTDyQ0BzKcYV+AiOj/ZG5SAaLxF0sxctCbvNJOs/DIXxMF7ox0KA7oyDKwyWcU8ZfrqVh37BQdOtHmNqgWUanrlGwRNnKzupzL7Ox+StVm5KqSLJczS2ZKlu+Ocj2t0u1e1s6wadbdsBnY5o6jLCt4v3AAAgAElEQVSngtZGqatVuvPmlxROmSPfoEkKBRdo6HHjlJ9ZrtSEWUK9NqbgJPkxFNrX50zrNwsautuCDSyy9cD6OKzxJcy1F6UktNxeUJdOrB93XmkPukIX1gvt+U6cYvSGntxnfofGTbB+sp7oJwxraNw4q5/1y7rKSppv+ccXorqbYfPDPE0oWq+k46fuUb99+ETXEeuJfCMjKy0f47aXMQOp3nkfeE9oj/mnvzA++mEMIbfa5pv+DkbqiDgMgXk/0uPznTBNY/PXdL9/9GtQ3GijL+3D8KALDLEwg4+nefb+sF4ZJ/TlCgNkXbImGRfj5H3kOeUMSBPmGuDwvkM/nnNNPH6S0d8+lGBgfsBuofElxs97AH2hD/2AnwBM0JN8vNfMn9se8wEjpt/M+8Tik2w+qJf6mG8AkfqO9PigA/1lnPBDPhjd9Ub79Bv6MQ+sc/gF+SYUnmz5WB+UZ/0wPsbrrjfeB8Y3qeTkbv7I+KAL6478/M36j62f+/AX2ud9gE70a3DcmO71Zh9uvgVWL/yL/Mwj7fF+8L7TBvTjN2AEgDpgx0e+oz7uXdJJmGuTygCYPCr8zHm/1jV/fl4P31Gvu67frLPXXmgDZ7Bf+8zlemhDnZ78V5tu++dGrVt2gb1EMLoLv3SV7rzuLT11f7uuu/xFfeSMn8g/YKot6m/+75WW/+kHOnTz31/Tycu/6iwi/0Lx7F+37NBj9zTrhr++rDNXf9sW
HkT54dev17V/eUEvPCrrC89Y3DCCr3/2Ct19wxb9++4mXX/FSzr35IuMwEzeFz7+ez14e233sw+e8gMDL8ZAuXtu3Kr7b915xMcHMLrjo5+M5dQVX1dGlO4/v/BW3Xfzdt170zY9fm+L5k78gC0WXsyLv3ub3WccG67ZpMUzPmoABIj+5Fs36bG7W/Xohlbd+rfNWrXwS46qLalUv/nxPbrzuo269+Y3dPM1T+u3l1wj7ZKlzs4uteBBoA61tO1Ue3ut2lobtKujy553NEuPPbBNt1//qlYsOd/UapHUUv3hlw/oln9u1EN3NuneW2o0fcy55tnGS/ODr12nR+9q1BP3terqy56zNZEyZKaB/WGN755mm79b/vG6qhZ+xugCPS/96X02b8whdJkx5kxbuzCnm/72qm6/+g0993CXrTPWJesImv3td09afTxnHbLWeeFYL1f98RlbE6xt6oRR8bJS541XvmJ1MkbW/JwJ77c1T9krL33C6rz1qv/o2Yd2GQNgblln1PnAbTW2tllrzCmAAjP/0yUP2bxTJ/WznuknjJf3g3VNmSM9PtpjTLy3vO9cGTP0gkkzLt5nrqxPxgzTgjb0kbVLYpyAKeMm8R4/cmeDJX6798lDXrccdVCXMdSk+dZGbJvQkjZheKwD+AA0YZ5YC/ST/tJv1gMpdgzkceeUstRBXdRJ3bFtHYnxsQYYO20BDqwR2oQXsHboBzRhTTEPrAfWJ2uCcUEb1qI7ZsZHnaxZyrKGWcuUoU7WODRhLbH2WUu8Czzn3aBO+DzvDHWyxpgD8rEWKMdapH36wjyxVlmzrF3WMGXg18zj3y592j6ckLyQLn/9w/ui2inHdt8r6FCZu1gYIIlOswiYMPeed3Voc1TRga8Z26ezTBEcNXyOupEvj+zAAmUH54iNoBNHl6upVupodVRrrW2NakeHpiZ1ddWbTq29tU0tjbu15Y0OhVOnqSBUqkz/HJN0QoFFIuFQgE3HEkZNpIiopHhU0SW6jr0+HYVr1pub9xRPRVWKNOeo75bo4Q1NUZUs2rHK3m06iJmIw4jFSDtUgDSDuGUvrctUvOvRR4+3gY7jsYc+1gGeecrPWqB0/zhd8/d71NUuU6/t6sLI06Zdu+vV3r5Tu3e32P3dHdLPfnC5QsnTbV8PzgOo09zUDTjolvcGHW99HH3rAwbvzYs3L0dqHQSWGE4Mi5toNh7sV/fdVGP2IMfRoE+bTqkBDYwKvSN6uYc3NJrBztHbRb9mA97VXuCjig5uRIJliiRVWnI2yEXBh4+I0BINPb5Yp679hIEOrtI4EOxWq3btblRnZ726drWqCxfqVmnWlFXKTp2trMB8B2wIoUMKIOGU9SRAJ+oBePTRxfFc9Prl0QEDvLcOjtw6wGaHvQ6HFLxDX3hE5lCAtx3A06t6jYd4LOD6iDEIF75nH5S56DkeDI5rMc+dCfSuRw8dlprnGt5rDujgv+8mR7QlGkFa0jQVZs/Vqy/Uqg2hZvduNTbVOK5qalPXrg411Xfo0QdeVppvvPIzS20jaF7aSgt1Ew5UOJJNVLox8OkGHW89HD3rAQbrzYc3H+/cOsBxB+cO3LbxXuOK4ALY9KleY9MUex5s93gCIT+q9c8/vmSbqvB5Z7ORoZV3PfrowEYwNtXGgk7SKmW7ybdcKUMXmOtzum+yfvTdP1kkgt1dUksrNp0Ode1q0+6ok8GnPvptpSZOVkFmhYKDMdJWK+xfqbC/6u3gEwWdPb5k3HAX3tV7bzx+cfTxiyPwXvpPnCE83aaOONO0YxMKHXd3+AJ7rvbgD9G9OnGADrtQbUfyMOI/rdTJFd+xHbEGONGM3u8oAB9l9OgBneWKJK1wokwnrlE2KWm1MhKWKR8385R5mjmpWi2NEqDDv85drWpraTUgwt5TmI1ajSgEVQoOwgi9RmE/wEPqAR5Tt0XVbt66ODrXhTcv3rwc8TXgL7d9QrhRI+HgLs2+LmcP3CmmNesVdAgY
R/A2AsBhDyDwHPcI+sbVDaLnXZ2AlkcXHXoknRxflSKJqxywSVyn7MSTFElar5zktU5wzpRF5sl243UPWggcA53OdnW277I9Ov+44g4F40ebLQe1WmYiGxfXKexjhzHg40o8FQr5l3Wno4seR+MceX3y1sh7dQ0QKIDoFUQ4cCJo3HntVjPDYM/pMyKBxeSJBo+0eFXRIIXENCLOD9FG952cIHvEFnJSb/mJNdVX6i2/d2/fNO+hjyvpvB101imStFYFKeuVFr9YWUlzlZsxR5/4yNfV0cJG0SZnoyiqtgapctEZKgjNV2biHEWSlysvyAbTdYoY4FQr4nNAh34BOu5X1IH208vXM2d70iL2vXDzRN8l7DPd7xX33OdVNh/OO+Xe86499PFo8c7QwglJxQZUIhoQleCRO5rNN4C4gU6oql7C4GD8ITOZMEISW+fH37jNrsQHcplLX1f2ieT4HS8q57dr1MabCpXPcuX4VirXv6o7oQZyE8wyx0ceLx0KDXIDlbLkX+HQ17daub61TvKvUlHqatusNblkrVKGjdPIvNmq29KlzmYnNA7bdV58okaFGbPtMLdIgNhbZQpbZO8qZw79zBHMcbljPzI7EvMedWTw5u6Q1i+gEUogHItjO8sOrFB2sNLOLHLc0/faF4VDB2pO1J2+tSaBOo4jPR8Bfb2n3n2PRv/1NeAvN3XaQYMOngfs0SEWEN5r6ONuu+pNC6AHCPWmk+vuPNFTfWxQBHRc4KmIeQGrlBdYaaDjggzgkxeotsRvD3QOD2ydTaG4TDsAj4qNBNAbUPiWKZxYqoK0UqUNm6i8tKn606+utQg4bbWSWqTvfPF3yk2ZrazE6coNEml4oc1rHhG+o67YXLtBMRZ0PMDpoctB0gLQQZrM9q81dWZafJnSE0rt/CIO1MvPWBazIZe9UYDOyqjK0wOdbj50lNlZj5l++Z1YcgetXgN03EIE8cPX+rUnZUYg817rJUroHkR1n0c9I/ZWsXEwEEclc7AYid/O15lzymI4iauXDo0GTgh01KJu+W7PNfNgc05IJHI1ITNyUzkFdLoqF51loNNRLzVtk0bnLVVOylxl+WZaHCjiP6GTZS45AMo56tqdL+esHFSvTuJvb/4OlQZ4COalrlZBxhoVZFQrj+jZyeXRwKrzohEgoptxA8tjJB3HycOxl3hf8XvwJA+E9quh+q/Q61AdCYhAAJPBtx1pB1Xbrf/YZKHEu6Uc19Wu18nsMZK9XY9YYS7YeMRxjgP2I853oF7c6TA0vb2Mp489OJpUOHszXH0/Eg4SZDQBGtCb2E/56QsVCkxVRtJ4PXz3a1K7dOcNL8g/cIw9y0t1gqcSZBB9LKDSuxG458Csg+urN7d70yv+uOkWPDUtnjBDZXZoXiRlqW3GzeScq6hrurNXygUd7GtO8kDHA9z/CoD0ytv3Q1t/uQ7JZdqi9SYTzdg5jwRpp3rxl+xLty8/655BcuhbrFHT0ft3vwj+cot6TNhzVHgwMyQrAgkScgfbgW1m28NY6jokeNc9jci90KN7IyA7jqOq0Oix0a40yb4r5pZItvlppQoHZigjaYIuOP9nplo799SvKQfVmkk5BHJcaPODrY8zSuzDI7ogu/tj7bIJMZq8+dvL4N/LXPVGI473iKy0fVQcnEd8O44GJ/oDEbxzU1dE90ct77l2u64793rexf0wiENhKl6Zd0Zi6K90jp4RddCbQznTAWkHCYeExMPhVUQmOCDQ8a1UxId3k+PhFAs4FvI6famKMpeqJFSh4qxyFWQsUm7qfFPnRJLnKuSbZ4FFASQCjHrXg6ND2D9XTuo56gFfeTdh5CPic0H6YoX8M5Xpm6z8jJmaOWGlHrjjVWUnT9Xo/DIFBo1VbuocpcVPtfoK0stMOkItZ3PC/MQm5io2efN3COt3ntJ905UZmG6nu7KXKjsFiWeJsnxl5uruOA7gPBBNqNgsQoQTJcIDHQ9s37U1EGPTOcgwOM6Z
NRxyRNgbgGf6qLOjYXCi7m59qtc4+wJPGncToRs2hQjESyxCcaZ/llISpigwdLyS48crMzhZ2WnTLYVSplqcL2J9ZafOjf72rgdDj0j6DEXSp8uuabN6aGgMbJ4yfDMtDE5a0hSlJo1XatI4ZaWMV0ZwtM4+9bMKxJeoJHee0nxjlR+aafOTkzFTheEFCqfMiqY5dr4OZ+y4yZkv5sqbr4OZrz3pNVPh1MnRNM1obcCTvNgkHTzZusGmW8KJhiSKxsGLlUTfNebTX7/UvX4fniTn50TS1TrogJ+ch8IXLGAD6KBW+cFXbxTHnHarbPYBOiHfKoWSVourvSB8hRH+PnmhMSgiFgMyIwtKtWzROfrfT/1Qv/vVdbrpmkd05y1Pa8NNXjp0GjypDTc/Fk2Pa8NNT/bQ88ZnteHG53XT1U/q4Xvf0M3XPqb77nhB997xrG678VHdddvj2nDzo7r79qd007UPWh333vGMbrvx39pw8+O6+7ZnddsNj+v2G59y6rT6qHOv5M1fD80PmhZP6r47n9Vf/nCTPvfJ72vJvLNUlF2q9KRZFvuOyA/dtpxuCcc5UsJxqebU0fLDYxwe4/Xod6hrwF8ePUL8II82QL3GqX+4S7uSzoarN5tqbf/ea5UWKoV9BtmBVcoOVikweLZGF1Qr/sTRBjaR9Kn60md/rBef2WHHITfslGq37bbzXZrxnqrz0qHTYLea6ndZsE671u1+Gz2b6yTSnm3sVnND175TveTMD3W+vV7O59mzTu/vQ6FHe7O0Y0urGmtkp7s+9ch2ffbjP9Oo/OUKDpkp/6CZwrEAh4Lc1OVm70nD8SfFifztgY6nXnvXJFx/uR1bzim5RCYAQ/7w84ejAgx7eBBketkc6hjzl0Qjgzpn0T9+T7vMzbaPQj2D5KRDZ+9AZmKF6aFLsldo6HGjlJs5Sx886wI98/hm7dyyS7vapLYmqX5npzExdsE31nWqqb5NTQ2tXjpkGjSrqaFRTQ1co3SEptHU3NApkvu3S+/mxja9PbWouXHP5NTbW91OZGq3vu623T5415756IsW9W1qrGu1E1vrdnTo5ee3qnabtLtNuv2G53Xq6i9qZF6lBsaN0kiOeE9coNT4+eblhrOBORx4ko4nqRyqpHK45fzltq/T9QnIT63QtJFnGfjgE9BnlGkOcePccwzPFAKxrrnsJQ0PrVZG/KJekSoWdACbcHCF0hJLlRkoVWZwtnKz5urLX7hYT/z7DQsm2dos7dzRpG1ba1Vf16LWll1qamxXbU2jGhtJ9V46VBo01arRTd11uHRtVFNTU5TGPfdcejc1NexJd7eepho7+oDjD3pStB1rg7qavbnrpvehrt9G1eyotw+CzjZZDDzONELCrNkqbdskfebjP7KD+LD1hIILTPIhIGtGIo4hnmqthxd5Es87Touoeo3jDdCSgR0D4saYiYYjcwrTud+LpIOHWVbiQgOckqyVhlwnVXz9gFymLQZXsEoZviWKpC1VKGW+UpIm6ptf+a02b2q3g8EAmx3b61Rf36iOjl3q7OxSU1OL6uoa1NDQEGWYsczN+93D6A+EFjGg0w0aPfeamut6QGmP5zV6+zO3vR1qbN62Z2raEQWgWPCp9+ZvD2B26Xeg11q1t3aosb5F9bVNJhkh+dTtbDG1KQD01L836WtfvEQpCROUm75QgcEzVJRVrfQEPNzYaO0xW48G79Ia8JerOHOFbYcBcEiV8z5rAISU05dPQJxzOqiDUmwOJQwOx45SwGKv9YJU7iQDOrh2JsfP1Yi8lQomTNT7z7hAr75Yr10djnTT3t6plpYmtbY2q6OzRXxd79ixTXV1deroaOuDIfYwze6v+D0Ypve8my7dkkcfX9v7o5tJmlFp0/LCMPcDOrFt7q9+73nfa7yxXq3NbWppalXtzjpt27JVDfW12t3VqV0dnaqrqTdNwdOP/0drV3xUyfETlZE0XwUZRAGvUFZSVXTz7rvEdDzAO7ZB
319uB4CiKQsllRoA/e3SZ2zbTXHmqj41ZXEcUc1xroANkUIBHg7iYUd6X4agHtApV3ZymVISZqkoUq5w2nQ999QOoSrY/GadGupb1NbWorr67dq2/U3t2LnZfjc0wthq1dwcZXaHrabog+G+5+ttVGNDa08ylRdqrz5UXwAANHGvRh83P9dYyQXJJjZFv96tjKdec1WUh3dtNGABXOxco90dam2p1/btb6i+frMdstfR1q6a7U3CsxAvUDaREgU8O+B4jHbvi/MA4NgGgHdj/i2yDGDjRJnmBOr7b6kz8MEJra99nnF5KewqdxwJUofMtQIXfvEaMwLtX9IpV25audL9M5WdPkefP//nUqfU3IiU02DHInd0Nqu9s06t7TWWOnc1qWt3m5pb6rR9x+Ze7A0wNC8dEA0McFDP7HJSQ4caG9qiqTUKPi4AuQAfe22Oyd+2J1iZhBKrJooCFoBm7dKO24Y3Xwc0X29b183mzNHW0qnGhjrt2P6WmgiGp0Z1dG7XW5tfUE3NW2qsbxJebh8791sqyFpqR1Vw1hFbFbwo056U5woB7/jVX24R7EdmV5t2DJ+AW/7+hu3d2acjAeFp8GBjkw8iEpLOo3e2WAiUvrwP3MHhPZPpW6B0/wxFMmaqboe05c0W1da02MmUgMrWbZvU0LhdTc07tbPmLZN2sFm0tNarvmFnFGBiv7a9346kcgB0OGDQcYAHG5rZ0bqZ316gsweI7C0RIU0CLr2BzgH0dQ8pzMvvznFdTaMAHdRprS0Nqqt7S/UNm9TatkVdu+vU2LjV1G21O1r1yL9eV37WIvkGzlQkWG174zxJxwMdlx+/49doRALwA3tOyuA5uvbPL5vgkjxornlA9+pIQLDPMblrFRgw0wAHLwQO4tkDcPrwknHcNhcZ6Kyu+pia8LrZ0WpOA11dndqydVOPPtvUMlE1WKyev1td430tH/zXcpR5G/i4UkcsQz8Qmsbm53dsGRdoYu/xO7bM3s+8v/ek4b7o4QK4O3fkBeyjdrWmHWpprdXOndtVs7NRrU1S6bwzFRg2VQVZK5Q6rNRTKb0baiWvTWfd+Z2YnWjLcEIjxiaOBQQYSDp+Rt+HuLn7dJByEI9wneY8HVzeulFqH6CDei0nc54+f/5PzZbTUN9ubrpdu9tV37AjxiXXZVTuixXrBbWvF9N7duBMzKNVv6OV+8HQLWXynvTY1traG9XQUKdWk4akc8++QJnJM1SUXaUsAoN6DNCjwbu1BvwcVT1PqNdcwME3gKOrfSfM7Bt02NjjP3G67SxFyiF9YP0PLeAnZ6p0A08vA0PS4bAp7DmX/W6D2ltkqrX6+no1t5BQD7hGfg90+h1D3EPq8QDtiMzffkCnoXGn6upq1NG+W20t0g++e5myUmYqL3OZBzq98CQPhN9BdaO/3LRjnFSA0IJ6bcmMT1jAaLzX+nSZxnstOHCmiUcWAj+tUoPjxhtKEX9tf6CTnbxY6YFpuu/OV9VQK9XsBFwaVVO7TU3NrvEZ4PFA54gwLQ8Y9lIJ9jNw3A/o1NRuVU3NDpN0and26u9X3Ks0/1SFkksVCu7/OHmPCb+DTPhYA0HbTsO5aAs1Nm+9AdCNf33dwGdc/inCrtMbfsQ5wT45ztiJvcbRBqjZRmav7dPlzV3ISDqo13gJnn+qXrU7utTU2KnOzk7hFu1JOv2MAXoA9s4D2H5AB0kHbQHqtR3b2nTPHS/aR1526hLlpnOAosdUPRq8S2vAXx49/HOpSTecVPDkfZ2GH91ncfWyzzOOo6pRseF94EYk+NVF95jnQXDgnF6Ryp1kF3SQdF5+rlk123eptWW3gU5La4MHOh4Tf+eZeH+j+X5AB22B7WdraNXO7e26/55X7SMvklbmgY4HuO/uR4e/3AAGwQWbDs5oeD6jaiOEWl/mmThcpmMDtmUmLBQBPymQOoTTRN8eOycWdOy0w5SZeuHphm5JB7dcbDrmhePZdDzG29+A4J3s735A
B0mHKB7EKayr2aW7N7yglKTJQtLxHAnepS98D+wcsPMTSXqRUofM0ZThZ5gDAREJHAGmum/1GmFwcHlzDUEjwtW67i+vmE2nL6SKBZ0s/0LlZs030GkmcnRDh3bs2KHWtkZP0nknmZfXVv8E9/2ADh6gSDqADoFz77j1WZN0ctLLlZ606N390vWY77FNf3+5UKkRXMB/4gz7jZkG3wAiEuyx7SZmrVgYHKQdQIfMVILLNFGm93CbjikUCzrhYKm5cL70bJNJOs1NuyyumhNMssbzXvPAoH+CwTs1b/sBHdRruEw3NXIEiHTLDU8okjHXHAk8SceTdFxe/K5cOWTQt1icVADY4A8AjiD9cDROn2FwABw82IhEQAEKrir9oiFVX4XcAWLT8UDHcxbwvPIOYw14oHNsSwu9fMy7/PWov0bVaxxtEB832YBncsnpZufBNNOnpIM+jkKxx1UPihu3Z4TpfWwO9UDnMBjOO/U17bVz9EpbHuh4oNNfgScq6RBlGrDBloOWDOGFfTp9acri0Mdh02EnKZmx6YzLP8kcCfYb8NOTdI5eZuYBTf+YGw90PNDpx6BDMIGwD2+1BRZY4LG72wx8CPiJXac3RzSLMs3mHgAHpEK9dvGFd1iBvnaUumKfp17zpBxPtXaYa8ADHQ90+jHogBm4THNCAcDz0O2NpjXbp3oNlAJ0OAuBYG2+E6bbBh829xyI95qnXjtMpuNJJP1DIjlS8+SBjgc6/Rh0cDxjrycRCQiDc81lLxkA4UjQ13lsFpEAt2mCtBEdlE0+iEhIOZ6k4wGKJ8kc4TXggY4HOv0YdPAF4BA3QqehasM0g7YsZfA89RVGzdRrgA7u0lSAF9vNf9toYXDYVdqbTq5bvYYhKbhEmcFZeum5OtXu7FBzc6sFKGRDmxPsk5fWicfWE/zTDQLaV+j8I/yiH6mvVq/eY1tqOZT5P9ZBZw8nJcL67J2ibtFv26Tu5tvbbdq9H3uNyePWY8GNoxvfu++VOwC49997gEIv9br5+8xHmb3G0WuZmH66+Y/ma9R7DZ8ABBakHoAHU01h+go7k603/IhLHjRLoyKroyJRlbm9FWVwrerT5c0lYMi/TCF/uTKDs/XyCzWqq22yw9nq6requdk9ICx6mqWFa3ePP67tOX2yG5Dem0DT2tqq2tpaS5xst3XrVnV0dIhI3DU1NXZ/9+7ddp/nW7ZssQPwtm3bZs9aWlosH/TkH9EeqA8JhLo9SaSfr5tjGXSM8RLZHmZfqXBSpcKJK5WdtCqaVihtyCJjYDAzPojhSymDFiltyBLlBlcqfegSFaVXK+JfrtTBpcoJrFBe8iplxVcqO2lF9GTVKNOPthcJlioSXCiu1OukxdErQMQ9/l5s7cPvAifOVULcDI3NOUu+4xYpx79KxRlrlZm0ROmJC5SeuFDZwWXKSalSekK5UoeWKSd5lcL+KoV8yxXyVxqvxA4eCiy2xAe7M/Z+BjYuEEbpiaSTdPw0m5+ZY94votpgnmGuegUd3KXZ3IM9B19rXN84IwGk2m/sNQOdZcoMztXLL+ywTaEtbTtV17DZJB7nGOXdFqWgkYPbmrepsXmLEx7HjlUGkN7bjBNA2bhxo4EwIMG/1157Tbt27bLIDfz98ssvG3i8+eabAmQApu3bt1ve9vZ2ywfQuIBDHkBo50735NV+zngPRUJ4r5Q5xkEHVYwxeSQcQIIjuKMp4lup3OAKY15oYlD9W0yv4AqVZK5Xfkq1gQLMzRicMcNKRfxVCidWRetzgcf1pFpsYOOADmeGuWDjgIzDJJcoO4CWZ7HxwdGRkzUqsk6Z8Us0OvsMZcVT9yplxi9VXlqVIinONS1+sRJPmKv8tDUqCZ2swXGzFPavNOAJBwAe5yM9HCyTm/o76DAv0Gn6qLPteJw7rtliDmnYdPBg6xV0OEuHg9yILM3k4zo9sehU08lRsLdCe0o6HujsS9oAOQAd/gESr7zyikk6XF944QW98cYb
9oz/ABXyIAkBNjzjHol/SD//+c9/TEoCtNra2jxJp7+Djwc6jv3YvzwKEqsU8a2JppUKJ1XYV7MLTubgFFyugtRqAwE+mPPTyruDFrM3JCuhTDmBKoUSK3qXdAJLHODhEDz7Wu9FzRaVdBLeN82kHEDNd/xsk8YKUtaqMHWdgVteWrXij5upzKRlKsxcq8DARRwVRikAACAASURBVAYy4wrOVm5KdRR0osADGMamwPIetZsrPfSnazTKNLE7AZ/E46bq6fu7LCIB84Tg0k3fmHHFDQ+ttH06eK3hTADw/PmXj5lqra8dpR7oHLhkAVAAIAAJ4HPeeecpEAgoLi5Ow4YNs+vkyZN1+eWXm7oMaWjTpk1WZsCAAfZ83bp1e4ATAISkQ759AZ737MDn6V2j1bEMOr5lPQ5LZssBeBzVWsRXLSSdUOIyBQfMswj47CFEdeN6RWFDCPvnKjBovFKGTlZ2YIHyUstsr6GF8Rrac5x3xF8hS1EHKUDMHKVimKHL1xxG6ajdXBXRmNyTbMN8VsJSFaatNjBDohoZPkUpQ5YoPX6pSThcfQPmK3XYElOz9YAMwEOqVti3xkn+6qgNq/+q15gPTg4FP5iPB26tNxMNEmlf+BFXlLHcooS6E4o9594bd5rnAR4IvSGVOzmOuOhJOvtiWAAOtpu33npLRUVFBiIAzpAhQ+z3oEGD7Dp48GD98pe/NLBBJYc67YQTThDPzz33XJNqsANt3rzZCaja2mpHSOyrbe+ZBzruu3rUXl1JwwUdnyvxoGpbYXaTjGF4SK0ywEHFRtguTqssylyqktAC5afPUH7aTEWC05U8aIKCAycpK2Gesn2lyvEvVY6/os8USarUHslXrsgeaYlCiQvtXl6QepYqP7lSWfFLzHaUMXSZhmeeZjYe7ExIYMUZqw2gCCMG0DnOEVFATVyj7MR1yk44RZGk9VFJrP+CDntzmAsEFkw0v/vpgzZPqCP7OqUgLjBghoi/RiEAB/Xa3ddvP7AwOJ5NZ7+SBkCBKux///d/DVyQXj796U8L+w33b7zxRgWDQXuWn59vEgzAgursfe97n91fuXKltYPqzY4Cb25WU1OT2dA8YOkHwLIvFeAxLunEgiGqNFI2wBNNOAVwryRzjZ3XwkbE8YXrlDx4mlKHTVSWb4yWzjtJHzvnyzrv9C+pbPYZykmeofRhUzQur1o5gSXK8Zf1gI9vuXK6U5UiSctjUqUiviXR5IBPtm+hxuauku+4aQolLFJecrnVmZ20WEVpqxQ4YZFJZDgw5AQq7Wuf0GKpQ2dpRPaKqG1oSVSNBvCsVnbiSQY6djVnh/4LOjh3YJ5hjw5ea5hmwBBMM31tuYmbUHiyRoRXiXhro3PWGlptuHqzGc72GwbHA50DAh0kF1RogEgoFOr2XsNhABD58pe/bBINEtA999xjYIQ9Z+DAgSYRoV7jNFakHyQnAMm1/3ig44FOLON+7/zG42y52WVSBy826YHoxSWh5SrKLJN/4DhVlX5QLz+5Ta01Ulut1LRN2vxKm376ncs1OrdUQ+NKlBOcr5wAEs/ewFOlHN/eabkDUpYfCalM3/jsH/WXX/1LCyZ9UCPDq1SUzg782VZn5rAFGp65xqSegpTlyk9ZqvRh0zWpeJUWTDldId805STPUk5wriJ+wKncAbjE1cpJPFk5if1f0kGlBugQkWBk9mr7MEBrhpTTV3CBuMwERzzi1FDQir06M0afY8Y7V3dqKrZedJ+eem3/DA+AwA0aVRlSztKlS03dhqTCs66uLl122WU68cQTDWC+8pWv2HPsNoAQqaKiwoCKc4oAGYCqrq7Oc5nelwTRX54d45IOrs5OwpHJMejbxnSzwSxXcfpJ8h23wPLwPC91sXwDxqoke77+dfvzat7RpX9efr2ql52s/zn3c9r0Up3eeKFBHz/nG8pLnRFl+lHgMaknqm7zIeFU9qjdACDUcIAUCZAKLNGd172ouk3S+qVfVG6Q/YxT
lB4/SaMieMktUDhpgTKGzVFxZpkCA8ZpYnGlnn5wu3ZulOZPWa2clInKSZna0w/qNUlrpXJ8K/u9TQcNme+EaZpUfJoBDt7PZmvzRW1m3erTHmnObDoAD2DDBh++JjCyIeX0hVTu15QHOvsHHSQSjP6uquyUU04x0AB0eAaA3HTTTd0A86Uvfcnu8YwypNNPP13PPPOMzj//fH3wgx/U3/72N5N6sBW5e3UAth/+8If6yEc+og9/+MO6/fbbDaB4jgMDqjwA6+9//7suuOACqwc134YNG0yKojxtIn1dfPHF+t73vqdnn33Wyv7mN78xaeyhhx4yu9PPf/5zc2KgTlR+999/v/785z/rF7/4hdXBeBkXqsPPfvazZpP64he/aH8j9VEGFSL5Lr30Uv3qV7/SXXfdZQD8gx/8QF/72tfsOc4X73lJ7pgGHUeacVRpPS7Njhuzs3cnP3mN0ocsdTzFUirNcSA1frxOrf64aja3661Xt6s4XKKUYamaUDxVnzz3c2reKt157aPKCY5XOGmCcvwzVJA8X2MjK1SUskxpA2erJHWFIomlymMXfWSNUk+cpxyiI6fPV1b8RBUml6kwZbE+dsZX9bNvX6aZI9crkjRHoyMLVZA6XVnxkzUytEShhCkaGVqo/JTpmlyyWJkJxdr8Ur0a3urQomkVyg4UqThrosbmzVH60NFKHzJJBckLlT5opgEbHl4pgxaYxx18tTBtlfJTVpjzRF5y1dHt3eYvjwb3nKvxBSebBxuOBHiygR19OhLgMo0HG4CDTo7DeJB48P7w9unsH1T2xxQPF3SQgPLy8pSUlKTExEQDJ5wOAALUbQAPHnITJkwwacn1iBs6dKi+/vWv2zOABwb+mc98RvHx8d0AhxTF34AF6jvq+uMf/2hAl5CQoG9/+9uaN2+e5U9LS9MVV1zR3f7dd99tAAXwYHM67rjjNHz4cJPAaOvjH/+41eN64CHpAaCXXHKJgQuSHGBFHxjPt771LVNB8ndmZqblQZrbH337/fNjHXRsbw57aQCd6P4Y2yPj2EFwT84ctly5gWpzwc1NWahQYJLOOeUTaq7t0iN3P6r8jAJlJGWqODRCZ6/7oNp3So/e9ZzyU8epKGOqCtNmKGPoeAOIUPx05fjmanR4uVIHTlXaoEkqTFmg5BOma3zuCuUmT1AoabRy/fPsWW7KSGUHSpQxdKJCCdOUOqRQE4twUpiowIklmjEam8ZY5aeNVchXpNRhIf3n2U1Sh1QUKlZBRrEGxyUoZUhYJaFJGp0zS7PGVGlKSYUyhk42lVwosdyABocJ9gIBNvzuD6DDnBGRAPUawT//fVerzSOu632CDt5rBGxzvBCWGuDcc8MO+U+cZR4jrsjrSjexV0/S2T8oHS7oYNcBeM455xyTdHJycrpBAGkCyQGbEMz9+OOP14UXXmgMHBAApNhkCpg88sgjVg6mjuSEtHLqqafaPbzq2KDKP6QT6iFfRkaGXQEM2n300UfNzgRIIG1RNxtVR40aZferqqoM3GgrHA5b2ZNPPtmkoxUrVtjf9BNpCqBkkywgiYceoEa7gCttYbty1Yn9Hlj2peo75kEnGn3AQGexuT3j+syRK3h+5QUBnRXKDawxKSAvbYFy0yfrvHM+qY7mXbrhypuVn16kvLRCRZILVF22XrvqpRf/vVEF6WNUPnuN/vN0je6+/ill+8arKH2WNj7Voa0vSrmB6frTxTfriXs267yTvq2ClFkmnbz65FvacNXz9veVv/uHNr34pj6w7gsqTp+r//vhZXr07qf01U//SDddeb92vtai2o2NOn3l2RoeHqkn7nvCAGf7a1tV+8ZOPXrXwxpfOFajckbp+ituVP2mZrVt79JzD72sj535JYWT5pmUg8cbQJM6GEmK/UlLj24px+YL+1apwBBcphFYXNBhn043dqBiizHPxAUHzrSAbXghADygFQfxoFfdo2BMIbcCD3SOPOjA/Kurq00lBSggvQBE
3H/66aeN8aOy4++PfvSjBgIw9I997GNmR7rqqquMyaN6g6EjbaBKg/HffPPN3XuGUG/x77rrrrO6qC87O1tINKgC77vvPgMKn89nz1HhYZOiLe7Rp29+85t2DxUZ5ZGinnrqKQMQ1H2AC8BCXewxQn0HOJI3PT1dt956q91/4oknTB3ogU6N3tvHVUe9udibw54dC08zX5EgqdTZV+OrVsaQFcrxr1VucLkiyXOUmzFeH37/x9XZ1KlbrrpDxVljVRIeo9zUYq0oXaOuBumVJzfa36etOkfbX2vU7Vffr5zkURoemqK6N2SpKGOyvvflX2hXrfT9C36v9GHDVbFgqVp3tuiK/9ug1MHD9cg9D2jTS69qxcKzNCZnoS656DdSp7Tx2R36+YW/1xP3PaWu+k794ns/V15qjj73kc9o0/MbDXAu+ur39IGTzpF/QJI+99HzpaZduu6Kq/SDr1+oN198VW++uF1FGaUGNDY2P9577ANaZUDEJleX1x6V12jsNRwJAByw47c/vt9cpvFe6yt2Z1zK4Nl2ciigw0YfroQyQDTyHAn2Dyr7+wo/XEkHhvzJT37SVGkw+T/84Q/GpP1+v20oxS6yatUqY/Djxo0zEEICAlBQWRHBgDzYVqgLiQLJBwnlwQcftHvc/8tf/mKShQs6SCSo3QAonB2QPFDTzZgxw6QTHCIYO7YZV4V2/fXXm+QF+CGdISGhAkTdFitpXXnllQaiSFeADe0jefHPld6w+9Dv/dG33z8/piWdXkAneY4Bi4WpYSOnr1qZQ1ca6CD5hPwzlZM+Vue9/0Nqa2jSA7c9rsK08SpIHauSrLFaW36KgchLj72inOQ8rVt2kjpqdulftzygzIRsRQIFUoPUsVMKJeVq2dxKNW1u05W/uVHj8qfqu1+5QHVvvamPnPp5jQxP1YbrrpF2tWrFglMUThypKy+9Qp11LfrKp76pvJThOu/Uc9W6rV43/vVqZcSnqCi9UG8+95Zat7ZpQsFkFaUXa1T2SD1654PqrG3UtFGjlDJ0oG65inraVDH3w6ZSSx9aauo0VGques3Z49NjgD/qgCcKOrix41CAPwCx17gCOuxT6pZ2YoSWONylOW4U448LPA9vaDKXN+w6vRVyB+9JOvsHpcMFHSSB5cuX25c/6ixAAcCBUWOAR732u9/9rhs8UGfB6En8c8ECkEAaKSkpMdABeHAwSE5ONsDC1gKTv/POO4U9iPpxKOAfUgn5XQkKaQW7C/355z//adITUg0Ax3gXLVrULdW4jg5sjqUcqjk89JCekGhoB+mLiAzcow3KAHb86/egsi/VGs880HH2rpiks9ABnORZjrQTWGpqNTZT5gdPNg+2kH+6cjPG6KMf+qDaGxv18B3PKJQwygBheNYkrVt6ujprpJcff9Ukj3PWn6mmrfW6/ZpbNLZglEZGhqttR4tq36jVqJwRmjluiroaOvTQ7Y8peXCyrvj9xVJng1YvPk25ySW6+5artKtpq05ZfrayEop1+W/+qK7GJp2+6kyNzZuor376C2rcvFWXXXKpxuWPVk4gVy898pratu7WqoXrFU7M18jwGD3/8DNqeGub5k0er9H5Gbr68l+qfvMW/c853zfJBrUa+3zgrUg72HaQeFxee1RezTOt5+RQVG0D48aa9xpSTp9HG7iSDm5uIBQ+1hiHxuad7DkS7I9hHMDzwwUdmPJpp51mgIDEccsttxijhoETwQCJgH/Lli0z8CD/6NGjLcYbTBywQNLA841nJOwtkUikOz/3fvKTnxizx1bD3wAPkg71w/ipAwADZHgOePAPVRqSTmFhoeWjjyeddFJ33Wx8pa2UlBQrB8AgEQEsqN6QiNi7BBABevxzwYf2PNAh0Gudmhrb1NQg3XLDE4pkzFUouVRZxA6L+YLsf7+XK5sd+myY7AadWYqwtyXZOe4YtZoLOmwSzfJNU35ojD7+4Q+qA7XvzU8oa9hYRXzjVZIxRasWnWKg859n
/qP89BytKF0mNbXpX7fdrkhKqlKHJUltu9S6o0GhQFD5GWl64bHH1bi5UaPzSnTPbX/Va88/oOGh0cpNLtS/7vi7Nr70oE5feY4mFs7SL3/4I+1qqNP5HzpfJaER+tyHP6nW7Tv199/9RSNCw5U6KFP/eWK7VC9NKylVYcpY5fiL9MQ9j5ta7h9//I3+/qef67nHblFXY73OO+0bBi6ADKDD1ZVwULkd1XPqL7eAAvgEADjgRuW8z5rwsk9HAqJMUwjvAyQd3KUJZ4DLW187Sl1CeJLOkZd0YPA4EbBZFMZ/zTXXmB0E+8iPfvQjAwIkDhj1+vXrTcKgDMb9l156ydRVeIGx14f7MPjVq1frrLPOsvzYg6gf12nqiHXfBnQoC5C4bWCPQWICaPA+w4Wbv/FyozzA4dqYAEbawbsNIDr77LNNFcgYAOMnn3zS+oQa7te//nW3HYdnjBVpzQOd9zLoEFkaR4IVxmtibTo5yYuNCXPUQVb8SpN4UPdnJE1SftY4/c9HPqrO5lbded1DyvGPUUnmJJVkTtSyOSvUWdulN194Q3lpEZ20fJU66xt05w03qjicrXAwWWpuUePWbSoKhTU8kq3r/nqlWrY3avKIMXr56fv1xotPaER4rKnnbr/hj9LuHaqcu1bFGRN15e8vlVp26vMf+7wy4sP66BnnandDi676w5XK9oVVnD5SG5/apl07pNmjlyg7qUjj8ybr9SdfU+3Grbrkou/p4u9/Td/60sd08fcvUuX887rdpXGVxo6DhAPgoHJzee1RefWXW0CBhPdNNpdptt0QWMA5l62yb/Ua+jjO1GE3KWhFwn0awOnLEOQSwAOd/YMONgpsMXzhw/Q/8IEPGCOFicPMYdI33HBDt8cY+3Swk/CP/JQ788wzTWJB6kDSgcnzjE2lSAzcR83Gb/bg8AyGj/0FSYdnlZWVBkhZWVkGJNh0YOr0DSbPb0Dj6quvtvIAgQs6jIF/1AUI4BVH/Ug9CxcutN940FEekFq7dm03+LmSEmVxy+YfDgLUSZRt+uraj7jvAo67b8gDnfcy6CwzYIn9uOW3/W0SHGfsVMh/whxT2ZSElioUmKzC8GSd/7ELVLu5SffefL9CvmyF/WEVZxVrfcVaqVV6/ZlXlDrMryWzFkgtHbrvlg0KB1I1ZeRYk3Qat+xQJDlTYV+GfnXRxWreUqv/OeeDattRoxv/eq1SB+aoKH24NgA6bTt0cvnHlDFotK7+y2/V1fSGzl59lkaGxutT535CXQ1t+uMlv1NuSsT6sfHZjWre2qqZY2crJzlHeal5+s8zr6lhc61KZ8zRqLx8BQYPUkl4pPJTF3Tv03HVaXixwWNddZvLb4+6a1S9xr4qbDpoy+68dqtpzAiUGjuvsX2PIyw1AduQdDhqFBXbHy9+xNylvaMN9g8q+2OKgAsMFzUSzBXXYRg+3mMwfJ7jOQbzhZkTo41/2EC4R8JOwz8Y+AMPPNB9//vf/74xeernmev2fO2111oe2gQ4AIM1a9aYYR8bDuBEGdpGRYc9B6cC6kCSYY8OYPfTn/7U6qc8+UkAx/jx4w1U8FbDww0AwjkA4AI0kHRom74jsdAedCI/fcapgH///ve/LQ9t4UhAHtpi7w9tAcj7o2+/f34s23SiofFxjzbbMSo2i1DApkgn/hqMmFheBellSh4yUWkJ45SdMkEry89Se5301INPqyRcaKq0goxcnbryJNW/VaM7r79dY/JH6pQV67W7ES+3GxXyZyh5iF/bX9+iXfXtZt+J+PP0wZM+pI6dLfrl93+qjp2tuvDzF6kwZYLCibl66K5r1LT1DZ1Z+QUNT12ov/7uF9rdvEmfO+9/NSJzss5eg6SzS1f85i+KBMMGPBuffV0Nb9Vp3qQ5yg6EzLYE6GBLmlg8XiOyS3Txd3+mZx58TeuXfV4TCs5w1IvRSNiADSo27DyxzPqo+02EiJRlGho3waQbvJ8fu7vNfiOV9uWIFhf2lQq7
DijF5CLpPHlfp0Uk6CtKqDt4T9LZPygBMDBQ1Fow1ylTphjDxV7BfSQDVEuu8Z6d/UgBfPUj0cDQ3SjT3HM3aKamppptBCM/nmdz5841Jk7lqOJQacH0y8rKLLgomzX5G28xgIG6iHLAPewzbAoFhJC6uEdf/+///s/6AgggfVEO0EHyYn+N2wZS0YsvvmiAgVSFOzV14ARBWZwDHn74YXNa4D7jBVw4U8iV2n784x8bOEEPwIa2yNPvQWV/dj8PdGxPzt6gw7EGAE9+CqAz1zxsE48fZ/aevPQZmjSyTDs2duitl3aobNZSZSRkmTrs1z/6rdQu/egbP9GEoklaX3Gy2nZ06Par79DUkdM1OnesebOpRQoMSFVRxmjzWlOT9OYLb9oeG1RpY7IXKNs3XPfccq0at27RB6q/oXz/fP32Zz9Va+0mfe7DX1aOf5xdqetPv7jc1G2jc8br9ac2afurtapaUG3OBsGBabrnhvtFG4unsxUlW3ddd59210tryz6vcXmn2R4dJBw2iuK9xl6d/uAyPTZvvRKPm2LqNfDj5r9tNAwpyXI28/bmiGagA/CMyV2nhPdNMTEJ7zVEIw909g8qB8IUkWhg0DBnGDHM3pUeYP7sw4EZc9wBjJ/EP+5RBnUVTJ1/3/nOdwyIqOexxx4z6QTpBZBAxUVZ6uQ3+2dmz55twIHUAoCRD1ADPJCwkGpoB+kIZo96DSkFDznAARAAOLgCoOShLsCCqNiADxETaBdwId83vvENq5N9Qa+//roBB6Djtk8oHsaPes094oE6XQnHVfUdCG37fR4PdKKg4zhFuJKOgU7SCjs0jS9+IuBnJs5TQcYiZQdnKj9jli656EqpTdr4zFbde+PDuu2qewxwtr1SrznjSxUYkKGqBWu16bnt5lxwyfcutd+1G1v05vM7NDZ3mkZlE2UgX689scVAoXnzbuWljFdRylwVpE7VPTffos76dp1V9Q2lnDhNf/v9FVJrsz55zgUKJ4zXZz/0dTVs6tRlv/iHijPGaUzOVN3yt3vU9FaXNvzzfn3/gp8rdXBEX/joN9S+XXr0jmf1mx/+WdtebtTzD21XUfoyAxzABgkHOw7OBNh3SO4H/lF59Zfbaa5EJODICaQe/AEQXDj8rq+A0XHYdMbmrbNgbdhymNx//P65fcbOcQngSTr7ByWYNKoyd58KjJdD3FA1EfvMlRZg/OxvQfoBNFBLASwkPMDYq4NqCs806kByQlrCLoSx3s1HeJzf/va3GjFihDH+7373uyal4M5MGyQcCZCYkIL4u6CgoFs153rHcR8ggKm7IABQMB4iYdMHtz7qQYXGMxIqOnf/DW3hOLB48WKTqAA0AA+bDY4O1AEQ0hZjoT1A0VXJ9XtQ8SSdvhmnqddQrUWPjI5Rr7mgk+Nf5Rw/zaZJYkMmzVfSCePNtXpUzmJ987OX6PmHNzsea0/X6ZrL7lL14rMVHJinMblocMbpuYfe0qtP7NTj97ym8lnrte2VdrtHiJyIf4Kyg5P1p4tv1EuP1+iRO17RxJKlyg8uUEbCZF395+t0322PqmzqxxVOLNXvf36FXn76NZ13ypdVlLZAHzntq3r49lf1q+//XePyFmpYXLZWLny/7rj6Se2qkf510wsaGZ5j6cdf/5M2v9ChbS936aHbXtH5H7hIGWxXGVpqzgPDs9aadJM2ZJG5TfcHSQe8IIwaEg8bRNOGzrMgA3g/9xW70zaHUgibjhNHZ5kWT/8f08f1pZPzQGf/YOMyS9REqJyw4RAVAKnAZbQwYBgu0gL7aJ577jljugAO+d1zdmbOnNltI3HL3nbbbSblYIdBncb+G6Qi7Eaoy8g3depUCzUDE0cNBwDtfWopXnBsHKVN6iIqgKvqYyMqAEFfkGAARIAOVRtA5dptiH7AOCnPuJFY3v/+93fvJ3IBiL4RSBSJiLpc9RrPselQB+UBN6Qq2nTp+J69HvOSTl+gU22ebcRcI+hnyqBFKkp3
Tg7FHDCuYLWGxo1QUdocFWfMVmHaTE0uXqaSzDnKDU7VpKKlCiVOkP+EYk0dXmn3cwJTFBwwXOlDx2hEaJ6yk6ZodC6Rq4muP1NZSVMV8k1S4vEjFPEvVklWucLBkZo8crFGZK1VdtJSZSSOUnbyKIs2XZyxVIWp8zW1BA+7yQqcMEpFafNUkrHArpnDJikvOEspA8eqIGWOxW/LT56tUeHFivimK/G40SpIqehWo6FWY48OEg/ea4CPy2uPyms04CeOaITB4aNg1tgPmE0nJ7hMgQGze93naeo1xKNRkTXR8AVVdrYO6rW+dpS6BPAknf2DD4yUr3akBa589aMiw4sNgzsMH2M/GzX5hyoOhowkQIBOYqkRiRkJBNvO5z//ed1xxx1WF8yZf4AAwHDRRRdZPLVPfepTVo42ATzAgLwwbuw/Z5xxhjkWIGmhYgOU+IcqDJUXHnT0i1hrrncbAEA9jIFEOJ7Pfe5zFhQUyYd7AJtbF1IM9insO6gPkdRQq/GPOukz0tcnPvEJGxMu29TvAg10o//vWbBxJSAPdPqQdDiuulqhBOKuVVukaWw8MGY2HeJtm5uyxAJzEsQza9g05QfnKXjCOEujQuUKJ8xQSXqp8gJzFTh+rHL9c8R97mUOnarsxJlKGzrNjkugLpwVikOLNTZ/pdlUsFEMjyyQb+BI+Y5H3ba6W7VHXs7TyRgyw/pAgFCkIyJIpwyYZL+LUkuVOnCyitMWqzBlkcIJsywRWJS86UNni9NIXZUaKjZUa4wRgO0Pkg5eaxwCalJo4iJdf/mrFg4HJzSAp1ebDp4jxM7BnsMhbuYpklZpNp2+dpR6oLN/sHGZJQwWQICJugwZ5ow0gNqNfO7ueyQWThSljAsWAJBr44GxkwcAoT5XOiEvai3KUSf5kFAoCyNHaqA9gA0VFsyc/PSHeshHP8jHc/qLRAMA8JuytEk5Eu3SD/659938lHef0Q9XauNKPxk7v+krZflN+/xNoj/u+Pjt0vE9e/VApw/QWemEwLEI02sMeFIGskt/jXICVRocN1kjwmsUTlqkkZnLFY6fp4zBszQ+Z7VGZFTa0QHD0ysUSVygnKSFyg8stvupA6arILhE2QnzNTK8wngf4b+QL6g0MgAAIABJREFULmCSnEiaMmRmtxNDRuJ05aeVanjWSdZ2RsJcZSXxBe943BWlLVckcZGyExbatTi1Qrm+xRqVtVKZQ+baUQqFyUuVOmCmHaHAvYzBc5Q1dJ6K06ssviVgA8hgw8F+BQgd9RtDcWn3l5uEMyZ3rXk94/n83EMyABoUN6HPgNFxWYkLlZ+6zMLgIO2wOfTqP71oXxNpQxf0ilQe6Bw46LxnmaX7pe5dDw8Yj3nQKdsDdLJ90TN22DDqc4AHicfdRMpGUtedmrw5fg5lO/RkZ4cFKhT2r1TYX6VwsFTh4JLo3yud3/Z3lfM8QPy3coUDFdGApMsUwRZ1iMnlpf3y6i9XfmqF8Asg2CfOBC8/JrPrsE+nT0cCwlKnDpljngdUAOrfff12U61RsDfxyCWQp17zwMcD1cNcAx7odEsNxmuim0IBFgd0AB4XfJzfscBjp3By4iencR7CFTNCOLBcYd9ahf3VMSBTrbBvjUL+SoUtT4WTz79SId9y+22edkmV4gTSSJJzEunBXl1e2i+vfudoAxxBAB1UbBd+8RqTetCS9XVKQRxhcAAdvBAAHRwKnr6/yxwJvM2hh8lQPCng8KSAY4F+xzLoIB0ElvZ82Hafu4K0wwbJqp4E8MT+7f4+RGbfDQ5EQGBDatJ6RXxoepY4yVetSBLOAyuc5wHn+GXyECvOce0G7KqiieOn+X1wV0eyO4ojSe8rtp+/3IQUzDOEvsHO5ka2wXOtT0kH0YioBIiZjlFoga657CU72qAvpHJR2ZN0PFDyJJ3DXAMe6PQCOg4Tdhj78iiD7+N6OJJGFPS6QSdprQM4nOUDyBnoADBVzvk+RE4gQGniuigQLXUk
rEMAGxec+jvogBsANXtzsHPhMs1vPJ85irs3TVkcwT5HZlebTceVdpbNOd9sOl7stcNkKMfCl7o3xsOT5o510PFX7OkWbPG8yo1ZudGW3Y/cfV+dYwF6mPgB/I3Lr7/MUcslrldO0lrlBEqVE5zvgEnSGkUSVzsSDPcCOAusUSRhndlwcqKnmzrgiFQGMB7cdd9jOsoloGgYHPwC2KODE9qpVd+y6DYpg+fZmTq9gg76OBwJsOXgfVCStVID4sZ4Nh2PmR4eM/Xod2D080Cnb9AJ4GSAl1hvyQGmHjvQITBoQAfvXRwRukFnoXKCcx0gSqo2kDF7UXCO3XdA5yRFfJQtNfXgAUlkBkhvl9b6O+hgy3EEl9W2KRQtGcIL0Wz6dJlmj46jxywz0KGS2ePONSOQ50jgSTqe+uwIr4FjHnQI7OlKJTH7OvyObcWJVkDEAid121xMrQMoxYDPQf/mDJtS5fmWKi9hvfIS1ygvMN82dNq9xGrlJqxRblKl8oIzlBec4/wdf5JyfZRd6ADivuwe7+VnUUkH0AFoULU9cW+HSTrMS582HSazIK3CMuJ9kDJ4jv76m6eswP5tOuUKBZYoMzhbL79Qo7raJrW01qqufqvtA2lsaFNjQ4fs2livxqYaNTbtUGNTbfTExDY1Nh4DkYS9r/4D++o/JunU3MvpoY3OO9JUo6bmWjU01vR6iBuuu/36Sxmbin9foOPYC3qABiDinpP4QP5vgA6SDoCTk1StvIAj6QA6OAbkJqwy7zQABwkoN2G1ST89oNP/5+CQ15C/3DRjmQkLTGBhXu69cafZdYozV+05NzHgazadURGOpV5i3mscW014agBnD8+SmEJuJwGccPJCZSZP00vP1ah2Z5uBTV39djU1E8KkPspsABY3uV+Oe//t3veunnRxDK2BphY1NrWpobHVEr8buRf9SAN0+EhrbmxRc7106/WPKSd9ru0neS+AjstL3nY9UKmlF770trr6yuO2AfglOeAX8TmqPPbdIIHhDk19hMWBRzr5cOcG8Pr7ya2HoJKMpaW/3MAGwQWzDKdOf//L15uaDdDpyycgLn3YPGEI4iwEiMj133e1GuDsNwyOgc5cZSZPiYJOh5qb2lVXv8O+0BzJJhrKxNQISD6creIBjgcsxxCw7EuCM9BpUUMjwAPg9AY6NQ7oNHQdO6ATy9y830enRGug7US0QUPGlpsJhafYfh2Oq+4z4OeEwpMF8OBIgHiEn/Xt/3xLRAmlYLf42svEO5KOBzoegHgAcshrwAOdo5Oh9sLvDliCOlbK+svNLMPROOzvBEOGxU2yrTecgdSXT0AcB/CQCIGDpDM0bqJmjD7HvA8OTtKpU+3OzoOQdDxGdciMal9fzt6z/mU/6gYd1Gt7Szq1UY1BVL22h6SzROyT8xihR4N3bQ1EQYeAn5yjwx6d9cu+ZkC0h+faXrbHOOw5HFdNIQxzeCHgOj08tLpP7wN3kI6kMz+qXosBnToMoHVRx4G+1GuAjmvz8QDIA6BjdA3sATqtMeo1x5ng7TadJ5STPl/hYJkHOseKRHG0jtM81AiBU2aqNTRlt131puEHYXD6OhrHHAkcd8QlJh65O0vRx+2BVr0MvAd0cCRoUO2OLjU3daiuG3QAnCiwvM2m44GOBzTHKNDESqP7Ap3G+m6HnObGtqgjQSzoxLga9/J+uh+H3tWTho7IGvCXmw/AiPAqxcdNNsHl2Qdlwsseppm9JR0KcIgbohHxczgB7uIL79CoyLqD8F6boZeebVLtDkVBpy4q6big47qFxjoSeKDjgY4HOo3NTeat9nbvNef9cL1Au0HnuqeUk7bAglASjPKIMBMPwDy6HsgaiMZeK86sUtLx00zaue4vr5gnGxEJ+vJ+juPUNyJNIxqhXsN77cn7Os3dbb+SDuJVsFSZwVl66dmWKOjssnNYHPXa3qDTGuO95oGOBzoe6BjoNDepobF5L5fpWNBpVHNDuyPpXPeMctIW
WWh9oh3vsbHyQBiFl8cDlP/WGoh6rxG/043b+fPvbDDXaTaG9uUTYC7TxF5zgrQtsiMO8F5DJ9fXjlL368r2CSSXKRg/RQ/eu0k7t0mtzVJ7e6d27Nim1jYXWFxJxwUd975n0/GA5xgHHiSdqLRj+3O6Xaadd6S1tdk+4pobOqUO6Vc/vU6h4AJl+SqUm7LGA53/FgP16jl4MI5KOmwOHZO7TonHTbWDQHGdTjp+Rt+HuBFhmlA4SDkkHApuuvI/e7q77aWTiwWdjKSFKoos06W/vFU7t0oNdZ3q6pI2bnxdbW1scosyFTfcR3cEAiIURJ0M3DzetYdeHi2ODVrsB3T4eKutrVd9Tata6qWLvnW5UhNmaVTuaUobhq3CU7G5/Mi7vsO2K2LX2Tk6lfKfOMMcCAj6SWQb/4mzzBmtty03cfFxkwx0sOnkpRD7aImmDD/DUGoPY1AvXwJ2gl6wTAXhcn31C5dq21vS7l3S1i077cjh5uaYr1gPdI4NJuqB5cHNc3PDPiWdhoY6dXZ2qWZ7i0k6HzrzW0oZNltFmesVGFjmgU4vfMkDn3cIfKIu00g6ucmO4HLaim+biYaz2PrCj7gpw0+3Q9wIY8AGHxwJ2F1KGIMDUa/hwZYVXKDqyk+rsVZqa5E2vbFF/MOLzZF0YtRrb5N0PBVbtzToMeyDY9jvBXoZ6DQ4oW/Mk82NSOCo11pamrRr127t3NasjmZp6ti1yk4uk39AqbIDqz3Q8UDn4NVi/y2aRSWdlMGzLagAUs8tf3/DjjgYnbNewYFzeg0uEOc/cbqFwcGRALsO51wj9bCbtK/YOe6XBJJOyrB5ys9aqvTAND33FPHXOtXZIdXU1OwDdKKqNVOveaDjgU6MRPxeAJKDGENDU70aAZ6o67QTh40wUQ5Ndu7cqdqaRrU2Srfe8KjSfVNVkFllUk5J1pniWGf3ffSu79AX/n+Laff3eqKSDo5ovhOmm/eaE0KtzGJ39nmI2/iCkxQcOLPbAOQG/ETKyUup7BWp3MXtnBxaroJQpZITJ+m8D3xV6nLdphvMANodZ22PmGse6LhMxbseu4DD3AM6DvA4rtN7g05TU5Ma6lvUULNLZ536aaUlTbONoZHgGmUlVnug098Zd3/uf3RzKKcUEFAAwQXQYesNAaNxRuvVpoOkQ0QCCuFEgL81G3z22E3apyPBMuWlVSmUXKq80AKlBcfowX89p9dfe9PUa/X1SDHR4J4e6HR/vXpAc2wDTez87w06jut0j6TT3t6uluYO/X97dx7l1VUfAHy0MYQdZpgBBgIDYUmiiQlZNAnZwzZsIYRAQoLH2Na6VD0e96U9Rq092hrPUY/WFrUmGm1dYt13E7fWXVu3ulWzKCQEBmaAAPrt+dw335kfMJO4zbjM++Oed99dvtv9fu/33fu+776b3/zuePiC82NGy+KYPvGymNu2KdpGCyKoVzr5EFxfh3ml1+wHbn5PvaQsWsQDvOZlnyhl3un4kduATscRBifNrM7Nsb3mhZB9OTHWD/adjkFun7giZrWtiEmjFsXs6efFulWPi7vu6IoD+6L8R6e7q/ogtLvX6XSXI9t3Rnf5r07vv3XKqQUcVJ1qGYwgHbC9XP4x5d1nVwm+qRxS9aBmhbPt7h2x7e6uOO/szpjZemZMnVCtdI6fvC7mTbumfqfzx7xS+GOnvfdEAocLiF6zaLHKkZxoM+h3Or4m9WsDSyPRaz4O/fxHdpYOD/YTt3yymDGp+stf8+izY/aUxfH8Z7w2fvK9rri/J+JAd8TB/REHeu6PPbt3Rc/uHbFv3444sP+e6Nn789jTs733x24cUJ1qGYwgHejZHnv3bo+9+7bF7t0/jx33/ix27dwR+3r2h4e17Xd3xz137o0tG54WU8Y8Io5vXhzTx59fooJObN8Y89psr9Uh0zkP1dfhX+n4JsdKh8OxaPGXAqufk2ZeWbbYBlzpeJ/jWx0NbbFxPOKs7ccNdoxB4+AeP3FV
zJywMhZMc5zO+mgZ9ahoHrUonvTYl8RPvtsTXdsi9tx7KLru3RP3bdsRXTu2x94990bP7m1x3313Rk+3k3SrY3Pqay2HkaUDO6Prvrti757tcXB/Vxza3xMH9x6I+7sPxv6uiH07I7ZseHrMaz8/Zkw6L06bd2V5WTtzwoqYO2VdtI8TMj3ME02Nr5Z56kBzZ9FHv8bhcKx21i99QbQcu/iBVzrOXsvfVQuZ9hOe0U2LShDBYP9D6Ff0NTF3yhVxwpQN0XzMJTF19GXlCARPYzMnL451y54cX7r9J3H3j/ZG7IuIQxH7dx+K7p09sW/33jiw/2Ds3rk3unbVqZbByNSBnl3d8cv7f1FsYtsdO8rOQPe9Ea+78ZZYcPzimDp+UbSOOSvmTVtezrTicGaMXxlzW66MmRM4nHql0z8f1Q54WGXRG0iQx+CcOmdjfODtPyqrHguWwaKfm0Y3nR622Kx0xFn7RofjEUgwWKd+xtbEpIdcEifP2BILp11djGHh9CsL0rYxi2PCQ0+LjtYL4zEbnhPvedvn4q4f7IsDuyMOdUfs2xXRteOX0bMrorurTrUMRqYOxMGIru2HYtfPfxH33HEwbr3l9tiw6kmxYOaFcfLsFbFwhrS6vGudPnZpTBuzPDomXxHzWjcVx1M7ndrR9M/HwyyL5s7yWsbrGT9w8y7nvz9/qJRNG3vpoDtl5eNQyyP/t9bJux0HfnoJ9GAfh1L4E6ZsjOlj1sa81mvixOnXxtTRy2LamGVlu81+37RxF8SMiRfGvOmXxaNP2RibL39OvPi5b4ytr/5g3Lz1UyXdtPVTcdPW26K+1nIYaXpww3P+OV707DfE9ZteGKfNXx3Txj8qOlovirltS6N19HnlQbCjeXXYxu5ovjwWTtscC6ZeG7MnbYiOyfU7nd/bhJtbTCP52ut0/BrHLpl4gP/8WFfxIyLXBvvkpsnXpH43ak9O8nHo+972gxDyNlj0Qf9Ar4mT2q+LiU1Lon3s5TG/7eqYetyK8gR2aseWmDp6aXE+kHNojkuY3bIk5rYtj/YJF8SEhy6qDi90gGGdahmMMB1wcOepc68ottAy6pyY1ey/8tVJ73YaTppxVbGh9nGdZXVjN4GNzRi3rtibB756pTPMT/cj2ckcyXtz9amNmIA8t/MN//iZ8iPQB4p+bnLCtOXR/Gk+BF1erl/+VE85rO3BAwnWxHFN58ac5g1xxrzHly221mOXRfvYVTGnxYvOzrLq6WheG/Pa1sesSSujfdyychWowLHNbu2sUy2DEasDDkYU6XNqx+ZiF97ZnDzz6lg4/aqYcuyl5TsctjS/bWPMaV5fHuraRnXGrInVu9Ta6dROp38RMMyy6HU6YgIsVgQQnLnwurLq4XQecKXjlGkrEVts9uZu/8A95RicwT7uaWRS6CYDaXnYpdF23NJ4xKxri6E0H3NxMR7bAoINhHfaHpg1aU1xSByRulktq+tUy2DE6gD7sSPQOmpJzJ+6IU5s31TejXpgmz9VsMDKspo5odVD25ryIOcBbuH0ql+jLdb5YZ50j3zyH2n3jkEbfWEIRvN9p6hnvzeQ53AG2ylrsrXWPv6SEhljpWNvrjptuvNXeKezqrws8h7IqmiglZH9aA6meiITaZOpKqudTu10R7IOVPZwtH2wGbbT50jKD7O8uGVnDfY20ia6mt9+nfh9y6L3wM8Ohz5PXFJ2ypad+7QSiDbhIecM/msD+3H2kY9temRxPKfPu6b875qnGsiJ9BkBhsuLpOrDUIglL5U4ryr1Gkmj8RRBpeNZ0/uEu7K+ltVOLYfKAY0UOXAq/bbQaFvlYa1lZe8xImlPAn2OtLfOP5xJ6Pc9Cdb4h1cXen/iZuGyaP7mEhPwsXffWR6KvD4Z9NcGLceeF/Omrir/0JnwkEeVDr4uFfL2oNFr6XSmLI2OKY5aX1Kl4ngq55NPZX1fpjae41b6Nz655RNcfT38ibaWx5+mPHqdSk6WvauZ6mGu
sovq4Y0tLeuzL7ZWUnnIq51Oo7Ou88O4xdi7veYYNe9zrHZ+9I0ovzZwDE7b6IsHPntNuPSpc64q73REr9mj++yHdpTGg50S2jewxUh6jaHP0fQaSO+Kp3I6Kw5b/TQ6oKyvrxxLnUaaDBptocpXq5qUw2FOp9HGPOCV+9rp9M1H6bzr6/CseJo7y6uYMxZsLu9yFrZfHp/54L3F+QgSG8x/NOVyXQCBsDfLJLHWDGCwF0GHDXLD09nRBpR70LlN0Nnv+fr65dZBfe3flqxlMXJkMZBN/Cp209uvnmCHZ4Kt5Xy0nJs7S/CAmACnEQgguPGGD5eDBh4+66rBz17z7Yxvdc495XEliEDo9Efe+dPSwRZbnyMZVOiNe9KN+WqZVwUS9L4Q7XM0RxqaSVZZfa3lMJL04Eg76L2f7ENQATgNgQSHvfs52s4OexAc1FaHceulpuHoSfpPTSa9Kx2vaKzM7ZQ9cu6mcnVY9GCLlib7cd7pjG86u/yq2rlr3/9qlKXRYHHW/Qq+JmZPWhezJ61vSO7X9f7nQwROlaoXpkcrfQcDm+zjt/pay2Gk6cHR9lDZ1poGuxHZtrbXzgawtfrstT/9yf0P1Vk1d5YjzwSj+U5HyLRDP0/pqFY5Jx+/YcBFS5MPe6aOubAsjWyv8ViOM+ClDvu1Qa5SCEC+CGJNzJp4ea/D6XU2DQ6HsfQ7nQYDy/5C7ppXx5zmlfW1lsOI1INiR2kPDZNLv930Op3DHE/lfIrtcTrZv75W81Ith2GTA18hgOCkmVeU68oLnlX00fuc1uMuGtjp+CYnD/v0jY6XQe6dm2aPLuu045Qcz6FcdIKfvB0/sbMkWwFzWtbEvLZ15cNSddr4Tzbkp8+7tkTD6c8Tthx7QcHla2zLMida+0DVIaPgc3rwIV49OGjhPd0rx3B1EvbymPRnovBWl/54EHnnVwsJ14stv+JOOPpzsML68MbB6ifiwhYj/HkVzcewvfeCv5wpN/bSQmfKI+muVofL48QZ6wtc/OFHO3BEYVUf4l5VtjPJqXL2K/v4BR8+/KEbnckfOsHBn4Eeav7wg/6kM69VOP3y3hVxhs+vKPRqj058Vv9JX17GJcfZExH+BK2oxx/5aI+vKua/s8g/8WuvHFxw0JHjYFvWeMKrv3r0qaeD+rkaX/2Mt/b0IeEnX3n9XfFn/PCF7tw+rg5HJKt+etABJ72rZLGi0IofvJcXs23rou24S8upHie2b4iTZ24sbfFLz6rzEyv+8ZvyJBdw3dtrpzcVniVFn+kT/TUe5IIuOLUnD1c0pVzJD71pn/gCL/nTTn/yT3lX/KGt+qYDX/pX/C2v+JtaRTzRe/bDfhPvYPyRKzjw4JOcUv/QkXae/JNL2lc1z60t/dUbF/jwoR0+c36pxqRfP40VPa4itJYXPc75Av34V48/cFP/Uq/II+mg/6l3eU385iftUt7JH3rwOdT8VYc+V/aV49lP76oyfzpEAL1O17jz21H00x8HHP7cvyvWv+hoAqhi3rEFlSFTSoAZ5Xmn/nkJgaOQjPfskx5TJkt5g03RDRLBUlbLK+m0E64uTgIc8CtCq/36alAo6tLyGwXKlc5NIAN4FB09rgkjDyVFi3JHLqCB8A2w9gzIPfr8ertyNGtLOaXQHj5t4XSiNjpaj7ug8FA5hmriq5TFBFoFWYALN/oTlitaOFJ0g+leCCG6bVvCxaHDAz7atFUuL4FdGfvygkPdOY+4vu89Gzrxrxx+sKSh5g8OtCW/ZIHHpBl+bZQnXfLK6FH21R/P2muHj/zhU+a1lzdGyVvCVGaMyc8VXHL2AhMtxkUbMFzhkXI8jQWYdNNY0Ek0KUsa9RsK/vANH7qltDdyhF9KfaADEjkoQ6OtC3TRMbqLF319/Z332oJD7/ANB74keWXqEhcY+oIBVt5XWyOXFZw5Zmg5kq6kG10pP+0k/ZSlHiTOgeD8tvyl3OCgA3ihM3QDf6kHjbqhHZ2hO9qhtVG3
tAUPHP3llaGfftFbebgyn21zzFIG2uufstAu82DDjRawlMOTffWTN350lu4qM1YS3R5q/sxBeCUzvNGDpBndzgc8ofXy4mA4mbe85msxd8ramD/1ipg2ZsnATmfyMeeGD0LTcE3k7/iXb8Y3PnsgnMHmADcTHuYuOP3xJcjAMTlf+mR3vPfm/y0CIciLznhCvPtfv1OOtv7iJ/bELW/4eqkjNAPz6fdtK6HYt71/e4GRhnfxmU+MT773ZwWXfq97+aeLsmCQA4Lrf77wixKK9/pX3FbqCN+Xr+9807f6+t78uq8UgXAefiQkGEI/f0H9u+e+pwgKD6svelb5HffXbt9f6Lnhmf9enBd8ay5+dnz8PXfF1z9zf8H7T/9we+nH6MGEz9HdTuF+1Ys/UgZdvyWPfkp88B0/Lv2Em2991ecL7RTlymUvjE/cenf88OtRaHnliz4UZ524pSgNHoQYfvW2faXuFX/z/uKsxzadGesue178x1u/H+QliQpBP+Xcsu5lhYcvfHTXkPPHKK9d+9J415u/Hd/7cpTIRvL0MILOJ215Tdx60/fizu9E0Ynn/fVNhXfGtmLx08vhsXTFOD7/KTcXo6Vrf7HplUVmyvH3t09/e5kAyPrJj3ltfOjf/i8+9+H7gk484/Fbi7Hh//oNryj4vvLpvYWW5z75LeVBiA5uXvOS0u+7X4oin9f+/SdLP7p29aobyvj99FtRxg8teBtq/tBvjF/w1LcWo6Uv8srU4fGp17++2J8JhTzxjj9yIQv2Y5J54nWvLvqJP/r2sufdWh6WTD5/efWN8f5bfhg/+FqlZ896wpuK/eFdnh2o00ZbfdgKGGCBSffhgAtOuNGAFjShDY3GD81ox4d+z/yrN/ZNgPLK1A01fyZE/P3Xx3fHj78ZZU567JUv75sL6MBH33VH4Q8PdISueHimOyJ18UeH6RYdwyOdy/HRjyzoJn2hq+RCd8mOLpcJeOYVRceV03kHJ7MBtmAuYxvK2Qp85MmG2BKbQgsbY2tsLvWTrppz6K45iC4bV3QONX/mIHMROi0kzFF0yZxlfDsXPzvGNj06rLy33vjF+MAtPy2OqHXUJeWw5wFXOoTMc2GAg+Bp8wnAkxHPZvJU56qOsir3dKAMDALisOQ9PXFk+hsMiqEebFcTp6vB50BMwvkUhjEwtE26wPAUACZjIfCkWzt0MBJwGbX2+vLO6NbPVR0nix6GhYZ8WpAnA3ylVwcXXnBFZeRhdkkr2sEiA3Trry/cPrR1bZQZ3FmGd09iaMKLvu6tZtCW45FX+NGiDn9oQ/NQ84dONJMTXtHK+JSTvTIJTeSkvJEfY6V/jme2Tx7xR+Z41w4cMtHelcGqByfL9NFO0gc+eLWRkgZ18OgHPpkZO+Ogb+qgdkPFnzFLupLmvFdn/NyTS5anHMgT/eyNLOi4dvgFSxm68Zcw9KFzYBsnSV6ZusSjj75ggAWmOjiUwQm3PurRlPQlDLSrcy8PpiSvTN1w8Ge8MyUNyR+6H0g/0CjRKXzrh086R/eyLOUEHl1Sr49ETviV1IOR8tJWm6Qh5ZnttUW7ccoxMC76K6ej6KOzdJcOg6kfvNol7Bwf/ZSp+235M37Jo7mpkR8y4WyscPzriYMZ13ROObj2rIXXx5RjLx54pZOCTeXGJAUlVA5GHvHaYZSwMKfMBOs+BwCzyrMNBXYPZgoSnsYJE0P6acs5gI1RbXIwU7gmebAk9GjrCsa4prPKlZD1Z2jaGeScZAiJc9APfPxog3555dq7ggMG+sFzry7b6Q9W8gu2vL4JnxzSgeEBj/qhF058J29Jq7KUaaMcTALK0eGaMIaaP3TluMOb/KLNmBoTNCl3j0ftyI5MbNfgGRyGIKE9+dbHvfIsAy/HBd9kCB65UnxJXhs4UubyYLkmPvLMMSBzbekM3Xav3VDyl7LAH74kefyog18ena5kQLZok+S1S31lj3hU7kq36YDJgKzBk8gIj5J8lucWXepN
IyywyQIuOI+kA22NtIIJryv6yV2Sb6xr7PO75s/DGP7ARTv9M75knNvraGnUjUZ9UeceDEk/h18PAAAFgElEQVQevXSG7qWOpI7DAza5Nc5p7pVLYJIhWcMLHnmiLfuAx3bc5xinbWmLF3DgN4Y537kHE33knPiGir/UATTBR2/QhVayyD8HcDqnzb2uvGfMPwnYdhtwpcN7YtwSLx0MYAaAQiezkBGehBCDQ7AIIQhC5621R9CYpjOKQLQnQAOYQpLXXztKQznAxMwxTaf0TfzoAJNAbfE14kIzei37OCvLV04AP2gnJP2tTlJh9DfwOVHCh7703mjAJ1hoBl8buJTnkwa4qdDaweVee/C0BQvenITBISd1Er6VoUVbiuUKtmvKDFwwyBodZIffrB9q/nI88UXR8UjRcyzRin/1yo0VvlzRm3Iy3mApk5f0Nb7yqUN0An/akk3yTi50DFx517zPSRJN6vQBD710Qpk6cgfTFY3qh5o/4wWH8TVmUk4iWUcmOeboojeZyEMiI/ymvtJx9Of455Mw3rSHh+wleWXq6HA+hOkLBlhsAGw44Eq8SYcr2sBCK5rxZfzIGgxjIskry/lgKPkznsY3J8XkDx/mDPyhhU6kHmhLZxp1SF3ey6vXB3x80kkyARc8MsIfWchLKRN5tkD2ysBNm1BOzmjQt9GW2BZ61cMFr3t9G3U39ZxuDzV/dDTHnFzQnHOVwAHvbax0BJJNeuj5MWP88vB7DgFm3u0M6HRERYiQEA1RRZrYRvNUZpIUBsc4LcsJ10Bs7GsvMkNb5aI/RDCIOgFHVJL+U0YZdBE8K+KRc6+JUzo29cH28Wm2V68dWHApb/y1QuJCjwgQ7ZSddsLmEnkzpums0r6KUqmi0tSLbhGBUUWNVNEyCRcOvKM36xvbJiz18EnZFq9Jg3ZgSaJsXOEFW512cMKBR5EtEtmABxZatU/8yhOuuvz9q3rRgNrmV7/ZB0xtf5f8gQU+evBVRT9x6lWEn/LEqZ1oH/zIS+RBF5ShD08pG7BFAxpTeXUpB1d9U4Y5PtpqBx563DfKUD91Kb+kAw10F93qq6gkDxZDy5/xxoOxxrckr0yd03jl0UMWSVfKwhWPaJbwQxZ0Sp5tgmlc2FeOh/rsk3qqTpvUG33BUA+mfPZJOZMnGoxdI41oRjsetHUPhiSvbDj4QxP60Wds8Wec4SdntA+mH9rQITynnMHBg3t80MFGnVROZ/Q1XvLaGQN1+oNH3/Ql05QrWuX1ATN1FN3aGhf0KM92KXcwtcMb2AkXzqHkL+GnfuC10casdDgYkcuCBxZMW1/yoiyVDeh0UsgmslSgRmMmKBMegQhz5jQmH2Nv0V6yrTURR2v6ygifBzT4lJJCGCBXBBM4nPLKtD1jwZYSEowxQoUHXv3gUI4+g0wI+oGjXBtl6cz00xZNBgrtaCUoV4aW/KFVO22yrf5oSuWBS7iye/3UpZGDBWbKBd6UjT7odV0037uuy/p4gI9BgAc/eOrhwC9+0Au3sqQvaVef8hlq/tBANuih6PChFQ3knwoon/QbE4lstMVftk/jARfv2rlq28g7WGAry6SvlO3QkzDAB1PSD1zttNdOfcpSnfLsIz9U/MFJNslDXlNedCBpJFuy0kcZ3tQn/XhDszZZnzaLp+TBFXz6Isk31mkrpe2ClWOUOOBM/VSPJm3QqE6Z+qQVX3BIKXd1Q80f/Ur+8ARn0sFG0JKySf0gQ2Xkm7JNmvElnwmshKmfcrqaeucKrqu+aMkxQpu2cEn6gqWNtmBne21TtsrV51jI669ef/BzDMBUp89Q8Acm2Mk7/GnTeOZkhEfbXpNvedhFJZrNaucBnQ5AdaplUOtArQO1DtQ68GvpQMMHzQOtagYqa/q1ENTOqXbOtQ7UOlDrQK0DqQO/kdP5DToN5L3qsv4vbmtZ1LKodaDWgVoHBtaB/wdDXqgRj/zE3QAAAABJRU5ErkJggg==) # + 
colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 604, "status": "ok", "timestamp": 1597587647815, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="nyqr7HUbq4Uh" outputId="1db72956-85ce-4ba3-fa00-ea9537fb7307" #Hello world com programação reativa import rx import rx.operators as ops #source = rx.from_iterable([1,2,3,4,5]) source = rx.from_iterable([1,2,'abc',4,5]) disposable=source.pipe( ops.map(lambda i:i-1), ops.filter(lambda i:i%2==0), ).subscribe( on_next=lambda i: print("on_next: {}".format(i)), on_completed=lambda: print("on_completed"), on_error=lambda e:print("on_error: {}".format(e)) ) disposable.dispose() print("Fim") # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 516, "status": "ok", "timestamp": 1597589855674, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="FM5YS29VYIKA" outputId="b1672b53-8ca4-4756-8fca-9ef7ae9f21b9" #implementado observers #rx.Observer implementa todas as principais funções necessárias para a construção do oberver (on_next, on_completed, on_error) # on_next ->chamada sempre que o "observer" recebe um novo evento # on_completed -> chamada quando o "observable" notifica que finalizou a tarefa # on_error -> utilizada para capturar o error gerado from rx import create, disposable #import rx #função que recebe os observer def push_five_strings(observer,scheduler): observer.on_next("Alpha") observer.on_next("Beta") observer.on_next("Gamma") observer.on_next("Delta") observer.on_next("Epsilon") observer.on_completed() #classe filha da classe Observer class PrintObserver(disposable.Disposable): def on_next(self, value): 
print("Recebido {0}".format(value)) def on_completed(self): print("Fim!") def on_error(self, error): print("Erro identificado: {0}".format(error)) #cria o observable source = create(push_five_strings) #define o observer source.subscribe(PrintObserver()) # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 788, "status": "ok", "timestamp": 1597590041021, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="Qx5tqe1s_COt" outputId="dbfc9cbb-cb37-4b41-a76c-af1ba73f4117" #Observable factories from rx import of #cria o observable source = of("Alpha", "Beta", "Gamma", "Delta", "Epsilon") source.subscribe( on_next = lambda i: print("Recebido {0}".format(i)), on_error = lambda e: print("Erro identificado: {0}".format(e)), on_completed = lambda: print("Finalizado!"), ) # + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" executionInfo={"elapsed": 636, "status": "ok", "timestamp": 1597590344496, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="WU_DyGyWCJbs" outputId="71dadfed-43d8-4420-8b85-cef2153dc63f" from rx import disposable,create stocks = [ { 'TCKR' : 'APPL', 'PRICE': 200}, { 'TCKR' : 'GOOG', 'PRICE': 90}, { 'TCKR' : 'TSLA', 'PRICE': 120}, { 'TCKR' : 'MSFT', 'PRICE': 150}, { 'TCKR' : 'INTL', 'PRICE': 70}, ] def buy_stock_events(observer,scheduler): for stock in stocks: if(stock['PRICE'] > 100): observer.on_next(stock['TCKR']) observer.on_completed() class StockObserver(disposable.Disposable): def on_next(self, value): print("Instruções recebidas para comprar a ação {0}".format(value)) def on_completed(self): print("Todas as instruções de compra foram finalizadas") def on_error(self, error): 
print("Erro encontrado em: {0}".format(error)) source = create(buy_stock_events) source.subscribe(StockObserver()) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 780, "status": "ok", "timestamp": 1597590473851, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="0r58U3ac69XB" outputId="f2146b17-1474-42f0-e1aa-6f856759398d" from rx import Observable, create stocks = [ { 'TCKR' : 'APPL', 'PRICE': 200}, { 'TCKR' : 'GOOG', 'PRICE': 90}, { 'TCKR' : 'TSLA', 'PRICE': 120}, { 'TCKR' : 'MSFT', 'PRICE': 150}, { 'TCKR' : 'INTL', 'PRICE': 70}, ] def buy_stock_events(observer,scheduler): for stock in stocks: if(stock['PRICE'] > 100): observer.on_next(stock['TCKR']) observer.on_completed() source = create(buy_stock_events) source.subscribe(lambda value: print("Instruções recebidas para comprar {0}".format(value))) # + [markdown] colab_type="text" id="83FoXXtXAWa0" # **Função Lambda** # + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" executionInfo={"elapsed": 516, "status": "ok", "timestamp": 1597590746533, "user": {"displayName": "Tulio Philipe", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2kqFihAn3Ile03oz-6rO8qVjEHv1DhGQ0ngQ5g=s64", "userId": "06907869093485551957"}, "user_tz": 180} id="2vuR1gzjDL7R" outputId="37a02230-e302-43c4-cc0b-be6488b0735a" from rx import create stocks = [ { 'TCKR' : 'APPL', 'PRICE': 200}, { 'TCKR' : 'GOOG', 'PRICE': 90}, { 'TCKR' : 'TSLA', 'PRICE': 120}, { 'TCKR' : 'MSFT', 'PRICE': 150}, { 'TCKR' : 'INTL', 'PRICE': 70}, ] def buy_stock_events(observer,scheduler): for stock in stocks: if(stock['PRICE'] > 100): observer.on_next(stock['TCKR']) observer.on_completed() source = create(buy_stock_events) source.subscribe(on_next=lambda value: print("Instruções recebidas para comprar {0}".format(value)), 
on_completed=lambda: print("Instruções de compra finalizadas"), on_error=lambda e: print(e)) # + [markdown] colab_type="text" id="XywYmOHzEszU" # **Filtros e Operadores** # + [markdown] colab_type="text" id="6T1Y64vr1wHI" # ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAlgAAAHCCAYAAAAzc7dkAAAgAElEQVR4AeydB3hVx5n+k91sdpN/kk3bzSab7GazJZvNZpON04y9cYtjiijGLXZsDC4UY4ppNi640DuoUEwxHdE7pvfeO6ILkIQkejPg9v6f35z7XR3JEgi40pXEnOc5OlenzJl5Z87Me97vm+98Tn7xCHgEPAIeAY+AR8Aj4BGIKQKfi2lqPjGPgEfAI+AR8Ah4BDwCHgF5guUbgUfAI+AR8Ah4BDwCHoEYI+AJVowB9cl5BDwCHgGPgEfAI+AR8ATLtwGPgEfAI+AR8Ah4BDwCMUbAE6wYA+qT8wh4BDwCHgGPgEfAI+AJlm8DHgGPgEfAI+AR8Ah4BGKMgCdYMQbUJ+cR8Ah4BDwCHgGPgEfAEyzfBjwCHgGPgEfAI+AR8AjEGAFPsGIMqE/OI+AR8Ah4BDwCHgGPgCdYvg14BDwCHgGPgEfAI+ARiDECnmDFGFCfnEfAI+AR8Ah4BDwCHgFPsHwb8Ah4BDwCHgGPgEfAIxBjBDzBijGgPjmPgEfAI+AR8Ah4BDwCnmD5NuAR8Ah4BDwCHgGPgEcgxgh4ghVjQH1yHgGPgEfAI+AR8Ah4BDzB8m3AI+AR8Ah4BDwCHgGPQIwR8AQrxoD65DwCHgGPgEfAI+AR8Ah4guXbgEfAI+AR8Ah4BDwCHoEYI+AJVowB9cl5BDwCHgGPgEfAI+AR8ATLtwGPQJwQ+PTTT8X6ySef6KOPPnIrv21/LLJlaRXcxiLtspKGYQh2hl84b1Z2jn388ccOZ7a2P3yu/11+EQi3A377xSMQbwQ8wYp3Dfj7FwsBCMiePXu0bt26QtcNGzZo69atOnz4sK5cuVKsNON5EgPAyZMntWLFCo0aNUodOnTQW2+9pSFDhmjRokXKyspyBCCcR645cuSINm7cqE2bNunEiROfOSd8Pr+5BkwWL17s0j1w4IAjIQXPK8//X7582ZVv6NChoh1Anmyh/B988IHDa+LEierRo4feeOMNpaSkaNasWdq/f78jXHa+bambzZs3u/TAD3J2Ky9hPNLT0/NhXBQup06dcrjzzNLueIZLcjl//rymT5+u4cOHa/fu3SV5K5+2R6BYCHiCVSyY/EnxRuDs2bP685//rO9+97v6/ve//5n1Bz/4gf7zP/9Tf/jDH9S7d+9CCUq8y2D3hwCuWbNGzz//vCvHV7/6Vf31X/+1vvjFL+orX/mK/v7v/161a9fW+++/r4sXL9plbpB/++239S//8i/693//d40bN+6aBItBrV+/fg6373znO47Iffjhh9E0S+oHZVy7dq3mzp2rJUuWiPqD7MR6Ic2VK1fqf//3f/VP//RPmjZtWvQWkKJ9+/Y54krb+Nu//Vt96Utfcjj/v//3//TNb35Td955p9577z0dP348eh0/pk6dqp/85Ceufl577TVB4m7lBeLy05/+VDxnr7zySr52WRQuc+bM0c9+9jOH4YsvvujaQFHnxmL/hQsX1Lp1a339619X3bp1lZubWyJtLhZ59WncGgh4gnVr1HO5L+Xp06f1xz/+UZ/73Ofc+pd/+Zf6q7/6q+jK/3YMwtKqVSvxBl3WFtSVhQsX6te//rX+4i/+wuWZckCsjGhZOSBRI0aMiA7uEIZmzZpFrxk8ePA1BxAIVp8+fVz6X/7yl516UxoEC8WjVq1ajthBfnbu3HnNvN5IXVHH9erV0+c//3lVr
lxZx44dc8lAvNLS0vTwww878gqmX/jCFwQGX/va1/Q3f/M3Ufy/9a1vOVxoY0YCR48e7c7lugYNGkTr4EbyWBGuSU1NdW0IPJ577rliEawpU6Y4Ess1jz/+uM6cOVNiUFi9odT+4z/+oyNZPB8lrZqVWIF8whUCAU+wKkQ1VvxChAkWZKpKlSp66aWX1KJFCzVv3lz169fXb37zGzeI0qGj1kyePLnMAbN3717dfffdUTKIGsdb96RJkzRjxgy98847TjmBMFCOH//4x1q2bJkrR0GChTnRBpaiCso1XN+2bVunPKAqhE1oRV13s/sxX6IOUYbvfe972rJlyzXzer33pGzg9nd/93dCkQIPKxuDOW3CiDfktU6dOs4cizKYnJzs6sGOo2YNHDgwer0nWPlrA4IFhtQnyitq0bWW0iRYlhfqHTJHPu+44w4dOnTIDvmtR6DUEfAEq9Qh9ze8EQTCBAv1AWXn0qVL0RU/G3yw/u///s91rnSwL7/8cnRQh4iQBv4g+OL079/fmYZ4483MzIwOrNnZ2Vq+fLnz6cHPyUxD3AvfJ87HbypsUsInBTMYKz49NsgXLCf7u3fv7kxU5A+T1pgxY/INVtxnwYIFzuTFOayNGzcW/iUFCRZv6BkZGY6Yvfvuu87/BNUG85wRL7ZHjx51eSbf5JV0WDjGuVyDaS0pKcnhMn/+fKcE2XnhcoAHvnCzZ88W9xw2bJhT5PAZo3ykCTbcC+WK/GPynDBhgjPXmXrGueTF7turVy9Xp9RPcc2JkDhMqdzjl7/8pUvf8opp8h/+4R/cMchX+/bt85mMuD+q2kMPPeTUL9Kg7eBvxVKQYOXk5Li6B/Px48c73yLqpOCCYoJZEuKXmJjozNWkhT8X51u9cB2/OR//JPzBMOVC/PhNnZFHW2gXtEf889avX+/S2rVrlyOM1NeOHTvcMdon9RO+FswhuByDbJt/H/dHAcRcTZm4P/W5dOlSV//hNAoSLNIgPfDAVM2zURCPqxEs2hZlJO+0I7DihYjnx9qIlZ18kjb5xL8KPzp86GbOnOnakJ3PeaTLM4VKiVoJnl7FMiT9trQR8ASrtBH397shBAoSrLFjx35msMJf6YUXXogOmA0bNnTn0PHScbdp08b5LmESwiSHTw4+JY888ogjVXTOkKtf/OIXzrR17733Rt+AGQRRnvAB+7d/+zdH0igIg9Cbb77p9pMWxK2oDh3iUaNGDTfoo1BBnEwJII+sLOSjS5cuUSL23//930L5Yn/YRIgqha8Jah3qAkpOpUqVBDaWLnkZNGiQ/vVf/1U/+tGP1LVr1+gABpEZOXKke9P/9re/7VQgcEFVe/LJJz/jMG7ncw9IE/cEQ85/7LHH3MAMHihJ//Ef/xE1zaESoWJRdgZm8gSJrF69uksHAoRvFGn913/9l/OZguhea4HEcW/IEeoUJJsFEvj6669H1avf//73UeJkGFvaqFngRxrf+MY3nN8bx8IEi/ZBevi+MXCjduGfBZYQL1u4L75K+AGaqsYgj0/Qz3/+c0euMZ3aQn7x9cL0DZ5gDxYQQ8yrEBgjD7wEPPjggy6v9913n6s32id5ph0xSQLTGNc+88wz+czjkEbuQdslH7wIgAPEGvWXdhF+JiD++DtCaGhzLGGCBSnFLy2MBz5unTp1UrjeiiJYkHrKhvmWPNGOKDdtkLJBtCCUVleYfSkfbcP86MCKslarVk3z5s2L4kReIZ7UD3VatWrVfC9Dhr3fegRKAwFPsEoDZX+Pm0agIMHiLZXOP7yi5tSsWdN1rDiMMwCynDt3Tk2bNo2aDxlMIElszRT3wAMPuBl6zNLDP4rOmYGXgYCFQYlBkP2sKCIsmCQYDNnHwIsJrqgFZQHiwbkMuihpBRcbVHAQZ6DjXAZr0g0TLPLNAEM58eEyUxfn//M//7MjCqQFmenZs6cjO5z36quvusEIIgSJMHIBweF+hgnpMzhBTFmMOHFP7oH/GIOdmY3Yd
8899zgiCMECX/LGfvLGdRAqBkuUo9tvv90d43rUJzBngOV8Blv8xoxcFMSI/ykXqhdlQtFkYoNhBxFk4CUt/K4gA5xf2AJBMpMtaZEmS5hgUa8M6KRlZSJt9nXr1i2aT+rsf/7nf9x9IT6/+tWvdNttt7m65nz2oRCBJXWJygbx5RjpQtBpY+ab97vf/c4pT+SHdmnqLHWG8zjXcS4vEpASq0vaGIqWLZBI2hDnU1aeE56nZ599NtpuOE5eKCvnsdKuzactTLDCeICZnU+98WIAgWIpjGBRR8z0BBeuA1PINyu/2YdZHALOubQB2gJElTb5wx/+0LUdCJ3VBW0JUmULqhwkkLR4FlatWmWH/NYjUKoIeIJVqnD7m90oAmGCRUeMcoNJy1YGOpQGBj06Vvx/tm/f7m6HeYa3bfbjOI45BJMJ6gGDO/sZnCBRvDkz44nOHNKB6YIF8wmdvA0CvOEzkEOaUAVIAx8wzF6FLQwWmL9sEITMXK3jZ0C1wRrVBNMIaZiCxf0Y0PAzYvDDZGKDFnlHHUAxK4xgsY9846NCOgz8nTt3dvnDPIWzOGkws5F0WTB7oVxxPjigmkD6ICJmCgQvFDwGcBz5DRcGb85j0OfeYGk44qB+8OBBZy7C3EQ9QT5Ra8Jm2IKYgj3nWP4xNYIPC6SJujCMSBdCY8fDaaE6ob5xLkSQyRHkMUyw2I/PH3WAaRriYfmnjrZt2+aSxH8O3FjxDaQOUY8gfyht4Pz00087cxdKHoTT8og5m/YAsfjTn/7k8gJ5ghxCyDCnGcHiGrCmPvCHov65F/XGMUgreWWh3Kg/5In0yCP7IIOm/jE7kGeB5wTTJkoR6aCIYY5kCRMs0kERY/YlCihmWsODtEiHpTCCxcsOeSYNyBkO85gkMV3SpigX90aRRIVFubN2Ckknf5A+3AGoN0g++8mfLeCFMkc61B1t0i8egXgg4AlWPFD397xuBMIEi46TwR+CwcqAj4rBIMIxBnyIAh0tgyoqDASiXbt2bkBg4MZ/B78R67xJw6b4M4haR9+oUSNnckIB456YDzkGmYFMQSRM9WHggKAVtpAPBhJTEiB6NjAXPJ9zMbWYykMZccBmvxEsyorJjYGVhbLiC0NewIA3fQZRBtOCChb7MCNC3DgXUxPmIkgLKwMnJIdjEDWwwofKyCsDvRFJ7osJ8q677nIEAP8YVAdUBHNyZ6AOl5UBn7RZUVQgvCgQ1BMDJeSAuijo0xPGiUEWhYg0UH0wF4IPC2TEiDNlJH07Fk6DfZApBnnSYdBnkCf/YYIFaVi9enX0UnyhMNtyDXUDXqRDG2Efa0JCgiMtEFlWFFfKhe8aCg/txtRA2hSqDthDQJjsgKJDOmBNecIEi7p/6qmnnFrINZBE7m+KHtdRDu5DWzdVF0JliiwmZ+qBmGCUlfriXF4CjDBT3xA+ljDBwvwG3rbwskIZuC/PET5VYFuQYJFXSLYROBQ7fLCs3JgkwZp0ULFoMzipo1axj7rELI4ZHzzYogiCK+mG65iYclzDinkXfPziEShtBDzBKm3E/f1uCIGCBIu3dMgKK6SCjp23VTpUVBAGNAZKOl1W/F0YxFAzmLWH2sUgaWYGSBMDAgvnmekGVYCBnwGTwZQOHnKEEkUHj8JjZI/OHvIS7uitsOwrqGCFB207z7YQJzMDMbBA+kjDCBaKAaodA4fdj8HIBkfMOKgSHCuMYOFsb4SUwREzE/5rrGBjpj8GOoI2ci+wgoRAJMwMRPoQIQgX90c9AoOrzSKkblAeqCvyQP1RVhQJ8GRgpe5YrGyGi20hhOZnA3EwtZLj5OG3v/2tS586MwXLrg1vCypYtA1IY5hgoSiFQwzg6/fEE0+4vKPCMJhTZtRU2iHlAifaJtjizwbpAEfSZiF9Xgo4F5Me5Nzw53wjypAw2mOYYKGEUbcsYXwgKJjESBO/M5REyA8+VuzDbBpWBSkHJByCjHKHWQ3yY2Y/CBYEiCVMsGgfE
DJbUJpQlKlL2iWqG3gUJFiQx7CpHRLJdVZuiC6ki7yCI07s+KzxImFtFcwoI4SePOPzFi6T4YFqaP0Bbdt8Ei3PfusRKA0EPMEqDZT9PW4agTDBYqDHsRyzAiudNgMOA4R1qpiIMD2xQADocCFNDACs+PtAsExFCBMsOnUzt0AwMEswUNL541CL4sP5DKgMjHT+kC4Gs6stKBmkxwACwSjMB8uuZ+CzwYaBGn8dBi0jWGBABPjwArGwfDM4Mogz4BQkWJAXiCL5YIUgQqi4xlaICSuDP4QHkxfYMviiBBp5Dd8//PtqBItjkBIws8Hc8sL/qBioQpCfohbMUPh5cR15BFtbUEQgxByjrvE7K0rBADMGa84FU9oJS5hgNWnSJB/hI18QMfCASIEN6WP2Yz/qIfe1MlnaqGoogdQjRNKOU2bD3baGP+ofpsMwwaJdQO4LLjwjZjblBYBnA/JEWpSNGXVG8CA7+EtZXsPPhPnCkZfCCBbqmCm1tC/wgFTZswdeRREs1DmeHcrO+eTNyszWyg2J5LkjfcpBu2afYcaW5w6llbAMEO7wAqG1+9SpU8epZOHj/rdHoDQQ8ASrNFD297hpBMIEi7fYsM+FJY45w0wrDDCYQ+igeRM2ssJgjB8KJihMhDYghQkW5AECx8AHEeJNGUUIJ25UGgJPMrCiYpiJEXIXfqu3PIW3EAsLK8AAwZs1A13BhcEa8w2DIuehSqGihQkWeWOQtgGTcjIIo1xwDaQMcxTXFCRY7GNw5TxWlCMIBYSt4IrzNAoBpIp7Mii2bNkyH+Gg3BAcBjlIBukXRbDIJ+XjGkhC3759nYqBuY88gyt5gvyi3BS1MKvTzHQoGjjO24K6RsR7IzmYKo1s2zlsrW3Y5AVUIyMUYYKFnxhqjy2oodQdeYWcYn6mTLQbyk0oBUyl+OlBzEnXFBiUNeoS3yWr3/vvv98plAWx53/Mm5DAMMGivKhVBRfaAqZkI1QdO3Z0Pl/gCZmlvbNQP6hL9nKBEsgMQNQgzmEWJNcURbDwIwu3W/BA1QQP2gjPF9gWpmCBjRE4CCcEsLByY8LmWaNMEDjqGyx4McAfDnJtBIp6huAZ6aOM+E6SF8rBS0n4WEHc/P8egZJCwBOskkLWpxtTBAoSrIJhGrgZfiUQKDpVCBEDBh09ZAliwIpyYsoIfjw2mIQJFtdATkiDTtr8QlCrMDWgBDCwMtBB5EgXQnSthYENQmGmIdQJTH8oLhxjZSBHJTMHcQYtHHbZz3FTsBiweXPncyAsHINgkiblBwcUNQaowggWAzwDKOdimgmbUMCFgZB84G/EAMrMQDN/McDZdHzIDLO8cPaG8DBQs68gwUJxAlfuAzmG4KEAUmeY3yBoNvuQPEFsCyPRhjGkz8gtdYD5lfRtIf/mxE2+qXfyDBHiPPKIMod/kpEflEnMaixhgoW/HSZIMGaBIJl/HPnE9w3CSF1SLpyqMZnSZvEto5yWF8gc9YQPFvmmrDi7Wz2SPmmhyoI/RIq6Lw7B4tqwjxMzM63tYuY0UkTZ+dwNxIR2DBGDHLJQfmYvki/aB3lgCZsIIY04mRsekFdzwMecTdsC44IEi3YO5qbiQqqZPGELbYPPHqHWQr45H9xRFTFR81LEPkg8qh7E10hUQfMnZaIMrPwOtw27n996BEoaAU+wShphn35MECgOwWLGljnQYk5AfaFjRXGho4Ws8KbNQIsqwGw2e5uG9HC+LQyi4Vl5kCj8llgYvMOqBIMmgwJLUR257SddlDDr/Bl4ic/FYIRjPoOBlYFzGIzMoThMsDiGeQSlBnMiAyFhFSALrPi2QFwgFAUJFvsgPDZLkVhEOEgz+LEfExCDP4oS2DHwoSYZeWUQJZ+ci+JjYS0gGwyu5BOCZYMuWEFYqB/2o/yRfwZ4Zs+hVFAnzFgzbMCU8AKFLWAJWUAhoqwQYQixYcw1DMSoTNQb9yLPKC/4f1FXqH8QKlO5yCNqiimCYYLFII6ZC
dLDwI4JzEgypmhIIu0J4sm9IOuoWvjRUS7IgjnkQ8rxvYOAmWmStspLAGQWgoTjOUSZtsl9MVkXl2BBxoiDRT4oO3lH0eJD2NQLGPGCQR3zPFB+TJwQPPLKC4BNcOA6a9dhgsU1YI9CDB68AJiaRDnNXFeQYNEeqX98uAwn8gGJxD8Noo4Zn7ogbAoY0QbsGeW5oJ3zAsDsQs43X0HyY35ylBPVmXugEuIb6RePQDwQ8AQrHqj7e143AsUhWOGZZbx9o4gwoGCCoNOmw2VLMEMGcgY7Bmf2M2hwni2YFGyg4jhkAwLEwlu3OZNzDBMUA2BxFjp/BmrUF3v75t6oLOTZ9jH4YbpBqUNxYOHasIJFnrgOsyiDENeQH2ZdQQJZCiNYqBWs4GMqCgMkZiSLXwVxoYzMfGQBD0iAqV4QDCYTcG/uy2DOIMcgzcJgZ7GIOA5hQEVBfUDxMfMU6UFkmU2I2oKiQlrMXmQQLWoBCwgR+WAQJeYZ+2yh3iE+qHwQBXAhH5zPPbkP+1jJC3HNbIAmjTDBgnBwDaQPMmp1RFuySOEQswEDBkT9hLgG8kW5qA+uYSX2FCSD8yEgFj6E/OCMzrlGVqgPzqFckDUjrEWZCK3s5B2ya+WjHsM+atw7PFOUtoOZEsKHKd2eCdqW+QmGCVZReHAdBI02x1IYwaIsEE57fsCEe1L3RpZoU6iAtHueaeqQNkGbpOzkkxAV1oaoF4iz1T/qJo7xlJ+XAl4E/OIRiAcCnmDFA3V/z+tGAEUC5YNOlYHIZshZQgyokDBUCgZzVr5ViIM7b+c4H9OR04kz4OLci98G6hGdNkoS/h0oALYwgBuJYHBjwGbhPgyU5IXBgGngmNHIQ3EWBiB8XZg9RfoMTOSJAQ01g7w/+uijzpRk5kzSZgCB5HCcPHNfzEsQLQZUthA3fFXM56QogmXlgBQwWDHIQiJQeiARmM4gaWY64nwID/dn4Cef5Jd8kx9wx5xjGHBfBmdIBiSOMkJsSQP1CWLCMUgKaUB8qBvqiAHVTH6WXmG4MnCaqkbbCNcd53Mt6hhqG+ZLysg9yDd4kS8IEKpbmFxxLeQAssk5EG387mgj4EOeUVNQNMOR2SFOED1MaJSX+1AurqG90WbC4QRQBiEGFvkd/Fm5J/swiVk9ghv+grQ5/LjMn6owXPBXAmvaJuej5lg6dj5EF7JO3dkzwXPFM4GiRfsiVhumVYgOeQFr0iuIB2XlZQDzKBjYQpw0iBP3oK3zDLPQplAciScWjuRO+8U8ieJk5kzqEGWP9kWewBJM7VnBjwv1lWfSFpTYcLuw+9pxv/UIlBYCnmCVFtL+PjeFAAM2BAczEoMLA1vBwZc3c/xBMGdxHqYKIyh0wJgXeMPGJwZlhgEO0w4dONfgKB0mFNyDQdyOmZLEfRi4uQfHeMsumJfiFJaOH/MegywmJZyDURbIG6TQ0gxvUTK4p2GA2Q0yAFFihhYmSLBi4bqCBIsBlDJampA2cMU8iikR5QCzHwOwnWNpsYXEkGf8jfA7w5mbgZSBlfPD14AXdYC/EXmkXBBRFvKFfxJkEB8bSBAO2vgnoUCE03EXFPKHQRhVDKUCUmOmKTvV0qANMOhSRgZjHPYpJ/mmHk35sPO5nrqnXVDHqJOUD1zII2oOPkhWFq6zaykzx1AewROfNMJEoFriW2XnhbeYwvAXxORF28Qsxj7LF/kxLMkPZSkqRhjp2rNCO+F8yJndL4wNZeRePBOYTPF/on5RIWlfXIvpjvTsWWAfbZD/w3jg7xfGg/tQXvaTD0gf6YTzwXODCRKyTRsAA9oi57Fwrp1PWpgkeekBU7AFY0i9PZdcA2a0TSOrPE88r5aOS9j/8QiUEgKeYJUS0P42N47AzXaOdj1bOtvidLh2TcFcF7W/4HlX+79gGgwKli9+h4+HfxeVJucUdh37IGr4yGCGxJQGibMBLJweaZCHwtKx8ywvdj/Ls+2389iG9/E7vIbP436WztXuH
b7GfnM+SqaZSiE/pFXUQh44Tvmvdq9w3sNpsZ/rWG0p6lzOuVa5wtfy284vuD/8v92XbVH7w+eEf9v5trU0Crtv+LqifpNOQTzs3PA9bF9hW84r7P6FXW/3s/PD9WBp8yJlPl6ozhBVv3gE4oWAJ1jxQt7f1yNQwghg9sLsaTPYMMXZG30J37rUkkfZwGSFjw5mNRQ9v9yaCEDAUMQwO2LCRjks7GXi1kTHlzoeCHiCFQ/U/T09AqWAAOZP4mKhXuFEjTN+OF5UKWShVG7BTDZ8cfBdwtxamPpRKhnxN4krApjc+bQVflqQbkzNLL49xLVabumbe4J1S1e/L3xFRoABB6drHIxxQMavpiK+0eNTxsw0FAtCIFTEMlbkdhqrstHeif6Ok3w4vlas0vfpeASuFwFPsK4XMX++R6CcIMCbOw7eOB+Hnfcr4hs9ZTJ/oIpYvnLS5OKazcLagG8Lca2SW/7mnmDd8k3AA+AR8Ah4BDwCHgGPQKwR8AQr1oj69DwCHgGPgEfAI+ARuOUR8ATrlm8CHgCPgEfAI+AR8Ah4BGKNgCdYsUbUp+cR8Ah4BDwCHgGPwC2PgCdYt3wT8AB4BDwCHgGPgEfAIxBrBDzBijWiPj2PgEfAI+AR8Ah4BG55BDzBuuWbgAfAI1B2EAhPtS/sUyhlJ6c+Jx4Bj4BH4OoIeIJ1dXz8UY9Akd/R89DEFgEjV3xrjmChrJAsH8sotjj71DwCHoHSQcATrNLB2d+lHCPAIM+g7xWVkq1E8IVUXbr0gS5cOB8NkOpJVsni7lP3CHgESgYBT7BKBlefagVBgMH98P5jOrj3qIuK7tWU2FcsmLJCYi9cuKA1aau1cNN8ZWZlis+fEIXek9vY4+5T9Ah4BEoWAU+wShZfn3o5R+DypSuaOHiBxg2epcyMTKeweJIV20oFTwgUn/U5lHlIb01qpxYjm2vZ+mXKyMjQxYsXHfnyuMcWd5+aR8AjULIIeIJVsvjeVOoMKDe63tSN/cUOAbDPzT6tLs3fU7sXErVqxWpntvIDfewaCFhCrlCpUKumrZmqPw18WLVTaqn3pN7aum2rTpw4oStXrngVK3aw+5Q8Ah6BUkDAE6xSAPl6b2GkioEHs4k5/TIIFbaaQ7D5CXGdpXG99/bn50dg06o0tXy0l16s3Umjho7T8ePH/UCfH6Kb+o92Srvlg9S7D+1Sm9RWqpFcTdWTq+rZgXU1df4UHTx4UOfPn7ToQx8AACAASURBVPfq4U0h7S/2CHgEShsBT7BKG/FC7mdkyAgVJApzCYMOA8vp06fdW3xOTo6ys7N17Nix6Mr/ubm5OnnypM6cOeN8WLiON36Il5Euu0cht/e7ikDgyuUPNXnoIjWu1kUvJHRS93b9tXffXj/QF4HXjeymzdNOT546qeGL39PDA2qrZnKCaiZXU63k6np7zFtau2GtaPs8E/bycCP38tcUjoD1DbYt/Cy/93oRMDxtS9stbLXj4e313sufXzYR8AQrTvViDxMPnClUDCAQqlMnTyo7M1NH9+/ToS2btH/ZYu2bO1N7p0/U3kmp2jthtFv3TEpV2vRJ2j1nlnYvX6K0zZt0YE+ajh4+7AYkiBn+K5AtI1p+gCpehVM/J7JPq0frkWqc0FVNanTXa/V7atnSFbp06ZIPHVA8GIs8y9q/qVeb927WiyMbOfWqZkpCsO1XXXUH1VHq+6nat2+fzp0758ltkYje2IFPJX3y0WV9dPmiw9b3DzeGI1dZm7Y+/fKVKzp38YKOnTqpA8cytfPwIW05tF8bD+zVhgN7tenAXm1NP6DdRw8rPeeYTpw9rYuXPtCVDz+M9teW5o3nyl8ZTwQ8wYoD+vbQMLigVqE4oT5lZ2Xp8I6t2r9wrg6MGqL03h109J3Wynj1RWW2rq9jL9VTdrOn3ZrT7Gkda15XWS2eVUabBjryejMd6vCK9iZ1047UEdq+YJ52b92iQ4cOOYWLwQligFpgnSj58EvhCIDN1
rV71frxvo5gNU7opuaPdNXYkRN18uQJbyYsHLZi7wVf2iHkP+d4jvrNS1btfrUiBKuaaiRVVc1+1VW7f029PupVrVizwqm2XsUqNsTFOpEe4HLuPuXumKsTx4+7/sj3C8WCLnoSeLG6l4XLl5R58ri2HNyn2RvXasTS+eo3b4Z6z5qsrtPGqeOUsWo/eUx07TQ1Vd2mj1ff2VP17oJZGrtysRZt36y0jMM6fvaMPvwomEFrfXb0pv5HuUDAE6xSrCZ7EHlYIFaoSydPnFDmvr06tGie0ock62jHtspqXV/Zzespp2kd5Ra2NntauawFjnG+I14vPaOjbRtrf9c3tXPEIG1fNF97d+1UZmamI3IFiVYpQlAubkU9QUQnDV6gxgld9EK1LmpUtbMaV++sbm/20/79+93xclGYMppJngEwRrFdunWpnhlS15GrGinVVD2pivuNHxb+WI8PeEyDpr6rHTt26NSpU+7Z4Xq/3DwCn3z8kc5umaQ9oxpq/cpFrn/w2BYPV/oJsOIlIfvUSa3bu1vjVy1V8txp6jw1VR0mj9E7k0Z/ZjWCVdgx9kHCIF2DFs7WzI1rnPJ1+nyeest9/VI+EPAEq5TqyR5G3nJ4Cz9z+rSy9u9T+rzZOpzcTZltGztylNskIFUQJUeibnQL2WrylLJeekaH3myhHQMTtXX+HO3ZudMpAShaYdOhf2jzN4STuWfU+5XReqFa1yjBgmi1fb6nVi5f5c2E+eG6rv9oazwHEP3DmYfVZWon53cFuarJ2g8frARVR8VKSVCtfglqMay55i2ZpyNHjviwDdeF9tVP/vCDszo+61XtSfyDZgzvpfT0w65ufH9QOG7gwgqxwgR47OQJLdu1TUMWzXEKlZEmI1E3us1LZ7R6zZyk1JVLtPngfhnR4v6+jgqvo7K01xOsUqgNeyBNtco9dkyHVy3T4f49lPVywzyl6kbJVBHXGUlD2cK0eOiN5to2pL+2LF+mgwcO6OSpU848yWDnH9i8hgAWu7Yc1CtPJqlhlU5qULmj6j/QQY2qdlHTR7ooddRkN6nAY5aH2fX8AjfUK8IyzF43S4/3e9SRqQcH1NCD/WsE6lViFVXrW1nVEwOS9Wj/h5U4oa+2bN2SL2yDH2SuB/n856KDXMpJ07GRjyo9sZJm9W2sjRs3uRcvj2t+rPjP+nH6yzPnz2nd3l0avGC2U5wgRDdKpq51nZGtbtMnaNzKJdqTcVgfhCZ8+Lr6bF2VlT2eYJVwTdD4eSBRixhQMtN26fCYocp8vVngS4WZr1lg2rspxaoIkhVNE0WraR2naO1v/4o2TRirnVu3ODWL6NmQP08Ygsbw4ZUPNXXYIjWt1V0Nq3YK1iqdnJmwUUIn9Xynv/Yf2O/f9G/g2bHnAb/DfYf36fXxbZWQVMURLBQrTIL4XyUkVnHkKqFvYC6skVxVDQfV14z5M/KFbaDN+uXGEPjkU+nstinK7HeXspIraVmPWpo7c7KbTOBxzY+pkStUq72ZRzR2xSJ1mTouav67Fkm62eNGskgHf673N61T9qkT7kWFvLH6pewh4AlWCdaJDSaYBE+dPKGja1fqSGJnHWvxbOA/1ezpwCx4LXIUo+NO0YoQrSMvv6Ct7yZpy6oVOnz4sOtUIYG3Osn6VJ/q9IlzEfNgZ0eqzAcLPyx+v/J8N61c4c2EN/Lo0L4g84RlGLd8nP408JHAJJhSLSBXECxHsoKt88eKEK/ayTXUeXQnrduwzs2S9e31Rmog75qPLp3XiblvKzP5dmWlVNKO3vdo8tAeLno+L4V+yT8z8OyF81qdtlMpc6ar/aTAv+pmidONXA/Zwk9r5NIFSjt6RJev+PAlZbWteoJVAjVjbxR0UpCrk7m5OrJkvnNgj5KcGJGmqEJ1vek1raPMFs9oR5/O2rh4odIPHXIKmx+0PtXuLYfU9qlkZxJ0zu34YSV0cbMJX6zeTU0f6aTxoye7+GT+Tb/4DxBY8Ux8cPED7dy/Q
y3HNHMBRWs4P6vqzt/KFCw3izAlIeKDFRxD4ao74ClNnDNR+/ftd+3VZsUWPxf+TBCgj7qUu0/Zox536lVWyh06lHiHpvdprG1btzgSfKurIpTf2uypc2c1b/N6dZ8+3qlWN0KMYnmNKVr95k7X5v179cGlD275l+Oy+GR7glUCtcKDmY9czZ2pzHbNo75W5ht1w+ToeslUYedHZiAS6mF3lze0fv4cZ3oJf1z3VuxgGbDnjF/p4l41rtZVjap11gtVg1mEjmwlQLY6qU/Hd10IDM6/FXG63sfGBisIfO6JXA1b9J4e6f+gqkcUK+fMTvwrVhSr8JqM83uCI2O1UqrrrVHttGrNKmVlZTlHeZ41XwfXVyPgdXb7NGUNvM+pV5nJdygzuZKW93xQ8+fM8GbCCAnl+c49fUpT1i4XIRXaR2YFxpIs3WhaRrJwgl+Vtl0XPvDf7Ly+p6Dkz/YEK8YY20ASKFc5Ovr+NGW93jQgV4URnTjuMzXtWLO6Suv4mjbMnX1Lkyzq7vTJc0puN05NanZ3qhUO7lETYZXARMjMwlcbdtfqlWv8bMJiPj9gCxHC32/NzjVqMOw552eV4BzZqzgChYN7rX7VlZBY1flkQbJQrWw2YUC+qunP/R/X8OnDtXv3bqcimorFPfxSPAQ+vnJRJ+a+o8wkzIN3RNZK2t7rHk0e1ltHMzKcIlK81CrWWbQj114/+tj5OU1cvUydpowtE8pVYWQMotVzxkQt27lN5y9e8EpWGWqOnmDFsDJ4KM3H5MypUzq6eJ6yTLmKI5G6mlKWR7Ke1s6ub2rDogXOJ4uB8FYbuPC/Stt6SK89neLMgRArZhGiXLm1WmRbtbOaPNxBE8ZO9WbCYjw/9lzw0pGVk6XE2X1UI7GaqvWprKq9/yhIFqZBAos6pcqc3JNweA++S8jWOb4nV3EhHNoMb60ly5c4fyEc5r2KVYyKiJwCDb184qCyxzwZ8b8ygnWH0hPv0JQ+TbV16xY3Maf4qVacMx25+vhjnTx7RtPXryzT5MoIFyQLJWvtnp364NIlT7LKSHP0BCuGFcGDCSm5cP68MjeuVWaHV8qkclWQcJnJMqt5XW3r01mbVq/OZ365FfyMAhLwsTMPvlijmxpWDoiVqVeObFXt5NQsTIeNa3RW306DHBmlzv1SNAL2XBB7bemWJao3+CklJFZ2ZkBTqMz3ChLlfidXU0LEud0pWFFTIoFIq+qxfo9o4OSB2rHTBx8tGvnCj3z66Sc6nzZXWQP/4MyDpmBhJsxIrqTF3Wtr4dxZTm2k7m6lJegHPnHmtgVbN7iAoZAXIzJleUs+E9+fop3pB3yojTLSaD3BilFF8GDyFs3bdPbeNGX26RjMEGxaujMFC5Kn4v5vSlZGy+e0aXCKtm3ZrOPHj7sH9VZRB86cOqeUN8epUeXOTrlCvYJYsUKq2L5YrZteTOjmPgD9+gu9tG7dOjeR4VYbiIr72NhzcfnSZaVnpqvDlHfch5whWEEohrwQDdUTqzg1y0VwR71C1UqOfJcQh/d+1Z0JEZWL/U0GN9bcJXOVnp7uyMCt0k6Li31R53185QOdmN9BmSl3hsyDERUruZK29bpHU4b1VWZW1i1nJuRl8tLly1q3Z5czu0Fa3gl92qZMEyyXz9EaPH+2Dh/L0kcfed/Eop6B0trvCVYMkLa3Hhx4T2Yf09Exw5Td4ploKIbikpx4n2ck61Dbxlo7ebz27t3rPp1hpsIYQFVmk6Bj3bfzsF59mtmDnZ2J0Dm4G7mKfDIHcoXT+wtVuqjZY500ecIMh5EnWJ+tWnsuCMvAtzZnrJ2hxwc86tQrzILV+j4QCSYaxLwy5QpzoCNXkVmEbr/7hE4kTlZyoGI9nPKg+ozvrc2b814GqEdfF5+tC9uDHnXlZLqyxzz1GfNgoGRVUnrfOzQtsbm2bt12S80mpN3Q1x3KytTAeTPLrM/V1UgehJBP9ExZs1ynz57xpnNr+HHaeoIVA+Dp1
Hkwz5+/oMxVy5T1WhNnGjTTW7yJ0/Xdv46ymz6tnV3aad2ypc7HhU+aVHR1gPLNnbTaBRe1kAzme9U4oatTrdgfVbQSuqpJrS5K7j7Efb6F6/2Sh4CRK6cIXLqkfen79EpqayX0ITp7FUeyAoL1QNQkGKhVVV3g0XwEixhZkThZzCLkczoWxqHhoOc1ff60fMFHPcHKq4eCvyBY5/csCMyDyZU+q2Dh8J5cSQu6P6KF82bfMmZCa69nzp3VtDUrgu8IlhPl6jOEa9Jo9y3D9Xt2uc/58AyWhSU3N1dTp07VhAkT3Dp37lz32auykLeSyoMnWDeJLA8mgysOvLnph5SR3C0ajuH6iM1NfnswRk70pmJltHhW64cO1M4CH9itiIMXzu3nz11Uv7fHB+pVxBxoZCrfNkKyIF2sbzTupY0bgs+L3GRTqlCX24AVBBU9pdRlY1Q7uaZTrDANslbr/YD7sLML1eC+QxiYAzER1rSAoy5EA6bCQNViliG/LbxDreTq6jSmg9atX6fs7Gz3HJaVAaUsVijmwVNLegbEqhCChR8WQUe39LxPU0YkO0xvBTwpIxaIzfvS1GP6hHKpXhnRcmbNSaM1ZMFsZebmuJf/stBvL1u2TN/+9rf1hS98wa2/+MUv3At8WXxOYpUnT7BuEkkaLuoVDryZc2foWOsGzjRYPtWrgOSRdz6rs+etVlozf26Z+MAuOF++dEXnzlzQ2dOs54u3njqvs+HVrsu374J2btyv1+qmqGGVwDzoVCuCitbo4cI1oGZBtNjim2Vr88e6aFLqdGVl5OS/D+nbvdiG73e13+FzT593ZcYpuSx0kNfzqDBg8eJx8eJFbdmzWc1GvhjxuUKhIvRCyPcKZ/YIwXLxsCK+VtUjswhRq4yUoX6ZCdG2dQfWUersVGfSJo4bz2N5w+t6sL2Zc6+cOqLs1LrKjIZmyJtBaM7ubA/0vUNTE5trx44dFd5MSFuhreaePKFRS+Y7cgVJMcJSHrfkv9OUVC3cvN49g2WBJC9dulRf//rX9bnPfc6tP/vZz3T06NGbac5l/lpPsG6yiuzN5/jhdGX07aScpk+V6udvSkwlI9L7S89o3ZD+TsU6ffq0G7ji9aBy36MHszV9xFIN7zVD7/Wcrve6T9N7PaZrWK8ZGub+n66hPaZpqNsfHOO3W6P7p7trhnafqiHdImv3qerVZqQaJ+QPKOqitkOw8MNK6BqEarCQDRCtqp3EtwnfaJioAR0nRO/N/YZ0n+r+d/nsMV3cL1inaUi3KZGV+09x55IXjlMeV4YeUzV+8BztTztU7mYEMWDZc5F7PFeDFwzSQ/1qRZSnEFmCaPHNwVCsKxSqB/vX1IP9qkdNh+64nRf5RmHgrxWJ9J6coDdGvqaVa1bmm/3qSVb+zo0Jgc48+O4fXVDRMKEq+Jv4WIt6PqpFC+bqwoWLFZqw0lb5xuD6tJ3qNm283plcvsmVEUJIFr5kR7OPlYmXDk+w8j+P/r9rIGBvPsSMyly+SFkvNyx3ju1XI2ioWGnvvKy1Sxe7GUWYQXnTi8fARSd4/ux5zZ2yXK8/m6QmNbqpEYE/q3ZxUdchQxAh2zqzHp+3IRp71c5uPw7qdpwAoqZKoUbxv80aNHXKEavqecSK/901VfM+m9O4et5ndMxsaJHfOb9J9e4uDw2qdBSry3NkZiJO9HZPl99o/jur0UNvqlv7RK1fv8Gpo/HA/BrNv9DD5JM18Ek8r3W71qnRiAaqkRIEDEWJghyhYAUzCXFc51gQ54pjzgxI5HYL0+COV41+NicwE1ZX4I8FEauqPw/4k0ZMyws+ah8vLzSTt+jOjz/6UKeW9gmc2wsxD4ZJ1rGUStrU835NHdVfOTm5FXY2IW2VPu3UmTNKXbaoXJsGjVjZFoLVeWqqVm7fUiZiY3mCdYt2PDdabB5OOvJTOTnKHJRUMZSrsC9X0zo60vJ5rUkdqT1797qBPp4E66OPPxKOkgtmL1WnlgP0Y
s2AYEFyLIyCi1EV+cQNZIjVkZjItwQdIYoQHKdARUhO/cod3HlGesIEzAiTOz+SphEiu4cjXpF0GzzQ0RE2O8a5pFf/gQ7R/Xa95df+r1/tbT1T+yW99vKbmjF9hpPQ8Q2hrZWHhXxChpkYcTTrqHrO7K7a/WvmV6MgUxAsN5OwclTBsqjuZvoLq1vu0znmlxVRvIIPQUf8s5KqqvngplqwZIGOHD7iwzYUaCw0nw9PZyhn/PNFzB78rKnwYN87NTmxlYuYTz9XERfrw3cdOqDeMyZVKILliNakMRqxaK5yThyP28uxtRtPsAwJv70mAjaQXL58Rdm7tivr7VblIqjo1RSrwo7hj7Wpbxdt2rBeJ06ciIs/hpELthA8zJXr12xScufhavl4d0GaLKQCRMVUK37bfrZhEuTCLUQULiM3RqRQmSBD7nzUKpzeq3aREScIG2qUqU9hIgUJc8dNIYs4xbtzInnguOXFqV4Rle3Zaq+p/hMvqWvnblqyZIkjkwxslNswuGbDjOMJ5BFyRZ4Jy7Bg43w9PejJ4IPO0ZhWgSO7M/E5hSry3UECi/atHI3qznFmC6JyVWN/xDE+8N8KwjVU55M6EdIFGXsosZaSxyf5sA2FtAH8+C7sX6qsQZWjH3cOK1YFf7ugo0mVtLDHY1q+ZKGL71ce2mAhRb/qLqeMX7iguRvWquPkseXa78qUq/AWFavn9AnafmCfPrxyJa5KpCdYV22K/mAYARtMLly8qKyFc3Ss5XMVyjxoZAuCtaddC61ZOF+ZmZlxm6VlnbvhTkDX/fsPKHXYVLV9to8a1+jqzH9OzeKDzKhJplqF/KfC5MaIkZEvjtnxKFGqGhAp+x/i5ZSoAgSL6xyhwxk+8nFoS8vMgs6PC5JVJSB7/M81Dap2UL3qrfXi8y00aNAgbd++Pa5qYbidX89vBisIMOrVgcMHIkFFI6a+iLkvIFbBjEB+YwY08uQUq8SqSugbzDJ0+yMqV0KfByImxcBni2s5bv5btm38biPNnD9TBw4c0Pnz56Nv7dZ+rqc88TiXfMZ8lfTJR1d0amlfZab8X+GhGQpzek+upE29/qiZ44a4oMPUb8zzFseXB8pCe805flzvLZxd8dSryWOCMk0arfkb18bd2d0TrHj0KOX0njyc+JmcPnFCGSPejZgH68iISYXZYiZs9bxWjx+tffv3O9MLHW08F7AnDygluTm5WjxvuTq16a+mtYPQCZAWR2YiM//43wgSpMfUJwiWmQLdflOdIEih7w7y2Rwz8WFKDKtbUVIVuSf/22oELkq+IuZIt78asxI7q37C23r24eZ649W3NGvmLGcSxNfNBjNwprxlfQnXyalTpzRl1WT9acAjcipTJO5VQIiqqFpEjXJ+WH3zSBKmQadQRYgTyhXfKyReVkCwqjg1rGYkEKnz0YoEIw1CPSTooZSa6jamq9avX58vbENZx9Dhd+UDXTmVrku5+/RBTuxW0ruQvkE5qc8U2zwYKFqVdKDvnZrTv4X2blqii9l7dCnG+bp8NlcffxyfWZ9gTh+y6+B+9ZoxsUISLNQsVKxRmAmPx9dMWBIEi5e5Y8cCJ/6y2Ef6WYQ3WCs2wJ84kq7MHm9VPP8r88VqWkdZzZ7W+oFJ2r5jh5gGHy8/rIJVRQdJXs6dPasN6zapf48RavXnHs4B3pneiFUVimll+xzBwUE+8jHngoTIiJGRNP6HYD13/zuOXBkZY2vfLDRCxj673pQqtuH93K9+5faqV/VlNarbQn379NXatWt18uTJ6GwfysZaXharC8Iy7Ny/U6+ktnF+Vy5+VUokvlWEOJnyZApWoD4Fahb7CDjKPs6r0uuP0Q9C51O2XLiGwLwIUTMFC9+uZwfU08TZE7Vnzx7XXhlE4/1ScK16JH8fXTipE2veU2bqczo26s86Njp2a9aIR5TZ/+5imQfD5sLM5Eram3SvDg15SFmjnohZnjJGPqm0sc21Z90cnTl92j3H18Io1
sdps5c++EArtm1Wp8ljKzTBSpo1RfuPpMfFxcPqrSQIFkp13bp1lZyc7MK04K9alhZPsG6wNugQURpydm5TVruXKqT/VVSFa/qUtvTqqA3r4+eHVVQ10UlSF5gMD+w/oPEjp+n1Bn3VpFagZhmpalIj8NUyUuXIUYQM2TkcgxxBmowQ5VO+Hggc4RtW7qjn/9g+eo4RMEgYaZgPmLtH5U5Rk6I7ltBVDaq1V70aLdW8cWuNHDlSaWlpUfm+PJEqqxOrAzq34yeOa9SSkXp0wMNuNmAwI7B6EPfKhVgIzH+OSEG8Ip/AMfLEbEIjV6hfKFjm/F61d/jTOnxep4qqYjpMCmYmugCkiVVUMzFBbw9/U6vWrHJm7fLwJYJPPgkU8ez03do24S3tT77ffXiZqOqxXK8V/ypMrvJ+5+UBwnUz+eH6A70rafbb9+vdLq01f95cpzRiDSjNxdrsmbNnNGPNinLzvcGwf9X1/O42dZw2pu105vt4vWyUBMHavXu3fvCDH+grX/mK7rvvPudiwbdJeakqC32pJ1g3+FS7QR15csViHXulUYX0v4Jg4YOV27SOdrZvq3VLlyonJ6fMxGWyB8g6S2cyzM3V4gXL1PXVAc5kaITHkShmG+KPFfGDgjw5UhU2B4ZCNjglKuIvFVWlmEUYUaTs2sZVg1AOECrn91UgtIMjXlW7uM/tPF+tneo93ERvt2uv+fPnu7hNEJNwp2flusGmWaqXkVdWlMQL5y9oU9omNR3ZOFCvUhICkhVRsCxgKOTKVqc+YTLsW1nVWZ0aFSFhkdmCnIuahcO789Nykd4T3Ody+J8wDUHohkD5gqA91e8JjZ4+Srt37XaTIsq6ikX9s17Et3DPbq2YlKhtKQ/qaNJnZ/flEZ/ydQzH+W3d7lD/lx5QuzZNNXZsqvbt2+dejsLtvzQasLXZnOO5Gr1oboVzbi9IvjpNGauFG9bG1cWjpAjWP//zP0eDl0K07rrrLg0cONAFyC7tdlWw7XqCVRCRYv5PxV24+IGy5kzXsRbPVliC5VSspnW09/XmWj1vbtTRvSySAOs0iaq/bet2DUkeq9Z1uqtRtcBsB6GCKDmSFXFGd2pTtc5uhqAjThH1ypGnELmCPBlR4hiO7O4ci+weSduRrMjsRSNlXOcc2RPaqNEzzZWSnKJNmzblM7eS97KI6bUeB/LMs4Cam5WdpX5zUvRQ/1pRk52Z7uxTN0as+D+IgxXMEMTfKjg3MPclMEMwYiZkW8ORtGD2oFOqXJDSvHNIlyjwLo3kqqqRWE2vDG2jxcsWO782TJdlxbRdEFOr9zCWmRkZWrdwitYNfkEHku65ZmDQsky6UK3SE2/Xgrd+r05NH1LXju01f8EC93JR0N+wIDYl9T9YQ7qPZGTo3TnTKzzBaj9pjKauWOr6nHiRjtIgWBYlHqJ19913KykpyZkOqet4LLcUwUKGJtQAsZRuduW7Z4fS07V/9FBlNw9UnqhJzfyXKsq2aR3ta9NIc1PHuCnwfN4AJetmMSyJ68kX65EjR7Rh/QYNShmmpn9+S8898KYz6dkMP/PNggQ5PyrIlDmhOwf0vKjuZiZ0RIlgoRFSBSFz+yLO8WFC5cyDjnR1Ur37X9cj9z+vZ59u4HwFVq9ercOHDzvTSFnFsbh1w3OAk+mhQ4c0f+18PTe0nlOUUKtQl8y8x/cFHQmK+Fc5NSvi4J4XeiEIOOqc382cGAnd4JQrZhhGHOQdIYv4dEWJmLsmCN3AvsdTHlW/1BStWL7C+WMxC5b8Frds8TqPNpGVleUGhoVzpmtmYhNt6Pp7HUm8vfgzAAubFRiHfZCrnd1+p/ea3a7mzzykjh06iI/8olxRRnteSxtr7kt7WL95s/pMn1DhCRaO7iPmznRtKl7PwLRp0/S3f/u3UbXpJz/5ibZs2XJTz+PKlSv1/e9/P5qmESzbmukQRQt/rdKOKXhLEayDBw+qVq1a+t3vfheT9bZf/1o9alVRbrMKO
HuwADnc8vxjqvzr2/TLX/5Sv/3tb2OCX6zqobB0br/9dv3mN7/Rz3/+c/3wBz/Sz390p564u3XgIxUJj2CqFAQLNSpMuiBLKF3s5zwjT06hiqhX7nxmHEZMhpAtR9Js1mHlDnrkzib6zx/cpm9+89v6p3/6J/H9rV//+tdlHr/CMC1qWi6UmAAAIABJREFUH+3h9jtuV41W1VV7QE3V5IPMKQlRMoQJj/8DFSpi7nPEqUrw+Zxo4NDQeRGCBimzmYimfgWmxsqqgcoVIWKOZIUCl+Iozz2rtHpAv/zNL3XbbbeVi3ZbEOPbbvuVfvKTH+vun31HIxr9pw72+V25ULMgVhlJlbS83f+q7l3f1T/+/df13e99Twyq1EXBcsbr/zvuvkst+/ep8ASr/ZSxqvdq67j2PT/96U/1l3/5l1Ey9KUvfcmNJzdT93ww+otf/GI0TSNWBbcQrXvvvVeDBw92inZpqdm3FMHatWvXVdluwUopzv+tf/M/OoGCVYCQVLT/t9R7SP/+9a9dsyEXB7N4nPP5z/+FvvfNf1HVX9VxM/ggSoRbMPLElvANECoX5yqiSplCxdadi7oVJliRsAyoXI5gRXy06t3fTvf8z0P61le/q89/7vPlFrfi1tW3/uNbuu/te1xwUEemIp++gfg4goXfVVJVFzTU1KeAKEVmAEYc2h1hwjRILCyncOXNEDQzoEvfYmhxTkTJMnUrSuiSqun+jvfpH37xnQqB//e+8UW1rPp9behwW5kmWZCr/b1+q0HP/Vi//tFX9YW/CD7uW9y2VJrnffUb39CLfbpVeILVYcpY1Wj4bIV4Dm6mfUC07rnnHk2ePNmpWSVtNrzlCBYzDm6mggpe2/q3/6MTFZxcQRY3Q7C+UX4JltXbV7/0Df3ux5X1zP3touTKmfwiEdghUQ0jQUTZb8TK7a/cKapqhY+560MqV5172+p/f3SXvvTFr8S0rVkZytr2C1/6gv63zs+d/xPkxqlO7luDEX8qR5byIrIHhChvtiDEqFpf1sAPC7Ni3u8qjphBqoh1xblGqPJMg4EvljnIR/29IiTvNw1+pb/+2l9XiLr48hf/QjVv+5amtfyZ0vv+rsyZDFGt1rf/lV6u/gN9/5tlH/OvffOberFPV0+wPld2SXCs+rsvfOELzopAWAe+lFEaflm3FMHCd6hhw4Z6+OGHY7LWqFVLA5969JYwEW5r+ISeeOB+Va1aVbVr144JfrGqh+tJ56GHHlKNqg+r3gOvOrMfH3/GrOcc1iPO7uanZb5XznxIeIWIwmVO8hYDy10bNSl21UP/10i//uXvVKVKFXG/68lfeTqXdpBQPUGVHrxdlbvcHzitm1+VzfgzohX9yHPIFytCxixEA2Y9M/9BpBypipgBjTQFKlVAxCBz9qkcruN3QLLM+R2frcqq0u2Puq3GL3XX3XepWrVqevDBB8t1nTxUu7baPP+QFnWrqfSk34vZefF2cicPR5MqaUX7Smr52K91152/U+XKVcp0X0H7rfXIw2rzblKFJ1jMKmz0TjtVr149bn0STud/9Vd/FX3R+drXvuaex5vp8x544AF9+ctfjqZZGBnDLPnd735XTzzxhGbOnOk+41XSypWlf0sRLGaO4OgeixVnudNnzypjcqqym9et8LMI97dtrBUzpjlnZj5BAvuPBY6lnQb1tmT2Or30SC+nRkGwIEjO7Bcx/QUqVuB35chWxCyIr5U7NxIvq+A1dqxh9fbq3TVF+PwRg6m8YlVU3VAeykXQ2a1pW/XSyKYRc15AbAK/qSDUgjMRum8KBmoWpKlmP/seYUCo7PxAkarqorlDmAIljDSruHhZjkRF/LWCYKR5aboPREdiaOGzFahied8wrNvvKaVOTxVuAkx0If+0haLKWFb3G/Y4Kq9fsUDLh7bWnqQ/xNVkiEmQ2FYTX7lLbV94XL16dhfOxziul+X2f+HCBW3fvUt9p0+s8ATLRXOf/340Llw82vfixYv19
a9/PUqG/vu//1vErLqZvOzYscOpUoURK/Z973vfc4FImVjBN2xtxq4RoJLe3lIEK5ZgMtWVmDVZ82ZV2O8QRv3ImtZx3yNcPX9edGp1aTfUWNQdeb5y+YpGJ81WwyqBEzsEy5zZIUgWtsH22RYzICvn2G/b2j53LWbFKh3UoXVfN5gzIJZHrIrCm7LQ9iEnObk5Gr5omB7u96AIq+AIUSgKe3QGYcT3iv9Roiz4KB9z5hpHulwE9+ADz44sRcIy5JkDA5MiJMyRtogPFtezOtOhi6GVF64hULsColczKUFvjWinFStXKCMjw8VeKi1H16KwvN79YG8reSccye6d27R8Ql9tSXlYR5L+r9SJFibBjZ3vUFLT+/Vayxdc4FxILOSFdlJW2z75og3vObBP/WdOrvAEq/2k0ZqwaL4jvRCaeCylFaYBxYrYWM8++6zef/998ekua4e2La3ye4J1g0jTeXxAoNHVy3Ws7QsVXsHa0ek1rV2+rEwFGr3equPhys44rs5Nh0bJEgTKSJYRJovkjv8VgUNNmTJTYvR/i4EV+dCzpYOy1bpuNy2cv9gNNKX9UF8vLsU9n3LQ7umgUTHX7VynF0e8oJop1d03Ao34ONITmQXooq/3iURgj6hSECpIFquda2TIEarEKqoVCU5qkd3tvEDlquKit7v79WO2ICQrUMVI2yldkRARRIInTaK9P9nvCY2YOkI7d+50nS4DbFkmAUXVC/VgdUEcKUJ+rHh/nNYMeE4Hku4uFZMhqlV639u14M079XbDBL3+6svO/AJ5LQ+4gh/5PJierqHvV/w4WB0njdHMZYvd57ho8/FYSppg4WNFyIYnn3zStUUUK1uo73gsnmDdIOo0Uh7QnN07lfVWi4r7qZymdZTT9Clt7tNZGzducA9oeVVlqLONK3er5aO98kgTqhQkKkKkMA9CoFCj7DeE6cWEwJTI/qY1+d5hd6d8mcLFNUawGnF+7c4aPjhVubk5bhC/wWZWpi6jk0I5cUFFjx1Tv/eTVbt/zaiCFCZBEB0IEOSG/e535CPNzhE+pZpq9uO7g3mO6+68SCwrVCxHlhxxCiK1EwLiwZQaecTM1K+IEubuH0nPfSS6zwPuW4YoZy7WVt8qajOktRYvXexIiQUfjdeAczOVawMGeYfwYvbcsHqZlgxtq92Jfww+s1NCca8gV3t7VdKoVr9Xi+ceU/fu3bRixYpy9S1N8KMfIxbW+AVz1X7imAr7uZx3Jo9Rl8mpWrJ2jfM/ild7LymC9cMf/lDf+c53nI/V7NmzS9XH6lrPsCdY10KoiOM0Uh7QExlHldmnY4X+2HNm87paN6S/du7alS/6eBHQlMnddKiXL3+osSnvOzJlsassyKgjVXwcunoQ+4r/UawcgYoQLH6zmtLFOe7TOHxQOiEgWJAsl1b1LurUNlG7d6e5dlImQbnOTNlgfubMGS3etEjPDqnriJOpT2GCZbMJIUnOeT1iyjPS5QhPJD4WalVApgIiZkSJfYE5MfKx6KSqjpBx3NJFPavVv0ZExcr7UDT3sXwxIzFYH9BjyQ9rwIT+LmDu8ePHy4XaUlQ1GcliS91gltu3Z7eWTR6g7ck1S8zxPa1nJfV84V61atZIo0aNct/SRNEkD5anovJcVvaTT14WIKazly8RCg9EpOAnZirC/5Sr95Rx2rB1S9R0G496KAmCRUDpN998U3PmzMlnCoxH+Qq7pydYhaFyjX3WifCAnjl1Spmpw5TTrG7FnE3YtI7S2zTQ6knjdeDgwehHia8BUZk8nJt1Ul2av5cXr4rI7YRniIRocITJHNhDvla2320JLGr+WBGnePZH94V+t3q6ixYvXOowszZTJoEpRqYYPGnvfFT70JFD6jylYzRIKETGCA3kB7OezfoLCFUe2Qn/z2/MeY5cRXyriPhu+41omZnQBRw1MyOBRJ2/V953Dc0J3ohVkJfgg9Hm+I6i1vTdxpo1f5aL7IwfEwpQea0fyzdb6
ghVPfPQHu0b8VyJEaydPSqpY/MnNH78eOfPxj25v+WlGM0p7qcYXkzUWLl+nbpPHiscwSsCoSpYBso1aOYUpe3Z6yYd0E7isZQEweLZ5cXClrLWBj3Bspq5zq09oBdwdF+6UMda16+Qflh87Dnt7dZas3hR1ME9Xg/odVZRvtOpr61r9qjVY32i8a8gRpgBjRw5H6sIaWIfxMuRqogJkX1O0UoICJURLny16lcOgpbaDETMio0f7KiRQ1KFUlIeMTMAra2j2OIw+v7a2frzwD8FZj8XwyqYred8nYh5hVkwSpgiTugQo3zmwoBIOTIFKXOBSKvlzSDMZyLMI2xGunCqr9b3ATkfK5zbIySP9IxgRdOO5CUgXFX0cHIt9R3bW+vXb3CfzmGmG/VT1jpnw784W8s72w8ytytr2IMlQrAwDx7uW0nTutXR8uXL8n2o2fJQnPyWhXPIL2biHbt2qd/0iRWSYDlVbuIojZ8/x0UwN/eOeNRVSRCsstCOrpYHT7Cuhs5VjtFAWZ0f1t40ZXZsWyH9sLKbPa0NKb20ZfPmfD4WV4GmTB66fOmKJg1ZGDXxQZ4ciTKfK8x/mPpCCpb5ZT2X8LoaVH9TLyQEBMuZCSMhG0jDgpHatcTLwgQJ6eryeor27Ekr1yoJ5AP1isEo7cBuvT7+VfcxZUdmML/hxN7ngWAmX+RbgfkJVv6QC0bAIDxmPgziWQUfdXbmv6jZMHBiZx/3M9LkSFT0A9FEfo98LDpyf86LzliMRJEP4mQFqln9Ac9qyuzJ7huFqBimYsVj4InlA0P+z24crcx+d5UIwXLxtpIraVWPBM2emur8XawvjGU5SiMt678PHzmi1Dmz1WFixVOwUK+6ThqjRSsDHzme43i1cU+wSqNVV6B70FDpmE+fOKHMUYODeFgVKaq7Mw82dObBfQcOOCk2ng/ojTYd6ul49il1bzXCkSojQmyjjuyRT94YeXLEqdo7qlerpZo2aqkO7bqp7fM91KQW/laBL5YRMggW51u6pGmkq3W9LlqyKHjTj1fHdqO4cR15hmDh2I4SN25pqh7t/1BkFl+gHFXp9UdV7f3HgGBBgggOamQoBXXK9kWc3QnpEDH1OZKFk3okhpX7PzLD0JkOIUcRsuXIWMShvXokkGmgSoXiXuHQHplB6BzsIz5g5AcShi8W2xp9qqrD8He0avUq5+iM6bM8tu1w3TJP6qMPzurE7FeVkVSSUd4raVfvuzTx3U5KTz/scAvno7z8pm1T5/hhLVqxXN0mVUAz4aTR6jd1orZu3x71v4pXP+QJVnl5MspIPu0B5c0+c+1KZb36YoUyE+Y0raOdnV7XuhXLdezYsXLpEGydyda1e9X68b555sAIITIzoClamPYaVe2k56u20zOPNNEbr72pGTNmaNvWrZoza746v5Kipg93dbMIIVQQMq6168MmR46/WLujRr03Pq7To2/mcTH16vz5C9q6Z4tajn4pmBnoYk4FRAozXWCWC7aQnoA4BeqTkS3buphZEYLlSFAkortdZx+Fjv4fSa9Wv2AGYdRny0V5j3zw2QhdZBuQqYiqZveK5Jl0ifBer38dpc4Y6+KVYfrEfFKeTYXMRL+cvVvHRjyirORKJadgpdyhI0l3aFqv+tq4Yb3rF+w5u5m2Fo9rqW8c9Ldt366B0yqWmRD1ClVu4tzZbtYs1hbKG6/FE6x4IV+O70uDvcJswqxMZfTvqeymdSrGh5+b1tHRls9p7cih2r17tzMFoNbF8wG9kWZCx0++pw5f4tQqlCVWM/85xQqn9WpBINFG1TqpXrXWqv90E/Xt21dr16515AgFh9lzmzZu0cDeI9TqKcI0BDGywoqVSx8TYeSjz40SOqlbu37av3+/y8eNlCFe14Ad9U3Zs3OyNWT+YD2UUivi7xQ4tqNW2XcDnToU8r8ysx7kC38pto4coWo5IoSvVp6TOsceZEZgRMFysbAi8bCIc2Wxs4J0+Rh0nlpF2s6R3
QhWxDfMfLMc4SOCPGSLcxKrqlZydb0+7DUtXbbU+adY2IZySxYIPrp1orIG3FuiBIvP4uCLtbJHdc2dMdnNLC6vmJFviAez0abOn6NOFchMCMHqPSlVK1avdn1YvBVaT7Di1ZOX0/vycLLScJnJkLV8cYVRsSCKuwguunRJ9PMKRq7KU2dKXk/knlHPNiNd9HYz3UGsWM2sByF6vupberpWM73UtJVSU1O1b9++6KxJ0qH8bhbdwUOaOn6m3mraV41rdJKFeoCkmYJl6hbxszAtLlu63F1bXrAjn6yQ03Nnz2n19tVqOKy+qiUGpCYw0QVmt6p9Hgg5mQcO7hAaU6wgOZAvR3KcyS5CqvgEjnNmD6K7W0gGTIE2C5Egonxah3AMECtCMpgChQrlzJGRT+Pwm/Nd2Id+1d39UcsCdc0IYRB0NNhXWU+kPKahk4Zq27ZtzlQU77f8m+kKP7p0XsdntS2laO6VtLPX3ZoypKvwYYr34H2juNHGyTsvT+s3bFS/yeMrjLN7h4mjNHrmdNeP0W/Rf8Wz//EE60Zb6S18HQ2WhkvHfCIzUxlDkpVNyIbyrGQ1raMjrepr7Zjh2p2Wlk+9iucDeiPNjLrZsWGfXv5zkiNTZspzChOqFR94rtJR9aq0Vd1HXtCbb7wlgtURgJA6DZeX36yYknJysrV44TJ1bzdATR/G/yrPAR5yBdEyktXk4U4aO3KiTp46mS+9GylPaV1DOcEO9SojK0N9Z/ZxQT4hSxAbCIo5sgfqUaAoQXIcuYqY/VyUdftGYERdynddxHwHeWKNXhs51/YHx4KQDvyGvOWPtRUQOgtQ6s7BhOjyETmXfCTnJ1woY22GttSCJQucGYUXpfJIFvC/upyzR8dGPqbM5NuLbR5EieIjzUcSUaWu56PRlXQ46Q5N7/OCNm7c+JlnpbTaaSzuY+08/VC6psx9X50njCrXJIuZg6hXfSaO1dKVK6Kfxwn3ZbHA7XrTWLVqlf7t3/5Nf/d3f+fWu+66y/Wz15tOeTrfzyKMQW3xgDoV6+JFZW1cq6w3y3Fk96Z1dKxZXW3r2VEbVq9yvlcMsuVy0Im8nU4dvlhNanRzZjuIj5EfZxas1lH1qrdS/bqN1bdPnkkQ5aZgh2T/s3XKzrlz2rplmwYlj1TrOt2iQUqdilUlIG4oZo2qdVSPt/vp0MFDDscYNLkSTyKvTV/Qim3L9QxBRSErEQd2U6OiKlUkPAL/Q6qIXcU5jjBxLCkSviHkn2XHHSmKfqMQ8pR3bjgIKaqWI1ahe0GyjHA5VSrizB4oVJgDLShp4Egf7M+LLo+/15/6PaKh04bkM4VbXZc40DG6wSeffqpzO2Yqq//d12UePNT3ds1pd6cmvHqf9vT+fYRkXdt/KyBjlbSke03NmzXFfReRNlPeFuqZlf6NT6us37hBAyeNU/uJo8pt4FHIVacJozRu1nTt3bs3XyiNeNYPs3XXrVsniBbrli1b3AtcPPNU0vf2BOsmEbYHlM4FZeP0yRM6OmNSuY2LldP0ae1r10JrZ8/UwUOHnAMonY91nuVt4Dl1/Ix6vTLa+V2hVDmC5UI0dNJzVd7Qs4800xuvvaUpU6ZETYJhMlmwvPY/WzAhhhLfgnt/5jx1frmfmtQO1Cvuw/3MJNnmua5atXKV61AsjZtseiV6OWUDh+Onj6vX+z1UKyX4bqBzSo+oTihRLiJ75H8XENTNEAyIUFTZQvEKOcVDoCw8A1vCJ7C1bxPah6MD1QqSlheewcyN5vcFgXIkLhKeAROm+YJxT/JoRI70yJO7NhI3y6lpiVXUZlQrbdqxyQ2y5dFM+PGl8zox9y1lFvPzOChX27vdoYEv3ac2Lz6jxF7dNHtQO21Nrq4jyXcqK+XaJItzdvS+R5OHdldGRma5eXko+ODYs8yL5NGjRzVn4QL1nDC6XKpYFvcKkrh23boyE1qnPPR5BdtFLP73BCsWKEamszMg8ZDmHk5XxuBEZb9Ur3yZC
pvW0eGXG2n96OHatSv4IG54ZlV5e0g+/fQT7d5yUG2fSo6aByE+z1d+R/WqvaxG9V5SYp9ErV69ysnolJUyFqecdh5EBDWLWWibNmzWu71HqdWT3Z3J0JkKqwYhGxrX7qBxoyaVyc85FPYIGMHKPZ2rUStGqtfsnuo5q4d6zOquHjO7qceMYO0+s6uCtVuwn2MzOSc4r/uMruo+vYu6T+8qfnfjN9daGjO7Bf9H9nGM8zjenZXr3DWRfaQxrYtLx6Uxs7u73q7L23JdcE/y6vLBNpSeHeea/vNStOPADvdCYW2+MFzK4j5nHszdq+xRf7qm/5ULFJpYSUvfuVOdGyfojbatNXlyEA9sT9puLZ85Uuv619XBpLuKkdYdOpRYSTP6vqBNmzeXazMhzzP9NzMKd+3apdTp09R5fDk0FU4arV7jR2vOwoXOcZ/xiGe5OH1aWWzb5T1PnmDFsAZpxAy2zEY6tmObMnq87b5RSLiD3DIeH4s8ZrR4Vpv799aWDRuUk5PjyGJYzYkhVCWeFHXx4YcfaXbqcjWpCeHpogaVO+j5Km+rbo0WatH0ZY0eNdqZhW70O2rWabGlE8OR9OCBg5o4Zrpea9BbjatHZia6qPAd1avjQB08dNB15HZtiQNxgzewMqHmnLtwzvmP5RzPUXZudoVbc3JzdPzkcZ07f86RhPLW5iFY53bMcLMHIVAuGGghShbH9vW+XZPa/l5tGjyirl06a9myZe7lgnqGWPLcr106T8uHtNSepPudyfBqaRIOYmnP2pr//nRnJizr7fpqjwN5p/8mLtbadWs1aOI4tR8/Uu3LySd0MA12GT9K46dPi/Zr5a0tX61+yuMxT7BiVGvWsTDQ0lHxjbOM9atdhHeioZdlggW5ympeT1sTu2rTqpXO8RCSSGdj5bJtjOAq8WTI75mT59Wn7ejo7MF6lV/Vs481Vcf2nTR//nxXTt7wONfKZ9vrzSDXWd3n5uZq2ZKV6tlukJo91MXFyILcvVK/q1avWlPmzYSGB1s6aNoBK+26oq6Uj7KyUo8sN9oWrrft3Mz5ro4uX9TJBR0D5/YiCFZGUiVt7lpJfRrfp5YvPq+hQ4dq586drp+yMlt9nz13Trt2bNPicX20OelBF/OqKNKGmXBbr3s1+b3eyjp2LIrdzZQpntdS9/QJWVlZWrJ0qfqNG60OE0aWeXOh87saN1IjJk/Qpk2bnFJenvvveLaBWN7bE6xYohmKfH3lyoc6c/q0MlYtCz6jU0ZJliNXLZ7R9j5dtGl5EA+ImVQ8nHQ25WGQKaoK07Yc1CtPJal+5Xf0TPXWalK/lQYOfNd1QDi0xrIDMpzAjAELVWznjl0aMWC8Xq7bU8TXavJwR00YO8X5+dggXlTey8p+awOU71ZZwd7qs6zUQ1H5IJ+Xjx9U9ugnizTpHepbSXPeuFNvNKiuN159RdNnzHB+g4V9g5H0qHOOHTl8WCvmTNDKAfW1P+meItNPT7xTU/s00Y4d2x0BLy/YFYaplZ8XzPT0dM1buEBJZZ1kTRqtzuNH6L2J47Vm7RqnQpofYXmui8Lqp7zt8wSrBGrMHlIa+ZlTp3R05VJldH4t+JROGTIXQq4yWz6nHX0hV8uczR5igEoRHlhLAKISTxKSM3fiKjWp3VkNHntZbVu/oYkTJ+rAgQP5YlvFMiPWmVn98yZMAMOZU+bq7ebJevHBjkrsMkhHjgSfF7HzY5mHWKZV1vMXy7KG0ypP5eY5Pb9rtrIG/uEzpkFMe7t73qHRbe5V64Z/Vo8ePbRy5Urn+MwzTjltDZef36TLC8jxEye0ftVSLR3+unYlV1FGJMhoWNHKiJgJlyyY456t8oRfwXLzP/m3lyQCBL8/d66Sxo5Uh/FlT8lCueo8bqSGjE/VipUrlZWZ6RS48t5/F1Yv5XGfJ1glVGs8pDTyKMlat1pHerUvE47vEKucZk/raKvntW1AH21evdLNnjFyZZ1uee4oL5y7qHGD3leXN
5OU2DdZBLnDv8Te7Eqo2l2yhhtbBqmTJ05qzap16tt+qHq8OUDbt+8o1w7BJYmdT/v6EPj4w8s6Ob/TZ9SljKTbtaHzHerVpIpea/OSxo0f76bso07b4HutO9F+OZdrnAP81EHa0v9POpLELENiZkX8vZIraVvPezVjdD9lZ+cUO/1r3T9exym3Pbu4ehBw+P25c5QyJkSy4uyXBbFy5Cp1hN4bl6rlK1Y4lwcLKEq9+SX+CHiCVQJ1wMPJYh0UgzoPauaunTo8fICyXnlBOc3i5fhexwVCPfhmS20eO0I7tmx2vhNI4mHlqgRgKdUkL31wWWk797lP3aSlpbnPefBWap2n1VFJZcrSZ8t9L168oH1792vF0tXOEZ42YeeUVB58uhUbATd78GS6ssfUyRdc9ECfSpr22l16s8mf1LtXD61YEQSbLPh8F6f9cU7wovihc4bfvGqh1o5orX3JfwgFJq0kzITTk1to+44d5d5MSKuxcvOCxIvnwYMHtWjxYg0ZN0Zdxo5wcbLaTx6jeKwQq/YTRqrn6OEaO3mSiy2Fzxhm3dLs4yr20xWb0nmCFRsci0zFHlQ6twsXLir7SLoOz56mo51f17EWzzolqTQc4KOqVesG2t27kzbNmaW9e/bo+PHj7sGkIynum22RhS1DB+hoeJsjuF28pypbGzCiXV6jhZeh6vVZiZCAc3sWBObB5ErCVLet+x0a0vJ+vd6yoYYNG+Yc2SEINvACHO3xeheuIY1z584rbecWrZnUWzv7P6ijyXc69QzT4aKej2jpwjlO8SrvCgrltZVy8wJKjCwCZI6ZOEG9Rg9Xh/EjghmGpaVmcZ+Jo9QpdbhSRo/U1BkztHXrFkd8C/ZxN1LH19sm/PnXRsATrGtjdNNn0NjpcCAxDPonT5zQkS0bdXjEQGW0a67s5vVKLJyDEausFs9qf4e22jJ2hLatX+98g/j+Fg8mHYiRq4ryYBrmlCvenb1hyjaM9U03LJ/ALY3Ax1cu6eTi7spMudPN9FvR4U51fjFBb73xqmbOnOkIAc837c7a4M0ARho8S84B/uhhrZk7Tpverav0pCAC/Lbef9CMsQOdmZB2Xt4Xw40t5aHcuBlTKZrfAAAgAElEQVQQgXzGrFkaMHqEuo4e5tQkiE9JhXNwitXEUeowboR6jBqmYaljtHDhQmfyZbKOuT3Eoo7Le52Vtfx7glXCNWKNni2dEw8qahZvlblZmTq8drUOjRyko++0UTaKVtPYhXQgPERGmwba17Wdto4doe1rVunggQNOtSpoErR8ljAcpZY85QmvpXbjq9wonJ+KhvdViu0PlRACV04dUfbYp7W/TyWNb3uPXmvypJKTEp3J6OTJk+6FLvxyEYs2RxqkycsiwXW3bVih1aPe0N7kB3Sgz52a2b+1du7aXSHMhOFqs3LTd/NiismQCQMTJk9Sv5HD1HXUULUfN0LvQLRiZTrEz2riKHUcO1w9Rg7V4NGjNHP2bDcLmm+lFjbbOxZ1HC63/31zCHiCdXP4FftqGr6t1kFdunzZmbCyIVqbNujQlHFKT+6mjDdb6FibBoGydR2zDnFcP9biGWW83EgH27+s3QP6aMf0Sdq1YZ0OHTqk3OPHHbHjjaegkuIfzGJXpT/RIxB3BDDyXdi7WHvefUjD33hEb7/6ksaNG/eZzz1ZnxOrDFs/wZZ+DLeHA3vTtHb6u9o+4DEt6ltHK5YuqhCzCQtiZmWm78QSgXsF/p3Lly/XpKlTNGj0SPUa+Z46jxnm1Kb2E65T1YoQKvyrOqYOV9dR76nvyGEaNnaMZs6e5Ygz/XhB1crqpGB+/f/xR8ATrDjUAQ+EPay8CSLjo2jxwB49sE8H16zSgWkTdHBwkg73fEeH27+so683U+bLjZTVur6OtXpOx1o9r8w2DZTxygs63K65DnVsq/19Oirtvf7aMWOKdq5eqX1pacrIzHRvmihWBYmV5SMOEPhbegQ8AjeBwKcff6RTuxZoy
bheendAPy1YsNB9mJ1nvDSfa+7FPY8dO6b1i6ZpyfB2Wr5gVrmK9VbcaqCsLGwhl4Ff7QVnNuSjyqtXr9asWbM0elyqBowYpt4jhqqrI1zvqcPYYeqQOiKIDD9hZGBWZDt+hDqkDlfHscPUZfR76j5yqBJHDNXgUSM0ftJEzZ833ylWECsUQ8yUFc1ftrj4l8fzPMGKU61ZJ8iDyspbER2VOWbzuQZimhzeu1cHt2zU/lXLtXfJAu2ZP0d75s12a9qC95W2eIF2r1yuPZs2av+eNBdAkM6OhxHSxgNJR2CKFffyi0fAI1C+Efj044919vQJ90mUPXv2OLOVPeOlXTLrv1BW9qbt1N60NDdruqL3NZSPlf6VF1jMssS9I0I+ZGvevHmaMnWKxoxP1dDRI/XuiGHqN2yoUoYNUfJ7g5UybKgGDH9Pg0YO1/AxozVuwnhNnzFdixYt0oYNG/T/2TsLcLmqqw3jrgEKtEDxQnEKlBYo7tYCpW1aEiAQKIUEt6IhSLAWKJLiIUKIJ0CMJBB34kbcXUmCtF3/8+6w5j8MM3Pn3jtzZ87Mt5/n3HPnyJZvbfn2Wmvvg1xZHYhJMplYRcleTctb6WWPgAhW9ljl7clUZItGi2YLwgVRYjXc8hUrbNny5bZ02bLvjuXh94qVK43PW2CTpyFC1JjleIdLJ+ANkkJE/89boRSxEBACeUOANkwbp3+gn4i28Zps354WZ/Lw1VcbJomuZckbAAWOOFpuL7tPkiFbTHAXLlwYdoOHKI0ZM8ZGjBgRto0ZNHBgWI0ICRs6dGjQUI0bNy6Yd1mpyKe26O+TJ8ek4+kWuPhKPksERLCyBKomH/OGRIflB423osOf9fejjTH6f02WRWkJASGQewRoz5nae+5TzC7GYsxTdjmv/lPe74IBfTUk060SPklGG4Wmzw9+s0eiT46ZWDs5JR6Ps/q5UwyFQEAEqxCoZ5GmCFEWIOkRISAEigYB9Vnftw44OfKzk8/ks9+Pnl2oXFOILwIiWPGVnXIuBISAEBACRYiAiFERCqUAWRLBKgDoSlIICAEhIASEgBAobQREsEpbviqdEBACQkAICAEhUAAERLAKALqSFAJCQAgIASEgBEobARGs0pavSicEhIAQEAJCQAgUAAERrAKAriSFgBAQAkJACAiB0kZABKu05avSCQEhIASEgBAQAgVAQASrAKArSSEgBISAEBACQqC0ERDBKm35qnTVQIC9bHxXZnZm9p2VqxGlXs2AgPDOAE4ebiXjTR3X/k15ADoSJTu2831Bvhe7aNGi8B3DyG39W2IIiGDFSKB8c2z06NHh+1VDhgwJHxaNUfZjl1W+89a9e3dr1qyZvfPOOzZgwIDwGYvYFSQmGeazIv369bN33303HD179gzf2otJ9mOXTQgV38dzvLt06RI+2xK7gsQow+3bt7czzzzTzjrrLLvyyitt2rRpMcq9slpZBESwKotYAZ/nQ6Cnnnqq7bvvvrbXXnvZk08+WcDclH7SfKz1jDPOsM0228w23nhju+aaa4wPuSrkBwG+y3bVVVfZpptuaptssoldcMEFYZafn9QUK4T23nvvDViD+bHHHqsBP8/V4oUXXrCNNtooHLvvvruNGjUqzykq+kIiIIJVSPQrmfaMGTNs//33TzTQu+66q5Ix6PHKIIAaH0LrHWLdunVFsCoDYCWfhWD9+c9/TuB97rnnGiRXIT8IQLDuvvvuBN5HHXWUTZ06NT+JKdaAgAhWeVUEEawYyXvmzJl22GGH2VZbbRW0Kvfff3+Mch+/rOIjcemll9puu+1mtWrVsptvvlkEK49iXLVqld14442266672i677BJMKIsXL85jiuUd9TfffGONGjUKeIP56aefbkziFPKHwEcffWSXXXaZXXHFFVavXj3hnT+oiyJmEayiEEN2mWCG//rrr9szzzxjTZo0sU8//TS7F/VUlRBghj9mzJjgF9S3b1+bPHmy4beikB8Evv32W
5s0aZKBNcfYsWMNGSjkBwEWbUCoHO+RI0fa+vXr85OYYg0IUJ/XrFkTDhze1Z+UdsUQwYqRfFnhwyDEzJNDjbMwwtNKq5rFXXgL75pFID+ppavH6a7nJxeKtSYREMGqSbSVVqwQoOPTNg01JzLhXXNYk5Lwrlm8SY2VyZi9OZYuXapVyTUvghpNUQSrRuGuXmJs0zBhwgT7/PPPDXX+vHnzqheh3s6IAOaS3r172/vvv2+tWrUytsZAg6iQHwQwnwwaNCiBNyZwBiSF/CDA5IFVbNRvjm7dugXTVX5SU6wg0KlTJzvvvPPs/PPPt9q1a9v06dMFTAkjIIIVI+GyTQPbBhxwwAFhqwb8sBTyhwAr2NivZosttghbB+CUqm0a8oc3Poas1Nx8883DIo6LLrpI2zTkD+7g33bfffcFvMH8uOOO0zYNecSbqLWKMM8AF1n0IlhFJpBM2dE2DZnQyf09bdOQe0wzxahtGjKhk/t7aAy1TUPucc0UowhWJnRK754IVoxkyjYNhx56aNCosBEjs0+F/CHANg1oUXbeeWfbYYcd7KabbpIGK39wG9s01K9f33baaSfbcccd7fLLL5cGK494Q7AefvjhgDeYn3LKKdo2II94E3Xnzp1Dn3LxxReHTXVlIswz4AWOXgSrwAKoTPLM8F977TV76qmn7PHHHw/+QZV5X89WDgEGIPzd+vTpE7CeOHGiVm5WDsJKPc3K2PHjxyfwxj8Iv0OF/CCADxafasHPkDo+bNgw+bzlB+pErPQpTCQ4Vq9erf4kgUxp/iOCFSO5surHt2jQNg2FExxyUKg5BIR3zWFNSsI7P3inwzXd9fzkQrHWJAIiWDWJttKKFQJ0fMzyo4c6w/yJUHjnD9tUMYN3Muaq36mQyt01ViYvX748HCtWrNCq5NxBW5QxiWAVpVhSZwr18pQpU8IO1+xyre+0pcYpV1fpDNnlun379tauXTsbPny4OsRcgZsiHrSymKkc7/79+2tn8RQ45eoSE4dx48YFvMG8V69exu7iCvlDoEuXLob/1SWXXBJWzOrTRPnDuhhiFsEqBilkmYe5c+faOeecYz/72c/swAMPDJ/MyfJVPVYFBCCwZ599dvj2I8vYr7vuOjm5VwHHbF/BL+Xqq6+2LbfcMizkYCBioYFCfhBgwsb3TMGb44QTTtA2DfmBOhGrVhEmoCiLf0SwYiRmbdNQs8LSNg01i7e2aahZvCFY2qahZjEXwapZvAudmghWoSVQifQhWGivNttsM9too43s3nvvrcTberSyCKA9ueCCC2z77be3bbbZxm688UZpsCoLYiWeh2CxmSt4b7fddva73/1OGqxK4FfZRyFYDz74YMAazH/9619rZ/HKgljJ5zt06BC04lgi/vCHP0hjWEn84va4CFaMJIZT5Msvv2yNGze2Rx991D755JMY5T5+WWWLAHyCevToYd27dw/+KvrAdv7kiA/WmDFjAt5gPmLECG3TkD+4w+INfDrBmoPPFOlLBXkE3Cz4FLqTOxMKfXorv3gXOnYRrEJLoBLps8KHWacfapyVAC+Hj2qlVQ7BzCIq4Z0FSDl8RHjnEMxIVOlwTXc98qr+jSkCIlgxFZyynX8E6PiSt2hQZ5g/3ME2Feb5S7G8YxbeNS9/JsfaaLTmcS9UiiJYhUK+CunSONl5edKkScau4osXL65CLHolWwQwEQ4cONBYWs3BzuLSGmaLXuWfw0Q4cuTIBN6DBw/WNg2VhzHrN5g80I94/WZLEm3TkDV8VXrwo48+sssuu8yuuOKK4G+obRqqBGNsXhLBio2ozNim4fzzz7fDDz/cDjnkEHvuuedilPv4ZZVtGs4999zg4M4y9uuvv14+KnkUIzP7a6+9NuC91VZb2aWXXion9zzizYTtgQcesK233jocJ554opzc84g3UWsVYZ4BLrLoRbCKTCCZsqNtGjKhk/t72qYh95hmilHbNGRCJ/f3IFjapiH3uGaKUQQrEzqld
08EK0YyhWAdfPDBtummm4ZtGu65554Y5T5+WZUGq2ZlBsG65pprEhosdrvWRqP5k4E0WPnDNl3Mbdu2tVNPPdVOO+20YCqcOnVqukd1vQQQEMGKkRDZpoEZ0MMPPxz2r2FptUL+EMAHi6Xr+E18+OGHNnr0aPlg5Q/u8CHzzz//PIH3kCFDtE1DHvF2HyzqN0e/fv1kAs8j3kS9bt06W7JkiS1dutSWLVum/iTPeBc6ehGsQkugEumz6odZJwM/hxyuKwFeDh9FDgo1h4DwrjmsSUl45wfvdLimu56fXCjWmkRABKsm0VZasUKAjo/Dt2rw37EqRIwy6/gK75oRmvCuGZyjqbBSlpWaHGzqSl1XKF0ERLBiJFsa5/Tp023y5MlhqwZt05Bf4aElZKuAjz/+OJhQZCLML97UbzcRYrIaOnSoTIR5hJzBnS1fZCLMI8hJUXft2tWuvPLK8JmcG264wWbOnJn0hH6WEgIiWDGS5rx58+ziiy+2o48+2o444gj75z//GaPcxy+rOLmzLYZ/i5AOUZ8SyZ8c2aaBbxHyHcJtt91W3yLMH9QhZgitf4sQzPUtwjwDrm0a8g9wkaUgglVkAvHs8M07HCIZ0P1gU8B99903rCDkY8+33XZb8Mny+5zRuqD6V6gcAvizsYgAB1Q/JkyYEAYdsObg46yzZ89O3Oc5Vr5JzV85rHkazMDOseaMdpYNGB3v008/3ZBB9BlkpO9BVh5v+gQIbBRLtiG55ZZbEngfdthh4dub0Wf4bh5ETKFyCFC/+a4mH3du3759ODp16hT2efP6veOOO9rzzz+fuN+uXbuwmAa5KJQGAiJYRSrH+fPnJ1YMPvLII8Zx66232s4775zoEE866SRr1KhRuMd9PgDduXNndYhVkCnbAYAvg/qZZ54ZjlNOOcV22mmnBN577LFHWF7t988666yAP8RWoXIIrFmzJqyGPeOMMxJ4s3x99913T+BNXUcGjjdnJhWswFKoHAKQJDTe1FnHE+z32WefBN6uxYrev+6662TGqhzU4WkIVqtWrcKEmHrMUatWrbChqxOsjTfeOGjH/T7nk08+OeyuX4Uk9UoRIiCCVYRCIUss4UVjstlmmyUO3//KG+gmm2ySuMdzu+22m33wwQfSqFRBpqzObNy4sbFju+Nb0Rny1bJlS2lUqoA3Wqi33nrLmMVXhLPfZ8fxZ599VhOIKuCNBgtfwh//+MdZ401/c/vtt+vzOVXAm1f48gZfgvD6W9F5iy22CJPk9evXVzFFvVZsCIhgFZtEvssPMyA2pYM0VdQw/f4FF1xg+A0pVA4BX02FOeq4447LGu8LL7zQpM6vHNbRpxmAzjvvvKzxPuGEE8ICj2gc+j97BDDJ1q1bN2u82dSYvcgIcjvIHmd/EszeeOMN22GHHbLC/KijjgpmReHtCMb/LIJVxDLEF+Lyyy/PqnGiCXjnnXekvaqGPPHDQou1+eabV4g52ivhXQ2wv3v19ddfD07tPklId0az+MQTT2jvt2pCzoedMXWnw9mvox1v0KBB8AOtZpJl+boTUnw2Mcs6runO9Dl8F1L+bqVVXUSwilyeaLF22WWXChvoRRddFLQpro0p8mIVbfbGjRtnxxxzjPDOs4R8AJozZ46dc845FeKN9ootBQj+bp6zWFLRO2Y4rWejxTrwwANt4MCBJYVBIQqDJeLtt9+ucBJx5JFHhi9FFCKPSjN/CIhg5Q/basXsHSJ7XbFvCg6R6WY/aFOaN2+ugadaiG94GV+sJk2aZPTFEt45APo7osQA9O6772Y0o6C9wvdKXy6oHu7ep3Tr1s323HPPtP0J2qu77rpL2qvqwZ14m0nE2WefnRZvtFePPfZYWBGeeEn/lAQCIlhFLEbvEFnmm0mLhe8Ve2QR/J0iLlbRZ238+PFhr7F0hFZ451aEDECZzCjHH3+8VlblEHK0WLVr1047aTvggANswIABOUyxvKNiQcebb76ZdhLBnoajRo0qb
5BKtPQiWDEQLL5Yl156acoZEA6UaK/QBIhc5UaY7CXG9hes6kkmWeDdokUL4Z0bqEOdZQDCGZgNXZPxRnv1zDPPBN8U1e/qgw6GHKwojG6J4bizcpDtSviUi0LuEGASwfYXjrOf0V49/PDDppWDucO6mGISwSomaaTJC+QJLVaqFYVoU7SSLQ1wVbjsgzibuqZaUSi8qwBqFq+ggU21olArB7MArwqPsKKwTp06PxjwDzroIK0crAKeFb1CH44WK3lbElYOjh07NrzufU9Fcel+fBAQwYqJrNhcMbrLNTMgNqZr1qyZ9mHKgwxZzZO8LxZ4v/fee1qpmQe8GVySl7Sz7xX+cFpZlQfAzcKu4dF9sdhLr2HDhvocVI7hduKEFiu6LxYa8oceeki+VznGu5iiE8EqJmlkyAuNtHXr1mE3YFcv8508aa8ygFbFW94hsqKQ7z463tJeVRHQLF9LdgZGg+grB7OMQo9VAgF8sa666qpE/cb3ipWD1H9vA5WITo9mQMAxZRLBdzbpU9z3yu9leF23YoqACFYMBOedHb5YrCikcaJqRnvl92JQjNhlEc3JU089FfbFYuUg2ivhnR8x+iDD3mL4YuF79fTTT0s7mx+4E/UYXyz2xWKV8h133KGVg3nC26P1SQS+V3zaTNpZR6Y0zyJYMZGrD+z4Yu26667Gvld8r5Dg92JSlFhlkxWFv/jFL+ziiy8W3jUgOTejnHjiiYmVg6rf+QMeLRa+WIcccoj2vcofzImYWdDBJOK0007TysEEKqX7jwhWzGTLvlhXX311WMnms/6YFSFW2WVF4UsvvWRt2rQJRFaDff7EB7YMQHzfsWnTplo5mD+oQ8zef3Tv3j1oC7VyMM+Afxc9CzrYi2zdunU1k6BSKRgCIlgFg75qCTMATZ06NXwMumox6K1sEfABaPXq1bZmzZpsX9Nz1UQArFetWlXNWPR6tgisXbvWWFXo9T3b9/Rc1RBgRSH9uPCuGn5xeksEK07S+s4cqIZZGKGBu0J+EYhiHP0/v6mWb+zCuGZlL7xrFu9CpyaCVWgJKH0hIASEgBAQAkKg5BAQwSo5kapAQkAICAEhIASEQKEREMEqtASUvhAQAkJACAgBIVByCIhglZxIVSAhIASEgBAQAkKg0AiIYBVaAkpfCAgBISAEhIAQKDkERLBKTqQqkBAQAkJACAgBIVBoBESwCi0BpS8EhIAQEAJCQAiUHAIiWCUnUhVICAgBISAEhIAQKDQCIliFloDSFwJCQAgIASEgBEoOARGskhOpCiQEhIAQEAJCQAgUGgERrEJLQOkLASEgBISAEBACJYeACFbJiVQFEgJCQAgIASEgBAqNgAhWoSWg9IWAEBACQkAICIGSQ0AEq+REqgIJASEgBISAEBAChUZABKvQElD6QkAICAEhIASEQMkhIIJVciJVgYSAEBACQkAICIFCIyCCVWgJKH0hIASEgBAQAkKg5BAQwSo5kapAQkAICAEhIASEQKEREMEqtASUvhAQAkJACAgBIVByCIhglZxIVSAhIASEgBAQAkKg0AiUFcH673//a+vWrbO1a9fat99+G7DnzG+uc/9///ufffXVV+EaZ35n+97XX3+d9r1vvvkmpPef//wnkR7/E7+/t379+kR6/E++kt/jmr/HPX77e8Tl7xEnIZr3VO95maPvVabMwir7+iGshJXaoPor72vVt28Yd71N1OQ4GAbHGvhTVgRr/vz59sILL9iTTz5pEyZMCPCOGTPGHn/8cXv55Zdt+fLlgbx06NDBHn30UWvbtm34vWLFCnvllVfCc59//nl4b9KkSSGeF1980RYvXhyI0ccffxzea9WqVSBGq1evtn//+9/WuHFjGzx4cHhv2rRp1qRJE/vHP/5h8+bNC9d69epljRo1smbNmiXI3dtvv22PPfaYffbZZ+GZ2bNn27PPPhuOWbNmhWv9+/cP77355psJYvfee++Faz169AjPLFiwIKRFmlOnT
g3Xhg0bFvLUtGlTW7VqVchr69atQ967dOkSyrJkyRJ76aWXhJWwCnVS9cpMbVD9lfr20hgHUSI4sQuDYp7+lBXBgpjccccdVr9+fYNkEAYMGGDXXXed3XvvvYEoAfprr71mderUsVdffTUIAQJ13333Wb169axfv37hvREjRoR47rzzzgRRevfdd8N7kCfYOMTsoYcesmuuucac8IwbN87++te/2q233mozZswIcX3wwQdWt27dQLzQpK1ZsyaQq6uvvto6deoUnpkyZYrdfPPN4Zg8eXK4BhniGYgYZI53IVLE9f7774dnZs6cGdIizbFjx4ZrELprr73WHnzwwUAqmUn985//DHmH2FH5IH+UTVgJK9UrtUH1V+rbsxkHGTuKfRxkDGYcZxzEgpPPUDYEC8FjokFj8+WXX37PRMhvTG1uLuM5SI6b3rjOfZ5ztS6mxWzeY3CKvgeB8/ecQUPGuMaz5JP0/D039fGs5yHde7xLnsk7ZSAkv8c1yuB58DJH3/M8eHpRc2rye8Jqg7yEleoV7UZtcINrQnK/Ay7J/ZywElb0m+nGs3yNg9S722+/PSgPSCOfoWwIFkBi+uvatWs+8VTcQkAICAEhIASEQBEj8NRTT9nDDz8sDVauZATBuuqqq6xBgwa5ilLxCAEhIASEgBAQAjFDYNGiRYZ/MtqsfIay0mDhn4Qzu4IQEAJCQAgIASFQngjg6sIhgpUj+QMk/lc4gysIASEgBISAEBAC5YkAjvis8JcPVo7kD5CQKxzAFWoGAUgtuOMkj2M9Z37ne9ZQM6XLnAplTHVkfqu471IeHFLdKTWa21RlTb4WfV7/C4F8IpBc9/hdjMHzSZsql76xGOTAinxW4IN5PkNZmQjZ74p9qxTyiwCdBqsSR40aFfYSe+655+z+++8PW0Gw7cTEiRMTqzHzm5PCxU7DZd8xlgJzTJ8+Pe+NOZ+lRaZs8/HWW28Z24Ow+ofA9aVLl9rw4cOte/fu1rt376ApZk+5vn37Ws+ePY295nz1Lb4PbJEyZMgQYwuRfHdw+cQkXdysrGWfvJEjR4a6TltQqFkEICzI4JNPPgl1kj3M8hGo12ybwz6HyHvlypWVSob2w5Y47DPH1jpshaCQfwRatmxpbEmU7/6nrAgW+0Oxn4tC/hCgwrK/F5urHnbYYbbDDjvYVlttZZtvvrltvfXWttNOO9lxxx0XNjFl41c6mDgHiAQk4qOPPrKhQ4cmtv+g433iiSfsRz/6UTiefvrpxL04lhdZ/eEPfwjygyyzjQjhiy++CPvD7bvvvuHescceGwgVm+AeeuihtuOOO4Z94HzgYSA56KCDbK+99gqreDwe9oxjMARHyJdvDRJHrCCOl1xyiYHJ+eefHzCKYzninGfq1T333GO1atWyH//4x/b666/npTi0f8aV3Xff3Y4++mij3lc2QP7OO+8823XXXcNG0iLklUWw8s8zCQLnfI8/ZUWwmCF07Nix8tLQG1kjgNbmL3/5SyBTG220kW266aa27bbbhoF2m222sU022cS4zsDLik40Gvmu5FlnvgoPQqogErvttpv96U9/Smh2IFiPPPJIKPt2220XvgLgWpwqJFPQV8g3i0MoB6SBzXmRGSSILyNAnpHpFltsYcccc0wgWHyB4Kc//Wm49+c//zlsuksh+HoA9WHjjTcOkx0nWGzc+/Of/zzgeOWVVyZwLGjBq5g4pBMcwIRJhn81oorR6bUqIEC9uu2222zLLbcMfQ2bRucjoL296KKLgqwhWUwSKhvQtvGVDvL6s5/9zAYNGlTZKPR8JRGYO3euzZkzRxqsSuKW9nEGBD/SPqQb1UIAsxE75W+22Wahw2EgZbDEoRDzEYMrMzUGYgaf7bffPnQsvplqtRIv0MuQjV122SWU59xzzw0rU8gKnSa796Pt4aDj5VocAyaQ448/PpSRLwC4HyM+jczeIUsMDn/7299s4MCB4T4m0WeeeSZ8LYAvFTiRSkewM
LEwg6denHXWWQkc44hXlGAdfvjhIlgFECJ9Cu4IfKHjgQceCPUyH3sND+4AACAASURBVNnIBcEiX+PHj7ejjjoqtKVbbrkl0cbykWfFacHCQr8sE2GOagPkikrsn5nJUbSK5jsEwJfPD6DhYJDELMjnhRYuXJioxFRmvsXIZ4ichKFW9xn+smXLwrcXMblxjYEclTvfWmzevHkwwTGok1Y08BvfhU8//TQ8i88Xz+P7E1W38xyDH35Cffr0Mb63iOkLrSbP01kS0MxAECBIpP2vf/3L+AYZ36FkJSrx8AyfXoI8oo2jzL/+9a9DR45PBWSKzxt169YtHKQbbcz8j2mA+2iH8A/Evwm/JXCIlpHBgs87oBXCfIY5DT82/AjIG9+R5HcyUSUOygSefP6JzfWYyfObPWCi+YniGf2fcoInBArzLt/nJIAdsjn11FND2TEFP//88yEfLH8mj5AmykPePW/JBAsZgyOmQ+IHx1/+8pdhFs8s03FwmWBCpMx82gm5YY7jngfSwRcGvzc0AdQXfHGIH6wxDRAneaQM4JPNcm3eIa8QSOTEli/4czKIYxKPaiejBAsNFvmhLrZo0SJ805QypJo9kwb1ETnzXVK+BcqZNMmrY0FZKTf1mHKCE/WfSQwro5ARmHtZHRvO1EvwZsNl6h2f9Wrfvn1ol1Eco+8U0/9ggFmOuuX1n7aJD6C3TS8nfQjlBBfwIlA/aMdMeKgfvEP9oP3zLVkwI9A2aCPIAl8dvkXruDJZcFlURLCof2i5kT3tCFMl8ozmlfTop+66665Q/9H80j8p5A8BPgN30003ZdUHVicXZWUiZGbAbEYh9wjQOfMRbddO/eIXv0iQWe+M/AwR22+//UJngl+WfzeRTuWQQw4Jmgy++8gghsqcwZvB9+CDDw4fpI4OvAwYdJh8T3LvvfcOz7qvF3lgEGGwJ5BHPuL9k5/8JKT/xhtvhM8lYN5DQ+MkBSfuc845x/bYY48QH6axnXfe2dBGsPsvHS8dJCtReMbNnpBK4ubbkHSY5J88cUA+fABjMIZ4XHbZZeF94gc3yknnip8gEwEnQBAuVryQFvmkoz799NNDntAS4mdy8sknW+fOnRNpgDXlQau0zz77BPMe+SMt8ogZF7Ocp5GuRlDWs88+O8jqyCOPDKSRdyCW+FKBNaQIDMDxxBNPDIMdgwj+WPi/kHcnr8kECxMxpuI999wzzN6JCzLHe2CNfMHyww8/DNpPfNooM+Zm0sPHiYHRCRxEA60pWJ100kmBWJF/SDB1ygc2CBdEn7xR//z9TDhQd6if7ldIPsgPWlnIm5OsKMECI7Qo1EXqMPhjSvrjH/8Y6q3jTzkhDbVr1w7y4Vni53zAAQfYjTfeGCYd3oYgX9QrcII4gyFyRStMfcDPkcHcfd8oF+QBsoF/GHkgL+CI5hCtIYQ1FSlLh0lNX6fstAs+cwImtEkw4gyRpW/3vgES9Pe//z3UKyZ9kGIC9QNTPnKjzbzzzjuhrYMzdQG8aKfIgjaCryB1B6zQVJ9wwgmB4Hufko5gkVeILHWGPow80laQD3WCySfE3OVJ3pgokQ4a4ZrYZbym5VdM6WF5YELu7S9feSsrgnXDDTfY3XffnS8syzpeBkGcoBkg8btq2LDh97RHUXDoxH73u9+FZ+lM8FVigGGQoiMiDgZIOn78exhweY7rDAh8QJv0CHRSF1xwQRjgGeQZOCAUkAmep1NkBkqnycHMhfwRL5oS4uO5I444IhASZrV01lyD9NA577///onn6ADRoEB6IOxRYkAHyqCHgz/542PapMPRqFGjxADMKjo3uZFnykk6dL6ki3aPgdYXAaC9YFDkHvllICUt0vb8c48BAx84AkSCD3wTP+lTJu4zMHGN5yEiEJxMgU6I9HgeXypkR6cEQcFnigGOe2BKfiA1EDfe4zf38FFhYCOkIljUFdLwfCE7BjbqBaQFTQ2DFHEhEycWpMk1tKDIjcEKjSlkgesQD
e7xPweEygkW5Nivoznw+pQKC8gXBJmyUg+pD2AJpj6hgFiiNSJECRZlYYBmACffXi8pKwO4D9RoWCD15Il74HHggQeG97hGulHTEaQKuXKP/JA38sLhbYW2AHlEXmDDoEI95x3eJT+k49pkfAmRW3TQj+LBddfKQmorOlh1CkFHy1bRs9xn4oXGJ1UgbeoqX+Pw/IIlbcdlQJuAmOCqAMECL7DgOdosgfqBKR8MIOjgwTMc1G/qB5qv3/zmN+EZroMt/Yiny/9MMMA1HcGiHEyUyBNp0a8xqXRtN3mmL4qWl1XXkC+eh7STV4XcIxCt39H/c5+SWVkRLBo8s3qF3CPAQOEDG4QI52cITaoKTMfE4E9HQgfG7JwBLkqwGGTOOOOMMPNs06ZN8PXxzooBhRkm8aMlIj3igmih+kclD8mhI+Q6ZizMcVGCxXUGaDQSdKzEQ4fGjNPzhcYDMoT5AYLkZOL3v/99MFNgRmRm7J3mr371qzBIkBaDciqCRQeOaprykT7kAzMGaUAE0ZKRPmQLswTEM0qwuAexwXSJiQ4zlRMZCAXf2yRAciCaPI9vB3gxQDGIMXighSEeTI6ZAn5UTmTQCEB4kCmEifSREWkwgKAthGQwwEGKPF+ZCBZmN0gymgTHES0BGj5wJJ3LL788pMFAiY8fZYEIXHPNNYEoUIfQ4KB9iRIs8kWdQS5oODHTuHkHLQj1BoKKL0YmgoXJlziIjzKBMVhi9qPukG80ScRPiBIs3qH+8Q4kkDrh9Yh6jNwJ7dq1S1wHU8rHgIuciZt40IKBFSFKsCgH+UADgknxzDPPTJBVNJ1oUjCrQU7AisHd/eWoD7Q/J37UeTBKFaiLtA/KC26ZDnCHxKJVZfLA70zPk75rkVOlTZ8BFl5H0GZj9qaeQcR9YuY+b8izIoIFpqQLOb7++uvDpAGc0IQ5eWXCAK70K0zQncxdfPHFCRNzKid3zMBMEkmDdkYctEnqOX0O12mftCHvI6m7TPq4h8wrapupcNK17BBAw04fQ73KZygbgkUlBsx8A5pPYRVz3JAA98eh06pojxH3N6AzQTXPoBwlWAw+zHy980Ezg9aA5xlQ8CViduyzUQgJ/icQGLQsmBIwH/E8RAvzB7J3DRbXMXnRcfI8HTLkgY6RWTA+URA17mF2YAbsGibygQaLQEN1Ises0wcn4komWBC86CyVgYeB1AP3Mf04kaQTJx1mya7BYhbNIOf1OEq+eM9XSzHwQLgoJ9ogzIp02Ay24IwGCu0GvkDpAmkwCycODuKIBrB28oM2APOTh2wJluNF3tBGkA5E3a9DStC0cB0Ni/uvkDayc1Mz5IM6AvFxog+ZQEuHjw2aAjd/UacgFhBCSCxxgn26AGHyQRFiyvJ/5IbWCa0QpAYfnlQaLJ7HR8gD2hHXkKJhoo4TIPKYliHyEG4mLAy4EBQvPwMy7xOiBIt2Rxk9YGqnboGZp0G9QyPGNdoW+fe2AhauIYSg4KuaKjjBokyZyBL36AMgWPioYfbOhmBh1kw3AcZPDrMq+Yf8sO0JbYxAP8CkBTJ04YUXhnaLrLMhWNQP0qR+MCmC1Lt2mfbDAg3vg5igM7k65ZRTAlnl2VQaLPKFpcQnUeSDiQJ9CTL1Pohy0JbBlYA83BwPxpByTzs8oD85QwAZMLHwfjRnESdFVFYEi4FFzoNJNSBHP+k8vHNglocTcrpBiw6Fyk1nySCINitZgwU5gjx4oNPyjgmSgdaBjpHZIfHQIdH5McvkQGPh5hDuQ5jorDwOtDIs46Yj9sB9DkxzDGz4lBEPGgVmo+SVuHBmp2MlpFtFmIpg0ZiJ1wkZJiE63GiAUECISIel/phkogQLUxMaPQJ5ZeCpX79+eB7i6RvpMuBGzWNoTRhYkREmVgZ2OvRMAdKLWY28MFjgsxYNkAD8yLgPOYIYeKgswUIrFSVYlIsAXq7xgTSwitFljIkNXxrSZzDEIZmBzAkWdcK1Sp6vqpwdfzAgL
eo3pAeiDQGFvEPsPEQ1WJAaNBce0Ia51g9SykIKArIET+RC20HDhEYWAunpkqaTHydY1EkGcyePxEV+L7300pBXJgWQP+J1LQ/1i0mN48hKUDcD83yUKHu+OVN/kStEEK1RRQe+jBBCtKAVPcukBrKL/FIF2qRP4MAtOjEhX7wH4WUCka2JEHN/cv1AVrQT5Ez/4SZ38kR/Rj5Ih4kJmIN1sgaLunvFFVeE/gIShUbRseaM/F2mtB+fTPCeu1lAUmviUy6psC6Ha0xk8IMTwcqRtAGSjgR/D4XcI0Bng/MoHRPkhVmbdxzJqaGG9wGAAYLKjnyiGiwGT3c8ZvCBlLk5kM6JAYhB2ckI1xiI6TSjB9eYTdPBE48TLDo+9p6h0+Q6gTOzVDpHTBHkjZk3AxvaLggM5asOwcJ3g86TeOhMGVSjYfTo0QltAv5SrEBDi+UaLIgeqwA9MJgweye+KMGC4DH7xvwBHtz3g3Kh2YBgRomBx+lnSDMzfI+bVVvRUBMEC+2Q+75wjsqW/5EvB0QLEgEpd4IVxcplHM1/Nv/zHnUTUw6Ex0lKFEvqCrJ07VKUYDEBcFJEehBq97WKEiy0MLQZyoF8qJ/kH/MuRJH00hEsCJmXjzPaGEx9vEPdR6uJNsTNW5lwhKiixU0ViNvxAJOKjso+T3y8kyqgkfYJA5o8N5+le552UZEGizZAnxMNtC1wBzvaOVoqQrp0UhEs2o37cBEPfUiqeotsIFg+0aG/xFRJX8Y9Vni6diuaR/1ffQSY6CDbdHKtfgobYigbDRaNF78cOiOF3CJAJaUjwIREx0CnwsAAWUkOPMtA6P5BzJjxPyFECRYdT9QBFMKAWRHyxgCBap0BzU03EC3Ma5i9kg+IAcSFPDrBYrBxPzHPI+lBVnyAIw/4TKAZQUvh2pLqEKxkDRYDazSgEXPSmEqDhQ8QpjEP6QgWZeUegzvmQHyOmEljrvGBloEbU266ThyCS4ePPMGdZ6OhJggWuDshxTSDNiZZvvxGqweWURMhfixRTUc075X5H3zQsOJzR13FRwetBX5AnjfwYbUgE40owXKfIE8vFcGib6IuQhTBGs0Y5ksGe4ida2LTESw0SlFtMYP+b3/72xAXGk+INnG5/xImMIhrKhx5FpN4qkA+sQCgDSbNTAeTJlZ+omWmX8j0LPd4Hp+qdBostEZMFsAHAoT/jAfKjuaKuu7mYMhKRQSLtkRfFA3gDc6kQ/tjWxkP1AE0WqSDjNNpsPArpO8gDogVmhLMxMl40y/Rt/hEkvaKLx39D+/RbsFcIfcIUD84RLByhC1AoglAe6KQewTAF58lBh06FmbgaEiYJbiWiI6EDpfZvqvI6TRdDR8lWAxM+KV4B0MH6qYVZoQM9qjr3SzJYIr5wgOEjPcxWzG48Cz5yESw6MTd/wLTDOY6Dww8PkBlIlh0roRUJkLSZ0sJxwiyQ569jHTYmPicpKbywcqWYBEvJk5MSXTikEfMGmCB3xqdODJARgwc6QKkAXlyuPnRn80XwULOjmOUcJ522mnfG/zRFDDg+wo03smGYFFXIWMMlByQHpeBly16hpzj8wOxQBNEuryDFgUTMnUdfJAX5KayBAvTkPsXUceiZiuc4N1klYpgkS7aTdoH5eLAp8z9rTD9gSGaUDSiPE+bc2d5ykndoI6AI2fXqEQx4H+IJuZl2h9lznRAODH9Q5B9pWJFz6Ohci1gctrUNdfioqlFK0d7ItDHoGllBR5tk3ZLW6oKwUJ2TmiZUIGJ1w36N+og7Ze80F+k0mDRz7EYg/YFAYc8exzklz6Qvo6+CUKIzAjIAS0pMkJTSlv1e+EB/ckZAiw6qAkCWzYaLCoqjDXTYJIz6ZVpRODLgOwDDh0x5ja0QHQmdIoM7n6fwYTOBzJCiBIstFS8ix8HnT6mXZ/h4+yM0y4dGdoEnoUwMGtHu0MnjaYDsx5+PcxEGXQYIDIRLDpXiB0dHIQNB10GG1aLsa0EaXCPF
WV0rISoDxbkjN8M8uQt2cmdchKfb59AB4xTLn44lAfzofuUMYPFsZ48Rx3ZsyVYaGqdqGEyY3CAVDAjZ+ABMw60uuQ1OXjHjibFTaP4yfigxvP5IljICxzRZqBNYfEAuKPtxN+IsiATtB6QDsxaONtDerIlWHwnEmdzBlM0LAzI6QImM/cPQz7UZQg7ecOMg6zIH2ZtBsnKEizecT8e6jhxgi0kiIHaNY5oNt2R3n2wSJf0WUWJNoY2RDvwCQzxggtY+tYoDPq0J0zsaJkxvUP2wZHn3SyWjAd1Ec0xcqBtZzqoM5jxIVhoq6mLFT3PQoV0BIu0kZnXRfZYQ5tIu8HU75Mf2iZ1nH6+KgQLWUCawY/2DuEBVwgqmNFmwJwVmRDtVAQL3NCouq8l9ZkBnbJBmjARU5/AnFXITr6Qka9WZYIHsVfIDwIswMAP0bHPTypltk0DDRE1tEL+EGAmjdO1+43QSdEpMnAwUDhJwf8EMhbVKEYJFqYNDjpOOiPv2PiNJsEHRAYzHOK5T6fIrJNBkMGCjpB8QHToOCvSYJEXVPS8Rz5ZWcXgziDMLJwBgnvMtN2MQgfvmgHKSceItoMOPhXBAnk0ayzHJr8cYIGWwjEDJ/LBIE6oCsFCM+OrLsGGWTd+IQxiPhiBU1RLl6pWkFcvHyTGV0/ybC4JFgO97wFE+XFoRwMHKaVeuCkYcs6AjWzcPAd2mGAYhCET7oOVzkQIeWTwdTlXtA8WxBR5oJVBXpA6sEUThB8P8SBD91GrLMGiXkIWnTxApJiIsF0F9c7rBWm571GUYFHnaV/UeTQf5JE8kU92aae8DCRo+1w7A47cB1efuIArk6FUhBt5Ew8kgYkHxCnTgaYPbS2+LmiBMj3LPZ6HyNBOkwPpcrDgA9lSPtonZaW9OcHFdIhmmzqTjQ9WsomQNAiYH32iQ1rgS330CQuYuWkxHcGibUB6vY4iQ95DnuSdOo4m3zWPpMvEgfIgO98KJhkL/c4NAkxma2IRQdlosOhgWBFFpVfIDwLeQUEMmIWz7JqBhwGdjgbCRCfIQM/MLTpYk6MowaIjZeBj8OR9ZoNortC4oKUgkB5yhQTgK0TnRBp0uDyPHxik2v06GMjw92DmSLzsq8Og7IH40JZhdoTUEQ+dOKuX2CATIkcHSbxujmQWiwM9Az4DIM9DrCCApE2eONAoufaHM0vj0dARH3n2ARIyg9mOwcTxBCdWzzHwQpB41wNaQ/an4h7v+o7V4IJmDG0GRIMOHoKI9oEBA38sfEKcqHp8yWdIJwMBnT4DRFTDgDYOMs1ADSYM4B7QQJFXysdM0WWN+QusGMzBzTXKxIVsSMNxxHcF+fAMRAFtH4MiZaEcPEc50GSAAwEyymIL8KD+pSKQ4Iqpi2eoC1HC7vn3M89yoE1ABl7HwBKZUU/QXKKJpS4Q0DxRV8gr+UOWHqiLkDXugZn7EmEq4jrtg7JxcJ8NTtnvC7ww+7E7O/lxgsVgzco0FvAQJ22FNgc5gyy5qZX0IU74RUXrhONIm4SkQkwIpFGokC5t6gL1CvJBnY72K2CFCdtlQH1AxtRNiDtYEKgfaBqpl9RPfK6SA20C0oepGizBiP6LPgU/QLRT3m5oH8gH7NGqReOjHpAHtOK86/0J7dS/1hAtK1p36hXkS77CyVLJ7W/qB3U9in9uU9gQW1kRLGY3DCoK+UPAKywkgkEDEx/O55gWMBGyBBxnYSc2/jw5ihIsfCogUgzazDTQDjAj9oE0uQTMGCEekCbIHYMuWhGfjZMOB5on4uFgsIumT5zkC58wBhsGN9LlNx0qZ0wFLOVGU0LgfWbdaLIgNMzuISHEA9H0tNCCJKfFNZywGTTRetHBoqGAbHjcnMESswdpUya/zz3SwaTDPdJy8unvkwa4QhDRCGF2JI90/pAwfy78k+IPeeZdZu8MaK6l4VHSRpakjckumi8wIa/cI+9OLhngwIrr0e/xkQ5kACLjOKKFc8w4IzvKQ
hk4MNXhBxMtB9oLNEjET/rpNCJRzIjX40gBQbhE+tQx5AWJheAwCGL6IZ+k6wFCiJ8NeaAuOInkfjR/UcyIn/qCbIiXNMCCegdOyJZ6h9wIUYLF/7xLW/MVg6y6c8yJm8MDz9IO2RaBtokJNF2b9HcKfY7mnzqNqY2yYjZk+xDqVLStUzfBDRlwzydZ4O/tOFX98HSoD2ACpsiDdKj71C3i9uBtE9kkt02eQfbIjokF/Ql+P6naOP2a7zkHIUSbRfD8eHo65wYB6gN9Zb7xLRuCBZA0jGjjyI2oFEsmBMCdzgrcOUcrdPR/4kgmWD7D4D0OD9H3kv/3tJKv+7uZztF3PJ5ousnvRp/nHr+jR/Lz/jv6Hv9H04re8+dTnTM9x73off6vShqeLgOSr+DC9yQVafFnk9P265yjecp03ePwc/KzlIUjGl/0/+jzmdJNfi7d72jc/O9YZlOnPU7ei8bj1zlHr3v8yeWLPs//yQSLvPi7yfElv+tpVhbHVPEU6pqXNVkG0bIn5y3dveTr0d/87/Lm7IHr0ef8Ome/Hj1H4/Dr0Xcg0/gEYpKEaPk+cNFn9H/uEKD94FMalWnuYv//mMqKYDH7YFaiUJwIpCJYxZnT8soVs3S0WJgyMblodVPh5Z+KYBU+V8pBVRBAq4Z5HNMg5kRpr6qCYuXeYdsaTOsiWJXDLe3TAIlvC34fCsWJAKYh/GqYxeGP4v4gxZnb8soVKnW0V5gJcRCP+vaUFxLFUVp2R8enB9Mtvn5ochTiiQATf3y78I3Ej8xNnfEsTTxy7StuRbByJC+AZPkty54VihMBnIFZTcW33vCBUkdTXHLCr4fVN/gJZTITFleuSzM3LMZgixI+GcX/+R4oShPFwpcKcyH+gPhmsZAjuqq68LlTDqqLQNmYCOmA2LQPW7dCcSKAjCBVHJilUvkqFGfOSz9XLgtkxOG/S7/kxVlCNFbeVtz/qjhzqlxlQoB2xOFtSu0qE1q5u8fKc/aBA/d8hrIhWF6RvVPy36zSYaWHD+h0VvzmuoOPjZxrvOvv8T/XuMc1nvW4XF1PnB5XVd/zhpdNesl5J83kvEfzSf4Iye9xLfk94vLyCauK64ewUr3ytpvcltQGN5gzk/sd9VfpxxJhtWFSl4txkPGNDYvZCJe2mM9QVgSLJdb4K7Rt2zYQH3x8WHrLTs7uWMjSXpYvc/A/gb1XeMb3iYE08ekU4vI9iVg6zCamLHv2pdSwZF+ai0mFjhY1MO9hA0a4LFtnmT776Ph+OSzfZmkw20qwNJzGxXJh/C5Yms5vrrOUmy0JWCZOYFsE4mHZMvESP8uxSQ+zDumTD75DRr5YvkzA8Z9848hMOQhsjyCshBV1QfVKbVD9lfr2UhkHGT/ZqxB3IRGsMNxX/w+zI4hH7dq1wxJnlsGybw/7M7EBKQSGwP41fMqEj/4ysBDYUwkHX/yD2MiOd9lTiA3rIFrE7Z8gYWUCBInADsbEja8EO/6i1YDY8B6+LAgaEofzPf5hEDIC5IbNGfFFYr8lNEbs+4PjNyQOosR1WDib3Pnmdqg869WrFz7sS7xUHsrMexA2iCH5ID9sTOgfDYZosVEnm7BSDgL7tpBPVisJK2GleqU2qP5KfXspjIOMw+yfx1jH2J3PUFYaLDZzc4IE+eBgNRTXAB2wITMQLw43oUFMeIZn/T1IB9e4l/weBIhAnNH3eM7fIy/8ptNCq0R6ye9xnfs8h7aNuHxvKH+Pa6hNCZyJJ9N7yWXmPdL197zMwmqD2TdaP4RV+vqoeqU2mKnfUX/1/TFBWG0YPws1DoYB87s/jK/5CmVDsPIFoOIVAkJACAgBISAEhEAyAiJYyYjotxAQAkJACAgBISAEqomACFY1AdTrQkAICAEhIASEgBBIRkAEKxkR/RYCQkAICAEhIASEQDUREMGqJoB6XQgIASEgBISAEBACyQiIYCUjot9CQAgIASEgBISAEKgmAiJY1QRQrwsBISAEhIAQE
AJCIBkBEaxkRPRbCAgBISAEhIAQEALVREAEq5oA6nUhIASEgBAQAkJACCQjIIKVjIh+CwEhIASEgBAQAkKgmgiIYFUTQL0uBISAEBACQkAICIFkBESwkhHRbyEgBISAEBACQkAIVBMBEaxqAqjXhYAQEAJCQAgIASGQjIAIVjIi+i0EhIAQEAJCQAgIgWoiIIJVTQD1uhAQAkJACAgBISAEkhEQwUpGRL+FgBAQAkJACAgBIVBNBESwqgmgXhcCQkAICAEhIASEQDICIljJiBTx7//973/2n//8x7799ttw/Pe//y3i3CprQkAICAEhEEWAPtv7b8706Qqli4AIVoxku2bNGmvfvr2988479vbbb9vw4cNjlHtlVQgIASFQ3ghMmDAh9N304a1bt7bly5eXNyAlXnoRrBgJeObMmXb44Yfb1ltvbZtvvrn9/e9/j1HulVUhIASEQHkj8PLLL9uWW24Z+vB9993XxowZU96AlHjpRbBiJOAZM2bYAQccYBtttFE47r777hjlXlkVAkJACJQ3Ai+88EKi/959991t1KhR5Q1IiZdeBCtGAp41a5Ydf/zxtuuuu9pOO+1kjz76aIxyr6wKASEgBMobgaZNm1qtWrVCH37ooYfauHHjyhuQEi+9CFaMBLxu3TobNGiQ9erVyz755BObOnVqjHKvrAoBISAEyhuBOXPmhP6bPrxfv36GX61C6SIgghVz2WoVSswFqOwLASFQFgik66vTXS8LUEq8kCJYMRIwDZFtGvzQNg0xEp6yKgSEQNkjQJ/t/TdnkavSrhIiWDGSL+rkjh072nvvvWfNmjWzkSNHxij3yqoQEAJCoLwRmDhxYui76cPbtm2rbRpKvDqIYMVIwGzTcOSRR9q2224blvo+8MADMcq9sioEhIAQKG8EXnnllbBFA334fvvtZ2PHji1vQEq89CJYMRKwtmmIkbCUVSEgBIRAEgLapiEJkBL/KYIVIwGzTcOx8jIepwAAIABJREFUxx5rO++8s22//fb28MMPxyj3yqoQEAJCoLwReO2112zHHXcMffjBBx+sbRpKvDqIYMVIwGzT0LdvX+vWrZt17drVJk+eHKPcK6tCQAgIgfJGADcP+m768N69e9vq1avLG5ASL70IVswFrFUoMRegsi8EhEBZIJCur053vSxAKfFCimDFSMA0RJb5+qGGGSPhKatCQAiUPQLqw8urCohgxUjeX375pXXp0sVatWplLVq00HesYiQ7ZVUICAEhgFtHy5YtQx/eoUMHW7FihUApYQREsGIkXJzcjz766ODgvvXWW9uDDz4Yo9wrq0JACAiB8kbg1VdfDdvssEjpgAMO0DYNJV4dRLBiJGBt0xAjYSmrQkAICIEkBLRNQxIgJf5TBCtGAkaDdcwxx4RlvmxU99BDD8Uo98qqEBACQqC8EUCDhfaKrRoOOuggabBKvDqIYMVIwGvXrg1fYu/cuXP4ZM6ECRNilHtlVQgIASFQ3ghMnz7dOnXqZPTh3bt3t1WrVpU3ICVeehGsmAtYKwljLkBlXwgIgbJAIF1fne56WYBS4oUUwYqRgGmIyUeMsq+sCgEhIATKGoHk/lvkqrSrgwhWjOTLNg0ff/yxffDBB9a6dWsbM2ZMjHKvrAoBISAEyhuBKVOmhP6bPhwzobZpKO36IIIVI/n6twh32mmnsNRX3yKMkfCUVSEgBMoeAb5FuMMOOxh9uJzcS786iGDFSMbapiFGwlJWhYAQEAJJCGibhiRASvynCFaMBOwbjbLEd5ttttE2DTGSnbIqBISAEIhu03DggQdqm4YSrxIiWDESMD5YfIm9TZs2wY4vH6wYCU9ZFQJCoOwR+OKLL0L/TR/OZ89WrlxZ9piUMgAiWDGXrlahxFyAyr4QEAJlgUC6vjrd9bIApcQLKYIVIwHTEJOPGGVfWRUCQkAIlDUCyf23yFVpVwcRrBjJl53ce/ToYXyFvV27djZu3LgY5V5ZFQJCQAiUNwJTp
0619u3bhz6cLXdkIizt+iCCFSP54uR+3HHHWa1atcJS30ceeSRGuVdWhYAQEALljUDTpk3DFg304YcccogmySVeHUSwilTA3377ra1evTrMcJjlcIwdO9b2228/22ijjcLRsGFDW7NmzfeeQcsltXORClXZEgJCoCwQoA9et27dD/rmJk2aJPrv3XbbzQYMGBC+R+h9PN8m/Oabb8oCo3IopAhWkUqZPa/q1atnZ555pp111lnhOOmkk2zrrbdONNCf/vSndvbZZyfun3vuucYM6euvvy7SUilbQkAICIHSR+C///2vtWzZ0s4///xE/0xfffDBByf67y222MJ++ctfJu7T19euXVtbN5RQ9RDBKlJhor2qX79+ojG61irTec8997Ru3bpJg1WkMlW2hIAQKA8E0GANGjQo7Naeqc9Ovnf55Zfb4sWLywOkMiilCFYRCpnGydGnTx/bd999syZZV111VVBJF2GRlCUhIASEQFkhsH79erv99tttk002yaoPx2TYsWPHxErxsgKrRAsrglXEgmVj0b/+9a9ZNU60V6xKgZgpCAEhIASEQOERGDhwoLFje7KmKtXvK664wpYtW1b4TCsHOUNABCtnUOYnok8//TQrLRb+Wlrymx8ZKFYhIASEQFUQwNH93nvvtU033TQjydp9993D1g2aIFcF5eJ9RwSreGUTcoYv1s0335yxgf74xz8On9Ap8qIoe0JACAiBskNg8ODBFfpi/fGPf7SlS5eWHTalXmARrBhIGC3W3nvvnXIGtPHGG1vdunVt+fLlMSiJsigEhIAQKC8E2Drn1ltvtc022yxlH77LLruEjaPLC5XyKK0IVgzkzF5X+GKlUjOjvWJ3d4LUyzEQprIoBIRA2SDgffLQoUPtZz/72Q8IFhNktFdLliwpG0zKqaAiWEUubRoox2efffa9TUZxknTtlXyvilyIyp4QEAJljQArCu+4444fTJJ/9KMfWefOnbVysERrhwhWTATLisIbb7zxezMg971yEhaToiibQkAICIGyQcC1WKn2xWLfK/lelW5VEMGKkWzZF4vd232J79VXXx0+sxCjIiirQkAICIGyRIAVhXfddVewPNCHs+9Vhw4djF3fFUoTARGsGMkVX6wGDRqEjet+8pOfhF3bY5R9ZVUICAEhUNYIsKLQP5fzpz/9SftelXhtEMGKmYB9X6xrrrnGVqxYEbPcK7tCQAgIgfJFgBWFd955p+He0a5du/IFokxKLoIVM0HztfWHH344sXIwZtlXdoWAEBACZY3AkCFD7P7777dFixaVNQ7lUHgRrBhJGWdJ7PXsecVMSEEICAEhIATihcBXX30VTIP/+c9/tLVOvERX6dyKYFUasuJ5wVenFE+OKp8TCCOraObMmVPhMXfuXMMPLdeBFZrTp0+3KVOm2Lx58+zbb7/NdRJVig/5QqTnz58f8gWxzlbmPIdT7YwZM4zvoWGOaN26tWFinjx5svGFgOS4+M2WH2DAAS7JzyQXhPuYqr/44otwsJ9PRe8kx1HsvynPN998EzAZPnx4WFbfsmXLoEUeN25cqL9xcFQmj3zrLpu2xjO0N2RLHfT2MWvWLIMYKFQNgVJrG1VDoXzeEsEqH1kXZUnZH+a+++6zX/7yl/brX/+6wgOSkOvQr18/+81vfmM///nP7aabbko4nkK0GGimTZsWzgyyHvweA8/s2bPDAOz3cnVmQOQD3uecc46deuqp9uSTT9rXX39dYfS8Byl78cUXA5777befsVv0zjvvbPvss48dddRR9ve//90mTZr0PTJJ5//yyy+HtM4444zE/jyZEmSwbdasmf3iF7+wo48+2v75z3/mBYvkPJAuZUQ2DPpoBfIxeBEnpLNt27Z23nnnhQ/3snfRTjvtZCw0OfTQQw1/SEgseSjmAFF65JFHsm5rJ510kj3zzDMGqTzttNPs8MMPD5ti6qsRxSxl5a2YEBDBKiZplGFe6PR/97vfJbae8C0oU
p3ZWPUf//hHzlGCxLBkmjQvvPBCW7x4cUiDAZwv3B9zzDF22WWXhQHdE1+4cKFdeeWVduyxx9qll14atBt+L1dniNLbb79tW2+9dVjazbYcFQ3iEIKxY8cGTLfddtu0uG6++eaBfHXv3j2hkeBdNkMEZz7r0bRp0wpJC0QTmWy55ZbhHT5smw0JrC5G+CJed911QTZnn322TZw4scK8VjZN8KAO8JmTXXfdNS2Wm2yySdil+9///rcxYeC9YgxoLf/85z+nLUdym6Nc7L3Xt29f22OPPcJ7xx9/fKJ9FGMZlSchUEwIiGAVkzTKMC9RgsXAfthhh9m5554btAVoDKLH+eefb+3bt885SiNHjgyDNZv+oSVi8CbMnDnTDjnkkDCwsLSa3x7QbKHxYlA64IADggnF7+XqXBWChWmP5d8MjuQNrRUk8bHHHrMmTZoYJG2vvfYK97j/q1/9yig/oSoEC03Shx9+aLVr1w7ajRYtWnxPK5YrLJLjwdR1yimnhHKgUaIMuSY2mEgffPBB22abbUI6W221lZ155plB+/fcc8+FLVPQ6jjW++67b6ifyK0YA22NOgAh9XZ1wgknBHJMXYAko7Xye7Q3tKDjx4+366+/3n7/+98bBNrbRzGWUXkSAsWEgAhWMUkjKS8MGFU9kqIq2p9RgoXW5Nlnn034ALkvUPTsPlhoSRYsWBD8RNA48RttAyt0MNe42Y7BjnfwReIe/kfJvkWYgMaMGWMjRoywqVOnBhMX/ktc8++HHXTQQeE3cZFnBh3MQwxM+++/v40ePTqk44MrciMOSBn56dWrl40aNSosUICUJJMB3mPgIn88j08TGoe33nqrUhqsd99917bffvuQL8xYmITAz9PDvPPGG28kNqyF1D7wwAMJE1uyBgus0A7179/fJkyYEHxyovknXnzouMeBZs/T4ky58OPhHhvl8sknMAYbfy5aOXkeeYADabJvEGZYMPfn0eIhT0zK4L/77ruHeMErij+/SRe/s549exrfg6OOYOr1uKJpJ/9PfYE8kwYaP8gp8bmPHmXo1q2bQVJ4hgNtJn5o3IOEI398mZI1j9QjTJvcZzUZmBLIF/fACNN17969Q12Llo3neJ56H40fnCGaaDCj5mwvl8vC2xOywvQJfuR9zz33DPUUjHiGM7IgP94+qAuUn3wiE56jfDxHmck3csOsCA6kSV69DoAp5U4lA56lDeALidaMspMe6WcjLy+nzkKgWBAQwSoWSUTyQUdDh+KdEx0UBx1b8kFHxTW/z9nfj0OnFCVYDGKvv/56Vp0pBATNzHHHHRdm1wwUF110USA7+ByhaejatWsYnCENPAcRwtz3+OOPh0HN8RkwYECY1eNHhDmIgQtzz8knnxzIDYMPZjoGUuJ66aWXgvYkqtng3m233ZZwHmfgeeKJJ+zEE08M35Bk3xu0YVdddZWRXnQAJB8M3Ph/HXnkkYYmhHyylBsfmGxNhAxOxO+DPfhQFi+nVzEG+3vuuSeheSHvDNQ8FyVYaDvQ6OGzxRcEjjjiiGBigvRQ5wjUt/fffz/46ODH9uqrrybKRhkZUOvWrRv8d9Cc7b333oaZ6dFHHw0DczRvxEncderUSaSJzPDPQ5PE4E1Ai4l8d9xxx1DWLbbYIviAoWVxgkf9YL8h8k66mLggS2hn3nvvvUCyQ2Rp/pD3p59+OhAr8EQuOLQnB9pa8+bNbYcddgh5gaxAjCA51EfqHWlC7jxQZjR94E6dgwQ74YRcsNs3pmcwx88L3P/2t7+FOEmPAKG5/fbbQ/yY2CEj1F00rZhOs/WTgvSSBmVENhCpaCCvlIUyUBZkSdzkA80lMkfrxcTohRdeCLKi/jIh4VnKA6nmg8Zo+5DB6aefbh988MH3zKnUIyYtt9xyS/Dlw1eQfIE75cRfMFpXonnU/0KgWBEQwSoCydBxcNBpOVFiEKTTZfZGZ4pJhBkhA0j0YPbLdTo9Blg0DryHRocBizg5PI0iK
O73shAlWGiwMGVBNpi5Jh9oBMCHgMbItQsMagceeGAYDN1cw4DBIIW5EYLCIOzEA98k0nFfIXyw3MfmggsuCDN3CACO4R4f5+22285++9vfBjMJPlvJ99BeMFNHe4avixMwNEpok9AWcTBoMLAhFwID91lnnZX4ECzPbLrppkY+Gaj4n2uZfLCQLw7fDMaUk7y+8847If5Uf9AQ+MCKiQ0NG3E4wSI9CKGTGMeO6+AKiSBQx/DBAl/wgLiBK3H16NEj5MfLg7nS/cIwtzEAo/0g8HyXLl0CNjxPepisXG6Qb0xU1AHIL4QJXHiO58GYhQDEh4YEcy/v8D5EBaLmaVNfwMZJYshA0h80RsRB/KTDIJ+shfJXkDemVp4lPRYKoP2hLnGNeo3zv4coEaaMPE+gHrCggXzzHtg7cSMPEJPPP/88YIXW8OKLLw7PURcx+xEX7+EvSH+RTciGYEF6XcsFyYK0U3fxDyRN8EfTS3vxvJMP/oeYsfgBXKL3kAcTIOROgIBC1sCK+GgvXnauEQ8kK06BslX1iFM5ldf0CIhgpccmr3e84dFRRUkVBAmytGjhQpszfbpNHz/OpgwfapP79raJ3T+0iV062IRObcMxvksHG9/9Ixv3aW+bMHSITR4z2qZ/8UUYZOgEIWYQmFRkK6+Fq0TkUYJFx8osmg451cFnghj4CMy0IVV05Aw+dM5oApgBu0Mu9+ioWeX11FNPhQGPzprrzLoZiAl09FEndwgsphYGRTRPPI/55Pnnnw+kgcEA3xT3ZYKg4JMDoaA8zOQhdbyHBguNF2Y5BmyIBeVES4N8cIpGO+ODDwMP5WQzWUiX55d3KiJY5NnzyxnTGMEHsfDjuz9orJyMMZCxEpDnnGCRd/IEabn77rvthhtuCJo4rpOX+vXrh/xTdyFYlIu84qOD9geC4YsXwILyYr5EK4RsiQcSyDUCmENQuU48EAY0aGjx0Ij482if0HRAstw/DiLSqFGjQNCYXLCFgpNbFi1AItAasnISIg0pQBaZSAiTFmRHusT1yiuvJCYqURz5nzQhNZ53MECuaHVcfpBJr7tMHPA15HnMzNRl6g2aK56nPkMoKCMaQbDw605gIViXXHJJiANiS5kgzNRrJg+QuGxCNgQLLaS3KXdyd4Ll9RyCi+8f2mGIMPWB8lFXmAhRh6jTXm7u0VapK+CCho5r1DnqwZtvvmn/+te/ghaZOCg/2lyeL9YQ7dPB5+tvvrEv162zFWtW2/xlS23agvk2ae5sGz97ho2bPd3Gz55pU+bNsZmLFtriFctt5ZdrbO369fZNDCbGxSqDYsuXCFYNSyTaCBmc6DDooBlsFy5YYDOnTLYvBvazKZ3b2pQ3/2XTn3nEZj9yp839+y02556/2ry76tvcO663eXfWt7l332Bz7r3JZj3QwKY3ussmPf+4jX37NRvdub2N6d/XJo4fH0gEZItOjBl4slarhov/g+SiBIsO1jtlOtXkgxk7GiJClGBBcNBCEdD4MZjzLnHhqOs7JmOqcFJEp482gJBMsMCLgGbCHdkZzPntARMgJg/SQMsEYSHgF8aAyHUGHScudLiYWtAAcA9yiM8XWic0QlyD6Lz22msJTQn5Y0DjHuWpiGChVXJNA+XD5ytdoIwMxsQNgWAgTyZYEAzXmEAYIJWQIt6hzBA6ypVMsKjXmI9cK0gZ3MTDPcgm2BAPZaJuIgM0IFxjEAYbAnUW4gzxRnaQUSYM1APIH88z+LsseQezJtc5MJOi7SQNZMY9TIeQn0wEC02YE0EIHCYt8EkOXKMN16tXL6QHObr55ptDHvGlc3KIqRJzGYG4IP7kD3Mekyq0tl6fIMeuUSR+/zwWzyMT6hp5d4LFdUyNaADxV+MeOGcTckWwyJe3Dwikk3c0V5AuZEZdYZICRuQZIoVckB0TC65xhgxTbspAu/Y6zYQD4l5MgXxyUDbyC6latnqlTZ031wZNGm+dhgywt/t0t5e7d7EXPu5gz
3/Yzp7t0sae6dzGnu3S1v7xUTt7sWsne63nh9a8by/rOnKojZg2xWYvWmir1375g/6atBTig4AIVg3KKtoYnVihrZo7c6ZNHzLQprZuZjOfa2RzHmhgC26/1hbderUtaljXFjWoY4szHNwPR8O6tvDWq23undfb9IfvsIkvPGWft21lowf0C507nRMz2yjRKnSDjRIsSASmFvyIkg9IE1oiBiNClGAxIDNAESCQDz30UJgJM+tl5kznTmCQxzxHR47ZCEdcQjqCxUAWJViu8eIdTFU+IEKWcLomoF2BfJAGxA+NEDNvNDdoFvCt4h4aB0wsDMJuqsMnhwHSA/lm1s+sPhuChaYBTRvx48OCaSddgGwwKPMspAmNAXXBNVhoRcAxahbDB8rxgwx17tw5vJOKYKF98IEUmeKPBgZggeYCcyFpIzvkAnlz7R6+VEw6COQJ7RY44TzN4M1ABolAa0kcECzIntdlcHUNCjhjvsJcR13AVw+MwdafT4URaToZdnMrg2iqQFysoiQvlNnNiZBAtDnIDkKF7xjtnvs8C8n0fd3woXJCCjmGSIIVB//jV8g74Maz9BtOsCgr2tZM5UmVb67limD94Q9/SMiMvGHO9DKyFYiHDh06hLrPPXzUmFhyrVatWuF5JjKY56krHGgwXSsLwXbTtMdXyDN4Uyfoc1auWW0TZ88MBOnN3l3tuS5t7fEOreyx9i3tsXYtrFEWB882bt/Snuj4fiBjLfp9Yn3Hj7ZZixbY2nXrQlqkVxU5FxKnck5bBKuGpB9tjAxadCzzIVb9+tj0N1+2OY/cYQtur/d9MtWwri2uxAEZC89/R8YWNqwbyNaUx++30e++bp9/2semTJoUBiw0PQwMDFbkrVCNNkqwIBKY08AGIph88KznM0qw0DTg1Eygs6ODdp8P/mdQI6BBcK1EvgjWoEGDEuZGBlu0UmhA/EBbRN4gf/jefPTRRwk/JzR0mH480JliQsMMkw3Bgqi42ZQBi4ErXQA/1xrwbKdOnQK2TrDIn5Mu4gB3NIGYrhgc8Wcib1xPJljgjdaJ5zggAF5+zhALiA84QBzQtGFWI00OTHnE4bJOVYZMBAsijEYJguskj3wQNyQGrSar/7xepIqfOog/FO+RT8gxbSVVIC9oV3iWOow5mriRHxgiP/IBwSNvTj4guFHHfTDxOKJ4UYfAkHzwP9rBKMGKyjoTZqnyniuChQnUyTjYsQiBslCOKCmClDv5dYKF9tRNul5GL3+07BAt11SnKktNXANfDmSLjJeuWmkjpk629/v3CdopCNKjETIVCBYkqxJHlIw90eF9e7l7Z+syfJBNnjPb1qxdG+qhiFZNSLv6aYhgVR/DjDF4Y/QGCUlYsmihzRwxzKY3+7fNffDWoKVyDVWCJDlZqu65wQbt1rzb69nkxvfZiJbv2qjBg2zGjOmhk8b042bDynbOGQue5c1kgpXtKsJMBAt/HDpqBruaJlhoxVyLxJmVgWgskg9WHKKRiRIsyAuaEw/Umcps08C7ToAgZMz+fdDzODkjZ8x0UXMf5kuuRwkWPkDROkH8LBpg4ESTwud3uJ9MsKhPaKx4joPVmMSbjAG/kQ+kg938nWC5n5HnlYGMCQEH9ZU00xEs7oEb2lrMs5ggMVHiY0ediOYJQpoukA4ycoIG+cUvK1XApOWmQAgBZNUDuPp2HhA27kEU0BA2bNgwlIdnMe+5Ez4aN+6lwgtsiJPyuwaLslWVeOSKYEXN10yMnHCiuauIYKFx9LrIBAgfxFRlR5OXaiWnY53vs9ctiDZEZ9ys6fZ+/972dOcP7NG2G7RUlSFS2T4L4XqsXcug1fp4xJCg0frq669CPSdPHArFiYAIVp7lQmfv5Cr4gcyYbjO6tLM5je8J5rxAqCBB1SVSmd6ParTuuM4mPNPIhn3YySaMHx8GdEiOz7hrurEWimAxILqfTy5NhPhUuZaMgRWToXfMaAwhE5gzGdwhDGi80KYx8DOwR
n2JwAZzGoN8RRosqjHEBk2LkwLiw5zEdQ/URdJ3DQPp/uUvfwk+TeTTCRbpsYs3ddYDeXPzJyZI8k58yQSLa6zSc60EDsyUhescxMk+V+QDMyxkBidyd5jGAZ1BmsBghs8SZAKHdUyJ4JiOYEEoITwQGeRK/JgEwYEB2skvAz/bS6QLYNGxY8eEKRNCiTbKTZe8xzOQThYAQJjAErOia1N5hnKj3eEeJi5MiZQTTRqkiDgIYImfGc/xmSIIIvfAizTRdIEXWljijDq5R30QQ2SV+FMMBAt/M19kgpzxD/S6QlnBk7Jzpq4UIrgs6CfnLllsHw4bZM992DZopoLGqX2LSmmpsiVX/tyGNDaYEF/t0cUGTBgbzJKFtkAUQhZxSlMEK4/SolHSAOj0VyxfbrNHjbSZr79o8+++8f9NgZmIUQ7vJQhcgzq2sOHVNv2Bhjay2Rs2asiQ4E/EoMfARcfmnX4eoUlETQfqq80YpBhcWT3mPhjRM9fRmjDgVFeDhWmqMgQLQsbWBgzsYIQPljvyQjZwROYephtm8wyUDKSsDMNvCJ8nTDtoMXgPskAZGKB9OT8aN/yPIDIMJszsiZu4siFYgIpDt/sO8Q5kD38oBnC0a6zCwwmftHzQR3tCQO5OsLiHpgWne/KCGQ+HbH8PB2XfIDOZYEHo8P+C4BEPZkv2fYLscCBHHPvBAc0VdS+qBcKpGQ0bfm2k64sGwJOyED9Yu5M7/mCQITRMXGfVKNogrpMW10kDzY9rVqJmtURlTPqHgZ7PIYEj5YAA4ZcGKUFGmLvwDXTtC2fqK4OwB+pKq1atgqmM+o3pi7gguNQJD9SnaN7wvQJ3nO3ZZwstID5/bP/Bs5TTNVhxJVhoBTEnImff5oKy4CbANcqJFhUHenz/rr322kC+HLOaONMmOJDjl+vW2uhpX9gbvboG/yo35TkJqomzp9mk0wfWdtBnNnPh/IL02zWBfSmkIYKVByl6o3RytXzpUps1uL/Nfv4xW3jrNQmH9Mr4V+X02e/MhrPvqm+jXvmHjejX12bNnBm0BjVNsqIEi4GHARxflFQHJh6WwzN7rwmCxeCG4zn5Ij/sHI75EUd7Vgv6oMA9/sfvCK0Uu4Y7uWCgx0TF/lnuH8XzaIcYJKkjbCvgK6W4B/FgUGGwQdPCAJ8twWJwh8zgX+VaFQZ+iBqk0p3LKRNaAzRekA9CMsGC0PAMG30ywLnvDNcgDQw6kJ1kgkUdQq7E7asFcUSHXHJ4HtAmQR5IlzyACWZC8kbZ+cAw2IEJ1/gN6SAwMOPDw3XewZ8JcghhRRvm6UIS2T4ATSCkEGIFlqygRIuWKVA+zLjI1vMFBixKwBEdHFxbiLYOYk2dIVAmD2g1KQd55eAdFi8gew/gCBa+4AG548QPifKVr8iR95hglIIGi7IhR3CASLmTPzJC1kxCfHsI8GAFInUriq3jl48z6VAHyN+qNWus//gx9sJH7RMO6zVBqNKlAdHCbPh27242cdYMW//V/5sM84GF4qwaAiJYVcMt41veKNFcLYNc9e9jc564zxY1rBOc0BPapBxqqKpEwBrUsXm31bPR/3jChn7Wx2bMqHmSxUDsewhBCDIdDE50yk6w2LWaa2hDfOBloGJgZyDkgBC5RgHzim+JwADJyjMCpiTvyJlV+zYNECk+I0Pn7mQFHyRWh5FvCBVkwe+hlUCDhdzx/2HQhzC6BoQzWhVMZszQfaBgkGEpO2SAZziIExMjS/4hCwzKzOCJu6LAAIyJDFMTGHicYMX/kAUIE0TEt70gLxxo3EiLfGPOQmtCXjwOSBF59fcYfFIRLPKIhgZfGi8X6XMQP6sz0VJBSD1gTmXHb8gK6fnz5AWSF/0wNQMt5kLk5vizQpOtAtCs4ccDaY3GQ3yQdLR6aELJu8vA85B85hm0cWARJbuD9M9ZAAAgAElEQVTERdykDXmlnmD+TRUfMsOfy59HE8aWIYTo85BMSAQr6cDIy08aYAi5g
kDyDm0A0k6cYEAdrkpA80p+iAdTNVq+aCAtVqeSPvlADm7CwwzMBIK8ojX0uol5FyJN/iFLkFQPUV8zCC91n0C7oV7RLpPLTr5YOECZazJ4P84KwZ6jhgdfq0BsKuGwno4g5eo6+cEJftT0qQmSRb4VigMBEawcy4EOiQpOZ4Pmavann9jcR+/8f0f2QpOqSPru/zX/1mtt7LONbGivT8LATwcJKaEc0QEgx1CF6CBEdMD4w7BkPdPBMzjMgi0dMw7iXGPgdS0MecYRtk2bNuFg0PYOh2d4lnd8JRaZwN8FQhKNn+s+kDF4NW3aNGwTwcq8qKM1q9Ewo7GFBIO2+4iAH6ZB/HZwTGbAZ+8lnoecRHHlf8gcvigMJOzTBPlhQ1MIA8v7wYWBjgE/U/B4KTPmOMqEwznaIVbWMUhjegQjJ54eH+9i+sLniQPSilkRAsUqPzZQxTzmpIjnMxEs7vMsg7h/HJmVdGAJuYUkETzPnCEQyA5izGamEEzw9e0Zos9TT/GtYmEExATNHTImHu598sknwWSHwzhxUQY0JfjzJJfdMYieo/licKfOeDnQVhEfK+Awb0K4Pfh7/psz2jIwRY7UAccw+gzvUbfBBvmjkePzN+AOIfMtSniH52gLxId5lDpclQAZxdRJPJAfJzweF3mi7LQPnkE76/Uf3za2veB6tG4iV2RO3aPu+oSFONHw8Y63NZcD6TAxIB5MhO7kzv/gyz1CKmw9r7k6kwYHdXv5qpX20fBB9lTHD4LmKlfEKFfxuCaLPbaGT5kYZFMT/XausC71eESwcihhGiWVm05j5YoVNmdgX5vb+J6iJFeu8UqQrNuusdHPo8n6LAzqDACQn3w21lx3lpniS3evMtfpcFPhQRyZ7jE4OJ4VVTfqDs+TTlVDcpmIE+IB6WBgjobkZ6P3/H/qAXmijB54j4Ny4TyOlomD/33Q9Gc5EwcEgXiiaab7n0GaPBN/Kiyi7xF/Ovw9XeJKxjU5jmh+0/1POpQDIpKpLNH3q5IOZYa0kVa0/JniynQvmh//P93z6a77e5nO6d7lerb3KC/lpvzZlj1Tnip7j3wi55WrV1uPz4fZUx1bFyW5cpLWqP2GFYwvfdzJRk2bktBkpcO7snjo+aojIIJVdex+8CadgXfo88aMsjnPPrLBLMgqvojmqNj+d5I177ZrbQQ+WUOGBB8jZqo+cP2gsLpQ9ggwAKI98lWTmCPRONAGFIRAHBGAlNCPr1231gaOHxN2XIfAOJkp5jParKY9PrSpc2eHSQ7lEMkqbC0UwcoR/j7rgZQsnDnN5rz2fNiGgf2tisbnKgPJI4/sBo/j++Bmb9rYMWOCaQBNAiRLDTVHFaWEomF1Hs7I7iuETxyrFRWEQFwRgJTQ542d/oW98FGHonBoz5bUuX/Ye5/2tAVLFqvfLoJKKIKVAyH4rIeGuWzxYpvT4X1bcOf1hV8tmIFQpdOiQbK+eOBWG9y5Q/DBwbSCRoKOR0EIRBHATIa/GKv52F4Cfxv3QYs+p/+FQBwQoB+nr5uzaKG93atrUZsF05EuSNYTHVpZ1+FDbNXq1SJZBa54Ilg5EADkw30z5g0bbPMeuT2W5CqQrrBPVl0b9VxjGzagf3Cedd8VabFyUFlKKArqPFostqzAv8vrh59LqKgqSokjQJ2lH4eUdBs+OLHPVToiU8zXIVnPd2lro6ZOCT6XlKsY2iQrRVkkw0IJDrTdpT4hE8GqZsfhDRPt1ZK5c23uGy8WtVN7Os2VX99gzqxjs++sb4ObvxNWXKGpYGZHWYuhoVZTZHpdCAgBIfA9BCAhLM4YP2Oa/fOjdrHUXjnpa9S+Zch/80972KKlS4tGiwWhYgsStnrh8C8WfE8QJfZDBKuaAoVwMJNfs3q1ze3Twxbce5PFxe/KSVXy2f2xJjS+zwb17hV2VJYWq5oVRa8LASFQlAh4H750+XL7oH+fWPldOalKPqPFYvVjv7GjfrB6tlBCYKsR/5IBf
pvsscdu/aUcRLCqKV1mPl8F7dUcm/vKs/E1DSb7azWoY3PuuM4Gv/t6+J4eezcxw6O8CkJACAiBUkEg9OFffWWjpkyy57q0ibX2Kkq0IFlvfvKxzV+8KGGBKKTMIFhs1uuLYvhihQhWISVS5Gn7zIc9W+YN7m8L7v9b0F4la4Ti+Nu1WOMf/7sN7ds3+GKxh5JWFBZ5pVT2hIAQyBoB78NXrFppbQf0scf4BE0R7dRenby4FmvwhLFFsQGpCFbW1VIPggCNE63O8iVLbM67r9miW68u6v2uKk30GtSxmXfVt4EftLIpX3yR2CyTcisIASEgBOKOQOjDv/7aJs+amfjOYHVITTG+2+LTnrbku++eFrLvFsGKe2upwfxTUV21vGjyRJvf+N5gHqw0iUk2zRXT7+9WFA5/8Rn7fMSIsC8WhLKQjbQGRaykhIAQKHEE6MO/XLvWeo0ctmHlYIlor5zoocX6R5d2NmHGtIK7eIhglXhjymXxnGDROOd/9oktvLN+yZgHnSQGM2HDujbxkbtscO9e4eO9mAnlh5XLmqS4hIAQKAQC9OG4PCxeusSa9epWUubBKMFq3L6l9R45LPHpoUJNkEWwClHLY5omlZStC1YuX2ZzW71tC9E8FfkncZw4VebMxqMz77rBBrRtbdOmTUs00piKTdkWAkJACAQE6MPRyE+eMaNkzYO+ZUOrz3rakgJv2ZAPgoX8+GRXoUhjRU1JqwgrQijNfbQ4CHfpnNk29/nHwt5XG/aQqltyfljzG15tQ19/2caNHx8+cut7YqWBRpeFgBAQAkWPAIPy+nXrbODYUfZkh1Yls3rQtVd+xkz4r4872rTZs8KYVSgykg+CNWvWLLvrrrvCVyQWLVpUdNYVEawqdgPuf7Vw4jib/93O7ZXRDMXp2UW3XGWfP/+EDR82XH5YVawvek0ICIHiQQCSQR/OFwg+Hty/ZMmVk6xnOn1gn0+akNjZvRCSyAfBmjhxov30pz8NG5fWqVPHunTpEr4uUSyr3UWwqljTaJzr139l8wf1swX3bdhcNE6kKdu8Bq1cgzo27vH7bUjfvrZw4cKCzoKqKC69JgSEgBBIIADBYhBetGSxterTs2S2ZnBClXxGQ9dn5DBjSyHGrkKEfBIs31trjz32sKuuusq6d+8erC2UtVAaOzAWwapiTUNwwcG9x4e24I7rStL/KkHCGtSxyQ/eboN6dE84uhey0lZRZHpNCAgBIRAQoP/CxWP2vLn2RrcuJU+wGrdraV0GfBY0dqVMsCBam2yyiUG0/vjHP1qHDh2CRqtQZS4rgoXvEB+cXLp0abUPPnI7a/Ycm/b+u7aw4dUlT7C+uPsm69G6lY0ZMyaQLMqfCxwVR/XrojAUhqoDla8DaONHjB5lL3RqW/IECz+s93p8ZFOnTrXFixcXpO/GfBf9VM6hhx4axpPq1F2+b7j33nsndod3TZaf+ebhX/7yF+vYsaO5j1ZNKgfKimDNmzfPbrvtNqtbt261D+y9f6r9Z3vz2tqlt/9Vir24xt5Y266++EK74oorQoXNBYaKo/r1UBgKQ9WBqtUBTEm161xl973+cskTLEyGDZ5oFLQ6jF2FqDPnnXeebbHFFgkyxGdzfv/731crL7/97W9t2223TcTpxCr5jEaLMnft2jUoWWrKdFhWBGvChAm2zz772MYbb5yTAyHe/cujbGkKQpIwr5XIvc+vudwOrrVjqMi5wk/x5KYeCkfhqDpQtTqw46672C0vPF3yBKtxh1Z26V+vz8m4V9W6lkx6+F3VuPy9VHGmu8Y7u+++eyCZffr0Cdss5dtiXnYEK5M6MZ1gMl0vF4I1CoK18waClQkP3duowtmUMBJGqgPFUQd22KWW3VwWBOt9u+Sm68u+b9p5553twgsvDKsNv/7663zzq/Jycp85c6b96U9/srPOOqvax5lnnmm/Oe00e/HKS0rb/+o7Ddzo+n+03578azv55JPt9NNPrzZ+uZCB4qh+P
RaGwrBc68AZZ5xhZ557rt352gslr8HCRFjvwXvtN7/5jTF2FULmxx13nG222WYJkodpj/GkOnn51a9+ZVtttVUizlQTFzRXmCNJ59VXXzX2zqopp/ey0mCxagTHRnyxqnvMnTvXvpg6zaY0f6NMnNxvtI9bNLPhw4fbjBkzqo1fdfHX+9Wvw8JQGJZzHaAf+2zAAHuufauSJ1g4ub/epb2NHj3a5syZU5D+mxV9O+ywQ4IMHXLIITZixIhq5aVv37621157JeJMJliQuFNPPdWaNm1qs2fPThCrmnJ0LxuClWtAiW/t+vU2v/uHtuD20t+mYcqDt9ngnj1s/vz5VhOq1bzrbpWAEBACZY0A/djkqVPtlQ/blzzBeqxdC/ugd4+wgpD9vwoRIENokpwEHXHEEYaiojph0qRJYaNRj9PPmALPP/98e/vttwOxipY511wgU/7LhmBlAqEq91Axrlu/3hYM/MwW3PvX0jYTNqhj4xvfZ0P7fhY0gHRMNVlJqyIfvSMEhIAQSIcA/Rf92PQZM+ytrp1LnmA9zj5YffuE7RmiZCMdPvm4nu+NRjEF1qpVy84555ygsapJU2A6vESw0iFTwXUI1ldffWWLxo+xBQ/dVsJbNdQJZRv1/OM2YvgwW7ZsmXZyr6Bu6LYQEALFjQAEC5cRTKSte3azx9o2L2mS9VT7Vvbp4EGJ3c0LIZ18Eax9993XtttuOzvttNPstddeCxorVwD4uRDlJU0RrCoiD8Hyjz3Pf+7R8LHnUtuaIZSnQR3jY8/D+NjzhAlhJ2BmQIWuuFUUm14TAkJACIT+i36MDZM/7vupNW7Xwhq1b1mSJItyPd+xtQ0fNcrWrFmT8EOq6WqQD4I1ffp0q1evnr311lvBtyyqnSuGMUoEqwq1DMFxIMxVy5bZvOZvlDTBmnVnfRvc9n2bNm2arV27tmANtAqi0itCQAgIgR8gQP/NJHnVypXWf9hQe6b9+yX7wWcc3Jt2aW8TJ0+29evXF6z/zgfBworEzvTIshiDCFYVpeINFMKxoHd3W3hHvZL0w+Jjz1Meut2G9O4Z1OlU6GKtzFUUpV4TAkKgDBGgD6f/Hjt+vP2rU9uSJFhorxq1bW7v9+gaNDxYXSg3R02HXBOsaBmi/9d0uTKlJ4KVCZ0K7iHU4Ic1cbzNf/TOkvTDWtiwro18oYmNGjEiOEh6A60AGt0WAkJACBQ1AvTfOLqzP2Lzrl3ssbYtSs5EiPbqyXYtrWf/vqH/5nu8hSIjuSZYRV25vsucCFY1pERFpcKuWLLY5r31si3io88l8mkc97+afef1NviDVvbF1KnBfi//q2pUGL0qBIRA0SDg/Td+WD379bUm7VqWnh9Wuxb2r45t7PMxYxL+VyJYNVcFRbCqgTUVFcKxdu2XtqD/p7bg3ptKyky4qEEdm/jYvTas72c2f8GCoK3DPFioBloNUelVISAEhMAPEKA/w/F79Jgx9mqJmQkxDzZu28Jad/s4aOkK7d4hDdYPqp8uVIQADRQ185I5s2zeC0/YolvqlIYWq0Edm3d7PRv21ms2Yfx4W7FiRdDWyf+qohqh+0JACMQBASaKHPTf7JnUrkdXe7xt85LRYmEefLZdK+s7aMD39r8q1ARZBCsOraKI8ugNFC3WmtWrbX6vbrbg7htLQ4vVoI5NbnS3DevTKzi3++oTL3MRiUFZEQJCQAhUCQH6M/pvJpCDhw21l9q3Lhln98Ztm1uzDzva5MmTE6u/C0WuEI4IVpWqaHm/RIVFq/PV11/b0jmzbd6rz27YsqFBjDVZDerY3Duut+HN3rCJEyaEzenwNaOchWyg5V3TVHohIATygQD9GhNI9lRq2+1je6Jti9iTLLRXz7drZX369bPFixYVxebQAwcOtP333z/sts6O63zomY1eSznIBysH0qWBMgv68ssvbf6gvrbggQbxXVHYoI6xcnB8k4dteP++4duDdD6UT+QqB5VFUQgBIVA0CNCncdC/L
V++3IYMG2qvtG8ddnaP7caj7VrY422aW8sPOwXtFeMSY1Shw4IFC6xly5bh+4B8I7Bjx45hzCx0vvKZvghWNdH1BsqZLQyWL1lkc1u/G9t9sXBsn3H/LTa0UzubOm2arV69OvheUT4FISAEhEApIgABwQl89qxZ9tEnPeyZOGux2rWwl9u1tkGDBxd8awavK9FxEqw5/Jo/U4pnEawcSZXKQqVB27No2lSb91KToAmK07YNkKs5d15vI9/5t00YOzZ8dxAH0HJpDDmqCopGCAiBmCFA/40Wa9WqVTZu3Dhr0bmDPdGmeexMhcGxvU0L+7hnD5s1c6atW7cu0X/HTCQlkV0RrByKkUaKr9KXX66x+SOG2Pwn7ttAsuLgj8U3B2+71sa+2MQ+HzrYFixcGMiiTIM5rCCKSggIgaJFgP4bKwSfXhk4eJA1bdfaGseIZIVNRds0t1ZdOtr48eOD9UH9d2GrmwhWjvFH20MjXblypc37tKfNf+jW4nd6D35XV9uEpx+2EZ99Gj6pgN1ejTPHlUPRCQEhUNQIuBVizpw59kmf3vZim5ax8MfCX+zxNu/ZW+3b2NBhw/TVjSKpZSJYORaEmwoxra1ctszm9upq8x+6rXg/Bt2gji249Rqb2OSR/2vvzmOrrPI/jrvv+xKXaNxi3LcYd6NG4xaNcYtxiaKIK3HQQRzRAWUbQVCGRRCHrXtLW/awFQS6CgiDIBXaCt0olLYUGUCcf76/fA6e5tKgP2B66L087yept7T3Puec1+m9fu455znXls2fa9XV1W7jPYVEPzXYxkScDgEEEIhLAT9VqDeYa3/+2Wbm5dk/J6S5kNVnYnpcfpSORq76Tki10blZVlRcbFpM7jcVVXs42k+AgBXA3ocsbd3Q3NBgtXkzdn1WYZx9jI7WXG14r6OtHtjLls3Ls8qqKjesHBuueIIG+APhlAggELcCes3TUg9d4FO2psymz5plQ7J+D1m58fV5hQpXWiv2TXamLSwosNqaGre0w7855vW7ff/MCFiB/FtC1s6dtrmp0WoX5Nn6flqTpc8rbO89sl5yI2rru75qpf/8zJbnL3A7GesFReFKdfdfgXg4LQIIIBCXAv61TyFLi961Uef0mTNtaGaq9c1OiYuF7wpWvXPS7LOsFBudnWn5+flu9kGL2lnaET9/VgSsAH2hJ6gO3eqdhEaytjQ32/p/L7Ha4QNtY9dO7bpPlva5qure2VaMHWUrlyx2m73p87hiw1UAFk6JAAIIJIRA65BVXl5uefPm2qisdPtHVnL7hiyNouWk2sDMZEvJzbaS70qstrbWXTEYG678/4cSAvwgrSQBK3DH+pD123//64acN1aUW01Omq3v1dU2vvvyAQ1amhKs69rJygf0sB+m5NqaVausvr7efYwCO7UH/kPg9AggkDACPmDpDbJeG/UGtKqyygoKCy0lJ9sGZaRYn3a4wlAjV/0mpNjQjFTLnTbVli1bZhv3cMU34So+/tQIWAegH3zI0hN1+44d1lhfbzXF+VYz8gur+/Btqw8ctOq7dHBrrSp7/tVWjRtpK4sLrbKq0n3+Vuwu7f5F5QCQUAQCCCAQ1wL+9VC3Ghnavn27W0C+dOlSmzxtmo3ISLH+mcnhg5ZGrH5fyD4wI9nGZmXY7Lw8++mnn9xehSxoj98/IwJW4L7Rk9Mf/t2QnhCa299YWWmV82Zb9fABVte9c5tvTLprEfsrVtnzPSv91zBbMX+uVZSVuX1edJVM7Caivo7cIoAAAgjsLuBDll67m5qarKyszBbm51tGTrYNTU2yf2QkWe+cVBeE2vRqQ4Wr7BQbkD7evk5LsUlTp9rixYt3WzPrF7TvXmP+FQ8CBKwD1Aux74Z80NLo0ZYtzbahqtKqixZaZdoYq+nfY1fYeq+j1f+lg+3rTvB+GrD6712s4su+VpqVaqVFBba2vNxNB2ohu14k9I4s9okZGwQPEAnFIIAAAgkjoNdI/9qt0SxNzWlDz
/kLFtiESRNtZHqKDUobb/3cqNZ+LoZ3i9dTre+EFPssI8kGp4630ZnpNmX6NCspKbGKigoX8FrPPCQMYsQqSsBqhw73YUtPVi0s15Pll61brb6uzmpW/Wjrvp1t69LHWNWwAVbdr7ut//tfrO5vb9qGbq/bhvdfsw1dO+360vfd3rC6v71l1T3fs3UDeljFiC9sdWaylc6fa2UrV7grSxoaG90aAgWr2LVWvh7tQECRCCCAQEIJ+DehPmjptVtrs7Tv1E+lpVZYWGhTp0+z5KxM+yot2b5IS7L+6UnWLzPJBSbtCq91W1qg3vKVvevKRK2r0ijY5+njbXBqko1KT7WMnBybNWe2G7FSsGpoaHAL2WNfwxMKMIKVJWC1Y6f7gOPfFSkAbdu+ze0CX1+/yWrXrrXKFcttbUmhVcydbRVTc608J93KM5OtPCvFynIzbM20SbZm3hxbU1xoFcv/besqfnZXBSpUaRpSl+1qKpARq3bsaIpGAIGDRiD2dVuv3Xp9VdDSR+ysW7fOVvzwgxUWFNiMGTMse+JEF7hGp6XYyJQkG5Y8zoa4r7HudnjyeBuVkmxj09MsbUKWTZw82fLmzLHvvvvOSktL3adqbN68eY/Byge+gwb2IGwIASsOOjX2CasgpC8/srVt23Z39WFzc7M1NTVaw6ZN1rCp/vevTdbY0GBNmzdb85YttvU//3ELMRXU9HgfqvQi4MtQc3lixkGnUwUEEEhoAf+aqtfX2DfJCluNjY3uje7PFRVuGlFX+y1atMiKioqsoKCg5au4uNiNUC1fvtwtWldA09SjQpVfJ9v6dVxovIYnxp8OASvO+sk/aXWrJ60PXBoWVmj6oy/9vvUT0Z+LJ2ScdTLVQQCBg0LABx3/Wutft/V6rJEtLf9QUNJsgkKTFsgrfPkv/UxvnvV7retq/ebYn9dj+fL8v7mNbwECVhz3j39y7e9tHDeNqiGAAAIHrYB/zfajW/6Ncutb/8bYvzn2j9MtR+ILELDitA95gsVpx1AtBBBA4E8E/Gt3bFja1+//5PT8KoEECFgJ1FlUFQEEEEAAAQQSQ4CAlRj9RC0RQAABBBBAIIEECFgJ1FlUFQEEEEAAAQQSQ4CAlRj9RC0RQAABBBBAIIEECFgJ1FlUFQEEEEAAAQQSQ4CAlRj9RC0RQAABBBBAIIEECFgJ1FlUFQEEEEAAAQQSQ4CAlRj9RC0RQAABBBBAIIEECFgJ1FlUFQEEEEAAgX0R0Can2ileu8b7TVD35fHxfF+1x++W7zdzjaf6ErDiqTeoCwIIIIAAAm0ksGPHDtMHSU+cONHS09MtPz/fqqurXShpoyL2+zT6XN2VK1fa/PnzbdOmTft1HoVGtW/q1KmmD8qOtwBJwNqvbuVBCCCAAAIIxK9ATU2NffTRR3bJJZfYcccd577OOOMMu+eee2zSpEnug6jbs/b6oOsXX3zRTj75ZJsyZcr/WxWFp2XLltk777xjo0aNMoVHfUD222+/bSeccIJ98803cREcYxtCwIrV4HsEEEAAAQQSXODXX3+1fv36uVB19tln20svvWTdunWzhx56yI455hi78sorbd68eS2BRNNsCizNzc32yy+/mEaX/GiQbn/77TfbuXOnm2rU77Zt29Yy5ajpRwUdfek8OvxjVA/9Xo/dunWru/XnVcB6+umn7fDDD7fc3NyWx+kcOpfqocfp/vrSeTIzM+3oo4+2xx57zJqamlxITEpKstdee82+/fbblvJ1DpW9ZcsWdx7V35erW7XBt0e3Kiu2/m3V/QSstpLkPAgggAACCLSzgAKERnquuOIKO/bYY23AgAEu3KhaFRUV9vDDD9uRRx7pRn4ULvw02+eff+6CyrvvvmvJyclWX1/vQonuk5OTYyNGjLDi4mJToOnRo4dlZGRYXV2dzZ071z755BPr06ePff/99+58esyECRPcYwoLC92I0/vvv29ff/21m8pTANpTwFLIW7hwofXq1cu6dOliw
4YNsx9//NEFp9WrV9tbb73l6n799de7Om7cuNFmzpzp2rh06dKWILZq1SobPHiwvfnmm27Ea/To0VZbW+t+r/bOmjXLhgwZYkVFRabfqay+ffu21L+tupCA1VaSnAcBBBBAAIF2FlDAGjNmjBu9uuGGG6y8vLylRgo2K1ascGuySkpKXBjKy8uzG2+80U2zaTrx3HPPddN2r776qgtQGtV64IEH3PluuukmN+V4/PHH26mnnmpPPPGEKexolOyoo46yO+64w62r0mjV/fff785z3XXX2Xnnnefur9Gzxx9/3NauXbvHgKXwduGFF7r7XnDBBXbiiSfabbfdZgppkydPNv3s0EMPdXV95JFH3PorhSiVPXLkSBeg1C7VQ9OGF198sZ1//vnuPM8++6xVVla6kauOHTvaEUcc4dp92WWXuTbrHLfffrurfwvY//gNAet/BOThCCCAAAIIxIuAApZGlBRENCWosKNDP299aBRJwUNh4/XXX7eCggI38qRQpICioKaAdd9999khhxxizzzzjE2bNs06dOjgpvYUrL788ktLTU01BRUFKH2vKcR7773XPebOO+90o00KQP4+GpnSwnY/RahF+ApdClOnnHKKGw2bM2eOKQhpClF1UzjSKJumCLWObNGiRdbY2GidOnVybf3qq6/cVN8bb7zh2vP888/bggUL3AL4W2+91dVt0KBBbipU9Vd7brnlFjc9qdG4Sy+91I34qf5tdRCw2kqS8yCAAAIIINDOAhql+vDDD12A0CiPwk7soaCl++hL029+lEfTfzq0PklTgAogCiuahlPA0nTj2LFj3X0USBTAFFw09aY1TE899ZR7jKYBtZ5JAUuBS8HHn1ejTdFHfgwAAAhBSURBVDrvK6+84kbHfMDSonuNpGkRvkacFNp01aPWjekc1157rRuJ01Sl6vHkk0+64KhyYgOWQtrVV19tZ555pps6VLlqp6ZJFSI14qa1Wy+//LILZT179nSjeKq/RtYU5hT+2uogYLWVJOdBAAEEEECgnQUUoIYOHepGejTlpasJ/aHfaWsETbctXrzYrak6/fTTXSgpLS11d9Nico02KWxoxEojRwpYClQKVjoUiHT1n0aSGhoa3GJyjYQpPOmxPmCddtpprixfvtY5HXbYYS6MKZjFBiyt2TrppJNcOVqEf80117h66XuVX1ZW5taCKWApKPmF6T5gaY2YtmzQNOJFF13kRrhUrgKW1pTpcTqPPBSwFLj0GLVX53ruuecIWL6juEUAAQQQQACB3QUUorS3lEaCtE5q3Lhx7oo6/Vx7YCmcKBxpIbnClqbtzjnnHHdVoc6kcKQF6Zpi7Ny5s1vsvrcBS4+JDVgKNQoxKlvn1ciVQpjOu2HDht0ClhbLa+TpqquucgvntVBd66k0fajRLU11Zmdnu6Ck0SZdIdh6BEthUOvOFOz8lYm6glBTpj4waspTU4QKWBptU8DSuQlYu/8d8S8EEEAAAQQQaCWg8KEr7hQiLr/8chemtHeURm40EqVRIa230v20mF1XFWq0SiNbCh1aj6SRLY1Y6T57G7AUnmKnCPVvrcHS1KJG1TQdqTVWKSkpbv2UH8FSiKqqqrK77rrLhcJPP/3UTfF1797dLaL/4IMPXD1UPy2wv/nmm01rtHSlox/B0lSkpkO7du3q2vPoo4+6kDV+/Hg3GqbRMYU/bd9AwGr1B8M/EUAAAQQQQGDvBDQCpFEZXYmnaTldJadRHAUuBR6N7OjQKNHdd9/t1jppBElBRCNfCipaRK4RH10RqJ/7KUIFHd1H66z8FKEWlev8CnJ+ilAjWBqR0giZgp02PNXCda3r0gJ7hTqFOwUsHRqh0vYSuu9ZZ53lRqt0heOMGTPcKJimABUO1RbVWdtR+EXtfqRMV0lqKwqVrTVdCnQy0EJ5jZppCwmNpGmxvOrqR7BeeOEFV5fhw4e7urTFf1iD1RaKnAMBBBBAAIE4ENB0nA7daoRHHyMzcOBA+
/jjj90IjvaL8uFK91PA0P5Y2iJB04a6rwKNApAO3VdX42nKTaNMOrR+SqFIm3sqsGhhvBbJax2VzqUpN4UvhTCNGqkOWriuAKWQo7rpvNqHSo/x68T0M4UmhaXevXu7Paq0EF97V+nQ6JO2bNDeVQp7uhJxyZIl7hzajkLn1ZorTRWmpaW5qxH79+/vdopXENTv1V5dgai6qK76meqvoKm6aK1XWx0ErLaS5DwIIIAAAgjEiYCCgz8UUBROFD7+7FDA8WFG94s9x589rvXvNK3oA5YWxOtQsPHHns4b+zPVU3WJ/Vns9zpP63/7c8fe6hwKT/7Ym8fovnt7P3/eP7olYP2RDD9HAAEEEEAAgX0W0FV5Dz74oNuAVNOJUT0IWFHtedqNAAIIIIBAAAGNHE2fPt20wFx7U0X1IGBFtedpNwIIIIAAAgEENMWmaT5NC7bVdFuAagY/JQErODEFIIAAAggggEDUBAhYUetx2osAAggggAACwQUIWMGJKQABBBBAAAEEoiZAwIpaj9NeBBBAAAEEEAguQMAKTkwBCCCAAAIIIBA1AQJW1Hqc9iKAAAIIIIBAcAECVnBiCkAAAQQQQACBqAkQsKLW47QXAQQQQAABBIILELCCE1MAAggggAACCERNgIAVtR6nvQgggAACCCAQXICAFZyYAhBAAAEEEEAgagIErKj1OO1FAAEEEEAAgeACBKzgxBSAAAIIIIAAAlETIGBFrcdpLwIIIIAAAggEFyBgBSemAAQQQAABBBCImgABK2o9TnsRQAABBBBAILgAASs4MQUggAACCCCAQNQECFhR63HaiwACCCCAAALBBQhYwYkpAAEEEEAAAQSiJkDAilqP014EEEAAAQQQCC5AwApOTAEIIIAAAgggEDUBAlbUepz2IoAAAggggEBwAQJWcGIKQAABBBBAAIGoCRCwotbjtBcBBBBAAAEEggsQsIITUwACCCCAAAIIRE2AgBW1Hqe9CCCAAAIIIBBcgIAVnJgCEEAAAQQQQCBqAgSsqPU47UUAAQQQQACB4AIErODEFIAAAggggAACURMgYEWtx2kvAggggAACCAQXIGAFJ6YABBBAAAEEEIiaAAEraj1OexFAAAEEEEAguAABKzgxBSCAAAIIIIBA1AQIWFHrcdqLAAIIIIAAAsEFCFjBiSkAAQQQQAABBKImQMCKWo/TXgQQQAABBBAILkDACk5MAQgggAACCCAQNQECVtR6nPYigAACCCCAQHABAlZwYgpAAAEEEEAAgagJELCi1uO0FwEEEEAAAQSCCxCwghNTAAIIIIAAAghETYCAFbUep70IIIAAAgggEFyAgBWcmAIQQAABBBBAIGoCBKyo9TjtRQABBBBAAIHgAgSs4MQUgAACCCCAAAJREyBgRa3HaS8CCCCAAAIIBBcgYAUnpgAEEEAAAQQQiJoAAStqPU57EUAAAQQQQCC4AAErODEFIIAAAggggEDUBAhYUetx2osAAggggAACwQUIWMGJKQABBBBAAAEEoiZAwIpaj9NeBBBAAAEEEAguQMAKTkwBCCCAAAIIIBA1AQJW1Hqc9iKAAAIIIIBAcAECVnBiCkAAAQQQQACBqAkQsKLW47QXAQQQQAABBIILELCCE1MAAggggAACCERNgIAVtR6nvQgggAACCCAQXICAFZyYAhBAAAEEEEAgagL/B4DjyqdoV+WEAAAAAElFTkSuQmCC) # + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" executionInfo={"elapsed": 929, "status": "ok", "timestamp": 1597591847767, "user": {"displayName": "Tulio Philipe", "photoUrl": 
# + id="4U5WZnM8DBdF"
from rx import of, operators as op

# Emit five words, map each to its length, and keep only lengths of 5 or more.
of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    op.map(lambda word: len(word)),
    op.filter(lambda length: length >= 5),
).subscribe(lambda value: print("Recebido {0}".format(value)))

# + [markdown] id="lpAE5X8EDaaX"
# **Chaining operators**

# + id="I5bdLDMbEu4q"
from rx import of, operators as op

# Same pipeline as above, but the observable and the composed chain of
# operators are built as separate, named steps.
words = of("Alpha", "Beta", "Gamma", "Delta", "Epsilon")
pipeline = words.pipe(
    op.map(lambda word: len(word)),
    op.filter(lambda length: length >= 5),
)
pipeline.subscribe(lambda value: print("Recebido {0}".format(value)))

# + [markdown] id="f3coheZhDp-L"
# **Building your own operators**

# + id="5C0A1QxmDovA"
import rx
from rx import operators as ops


def length_more_than_5():
    """Composite operator: map each string to its length, drop lengths < 5."""
    return rx.pipe(
        ops.map(lambda word: len(word)),
        ops.filter(lambda length: length >= 5),
    )


rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    length_more_than_5()
).subscribe(lambda value: print("Recebido {0}".format(value)))

# + id="dzllkCGPD07v"
import rx


def lowercase():
    """Hand-rolled operator that lower-cases every string it forwards."""
    def operator(source):
        def on_subscribe(observer, scheduler=None):
            # Forward each item lower-cased; errors/completion pass through.
            def forward(value):
                observer.on_next(value.lower())

            return source.subscribe(
                forward,
                observer.on_error,
                observer.on_completed,
                scheduler,
            )

        return rx.create(on_subscribe)

    return operator


rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    lowercase()
).subscribe(lambda value: print("Recebido {0}".format(value)))

# + [markdown] id="hTjwYT7BHA2t"
# **Operators - continued**

# + id="_f3AOXlOIaxu"
import operator

import rx
from rx import operators as ops

a = rx.of(1, 2, 3, 4)
b = rx.of(2, 2, 4, 4)

# zip pairs up elements from both streams as tuples; the map multiplies
# the two members of each pair.
a.pipe(
    ops.zip(b),
    ops.map(lambda pair: operator.mul(pair[0], pair[1])),
).subscribe(print)
# + id="KOdTFGcsIUNA"
import rx
from rx import operators as ops

first = rx.from_([1, 2, 3, 4])
second = rx.from_([5, 6, 7, 8])

# rx.merge takes observables as positional arguments, so a list of them
# can be splatted in.
streams = [first, second]
merged = rx.merge(*streams)
merged.subscribe(print)

# + id="BjMduBVlHeXW"
import rx, operator as op

first = rx.from_([1, 2, 3, 4])
second = rx.from_([5, 6, 7, 8])

# merge interleaves both streams into a single observable.
merged = rx.merge(first, second)
merged.subscribe(print)

# + [markdown] id="OFPmVO5cElln"
# **Concurrency**

# + id="0PezA80jEoRs"
import multiprocessing
import random
import time
from threading import current_thread

import rx
from rx.scheduler import ThreadPoolScheduler
from rx import operators as ops


def intense_calculation(value):
    """Simulate a long-running task (sleep 0.5-2.0 s), then echo the value."""
    time.sleep(random.randint(5, 20) * 0.1)
    return value


# One worker thread per CPU core for the shared scheduler.
optimal_thread_count = multiprocessing.cpu_count()
pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

# Process 1: a fixed sequence of words, subscribed on the thread pool.
rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    ops.map(intense_calculation),
    ops.subscribe_on(pool_scheduler),
).subscribe(
    on_next=lambda s: print("PROCESSO 1: {0} {1}".format(current_thread().name, s)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("PROCESSO 1 finalizado!"),
)

# Process 2: the integers 1..9, also on the thread pool.
rx.range(1, 10).pipe(
    ops.map(intense_calculation),
    ops.subscribe_on(pool_scheduler),
).subscribe(
    on_next=lambda i: print("PROCESSO 2: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("PROCESSO 2 finalizado!"),
)

# Process 3: an endless one-second ticker, observed on the thread pool.
rx.interval(1).pipe(
    ops.map(lambda i: i * 100),
    ops.observe_on(pool_scheduler),
    ops.map(intense_calculation),
).subscribe(
    on_next=lambda i: print("PROCESSO 3: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e),
)

input("Pressione alguma tecla para sair\n")
227,395