code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: latest
# language: python
# name: latest
# ---
# # Python
# [](https://colab.research.google.com/github/ritchieng/deep-learning-wizard/blob/master/docs/programming/python/python.ipynb)
# ## Lists
# ### Creating List: Manual Fill
# A small list written out by hand.
lst = [0, 1, 2, 3]
print(lst)
# ### Creating List: List Comprehension
lst = [i for i in range(4)]
print(lst)
# ### Joining List with Blanks
# +
# str.join only accepts strings, so convert every element first
lst_to_string = [str(item) for item in lst]
# Glue the string elements together with a single space between them
lst_join = ' '.join(lst_to_string)
print(lst_join)
# -
# ### Joining List with Comma
# Same elements, but separated by ", " instead of a space
lst_join = ', '.join(lst_to_string)
print(lst_join)
# ### Checking Lists Equal: Method 1
# Returns `True` if equal, and `False` if unequal
# +
# Shifted-slice trick: a list has all-equal elements iff the list minus its
# first element equals the list minus its last element (pairwise comparison).
lst_unequal = [1, 1, 2, 3, 4, 4]
lst_equal = [0, 0, 0, 0, 0, 0]
print('-'*50)
print('Unequal List')
print('-'*50)
print(lst_unequal[1:])
print(lst_unequal[:-1])
# False: at least one adjacent pair differs
bool_equal = lst_unequal[1:] == lst_unequal[:-1]
print(bool_equal)
print('-'*50)
print('Equal List')
print('-'*50)
print(lst_equal[1:])
print(lst_equal[:-1])
# True: every adjacent pair matches, so all elements are equal
bool_equal = lst_equal[1:] == lst_equal[:-1]
print(bool_equal)
# -
# ### Checking Lists Equal: Method 2
# Returns `True` if equal, and `False` if unequal. Here, `all` essentially checks that there is no `False` in the list.
# +
print('-'*50)
print('Unequal List')
print('-'*50)
# Compare every element against the first; all() is True only if no mismatch.
# (Uses lst_unequal/lst_equal defined in the previous cell.)
lst_check = [i == lst_unequal[0] for i in lst_unequal]
bool_equal = all(lst_check)
print(bool_equal)
print('-'*50)
print('Equal List')
print('-'*50)
lst_check = [i == lst_equal[0] for i in lst_equal]
bool_equal = all(lst_check)
print(bool_equal)
# -
# ## Sets
# ### Removing Duplicate from List
# Sets can be very useful for quickly removing duplicates from a list, essentially finding unique values
# +
# Two overlapping lists; concatenating them keeps duplicates.
lst_one = [1, 2, 3, 5]
lst_two = [1, 1, 2, 4]
lst_both = [*lst_one, *lst_two]
# Round-tripping through a set drops the duplicates
# (note: the resulting order is arbitrary, not guaranteed sorted).
lst_no_duplicate = list(set(lst_both))
print(f'Original Combined List {lst_both}')
print(f'No Duplicated Combined List {lst_no_duplicate}')
# -
# ## Lambda, map, filter, reduce, partial
# ### Lambda
# The syntax is simple `lambda your_variables: your_operation`
# #### Add Function
# A lambda bound to a name: takes two numbers, returns their sum.
add = lambda a, b: a + b
add(2, 3)
# #### Multiply Function
# Same shape, but returns the product of the two arguments.
multiply = lambda a, b: a * b
multiply(2, 3)
# ### Map
# #### Create List
# Digits 0..10 inclusive.
lst = [i for i in range(11)]
print(lst)
# #### Map Square Function to List
# +
# map() is lazy: it returns an iterator, not a list
square_element = map(lambda x: x**2, lst)
# This gives you a map object
print(square_element)
# You need to explicitly return a list
print(list(square_element))
# -
# #### Create Multiple List
lst_1 = [1, 2, 3, 4]
lst_2 = [2, 4, 6, 8]
lst_3 = [3, 6, 9, 12]
# #### Map Add Function to Multiple Lists
# map() accepts several iterables; the lambda receives one element from each
add_elements = map(lambda x, y, z : x + y + z, lst_1, lst_2, lst_3)
print(list(add_elements))
# ### Filter
# #### Create List
# Rebinds lst to digits 0..9 (note: also used by the reduce example below)
lst = [i for i in range(10)]
print(lst)
# #### Filter multiples of 3
# filter() keeps only the elements for which the predicate is True
multiples_of_three = filter(lambda x: x % 3 == 0, lst)
print(list(multiples_of_three))
# ### Reduce
# The syntax is `reduce(function, sequence)`. The function is applied to the elements in the list in a sequential manner. Meaning if `lst = [1, 2, 3, 4]` and you have a sum function, you would arrive with `((1+2) + 3) + 4`.
from functools import reduce
sum_all = reduce(lambda x, y: x + y, lst)
# Here we've 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 = 45
print(sum_all)
print(1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9)
# ### Partial
# Allows us to predefine and freeze a function's argument. Combined with lambda, it allows us to have more flexibility beyond lambda's restriction of a single line.
# +
from functools import partial
def display_sum_three(a, b, c):
    """Print the sum of three numbers in the format 'Sum is <total>'."""
    total = a + b + c
    print(f'Sum is {total}')
# Freeze b and c; only `a` remains to be supplied at call time.
fixed_args_func = partial(display_sum_three, b=3, c=4)
# Given fixed arguments b=3 and c=4
# We add the new variable against the fixed arguments
var_int = 1
fixed_args_func(var_int)
# More advanced mapping with partial
# Add a variable from 0 to 9 to the constants
print('-'*50)
# display_sum_three returns None, so the collected list is just placeholders
_ = list(map(fixed_args_func, list(range(10))))
# How about using with lambda to modifying constants without
# declaring your function again?
# Call-time keyword b=2 overrides the frozen b=3 (partial allows this).
print('-'*50)
_ = list(map(lambda x: fixed_args_func(x, b=2), list(range(10))))
# -
# ## Generators
# - Why: `generators` are typically more memory-efficient than using simple `for loops`
# - Imagine wanting to sum digits 0 to 1 trillion, using a list containing those numbers and summing them would be very RAM memory-inefficient.
# - Using a generator would allow you to sum one digit sequentially, staggering the RAM memory usage in steps.
#
# - What: `generator` basically a function that returns an iterable object where we can iterate one bye one
# - Types: generator functions and generator expressions
# - Dependencies: we need to install a memory profiler, so install via `pip install memory_profiler`
# ### Simple custom generator function example: sum 1 to 1,000,000
# - What: let's create a simple generator, allowing us to iterate through the digits 1 to 1,000,000 (inclusive) one by one with an increment of 1 at each step and summing them
# - How: 2 step process with a `while` and a `yield`
# +
# Load memory profiler
# %load_ext memory_profiler
# Here we take a step from 1
def create_numbers(end_number):
    """Yield the integers 1..end_number (inclusive), one at a time.

    Being a generator, this keeps only the current value in memory
    instead of materialising the whole range.
    """
    value = 1
    while value <= end_number:
        yield value
        value += 1
# Here we sum the digits 1 to 1,000,000 (inclusive) and profile peak memory
# NOTE: `total` is defined by the %memit magic line, so this cell only runs
# inside a notebook/IPython session, not as a plain script.
# %memit total = sum(create_numbers(1e6))
print(total)
# -
# #### Without generator function: sum with list
# - Say we don't use a generator, and have a list of digits 0 to 1,000,000 (inclusive) in memory then sum them.
# - Notice how this is double the memory than using a generator!
# %memit total = sum(list(range(int(1e6) + 1)))
print(total)
# #### Without generator function: sum with for loop
# - Say we don't use a generator and don't put all our numbers into a list
# - Notice how this is much better than summing a list but still worse than a generator in terms of memory?
# +
def sum_with_loop(end_number):
    """Return the sum of the integers 0..end_number (inclusive).

    Bug fix: the original body incremented `i` before accumulating
    (`i += 1; total += i`), which summed 1..end_number+1 and therefore
    overcounted by end_number + 1 relative to the list/generator
    versions this loop is compared against.
    """
    total = 0
    for i in range(end_number + 1):
        total += i
    return total
# Profile the loop version; `total` only exists after the magic runs in IPython.
# %memit total = sum_with_loop(int(1e6))
print(total)
# -
# ### Generator expression
# - Like list/dictionary expressions, we can have generator expressions too
# - We can quickly create generators this way, allowing us to make computations on the fly rather than pre-compute on a whole list/array of numbers
# - This is more memory efficient
#
# +
# Define the list
list_of_numbers = list(range(10))
# Find square root using the list comprehension
list_of_results = [number ** 2 for number in list_of_numbers]
print(list_of_results)
# Use generator expression to calculate the square root
generator_of_results = (number ** 2 for number in list_of_numbers)
print(generator_of_results)
for idx in range(10):
print(next(generator_of_results))
# -
# ## Decorators
# - This allows us to to modify our original function or even entirely replace it without changing the function's code.
# - It sounds mind-boggling, but a simple case I would like to illustrate here is using decorators for consistent logging (formatted print statements).
# - For us to understand decorators, we'll first need to understand:
# - `first class objects`
# - `*args`
# - `*kwargs`
# ### First Class Objects
# +
def outer():
    """Demonstrate first-class functions: build and return a nested function."""
    def inner():
        print('Inside inner() function.')
    # Return the function object itself (no parentheses, so it is not called).
    return inner
# Here, we assign the function returned by `outer()` (i.e. `inner`) to `call_outer`.
call_outer = outer()
# Then we call `call_outer()`, which runs `inner` and prints its message.
call_outer()
# -
# ### *args
# - This is used to indicate that positional arguments should be stored in the variable args
# - `*` is for iterables and positional parameters
# +
# Define dummy function
def dummy_func(*captured):
    """Print the tuple of positional arguments this call received."""
    print(captured)
# * unpacks an iterable into separate positional arguments at call time
dummy_func(*range(10))
# If we do not use *, the whole range object is passed as ONE argument
dummy_func(range(10))
# See how we can have varying numbers of arguments?
dummy_func(*range(2))
# -
# ### **kwargs
# - `**` is for dictionaries & key/value pairs
# +
# New dummy function
def dummy_func_new(**received):
    """Print the dict of keyword arguments this call received."""
    print(received)
# Call function with no arguments: kwargs is an empty dict
dummy_func_new()
# Call function with 2 keyword arguments
dummy_func_new(a=0, b=1)
# Again, there's no limit to the number of arguments.
dummy_func_new(a=0, b=1, c=2)
# ** unpacks a dict into keyword arguments at call time
new_dict = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
dummy_func_new(**new_dict)
# -
# ### Decorators as Logger and Debugging
# - A simple way to remember the power of decorators is that the decorator (the nested function illustrated below) can
# - (1) access the passed arguments of the decorated function and
# - (2) access the decorated function
# - Therefore this allows us to modify the decorated function without changing the decorated function
# +
# Create a nested function that will be our decorator
def function_inspector(func):
def inner(*args, **kwargs):
result = func(*args, **kwargs)
print(f'Function args: {args}')
print(f'Function kwargs: {kwargs}')
print(f'Function return result: {result}')
return result
return inner
# Decorate our multiply function with our logger for easy logging
# Of arguments pass to the function and results returned
@function_inspector
def multiply_func(num_one, num_two):
return num_one * num_two
multiply_result = multiply_func(num_one=1, num_two=2)
# -
# ## Dates
# ### Get Current Date
# Wall-clock "now" from the standard library.
import datetime
now = datetime.datetime.now()
print(now)
# ### Get Clean String Current Date
# YYYY-MM-DD
# Bug fix: the original used '20%y-%m-%d', hard-coding the "20" century
# prefix onto a 2-digit year; '%Y' is the full 4-digit year and is correct
# for any century.
now.date().strftime('%Y-%m-%d')
# ### Count Business Days
# Number of business days in a month from Jan 2019 to Feb 2019
import numpy as np
days = np.busday_count('2019-01', '2019-02')
print(days)
# ## Progress Bars
# ### TQDM
# Simple progress bar via `pip install tqdm`
from tqdm import tqdm
import time
# Wrap any iterable in tqdm() to get a live progress bar.
# Each iteration sleeps 0.1s, so the full demo takes ~10 seconds.
# (Removed a redundant `pass` statement that followed the sleep.)
for i in tqdm(range(100)):
    time.sleep(0.1)
# ## Check Paths
# ### Check Path Exists
# - Check if directory exists
# +
import os
# Directory name to test; reused by the next cell to create it.
directory='new_dir'
# os.path.exists is True for files OR directories with this path
print(os.path.exists(directory))
# Magic function to list all folders
# !ls -d */
# -
# ### Check Path Exists Otherwise Create Folder
# - Check if directory exists, otherwise make folder
# +
# Create the folder only if it is not already there
# (os.makedirs also creates intermediate directories as needed).
if not os.path.exists(directory):
    os.makedirs(directory)
# Magic function to list all folders
# !ls -d */
# Remove directory
# !rmdir new_dir
# -
# ## Exception Handling
# ### Try, Except, Finally: Error
# - This is very handy and often exploited to patch up (save) poorly written code
# - You can use general exceptions or specific ones like `ValueError`, `KeyboardInterrupt` and `MemoryError` to name a few
# +
# Dividing a str by an int raises TypeError, so the except branch runs.
value_one = 'a'
value_two = 2
# Try the following line of code
try:
    final_sum = value_one / value_two
    print('Code passed!')
# If the code above fails, code nested under except will be executed
# NOTE(review): a bare `except:` catches everything, including
# KeyboardInterrupt/SystemExit; prefer `except Exception:` in real code.
except:
    print('Code failed!')
# This will run no matter whether the nested code in try or except is executed
finally:
    print('Ran code block regardless of error or not.')
# -
# ### Try, Except, Finally: No Error
# - There won't be errors because you can divide 4 with 2
# +
# 4 / 2 succeeds, so the try branch completes and except is skipped.
value_one = 4
value_two = 2
# Try the following line of code
try:
    final_sum = value_one / value_two
    print('Code passed!')
# If the code above fails, code nested under except will be executed
except:
    print('Code failed!')
# This will run no matter whether the nested code in try or except is executed
finally:
    print('Ran code block regardless of error or not.')
# -
# ### Assertion
# - This comes in handy when you want to enforce strict requirmenets of a certain value, shape, value type, or others
# Deliberately raises AssertionError at i == 6 to demonstrate assert;
# values 0..5 pass, then the loop aborts with the given message.
for i in range(10):
    assert i <= 5, 'Value is more than 5, rejected'
    print(f'Passed assertion for value {i}')
# ## Asynchronous
#
# ### Concurrency, Parallelism, Asynchronous
# - Concurrency (single CPU core): multiple threads on a single core running in **sequence**, only 1 thread is making progress at any point
# - Think of 1 human, packing a box then wrapping the box
# - Parallelism (mutliple GPU cores): multiple threads on multiple cores running in **parallel**, multiple threads can be making progress
# - Think of 2 humans, one packing a box, another wrapping the box
# - Asynchronous: concurrency but with a more dynamic system that moves amongst threads more efficiently rather than waiting for a task to finish then moving to the next task
# - Python's `asyncio` allows us to code asynchronously
# - Benefits:
# - Scales better if you need to wait on a lot of processes
# - Less memory (easier in this sense) to wait on thousands of co-routines than running on thousands of threads
# - Good for IO bound uses like reading/saving from databases while subsequently running other computation
# - Easier management than multi-thread processing like in parallel programming
# - In the sense that everything operates sequentially in the same memory space
# ### Asynchronous Key Components
# - The three main parts are (1) coroutines and subroutines, (2) event loops, and (3) future.
# - Co-routine and subroutines
# - Subroutine: the usual function
# - Coroutine: this allows us to maintain states with memory of where things stopped so we can swap amongst subroutines
# - `async` declares a function as a coroutine
# - `await` to call a coroutine
# - Event loops
# - Future
# ### Synchronous 2 Function Calls
# +
import timeit
def add_numbers(num_1, num_2):
    """Add two numbers after a 1-second blocking sleep (simulated work)."""
    print('Adding')
    # time was imported earlier in the file (tqdm cell)
    time.sleep(1)
    return num_1 + num_2
def display_sum(num_1, num_2):
    """Compute and print the sum of two numbers."""
    total_sum = add_numbers(num_1, num_2)
    print(f'Total sum {total_sum}')
def main():
    # Two sequential calls -> roughly 2 seconds total wall time
    display_sum(2, 2)
    display_sum(2, 2)
start = timeit.default_timer()
main()
end = timeit.default_timer()
total_time = end - start
print(f'Total time {total_time:.2f}s')
# -
# ### Parallel 2 Function Calls
# +
from multiprocessing import Pool
from functools import partial
start = timeit.default_timer()
# NOTE(review): the pool is never closed/joined; prefer `with Pool() as pool:`
pool = Pool()
# Run display_sum(2, num_2=2) twice, one call per worker process;
# display_sum returns None, so `result` is [None, None].
result = pool.map(partial(display_sum, num_2=2), [2, 2])
end = timeit.default_timer()
total_time = end - start
print(f'Total time {total_time:.2f}s')
# -
# ### Asynchronous 2 Function Calls
# For this use case, it'll take half the time compared to a synchronous application and slightly faster than parallel application (although not always true for parallel except in this case)
# +
import asyncio
import timeit
import time
async def add_numbers(num_1, num_2):
    """Add two numbers after a non-blocking 1-second pause."""
    print('Adding')
    # asyncio.sleep yields control so other coroutines can run meanwhile
    await asyncio.sleep(1)
    return num_1 + num_2
async def display_sum(num_1, num_2):
    """Await the sum and print it."""
    total_sum = await add_numbers(num_1, num_2)
    print(f'Total sum {total_sum}')
async def main():
    # .gather allows us to group subroutines: both sums run concurrently,
    # so the two 1-second sleeps overlap (~1s total instead of ~2s)
    await asyncio.gather(display_sum(2, 2),
                         display_sum(2, 2))
start = timeit.default_timer()
# For .ipynb, event loop already done
# NOTE: top-level `await` is only valid inside a notebook/IPython session
await main()
# For .py
# asyncio.run(main())
end = timeit.default_timer()
total_time = end - start
print(f'Total time {total_time:.4f}s')
| docs/programming/python/python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import sys
# Make the ghiaseddin package (one directory up) importable
sys.path.append('../')
import ghiaseddin
# %matplotlib inline
# # Baseline
# +
# Pick one dataset/attribute combination; LFW10 attribute 0 is used here,
# the commented alternatives show the other supported datasets.
dataset = ghiaseddin.LFW10(root=ghiaseddin.settings.lfw10_root, attribute_index=0)
# dataset = ghiaseddin.PubFig(root=ghiaseddin.settings.pubfig_root, attribute_index=1)
# dataset = ghiaseddin.Zappos50K1(root=ghiaseddin.settings.zappos_root, attribute_index=0)
extractor = ghiaseddin.GoogLeNet()
# extractor = ghiaseddin.VGG16()
model = ghiaseddin.Ghiaseddin(dataset=dataset, extractor=extractor)
# Baseline weights: extractor learning rate 0 (elr:0.000000 = frozen extractor)
model.load('/home/yasser/ghiaseddin/models/baseline|e:GoogLeNet-d:LFW10-0-bs:16-elr:0.000000-rlr:0.000100-opt:rmsprop-rnl:linear-wd:0.000010-rs:0-iter:1240.npz')
# -
fig = model.generate_embedding()
# # Fine-tuned
# +
dataset = ghiaseddin.LFW10(root=ghiaseddin.settings.lfw10_root, attribute_index=0)
extractor = ghiaseddin.GoogLeNet()
model = ghiaseddin.Ghiaseddin(dataset=dataset, extractor=extractor)
# Fine-tuned weights: non-zero extractor learning rate (elr:0.000010)
model.load('/home/yasser/ghiaseddin/models/e:GoogLeNet-d:LFW10-0-bs:16-elr:0.000010-rlr:0.000100-opt:rmsprop-rnl:linear-wd:0.000010-rs:0-iter:1240.npz')
# -
fig = model.generate_embedding()
| notebooks/embedding playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6rSgMcG8-wvY" colab_type="text"
# # Project objective
# This project is designed to review random forest method and its python implementation using UCI hand-written digit dataset.
#
# Information about the dataset, some technical details about the used machine learning method(s) and mathematical details of the quantifications approaches are provided in the code.
# + [markdown] id="VjtJFxdsNh05" colab_type="text"
# # Packages we work with in this notebook
# We are going to use the following libraries and packages:
#
# * **numpy**: NumPy is the fundamental package for scientific computing with Python. (http://www.numpy.org/)
# * **sklearn**: Scikit-learn is a machine learning library for Python programming language. (https://scikit-learn.org/stable/)
#
# + id="57oB2idEgr-g" colab_type="code" colab={}
import numpy as np
import sklearn as sk
# + [markdown] id="Bb1Zm7ARN5D5" colab_type="text"
# # Introduction to the dataset
#
# **Name**: UCI ML digit image data
#
# **Summary**: Images of hand-written digits in UCI ML repository
#
# **number of features**: 8*8(64) pixels (features)
#
# **Number of data points (instances)**: 1797
#
# **dataset accessibility**: Dataset is available as part of sklearn package.
#
#
#
# + [markdown] id="QjBnejgpP0Gr" colab_type="text"
# ## Loading the dataset and separating features and labels
# The dataset is available as part of sklearn package. Hence, we do not need to import the data directly from UCI ML repository.
# + id="RILQWrhjQUtF" colab_type="code" outputId="91f62b01-44f9-44f7-adad-f30c1c83b5af" colab={"base_uri": "https://localhost:8080/", "height": 52}
from sklearn import datasets
# Loading digit images (8x8 grayscale, bundled with sklearn)
digits = datasets.load_digits()
# separating feature arrays of pixel values (X) and labels (y)
input_features = digits.data
output_var = digits.target
# printing number of features (pixels) and data points
n_samples, n_features = input_features.shape
print("number of samples (data points):", n_samples)
print("number of features:", n_features)
# + [markdown] id="B8CRknwb3Izp" colab_type="text"
# ## Splitting data to training and testing sets
#
# We need to split the data to train and test, if we do not have a separate dataset for validation and/or testing, to make sure about generalizability of the model we train.
#
# **test_size**: Traditionally, 30%-40% of the dataset cna be used for test set. If you split the data to train, validation and test, you can use 60%, 20% and 20% of teh dataset, respectively.
#
# **Note.**: We need the validation and test sets to be big enough for checking generalizability of our model. At the same time we would like to have as much data as possible in the training set to train a better model.
#
# **random_state** as the name suggests, is used for initializing the internal random number generator, which will decide the splitting of data into train and test indices in your case.
#
# + id="p-MQLTHE3JUN" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# Hold out 30% for testing; random_state=5 makes the split reproducible
X_train, X_test, y_train, y_test = train_test_split(input_features, output_var, test_size=0.30, random_state=5)
# + [markdown] id="lIebwbGB3Pss" colab_type="text"
# ## Building the supervised learning model
# We want to build a multi-class classification model as the output variable is categorical with 10 classes. Here we build a random forest model.
#
# ### Decision tree
# A decision tree is built starting from the best feature splitting the data points to 2 purest possible groups. Then each group is splitted again by next best features for purification of groups. Although this process can be continued till getting to 100% purity (having only one class) in each group, it would probably lower than generalizability of the model. Hence, we usually cut the tree before getting to 100% purity.
#
# ### Random forest
# Decision trees usually have high variance, meaning their prediction performance varies largely between datasets. To overcome this issue we can rely on concept of ensemble learning. In ensemble learning we want to use wisdom of crowd instead of single classifier. For example, random forest as an ensemble model uses multiple decision trees to predict class of each data point. Here is the process of bulding a random forest model:
#
# 1) Randomly sampling data points with replacement (bootstrapping)
#
# 2) Randomly selecting the features
#
# 3) Build a decision tree using the randomly selected data points and features in steps 1 and 2.
#
# 4) Building multiple decision trees as decsribed in steps 1 to 3
#
# 5) Using majority vote of all the decision trees as the identified class for a given data point
#
# Note. We don't need to write code for these steps but they will be done automatically when using random forest in python. But we need to know how it works.
#
#
#
# + id="r7-0Y0a93c0R" colab_type="code" outputId="58defadb-861c-4af5-ae52-5ff88ba9fb86" colab={"base_uri": "https://localhost:8080/", "height": 158}
from sklearn.ensemble import RandomForestClassifier
# Create random forest classifier object (default hyperparameters)
rf = RandomForestClassifier()
# Train the model using the training sets
rf.fit(X_train, y_train)
# + [markdown] id="Kw5xChiS5mWv" colab_type="text"
# ## Prediction of test (or validation) set
# We now have to use the trained model to predict y_test.
# + id="1ALm8Q6k5mw3" colab_type="code" colab={}
# Make predictions using the testing set (one predicted digit per test image)
y_pred = rf.predict(X_test)
# + [markdown] id="bjMC2z0Y5zHk" colab_type="text"
# ## Evaluating performance of the model
# We need to assess performance of the model using the predictions of the test set. We use F1 score for this model. Here is the definition of F1 score:
#
#
# * **F1 score** is the harmonic mean of precision and recall as follows
#
# $${\displaystyle {\text{F1}}={\frac {2}{\frac {1}{precision}+ \frac {1}{recall}}}\,} $$
#
# where
#
# * **precision** is the fraction of true positives out of all the positive predictions
#
# $${\displaystyle {\text{precision}}={\frac {tp}{tp+fp}}\,} $$
#
# * **recall** is also referred to as the true positive rate or sensitivity
#
#
#
#
# $${\displaystyle {\text{recall}}={\frac {tp}{tp+fn}}\,} $$
#
#
#
#
# + id="AVaDA0LM8PbJ" colab_type="code" outputId="ed9430ed-b2ee-4903-cde3-9071a5ea07b5" colab={"base_uri": "https://localhost:8080/", "height": 52}
from sklearn import metrics
# average=None reports one F1 score per class (digits 0-9) instead of a single aggregate
print("F1 score of the predictions:", metrics.f1_score(y_test, y_pred, average=None))
# + [markdown] id="A_ucoJSd8Vlo" colab_type="text"
# **Note** We cannot use default value of "average" parameter in metrics.f1_score which is "binary" as it is designed for binary classification while we are dealing with multi-class classification here.
# + [markdown] id="M3Nbytxb8lAU" colab_type="text"
# ### Interpretation of results
# As we can see, we could achieve more than 0.94 F1 score for all the classes (digits 0 to 9). However, there is still a gap between class 0 (images of digit 0) and classes 8 and 9 (images of digits 8 and 9). Hence, we need to figure out if the lower F1 scores of classes 8 and 9 are due to lower precision or recall or both. As we can see in the following results, precision and recall of class 8 are the same while precision of class 9 is higher than its recall. Interestingly, there are classes which their precision is higher than their recall (such as classes 0 and 2) while the reverse is true for some other classes (such as class 9).
# + id="f92lALID9WSI" colab_type="code" outputId="3d6edfea-61ae-4657-e9a4-067cee985286" colab={"base_uri": "https://localhost:8080/", "height": 87}
# Per-class precision and recall, to see which component drives each F1 score
print("precision of the predictions:", metrics.precision_score(y_test, y_pred, average=None))
print("recall of the predictions:", metrics.recall_score(y_test, y_pred, average=None))
| code/project9_randomforest_UCI_digitimage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Age Based on Image Regressor
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai import *
from fastai.vision import *
from scipy.io import loadmat
from datetime import datetime
# Dataset from https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/
# @article{Rothe-IJCV-2016,
# author = {<NAME> and <NAME> and <NAME>},
# title = {Deep expectation of real and apparent age from a single image without facial landmarks},
# journal = {International Journal of Computer Vision (IJCV)},
# year = {2016},
# month = {July},
# }
# Age extraction code based on https://github.com/yu4u/age-gender-estimation
# Root folder of the IMDB-WIKI "wiki_crop" image dump
path = Path('data/ages/wiki_crop')
def calc_age(taken, dob):
    """Return the age (in whole years) at the year the photo was taken.

    `dob` is a Matlab serial date number; subtracting 366 days converts
    it to a Python proleptic ordinal (clamped to at least 1).
    """
    born = datetime.fromordinal(max(int(dob) - 366, 1))
    # assume the photo was taken in the middle of the year: birthdays in
    # the second half of the year have not yet occurred
    return taken - born.year if born.month < 7 else taken - born.year - 1
# Load the Matlab metadata file shipped with the wiki_crop dump
mat_path = path/'wiki.mat'
meta = loadmat(mat_path)
dob = meta['wiki'][0, 0]["dob"][0] # Matlab serial date number
dob
photo_taken = meta['wiki'][0, 0]["photo_taken"][0] # year
# Age at photo time for every record, via calc_age above
age = [calc_age(photo_taken[i], dob[i]) for i in range(len(dob))]
len(age)
type(age)
age_array = np.array(age)
age_array
# Relative image paths, stored as single-element arrays in the .mat file
pic_path = meta['wiki'][0, 0]["full_path"][0]
str(pic_path[0])
all_paths = [str(pic[0]) for pic in pic_path]
all_paths[0]
type(pic_path[0])
# Face-detector confidence score per image (used below to filter bad crops)
face_score = meta['wiki'][0, 0]["face_score"][0]
face_score[0]
type(face_score)
# Spot-check a single record: show the image and its computed age
tester = 24
test_path = str(pic_path[tester][0]); age[tester]
fname = path/test_path
img = open_image(fname)
img.show()
img.size
# Assemble a labels frame: file name, age label, face score
labels = pd.DataFrame(all_paths, columns=['fn_col'])
labels['label_col'] = age_array
labels['face_score'] = face_score
labels.head()
item_index = 273
img = open_image(path/labels['fn_col'][item_index])
img.show(); labels['label_col'][item_index]
labels.describe()
# Some records have absurd ages (bad dob parsing) -- inspect them
labels[labels['label_col'] > 1000]
open_image(path/'70/2051570_647-05-15_2010.jpg').show()
# Keep only plausible ages (exclusive bounds 0 < age < 100)
filtered_labels = labels[np.logical_and(labels['label_col'] > 0, labels['label_col'] < 100)]
filtered_labels.head(3)
filtered_labels.describe()
filtered_labels[filtered_labels['label_col'] > 95].head(10)
open_image(path/'35/44163135_1906-02-03_2004.jpg').show()
# Also drop images where the face detector had non-positive confidence
filtered_labels = filtered_labels[filtered_labels['face_score'] > 0]
filtered_labels.describe()
# Build the fastai data pipeline: images from the filtered frame,
# random train/valid split, float (regression) labels, resized to 224px
data = (ImageItemList.from_df(path=path, df=filtered_labels, cols='fn_col')
        .random_split_by_pct()
        .label_from_df(cols='label_col', label_cls=FloatList)
        .transform(get_transforms(), size=224)
        .databunch()
        )
data.normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,6))
data.train_ds[0]
data.train_ds.y
# ResNet-34 backbone; MSE loss because this is regression, not classification
learn = create_cnn(data, models.resnet34)
learn.loss_func = MSELossFlat()
learn.lr_find()
learn.recorder.plot()
lr = 2e-1
# Stage 1: train the head only, then unfreeze and fine-tune the whole network
learn.fit_one_cycle(5, slice(lr))
learn.save('age-nov22-stage-1')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# Repeated one-cycle epochs with discriminative learning rates,
# checkpointing after most epochs
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-2')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-3')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-4')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-5')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-6')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-7')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-8')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-9')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-10')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
learn.save('age-nov22-stage-11')
learn.fit_one_cycle(1, slice(2e-4, 2e-2))
# Roll back to the stage-10 checkpoint (later epochs presumably overfit
# -- TODO confirm against the recorded validation losses)
learn.load('age-nov22-stage-10')
learn.get_preds()
learn.validate()
def my_predict(learner, img:Image):
    """Predict on a single image by temporarily injecting it into the
    learner's validation set, then restoring the set afterwards."""
    valid_set = learner.data.valid_ds
    valid_set.set_item(img)
    prediction = learner.pred_batch()[0]
    valid_set.clear_item()
    return prediction
my_predict(learn, img)
img.show()
# Try the model on a few out-of-dataset portraits; predict() returns a
# FloatList prediction whose first element is the estimated age
test_image = open_image('./data/ages/estelle-getty.jpg')
test_image.show()
float(learn.predict(test_image)[0][0])
test_image2 = open_image('./data/ages/js.jpeg')
test_image2.show()
# Round the regression output to the nearest whole year
int(round(float(learn.predict(test_image2)[0][0]),0))
test_image3 = open_image('./data/ages/lhsProfilePicColor.jpg')
test_image3.show()
int(round(float(learn.predict(test_image3)[0][0]),0))
test_image4 = open_image('./data/ages/jss.jpeg')
test_image4.show()
int(round(float(learn.predict(test_image4)[0][0]),0))
| nbs/dl1/age_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# +
# Daily BTC/USD OHLC data from Quandl
# NOTE(review): the API key is embedded in the URL; move it to an
# environment variable or config file before sharing this notebook.
df = pd.read_csv('https://www.quandl.com/api/v3/datasets/BITFINEX/BTCUSD.csv?api_key=DY13zP9xEZzumGkWckas')
df.head(5)
# -
df['Date'] = pd.DatetimeIndex(df['Date'])
df.dtypes
# Prophet requires the columns to be named 'ds' (datestamp) and 'y' (value)
df2 = df.rename(columns={'Date':'ds', 'High': 'y'})
df2.head()
df2 = df2[['ds','y']]
# +
ax = df2.set_index('ds').plot(figsize=(12, 8))
ax.set_ylabel('Daily high usd to btc')
ax.set_xlabel('Date')
plt.show()
# -
# 95% uncertainty interval for the forecast bands
my_model = Prophet(interval_width=0.95)
my_model.fit(df2)
# Extend the frame 365 days past the last observation
future_dates = my_model.make_future_dataframe(periods=365, freq='D')
future_dates.tail()
forecast = my_model.predict(future_dates)
# yhat is the point forecast; yhat_lower/upper bound the uncertainty interval
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
my_model.plot(forecast,
              uncertainty=True)
my_model.plot_components(forecast)
| high price.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Tce3stUlHN0L"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="qFdPvlXBOdUN"
# # Estimators
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/estimator"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="oEinLJt2Uowq"
# This document introduces `tf.estimator`โa high-level TensorFlow
# API. Estimators encapsulate the following actions:
#
# * training
# * evaluation
# * prediction
# * export for serving
#
# You may either use the pre-made Estimators we provide or write your
# own custom Estimators. All Estimatorsโwhether pre-made or customโare
# classes based on the `tf.estimator.Estimator` class.
#
# For a quick example try [Estimator tutorials](../tutorials/estimator/linear.ipynb). For an overview of the API design, see the [white paper](https://arxiv.org/abs/1708.02637).
# + [markdown] id="Wg5zbBliQvNL"
# ## Advantages
#
# Similar to a `tf.keras.Model`, an `estimator` is a model-level abstraction. The `tf.estimator` provides some capabilities currently still under development for `tf.keras`. These are:
#
# * Parameter server based training
# * Full [TFX](http://tensorflow.org/tfx) integration.
# + [markdown] id="yQ8fQYt_VD5E"
# ## Estimators Capabilities
# Estimators provide the following benefits:
#
# * You can run Estimator-based models on a local host or on a distributed multi-server environment without changing your model. Furthermore, you can run Estimator-based models on CPUs, GPUs, or TPUs without recoding your model.
# * Estimators provide a safe distributed training loop that controls how and when to:
# * load data
# * handle exceptions
# * create checkpoint files and recover from failures
# * save summaries for TensorBoard
#
# When writing an application with Estimators, you must separate the data input
# pipeline from the model. This separation simplifies experiments with
# different data sets.
# + [markdown] id="jQ2PsufpgIpM"
# ## Using pre-made Estimators
#
# Pre-made Estimators enable you to work at a much higher conceptual level than the base TensorFlow APIs. You no longer have to worry about creating the computational graph or sessions since Estimators handle all the "plumbing" for you. Furthermore, pre-made Estimators let you experiment with different model architectures by making only minimal code changes. `tf.estimator.DNNClassifier`, for example, is a pre-made Estimator class that trains classification models based on dense, feed-forward neural networks.
#
# A TensorFlow program relying on a pre-made Estimator typically consists of the following four steps:
# + [markdown] id="mIJPPe26gQpF"
# ### 1. Write an input functions
#
# For example, you might create one function to import the training set and another function to import the test set. Estimators expect their inputs to be formatted as a pair of objects:
#
# * A dictionary in which the keys are feature names and the values are Tensors (or SparseTensors) containing the corresponding feature data
# * A Tensor containing one or more labels
#
# The `input_fn` should return a `tf.data.Dataset` that yields pairs in that format.
#
# For example, the following code builds a `tf.data.Dataset` from the Titanic dataset's `train.csv` file:
# + id="7fl_C5d6hEl3"
import tensorflow as tf
def train_input_fn():
    """Input function for Estimator training on the Titanic dataset.

    Downloads (and caches) the Titanic ``train.csv``, then returns a
    ``tf.data.Dataset`` yielding ``(features_dict, label)`` batches.
    The dataset repeats indefinitely, is shuffled, and is prefetched.
    """
    csv_path = tf.keras.utils.get_file(
        "train.csv",
        "https://storage.googleapis.com/tf-datasets/titanic/train.csv")
    dataset = tf.data.experimental.make_csv_dataset(
        csv_path, batch_size=32, label_name="survived")
    dataset = dataset.cache()
    dataset = dataset.repeat()
    dataset = dataset.shuffle(500)
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)
# + [markdown] id="CjyrQGb3mCcp"
# The `input_fn` is executed in a `tf.Graph` and can also directly return a `(features_dics, labels)` pair containing graph tensors, but this is error prone outside of simple cases like returning constants.
# + [markdown] id="yJYjWUMxgTnq"
# ### 2. Define the feature columns.
#
# Each `tf.feature_column` identifies a feature name, its type, and any input pre-processing.
#
# For example, the following snippet creates three feature columns.
#
# - The first uses the `age` feature directly as a floating-point input.
# - The second uses the `class` feature as a categorical input.
# - The third uses the `embark_town` as a categorical input, but uses the `hashing trick` to avoid the need to enumerate the options, and to set the number of options.
#
# For further information, see the [feature columns tutorial](https://www.tensorflow.org/tutorials/keras/feature_columns).
# + id="lFd8Dnrmhjhr"
# Feature columns tell the Estimator how to interpret each raw input.
# 'age' is used directly as a floating-point feature.
age = tf.feature_column.numeric_column('age')
# 'class' is categorical with an explicit three-value vocabulary.
cls = tf.feature_column.categorical_column_with_vocabulary_list('class', ['First', 'Second', 'Third'])
# 'embark_town' is hashed into 32 buckets (hashing trick: no vocabulary needed).
embark = tf.feature_column.categorical_column_with_hash_bucket('embark_town', 32)
# + [markdown] id="UIjqAozjgXdr"
# ### 3. Instantiate the relevant pre-made Estimator.
#
# For example, here's a sample instantiation of a pre-made Estimator named `LinearClassifier`:
# + id="CDOx6lZVoVB8"
import tempfile
# Fresh temporary directory where the Estimator writes checkpoints/summaries.
model_dir = tempfile.mkdtemp()
# Binary (n_classes=2) linear classifier over the three feature columns above.
model = tf.estimator.LinearClassifier(
    model_dir=model_dir,
    feature_columns=[embark, cls, age],
    n_classes=2
)
# + [markdown] id="QGl9oYuFoYj6"
# For further information, see the [linear classifier tutorial](https://www.tensorflow.org/tutorials/estimator/linear).
# + [markdown] id="sXNBeY-oVxGQ"
# ### 4. Call a training, evaluation, or inference method.
#
# All Estimators provide `train`, `evaluate`, and `predict` methods.
#
# + id="iGaJKkmVBgo2"
# Train for 100 steps. NOTE(review): rebinding `model` here relies on
# Estimator.train returning the estimator itself — confirm against the
# tf.estimator docs for the version in use.
model = model.train(input_fn=train_input_fn, steps=100)
# + id="CXkivCNq0vfH"
# Evaluate over 10 batches drawn from the same training input pipeline.
result = model.evaluate(train_input_fn, steps=10)
for key, value in result.items():
    print(key, ":", value)
# + id="CPLD8n4CLVi_"
# Inspect the fields of a single prediction, then stop iterating.
for pred in model.predict(train_input_fn):
    for key, value in pred.items():
        print(key, ":", value)
    break
# + [markdown] id="cbmrm9pFg5vo"
# ### Benefits of pre-made Estimators
#
# Pre-made Estimators encode best practices, providing the following benefits:
#
# * Best practices for determining where different parts of the computational graph should run, implementing strategies on a single machine or on a
# cluster.
# * Best practices for event (summary) writing and universally useful
# summaries.
#
# If you don't use pre-made Estimators, you must implement the preceding features yourself.
# + [markdown] id="oIaPjYgnZdn6"
# ## Custom Estimators
#
# The heart of every Estimatorโwhether pre-made or customโis its *model function*, which is a method that builds graphs for training, evaluation, and prediction. When you are using a pre-made Estimator, someone else has already implemented the model function. When relying on a custom Estimator, you must write the model function yourself.
#
# So the recommended workflow is:
#
# 1. Assuming a suitable pre-made Estimator exists, use it to build your first model and use its results to establish a baseline.
# 2. Build and test your overall pipeline, including the integrity and reliability of your data with this pre-made Estimator.
# 3. If suitable alternative pre-made Estimators are available, run experiments to determine which pre-made Estimator produces the best results.
# 4. Possibly, further improve your model by building your own custom Estimator.
# + [markdown] id="P7aPNnXUbN4j"
# ## Create an Estimator from a Keras model
#
# You can convert existing Keras models to Estimators with `tf.keras.estimator.model_to_estimator`. Doing so enables your Keras
# model to access Estimator's strengths, such as distributed training.
#
# Instantiate a Keras MobileNet V2 model and compile the model with the optimizer, loss, and metrics to train with:
# + id="kRr7DGZxFApM"
import tensorflow as tf
import tensorflow_datasets as tfds
# + id="XE6NMcuGeDOP"
# MobileNetV2 backbone without its classification head; frozen so that only
# the new Dense head below is trained.
keras_mobilenet_v2 = tf.keras.applications.MobileNetV2(
    input_shape=(160, 160, 3), include_top=False)
keras_mobilenet_v2.trainable = False
# Frozen backbone -> global average pooling -> single-logit binary head.
estimator_model = tf.keras.Sequential([
    keras_mobilenet_v2,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(1)
])
# Compile the model; from_logits=True because the Dense head has no sigmoid.
estimator_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=['accuracy'])
# + [markdown] id="A3hcxzcEfYfX"
# Create an `Estimator` from the compiled Keras model. The initial model state of the Keras model is preserved in the created `Estimator`:
# + id="UCSSifirfyHk"
est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model)
# + [markdown] id="8jRNRVb_fzGT"
# Treat the derived `Estimator` as you would with any other `Estimator`.
# + id="Rv9xJk51e1fB"
IMG_SIZE = 160 # All images will be resized to 160x160
def preprocess(image, label):
    """Cast the image to float32, scale pixels to [-1, 1], and resize.

    Returns the (image, label) pair expected by the input pipeline.
    Uses the module-level IMG_SIZE for the target resolution.
    """
    scaled = tf.cast(image, tf.float32) / 127.5 - 1
    resized = tf.image.resize(scaled, (IMG_SIZE, IMG_SIZE))
    return resized, label
# + id="Fw8OjwujVBkc"
def train_input_fn(batch_size):
    """Load the cats_vs_dogs training split as a batched tf.data.Dataset.

    NOTE(review): this redefines the earlier Titanic ``train_input_fn`` in
    the same notebook namespace.
    """
    data = tfds.load('cats_vs_dogs', as_supervised=True)
    train_data = data['train']
    train_data = train_data.map(preprocess).shuffle(500).batch(batch_size)
    return train_data
# + [markdown] id="JMb0cuy0gbTi"
# To train, call Estimator's train function:
# + id="4JsvMp8Jge80"
est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=500)
# + [markdown] id="jvr_rAzngY9v"
# Similarly, to evaluate, call the Estimator's evaluate function:
# + id="kVNPqysQgYR2"
est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10)
# + [markdown] id="5HeTOvCYbjZb"
# For more details, please refer to the documentation for `tf.keras.estimator.model_to_estimator`.
| site/en/guide/estimator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ์ ๊ตญ ์ ๊ท ๋ฏผ๊ฐ ์ํํธ ๋ถ์๊ฐ๊ฒฉ ๋ํฅ
# * 2015๋ 10์๋ถํฐ 2018๋ 4์๊น์ง
# * ์ฃผํ๋ถ์๋ณด์ฆ์ ๋ฐ์ ๋ถ์ํ ์ ์ฒด ๋ฏผ๊ฐ ์ ๊ท์ํํธ ๋ถ์๊ฐ๊ฒฉ ๋ํฅ
# * https://www.data.go.kr/dataset/3035522/fileData.do
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
import os
# List the working directory; CSV inputs are picked by position below.
fhand = os.listdir()
fhand
# NOTE(review): indexing os.listdir() output is order-dependent and fragile;
# the file chosen as fhand[2] may differ between machines.
pre_sale = pd.read_csv(fhand[2], encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# ๋ถ์๊ฐ๊ฒฉ์ด ์ซ์ ํ์ ์ด ์๋๋๋ค. ์ซ์ ํ์ ์ผ๋ก ๋ณ๊ฒฝํด์ค ํ์๊ฐ ์๊ฒ ์ด์.
pre_sale.info()
# Keep a reference to the raw price column for numeric conversion below.
pre_sale_price = pre_sale['๋ถ์๊ฐ๊ฒฉ(ใก)']
# ์ฐ๋์ ์์ ์นดํ ๊ณ ๋ฆฌ ํํ์ ๋ฐ์ดํฐ์ด๊ธฐ ๋๋ฌธ์ ์คํธ๋ง ํํ๋ก ๋ณ๊ฒฝ
# Treat year and month as categorical labels: cast both to str.
pre_sale['์ฐ๋'] = pre_sale['์ฐ๋'].astype(str)
pre_sale['์'] = pre_sale['์'].astype(str)
# ๋ถ์๊ฐ๊ฒฉ์ ํ์ ์ ์ซ์๋ก ๋ณ๊ฒฝํด ์ค๋๋ค.
# Coerce the price text to numbers; unparseable entries become NaN.
pre_sale['๋ถ์๊ฐ๊ฒฉ'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Price per pyeong (1 pyeong = 3.3 m^2).
pre_sale['ํ๋น๋ถ์๊ฐ๊ฒฉ'] = pre_sale['๋ถ์๊ฐ๊ฒฉ'] * 3.3
pre_sale.info()
# Count the missing values introduced by the coercion above.
pre_sale.isnull().sum()
pre_sale.describe()
# 2017๋ ๋ฐ์ดํฐ๋ง ๋ด ๋๋ค.
# NOTE: '์ฐ๋' was cast to str above, so compare against the string '2017'.
# Comparing with the int 2017 matches no rows and silently yields an
# empty DataFrame.
pre_sale_2017 = pre_sale.loc[pre_sale['์ฐ๋'] == '2017']
pre_sale_2017.shape
# Each size bracket (๊ท๋ชจ๊ตฌ๋ถ) appears the same number of times per period.
pre_sale['๊ท๋ชจ๊ตฌ๋ถ'].value_counts()
# # ์ ๊ตญํ๊ท ๋ถ์๊ฐ๊ฒฉ
# ๋ถ์๊ฐ๊ฒฉ๋ง ๋ดค์ ๋ 2015๋ ์์ 2018๋ ์ผ๋ก ๊ฐ์๋ก ์ค๋ฅธ ๊ฒ์ ํ์ธํ ์ ์์ต๋๋ค.
# Show floats with thousands separators and no decimals in DataFrame output.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.์ฐ๋).describe().T
# ## ๊ท๋ชจ๋ณ ์ ๊ตญ ํ๊ท ๋ถ์๊ฐ๊ฒฉ
# Mean price per pyeong by size bracket (rows) and year (columns).
pre_sale.pivot_table('ํ๋น๋ถ์๊ฐ๊ฒฉ', '๊ท๋ชจ๊ตฌ๋ถ', '์ฐ๋')
# # ์ ๊ตญ ๋ถ์๊ฐ ๋ณ๋๊ธ์ก
# ๊ท๋ชจ๊ตฌ๋ถ์ด ์ ์ฒด๋ก ๋์ด์๋ ๊ธ์ก์ผ๋ก ์ฐ๋๋ณ ๋ณ๋๊ธ์ก์ ์ดํด๋ด ๋๋ค.
# +
# Keep only the aggregate rows (๊ท๋ชจ๊ตฌ๋ถ == '์ ์ฒด'), i.e. all sizes combined.
region_year_all = pre_sale.loc[pre_sale['๊ท๋ชจ๊ตฌ๋ถ'] == '์ ์ฒด']
region_year = region_year_all.pivot_table('ํ๋น๋ถ์๊ฐ๊ฒฉ', '์ง์ญ๋ช
', '์ฐ๋').reset_index()
# Change between the '2015' and '2018' columns (year labels are strings).
region_year['๋ณ๋์ก'] = region_year['2018'] - region_year['2015']
# NOTE(review): *1000 presumably converts from thousands of won to won —
# confirm the source data's units.
max_delta_price = np.max(region_year['๋ณ๋์ก']).astype('int32')*1000
min_delta_price = np.min(region_year['๋ณ๋์ก']).astype('int32')*1000
mean_delta_price = np.mean(region_year['๋ณ๋์ก']).astype('int32')*1000
print('2015๋
๋ถํฐ 2018๋
๊น์ง ๋ถ์๊ฐ๋ ๊ณ์ ์์นํ์ผ๋ฉฐ, ์์น์ก์ด ๊ฐ์ฅ ํฐ ์ง์ญ์ ์ ์ฃผ์ด๋ฉฐ ์์น์ก์ ํ๋น {:,.0f}์์ด๋ค.'.format(max_delta_price))
print('์์น์ก์ด ๊ฐ์ฅ ์์ ์ง์ญ์ ์ธ์ฐ์ด๋ฉฐ ํ๋น {:,.0f}์์ด๋ค.'.format(min_delta_price))
print('์ ๊ตญ ํ๊ท ๋ณ๋์ก์ ํ๋น {:,.0f}์์ด๋ค.'.format(mean_delta_price))
region_year
# -
np.min(region_year['๋ณ๋์ก'])
# # ์ฐ๋๋ณ ๋ณ๋ ๊ทธ๋ํ
(ggplot(region_year_all, aes(x='์ง์ญ๋ช
', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='์ฐ๋'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# ## ์ง์ญ๋ณ ํ๋น ๋ถ์๊ฐ๊ฒฉ ํฉ๊ณ
# * ์๋ ๋ฐ์ดํฐ๋ก ์ด๋์ ๋ ๊ท๋ชจ๋ก ๋ถ์์ฌ์
์ด ์ด๋ฃจ์ด์ก๋์ง๋ฅผ ๋ด
๋๋ค.
# * ์ ์ฒด ๋ฐ์ดํฐ๋ก ๋ดค์ ๋ ์์ธ, ๊ฒฝ๊ธฐ, ๋ถ์ฐ, ์ ์ฃผ์ ๋ถ์ ์ฌ์
์ด ๋ค๋ฅธ ์ง์ญ์ ๋นํด ๊ท๋ชจ๊ฐ ํฐ ๊ฒ์ผ๋ก ๋ณด์ฌ์ง์ง๋ง ๋ถ์๊ฐ๊ฒฉ๋๋น๋ก ๋๋ ๋ณผ ํ์๊ฐ ์์ต๋๋ค.
pre_sale.pivot_table('ํ๋น๋ถ์๊ฐ๊ฒฉ', '๊ท๋ชจ๊ตฌ๋ถ', '์ง์ญ๋ช
')
# ## ๊ท๋ชจ๋ณ
# ์์ธ์ ๊ฒฝ์ฐ ์ ์ฉ๋ฉด์ 85ใก์ด๊ณผ 102ใก์ดํ๊ฐ ๋ถ์๊ฐ๊ฒฉ์ด ๊ฐ์ฅ ๋น์ธ๊ฒ ๋์ต๋๋ค.
(ggplot(pre_sale, aes(x='์ง์ญ๋ช
', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# ์์ ๊ทธ๋ฆฐ ๊ทธ๋ํ๋ฅผ ์ง์ญ๋ณ๋ก ๋๋ ๋ด
๋๋ค.
(ggplot(pre_sale)
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('์ง์ญ๋ช
')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# ๋ฐ์คํ๋กฏ์ ๊ทธ๋ ค๋ด
๋๋ค.
(ggplot(pre_sale, aes(x='์ง์ญ๋ช
', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['์ง์ญ๋ช
']=='์์ธ']
(ggplot(pre_sale_seoul)
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015๋
์์ 2018๋
๊น์ง ๋ถ์๊ฐ ์ฐจ์ด๊ฐ ๊ฐ์ฅ ์ปธ๋ ์ ์ฃผ๋ฅผ ๋ด
๋๋ค.
(ggplot(pre_sale.loc[pre_sale['์ง์ญ๋ช
']=='์ ์ฃผ'])
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015๋
์์ 2018๋
๊น์ง ๋ถ์๊ฐ ์ฐจ์ด๊ฐ ๊ฐ์ฅ ์์๋ ์ธ์ฐ์ ๋ด
๋๋ค.
(ggplot(pre_sale.loc[pre_sale['์ง์ญ๋ช
']=='์ธ์ฐ'])
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='๊ท๋ชจ๊ตฌ๋ถ')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# # 2013๋
12์~2015๋
9์ 3.3ใก๋น ๋ถ์๊ฐ๊ฒฉ
# * 2015๋
10์๋ถํฐ 2018๋
4์๊น์ง ๋ฐ์ดํฐ๋ ํ๋น ๋ถ์๊ฐ๋ก ์กฐ์ ์ ํด์ฃผ์์๋๋ฐ ์ด ๋ฐ์ดํฐ๋ ํ๋น ๋ถ์๊ฐ๊ฐ ๋ค์ด๊ฐ ์๋ค.
fhand
df = pd.read_csv(fhand[1], \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas์์ ๋ณด๊ธฐ ์ฝ๊ฒ ์ปฌ๋ผ์ ๋ณ๊ฒฝํด ์ค ํ์๊ฐ ์๋ค.
df.head()
year = df.iloc[0]
month = df.iloc[1]
# ๊ฒฐ์ธก์น๋ฅผ ์ฑ์์ค๋ค.
year
# +
# ์ปฌ๋ผ์ ์๋ก ๋ง๋ค์ด ์ฃผ๊ธฐ ์ํด 0๋ฒ์งธ์ 1๋ฒ์งธ ํ์ ํฉ์ณ์ค๋ค.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014๋
' + month[i]
elif i >= 15:
year[i] = '2015๋
' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '์๊ตฐ๊ตฌ'
print(year)
# -
df.columns = year
df = df.drop(df.index[[0,1]])
df
# ์ง์ญ ์ปฌ๋ผ์ ์๋ก ๋ง๋ค์ด ์๋์ ์๊ตฐ๊ตฌ๋ฅผ ํฉ์ณ์ค๋ค.
df['๊ตฌ๋ถ'] = df['๊ตฌ๋ถ'].fillna('')
df['์๊ตฐ๊ตฌ'] = df['์๊ตฐ๊ตฌ'].fillna('')
df['์ง์ญ'] = df['๊ตฌ๋ถ'] + df['์๊ตฐ๊ตฌ']
df['์ง์ญ']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['์ง์ญ'], value_vars=['2013๋
12์', '2014๋
1์', '2014๋
2์', '2014๋
3์',
'2014๋
4์', '2014๋
5์', '2014๋
6์', '2014๋
7์', '2014๋
8์',
'2014๋
9์', '2014๋
10์', '2014๋
11์', '2014๋
12์', '2015๋
1์',
'2015๋
2์', '2015๋
3์', '2015๋
4์', '2015๋
5์', '2015๋
6์',
'2015๋
7์', '2015๋
8์', '2015๋
9์'])
df_2013_2015.head()
df_2013_2015.columns = ['์ง์ญ', '๊ธฐ๊ฐ', '๋ถ์๊ฐ']
df_2013_2015.head()
df_2013_2015['์ฐ๋'] = df_2013_2015['๊ธฐ๊ฐ'].apply(lambda year_month : year_month.split('๋
')[0])
df_2013_2015['์'] = df_2013_2015['๊ธฐ๊ฐ'].apply(lambda year_month : re.sub('์', '', year_month.split('๋
')[1]).strip())
df_2013_2015.head()
# ## ์ง์ญ๋ช
๊ฐ์๊ณผ ๋ถ์ฐ ์ ๋ฆฌ
df_2013_2015['์ง์ญ'].value_counts()
df_2013_2015['์ง์ญ'] = df_2013_2015['์ง์ญ'].apply(lambda x: re.sub('6๋๊ด์ญ์๋ถ์ฐ','๋ถ์ฐ', x))
df_2013_2015['์ง์ญ'] = df_2013_2015['์ง์ญ'].apply(lambda x: re.sub('์ง๋ฐฉ๊ฐ์','๊ฐ์', x))
df_2013_2015['์ง์ญ'].value_counts()
df_2013_2015.describe()
df_2013_2015['๋ถ์๊ฐ๊ฒฉ'] = df_2013_2015['๋ถ์๊ฐ'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='์ง์ญ', y='๋ถ์๊ฐ๊ฒฉ', fill='์ฐ๋'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='์ง์ญ', y='๋ถ์๊ฐ๊ฒฉ', fill='์ฐ๋'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
# ## ์ด์ 2013๋
๋ถํฐ 2018๋
4์๊น์ง ๋ฐ์ดํฐ๋ฅผ ํฉ์น ์ค๋น๊ฐ ๋จ
# Aggregate ('์ ์ฒด') rows from the 2015-2018 data, to be concatenated with
# the reshaped 2013-2015 data below.
df_2015_2018 = pre_sale.loc[pre_sale['๊ท๋ชจ๊ตฌ๋ถ'] == '์ ์ฒด']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
# Keep only the columns shared by both periods.
df_2013_2015_prepare = df_2013_2015[['์ง์ญ', '์ฐ๋', '์', '๋ถ์๊ฐ๊ฒฉ']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['์ง์ญ๋ช
', '์ฐ๋', '์', 'ํ๋น๋ถ์๊ฐ๊ฒฉ']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['์ง์ญ๋ช
', '์ฐ๋', '์', 'ํ๋น๋ถ์๊ฐ๊ฒฉ']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['์ง์ญ๋ช
'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['์ง์ญ๋ช
'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['์ง์ญ๋ช
'].str.match('์ ๊ตญ|์๋๊ถ')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['์ง์ญ๋ช
'].str.match('์ ๊ตญ|์๋๊ถ')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['์ง์ญ๋ช
'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='์ง์ญ๋ช
', y='ํ๋น๋ถ์๊ฐ๊ฒฉ', fill='์ฐ๋'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['์ง์ญ๋ช
'] == '์ ์ฃผ']
(ggplot(df_2013_2018_jeju)
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='์ฐ๋', y='ํ๋น๋ถ์๊ฐ๊ฒฉ')
+ geom_boxplot()
+ facet_wrap('์ง์ญ๋ช
')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
df = pd.read_csv(fhand[-1], encoding='euc-kr')
df.isnull().sum()
| house_price/apt_presale_price.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Descriptive statistics for a small sample of ages.
df = pd.DataFrame()
df['age'] = [14, 12, 11, 10, 8, 6, 8]
np.mean(df['age'])
np.median(df['age'])
import statistics
# Mode: the most frequent value (8 appears twice in this sample).
statistics.mode(df['age'])
# Sample variance (pandas defaults to ddof=1).
df['age'].var()
# Population variance (numpy defaults to ddof=0), hence smaller than above.
np.var(df.age)
# Sample standard deviation.
np.std(df['age'], ddof=1)
# Standard error of the mean: s / sqrt(n).
np.std(df['age'], ddof=1)/np.sqrt(len(df['age']))
df.describe()
# Repeat with the 6 replaced by 7.
df = pd.DataFrame()
df['age'] = [14, 12, 11, 10, 8, 7, 8]
df.describe()
# Replace the repeated 8 with an outlier (1).
df = pd.DataFrame()
df['age'] = [14,12,11,10,8,7,1]
df.describe()
import statistics
# NOTE(review): this sample has no repeated value; statistics.mode returns
# the first mode encountered on Python 3.8+, but raises StatisticsError on
# earlier versions.
statistics.mode(df['age'])
| module 6 - cp 5 assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:rmg_env]
# language: python
# name: conda-env-rmg_env-py
# ---
import os.path
import math
import rmgpy
import rmgpy.constants as constants
from IPython.display import display
from rmgpy.data.rmg import RMGDatabase
from rmgpy.thermo.nasa import NASA, NASAPolynomial
from rmgpy.species import Species
from rmgpy.chemkin import readThermoEntry, writeThermoEntry
from rmgpy.thermo.thermoengine import processThermoData
from rmgpy.data.thermo import findCp0andCpInf
# +
# Load the RMG database with no thermo/kinetics libraries and no kinetics
# families; only the core database machinery is needed for the thermo
# manipulation below.
databasePath = rmgpy.settings['database.directory']
database = RMGDatabase()
database.load(
    path = databasePath,
    thermoLibraries = [],
    reactionLibraries = [],
    seedMechanisms = [],
    kineticsFamilies = 'none'
)
# +
data = [
"""C2HBr T04/04C 2.H 1.BR 1. 0.G 200.000 6000.000 1000. 1
6.55399311E+00 3.37962726E-03-1.18362410E-06 1.87797808E-10-1.11059116E-14 2
3.17495713E+04-8.20269727E+00 1.10795098E+00 3.21065018E-02-6.02244383E-05 3
5.45400888E-08-1.86034151E-11 3.26428366E+04 1.67414085E+01 3.39671249E+04 4
""",
"""C2HCl T05/08C 2.H 1.CL 1. 0.G 200.000 6000.000 1000. 1
6.52865585E+00 3.32425623E-03-1.14637403E-06 1.79972218E-10-1.05639468E-14 2
2.51378884E+04-9.16499932E+00 1.25077097E+00 3.10939695E-02-5.78728028E-05 3
5.20651866E-08-1.76611780E-11 2.59985454E+04 1.50044210E+01 2.73367422E+04 4
""",
"""CL J 6/82CL 1 0 0 0G 200.000 6000.000 1000. 1
2.94658358E+00-3.85985408E-04 1.36139388E-07-2.17032923E-11 1.28751025E-15 2
1.36970327E+04 3.11330136E+00 2.26062480E+00 1.54154399E-03-6.80283622E-07 3
-1.59972975E-09 1.15416636E-12 1.38552986E+04 6.57020799E+00 1.45891941E+04 4
""",
"""BR J 6/82BR 1 0 0 0G 200.000 6000.000 1000. 1
0.20866945E+01 0.71459733E-03-0.27080691E-06 0.41519029E-10-0.23016335E-14 2
0.12857696E+05 0.90837335E+01 0.24820782E+01 0.18570465E-03-0.64313029E-06 3
0.84642045E-09-0.30137068E-12 0.12709455E+05 0.68740409E+01 0.13453589E+05 4
""",
"""I J 6/82I 1 0 0 0G 200.000 6000.000 1000. 1
2.61667712E+00-2.66010320E-04 1.86060150E-07-3.81927472E-11 2.52036053E-15 2
1.20582790E+04 6.87896653E+00 2.50041683E+00-4.48046831E-06 1.69962536E-08 3
-2.67708030E-11 1.48927452E-14 1.20947990E+04 7.49816581E+00 1.28402035E+04 4
"""
]
thermo = []
# Parse each Chemkin-format NASA polynomial block above.
# NOTE(review): index [1] picks the thermo object out of readThermoEntry's
# return tuple — confirm the field order against the rmgpy.chemkin API.
for entry in data:
    thermo.append(readThermoEntry(entry)[1])
# -
# Build a thermo estimate by atom-swapping: start from C2HBr (thermo[0]),
# subtract Br (thermo[3]) and add I (thermo[4]).
# NOTE(review): addThermoData/subtractThermoData are defined in a later
# cell; this cell only runs after that one has executed. Python 2 syntax
# (print statements) matches the rmg_env kernel.
base = thermo[0].toThermoData()
print base
base = subtractThermoData(base, thermo[3].toThermoData())
base = addThermoData(base, thermo[4].toThermoData())
print base
# NOTE(review): the SMILES is C#CCl (chloroacetylene) while the base was
# built by swapping Br -> I; confirm the intended halogen.
spc = Species().fromSMILES('C#CCl')
display(spc)
findCp0andCpInf(spc, base)
spc.thermo = processThermoData(spc, base)
print writeThermoEntry(spc)
# Second estimate: start from C2HCl (thermo[1]), subtract Cl (thermo[2]),
# add I (thermo[4]).
base2 = thermo[1].toThermoData()
print base2
base2 = subtractThermoData(base2, thermo[2].toThermoData())
base2 = addThermoData(base2, thermo[4].toThermoData())
print base2
# +
def addThermoData(thermoData1, thermoData2):
    """
    Accumulate the thermodynamic data of `thermoData2` onto `thermoData1`
    in place, and return the mutated `thermoData1`.
    """
    n_points = thermoData1.Tdata.value_si.shape[0]
    # Element-wise sum of heat capacities at each tabulated temperature.
    for idx in range(n_points):
        thermoData1.Cpdata.value_si[idx] = (
            thermoData1.Cpdata.value_si[idx] + thermoData2.Cpdata.value_si[idx]
        )
    thermoData1.H298.value_si = thermoData1.H298.value_si + thermoData2.H298.value_si
    thermoData1.S298.value_si = thermoData1.S298.value_si + thermoData2.S298.value_si
    return thermoData1
def subtractThermoData(thermoData1, thermoData2):
    """
    Remove the thermodynamic contribution of `thermoData2` from
    `thermoData1` in place, and return the mutated `thermoData1`.
    """
    n_points = thermoData1.Tdata.value_si.shape[0]
    # Element-wise difference of heat capacities at each tabulated temperature.
    for idx in range(n_points):
        thermoData1.Cpdata.value_si[idx] = (
            thermoData1.Cpdata.value_si[idx] - thermoData2.Cpdata.value_si[idx]
        )
    thermoData1.H298.value_si = thermoData1.H298.value_si - thermoData2.H298.value_si
    thermoData1.S298.value_si = thermoData1.S298.value_si - thermoData2.S298.value_si
    return thermoData1
# -
| Thermo/ModifyThermo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Dynamic Pharmacophores
#
# Dynamic pharmacophores are obtained from a molecular dynamics simulation of a protein-ligand complex. In this notebook we will obtain pharmacophores from a 10 ns trajectory of the ERalpha.
#
# Different methods have been proposed for obtaining pharmacophores from a molecular dynamics, some are very simple and some more complex. We will explore different methods implemented in OpenPharmacophore.
# %matplotlib inline
from openpharmacophore.dynophore import Dynophore
from IPython.display import Image
import mdtraj as md
# + [markdown] tags=[]
# ## Pharmacophores from a molecular dynamics trajectory
# -
# First we load the trajectory file with mdtraj.
# Load the ERalpha protein-ligand trajectory with mdtraj.
traj_file = "../../data/trajectories/ERalpha.h5"
traj = md.load(traj_file)
print(traj)
print(traj.topology)
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# ### Pharmacophores from the first and last frame only
#
# A very simple way of obtaining pharmacophores from a trajectory is just to extract a pharmacophore from the first and las frame of the trajectory.
#
# To work with trajectories we will use the Dynophore class. The constructor expects a trajectory object. This class is different from the other pharmacophore classes.
#
# First, we will use the first_and_last_pharmacophore method. We can get a list of all extracted pharmacophores from the trajectory with the attribute pharmacophores.
# -
dynophore = Dynophore(traj)
# Extract pharmacophores from only the first and last frames.
dynophore.first_and_last_pharmacophore()
dynophore.pharmacophores
# Points of the first-frame pharmacophore.
dynophore.pharmacophores[0].elements
dynophore.pharmacophores[0].show()
# Points of the last-frame pharmacophore.
dynophore.pharmacophores[-1].elements
dynophore.pharmacophores[-1].show()
# We can see that the pharmacophore model obtained from the initial frame differs from the last frame pharmacophore.
# + [markdown] tags=[]
# ### Pharmacophore Analysis
#
# To do a bit of a more sophisticated analysis, we can start by obtaining a pharmacophore model for each frame of the trajectory. We can use the pharmacophores_from_frames method which expects a list of frame indices. This can take a while.
# + [markdown] tags=[]
# ### Get Pharmacophores for each frame of the trajectory
# -
dynophore = Dynophore(traj)
# One pharmacophore per frame of the whole trajectory (can take a while).
frames = list(range(0, traj.n_frames))
dynophore.pharmacophores_from_frames(frames, load_ligand=True)
print(f"Number of pharmacophores {len(dynophore.pharmacophores)}\n")
# Print first three pharmacophores
print(dynophore.pharmacophores[0:3])
# + [markdown] tags=[]
# ### Get all unique pharmacophoric points
# -
# Once we have a lot of pharmacophores, we can analyze the frequency of the pharmacophoric points in the trajectory. We cas use the pharmacophoric_point_frequency method to get a dataframe with each unique pharmacophoric point and its frequency.
dynophore.pharmacophoric_point_frequency()
# All unique pharmacophoric points are stored in a list that can be accessed by the attribute unique_pharmacophoric_points
unique_points = dynophore.unique_pharmacophoric_points
print(f"Number of unique points is {len(unique_points)}")
# + [markdown] tags=[]
# ### Derive a pharmacophore model from the most frequent points
# -
# Another method to get a pharmacophore from a MD trajectory is to get a single pharmacophore model with the points that are above a certain frequency value. Here we get a pharmacophore model with all the points that have a frequency higher than 50%.
# Single model from points present in more than 50% of the frames.
freq_threshold = 0.5
frequency_pharmacophore = dynophore.pharmacophore_by_frequency(freq_threshold)
print(frequency_pharmacophore)
print(frequency_pharmacophore.elements)
# ### Analyze frequency of all unique points
# We can also plot the frequency of the pharmacophoric points across the trajectory. This way we can have an idea of the freqency through time.
# +
# Use a high frequency threshold (40%) so the plot doesn't look too crowded.
# (Previously the variable was set to 0.5 but never used — the literal 0.4
# was passed instead; the variable and the call now agree.)
freq_threshold = 0.4
dynophore.point_frequency_plot(freq_threshold)
# -
# We can create a pharmacophore from this unique pharmacophoric points
# Create a pharmacophore with the following elements:
elements = ["hb acceptor 1", "hb acceptor 2", "hb donor 1", "hb donor 2"]
custom_pharmacophore = dynophore.pharmacophore_from_unique_points(elements)
custom_pharmacophore
# + [markdown] tags=[]
# ### 2D Representation of the Dynamic Pharmacophore
# -
# Another way to represent a pharmacophore is with a 2D representation of the ligand. In this representation the pharmacophoric points are highligted with it's frequency value. All values have been rounded.
# 2D ligand depiction with pharmacophoric points annotated by frequency.
img_file = "./dynophore.png"
dynophore.draw(img_file, img_size=(400, 400), legend="Eralpha", freq_threshold=0.2)
Image(img_file)
# ### Representative Pharmacophore Models
# Representative pharmcophore models (RPMs) are those that have the same pharmacophoric points [3]. We can get all RPMs by callign the following method:
# Representative pharmacophore models: models sharing the same points.
rpms = dynophore.representative_pharmacophore_models()
print(f"{len(rpms)} representative pharmacophore models")
# Print first three
print(rpms[0:3])
# ### Common Hits Approach
# <strong>References</strong>
#
# [1] Wieder, Marcus, <NAME>, <NAME>, <NAME>, and <NAME>. "Comparing pharmacophore models derived from crystal structures and from molecular dynamics simulations." Monatshefte fรผr Chemie-Chemical Monthly 147, no. 3 (2016): 553-563.
#
# [2] Wieder, Marcus, <NAME>, <NAME>, and <NAME>. "Pharmacophore models derived from molecular dynamics simulations of protein-ligand complexes: A case study." Natural product communications 11, no. 10 (2016): 1934578X1601101019.
#
# [3] Wieder, Marcus, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Common hits approach: combining pharmacophore modeling and molecular dynamics simulations." Journal of chemical information and modeling 57, no. 2 (2017): 365-385
#
| docs/contents/Pharmacophore/DynamicPharmacophore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: NLP - GPU (Python3)
# language: python
# name: nlp_gpu
# ---
# # What's new?
#
# With the advent of [eager execution](https://www.tensorflow.org/guide/effective_tf2#eager_execution), TensorFlow 2 does away with explicit calls to Session objects.
# As a matter of fact, TF2 prefers [functions, not sessions](https://www.tensorflow.org/guide/effective_tf2#functions_not_sessions).
# Find more information on the official [Effective TensorFlow 2](https://www.tensorflow.org/guide/effective_tf2) page.
# Here is a brief synopsis as the change from TF1 $\rightarrow$ TF2
#
# **TensorFlow 1.X**
#
# `outputs = session.run(f(placeholder), feed_dict={placeholder: input})`
#
#
# **TensorFlow 2.0**
#
# `outputs = f(input)`
#
# ---
import tensorflow as tf
# ---
# # TensorFlow v2
# Eager execution: the op runs immediately, no Session required.
x = tf.constant([[1., 2.]])
negMatrix = tf.negative(x)
# .numpy() pulls the eager tensor's value into a NumPy array.
print(negMatrix.numpy())
# ---
# ## TensorFlow v2 Compatibility
#
# You can still use `Session` functionality, but you must do so using the `tf.compat` module:
# TF1-style graph execution via the tf.compat.v1 compatibility module.
with tf.compat.v1.Session() as sess:
    x = tf.constant([[1., 2.]])
    negMatrix = tf.negative(x)
    result = sess.run(negMatrix)
print(result)
| TFv2/ch02/Listing 2.06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dustiny5/Practice/blob/master/Practice_Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BY1Bv3B2pDlk" colab_type="text"
# # Prime Numbers
# + id="MII2AWSet3XY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e2d7a64f-db17-46bb-82bb-9acec70640ea"
# Collect all primes <= 100 by trial division against previously found
# primes. (Fix: the original incremented an undefined `counter` on every
# non-dividing prime, which raised NameError on the first odd candidate.)
n = 2
prime_list = [2]
while n < 101:
    # Even numbers greater than 2 are never prime; skip to the next candidate.
    if n % 2 == 0:
        n += 1
        continue
    # Odd candidate: check divisibility by every prime found so far.
    for prime in prime_list:
        if n % prime == 0:
            # Composite: advance to the next candidate.
            n += 1
            break
    else:
        # No prime divided n, so n is prime.
        prime_list.append(n)
        n += 1
print(prime_list)
# + [markdown] id="tOCcrmibR5qf" colab_type="text"
# # [Advent of Code - Day 1 Part 1: The Tyranny of the Rocket Equation](https://adventofcode.com/2019/day/1)
# + id="WB_-K4o5D8qN" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 75} outputId="8b1276c3-6c10-4d30-f3b2-cbabad9392b8"
# Get inputs from site and save it as a txt file then upload here
from google.colab import files
uploaded = files.upload()
# + id="3U6pEa0lS1CD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c25aa476-3ae6-41ca-d0cb-6de1435239b2"
# Read the puzzle input uploaded in the previous cell.
with open('day1aoc.txt', 'r') as f:
    inputs = f.read()
# Split on newlines and convert each module mass to an integer.
masses = [int(el) for el in inputs.split('\n')]
# Fuel per module: floor(mass / 3) - 2; the answer is the sum over modules.
final_mass = [(mass//3) - 2 for mass in masses]
print(sum(final_mass))
# + [markdown] id="Qf7fcjpfaqNS" colab_type="text"
# # [Advent of Code - Day 1 Part 2: The Tyranny of the Rocket Equation](https://adventofcode.com/2019/day/1#part2)
# + id="3bbwjYWVaPKa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="17fd2e06-ca13-4ced-bead-5a2ec42f75c8"
all_fuel = []
sum_fuel = 0
# Loop through all module masses (masses defined in the part-1 cell).
for mass in masses:
    fuel = mass
    # Keep adding fuel for the fuel itself until the increment reaches zero.
    while (fuel//3) >= 0 and (fuel//3) -2 >=0:
        # Accumulate this round's fuel requirement.
        sum_fuel += (fuel//3) - 2
        # The added fuel itself needs fuel on the next iteration.
        fuel = (fuel//3) - 2
    # while-else runs once the condition fails: record this module's total
    # and reset the accumulator for the next module.
    else:
        all_fuel.append(sum_fuel)
        sum_fuel = 0
sum(all_fuel)
# + id="mkTdw5ajgyhD" colab_type="code" colab={}
| Practice_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def test_exercise_85_1(x) -> bool:
    """Scrape the 'On this day' (id='mp-otd') lists from the Wikipedia main
    page and compare them with the expected value *x*.

    Returns True when the scraped list of <ul> texts equals *x*.
    """
    import requests
    from bs4 import BeautifulSoup
    wiki_home = "https://en.wikipedia.org/wiki/Main_Page"
    response = requests.get(wiki_home)
    def decode_content(r,encoding):
        return (r.content.decode(encoding))
    # NOTE(review): encoding_check is neither defined nor imported in this
    # file; this call raises NameError unless the notebook namespace
    # provides it (it presumably returns the response's character encoding).
    contents = decode_content(response,encoding_check(response))
    soup = BeautifulSoup(contents, 'html.parser')
    txt_dump=soup.text
    text_list=[] #Empty list
    # Collect the text of every <ul> inside the div with id 'mp-otd'.
    for d in soup.find_all('div'):
        if (d.get('id')=='mp-otd'):
            for i in d.find_all('ul'):
                text_list.append(i.text)
    return text_list == x
| Chapter07/unit_tests/.ipynb_checkpoints/Exercise 85-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OPTION 2 : Un modรจle unique
#
# Tout classique, on doit utiliser un modรจle qui sait gรฉrer le multi classes unbalanced
# # Imports des librairies de bases
#
# On ajoutera celles qui manquent au fur et ร mesure de nos besoins
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import pandas as pd
import os, gc
# -
# # Dรฉfinition de la seed pour le random
#
# Trรจs important pour qu'on voit les mรชmes choses entre nos deux ordis
# Fix the RNG seed so results are reproducible across runs and machines.
RANDOM_SEED = 42;
np.random.seed(RANDOM_SEED)
# # Dรฉfinition des paramรจtres pour Matplot
#
# Rien de bien intรฉrรฉssant
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# # Set des variables globales
#
# Attention, je n'utilise les variables globales pour la gestion des fichiers. Sinon, c'est mort
# Where to save the figures
PROJECT_ROOT_DIR = "."
DATA_PROCESSED = os.path.join(PROJECT_ROOT_DIR, "data_processed")
# # Fonction pour load les libraires
#
# En vrai, on a juste besoin de pd.read_csv, mais c'รฉtait pour faire joli
def load_data(file,data_path=DATA_PROCESSED, sep=','):
    """Read `file` from `data_path` into a DataFrame using separator `sep`."""
    return pd.read_csv(os.path.join(data_path, file), sep)
# # On load des donnรฉes
#
TX_data = load_data(file = "working_data.csv");
TX_data=TX_data.apply(pd.to_numeric, errors='ignore')
TX_data.head()
# +
#TX_data.info()
# -
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(TX_data,
test_size=0.2,
random_state=RANDOM_SEED,
stratify=TX_data["CLAIM_TYPE"]
)
def datapreprocess(data):
    """Split a raw claims DataFrame into (X, Y) ready for sklearn.

    Y is the "CLAIM_TYPE" column; X is every other numeric column with
    missing values imputed by the median.
    """
    data=data.apply(pd.to_numeric, errors='ignore')
    # Y and X
    Y=data["CLAIM_TYPE"]
    X=data.drop("CLAIM_TYPE", axis=1,inplace=False)
    # Exclude Objets
    X=X.select_dtypes(exclude=['object'])
    # Work on fare
    # NOTE(review): sklearn.preprocessing.Imputer was removed in sklearn 0.22
    # (replaced by SimpleImputer, which has no axis= option), so this only
    # runs on older sklearn.  axis=1 imputes along rows, i.e. a sample's NaNs
    # are filled with that row's median -- confirm this is intended rather
    # than the usual per-column (axis=0) imputation.
    from sklearn.preprocessing import Imputer
    imp = Imputer(missing_values='NaN',strategy='median', axis=1)
    X=pd.DataFrame(imp.fit_transform(X),columns=X.columns.values)
    return X, Y
# +
X_train,Y_train=datapreprocess(train_set)
X_test, Y_test=datapreprocess(test_set)
del train_set, test_set;
gc.collect()
# +
## test Rapide
# -
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
def multiclass_roc_auc_score(truth, pred):
    """Weighted one-vs-rest ROC AUC for multi-class labels."""
    binarizer = LabelBinarizer()
    binarizer.fit(truth)
    truth_bin = binarizer.transform(truth)
    pred_bin = binarizer.transform(pred)
    return roc_auc_score(truth_bin, pred_bin, average="weighted")
# Modรจle trouvรฉ grรขce ร la cross validation
from sklearn.ensemble import RandomForestClassifier
rnd_clf= RandomForestClassifier(
bootstrap=True,
class_weight='balanced_subsample',
criterion='gini',
#max_depth=None,
#max_features='auto',
#max_leaf_nodes=18,
#min_impurity_decrease=0.0,
#min_impurity_split=None,
#min_samples_leaf=1,
#min_samples_split=2,
#min_weight_fraction_leaf=0.0,
#n_estimators=50,
n_jobs=1,
#oob_score=True,
random_state=RANDOM_SEED,
verbose=0,
warm_start=False)
rnd_clf.fit(X_train, Y_train);
y_pred_rf = rnd_clf.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(Y_test, y_pred_rf))
# +
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(matrix):
    """Show *matrix* as a color-mapped image with a colorbar."""
    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)
    image = axes.matshow(matrix)
    figure.colorbar(image)
conf_mx = confusion_matrix(Y_test, y_pred_rf)
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
plot_confusion_matrix(norm_conf_mx)
# -
# ### Bilan:
#
# On prรฉdit bien les zones en CLAIMS, donc รงa peut รชtre bien pour la suite.
params_cv_long = {#'bootstrap':[True, False],
'max_leaf_nodes':[2, 4, 8, 12, 16, 18],
'min_samples_leaf':list(range(1, 6)),
'min_samples_split':list(range(2, 4)),
'n_estimators':list(range(10, 100, 25))
}
rf_to_cv= RandomForestClassifier(
random_state=RANDOM_SEED,
criterion='gini',
max_features='auto',
class_weight='balanced_subsample',
oob_score=False,
verbose=0,
warm_start=False
)
from sklearn.model_selection import GridSearchCV
rf_gs_cv = GridSearchCV(
estimator=rf_to_cv,
param_grid=params_cv_long,
scoring='roc_auc',
n_jobs=-1,
verbose=1,
cv= 5
)
rf_gs_cv.fit(X_train, Y_train)
rf_gs_cv.best_estimator_
y_pred_cv_rf = rf_gs_cv.predict(X_test)
print("ROC score : {}".format(roc_auc_score(Y_test, y_pred_cv_rf)))
conf_mx = confusion_matrix(Y_test, y_pred_cv_rf)
plot_confusion_matrix(conf_mx)
# ## Bon, on a bien rigolรฉ, maintenant on essaie tout les algos possibles
# +
# Naive Bayes
from sklearn.naive_bayes import BernoulliNB, GaussianNB
clf_BernoulliNB = BernoulliNB()
clf_GaussianNB = GaussianNB()
# Tree
from sklearn.tree import DecisionTreeClassifier
clf_DecisionTreeClassifier = DecisionTreeClassifier()
# Ensemble
from sklearn.ensemble import ExtraTreesClassifier, GradientBoostingClassifier, AdaBoostClassifier
clf_GradientBoostingClassifier = GradientBoostingClassifier()
clf_ExtraTreeClassifier = ExtraTreesClassifier()
clf_AdaBoostClassifier = AdaBoostClassifier()
# Neighbors
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid, RadiusNeighborsClassifier
clf_KNeighborsClassifier = KNeighborsClassifier(n_neighbors=3)
clf_NearestCentroid = NearestCentroid()
clf_RNC = RadiusNeighborsClassifier()
# Semi Supervised
from sklearn.semi_supervised import LabelPropagation, LabelSpreading
clf_LabelPropagation = LabelPropagation()
clf_LabelSpreading = LabelSpreading()
# Discriminant Analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
clf_LDA = LinearDiscriminantAnalysis()
clf_QDA = QuadraticDiscriminantAnalysis()
# Linear Models
from sklearn.linear_model import SGDClassifier, LogisticRegressionCV, Perceptron, PassiveAggressiveClassifier, RidgeClassifier
clf_SGDClassifier = SGDClassifier()
clf_LogisticRegressionCV = LogisticRegressionCV()
clf_Perceptron = Perceptron()
clf_PassiveAggressiveClassifier = PassiveAggressiveClassifier()
clf_RidgeClassifier=RidgeClassifier()
from sklearn.gaussian_process import GaussianProcessClassifier
clf_GaussianProcessClassifier = GaussianProcessClassifier()
# -
list_models=[clf_BernoulliNB,
#clf_GaussianNB,
clf_DecisionTreeClassifier,
clf_GradientBoostingClassifier,
clf_ExtraTreeClassifier,
clf_AdaBoostClassifier,
#clf_KNeighborsClassifier,
#clf_NearestCentroid,
#clf_RNC,
#clf_LabelPropagation,
#clf_LabelSpreading,
#clf_LDA,
#clf_QDA,
#clf_SGDClassifier,
clf_LogisticRegressionCV,
#clf_Perceptron,
#clf_PassiveAggressiveClassifier,
#clf_RidgeClassifier,
#clf_GaussianProcessClassifier
]
# Fit and score every candidate model, reporting failures instead of
# silently swallowing them.
for clf in list_models:
    try:
        clf.fit(X_train,Y_train)
        y_pred = clf.predict(X_test)
        print(str(clf.__class__.__name__)+" : "+ str(multiclass_roc_auc_score(Y_test, y_pred)))
        del clf
    # BUG FIX: a bare `except:` also swallows KeyboardInterrupt/SystemExit
    # and hides the actual failure; catch Exception and report the cause.
    except Exception as exc:
        print(str(clf.__class__.__name__)+" : Error ("+str(exc)+")")
        del clf
from mlxtend.classifier import StackingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
sclf = StackingClassifier(classifiers=list_models,
meta_classifier=lr)
list_models.append(sclf)
labels = [
'BernoulliNB',
'DecisionTreeClassifier',
'GradientBoostingClassifier',
'ExtraTreeClassifier',
'AdaBoostClassifier',
#'KNeighborsClassifier',
#'LDA',
#'SGDClassifier',
'LogisticRegressionCV',
'Stacking'
]
# +
print('3-fold cross validation:\n')
# BUG FIX: only `cross_val_score` was imported (from sklearn.model_selection);
# the bare name `model_selection` is undefined and raised NameError.
for clf, label in zip(list_models,
                      labels):
    scores = cross_val_score(clf, X_train, Y_train,
                             cv=3, scoring='f1_weighted')
    # Note: the printed label says "Accuracy" but the metric is f1_weighted.
    print("Accuracy: %0.2f (+/- %0.2f) [%s]"
          % (scores.mean(), scores.std(), label))
# -
# +
# MODEL!
# +
## Scaler
# Inutile pour les arbres, mais utile pour les autres
# -
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# +
## Normalizer
# -
from sklearn.preprocessing import Normalizer
normy = Normalizer()
# +
# PCA
# -
from sklearn.decomposition import PCA
pca = PCA()
# +
# Feature Union
# +
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import KernelPCA
estimators = [('linear_pca', PCA(n_components=5)),
('kernel_pca', KernelPCA(n_components=5))]
combined = FeatureUnion(estimators)
# +
#combined.fit_transform(X_train)
# -
# # Feature Selection
# ## Selection des variables avec une variance faible
#
# Ici, on vire tout ce qu'il y a une probabilitรฉ de 0.8 d'รชtre la
from sklearn.feature_selection import VarianceThreshold
sel_vt = VarianceThreshold(threshold=(.8 * (1 - .8)))
# ## Selection des K meilleures variables d'aprรจs le test de Chi2
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
Nb_Var = 15
Chi_select = SelectKBest(chi2, k=Nb_Var)
del Nb_Var
# +
## Selection par rapport ร un modรจle
# -
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
# ### Modรจle de Selection
clf = RandomForestClassifier(n_jobs=-1,
random_state=RANDOM_SEED)
# +
### Contruction
# -
sfm_clf = SelectFromModel(clf)
del clf
# # Metriques
#
# D'abord, notre mรฉtrique ร nous
# +
### Calcul l'AUC de chaque classe
# -
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
def multiclass_roc_auc_score(truth, pred):
lb = LabelBinarizer()
lb.fit(truth)
return roc_auc_score(lb.transform(truth), lb.transform(pred), average="weighted")
# +
### precision_class_AUC (pas safe), est utilisรฉe pour le scoring de modรจle (version adaptรฉe de prรฉcรฉdement)
# -
def precision_class_AUC(estimator, X, y):
return multiclass_roc_auc_score(y, estimator.predict(X))
# +
## Model!
# -
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_jobs=-1,
random_state=RANDOM_SEED)
from sklearn.ensemble import GradientBoostingClassifier
gb_clf=GradientBoostingClassifier(random_state=RANDOM_SEED)
# +
# Et on pipeline
# -
from sklearn.pipeline import Pipeline
# +
clf = Pipeline([
#('scaler', scaler),
#('norm', normy),
#('threshold', sel_vt),
#('SelectKBest', Chi_select),
#('reduce_dim', pca),
#('feature_union', combined),
#('feature_selection', sfm_clf),
('classification', gb_clf)
])
clf.fit(X_train, Y_train);
y_pred_rf = clf.predict(X_test)
multiclass_roc_auc_score(Y_test, y_pred_rf)
# -
from sklearn.metrics import confusion_matrix
# +
def plot_confusion_matrix(matrix):
    """Render *matrix* as a colored image and attach a colorbar."""
    fig_handle = plt.figure(figsize=(8, 8))
    subplot = fig_handle.add_subplot(111)
    drawn = subplot.matshow(matrix)
    fig_handle.colorbar(drawn)
conf_mx = confusion_matrix(Y_test, y_pred_rf)
plot_confusion_matrix(conf_mx)
# -
params = {'max_leaf_nodes': [4, 8],
'max_depth' : [2, 4],
'n_estimators': [250, 500],
'min_samples_leaf': list(range(1, 3)),
'min_samples_split' : list(range(2, 3)),
}
rf_gs_cv = GridSearchCV(RandomForestClassifier(random_state=RANDOM_SEED, oob_score=True),
params,
scoring='f1_weighted',
n_jobs=-1,
verbose=1)
rf_gs_cv.fit(X_train, Y_train)
rf_gs_cv.best_estimator_
y_pred_cv_rf = rf_gs_cv.predict(X_test)
multiclass_roc_auc_score(Y_test, y_pred_cv_rf)
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
params_cv_long = {'bootstrap':True,
'class_weight':None,
'criterion':'gini',
'max_depth':None,
'max_features':'auto',
'max_leaf_nodes':[4, 8, 12, 16, 18],
'min_impurity_decrease':0.0,
'min_impurity_split':None,
'min_samples_leaf':list(range(1, 6)),
'min_samples_split':list(range(2, 4)),
'min_weight_fraction_leaf':0.0,
'n_estimators':[100, 250, 500],
'n_jobs':-1,
'oob_score':False,
'random_state':RANDOM_SEED,
'verbose':0,
'warm_start':False
}
clf = Pipeline([
('feature_selection', SelectFromModel(RandomForestClassifier(n_estimators=500,
max_leaf_nodes=16,
n_jobs=-1,
random_state=RANDOM_SEED))),
('classification', GridSearchCV(RandomForestClassifier(random_state=RANDOM_SEED),
params,
scoring='f1_weighted',
n_jobs=-1,
verbose=1))
])
# Feature selection by a random forest, then classification by another one.
clf = Pipeline([
    ('feature_selection', SelectFromModel(RandomForestClassifier(n_jobs=-1,random_state=RANDOM_SEED))),
    # BUG FIX: "RandomForestClassifier(," carried a stray leading comma,
    # which is a SyntaxError; keep only the intended keyword arguments.
    ('classification', RandomForestClassifier(n_jobs=-1,
                                              random_state=RANDOM_SEED,
                                              ))
])
clf.fit(X_train, Y_train)
y_pred_rf = clf.predict(X_test)
multiclass_roc_auc_score(Y_test, y_pred_rf)
from sklearn.model_selection import GridSearchCV
params = {'max_leaf_nodes': [4, 8],
'max_depth' : [2, 4],
'n_estimators': list(range(10, 50, 10)),
'min_samples_leaf': list(range(1, 3)),
'min_samples_split' : list(range(2, 3)),
}
rf_gs_cv = GridSearchCV(RandomForestClassifier(random_state=RANDOM_SEED, oob_score=True),
params,
scoring=precision_class_AUC,
n_jobs=-1,
verbose=1)
rf_gs_cv.fit(X_train, Y_train)
rf_gs_cv.best_estimator_
y_pred_cv_rf = rf_gs_cv.predict(X_test)
multiclass_roc_auc_score(Y_test, y_pred_cv_rf)
conf_mx = confusion_matrix(Y_test, y_pred_cv_rf)
plot_confusion_matrix(conf_mx)
| backups/Models_Option_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.2 64-bit (''rainymotion'': conda)'
# language: python
# name: python38264bitrainymotionconda396b8f6fb8a8423b92a2f9b0ebb8f6e9
# ---
# **Experiment for obtaining 24 Hr prediction from Persistence Model in rainymotion library**
#
# Author: <NAME>
#
# File use: For predicting 24 Hr precipitation images.
#
# Date Created: 19-03-21
#
# Last Updated: 20-03-21
#
# Python version: 3.8.2
# +
import h5py
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.misc
from rainymotion.models import Persistence
from rainymotion.metrics import *
import os
import cv2
import pandas as pd
import wradlib.ipol as ipol # for interpolation
from rainymotion import metrics
from rainymotion import utils
from scipy.ndimage import map_coordinates
import timeit
#from tvl1sindysupport import tvl1utilities -in future our own library
# -
times=['0000','0010', '0020', '0030', '0040', '0050',
'0100', '0110', '0120', '0130', '0140', '0150',
'0200', '0210', '0220', '0230', '0240', '0250',
'0300', '0310', '0320', '0330', '0340', '0350',
'0400', '0410', '0420', '0430', '0440' ,'0450',
'0500', '0510', '0520', '0530', '0540', '0550',
'0600', '0610', '0620', '0630', '0640', '0650',
'0700', '0710', '0720', '0730', '0740', '0750',
'0800', '0810', '0820', '0830', '0840', '0850',
'0900', '0910', '0920', '0930', '0940', '0950',
'1000', '1010', '1020', '1030', '1040', '1050',
'1100', '1110', '1120', '1130', '1140', '1150',
'1200', '1210', '1220', '1230', '1240', '1250',
'1300', '1310', '1320', '1330', '1340', '1350',
'1400', '1410', '1420', '1430', '1440', '1450',
'1500', '1510', '1520', '1530', '1540', '1550',
'1600', '1610', '1620', '1630', '1640', '1650',
'1700', '1710', '1720', '1730', '1740', '1750',
'1800', '1810', '1820', '1830', '1840', '1850',
'1900', '1910', '1920', '1930', '1940', '1950',
'2000', '2010', '2020', '2030', '2040', '2050',
'2100', '2110', '2120', '2130', '2140', '2150',
'2200', '2210', '2220', '2230', '2240', '2250',
'2300', '2310', '2320', '2330', '2340', '2350']
def discrete_cmap(N, base_cmap=None):
    """Create an N-bin discrete colormap from the specified input map"""
    # Works whether base_cmap is a string, None, or a Colormap instance.
    base = plt.cm.get_cmap(base_cmap)
    sampled_colors = base(np.linspace(0, 1, N))
    binned_name = base.name + str(N)
    return base.from_list(binned_name, sampled_colors, N)
# +
#For plotting map - currently using function as in source code Need to change to Cartopy
def plotMap(title,img, lat1, lat2, long1, long2, outputPath,last=0):
    """Contour-plot a radar intensity image on a Basemap of the given
    lat/long box and save the figure.

    title        -- plot title
    img          -- 2-D intensity array (flipped vertically before plotting)
    lat1, lat2   -- south / north latitude bounds
    long1, long2 -- west / east longitude bounds
    outputPath   -- file path the figure is saved to
    last         -- when 1, also draw the colorbar (used for the final frame)
    """
    # BUG FIX: `Basemap` was referenced but never imported anywhere in this
    # file, so every call raised NameError.  Import it locally here
    # (mpl_toolkits.basemap; planned to be replaced by Cartopy).
    from mpl_toolkits.basemap import Basemap
    (height, width) = img.shape
    intensity = np.asarray(img, dtype=np.float32)
    # Raw radar rows are stored north-up reversed; flip to match the map.
    intensity_level = np.flipud(intensity)
    dLon = (long2 - long1) / width
    dLat = (lat2 - lat1) / height
    lon = np.arange(long1, long2, dLon)
    lat = np.arange(lat1, lat2, dLat)
    lons, lats = np.meshgrid(lon, lat)
    fig = plt.figure(figsize=(12, 8))
    # Set up Basemap instance (cylindrical projection over the bounding box)
    m = Basemap(projection="cyl",
                llcrnrlon=long1, urcrnrlon=long2,
                llcrnrlat=lat1, urcrnrlat=lat2,
                resolution='h')
    # Add geographic outlines
    m.drawcoastlines(color='black')
    m.drawstates()
    m.drawcountries()
    m.drawmeridians(np.arange(long1, long2, 1), labels=[True, False, False, True])
    m.drawparallels(np.arange(lat1, lat2, 1), labels=[True, False, True, False])
    # Plot data with an 8-bin discrete "jet" colormap over levels 1..64.
    cs = m.contourf(lons, lats, intensity_level,shading='flat', levels=list(range(1,65)), cmap=discrete_cmap(8,"jet"))
    # Add Colorbar only on the last frame so intermediate frames line up.
    if last==1:
        cb = plt.colorbar(cs ,shrink=1.0) #, extend='both')
    # Add Title
    plt.title(title)
    plt.savefig(outputPath, bbox_inches='tight', pad_inches=0.0)
    plt.close()
# +
# Common Initialization
eventName = "TyphoonFaxai"
eventDate ="20190908"
#Latitude and Longitude of <NAME>
lat1 = 32.5
lat2 = 39
long1 = 136
long2 = 143
pred_date = 20190908 #YYYYMMDD
[height, width] = [781,561]
eventNameDate = eventName + "_" + eventDate
startHr = 2
startMin= 40
predStartHr = 300
step = 5 #for rainymotion models
# For radar images
inputFolder = "./ForExperiments/Exp1/RadarImages/HeavyRainfall/For300/"
# outputFolder= "./ForExperiments/Exp1/Results/"
# print(inputFolder)
fileType='.bin'
timeStep = 10 # for Japan Radar Data
modelName = "Persistence"
stepRainyMotion = 5 # 5 minutes
##recentFramePath##
recentFrameFolder = str(pred_date)+"_set_24Hr_bin" #20190908_set_24Hr_bin
recentFramePath = "/home/divya/divya/OneFullDayData_7TestCases_WNIMar5/%s"%recentFrameFolder
print ("\n Recent frame path ",recentFramePath)
inputFolder = recentFramePath
print("\n Input folder is ",inputFolder)
##Output path where predicted images for visual comparison are saved.##
outputimgpath = "/home/divya/divya/OneFullDayData_7TestCases_WNIMar5/24hroutputs/%i/%s/%s"%(pred_date,modelName,"pred_images")
os.makedirs(outputimgpath, exist_ok=True)
print ("\n Output image path is ",outputimgpath)
##Output path where evaluation results are saved as csv files.##
outputevalpath = "/home/divya/divya/OneFullDayData_7TestCases_WNIMar5/24hroutputs/%i/%s/%s"%(pred_date,modelName,"eval_results")
os.makedirs(outputevalpath, exist_ok=True)
print ("\n Output eval results in ",outputevalpath)
savepath = outputimgpath#"Outputs/%i/%s"%(pred_date,pred_times[0])
noOfImages = 2# Model needs 24 frames
step = 5
outputFilePath = outputimgpath+'/'
outputFilePath = outputFilePath + eventNameDate
print(outputFilePath)
hrlimit = len(times)
leadsteps = 6
totinputframes = 2
# -
def gettimes24hr(pred_time):
    """Return (input_times, prediction_times) windows around `pred_time`.

    Uses the module-level `times` list (10-minute steps over 24 h) plus the
    globals `leadsteps` (forecast horizon) and `totinputframes` (number of
    input frames).
    """
    # times=np.array(times)
    inptimes = []
    pred_times = []
    index = times.index(pred_time)
    indexlimit = len(times)
    print("Leadsteps are ", leadsteps)
    # NOTE(review): when either window would run past the ends of `times`,
    # the corresponding list is silently returned empty (and "Success" is
    # still printed) -- callers should check for that.
    if (index+leadsteps) < indexlimit:
        pred_times = times[index:index+leadsteps]
        if (index-totinputframes)>=0:
            inptimes = times[index-totinputframes:index]
    print("PredTimes:",pred_times)
    print("InpTimes:",inptimes)
    print("Get Time Success..")
    return inptimes, pred_times
def readRadarImages(pred_time,inputpath,height,width, noOfImages,fileType):
    """Load the `noOfImages` radar frames immediately preceding `pred_time`.

    Scans the frame folder for files whose name contains "_<time>" and ends
    with `fileType`, reads each as raw float32, reshapes to (height, width),
    downcasts to float16, and stacks to shape (noOfImages, height, width).
    Relies on the module-level `times` list and `recentFramePath`.
    """
    # NOTE(review): listing uses the global `recentFramePath`, not the
    # `inputpath` parameter; they are equal in this notebook, so behavior is
    # kept unchanged here -- confirm before reusing elsewhere.
    files = (os.listdir(recentFramePath))
    files.sort()
    inputRadarImages = []
    i = 0
    index = times.index(pred_time)
    inputframes = times[index-noOfImages:index]
    while (i<noOfImages):
        inputframetime = "_"+inputframes[i]
        i = i +1
        for fileName in files:
            if inputframetime in fileName:
                print("The input image at %s is available",inputframetime)
                print(fileName)
                if fileName.endswith(fileType):
                    inputFileName =recentFramePath+'/'+fileName
                    # BUG FIX: the handle was opened and never closed, leaking
                    # one file descriptor per frame; use a context manager.
                    with open(inputFileName,'rb') as fd:
                        # straight to numpy data (no buffering)
                        inputFrame = np.fromfile(fd, dtype = np.dtype('float32'), count = 2*height*width)
                    inputFrame = np.reshape(inputFrame,(height,width))
                    inputFrame = inputFrame.astype('float16')
                    inputRadarImages.append(inputFrame)
    inputRadarImages = np.stack(inputRadarImages, axis=0)
    print(inputRadarImages.shape)
    return inputRadarImages
# **1.2 Persistence**
def doPersistenceNowcasting(startpredtime, saveimages):
    """Run the rainymotion Persistence model from `startpredtime`.

    startpredtime -- 'HHMM' string marking the prediction start
    saveimages    -- truthy to render and save each predicted frame
    Returns the stack of nowcast frames produced by the model.
    Relies on module-level configuration (inputFolder, leadsteps, etc.).
    """
    model = Persistence()
    model.input_data = readRadarImages(startpredtime,inputFolder,height,width, noOfImages,fileType)
    # BUG FIX: timeit.timeit() with no arguments benchmarks an empty
    # statement, so the reported duration was meaningless; default_timer()
    # reads a monotonic wall clock instead.
    start = timeit.default_timer()
    nowcastPersistence = model.run()
    end = timeit.default_timer()
    sparseTime = end - start
    print("Persistence took ",end - start)
    nowcastPersistence.shape
    print("Saving the nowcast images. Please wait...")
    for i in range(leadsteps):
        outFrameName = outputFilePath + '_'+str(predStartHr+(i*5))+'.png'
        # Draw the colorbar only on the last frame.
        if i == leadsteps-1:
            last = 1
        else:
            last = 0
        if (saveimages):
            plotMap(modelName+' '+str(predStartHr+(i*5)), nowcastPersistence[i], lat1, lat2, long1, long2, outFrameName,last)
    print("Finished persistence model nowcasting!")
    return nowcastPersistence
# **2. Performance Evaluation**
# +
def getGroundTruthImages(pred_times,leadsteps,recentFramePath,height,width,fileType):
    """Load the `leadsteps` ground-truth radar frames named by `pred_times`.

    Scans `recentFramePath` for files whose name contains "_<time>" and ends
    with `fileType`, reads each as raw float32, reshapes to (height, width),
    downcasts to float16, and returns an array of shape (leadsteps, H, W).
    """
    files = (os.listdir(recentFramePath))
    files.sort()
    groundTruthImages = []
    i = 0
    while (i<leadsteps):
        groundtruthtime = "_"+pred_times[i]
        i = i +1
        for fileName in files:
            if groundtruthtime in fileName:
                print("The ground truth at %s is available",groundtruthtime)
                print(fileName)
                if fileName.endswith(fileType):
                    inputFileName =recentFramePath+'/'+fileName
                    # BUG FIX: the handle was opened and never closed, leaking
                    # one file descriptor per frame; use a context manager.
                    with open(inputFileName,'rb') as fd:
                        # straight to numpy data (no buffering)
                        recentFrame = np.fromfile(fd, dtype = np.dtype('float32'), count = 2*height*width)
                    recentFrame = np.reshape(recentFrame,(height,width))
                    recentFrame = recentFrame.astype('float16')
                    groundTruthImages.append(recentFrame)
    groundTruthImages = np.moveaxis(np.dstack(groundTruthImages), -1, 0)
    return groundTruthImages
# +
def evaluate(nowcasts):
    """Score `nowcasts` against ground truth for the global `pred_times`.

    Returns four lists (CSI, MAE, FAR, POD), one value per lead step.
    Relies on globals: pred_times, recentFramePath, height, width, and the
    rainymotion metric functions MAE/FAR/POD/CSI.
    """
    fileType = '.bin'
    leadsteps = 6 # 6 for 1 hr prediction, 18 for 3hr prediction
    groundTruthPath = recentFramePath
    print(pred_times)
    groundTruthImgs = getGroundTruthImages(pred_times,leadsteps,groundTruthPath,height,width,fileType)
    maelist = []
    farlist = []
    podlist= []
    csilist= []
    thres =1.0
    noOfPrecipitationImages = 6
    j = 0 # using another index to skip 5min interval data from rainymotion
    for i in range(noOfPrecipitationImages):
        mae = MAE(groundTruthImgs[i],nowcasts[j])
        # NOTE(review): FAR/POD use threshold=0.1 while CSI uses thres=1.0 --
        # confirm the mismatch is intentional.
        far = FAR(groundTruthImgs[i],nowcasts[j], threshold=0.1)
        pod = POD(groundTruthImgs[i],nowcasts[j], threshold=0.1)
        csi = CSI(groundTruthImgs[i],nowcasts[j],thres)
        maelist.append(mae)
        farlist.append(far)
        podlist.append(pod)
        csilist.append(csi)
        # Nowcasts come at 5-min steps but ground truth is 10-min, so
        # advance two nowcast frames per ground-truth frame.
        j = j + 2
    return csilist,maelist,farlist,podlist
# -
# **2. 24 Hr Prediction**
# +
startpredtime = '0100' #'1100'
index = times.index(startpredtime)
indexlimit = times.index('2250') # Since we have only 6 more ground truths available from this time
print(index)
print("Last prediction is at index ", indexlimit)
csilist = []
maelist = []
podlist = []
farlist = []
pred_time = startpredtime
while index<indexlimit:#len(times):
print(times[index])
saveimages = 0
# if (index==66):
# saveimages=1
intimes, pred_times = gettimes24hr(pred_time)
nowcasts = doPersistenceNowcasting(pred_time,saveimages)
csi,mae,far,pod = evaluate(nowcasts)
csilist.append(csi)
maelist.append(mae)
podlist.append(pod)
farlist.append(far)
index = index+1
pred_time = times[index]
print("Successfully completed persistence nowcasting!")
# -
# For debugging
print(len(maelist))
print("\n\n")
print(len(csilist))
print("\n\n")
print(len(podlist))
print("\n\n")
print(len(farlist))
# **To save results in excel workbook**
# +
import xlwt
from xlwt import Workbook
# Workbook is created
wb = Workbook()
# +
def writeinexcelsheet(sheetname, wb, results):
    """Write the per-prediction metric table `results` into a new sheet.

    sheetname -- name of the sheet to create
    wb        -- an xlwt Workbook
    results   -- 2-D sequence: one row per prediction, one column per lead time
    """
    sheet1 = wb.add_sheet(sheetname)
    # Header row: prediction index plus the six 10-minute lead times.
    sheet1.write(0, 0, 'Pred.no.')
    sheet1.write(0, 1, 't (pred start time)')
    sheet1.write(0, 2, 't + 10')
    sheet1.write(0, 3, 't + 20')
    sheet1.write(0, 4, 't + 30')
    sheet1.write(0, 5, 't + 40')
    sheet1.write(0, 6, 't + 50')
    # (Removed the dead `col = 0` assignment -- `col` is rebound by the loop.)
    rows = len(results)
    cols = len(results[0])
    print(cols)
    for rowno in range(rows):
        sheet1.write(rowno+1, 0, rowno+1)
        for col in range(cols):
            # float() accepts both numpy scalars (the float16 metrics) and
            # plain Python numbers, unlike the previous numpy-only .astype().
            sheet1.write(rowno+1, col+1, float(results[rowno][col]))
# -
writeinexcelsheet('CSI',wb,csilist)
writeinexcelsheet('MAE',wb,maelist)
writeinexcelsheet('FAR',wb,farlist)
writeinexcelsheet('POD',wb,podlist)
excelpath = "/home/divya/divya/OneFullDayData_7TestCases_WNIMar5/24hroutputs/20190908/Persistence/eval_results/"
excelpath = excelpath + 'resultsPersistence.xls'
wb.save(excelpath)
| examples/Persistence24HrPredictionNew.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting a Surface
# +
# %matplotlib notebook
# # %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html"""
from matplotlib import cm # cm stands for color map
# -
# Consider the surface given by
# $$E(w_1, w_2) = w_1^4 + w_2^4 - 16 w_1 w_2.$$
# +
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
"""plt.subplots() is a function that returns a tuple
containing a figure and axes objects."""
x = y = np.linspace(-3,3,100)
X, Y = np.meshgrid(x, y)
Z = X**4+Y**4-16*X*Y
surf = ax.plot_surface(X,Y,Z, cmap=cm.coolwarm)
#surf = ax.plot_surface(X,Y,Z, cmap=cm.PiYG)
ax.set_zlim(0, 200)
fig.colorbar(surf, shrink=0.5, aspect=10)
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
plt.show()
# -
# ## Gradient Descent
# The gradient of $E$ is
# $$ \nabla E = [ 4 w_1^3-16 w_2, 4 w_2^3 - 16 w_1] . $$
#
# The formula for gradient descent is
# $$ \mathbf w_{k+1} = \mathbf w_k - \eta \nabla E ( \mathbf w_k) . $$
# +
def E(u,v):
    """Return the surface value E(u, v) = u^4 + v^4 - 16*u*v."""
    quartic_part = u ** 4 + v ** 4
    coupling_part = 16 * u * v
    return quartic_part - coupling_part
eta=0.01
x=1.2; y=1.2
print (0,'\t','x=', x,'\t','y=',y,'\t', 'E=',E(x,y))
# -
for i in range(0,30):
g=4*x**3-16*y
h=4*y**3-16*x
x=x-eta*g
y=y-eta*h
print (i+1,'\t','x=',round(x,3),'\t','y=',round(y,3),'\t','E=',round(E(x,y),3))
#'\t' is the escape sequence for tab.
# ## Linear Regression Revisited
# We will redo the example of multivariate-data in linear regression using gradient descent.
#data = np.genfromtxt('../../data/multivar_simulated/data.csv',skip_header=1,delimiter=',')
data = np.genfromtxt('../../data/multivar_simulated.csv',skip_header=1,delimiter=',')
data[:3,:]
# Split the loaded array into target Y and raw features X1, then append a
# column of ones so the intercept is absorbed into the weight vector.
Y = data[:,1]
X1 = data[:,2:]
# BUG FIX: `X` is not defined until two lines below, so including `X.shape`
# here raised NameError on a fresh kernel run.
Y.shape, X1.shape
O = np.ones(shape=(X1.shape[0],1))
X = np.concatenate([X1,O],axis=1)
X.shape
# The error function is given by
# $$ E = \sum_{j=1}^{N} (y_j-\sum_{s=1}^{k+1} x_{js}m_{s})^2 .$$
# Write a function for $E$.
# +
#def Er(M):
# formula here
# return the result
# -
# The gradient of $E$ is given by
# $$ \nabla E = -2 X^{\intercal}Y + 2
# X^{\intercal}XM. $$
# Write a function for $\nabla E$.
# +
#def GE(M):
# return formula here
# -
# Choose initial values.
# +
#eta=
#iter_num=
#M=np.array([?,?,?])
# -
# Calculate the initial error.
Er(M)
# Run a loop for gradient descent and print the values of M and Er(M).
# +
#Write a loop here
#
#print M and Er(M)
# -
# Compare the result with the previous result from Linear Regression which was
#
# [ 1.78777492, -3.47899986, 6.0608333 ]
#
#
# ## Newton's Method
# $$ E=w_1^4+ w_2^4 - 16 w_1 w_2$$
#
# $$ \nabla E = [ 4 w_1^3-16 w_2, 4 w_2^3 - 16 w_1] $$
#
# $$\mathbf HE^{-1} \nabla E = \frac 1 {9w_1^2 w_2^2 -16} \begin{bmatrix} 3 w_1^3 w_2^2 - 8 w_2^3 -16w_1 \\ 3 w_1^2 w_2^3 -8 w_1^3 -16w_2 \end{bmatrix}$$
#
# $$\boxed{ \mathbf w_{k+1}= \mathbf w_k - \eta \mathbf H E (\mathbf w_{k})^{-1} \nabla E(\mathbf w_k)}$$
# +
def E(u,v):
    """Energy surface E(u, v) = u^4 + v^4 - 16uv used by Newton's method."""
    total = u ** 4
    total += v ** 4
    total -= 16 * u * v
    return total
eta=1
x=1.2; y=1.2
print (0,'\t','x=', x,'\t','y=',y, '\t','E=',E(x,y))
for i in range(0,10):
d=9*x**2*y**2-16
g=(3*x**3*y**2 -8*y**3 -16*x)/d
h=(3*x**2*y**3 -8*x**3 -16*y)/d
x=x-eta*g
y=y-eta*h
print (i+1,'\t','x=', round(x,3),'\t','y=',round(y,3), '\t','E=',round(E(x,y),3))
# -
# ### Haberman's Survival Data Set
#
# https://archive.ics.uci.edu/ml/datasets/Haberman%27s+Survival
import numpy as np
from sklearn.model_selection import train_test_split
data=np.genfromtxt('../data/haberman.data',delimiter=',')
# + jupyter={"outputs_hidden": true}
data[:10,:]
# -
t_raw = data[:,-1]
X_raw = data[:,:3]
t_raw[:10]
X_raw[:10,:]
t = t_raw%2 # 0: death; 1: survival
O = np.ones(shape=(X_raw.shape[0],1))
X = np.concatenate([X_raw,O],axis=1)
X.shape
# ### Splitting the data
#
# We split the data set into two parts: one for train and the other for test.
X_train, X_test, t_train, t_test = train_test_split(X, t, test_size=0.3)
n_train=X_train.shape[0]
n_test=X_test.shape[0]
print(X_train.shape, t_train.shape)
print(X_test.shape, t_test.shape)
X_train[:10,:]
# Define the function $\sigma(x) = \dfrac {e^x}{e^x+1}= \dfrac 1 {1+e^{-x}}$.
# +
#def sigmoid(x):
# return the function
# -
# Define the error function
# $$ E (\mathbf{w}) = - \frac 1 N \sum_{n=1}^N \{ t_n \ln y_n + (1-t_n) \ln (1-y_n)\}, $$
# where $y_n=\sigma(w_1 x_{n1}+ w_2 x_{n2} + \cdots + w_k x_{nk}+w_{k+1} )$.
#
# This function will be obtained in Logistic Regression.
# +
#def Er(w):
# yn=??
# return ???
# -
# The gradient of $E$ is given by
#
# $$\nabla E= \left [ \frac 1 N \sum_{n=1}^N (y_n-t_n)x_{nj} \right ] = \frac 1 N X^\top (\mathbf y - \mathbf t).$$
#
# +
#def gradE(w):
# yn=??
# return the function
# -
# Set the initial values.
# +
#w=np.array([?,?,?,?])
#eta=
#iter_num=
# -
# Run a loop for gradient descent.
# +
#for i in range(iter_num):
#
# -
print(w)
# We compute the accuracy of the trained model.
t_pred=(sigmoid(X_test@w).round())
print("Train Accuracy:", sum(t_test==t_pred)*100/n_test,"%")
| GradientDescent/lab/gradient-descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %autosave 20
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.vizier import Vizier
from sklearn import cluster
from sklearn import mixture
# +
center_coord = SkyCoord('02h21m00s +57d07m42s')
vizier = Vizier(
column_filters={'Bmag': '<13'}, # ัะธัะปะพ ะฑะพะปััะต โ ะทะฒัะทะด ะฑะพะปััะต
row_limit=10000
)
stars = vizier.query_region(
center_coord,
width=1.5 * u.deg,
height=1.5 * u.deg,
catalog='USNO-A2.0',
)[0]
ra = stars['RAJ2000']._data # right ascension, analog of longitude
dec = stars['DEJ2000']._data # declination, analog of latitude
x = np.stack((ra, dec), axis=1)
i = np.random.choice(x.shape[0], size=x.shape[0], replace=False)
x = x[i]
print(x.shape)
plt.scatter(*x.T)
# +
colors = np.array([[0., 0., 1.], [1., 0., 0.]])
kmeans = cluster.KMeans(n_clusters=2, n_jobs=-1)
y = kmeans.fit_predict(x)
plt.scatter(*x.T, color=colors[y])
# -
gm = mixture.GaussianMixture(n_components=2)
gm.fit(x)
y = gm.predict(x)
plt.scatter(*x.T, color=colors[y])
| misc/jupyter_notebooks/18.11.28/ml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear / Single Variable Regressors
# +
#The explanation was gotten from the towardsdatascience.com website
# -
# A linear regression refers to a regression model that is completely made up of linear variables. Beginning with the simple case, Single Variable Linear Regression is a technique used to model the relationship between a single input independent variable (feature variable) and an output dependent variable using a linear model
# +
#some part of the code gotten from an article on the sci-kit learn official website
# +
#import packages
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_diabetes
import sklearn.metrics as metrics
# %matplotlib inline
import matplotlib.pyplot as plt
# +
x, y = load_diabetes(return_X_y = True)
x = x[:, np.newaxis, 2]
# +
# Split into train/test sets (default 75/25; no random_state is set, so the
# split -- and all downstream metrics -- change on every run).
x_train, x_test, y_train, y_test = train_test_split(x, y)
# +
# Fit an ordinary least-squares model on the training portion.
model = LinearRegression()
model.fit(x_train, y_train)
# +
# Predict on the held-out test set.
prediction = model.predict(x_test)
print(prediction)
# +
# Plot the test points and the fitted line.
# NOTE(review): x_test is unsorted, so plt.plot may draw the line segments
# out of order; sorting by x_test first would give a cleaner line.
plt.scatter(x_test, y_test, color='green')
plt.plot(x_test, prediction, color = 'black', linewidth = 2)
plt.xticks(())  # hide tick labels
plt.yticks(())
plt.show()
# +
# Performance of the model: each metric is computed on the held-out test
# set and rounded to 2 decimal places.
# mean absolute error
mean_absolute_error = round(metrics.mean_absolute_error(y_test, prediction), 2)
# median absolute error (robust to outlying residuals)
median_absolute_error = round(metrics.median_absolute_error(y_test, prediction), 2)
# mean squared error
mean_squared_error = round(metrics.mean_squared_error(y_test, prediction), 2)
# explained variance score (best possible value is 1.0)
explained_variance_score = round(metrics.explained_variance_score(y_test, prediction), 2)
# r2 score (coefficient of determination)
r2_score = round(metrics.r2_score(y_test, prediction), 2)
# +
# output performance measures
print("Model Performance Measures")
print()
print()
print("Mean Absolute Error:", mean_absolute_error)
print("Median Absolute Error:", median_absolute_error)
print("Mean Squared Error:", mean_squared_error)
print("Explained Variance Score:", explained_variance_score)
print("R2 Score:", r2_score)
# -
# ## Explanation of Performance Measures
# #### Mean Absolute Error:
# This is a model evaluation metric used with regression models. The mean absolute error of a model with respect to a test set is the mean of the absolute values of the individual prediction errors on over all instances in the test set.
# #### Median Absolute Error
# To begin, absolute errors refer to the magnitude of the difference between the prediction of an observation and the true value of that observation. The median absolute error takes the median of the absolute errors over all predictions, which makes it robust to outlying errors (unlike the mean).
# #### Mean Squared Error
# Mean squared errors measure the average squared difference between the estimated values and the actual value.
# #### Explained Variance Score
# Explained variance score is used to measure the discrepancy between a model's prediction and actual data provided.
# #### R2 Score
# This is simply the total variance explained by the model over the total variance explained by the model plus the total variance not explained by the model
| Machine-Learning-Concepts/Supervised-Learning/Regression/Linear-Regressor-or-Single-Variable-Regressor/Linear-Regressor-or-Single-Variable-regressor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import everest
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
from astropy.constants import R_sun, R_earth
import astropy.units as u
from robin import TransitModel, TransitParams
zeit = Table.read('data/zeit.csv')
zeit[6]
# +
# Build a transit-model parameter set from row 6 of the ZEIT table: every
# column of the row becomes an attribute of the TransitParams object.
params = TransitParams()
for attr in zeit[6].colnames:
    setattr(params, attr, zeit[6][attr])
params.limb_dark = 'quadratic'
params.u = [0.65, 0.10]   # quadratic limb-darkening coefficients
params.duration = 0.0950  # transit duration (presumably days) -- TODO confirm units
# Use the catalog radius ratio as the starting value for both fitted
# radius-ratio parameters p0 and p1.
params.p0 = params.rp
params.p1 = params.rp
# +
from toolkit import LightCurve
# Load the light curve; stored times are relative to BJD 2454833, so the
# offset is added back here and removed again for plotting.
t, f = np.load('data/211916756.npy')
lc = LightCurve(t + 2454833, f)
# Median-normalise the flux for display.
plt.scatter(lc.times.jd - 2454833, lc.fluxes/np.nanmedian(lc.fluxes), marker='.')
plt.ylim([0.98, 1.02])
plt.xlabel('BJD - 2454833')
plt.ylabel('Flux')
plt.title("EPIC 211916756")
plt.savefig('plots/211916756.pdf', bbox_inches='tight')
# +
# Mask out-of-transit data (keeping a window 3x the transit duration around
# each transit) and split the result into individual transit light curves.
mask_oot = lc.mask_out_of_transit(params, oot_duration_fraction=3)
transits = LightCurve(**mask_oot).get_transit_light_curves(params)
# NOTE(review): the axes array is hard-coded to 8 panels; this raises
# IndexError if more than 8 transits are found.
fig, ax = plt.subplots(1, 8, figsize=(14, 2), sharey=True)
times = []
fluxes = []
for i, transit in enumerate(transits):
    # Remove a per-transit linear baseline before stacking.
    transit.remove_linear_baseline(params, cadence=30*u.min)
    ax[i].scatter(transit.times.jd, transit.fluxes)
    times.extend(transit.times.jd)
    fluxes.extend(transit.fluxes)
times = np.array(times)
fluxes = np.array(fluxes)
fig.tight_layout()
# -
# Fold the times onto the orbital period, centred on the mid-transit epoch,
# so phases run from -per/2 to +per/2.
phased_times = ((times - params.t0) % params.per)
phased_times[phased_times > params.per/2] -= params.per
# Photometric scatter estimated from the out-of-transit points only.
ferr = np.nanstd(fluxes[(phased_times < -params.duration) | (phased_times > params.duration)])
plt.errorbar(phased_times, fluxes, ferr*np.ones_like(fluxes), fmt='.')
# +
from copy import deepcopy
import emcee
def transit_model(p, times):
    """Return a model transit light curve for trial fit parameters *p*.

    p is a 4-sequence (p0, p1, inc, a): two radius-ratio parameters, the
    orbital inclination in degrees, and the scaled semi-major axis a/R*.
    times are phase-folded times (days from mid-transit) -- hence t0 is
    reset to 0 below.
    """
    p0, p1, inc, a = p#, u1, u2 = p
    # Copy the global `params` so trial values never mutate the original.
    trial_params = deepcopy(params)
    trial_params.p0 = p0
    trial_params.p1 = p1
    trial_params.inc = inc
    trial_params.a = a
    trial_params.t0 = 0
    # trial_params.u = [u1, u2]
    # exp_time=0.5/24 days corresponds to the 30-minute long cadence.
    m = TransitModel(trial_params, times, exp_time=0.5/24, supersample_factor=3)
    return m.light_curve(trial_params)
def lnprior(p):
    """Log-prior for the transit fit: 0 inside the allowed box, -inf outside.

    Bounds: 0 <= p0 <= 1, 0 <= p1 <= 1, inc <= 90 degrees, a >= 1.
    """
    p0, p1, inc, a = p
    out_of_bounds = any([
        p0 < 0, p0 > 1,
        p1 < 0, p1 > 1,
        inc > 90,
        a < 1,
    ])
    return -np.inf if out_of_bounds else 0
def lnlike(p, times, fluxes, ferr):
    """Gaussian log-likelihood of the transit model, with the prior folded in."""
    p0, p1, inc, a = p  # unpack to validate the parameter vector length
    if not np.isfinite(lnprior(p)):
        return -np.inf
    residuals = transit_model(p, times) - fluxes
    return -0.5 * np.nansum(residuals**2 / ferr**2)
ndim = 4          # free parameters: p0, p1, inc, a
nwalkers = 2 * ndim
# Out-of-transit scatter reused as the per-point flux uncertainty.
ferr = np.nanstd(fluxes[(phased_times < -params.duration) | (phased_times > params.duration)])
# +
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(phased_times, fluxes, ferr),
                                threads=8)
# Initialise the walkers in a tight Gaussian ball around the catalog values.
p0 = [1e-5 * np.random.randn(ndim) +
      np.array([params.p0, params.p1, params.inc, params.a])# , params.u[0], params.u[1]])
      for i in range(nwalkers)]
# Burn in for 5000 steps, then sample 10000 steps from the burned-in state.
p1 = sampler.run_mcmc(p0, 5000)[0]
sampler.reset()
sampler.run_mcmc(p1, 10000)
sampler.pool.close()
# -
# NOTE(review): `corner` is never imported in this notebook -- this cell
# needs `from corner import corner` (the corner.py package) to run.
corner(sampler.flatchain, labels='p0, p1, inc, a'.split(', ')); # , u1, u2
# Marginal posterior histograms of the two radius-ratio parameters.
plt.hist(sampler.flatchain[:, 0], histtype='stepfilled')
plt.hist(sampler.flatchain[:, 1], histtype='stepfilled')
| 211916756.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 01 - Pitfalls in Data Mining
# ## CSCI E-96
#
# The goal of data mining is to find important relationships in large complex datasets. These dataset typically contain a large number of variables. The **high-dimensional** nature of the data leads to some commonly encountered pitfalls which lead to incorrect inferences.
#
# In this assignment you will gain a bit of experience with three important concepts in data mining:
#
# 1. **False Discovery Rate Control:** The goal of data mining is to find important relationships in large complex datasets. These dataset typically contain a large number of variables. The **high-dimensional** nature of the data leads to some commonly encountered pitfalls which lead to incorrect inferences. A related problem is cutting off a large-scale analysis when a desired relationship is 'found'. This practice of **p-value mining** often leads to unwarranted inferences. You will apply false discovery rate (FDR) control methods to address this problem.
# 2. **Key-Value Pairs:** Large scale data is typically managed using key-value (KV) pairs. The exercises in this assignment give you some experience working with KV pair data management.
# 3. **Map and Reduce Processes:** Much of large scale data mining requires use of a split-apply-combine approach. The data is split into manageable chunks, analytic transformations are applied, and the result combined or aggregated. A commonly used class of a split-apply-combine algorithm is MapReduce.
#
# In order to keep the scope of this assignment manageable, you will use limited versions of KV pair management and MapReduce. Specifically, you will use common Python tools to implement these concepts rather than dedicated large scale analytic platforms.
# ## Multiple Hypothesis Testing
#
# Testing multiple hypotheses in high-dimensional data can be problematic. Exhaustively testing all pairwise relationships between variables in a data set is a commonly used, but generally misleading, form of **multiple comparisons**. The chance of finding false significance using such a **data dredging** approach can be surprisingly high.
#
# In this exercise you will perform multiple comparisons on only 20 **identically distributed independent (iid)** variables. Ideally, such tests should not find significant relationships, but the actual result is quite different.
#
# To get started, execute the code in the cell below to load the required packages.
# +
import pandas as pd
import numpy as np
import numpy.random as nr
from scipy.stats import ttest_ind, f_oneway
from itertools import product, combinations
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.stats.multitest as smt
pd.set_option("display.max_rows", None, "display.max_columns", None)
# -
# In this exercise you will apply a t-test to all pairwise combinations of identical Normally distributed variables. In this case, we will create a data set with 20 iid Normal distributions of 1000 samples each. Execute the code in the cell below to find this data and display the mean and variance of each variable.
# +
ncolumns = 20
nr.seed(234)  # fixed seed so the false-discovery counts are reproducible
# 1000 samples from each of 20 iid standard-normal variables (columns).
normal_vars = nr.normal(size=(1000,ncolumns))
print('The means of the columns are\n', np.mean(normal_vars, axis = 0))
print('\nThe variances of the columns are\n', np.var(normal_vars, axis = 0))
# -
# Notice that means and variances are close to 0.0 and 1.0 respectively. As expected, there is not much difference between these variables.
#
# How many of these t-tests will show **significance** at the 0.05 cut-off level? There are 380 pairwise combinations, so we expect to find a number of falsely significant test results at this level. To find out, complete and execute the code in the cell below to filter the test results and print those that show significance.
# ### Creating a hash
#
# The goal of this exercise is to compute pairwise hypothesis tests of the differences in means for each of the iid Normal vectors. As an intermediate step you will create a Pandas data frame using a hash of the keys of the vectors. The data frame will contain the **key-value**, $(K,V)$, pairs. Each key must represent an index for the two vectors used to compute the test statistic. The keys will then be used to index the results of these hypothesis tests.
#
# The question is, how can we create a hash from the keys for the pair of vectors? In this case to we will use a simple, but far from optimal hash. For the two vector indicies $i, j$, for some key and modulo, $m$, we will compute the hash as:
#
# $$h(i,j) = (i + key*j) mod m$$
#
# > **Computational Note:** The Pandas data frame is an efficient and reasonably scalable **hash table**. The hash function used depends on the type of the key; integer, string, etc. The resulting dictionary of key-value pairs, $(K,V)$, can therefore be access in far less than linear time, often about $O(log(N))$.
#
# If you are not familiar with Python dictionaries you can find a short tutorial [here](https://www.tutorialspoint.com/python_data_structure/python_hash_table.htm), as well as many other places on the web.
# > **Exercise 1-1:** Given that our space of vectors is actually quite small, just 20, we do not need a sophisticated and scalable hash function. This hashed key will then be used to store and retrieve the values using a Python dictionary, in about $O(log(N))$ time.
#
# > In this exercise you will test a simple hash function and its inverse. Examine the code below and notice that the hash function encodes the two indexes into a single integer by simple additional and multiplication. The modulo operation limits the size of the hash table. However, to keep things simple you will not need to implement any hash collision resolution mechanism. As a result, the size of the table is set much larger than required.
#
# > To test this hash, do the following:
# > 1. Create a function called hash function to compute the hash. The arguments to the function are $i$ and $j$, the `hash\_key` and the `modulo\_multiplier`. The defaults of the arguments are $hash\_key=1024$ and $modulo\_multiplier=32$. The modulo number is $hash\_key * modulo\_multiplier$, e.g. $modulo = 32,768$. The multiplier is the ratio of expected values stored, $n$, to the number of unique hash keys, $m$, e.g. the ratio $m/n$.
# > 2. Using the Python [ittertools.combinations](https://docs.python.org/3/library/itertools.html#itertools.combinations) function create all unique pairwise combinations of indexes i and j. The arguments to this function are the indexes to the iid Normal vectors. The iterator is `range(ncolumns)` choose 2, since these comparisons are pairwise.
# > 3. Within this loop call the hash with the values of $i$ and $j$ as arguments.
# > 3. On a single line print the following; the values of i and j, the hash key value, but only if $i \le 6$. The restriction is to keep the printed output shorter.
# +
def hash_function(i, j, hash_key=1024, modulo_multiplier=32):
    """Hash the index pair (i, j) into a single integer bucket.

    The hash is ((i + hash_key) * j) mod (hash_key * modulo_multiplier).
    With the defaults the table has 32768 buckets, far more than the 190
    pairs stored, so collisions are avoided without a resolution scheme.

    Parameters
    ----------
    i, j : int
        Indexes of the two vectors being compared.
    hash_key : int, optional
        Multiplier used to spread the pairs across buckets.
    modulo_multiplier : int, optional
        Ratio of bucket count to expected number of entries (m/n).

    Returns
    -------
    int
        Bucket index in [0, hash_key * modulo_multiplier).
    """
    return ((i + hash_key) * j) % (hash_key * modulo_multiplier)
# Build the hash table for every unique pair (i, j) of vector indexes.
# `hash` maps (i, j) -> bucket and `harr` records the buckets in order;
# both are reused by later cells.  (The name `hash` shadows the builtin,
# but later cells depend on it, so it is kept.)
hash = {}
harr = []
combCount = 1
for i, j in combinations(range(ncolumns), 2):
    hash[(i, j)] = hash_function(i, j)
    harr.append(hash[(i, j)])
    # Print only the first few vector indexes to keep the output short.
    if i <= 6:
        print('COUNT: '+ str(combCount) + ': hash[(' + str(i)+ ' , '+ str( j) + ')] : ' + str(hash[i,j]))
        combCount += 1
# -
# > Examine the key pairs and the hash values. The question is, are there any hash collisions? This can be done as follows:
# ANSWER: No there are not. I checked the Reoccurrances of each hash value and they were 1
# > 5. Compute a list of the hash values for all combinations of $i$ and $j$.
# ANSWER: Shown in the above cell
# with hash[(i,j)] : hash value listed for each count
# > 6. Print the length of the list.
# > 7. Print the length of the unique values of the hash. You can find the unique values in a list with the [numpy.unique](https://numpy.org/doc/stable/reference/generated/numpy.unique.html) function.
# +
## Put your code below.
# Count how many times each hash value occurs; any count > 1 would be a
# collision.  `ui` holds the unique hash values, `harr_u` their counts.
ui, harr_u = np.unique(harr, return_counts = True)
print(' The Length of the list is: '+ str(len(harr)))
print(' The Total Number of unique instances is: '+ str(len(harr_u)))
# -
# > Examine the results you have printed. Is there any evidence of hash key collisions?
# > The ratio of $m/n$ is deliberately kept high since the simple hash function has no collision resolution mechanism. Optionally, you can try reducing this ration (the multiplier) to 16 and 8, noting the increase in hash collisions.
# > **End of exercise.**
#
# ANSWER: The length of the list is 190/ The Total number of unique instatnces is 190.
# So there are no evidence of hash collisions in this case.
# No: There is no evidence of hash key collisions. There are 190 Combinations with 190 unique values. Each reoccurance has been determined for the unique hash values indicating the number of collisions.
# ### The map process
#
# We are constructing this example a map and a reduce process. The processes are intended to compute the hypothesis test for differences of means between all the pairs of vectors. The first step is the map process, which creates the keys, or values of $i$ and $j$ for these pairs.
#
# > **Exercise 1-2:** You will now create the code for the map task which build a data frame with $i, j$ key pairs indexed by the hash. By the following steps you will create code that represents a map task.
# > 1. Create a data frame with two columns $i$ and $j$ with rows $= hash_key * modulo_multiplier $ and set all values to $= numpy.nan$.
# > 2. Create a loop over all combinations of the pairs of i and j. Done
# > 3. Compute the hash key value for the indexes, i and j.
# > 4. Add the $i$ and $j$ values to the row indexed by the hash key.
# > 5. Return the hash table. ANSWER: used the hash function as the index
# > 6. Execute the function to create the hash table.
# > 7. Compute and print the length of the hash table.
# +
def map_hypothesis(vars, hash_key=1024, modulo_multiplier=32):
    """Map task: build the hash table of vector index pairs.

    For every unique pair (i, j) of columns in *vars*, compute the hash of
    the pair and record the keys in a data frame indexed by that hash.

    Parameters
    ----------
    vars : ndarray
        2-d array whose columns are the vectors to be compared.
    hash_key, modulo_multiplier : int, optional
        Forwarded to `hash_function`; defaults match that function.

    Returns
    -------
    pandas.DataFrame
        Columns ['i', 'j', 'hash'], one row per pair, indexed by the hash.
    """
    rows = []
    hashes = []
    for i, j in combinations(range(vars.shape[1]), 2):
        h = hash_function(i, j, hash_key, modulo_multiplier)
        rows.append((i, j, h))
        hashes.append(h)
    return pd.DataFrame(rows, columns=['i', 'j', 'hash'], index=hashes)

# Execute the map task and report the size of the resulting table.
hash_table = map_hypothesis(normal_vars)
print('Length of Hash Table is: '+ str(len(hash_table)))
# -
# > **End of exercise.**
# ### The shuffle and reduce task
#
# Now that you have the keys for the pairwise combinations of the vectors it is time to perform the reduce process. The reduce process computes the pair-wise t-statistics and p-values. These statistical values are indexed by the keys of the pair of vectors. This process reduces the full vectors of values down to just two numbers for each pair of vectors.
#
# > **Exercise 1-3:** You will now create and apply the following code for the reduce process:
# > 1. Create an empty data frame with columns, `i`, `j`, `t_statistic`, and `p_value`.
# > 2. Using a for loop iterate over all possible (hashed) keys of the data frame. An if statement is used to test if these are valid values of the key, i. Use the [numpy.isnan](https://numpy.org/doc/stable/reference/generated/numpy.isnan.html) function for this test.
# > 3. Extract the values of i and j from the input data frame.
# > 4. Using keys, compute the t-statistic and p-value using [scipy.stats import ttest_ind](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html).
# > 5. Append a row to the output data frame.
# > 6. Return the data frame, sorted in ascending order, using the [Pandas.DataFrame.sort_values](https://turned.pydata.org/docs/reference/api/pandas.DataFrame.sort_values.html) method and re-indexed using the [Pandas.DataFrame.reset_index](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.reset_index.html) method.
# > 7. Execute your function and save the returned data frame.
# +
def reduce_significance(hash_table, values):
    """Reduce task: pairwise t-tests for every key pair in *hash_table*.

    For each valid row of the hash table, run a two-sample t-test between
    the two columns of *values* named by that row, and flag significance
    at the 0.05 level.

    Parameters
    ----------
    hash_table : pandas.DataFrame
        Output of the map task; columns ['i', 'j', 'hash'].
    values : ndarray
        2-d array whose columns are the vectors being compared.

    Returns
    -------
    pandas.DataFrame
        Columns ['i', 'j', 'hash', 't_statistic', 'p_value', 'sig'],
        sorted by ascending p-value and re-indexed from 0.
    """
    sig_level = 0.05
    rows = []
    for row_num in range(hash_table.shape[0]):
        # Skip empty buckets (rows whose i key is NaN).
        if np.isnan(hash_table.iloc[row_num, 0]):
            continue
        i = int(hash_table.iloc[row_num, 0])
        j = int(hash_table.iloc[row_num, 1])
        h = hash_table.iloc[row_num, 2]
        # One t-test per pair; the result carries both statistic and p-value.
        result = ttest_ind(values[:, i], values[:, j])
        sig = 'SIG' if result.pvalue <= sig_level else 'Not-Sig'
        rows.append((i, j, h, result.statistic, result.pvalue, sig))
    test_results = pd.DataFrame(
        rows, columns=['i', 'j', 'hash', 't_statistic', 'p_value', 'sig'])
    return test_results.sort_values('p_value', axis=0, ascending=True).reset_index(drop=True)
#7. Execute your function and save the returned data frame
test_stats = reduce_significance(hash_table, normal_vars)
test_stats.to_csv("test_stats.csv")
# -
# > 8. In the cell below, create a filter for pair test cases which are significant and save these cases in a data frame.
# > 9. Print the number (len) of significant results.
# > 10. Print the rows with the significant test results.
# +
significance_level = 0.05
# NOTE(review): this recomputes the reduce step even though `test_stats`
# was already produced in the previous cell.
test_stats = reduce_significance(hash_table, normal_vars)
## Put your code below.
# 8. Filter the pairwise test cases that are flagged significant and save
#    them in a data frame.
sig_filter_arr = test_stats.loc[test_stats['sig']=='SIG']
df_sig = pd.DataFrame(sig_filter_arr)
# 9. Print the number of significant results.
# 10. Print the rows with the significant test results.
print('9. The length of the Significant Results are: ' + str(test_stats.loc[test_stats['sig']=='SIG'].shape[0]))
print('10. The Significant Test Results are as Follows:')
print(test_stats.loc[test_stats['sig']=='SIG'])
# -
# > Notice the large number of apparently significant tests. Answer the following questions:
# > 1. Is the number of false positive cases higher than expected?
# ANSWER 1. 22/190 = 0.11578. Yes this is > our normal 5% acceptable cutoff for significance level
# > 2. Examine which of the iid Normal vectors contribute to the false positive results. Are there vectors which contribute multiple times?
# ANSWER 2. It looks like Column 0 is a high contributor to the false positive cases
# > **End of exercise.**
# Yes. There seems to be an abundant number of possible false positives. Column vector '0' seems to contribute to a lot of these significant cases.
# ### Bonferroni correction
#
# Several adjustments to the multiple comparisons problem have been proposed. In 1961, Dunn published a method known as the **Bonferroni correction**. The Bonferroni correction is a widely used method to reduce the false positive rate of hypothesis tests. The adjustment is simple:
# $$\alpha_b = \frac{\alpha}{m}\\
# with\\
# m =\ number\ of\ groups$$
#
# Can the Bonferroni correction help? Yes, by greatly increasing the confidence level required for a statistically significant result. The problem with the Bonferroni correction is the reduction in power as the corrected significance threshold grows smaller. For big data problems with large numbers of groups, this issue can be especially serious.
#
# **Exercise 1-4:** You will now apply the Bonferroni correction to the iid Normal vectors. To do so, you will compute the Bonferroni threshold and the apply it to the p-values:
# +
## Exercise 1-4
## m = number of groups
## alpha = confidence = 0.05
def reduce_significance_bonferroni(hash_table, values, bon_alpha):
    """Reduce task with a Bonferroni-corrected significance threshold.

    Identical to `reduce_significance` except that each pairwise t-test is
    flagged significant at the caller-supplied Bonferroni level
    *bon_alpha* (= alpha / m) instead of the raw 0.05 level, and the
    significant rows are printed before returning.

    Parameters
    ----------
    hash_table : pandas.DataFrame
        Output of the map task; columns ['i', 'j', 'hash'].
    values : ndarray
        2-d array whose columns are the vectors being compared.
    bon_alpha : float
        Bonferroni-corrected significance cutoff applied to each p-value.

    Returns
    -------
    pandas.DataFrame
        Columns ['i', 'j', 'hash', 't_statistic', 'p_value', 'sig'],
        sorted by ascending p-value and re-indexed from 0.
    """
    rows = []
    for row_num in range(hash_table.shape[0]):
        # Skip empty buckets (rows whose i key is NaN).
        if np.isnan(hash_table.iloc[row_num, 0]):
            continue
        i = int(hash_table.iloc[row_num, 0])
        j = int(hash_table.iloc[row_num, 1])
        h = hash_table.iloc[row_num, 2]
        # One t-test per pair; the result carries both statistic and p-value.
        result = ttest_ind(values[:, i], values[:, j])
        sig = 'SIG' if result.pvalue <= bon_alpha else 'Not-Sig'
        rows.append((i, j, h, result.statistic, result.pvalue, sig))
    test_results = pd.DataFrame(
        rows, columns=['i', 'j', 'hash', 't_statistic', 'p_value', 'sig'])
    # Report the tests that remain significant under the corrected level.
    print('9. The length of the Significant Results are: ' + str(test_results.loc[test_results['sig']=='SIG'].shape[0]))
    print('10. The Significant Test Results are as Follows:')
    print(test_results.loc[test_results['sig']=='SIG'])
    return test_results.sort_values('p_value', axis=0, ascending=True).reset_index(drop=True)
alpha = .05
m = 20
# Ex 1-4: apply the Bonferroni correction to the iid Normal vectors by
# computing the corrected threshold and applying it to the p-values.
# NOTE(review): the Bonferroni correction conventionally divides alpha by
# the number of *tests* (190 pairs here), not the number of vectors (20);
# m = 20 follows the assignment's definition but gives a milder correction.
bon_nu_alpha = alpha/m
print('bon_nu_alpha: ' + str(bon_nu_alpha))
bon_test_stats = reduce_significance_bonferroni(hash_table, normal_vars,bon_nu_alpha)
# -
# > Even with the Bonferroni correction we have some false significance tests, if only just barely!
# > **End of exercise.**
#
# But, can we detect small effect with Bonferroni correction, as this method significantly reduces power of tests? Execute the code in the cell below, which compares a standard Normal to a Normal with a small mean (effect size), to find out.
# +
nr.seed(567)
# Compare a standard Normal column to a Normal with a small mean shift
# (effect size 0.01) to see whether the corrected threshold can detect it.
ttest_ind(normal_vars[:,0], nr.normal(loc = 0.01, size=(1000,1)))
# -
# Given the Bonferroni correction, this difference in means would not be found significant. This illustrates the downside of the correction, which may prevent detection of significant effects, while still finding false significance.
# ## False Discovery Rate Control Methods
#
# We have seen the potential pitfalls of multiple hypothesis testing. Further, we have seen that a simple approach to **false discovery rate (FDR) control** is not effective. You will now apply more sophisticated FDR control methods to control the FDR.
#
# Inflammatory bowel disease is an auto immune disease that is characterized by chronic inflammation in the digestive tract. In 2020, there were around 2.5 million people with inflammatory bowel disease in the United States. It is estimated that the prevalence of IBD among U.S. population will rise to around 3.5 million by 2030.There are two forms of IBD: Ulcerative Colitis (UC) and Crohnโs disease (CD).
#
# The specific problem we will explore is to determine which genes lead to expression of a certain disease. In this example, there are gene expression data for 97 patients. Some of these patients have ulcerative colitis and others have Crohn's disease, which are believed to be genetically inherited.
#
# One approach to this problem is to perform hypothesis tests on the expression of the genes between patients with the two conditions. Since there are over 10,000 genes there is considerable chance for false discovery. Therefore, careful application of FDR control is required.
#
# To continue with the example, execute the code in the cell below to load the data and print the dimensionality of the data frame.
# Gene-expression table: one row per patient, a 'Disease State' label
# column plus one column per gene.
gene_data = pd.read_csv('../data/ColonDiseaseGeneData-Cleaned.csv')
print('The Dimensions of gene_data: ' + str(gene_data.shape))
# There are data from 97 patients for 10,497 genes. A large number of hypothesis tests are required!
#
# Execute the code in the cell below to view the first 5 columns of the data frame, which includes the expression of the first 4 genes.
# +
print(gene_data.iloc[:,:5].tail(5 ))
# Show the first and last five rows of the first five columns together.
gd_head = pd.DataFrame(gene_data.iloc[:,:5].head())
gd_tail = pd.DataFrame(gene_data.iloc[:,:5].tail(5))
#gd_list = list(gene_data)
abridgd_gd = pd.concat([gd_head, gd_tail])
print(abridgd_gd)
# NOTE(review): this groupby result is discarded -- the call has no effect.
gene_data.groupby(['Disease State'])
# -
# ### Holm's method
#
# You will apply two FDR control methods to these data. These methods attempt to control the FDR while not being overly conservative like the Bonferroni correction. The first of these is Holm's method.
#
# The Holm's method operates on the ordered set of p-values, $D = \{ p_{(1)}, p_{(2)}, p_{(3)}, \ldots, p_{(n)} \}$. The threshold for the $ith$ p-value, $p(i) is:
#
# $$p(i) \le Threshold(Holm's) = \frac{\alpha}{N - i + 1}$$
#
# For example: for the 10th ordered p-value with 1,000 total tests (genes) and significance level of 0.05, the cutoff is:ย ย
#
# $$p(10) \le \frac{0.05}{1000 - 10 + 1} = 0.00005045$$
# ### Map process
#
# > **Exercise 01-4:** To start the processing of these data you will first create and execute code for a map process. The map process groups the data by the patient's disease into data frame, ulcerative, crohns. The keys for each of these key-value pairs are the gene identifier. Notice that one key is all that is needed in this case. Now do the following to create and execute a function, `map_gene`:
# > 1. Create a logical mask and group the values by `Disease State` into two data frames.
# > 2. Return the transpose of the two data frames, removing the `Disease State` values. The result of this operation should be data frames with gene expressions in the columns and the gene identifier as the row index.
def map_gene(gene_data):
    """Map task: split the patients by disease state.

    Parameters
    ----------
    gene_data : pandas.DataFrame
        One row per patient; a 'Disease State' label column plus one
        column per gene.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        (ulcerative, crohns): transposed expression tables with one row
        per gene (gene identifier as the index) and one column per
        patient.
    """
    labels = gene_data['Disease State']
    # Select each patient group, drop the label column (non-inplace drop
    # avoids the chained-assignment warning the original triggered), and
    # transpose so genes become the row index.
    ulcerative = (gene_data.loc[labels == "Ulcerative Colitis (UC)"]
                  .drop(columns='Disease State')
                  .transpose())
    crohns = (gene_data.loc[labels == "Crohn's Disease (CD)"]
              .drop(columns='Disease State')
              .transpose())
    return ulcerative, crohns
#u = map_gene(gene_data)
#2c.The result of this operation should be data frames with gene expressions in the columns and the gene identifier as the row index.' )
# Run the map step: one transposed expression frame per disease cohort.
ulcerative, crohns = map_gene(gene_data)
# > 3. Execute the code in the cells below to display the heads of these data frames and examine the results.
ulcerative.head()
crohns.head()
# ### Reduce process
#
# > **Exercise 01-5:** With the key-value pairs organized by disease state, it is time to create and execute code of a reduce process. The reduce process will compute the pairwise t-statistics and p-values for each gene and return the sorted results. Specifically, your `gene_test` with arguments of the two mapped data frames will do the following:
# > 1. Create an empty data frame with columns gene, t_statistics, and p-value.
# > 2. A for loop iterates over the keys of either of the data frames.
# > 3. Compute the t-statistic and p-value for the gene (key).
# > 4. Append the results to the data frame.
# > 5. Sort the results data frame, inplace, into ascending order.
# > 6. Return the resulting data frame.
# +
def gene_test(ulcerative, crohns):
    """Reduce step: pairwise two-sample t-test of the cohorts per gene.

    Parameters
    ----------
    ulcerative, crohns : pandas.DataFrame
        Mapped frames from `map_gene`: gene identifiers as the row
        index, one column per patient. Both share the same row index.

    Returns
    -------
    pandas.DataFrame
        Columns ['gene', 't_statistic', 'p_value'], sorted in place into
        ascending p-value order (original integer index retained).
    """
    rows = []
    # 2. Iterate over the keys (gene identifiers) of either frame.
    for gene in crohns.index:
        # 3. One t-test per gene yields both the statistic and p-value
        #    (the original ran ttest_ind twice per gene).
        result = ttest_ind(ulcerative.loc[gene, :], crohns.loc[gene, :])
        # 4. Accumulate rows; build the frame once at the end.
        rows.append([gene, result.statistic, result.pvalue])
    # 1. Results frame with the required columns. NOTE: the original
    #    `test_results.set_index =('gene')` assigned a tuple to an
    #    attribute instead of calling DataFrame.set_index(); it never set
    #    the index and shadowed the method, so it is removed — downstream
    #    code expects 'gene' to remain an ordinary column.
    test_results = pd.DataFrame(rows, columns=['gene', 't_statistic', 'p_value'])
    # 5. Sort the results data frame, in place, into ascending order.
    test_results.sort_values('p_value', axis=0, ascending=True, inplace=True)
    # 6. Return the resulting data frame.
    return test_results
# Run the reduce step over the mapped cohorts and inspect the result.
gene_statistics = gene_test(ulcerative, crohns)
print(gene_statistics.head())
gene_statistics.shape
# -
# ### Significance of results
#
# With the gene data reduced to the t-test statistics, you will now determine the significance of these tests. It is important to understand that scientists believe that expression of a disease, like Crohn's, is only in a small number of genes.
#
# > **Exercise 01-6:** As a first step in understanding the gene expression significance complete and execute the code in the cell below to find the number of 'significant' genes using the simple single hypothesis test cutoff criteria.
# +
significance_level =0.05
## Put your code below.
##gene_statistics = gene_test(ulcerative, crohns)
# NOTE(review): significance_level is assigned twice with the same value;
# the assignment below is redundant.
significance_level = 0.05
def find_the_significance(vars, sig_level):
    """Flag each test whose p-value is at or below a single cutoff.

    Parameters
    ----------
    vars : pandas.DataFrame
        Test results with a 'p_value' column (e.g. from `gene_test`).
        Modified in place: a boolean 'significance' column is added.
    sig_level : float
        Significance cutoff (alpha), e.g. 0.05.

    Returns
    -------
    pandas.DataFrame
        The same frame, with the added 'significance' column.
    """
    # NOTE: the parameter name `vars` shadows the builtin; kept as-is for
    # backward compatibility with existing keyword callers.
    # Vectorized comparison replaces the original row-by-row loop that
    # read p_value by position (`iloc[v][2]`); selecting by column name
    # is equivalent for these frames and robust to column reordering.
    vars.loc[:, 'significance'] = vars['p_value'] <= sig_level
    return vars
#print(vars)
# Flag genes at the naive (uncorrected) alpha = 0.05 cutoff and count them.
df = find_the_significance(gene_statistics, significance_level)
num_sig =len(df.loc[df.loc[:,'significance']== True,:])
print('The number of significant genes are: ' + str(num_sig))
print(' Using the simple single hypothesis test cutoff criteria of alpha = 0.05.')
# -
# > Does this large number of 'statistically significant' results appear credible, given that only a few genes are thought to have significant expression for this disease?
#
# ANSWER: The total number of genes are: 10497 and 112/10497 gives 0.01067 or 1.06 %. This means
# there are within a reasonably acceptable range for Type I errors. However it also means that
# we would have 112 significant cases to explore. Would that mean although 0.01 acceptance of
# Type I erros, I feel 112 is a lot to ask for.
# > **End of exercise.**
#
# > **Exercise 01-7:** We have already seen that the Bonferroni correction is a rather conservative approach to testing the significance of large numbers of hypotheses. You will now use the Bonferroni correction to test the significance of the gene expression, by completing the code in the cell below.
# +
## Put your code below.
# Bonferroni correction: test every p-value against alpha / m, where m is
# the number of hypotheses (genes) tested. m is derived from the data
# rather than hard-coded (it is 10497 for these data).
m = len(gene_statistics)
alpha = 0.05
bon_nu_alpha = alpha / m
# Using a previously defined function from above
df = find_the_significance(gene_statistics, bon_nu_alpha)
num_sig = len(df.loc[df.loc[:, 'significance'] == True, :])
print('Exercise 01-7:')
print('The number of significant genes using the Bonferroni Correction are: ' + str(num_sig))
print(' Using the corrected alpha test cutoff criteria 0.05/10497 = 0.00000476 as the new alpha.')
# -
# > The foregoing result seems reasonable, but is it too conservative?
# > **Exercise 01-08:** You will now apply the Holms method to determining significance of the gene expression test results. In the cell below complete the `holms_significance` function with arguments of the results data frame and the significance level. This function does the following:
# > 1. Find the number of test results and compute the numerator used for the cutoff calculation.
# > 2. Compute the vector of thresholds using the Holms formula. Use the Python `range`function to get the values of the index i. But, keep in mind that range produces a zero-indexed iterator, and the algorithm needs a one-indexed list. Use the [numpy.divide](https://numpy.org/doc/stable/reference/generated/numpy.divide.html) function to perform the vector divide. Save these threshold values in a data frame in a 'holms_threshold' column.
# > 3. Using the threshold values compute a logical vector and save it in a column names 'significance' in the data frame.
# > 4. Return the data frame.
# > Finally, execute the function and save the results in a data frame. Then find the length of the subset where the 'significance' value is True.
# +
def holms_significance(test_results, significance):
    """Holm's step-down multiple-testing control.

    The i-th smallest p-value (1-indexed rank i out of N tests) is
    declared significant when

        p_(i) <= significance / (N - i + 1)

    Parameters
    ----------
    test_results : pandas.DataFrame
        Must contain a 'p_value' column (e.g. output of `gene_test`).
        The caller's frame is not modified.
    significance : float
        Family-wise significance level alpha.

    Returns
    -------
    pandas.DataFrame
        Copy sorted by ascending p-value with the index reset, plus
        columns 'holms_threshold' and 'significance', and bookkeeping
        columns 'row_num1' (1-based rank) and 'i' (0-based rank) kept
        for compatibility with the original output.
    """
    # 1. Number of tests; the numerator of every cutoff is just alpha.
    n_tests = len(test_results)
    # Work on an explicitly ordered copy so the thresholds line up with
    # the sorted p-values even if the caller passes an unsorted frame
    # (the original relied on the input already being sorted).
    ordered = test_results.sort_values('p_value', axis=0, ascending=True).copy()
    # 2. Vector of thresholds via the Holm formula. arange is
    #    zero-indexed, so ranks = index + 1 gives the one-indexed i.
    #    BUG FIX: the original denominator was `nrows + 1 - irow + 1`,
    #    i.e. N - i + 3 in one-indexed terms, which made every cutoff
    #    smaller (more conservative) than Holm's N - i + 1.
    ranks = np.arange(1, n_tests + 1)
    thresholds = np.divide(significance, n_tests - ranks + 1)
    ordered.loc[:, 'holms_threshold'] = thresholds
    # 3. Logical vector: each p-value against its own threshold, saved
    #    in the 'significance' column.
    ordered.loc[:, 'significance'] = ordered['p_value'].to_numpy() <= thresholds
    ordered.loc[:, 'row_num1'] = ranks
    ordered.loc[:, 'i'] = ranks - 1
    # 4. Return the resulting data frame.
    return ordered.reset_index(drop=True)
## Now we test the significance of the ordered p-values
holms_results = holms_significance(gene_statistics, significance_level)
#1a. Find the number of test results and compute the numerator used for the cutoff calculation
print('The number of test results used are: '+ str(len(holms_results)))
print('The number of significant Holms test results are : '+ str(len(holms_results.loc[holms_results.loc[:,'significance']== True,:])))
print(' ')
print('The first (5) elements of the Holms_Results dataframe are listed below:')
print(' ')
print(holms_results.head())
#1?. Find the number of test results. These are the number of Significant Holms results
# -
# > Despite the general properties that the Holm's method is considered less conservative than the Bonferroni correction the results agree in this case. Does this agreement give you some confidence in the result and why?
# ANSWER: Yes. Using two different means to predict the significant genes resulting in the same number of elements validates these two methods. However we probably can't say for sure that each of the
# elements are the same. :) Maybe for another exercise
# > **End of exercise.**
# You can visualize the results of the Holm's method test. The plot has two key elements:
# 1. Plot the curve of the p-values vs. the order number, i. The line is color coded by significance or not.
# 2. Plot the threshold line. This line is straight since the threshold is a linear function of i.
# +
#print(results)
def plot_significance(results, threshold):
results['number'] = range(len(results))
#results['number'] = results.index
fig, ax = plt.subplots(figsize=(8, 6))
sns.lineplot(x='number',y=threshold, data=results, ax=ax, color='black', linewidth=0.5)
sns.scatterplot(x='number',y='p_value', hue='significance', data=results, s=3, ax=ax)
ax.set_title('Significance of gene expression')
ax.set_xlabel('Gene number')
ax.set_ylabel('p-value')
plot_significance(holms_results.iloc[:500,:], 'holms_threshold')
#plot_significance(holms_results.iloc[0:100,:], 'p_value')
# +
#print(results)
def plot_significance(results, threshold):
results['number'] = range(len(results))
#results['number'] = results.index
fig, ax = plt.subplots(figsize=(8, 6))
sns.lineplot(x='number',y=threshold, data=results, ax=ax, color='black', linewidth=0.5)
sns.scatterplot(x='number',y='p_value', hue='significance', data=results, s=10, ax=ax)
ax.set_title('Significance of gene expression')
ax.set_xlabel('Gene number')
ax.set_ylabel('p-value')
#plot_significance(holms_results.iloc[:500,:], 'holms_threshold')
plot_significance(holms_results.iloc[0:100,:], 'holms_threshold')
# -
# Notice the following about this plot:
# 1. The p-value significance line crosses the threshold point at an apparent break point.
# 2. The significant p-values are all very small since there are so many tests.
# ### Benjamini-Hochberg FDR Control
#
# The Benjamini-Hochberg FDR control algorithm is another way to control false discoveries. Starting with an ordered set of $n$ p-values, $D = \{ p_{(1)}, p_{(2)}, p_{(3)}, \ldots, p_{(n)} \}$, we define a false discovery rate, $q$:
#
# $$FDR(D) \le q$$
#
# The cutoff threshold for the ith p-value is then:
# $$p_{(i)} \le Threshold(D_q) = \frac{q}{n} i$$
# > **Exercise 01-9:** In this exercise you will apply the Benjamini-Hochberg FDR control algorithm for testing the significance of the gene expressions. The `BH_significance` function is quite similar to the Holm's method function you have already created. Given the large number of genes you must use a low false discovery rate, $0.001$, or 1 out of 1,000.
# > Execute your function, saving the result. Then print the number of significant cases.
# +
def BH_significance(test_results, false_discovery_tollerance):
    """Benjamini-Hochberg step-up FDR control.

    With p-values sorted ascending, the i-th (1-indexed, out of n) has
    threshold q*i/n. Let k be the largest rank with p_(k) <= q*k/n; all
    ranks 1..k are declared significant (the classic step-up rule), even
    if some intermediate p-values exceed their own thresholds.

    Parameters
    ----------
    test_results : pandas.DataFrame
        Must contain a 'p_value' column. The caller's frame is not
        modified.
    false_discovery_tollerance : float
        The FDR bound q (parameter name kept for compatibility).

    Returns
    -------
    pandas.DataFrame
        Sorted ascending by p-value with the index reset, plus added
        'bh_threshold' and 'significance' columns.
    """
    q = false_discovery_tollerance
    n_tests = len(test_results)
    # BUG FIX: the original called sort_values without assigning the
    # result, so the sort was silently discarded.
    df_temp = test_results.sort_values('p_value', axis=0, ascending=True).reset_index(drop=True)
    # BUG FIX: the original used the zero-based loop index (q*irow/n),
    # which gave a zero threshold for the smallest p-value; BH ranks are
    # one-indexed.
    ranks = np.arange(1, n_tests + 1)
    thresholds = np.divide(q * ranks, n_tests)
    df_temp.loc[:, 'bh_threshold'] = thresholds
    below = df_temp['p_value'].to_numpy() <= thresholds
    # Step-up rule: everything at or below the largest qualifying rank is
    # significant. (The original bottom-up loop iterated over an empty
    # range — step -1 toward a larger bound — so it never ran; had it
    # run, it referenced an undefined name `bottomup`, and `max_index`
    # was unbound whenever no test qualified.)
    significance = np.zeros(n_tests, dtype=bool)
    if below.any():
        last_ok = int(np.nonzero(below)[0].max())
        significance[: last_ok + 1] = True
    df_temp.loc[:, 'significance'] = significance
    return df_temp
#Exercise 01-9:
# Apply BH control at q = 0.001 and, for comparison, the stricter q = 0.0001.
BH_results = BH_significance(gene_statistics, 0.001)
print(' Using the Benamini-Hochberg FDR Control:')
print(' The number of total results were : ' + str(len(BH_results)))
print(' ')
print(' Using the discovery rate, 0.001:')
print(' # Signifcant Results : ' + str(len(BH_results.loc[BH_results.loc[:,'significance'],:])))
print(' ')
BH_0001 = BH_significance(gene_statistics, 0.0001)
print(' Using the discovery rate, 0.0001:')
print(' # Signifcant Results : ' + str(len(BH_0001.loc[BH_0001.loc[:,'significance'],:])))
print('')
# -
# > The result is similar to the first two FDR control methods. Given the false discovery parameter of 0.0001 do you think this is a reasonable result?
#
# Yes. There are 70 discoveries from the BH test using a False Discover Rate of 0.001. The False Discovery Rate of 0.0001 shows a greatly reduced number: 2. But we might check for Type II Results
# where there are False negatives. We want the probability of Type I false positives to be less than
# 0.05 or 5%. This test results shows a predicted number of 70/10497 = 0.000696538. Which I would think is acceptable at .069%. and with the Discovery Rate of 0.0001 we get 2/10497 = 0.000190531 or .019%.
# The question we must ask ourselves is, can we live with the possibility of missed discoveries?
# Finally, execute the code in the cell below and examine the resulting plot.
# With this plot using False Discover Rate as 0.001 we get a slope that is noticably higher
# than that of the Holm's plot.
# NOTE(review): the sort result below is discarded (neither assigned nor
# inplace); BH_results is already in ascending p-value order from
# BH_significance, so the plot is unaffected.
BH_results.sort_values('p_value', axis=0, ascending=True).reset_index(drop=True)
plot_significance(BH_results.iloc[:500,:], 'bh_threshold')
# +
def plot_significance(results, threshold):
    """Third redefinition of the p-value/threshold plot (markers s=10).
    Scatter of p-values colored by 'significance', with the column named
    by `threshold` drawn as the cutoff line.

    NOTE(review): adds a 'number' column to the frame passed in
    (mutates the caller's data).
    """
    results['number'] = range(len(results))
    #results['number'] = results.index
    fig, ax = plt.subplots(figsize=(8, 6))
    sns.lineplot(x='number',y=threshold, data=results, ax=ax, color='black', linewidth=0.5)
    sns.scatterplot(x='number',y='p_value', hue='significance', data=results, s=10, ax=ax)
    ax.set_title('Significance of gene expression')
    ax.set_xlabel('Gene number')
    ax.set_ylabel('p-value')
def find_cross(results1,r1_thresh, results2, r2_thresh ):
    """Intended to mark where the p-value curve crosses the threshold curve.

    NOTE(review): this function is broken and is never called (its call
    at the bottom of the cell is commented out): `f` is undefined at the
    plt.plot line, `x` is an empty column slice, and the
    `results1`/`results2` arguments are ignored in favor of the global
    `holms_results`. It raises NameError if invoked — fix before use.
    """
    x = holms_results.iloc[0:100,:0]
    y1 = r1_thresh
    y2 ='p_value'
    y1_pv = holms_results.loc[:,'p_value']
    y1_thres = holms_results.loc[:,'holms_threshold']
    # Sign changes of (p-value - threshold) locate the crossing index.
    cross = np.argwhere(np.diff(np.sign(y1_pv-y1_thres))).flatten()
    plt.plot(x[cross], f[cross], 'ro')
    plt.show()
#plot_significance(holms_results.iloc[:500,:], 'holms_threshold')
# Compare the three control methods on the first 100 ordered genes.
plot_significance(holms_results.iloc[0:100,:], 'holms_threshold')
BH_results.sort_values('p_value', axis=0, ascending=True).reset_index(drop=True)
plot_significance(BH_results.iloc[0:100,:], 'bh_threshold')
BH_0001 = BH_significance(gene_statistics, 0.0001)
plot_significance(BH_0001.iloc[0:100,:], 'bh_threshold')
#find_cross(holms_results.iloc[0:100,:],'holms_threshold',BH_results.iloc[0:100,:], 'by_threshold')
# -
# > **Exercise 01-10**: Bonus question. Compare the plot above to the foregoing plot for Holm's method. Are the breaks in slope of the p-value curve at the crossing point with the threshold values reasonable in both cases?
#
# ANSWER: Using genes 0:100 we get a closer, more introspective look here. The line plots clearly indicate a bigger slope from the BH method of approximately ~ (0.625 - 0)/(70 - 0) = 0.008920, as opposed to the nearly 0 slope of the Holm's method.
#
# Holms: ~0 : from the equation P_val(H) = 0.95/(10497-59+1) = .000004789 (Chg in SIg toNon Sig=59):
# : pval(H) = alpha/(n-i +1) shows the ith value as it increases pv goes to infinity but the i change from one to the
# next is insignificant compared to the 10497 neighbor in the denominator. There will be such an insignificant
# change from one i to the next. Having a more zero slope is reasonable.
# BH: : At this breakpoint the variations from one gene to the next gives a more significance at a slope of
# : about 0.008920 where pval(BH) = (0.001 * 70 / 10497) = .00000667 and as the ith value increases, pval goes to
# infinity but the i change from one point to then next changes by about 0.001/10497 and we know the pval ofBH
# :must be less than that. As we can see at point 70 there is a considerable change compared to Holms.
#
# BH_0001: : we note as the number of Significant cases decreases the slope of the breakpoint is less. The break point slope
# : here approaches zero. With two points there is little change in the pvalue and the alpha is so low we do not
# : allow many discovery possibilities.
#
#
# ##### Copyright 2021, <NAME>. All rights reserved.
| notebooks/GOOD ALMOST EX 1-6-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimization Overview
#
# 
# # Train Model with CPU
# +
import tensorflow as tf
from tensorflow.python.client import timeline
import pylab
import numpy as np
import os
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
tf.logging.set_verbosity(tf.logging.INFO)
# -
# ## Reset TensorFlow Graph
# Useful in Jupyter Notebooks
tf.reset_default_graph()
# ## Create TensorFlow Session
sess = tf.Session()
print(sess)
# ### Load Model Training and Test/Validation Data
#
num_samples = 100000
# +
import numpy as np
# Synthetic regression data: targets follow y = 0.1*x + 0.3 plus
# Gaussian noise, so the trained W and b should approach 0.1 and 0.3.
x_train = np.random.rand(num_samples).astype(np.float32)
print(x_train)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
print(y_train)
pylab.plot(x_train, y_train, '.')
# +
import pylab
x_test = np.random.rand(len(x_train)).astype(np.float32)
print(x_test)
noise = np.random.normal(scale=.01, size=len(x_train))
y_test = x_test * 0.1 + 0.3 + noise
print(y_test)
pylab.plot(x_test, y_test, '.')
# -
# Linear model y_pred = W*x + b, explicitly pinned to the CPU device.
with tf.device("/cpu:0"):
    # Scalar trainable parameters (shape=[]).
    W = tf.get_variable(shape=[], name='weights')
    print(W)
    b = tf.get_variable(shape=[], name='bias')
    print(b)
    # Batch of inputs fed at run time (variable batch size).
    x_observed = tf.placeholder(shape=[None],
                                dtype=tf.float32,
                                name='x_observed')
    print(x_observed)
    y_pred = W * x_observed + b
    print(y_pred)
# +
learning_rate = 0.025
with tf.device("/cpu:0"):
    # Mean squared error loss, minimized with plain gradient descent.
    y_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='y_observed')
    print(y_observed)
    loss_op = tf.reduce_mean(tf.square(y_pred - y_observed))
    optimizer_op = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer_op.minimize(loss_op)
    print("Loss Scalar: ", loss_op)
    print("Optimizer Op: ", optimizer_op)
    print("Train Op: ", train_op)
# -
# ## Randomly Initialize Variables (Weights and Bias)
# The goal is to learn more accurate Weights and Bias during training.
with tf.device("/cpu:0"):
init_op = tf.global_variables_initializer()
print(init_op)
sess.run(init_op)
print("Initial random W: %f" % sess.run(W))
print("Initial random b: %f" % sess.run(b))
# ## View Accuracy of Pre-Training, Initial Random Variables
# We want this to be close to 0, but it's relatively far away. This is why we train!
def test(x, y):
    """Return the model's current MSE loss on (x, y).

    Relies on the module-level `sess`, `loss_op`, `x_observed` and
    `y_observed` defined in earlier cells.
    """
    return sess.run(loss_op, feed_dict={x_observed: x, y_observed: y})
# Loss with randomly-initialized weights: expect it to be far from 0.
test(x_test, y_test)
# ## Setup Loss Summary Operations for Tensorboard
loss_summary_scalar_op = tf.summary.scalar('loss', loss_op)
loss_summary_merge_all_op = tf.summary.merge_all()
# +
train_summary_writer = tf.summary.FileWriter('./linear_model/logs/cpu/train/',
graph=tf.get_default_graph())
test_summary_writer = tf.summary.FileWriter('./linear_model/logs/cpu/test/',
graph=tf.get_default_graph())
# -
# ## Train Model
# +
# %%time
from tensorflow.python.client import timeline
# Train on CPU, logging train/test loss summaries every 10 steps; the
# final step also captures a full execution trace for chrome://tracing.
with tf.device("/cpu:0"):
    run_metadata = tf.RunMetadata()
    max_steps = 401
    for step in range(max_steps):
        if (step < max_steps - 1):
            # Ordinary step: evaluate test loss, then run one train step.
            test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
            train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train})
        else:
            # Last step: request a software trace and dump it as a
            # Chrome-timeline JSON file.
            test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
            train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train},
                                            options=tf.RunOptions(trace_level=tf.RunOptions.SOFTWARE_TRACE),
                                            run_metadata=run_metadata)
            trace = timeline.Timeline(step_stats=run_metadata.step_stats)
            with open('timeline-cpu.json', 'w') as trace_file:
                trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
        if step % 10 == 0:
            # Periodic progress report plus TensorBoard summary flush.
            print(step, sess.run([W, b]))
            train_summary_writer.add_summary(train_summary_log, step)
            train_summary_writer.flush()
            test_summary_writer.add_summary(test_summary_log, step)
            test_summary_writer.flush()
# -
pylab.plot(x_train, y_train, '.', label="target")
pylab.plot(x_train, sess.run(y_pred,
feed_dict={x_observed: x_train,
y_observed: y_train}),
".",
label="predicted")
pylab.legend()
pylab.ylim(0, 1.0)
# ## View Loss Summaries in Tensorboard
# Navigate to the **`Scalars`** and **`Graphs`** tab in TensorBoard
# ## Save Graph For Optimization
# We will use this later.
# +
import os
checkpoint_base_path = './linear_model/cpu/checkpoint'
saver = tf.train.Saver()
graph_model_path = '%s/graph.pb' % checkpoint_base_path
print(graph_model_path)
os.makedirs(checkpoint_base_path, exist_ok=True)
tf.train.write_graph(sess.graph_def,
'.',
graph_model_path,
as_text=False)
checkpoint_model_path = '%s/model.ckpt' % checkpoint_base_path
saver.save(sess,
save_path=checkpoint_model_path)
print(checkpoint_model_path)
# -
os.listdir(checkpoint_base_path)
sess.close()
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
def convert_graph_to_dot(input_graph, output_dot, is_input_graph_binary):
    """Render a serialized TensorFlow GraphDef as a Graphviz dot file.

    Args:
        input_graph: path to the GraphDef protobuf (binary or text).
        output_dot: destination path for the generated .dot file.
        is_input_graph_binary: True when input_graph is a binary
            protobuf, False when it is text format.
    """
    graph = graph_pb2.GraphDef()
    # Parse either wire format into the same GraphDef message.
    with open(input_graph, "rb") as graph_file:
        raw = graph_file.read()
        if is_input_graph_binary:
            graph.ParseFromString(raw)
        else:
            text_format.Merge(raw, graph)
    with open(output_dot, "wt") as dot_file:
        dot_file.write("digraph graphname {\n")
        for node in graph.node:
            # One labeled vertex per op, plus an edge from each input.
            dot_file.write(" \"%s\" [label=\"%s\"];\n" % (node.name, node.op))
            for edge_source in node.input:
                # Strip the ":<port>" suffix and any leading "^"
                # (control-dependency marker) from the input name.
                source_name = re.sub(r"^\^", "", edge_source.split(":")[0])
                dot_file.write(" \"%s\" -> \"%s\";\n" % (source_name, node.name))
        dot_file.write("}\n")
    print("Created dot file '%s' for graph '%s'." % (output_dot, input_graph))
# -
output_dot_path='./graph_cpu.dot'
convert_graph_to_dot(input_graph=graph_model_path, output_dot=output_dot_path, is_input_graph_binary=True)
# + magic_args="-s \"$output_dot_path\"" language="bash"
#
# dot -T png $1 \
# -o ./graph_cpu.png > a.out
# +
from IPython.display import Image
Image('./graph_cpu.png', width=1024, height=768)
# -
# # Prepare Optimized Model for Deployment
# ## Freeze Fully Optimized Graph
# +
from tensorflow.python.tools import freeze_graph
model_parent_path = './linear_model/cpu/checkpoint'
model_graph_path = '%s/graph.pb' % model_parent_path
frozen_model_graph_path = '%s/frozen_model_graph_cpu.pb' % model_parent_path
model_checkpoint_path = '%s/model.ckpt' % model_parent_path
freeze_graph.freeze_graph(input_graph=model_graph_path,
input_saver="",
input_binary=True,
input_checkpoint=model_checkpoint_path,
output_node_names="add",
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=frozen_model_graph_path,
clear_devices=True,
initializer_nodes="")
print(frozen_model_graph_path)
# + language="bash"
#
# ls -l ./linear_model/cpu/checkpoint/
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
def convert_graph_to_dot(input_graph, output_dot, is_input_graph_binary):
    """Render a serialized TensorFlow GraphDef as a Graphviz dot file.

    Args:
        input_graph: path to the GraphDef protobuf (binary or text).
        output_dot: destination path for the generated .dot file.
        is_input_graph_binary: True when input_graph is a binary
            protobuf, False when it is text format.
    """
    graph = graph_pb2.GraphDef()
    # Parse either wire format into the same GraphDef message.
    with open(input_graph, "rb") as graph_file:
        raw = graph_file.read()
        if is_input_graph_binary:
            graph.ParseFromString(raw)
        else:
            text_format.Merge(raw, graph)
    with open(output_dot, "wt") as dot_file:
        dot_file.write("digraph graphname {\n")
        for node in graph.node:
            # One labeled vertex per op, plus an edge from each input.
            dot_file.write(" \"%s\" [label=\"%s\"];\n" % (node.name, node.op))
            for edge_source in node.input:
                # Strip the ":<port>" suffix and any leading "^"
                # (control-dependency marker) from the input name.
                source_name = re.sub(r"^\^", "", edge_source.split(":")[0])
                dot_file.write(" \"%s\" -> \"%s\";\n" % (source_name, node.name))
        dot_file.write("}\n")
    print("Created dot file '%s' for graph '%s'." % (output_dot, input_graph))
# -
input_graph='./linear_model/cpu/checkpoint/frozen_model_graph_cpu.pb'
output_dot='./frozen_model_graph_cpu.dot'
convert_graph_to_dot(input_graph=input_graph, output_dot=output_dot, is_input_graph_binary=True)
# + language="bash"
#
# dot -T png ./frozen_model_graph_cpu.dot \
# -o ./frozen_model_graph_cpu.png > b.out
# +
from IPython.display import Image
Image('./frozen_model_graph_cpu.png')
# -
# # Save Model for Deployment and Inference
# ## Reset Default Graph
# +
import tensorflow as tf
tf.reset_default_graph()
# -
# ## Create New Session
sess = tf.Session()
# ## Load Frozen Graph
# +
from tensorflow.python.tools import inspect_checkpoint
inspect_checkpoint.print_tensors_in_checkpoint_file(file_name="./linear_model/cpu/checkpoint/model.ckpt",
tensor_name="",
all_tensors=True,
all_tensor_names=True)
# +
saver = tf.train.import_meta_graph('./linear_model/cpu/checkpoint/model.ckpt.meta')
saver.restore(sess, './linear_model/cpu/checkpoint/model.ckpt')
model_parent_path = './linear_model/cpu/checkpoint/'
frozen_model_graph_path = '%s/frozen_model_graph_cpu.pb' % model_parent_path
print(frozen_model_graph_path)
with tf.gfile.GFile(frozen_model_graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="",
op_dict=None,
producer_op_list=None
)
print("weights = ", sess.run("weights:0"))
print("bias = ", sess.run("bias:0"))
# -
# ## Create `SignatureDef` Asset for TensorFlow Serving
# +
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
graph = tf.get_default_graph()
x_observed = graph.get_tensor_by_name('x_observed:0')
y_pred = graph.get_tensor_by_name('add:0')
inputs_map = {'inputs': x_observed}
outputs_map = {'outputs': y_pred}
predict_signature = signature_def_utils.predict_signature_def(
inputs = inputs_map,
outputs = outputs_map)
print(predict_signature)
# -
# ## Save Model with Assets
# +
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from shutil import rmtree
from tensorflow.contrib import predictor
import numpy as np
import os
saved_model_path = './linear_model/cpu/pipeline_tfserving/0'
# NOTE(review): makedirs followed immediately by rmtree ensures the path
# is absent before export — simple_save fails if the export directory
# already exists.
os.makedirs(saved_model_path, exist_ok=True)
rmtree(saved_model_path)
import tensorflow.saved_model as saved_model
from shutil import rmtree
# Export the session graph + variables with the named input/output
# signature TensorFlow Serving expects.
saved_model.simple_save(sess,
                        saved_model_path,
                        inputs={'inputs': x_observed},
                        outputs={"outputs": y_pred})
# -
import os
print(saved_model_path)
os.listdir(saved_model_path)
os.listdir('%s/variables' % saved_model_path)
# + language="bash"
# echo "./linear_model/cpu/pipeline_tfserving/0"
# echo ""
# ls -al ./linear_model/cpu/pipeline_tfserving/0
# -
# ## Inspect with [Saved Model CLI](https://www.tensorflow.org/guide/saved_model)
# Note: This takes a minute or two for some reason. Please be patient.
# +
import subprocess
output = subprocess.run(["saved_model_cli", "show", \
"--dir", saved_model_path, "--all"], \
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(output.stdout.decode('utf-8'))
# -
# # Optimized Model for Deployment
# ## Predict with Python (SLOW)
#
# +
from tensorflow.contrib import predictor
import numpy as np
saved_model_path = './linear_model/cpu/pipeline_tfserving/0'
print(saved_model_path)
input_shape = 1
input_data = np.random.random_sample(input_shape)
predict_fn = predictor.from_saved_model(saved_model_path)
# +
# %%time
predictions = predict_fn({'inputs': input_data})
print('Prediction: %s' % predictions["outputs"])
# -
# ## Optimize with TensorFlow Lite
#
# 
# +
import tensorflow as tf
import os
from shutil import rmtree
saved_model_path = './linear_model/cpu/pipeline_tfserving/0'
print(saved_model_path)
tflite_model_base_path = './linear_model/cpu/tflite/'
os.makedirs(tflite_model_base_path, exist_ok=True)
# Convert the SavedModel to TensorFlow Lite with post-training weight
# quantization to shrink the model (TocoConverter API).
converter = tf.lite.TocoConverter.from_saved_model(saved_model_path)
# TF 1.11+
converter.post_training_quantize = True
tflite_model = converter.convert()
tflite_model_path = '%s/tflite_optimized_model.tflite' % tflite_model_base_path
# NOTE(review): the file handle from open() is never closed explicitly —
# prefer a `with` block. write() returns the byte count printed below.
model_size = open(tflite_model_path, "wb").write(tflite_model)
print('\nModel size reduced to %s bytes' % model_size)
# + magic_args="-s \"$tflite_model_path\"" language="bash"
# echo "ls -al $1"
# echo ""
# ls -al $1
# +
import numpy as np
import tensorflow as tf
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
# +
# Get input and output tensors.
input_details = interpreter.get_input_details()
print('Input Tensor Details: %s' % input_details)
output_details = interpreter.get_output_details()
print('Output Tensor Details: %s' % output_details)
# -
# Test model on random input data.
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
print('Input: %s' % input_data)
interpreter.set_tensor(input_details[0]['index'], input_data)
# %%time
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print('Prediction: %s' % output_data)
| 10_pipeline/kubeflow/wip/06_02_Optimize_TFLite.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Setup
import sys
sys.path.append('../../..')
# +
import config
import matplotlib.pyplot as plt
import warnings
from experiments.experiment_utils import *
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
warnings.filterwarnings('ignore')
# -
# ## 2. Experiment stats
# - Architecture: FCRN_A;
# - Train size: (32 vs 64);
# - 5 runs with random train images;
# ### 2.1 Experiment 1
# - Loss: MSE;
# - without data augmentation;
# - without preprocessing steps;
# - full images;
# NOTE(review): every cell below repeats the same pattern — load the runs
# matching a set of filter tags, aggregate mean/std across the 5 random-seed
# runs, and preview the result. They are kept as separate cells (rather than
# a loop) so each res_df.head() renders as its own notebook output.
res_list = load_experiments_results('.', ['n_32', 'loss_mse', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_mse', 'full_img']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_mse', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_mse', 'full_img']))
res_df.head()
# ### 2.2 Experiment 2
# - Loss: MAE;
# - without data augmentation;
# - without preprocessing steps;
# - full images;
res_list = load_experiments_results('.', ['n_32', 'loss_mae', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_mae', 'full_img']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_mae', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_mae', 'full_img']))
res_df.head()
# ### 2.3 Experiment 3
# - Loss: MSE;
# - without data augmentation;
# - without preprocessing steps;
# - patches: 4 -> 128x128
res_list = load_experiments_results('.', ['n_32', 'loss_mse', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_mse', 'patch_4_128x128']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_mse', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_mse', 'patch_4_128x128']))
res_df.head()
# ### 2.4 Experiment 4
# - Loss: LogCosh;
# - without data augmentation;
# - without preprocessing steps;
# - full images;
res_list = load_experiments_results('.', ['n_32', 'loss_logcosh', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_logcosh', 'full_img']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_logcosh', 'full_img'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_logcosh', 'full_img']))
res_df.head()
# ### 2.5 Experiment 5
# - Loss: MAE;
# - without data augmentation;
# - without preprocessing steps;
# - patches: 4 -> 128x128
res_list = load_experiments_results('.', ['n_32', 'loss_mae', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_mae', 'patch_4_128x128']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_mae', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_mae', 'patch_4_128x128']))
res_df.head()
# ### 2.6 Experiment 6
# - Loss: LogCosh;
# - without data augmentation;
# - without preprocessing steps;
# - patches: 4 -> 128x128
res_list = load_experiments_results('.', ['n_32', 'loss_logcosh', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_32', 'loss_logcosh', 'patch_4_128x128']))
res_df.head()
res_list = load_experiments_results('.', ['n_64', 'loss_logcosh', 'patch_4_128x128'])
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_logcosh', 'patch_4_128x128']))
res_df.head()
| experiments/1_fcrn_a/vgg_cells/experiments_stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_p36
# language: python
# name: conda_pytorch_p36
# ---
# +
# S3 buckets for training data and model artifacts.
DATA_BUCKET='sagemaker-generatinghumanfaces'
OUTPUT_BUCKET = 'sagemaker-generatinghumanfaces-output'
from sagemaker import get_execution_role
# IAM role attached to this notebook instance; used by the training job.
role = get_execution_role()
print(role)
# -
# !pip install imageio
# !pip install tensorboardX
# +
from sagemaker.pytorch import PyTorch
# Launch a single-GPU PyTorch training job from ./sourcecode/train_sagemaker.py.
# NOTE(review): train_instance_type / train_instance_count are SageMaker SDK v1
# parameter names; in SDK v2 they were renamed instance_type / instance_count —
# confirm which SDK version this environment pins before upgrading.
pytorch_estimator = PyTorch(entry_point='train_sagemaker.py',
                            role=role,
                            train_instance_type='ml.p3.2xlarge',
                            train_instance_count=1,
                            framework_version='1.0.0',
                            source_dir='./sourcecode',
                            output_path='s3://{}/generatinghumanfaces/model'.format(OUTPUT_BUCKET),)
# -
# Start the training job; the 'training' channel is mounted from DATA_BUCKET.
pytorch_estimator.fit({'training': 's3://{}'.format(DATA_BUCKET)})
| run_sage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import re
import json
import numpy as np
from collections import defaultdict
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('../onmt'))
if module_path not in sys.path:
sys.path.append(module_path)
import kp_evaluate
import onmt.keyphrase.utils as utils
import seaborn as sns
import matplotlib.pyplot as plt
# +
# Histogram of the number of gold keyphrases per document, per dataset.
# NOTE(review): the original assigned dataset_names twice; the first value
# (['inspec', 'krapivin', 'nus', 'semeval', 'kp20k', 'duc', 'stackexchange'])
# was immediately overwritten and has been removed as dead code.
dataset_names = ['kp20k', 'inspec', 'krapivin', 'nus', 'semeval']
json_base_dir = '/Users/memray/project/kp/OpenNMT-kpg/data/keyphrase/json/' # path to the json folder
tgt_nums = {}
fig, ax = plt.subplots(figsize=(8, 6))
for dataset_name in dataset_names:
    tgt_nums[dataset_name] = []
    print(dataset_name)
    input_json_path = os.path.join(json_base_dir, dataset_name, '%s_test.json' % dataset_name)
    # output_json_path is kept for parity with the commented-out writer below.
    output_json_path = os.path.join(json_base_dir, dataset_name, '%s_test_meng17token.json' % dataset_name)
    # with open(input_json_path, 'r') as input_json, open(output_json_path, 'w') as output_json:
    with open(input_json_path, 'r') as input_json:
        for json_line in input_json:
            json_dict = json.loads(json_line)
            # stackexchange uses question/tags field names; normalize them.
            if dataset_name == 'stackexchange':
                json_dict['abstract'] = json_dict['question']
                json_dict['keywords'] = json_dict['tags']
                del json_dict['question']
                del json_dict['tags']
            title = json_dict['title']
            abstract = json_dict['abstract']
            fulltext = json_dict['fulltext'] if 'fulltext' in json_dict else ''
            keywords = json_dict['keywords']
            # keywords may be a ';'-separated string in some datasets.
            if isinstance(keywords, str):
                keywords = keywords.split(';')
            json_dict['keywords'] = keywords
            # Cap at <16 keyphrases so extreme outliers don't stretch the x-axis.
            if len(keywords) < 16:
                tgt_nums[dataset_name].append(len(keywords))
# sns.distplot(np.asarray(tgt_nums, dtype=int), bins=15, color="r", kde=False, rug=False);
# Plot a simple histogram with binsize determined automatically
# sns.distplot(tgt_nums, kde=False, color="b", ax=ax)
# # Plot a kernel density estimate and rug plot
# sns.distplot(tgt_nums, hist=False, rug=True, color="r")
# # Plot a filled kernel density estimate
# sns.distplot(tgt_nums, hist=False, color="g", kde_kws={"shade": True})
# # Plot a histogram and kernel density estimate
# sns.distplot(tgt_nums, hist=True, color="m", ax=ax)
# sns.distplot(tgt_nums["kp20k"] , color="skyblue", label="KP20k", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7))
# NOTE(review): sns.distplot is deprecated since seaborn 0.11 (use histplot);
# left as-is because changing it would alter the figure styling.
sns.distplot(tgt_nums["kp20k"] , color="teal", label="KP20k", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7, edgecolor="k", linewidth=1))
# sns.distplot(tgt_nums["inspec"] , color="red", label="Inspec", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7))
# sns.distplot(tgt_nums["krapivin"] , color="olive", label="Krapivin", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7))
# sns.distplot(tgt_nums["nus"] , color="gold", label="NUS", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7))
# sns.distplot(tgt_nums["semeval"] , color="teal", label="Semeval", bins=15, kde=False, rug=False, hist_kws=dict(alpha=0.7))
ax.set(xlabel='Number of keyphrases in doc', ylabel='Number of documents')
# FIX: sns.plt was never a public attribute and was removed in seaborn 0.9;
# matplotlib.pyplot (imported above as plt) is the correct way to add the legend.
plt.legend()
plt.show()
# -
| notebook/dataset_stat.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Feature Engineering](https://www.kaggle.com/learn/feature-engineering) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/clustering-with-k-means).**
#
# ---
#
# # Introduction #
#
# In this exercise you'll explore our first unsupervised learning technique for creating features, k-means clustering.
#
# Run this cell to set everything up!
# +
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.feature_engineering_new.ex4 import *
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor
# Set Matplotlib defaults
# NOTE(review): the "seaborn-whitegrid" style alias was renamed to
# "seaborn-v0_8-whitegrid" in matplotlib 3.6+ — confirm the pinned version.
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
# Bold axis labels and larger titles for all plots in this notebook.
plt.rc(
    "axes",
    labelweight="bold",
    labelsize="large",
    titleweight="bold",
    titlesize=14,
    titlepad=10,
)
def score_dataset(X, y, model=None):
    """Return the cross-validated RMSLE of `model` on (X, y).

    Parameters
    ----------
    X : pandas.DataFrame
        Feature frame; category/object columns are label-encoded before scoring.
    y : array-like
        Regression target (SalePrice).
    model : estimator, optional
        scikit-learn-compatible regressor; a fresh XGBRegressor by default.

    Returns
    -------
    float
        Root Mean Squared Log Error averaged over 5 CV folds.
    """
    # FIX: the original signature was `model=XGBRegressor()` — a mutable
    # default instantiated once at definition time and shared across calls.
    if model is None:
        model = XGBRegressor()
    # FIX: encode on a copy so scoring does not mutate the caller's DataFrame
    # as a side effect (the original wrote factorized codes back into X).
    X = X.copy()
    # Label encoding for categoricals
    for colname in X.select_dtypes(["category", "object"]):
        X[colname], _ = X[colname].factorize()
    # Metric for Housing competition is RMSLE (Root Mean Squared Log Error)
    score = cross_val_score(
        model, X, y, cv=5, scoring="neg_mean_squared_log_error",
    )
    # cross_val_score returns negated losses; flip sign and take the root.
    return np.sqrt(-1 * score.mean())
# Prepare data
df = pd.read_csv("../input/fe-course-data/ames.csv")
# -
# The k-means algorithm is sensitive to scale. This means we need to be thoughtful about how and whether we rescale our features since we might get very different results depending on our choices. As a rule of thumb, if the features are already directly comparable (like a test result at different times), then you would *not* want to rescale. On the other hand, features that aren't on comparable scales (like height and weight) will usually benefit from rescaling. Sometimes, the choice won't be clear though. In that case, you should try to use common sense, remembering that features with larger values will be weighted more heavily.
#
# # 1) Scaling Features
#
# Consider the following sets of features. For each, decide whether:
# - they definitely should be rescaled,
# - they definitely should *not* be rescaled, or
# - either might be reasonable
#
# Features:
# 1. `Latitude` and `Longitude` of cities in California
# 2. `Lot Area` and `Living Area` of houses in Ames, Iowa
# 3. `Number of Doors` and `Horsepower` of a 1989 model car
#
# Once you've thought about your answers, run the cell below for discussion.
# View the solution (Run this cell to receive credit!)
q_1.check()
# -------------------------------------------------------------------------------
#
# # 2) Create a Feature of Cluster Labels
#
# Creating a k-means clustering with the following parameters:
# - features: `LotArea`, `TotalBsmtSF`, `FirstFlrSF`, `SecondFlrSF`,`GrLivArea`
# - number of clusters: 10
# - iterations: 10
#
# (This may take a moment to complete.)
# +
# Separate features from the target.
X = df.copy()
y = X.pop("SalePrice")
# YOUR CODE HERE: Define a list of the features to be used for the clustering
features = ['LotArea', 'TotalBsmtSF', 'FirstFlrSF', 'SecondFlrSF','GrLivArea']
# Standardize
# z-score each feature; k-means is scale-sensitive, so this puts the area
# features on a comparable footing.
X_scaled = X.loc[:, features]
X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
# YOUR CODE HERE: Fit the KMeans model to X_scaled and create the cluster labels
# 10 clusters, 10 random restarts (n_init), fixed seed for reproducibility.
kmeans = KMeans(n_clusters=10, n_init=10, random_state=0)
X["Cluster"] = kmeans.fit_predict(X_scaled)
# Check your answer
q_2.check()
# +
# Lines below will give you a hint or solution code
#q_2.hint()
#q_2.solution()
# -
# You can run this cell to see the result of the clustering, if you like.
Xy = X.copy()
Xy["Cluster"] = Xy.Cluster.astype("category")
Xy["SalePrice"] = y
sns.relplot(x="value", y="SalePrice", hue="Cluster", col="variable", height=4,
aspect=1, facet_kws={'sharex': False}, col_wrap=3,
data=Xy.melt(value_vars=features, id_vars=["SalePrice", "Cluster"],))
# And as before, `score_dataset` will score your XGBoost model with this new feature added to training data.
score_dataset(X, y)
# -------------------------------------------------------------------------------
#
# The k-means algorithm offers an alternative way of creating features. Instead of labelling each feature with the nearest cluster centroid, it can measure the distance from a point to all the centroids and return those distances as features.
#
# # 3) Cluster-Distance Features
#
# Now add the cluster-distance features to your dataset. You can get these distance features by using the `fit_transform` method of `kmeans` instead of `fit_predict`.
# +
kmeans = KMeans(n_clusters=10, n_init=10, random_state=0)
# YOUR CODE HERE: Create the cluster-distance features using `fit_transform`
X_cd = kmeans.fit_transform(X_scaled)
# Label features and join to dataset
X_cd = pd.DataFrame(X_cd, columns=[f"Centroid_{i}" for i in range(X_cd.shape[1])])
X = X.join(X_cd)
# Check your answer
q_3.check()
# +
# Lines below will give you a hint or solution code
#q_3.hint()
#q_3.solution()
# -
# Run this cell to score these new features, if you like.
score_dataset(X, y)
# # Keep Going #
#
# [**Apply principal components analysis**](https://www.kaggle.com/ryanholbrook/principal-component-analysis) to create features from variation in your data.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/221677) to chat with other Learners.*
| Feature Engineering/4. Clustering With K-Means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# +
import os
import pandas as pd
import re
# Working directory; CSV paths below are joined onto it.
pwd = os.getcwd()
# -
# NOTE(review): backslash path separators make this Windows-only;
# os.path.join (or pathlib) would be portable.
df = pd.read_csv(pwd + '\\raw_data\\Data - IT_Companies_Algiers_Details_Raw.csv')
df
# Replace NaNs with empty strings so string operations below don't choke.
df.fillna('', inplace=True)
df
# +
# NOTE(review): the original line
#     df['working_hours'].str.replace('^De', 'Me', regex=True)
# computed a replacement but never assigned the result, so it was a no-op and
# has been removed. (Assigning it would have broken the "De ..." regex used by
# to_hour_format below, so dropping it preserves the intended behavior.)
# Pre-create empty opening/closing-hour columns; they are filled in below.
df['opening_at'] = df['closing_at'] = ''
def to_hour_format(base_text, first):
    """Extract an hour from a working-hours string like 'De 9h ร 17h'.

    Parameters
    ----------
    base_text : str or any
        Raw working-hours text; non-strings (e.g. NaN) are treated as empty.
    first : bool
        True -> return the opening hour (group 1), False -> the closing hour.

    Returns
    -------
    str
        The matched hour (e.g. '9h' or '17h:30'), or '' when there is no match.
    """
    # FIX: use a raw string — '\d' in a plain string is an invalid escape
    # (SyntaxWarning on Python 3.12+); the regex itself is unchanged.
    # NOTE(review): 'ร' looks like mojibake of the French 'à' ("De 9h à 17h")
    # — confirm against the raw CSV's encoding before "fixing" it.
    pattern = r'De (\d+h|\d+h:\d+) ร (\d+h:\d+|\d+h)'
    if not isinstance(base_text, str):
        base_text = ''
    result = re.search(pattern, base_text, re.IGNORECASE)
    if not result:
        return ''
    return result.group(1) if first else result.group(2)
# Parse opening/closing hours row-by-row from the raw working_hours text.
# (Attribute-style assignment works because the columns were created above.)
df.opening_at = df.apply(lambda x:
    to_hour_format(x['working_hours'], True), axis=1
)
df.closing_at = df.apply(lambda x:
    to_hour_format(x['working_hours'], False), axis=1
)
# The raw column is fully parsed; drop it.
del df['working_hours']
df
# NOTE(review): Windows-only backslash path, same as the read above.
df.to_csv(pwd + '\\dataframes\\Data - IT_Companies_Algiers_Details_Processed.csv')
# -
| 9 - Data Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/pabair/ml-kurs-ss21/blob/master/8_PyTorch_Digits.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="TOBUyiZq3d2u"
# # PyTorch Digits Example
# This example classifies the digit dataset using a neural net.
#
# -
# First we need to install with `conda install pytorch` or `pip3 install torch` depending on your setup.
# + [markdown] id="LhdvnPe4Q-pO"
# ### 0. Preamble
# + id="K8-YOrlu3w8z"
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import matplotlib.pyplot as plt
torch.manual_seed(1)
np.random.seed(1)
# -
# The following lines checks for GPU availability on the machine and sets the GPU as processing device (if available). If you are on Google Colab you can enable GPU support in the menu via `Runtime > Change runtime type` and select `GPU` as hardware accelerator.
# + colab={"base_uri": "https://localhost:8080/"} id="S3DgoJj735Gr" outputId="e56e337f-c44f-4bc3-c1ae-e532bb999356"
# Prefer the first CUDA GPU when present; otherwise fall back to the CPU.
if(torch.cuda.is_available()):
    processing_chip = "cuda:0"
    print(f"{torch.cuda.get_device_name(0)} available")
else:
    processing_chip = "cpu"
    print("No GPU available")
# All tensors and the model are moved to this device below.
device = torch.device(processing_chip)
device
# + [markdown] id="Y8EgmXccAr9b"
# ### 1. Data Preparation
# -
# We work with the already know digit dataset. For more details on the dataset, check our [previous](https://github.com/pabair/ml-kurs-ss21/blob/master/2_Logistische_Regression_Digits.ipynb) notebook.
#
# + id="DZoYUZfQ_TU8"
from sklearn.datasets import load_digits
data, labels = load_digits(return_X_y = True)
# + id="KfY2iF0WTlWu"
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(data, labels, test_size=0.2, random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="fQqdZZ16AHBe" outputId="75f7aff9-a647-4010-bafa-8141fc7aa46d"
train_X
# -
# To be able to use the data in PyTorch, we need to convert them into PyTorch tensors. Such a tensor can be thought of an efficient way to represent lists and matrices (similar to Numpy), with the additional benefit that they can be moved to the GPU (the .to(device) part in the code below) and that they support automatic backpropagation (more on this later):
# Convert the numpy splits to tensors on the chosen device.
# Features are float32; labels must be int64 (long) for CrossEntropyLoss.
train_x = torch.Tensor(train_X).float().to(device)
test_x = torch.Tensor(test_X).float().to(device)
train_y =torch.Tensor(train_y).long().to(device)
test_y = torch.Tensor(test_y).long().to(device)
# + [markdown] id="5wcTXnyu7NWK"
# ### 2. Model definition
#
# -
# We now define the structure of our neural network. For this we create a class that is a subclass of PyTorch's nn.Module. By convention we put in the `__init__` method the layers we want to use in the network and in the `forward` method how data flows through this network.
#
# Our network has 64 input features, one hidden layer with 5 neurons and 10 output neurons. The hidden layer uses a Relu activation function. Note that the output layer does not have a softmax activation (unlike we have seen it in the lecture). It rather gives out a raw score for each class (more on this later).
# + id="_W47oZ534E-1"
class Net(nn.Module):
    """Two-layer MLP for 8x8 digit classification: 64 -> 5 (ReLU) -> 10.

    The forward pass returns raw class scores (logits); softmax is applied
    implicitly by nn.CrossEntropyLoss during training.
    """

    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(64, 5)   # 64 input pixels -> 5 hidden units
        self.output = nn.Linear(5, 10)   # 5 hidden units -> 10 digit classes

    def forward(self, x):
        activated = torch.relu(self.hidden(x))
        # No softmax here: CrossEntropyLoss expects raw logits.
        return self.output(activated)
# + [markdown] id="gJyy5JG_84vs"
# ### 3. Model Training
# -
# We can now start training our network. We run several epochs in which we first predict on the training data with our network and than backpropagate the loss. For this we use PyTorch's build-in optimizer that runs gradient descent on the weights of the network. Hence, in every episode we reduce the loss on the training data and improve our network.
#
# As loss function we use cross entropy, which consumes the raw scores from the prediction and internally applies a softmax (that is why we do not need the softmax as last layer in the network).
#
# Note that all training data is passed at once to our network (line `net(train_x)` ), since PyTorch will predict on all data points in parallel.
# + colab={"base_uri": "https://localhost:8080/"} id="7RQHZvvyAFzV" outputId="85c7caef-49ee-443f-d052-30af1758c4a2"
# create network, move it to device and set it to training-mode
net = Net().to(device)
net.train()
# define the parameters for training
no_epochs = 1000
learning_rate = 0.01
loss_func = nn.CrossEntropyLoss() # applies softmax() internally
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
print("\nStarting training ")
train_losses = []
# Full-batch gradient descent: every epoch forwards the whole training set.
for epoch in range(0, no_epochs):
    # Clear gradients accumulated by the previous backward pass.
    optimizer.zero_grad()
    y_hat = net(train_x)
    loss = loss_func(y_hat, train_y)
    loss.backward()
    optimizer.step()
    # .item() detaches the scalar loss for plotting later.
    train_losses.append(loss.item())
    if epoch % 100 == 0:
        print(f"Loss in epoch {epoch} is {loss.item()}")
print("Done training ")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="fMPxG1i873W7" outputId="8ed83937-f40a-4e0e-a259-e409fff8fc9e"
fig = plt.figure()
plt.plot(range(0, no_epochs), train_losses, color='blue')
plt.legend(['Train Loss'], loc='upper right')
plt.xlabel('number of epochs')
plt.ylabel('loss')
# + [markdown] id="cN8btFSP3yU2"
# ### 4. Model Evaluation
# -
# Finally, we check the model accuracy on the test data. For this we predict on the test data, identify the class with the highest score and compare it to the true label.
# + colab={"base_uri": "https://localhost:8080/"} id="wsmVfo49Kytp" outputId="73bdda50-20d6-41e3-bb45-ce40c114f6fc"
net.eval() # set network to evaluation mode
# Predict raw scores for the test set; the argmax over classes is the label.
y_pred = net(test_x)
_, predicted = torch.max(y_pred.data, 1)
correct = (predicted == test_y).sum().item()
# FIX: corrected the typo "Accuarcy" -> "Accuracy" in the printed message.
print(f"Accuracy is {100. * correct / len(test_x)}%")
# -
# ### 5. Tasks
# 1. Our accuracy is not so good on the test data. Try the following to improve the model:
# - Increase the hidden layer size to 10 neurons, train the model and compare the accuracy on the test data.
# - Add a second hidden layer with 5 neurons, train the model and compare the accuracy on the test data.
# 2. Check if you can decrease the training loss even further if you train for more epochs. However, this can easily result in overfitting. To check that, calculate the accuracy on the test data already during training after each epoch. Show the results as a second plot similar to the one above. (Note: Normally we need to do this check on separate validation data, not on our test data).
# 3. Take the titanic data set and try to train a neural network on it. Use only the label `Survived` and the features `Pclass`, `Age`, `Sibsp`, `Parch` and `Fare`. Drop all N/As before training.
| 8_PyTorch_Digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ChamisaE/ds4a-eda-notebooks/blob/master/MappingFinal.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="f5neHaOsVuo4" outputId="6d30da6b-0f4a-42ab-f88d-6c1d80bcc294"
# Load the Drive helper and mount
# (Colab-only: google.colab is unavailable outside the Colab runtime.)
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
# + id="P2dJ6fjvSnCL" colab={"base_uri": "https://localhost:8080/"} outputId="122c2a71-b90e-405e-eb79-2aeb7ba8802e"
pip install geopandas
# + id="_TcK-ptzViAx"
# Import required packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
# + id="ZdIitNCNS-wH"
#Yuwa
# Refactor (review): the original repeated gpd.read_file once per state with
# copy-pasted paths. Loading in a loop removes the duplication while still
# defining the same map_<fips> variables used by the concat cell below.
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in ("01", "02", "04", "05", "06", "08", "09", "56"):
    globals()[f"map_{int(_fips)}"] = gpd.read_file(
        f"{_tract_dir}/State_{_fips}/tl_2019_{_fips}_tract.shp"
    )
# + id="yHEnN5AXW0ro"
#Mike
# Refactor (review): same loop pattern as above — defines map_10 ... map_17
# (FIPS 14 does not exist, hence the explicit tuple).
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in ("10", "11", "12", "13", "15", "16", "17"):
    globals()[f"map_{int(_fips)}"] = gpd.read_file(
        f"{_tract_dir}/State_{_fips}/tl_2019_{_fips}_tract.shp"
    )
# + id="H-WOpZxxTSOh"
#Chamisa
# Refactor (review): same loop pattern — defines map_32 ... map_38.
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in range(32, 39):
    _code = f"{_fips:02d}"
    globals()[f"map_{_fips}"] = gpd.read_file(
        f"{_tract_dir}/State_{_code}/tl_2019_{_code}_tract.shp"
    )
# + colab={"base_uri": "https://localhost:8080/", "height": 162} id="iwz_Wf_tXB8_" outputId="e21309c5-a3c7-4be7-aee3-85c2a14c07da"
#Lauro & Erica
# Refactor (review): 14 copy-pasted load-and-head blocks collapsed into one
# loop defining map_18 ... map_31. Only the final .head(2) preview is kept,
# since a notebook cell displays only its last expression anyway.
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in range(18, 32):
    _code = f"{_fips:02d}"
    globals()[f"map_{_fips}"] = gpd.read_file(
        f"{_tract_dir}/State_{_code}/tl_2019_{_code}_tract.shp"
    )
# check the GeoDataframe
map_31.head(2)
# + id="B11rxReKXh2b"
#Ad
# Refactor (review): same loop pattern — defines map_39 ... map_46
# (FIPS 43 does not exist, hence the explicit tuple).
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in ("39", "40", "41", "42", "44", "45", "46"):
    globals()[f"map_{int(_fips)}"] = gpd.read_file(
        f"{_tract_dir}/State_{_fips}/tl_2019_{_fips}_tract.shp"
    )
# + id="XE2KN5RVXHFK"
#Aryana
# Refactor (review): same loop pattern — defines map_47 ... map_55
# (FIPS 52 does not exist, hence the explicit tuple).
_tract_dir = "/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts"
for _fips in ("47", "48", "49", "50", "51", "53", "54", "55"):
    globals()[f"map_{int(_fips)}"] = gpd.read_file(
        f"{_tract_dir}/State_{_fips}/tl_2019_{_fips}_tract.shp"
    )
# + [markdown] id="fkMqa8sRZYfO"
# # Stack
# + id="bOLhS-5CZQIQ"
#creating dataframe of map tables from shape files
# FIX(review): map_11 (District of Columbia) is loaded above but was missing
# from the original concat list, silently dropping all DC tracts; it is
# included here. Confirm DC was not excluded on purpose.
df1= pd.concat([map_1, map_2, map_4, map_5, map_6, map_8, map_9, map_10,
                map_11, map_12, map_13, map_15, map_16, map_17, map_18,
                map_19, map_20, map_21, map_22, map_23, map_24, map_25,
                map_26, map_27, map_28, map_29, map_30, map_31, map_32,
                map_33, map_34, map_35, map_36, map_37, map_38, map_39,
                map_40, map_41, map_42, map_44, map_45, map_46, map_47,
                map_48, map_49, map_50, map_51, map_53, map_54, map_55,
                map_56], ignore_index=True)
# + id="-Xn6pWMpanGP"
#Reduce to columns listed, need to keep geometry
df1 = df1[['STATEFP', 'COUNTYFP', 'TRACTCE', 'GEOID','geometry']]
# + id="X-azNEL4jELt"
# Lowercase the column names (GEOID -> geoid) so they match the other
# datasets' keys, then rebuild the GeoDataFrame around the renamed frame.
df2 = pd.DataFrame(df1)
df2= df2.rename(columns=str.lower)
df1 = gpd.GeoDataFrame(df2)
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="Jk-twHZZt-F6" outputId="6cba4c2f-fd33-40b5-d93f-ee9205b984ee"
df2.head()
# + id="ttWjkTDltmqu"
#Save as csv
df2.to_csv(r"/content/drive/My Drive/DS4A Team 114/Data Sets/census_tracts/_stacked_maps_all/all_census_tracts_2019", index = False)
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="NuaeIbegbWAt" outputId="1221abd9-b6e5-462a-c328-b3dde06de937"
#plot all states in collated map
plt.rcParams['figure.figsize'] = [70, 70]
df1.plot(legend=True);
# + id="UrRBGx3sbZr2"
#read in the final dataset with correct geoid lengths
# dtype=str preserves leading zeros in the geoid (FIPS-based) codes.
# NOTE: np.str was only a deprecated alias of the builtin str and was removed
# in NumPy 1.24, so the builtin is used directly.
df_main = pd.read_csv('/content/drive/My Drive/DS4A Team 114/Data Sets/Clean Data Sets/merged_final_v4.csv',low_memory=False,dtype={"geoid":str})
# + colab={"base_uri": "https://localhost:8080/"} id="crenZCklmqM5" outputId="09143d5c-3810-4665-cbd5-ca378d992466"
#checking minimum geoid length
df_main['geoid'].astype(str).map(len).min()
# + colab={"base_uri": "https://localhost:8080/"} id="v1Tg5_U7mwRW" outputId="1d3300cc-ff26-49fc-fe19-15d05020f60f"
#checking maximum geoid length
df_main['geoid'].astype(str).map(len).max()
# + id="_j13946mjsXs"
#filtering the df final to just 3 columns: geoid, has transit, high transit usage
df_main = df_main[['geoid','has_transit','high_transit_usage']]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="S3xY3w3MlKNf" outputId="15a6aab8-6307-4e8d-de47-00a7b54e5fef"
#checking df for columns
df_main.head()
# + id="6NNTs0DikEhH"
# + id="EJMZtufzkJxY"
#merge df1(map) with main dataset (df_main)
# Outer join keeps tracts present in only one of the two sources; unmatched
# rows carry NaNs and are dropped before plotting below.
for_plotting = pd.merge(df_main, df1, on="geoid", how="outer")
# Re-wrap as a GeoDataFrame so .plot() renders the tract geometries.
anothaone = gpd.GeoDataFrame(for_plotting)
# + colab={"base_uri": "https://localhost:8080/", "height": 589} id="qyrO2tjGdYzJ" outputId="5885d2b6-9d14-4f9d-e055-3e1140a4d4df"
#checking for plotting df
anothaone
# + colab={"base_uri": "https://localhost:8080/", "height": 693} id="F7DjxuhTg9qA" outputId="bbda8961-974e-4668-b7b1-9e7eb09a1f01"
#map for census tracts with transit present
# dropna() removes rows missing has_transit or geometry before plotting.
ax = anothaone.dropna().plot(column='has_transit', cmap = 'plasma', figsize=(12,12), k=2, legend=True);
# + id="2ulLjV1Fj0v-"
#map for census tracts with no transit
#map for census tracts with high_transit_usage
#ax2 = for_plotting.dropna().plot(column='high_transit_usage', cmap = 'plasma', figsize=(52,52), k=2, legend = True);
# + id="4EB5ykUYuaGP"
| MappingFinal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.4
# language: julia
# name: julia-0.6
# ---
# <center>
# <b><span style="font-size: 1em;">
# EXAMPLES
# </span></b>
# <br>
# <b><span style="font-size: 2em;">
# FLOWUnsteady
# </span></b>
# <br>
# <span style="font-size: 1em;">
# Last Update: 04/2020
# </span>
# </center>
# This document reports the results of the example simulations under [`examples/`](https://github.com/byuflowlab/FLOWUnsteady/tree/master/examples)
# PyCall enables Python interoperability for the example notebooks.
using PyCall
# HTML <iframe> fragments used to embed YouTube videos: a video ID is spliced
# between command1 and command2, e.g. HTML(command1*"NimSqJWjRAY"*command2).
command1 = "<iframe width=\"600\" height=\"337\" src=\"https://www.youtube.com/embed/"
command2 = "?modestbranding=1&autohide=1&showinfo=0&rel=0&controls=1\"
frameborder=\"0\" allow=\"autoplay; encrypted-media\"
allowfullscreen></iframe>";
# # Heaving Wing
# [`examples/heavingwing.jl`](https://github.com/byuflowlab/FLOWUnsteady/blob/master/examples/heavingwing.jl)
#
# Swept wing in heaving motion. This example tests FLOWUnsteady's capacity to capture unsteady aerodynamics.
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/docs/resources/vid/bertinsheaving00_1and3.gif" alt="Vid here" width="700px">
# <img src="img/heavingwing00.png" alt="Pic here" width="600px">
# <img src="img/heavingwing02.png" alt="Pic here" width="900px">
# <img src="img/heavingwing03.png" alt="Pic here" width="900px">
HTML(command1*"NimSqJWjRAY"*command2)
# <img src="img/heaving00.png" alt="Pic here" width="700px">
# <img src="img/heaving01.png" alt="Pic here" width="800px">
# # Tandem Heaving Wing
# [`examples/tandemheavingwing.jl`](https://github.com/byuflowlab/FLOWUnsteady/blob/master/examples/tandemheavingwing.jl)
#
# Tandem wing configuration with the front wing in heaving motion shedding a wake that impinges on the back wing. This example tests FLOWUnsteady's capacity to capture wing-on-wing interactions.
HTML(command1*"Pch94bKpjrQ"*command2)
# <img src="img/tandemheaving00.png" alt="Pic here" width="900px">
# <img src="img/tandemheaving01.png" alt="Pic here" width="800px">
# # Cross-wind Circular Path
# [`examples/circularpath.jl`](https://github.com/byuflowlab/FLOWUnsteady/blob/master/examples/circularpath.jl)
#
# Swept wing in a circular path under cross wind perpendicular to the plane of rotation. This example tests FLOWUnsteady's capacity to capture unsteady aerodynamics.
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/docs/resources/vid/circularpath03_1and2.gif" alt="Vid here" width="700px">
# <img src="img/circularpath03.png" alt="Pic here" width="600px">
# <img src="img/circularpath03_2.png" alt="Pic here" width="600px">
# Notice that the lift coefficient oscillates because I hard-coded it to be taken from force in the $y$-direction.
# <img src="img/circlesim.jpg" alt="Pic here" width="600px">
# # Blown Wing
# [`examples/blownwing/`](https://github.com/byuflowlab/FLOWUnsteady/blob/master/examples/blownwing)
#
# Swept wing with two propellers. This example tests FLOWUnsteady's capacity to capture rotor-on-wing and wing-on-rotor aerodynamic and aeroacoustic interactions.
HTML(command1*"3REcIdIXrZA"*command2)
# # Wind-harvesting Aircraft
HTML(command1*"iFM3B4_N2Ls"*command2)
# # eVTOL Transition
# **Maneuver definition**
HTML(command1*"-xTHvwIe_34"*command2)
# **Rotor-on-rotor interactions**
HTML(command1*"f_AkQW37zqs"*command2)
# # Rotor Aeroacoustics
# See this notebook for more details and validation on rotor noise: [`examples/rotornoise/singlerotor.ipynb`](https://nbviewer.jupyter.org/github/byuflowlab/FLOWUnsteady/blob/master/examples/rotornoise/singlerotor.ipynb)
# **Single-rotor**
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/examples/rotornoise/vid/dji9443_ccblade01_1.gif" alt="Vid here" width="700px">
# **Multirotor interactions**
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/docs/resources/vid/val_piv_multi16_005D_99_1_noise1_cropped00.gif" alt="Vid here" width="400px">
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/docs/resources/vid/cfdnoise_ningdji_multi_005D_03_15.gif" alt="Vid here" width="700px">
# <img src="https://media.githubusercontent.com/media/byuflowlab/FLOWUnsteady/master/docs/resources/vid/cfdnoise_ningdji_multi_005D_03_18.gif" alt="Vid here" width="900px">
| docs/resources/examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intake-Postgres Plugin: Joins Demo
#
# The following notebook demonstrates "join" functionality using the _Intake-Postgres_ plugin. Its purpose is to showcase a variety of scenarios in which an _Intake_ user may want to query their PostgreSQL-based relational datasets.
#
# Joins are to be executed within the following scenarios:
#
# - One database, two tables
# - Two databases, several tables
#
#
# ## Setup
# 1. Download the PostgreSQL/PostGIS Docker images. With [Docker installed](https://www.docker.com/community-edition), execute:
# ```
# for db_inst in $(seq 0 4); do
# docker run -d -p $(expr 5432 + $db_inst):5432 --name intake-postgres-$db_inst mdillon/postgis:9.6-alpine;
# done
# ```
# All subsequent `docker run` commands will start containers from this image.
#
# 1. In the same conda environment as this notebook, install `pandas`, `sqlalchemy`, `psycopg2`, `shapely`, and (optionally) `postgresql`:
# ```
# conda install pandas sqlalchemy psycopg2 shapely postgresql
# ```
# The `postgresql` package is only for the command-line client library, so that we can verify that results were written to the database (externally from our programs).
#
# 1. Finally, install the _intake-postgres_ plugin:
# ```
# conda install -c intake intake-postgres
# ```
#
#
# ## Loading the data
#
# Because _Intake_ only supports reading the data, we need to insert the data into our databases by another means. The general approach below relies on partitioning a pre-downloaded CSV file and inserting its partitions into each table. This can be thought of as a rudimentary form of application-level "sharding".
#
# The code (below) begins by importing the necessary modules:
# +
from __future__ import print_function, absolute_import
## For downloading the test data
import os
import requests
import urllib
import zipfile
## For inserting test data
import pandas as pd
from sqlalchemy import create_engine
## For using Intake
from intake.catalog import Catalog
## Global variables
N_PARTITIONS = 5
# -
# Here we download the data, if it doesn't already exist:
# +
# %%time
# Define download sources and destinations.
# For secure extraction, 'fpath' must be how the zip archive refers to the file.
loan_data = {'url': 'https://resources.lendingclub.com/LoanStats3a.csv.zip',
             'fpath': 'LoanStats3a.csv',
             'table': 'loan_stats',
             'date_col': 'issue_d',
             'normalize': ['term', 'home_ownership', 'verification_status', 'loan_status', 'addr_state', 'application_type', 'disbursement_method']}
decl_loan_data = {'url': 'https://resources.lendingclub.com/RejectStatsA.csv.zip',
                  'fpath': 'RejectStatsA.csv',
                  'table': 'reject_stats',
                  'date_col': 'Application Date',
                  'normalize': ['State']}
# Do the data downloading and extraction.
# FIX: the original used bare `except:` clauses (which also swallow
# KeyboardInterrupt/SystemExit) and nested the extraction `try` inside the
# write `try`, so extraction failures were misreported as write errors.
# Each step now catches only its own exceptions and chains the cause.
for data in [loan_data, decl_loan_data]:
    url, fpath = data['url'], data['fpath']
    if os.path.isfile(fpath):
        print('{!r} already exists: skipping download.\n'.format(fpath))
        continue
    dl_fpath = os.path.basename(urllib.parse.urlsplit(url).path)
    print('Downloading data from {!r}...'.format(url))
    try:
        response = requests.get(url)
        response.raise_for_status()  # treat HTTP error pages as failures too
    except requests.RequestException as exc:
        raise ValueError('Download error. Check internet connection and URL.') from exc
    try:
        with open(dl_fpath, 'wb') as fp:
            print('Writing data...')
            fp.write(response.content)
    except OSError as exc:
        raise ValueError('File write error. Check destination file path and permissions') from exc
    try:
        print('Extracting data...')
        with zipfile.ZipFile(dl_fpath, 'r') as zip_ref:
            zip_ref.extract(fpath)
    except zipfile.BadZipFile as exc:
        raise ValueError('File extraction error. Is the downloaded file a zip archive?') from exc
    # Remove the downloaded archive once its payload is extracted.
    if os.path.isfile(dl_fpath) and dl_fpath.endswith('.zip'):
        os.remove(dl_fpath)
    print('Success: {!r}\n'.format(fpath))
# -
# Next, we partition the data into `N_PARTITIONS` groups, and persist each partition into a separate database instance. Although there are many ways we can choose to partition the dataset, here we partition by the date the loans were issued (or if they were rejected, the date when they were applied for):
# %%time
# FIX: pd.np was deprecated in pandas 0.25 and removed in 1.0; import numpy
# directly instead of reaching through the pandas namespace.
import numpy as np
for data in [loan_data, decl_loan_data]:
    fpath, date_col, table = data['fpath'], data['date_col'], data['table']
    norm_cols = data['normalize']
    pcol = '_' + date_col # Used for partitioning the data
    df = pd.read_csv(fpath, skiprows=1)
    print('# {}: {}'.format(table, len(df)))
    print('# {} valued at N/A: {}'.format(table, len(df[df[date_col].isna()])))
    df.dropna(axis=0, subset=[date_col], inplace=True)
    df[pcol] = pd.to_datetime(df[date_col]) # , format='%b-%Y')
    # Cast strs with '%' into floats, so we can do analysis more easily
    if 'int_rate' in df.columns:
        df['int_rate'] = df['int_rate'].str.rstrip('%').astype(float)
    df.sort_values(pcol, inplace=True)
    # Partition rows into N_PARTITIONS equal-sized, date-ordered groups.
    grouped = df.groupby(pd.qcut(df[pcol],
                                 N_PARTITIONS,
                                 labels=list(range(N_PARTITIONS))))
    # Normalize what we can, store into first db instance
    engine = create_engine('postgresql://postgres@localhost:{}/postgres'.format(5432))
    for norm_col in norm_cols:
        norm_col_cats = df[norm_col].astype('category')
        norm_df = pd.DataFrame({'id': np.arange(len(norm_col_cats.cat.categories)),
                                norm_col: norm_col_cats.cat.categories.values})
        df.loc[:, norm_col] = norm_col_cats.cat.codes
        print('Persisting normalized column, {!r}...'.format(norm_col+'_codes'))
        norm_df.to_sql(norm_col+'_codes', engine, if_exists='replace')
    for group_id, group_df in grouped:
        print('\n###', group_id)
        start = group_df[pcol].min().strftime('%b-%Y')
        end = group_df[pcol].max().strftime('%b-%Y')
        # Save each partition to a different database
        print('Persisting {} {} from {} to {}...'.format(len(group_df), table, start, end))
        engine = create_engine('postgresql://postgres@localhost:{}/postgres'.format(5432+group_id))
        try:
            group_df.drop(columns=pcol).to_sql(table, engine, if_exists='fail') #'replace')
        except ValueError:
            pass # Table already exists, so do nothing.
    print()
# Verify the data was written, by connecting to the databases directly with the `psql` command-line tool:
# +
# Save each query from the `psql` command as HTML
# !for db_inst in $(seq 0 4); do \
# psql -h localhost -p $(expr 5432 + $db_inst) -U postgres -q -H \
# -c 'select loan_amnt, term, int_rate, issue_d from loan_stats limit 5;' \
# > db${db_inst}.html; \
# done
# Display the HTML files
from IPython.display import display, HTML
for db_inst in range(N_PARTITIONS):
display(HTML('db{}.html'.format(db_inst)))
# -
# ## Reading the data (with Intake-Postgres)
# Write out a __joins\_catalog.yml__ file with the appropriate schema:
# +
# %%writefile joins_catalog.yml
plugins:
source:
- module: intake_postgres
sources:
# Normalized columns
term_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, term from term_codes'
home_ownership_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, home_ownership from home_ownership_codes'
verification_status_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, verification_status from verification_status_codes'
loan_status_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, loan_status from loan_status_codes'
addr_state_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, addr_state from addr_state_codes'
application_type_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, application_type from application_type_codes'
disbursement_method_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, disbursement_method from disbursement_method_codes'
State_codes:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select id, "State" from "State_codes"'
# loan_stats data
loans_1:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select issue_d, term, application_type, disbursement_method, home_ownership, verification_status, loan_status, loan_amnt, int_rate from loan_stats'
loans_5:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5436/postgres'
sql_expr: 'select issue_d, term, application_type, disbursement_method, home_ownership, verification_status, loan_status, loan_amnt, int_rate from loan_stats'
# reject_stats data
rejects_1:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: 'select "Application Date", "State", "Amount Requested" from reject_stats'
rejects_5:
driver: postgres
args:
uri: 'postgresql://postgres@localhost:5436/postgres'
sql_expr: 'select "Application Date", "State", "Amount Requested" from reject_stats'
# Joins
join_db_1_to_1:
driver: postgres
parameters:
interest_lowbound:
description: "Lower-bound for interest rate in query"
type: float
default: 0.0
min: 0.0
args:
uri: 'postgresql://postgres@localhost:5432/postgres'
sql_expr: !template "
select issue_d,
term_codes.term,
application_type_codes.application_type,
disbursement_method_codes.disbursement_method,
home_ownership_codes.home_ownership,
verification_status_codes.verification_status,
loan_status_codes.loan_status,
loan_amnt,
int_rate
from loan_stats
inner join term_codes on loan_stats.term = term_codes.id
inner join application_type_codes on loan_stats.application_type = application_type_codes.id
inner join disbursement_method_codes on loan_stats.disbursement_method = disbursement_method_codes.id
inner join home_ownership_codes on loan_stats.home_ownership = home_ownership_codes.id
inner join verification_status_codes on loan_stats.verification_status = verification_status_codes.id
inner join loan_status_codes on loan_stats.loan_status = loan_status_codes.id
where int_rate > {{ interest_lowbound }}"
# -
# Access the catalog with Intake:
# %time
catalog = Catalog('joins_catalog.yml')
catalog
# Inspect the metadata about the first source (optional):
catalog.loans_1.discover()
catalog.application_type_codes.discover()
catalog.join_db_1_to_1.discover()
# Read the data from the sources:
# %%time
catalog.loans_1.read().tail()
# %%time
catalog.loans_5.read().tail()
# ## _JOIN_ with one database
#
# Here is our **JOIN**, with default parameters (`interest_lowbound == 0.0`):
# %%time
catalog.join_db_1_to_1.read().tail()
# Next, with our own parameter value(s):
# %%time
catalog.join_db_1_to_1(interest_lowbound=15.0).read().tail()
# ## _JOIN_ with two databases
#
# For a **JOIN** between tables of two separate databases, we first connect to the tables we are interested in. Then we **JOIN** (aka `.merge()`) them together afterward:
# %%time
loans_5_df = catalog.loans_5.read()
term_df = catalog.term_codes.read()
application_type_df = catalog.application_type_codes.read()
disbursement_method_df = catalog.disbursement_method_codes.read()
home_ownership_df = catalog.home_ownership_codes.read()
verification_status_df = catalog.verification_status_codes.read()
loan_status_df = catalog.loan_status_codes.read()
term_df
loans_5_df.tail()
# Decode each normalized (integer-coded) column by joining it against its
# lookup table, then drop the helper join columns.
for col, lookup_df in [('term', term_df),
                       ('application_type', application_type_df),
                       ('disbursement_method', disbursement_method_df),
                       ('home_ownership', home_ownership_df),
                       ('verification_status', verification_status_df),
                       ('loan_status', loan_status_df)]:
    # Left join keeps every loan row; left_on is the integer code, right_on is
    # the lookup table's 'id'.  suffixes=['_', ''] renames the coded column to
    # '<col>_' so the decoded text column can keep the name <col>.
    loans_5_df = pd.merge(loans_5_df, lookup_df,
                          how='left', on=None,
                          left_on=col, right_on='id',
                          suffixes=['_', ''])
    loans_5_df.drop(columns=col+'_', inplace=True)
    # Remove the lookup table's 'id' column (suffixed or not) after the join.
    if 'id_' in loans_5_df.columns:
        loans_5_df.drop(columns='id_', inplace=True)
    if 'id' in loans_5_df.columns:
        loans_5_df.drop(columns='id', inplace=True)
loans_5_df.tail()
| examples/Joins Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1 style="text-align:center">Undamped Response to Harmonic Seismic Inputs</h1>
# <h3 style="text-align:center">MCHE 485: Mechanical Vibrations</h3>
# <p style="text-align:center">Dr. <NAME> <br>
# <a href="mailto:<EMAIL>"><EMAIL></a><br>
# http://www.ucs.louisiana.edu/~jev9637/ </p>
# <p style="text-align:center">
# <img src="http://shared.crawlab.org/MassSpring_Seismic_Horiz.png" alt="A Mass-Spring-Damper System" width=50%/></a><br>
# <strong> Figure 1: A Mass-Spring System </strong>
# </p>
#
# This notebook examines the frequency response of a simple mass-spring system like the one shown in Figure 1. It has a position input, $ y(t) $, known to be harmonic, which is connected to the mass, $ m $, via a spring of stiffness $ k $. The position of the mass is defined by $ x(t) $.
#
# The equation of motion for the system is:
#
# <!-- the \quad commmand just adds a space in the math mode -->
# $ \quad m \ddot{x} + kx = ky $
#
# We could also write this equation in terms of the damping ratio, $\zeta$, and natural frequency, $\omega_n$.
#
# $ \quad \ddot{x} + \omega_n^2x = \omega_n^2 y$
#
# For information on how to obtain this equation, you can see the lectures at the [class website](http://www.ucs.louisiana.edu/~jev9637/MCHE485.html).
import numpy as np # Grab all of the NumPy functions with nickname np
# We want our plots to be displayed inline, not in a separate window
# %matplotlib inline
# Import the plotting functions
import matplotlib.pyplot as plt
# Define the System Parameters
m = 1.0 # kg
k = (2.*np.pi)**2. # N/m (Selected to give an undamped natural frequency of 1Hz)
wn = np.sqrt(k/m) # Natural Frequency (rad/s)
# Let's use the closed-form, steady-state solution we developed in lecture:
#
# Assume:
#
# $ \quad y(t) = \bar{y} \sin{\omega t} $
#
# Then, the solution $x(t)$ should have the form:
#
# $ \quad x(t) = a \sin{\omega t} + b \cos{\omega t} $
#
# We saw that when we substituted this assumed solution into the equations of motion for this undamped system, the $b$ constant was eliminated, leaving:
#
# $ \quad x(t) = \frac{\omega_n^2}{\omega_n^2 - \omega^2}\bar{y} \sin{\omega t} $
#
# or
#
# $ \quad x(t) = \frac{\omega_n^2}{\omega_n^2 - \omega^2} y(t) $
#
# So, $ \frac{\omega_n^2}{\omega_n^2 - \omega^2} $ gives us the relationship between the input $ y(t) $ and the system response $ x(t) $. Let's plot that for a range of frequencies.
# +
# Set up input parameters
w = np.linspace(0,wn*3,500) # Frequency range for freq response plot, 0-3x wn with 500 points in-between
# Steady-state amplitude ratio x/y = wn^2 / (wn^2 - w^2) from the closed-form solution
x_amp = (wn**2) / (wn**2 - w**2)
# Let's mask the discontinuity, so it isn't plotted
# (samples near the resonance asymptote become NaN so matplotlib leaves a gap)
pos = np.where(np.abs(x_amp) >= 15)
x_amp[pos] = np.nan
w[pos] = np.nan
# +
# Make the figure pretty, then plot the results
# "pretty" parameters selected based on pdf output, not screen output
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.2,left=0.15,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)
plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel(r'Input Frequency $\left(\omega\right)$',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylabel(r'$ \frac{\omega_n^2}{\omega_n^2 - \omega^2} $',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylim(-10.0,10.0)
plt.yticks([0])
plt.xticks([0,1],['0','$\omega = \omega_n$'])
plt.plot(w/wn, x_amp, linewidth=2)
# If you want to save the figure, uncomment the commands below.
# The figure will be saved in the same directory as your IPython notebook.
# Save the figure as a high-res pdf in the current folder
# plt.savefig('MassSpring_SeismicFreqResp_Amplitude.pdf',dpi=300)
fig.set_size_inches(9,6) # Resize the figure for better display in the notebook
# -
# ### Magnitude of the Response
# We can also plot the magnitude of this.
# +
# Define the magnitude of the response
x_mag = np.abs(x_amp)
# Make the figure pretty, then plot the results
# "pretty" parameters selected based on pdf output, not screen output
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.2,left=0.15,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)
plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel(r'Input Frequency $\left(\omega\right)$',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylabel(r'$ \left|\frac{\omega_n^2}{\omega_n^2 - \omega^2} \right|$',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylim(0.0,10.0)
plt.yticks([0])
plt.xticks([0,1],['0','$\omega = \omega_n$'])
plt.plot(w/wn,x_mag,linewidth=2)
# If you want to save the figure, uncomment the commands below.
# The figure will be saved in the same directory as your IPython notebook.
# Save the figure as a high-res pdf in the current folder
# savefig('MassSpring_SeismicFreqResp_Magnitude.pdf',dpi=300)
fig.set_size_inches(9,6) # Resize the figure for better display in the notebook
# +
# Set up input parameters
w = np.linspace(0,wn*3,500) # Frequency range for freq response plot, 0-3x wn with 500 points in-between
xddot_amp = -(wn**2 * w**2) / (wn**2 - w**2)
# Let's mask the discontinuity, so it isn't plotted
pos = np.where(np.abs(xddot_amp) >= 600)
xddot_amp[pos] = np.nan
w[pos] = np.nan
# -
# ### Acceleration
# Now, let's look at the acceleration of the mass, $\ddot{x}(t)$, for the same seismic input, y(t).
#
# $ \quad \ddot{x}(t) = \frac{-\omega_n^2\omega^2}{\omega_n^2 - \omega^2}\bar{y} \sin{\omega t} $
#
# or
#
# $ \quad \ddot{x}(t) = \frac{-\omega_n^2\omega^2}{\omega_n^2 - \omega^2} y(t) $
#
# So, $ \frac{-\omega_n^2\omega^2}{\omega_n^2 - \omega^2} $ gives us the relationship between the input $ y(t) $ and the acceleration of the mass, $ \ddot{x}(t) $. Let's plot that for a range of frequencies.
# +
# Make the figure pretty, then plot the results
# "pretty" parameters selected based on pdf output, not screen output
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.20,left=0.15,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)
plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel(r'Input Frequency $\left(\omega\right)$',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylabel(r'$ \frac{-\omega_n^2 \omega^2}{\omega_n^2 - \omega^2} $',family='serif',fontsize=22,weight='bold',labelpad=10)
plt.ylim(-500.0,500.0)
plt.yticks([0])
plt.xticks([0,1],['0','$\omega = \omega_n$'])
plt.plot(w/wn,xddot_amp,linewidth=2)
# If you want to save the figure, uncomment the commands below.
# The figure will be saved in the same directory as your IPython notebook.
# Save the figure as a high-res pdf in the current folder
# plt.savefig('MassSpring_SeismicFreqResp_AccelAmplitude.pdf',dpi=300)
fig.set_size_inches(9,6) # Resize the figure for better display in the notebook
# -
# ### Normalization
# We could also normalize the input frequency, $ \omega $, based on its relationship to the natural frequency of the system, $ \omega_n$. To do so, divide both the numerator and denominator of:
#
# $ \quad x(t) = \frac{\omega_n^2}{\omega_n^2 - \omega^2}\bar{y} \sin{\omega t} $
#
# by $\omega_n$. We can then define $\Omega \equiv \frac{\omega}{\omega_n} $ and write:
#
# $ \quad x(t) = \left(\frac{1}{1-\Omega^2}\right) \bar{y} \sin{\omega t} $
#
# The term $\frac{1}{1-\Omega^2}$ then gives us a normalized version of the amplitude of vibration by plotting it versus $\Omega$.
#
# +
# Set up input parameters
wnorm = np.linspace(0,3,500) # Frequency range for freq response plot, 0-3x wn with 500 points in-between
# Normalized amplitude ratio 1 / (1 - Omega^2)
xnorm_amp = (1) / (1 - wnorm**2)
# Let's mask the discontinuity, so it isn't plotted
# FIX: the mask must be applied to xnorm_amp (this cell's array) -- the
# original assigned NaN to x_amp from the earlier, unnormalized cell, leaving
# the resonance spike in the normalized plot.
pos = np.where(np.abs(xnorm_amp) >= 15)
xnorm_amp[pos] = np.nan
wnorm[pos] = np.nan
# +
# Make the figure pretty, then plot the results
# "pretty" parameters selected based on pdf output, not screen output
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.23,left=0.15,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)
plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel(r'Normalized Frequency $\left(\Omega = \frac{\omega}{\omega_n}\right)$',family='serif',fontsize=22,weight='bold',labelpad=5)
plt.ylabel(r'$ \frac{1}{1 - \Omega^2} $',family='serif',fontsize=22,weight='bold',labelpad=15)
plt.ylim(-5.0,5.0)
plt.yticks([0,1])
plt.xticks([0,1],['0','$\Omega = 1$'])
plt.plot(wnorm, xnorm_amp, linewidth=2)
# If you want to save the figure, uncomment the commands below.
# The figure will be saved in the same directory as your IPython notebook.
# Save the figure as a high-res pdf in the current folder
# savefig('MassSpring_SeismicFreqResp_NormAmp.pdf',dpi=300)
fig.set_size_inches(9,6) # Resize the figure for better display in the notebook
# -
# ### Magnitude of the Response
# We can also plot the magnitude of the normalized response
# +
# Take the absolute value to get the magnitude
xnorm_mag = np.abs(xnorm_amp)
# Make the figure pretty, then plot the results
# "pretty" parameters selected based on pdf output, not screen output
# Many of these setting could also be made default by the .matplotlibrc file
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
plt.subplots_adjust(bottom=0.23,left=0.15,top=0.96,right=0.96)
plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)
plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(True,linestyle=':',color='0.75')
ax.set_axisbelow(True)
plt.xlabel(r'Normalized Frequency $\left(\Omega = \frac{\omega}{\omega_n}\right)$',family='serif',fontsize=22,weight='bold',labelpad=5)
plt.ylabel(r'$ \left| \frac{1}{1 - \Omega^2} \right|$',family='serif',fontsize=22,weight='bold',labelpad=15)
plt.ylim(0.0,5.0)
plt.yticks([0,1])
plt.xticks([0,1],['0','$\Omega = 1$'])
plt.plot(wnorm,xnorm_mag,linewidth=2)
# If you want to save the figure, uncomment the commands below.
# The figure will be saved in the same directory as your IPython notebook.
# Save the figure as a high-res pdf in the current folder
# plt.savefig('MassSpring_SeismicFreqResp_NormMag.pdf',dpi=300)
fig.set_size_inches(9,6) # Resize the figure for better display in the notebook
# -
# <hr style="border: 0px;
# height: 1px;
# text-align: center;
# background: #333;
# background-image: -webkit-linear-gradient(left, #ccc, #333, #ccc);
# background-image: -moz-linear-gradient(left, #ccc, #333, #ccc);
# background-image: -ms-linear-gradient(left, #ccc, #333, #ccc);
# background-image: -o-linear-gradient(left, #ccc, #333, #ccc);">
# #### Licenses
# Code is licensed under a 3-clause BSD style license. See the licenses/LICENSE.md file.
#
# Other content is provided under a [Creative Commons Attribution-NonCommercial 4.0 International License](http://creativecommons.org/licenses/by-nc/4.0/), CC-BY-NC 4.0.
# This cell will just improve the styling of the notebook
# You can ignore it, if you are okay with the default sytling
from IPython.core.display import HTML
import urllib.request
response = urllib.request.urlopen("https://cl.ly/1B1y452Z1d35")
HTML(response.read().decode("utf-8"))
| Jupyter Notebooks/Undamped Response to Harmonic Seismic Inputs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Example: Using the High-Level Estimator
#
# In this example, we want to show you how to use the `NaturalPosteriorNetwork` estimator to train and evaluate both single NatPN models and ensembles thereof (NatPE).
#
# Although the following is not required, we first want to hide some useless output generated by PyTorch Lightning and fix the seed for reproducibility...
from natpn import suppress_pytorch_lightning_logs
suppress_pytorch_lightning_logs()
import pytorch_lightning as pl
pl.seed_everything(42)
# ## Load the Data
#
# Then, we can define the data that we want to use. For simplicity, we will use the Sensorless Drive dataset. This is one of 8 datasets that are provided directly by this repository and which can be found in the `natpn.datasets` package.
from natpn.datasets import SensorlessDriveDataModule
dm = SensorlessDriveDataModule()
# If you want to use your own dataset with the `NaturalPosteriorNetwork` estimator, you can simply subclass `natpn.datasets.DataModule` which is a simple extension of `pytorch_lightning.LightningDataModule`.
# ## NatPN
#
# Having defined the data that we want to use, we can proceed to initialize the `NaturalPosteriorNetwork` estimator.
from natpn import NaturalPosteriorNetwork
# This estimator provides plenty of initialization options and you should consult the documentation of the `__init__` method. In the following, we initialize an estimator which is configured as follows:
#
# - It uses a tabular encoder (which is required for tabular data)
# - It uses 16 layers of radial transforms (the default) for the evidence-producing normalizing flow
# - It runs both training and fine-tuning for at most 150 epochs
# - It uses a learning rate of `3e-3` and decays it by a factor `0.25` if the validation loss plateaus for at least 1/20-th of the training (i.e. 7 epochs)
# Single NatPN model configured for tabular data.
estimator = NaturalPosteriorNetwork(
    encoder="tabular",  # required encoder type for tabular datasets
    flow_num_layers=16,  # radial-transform layers in the normalizing flow (library default)
    learning_rate=3e-3,
    learning_rate_decay=True,  # decay by 0.25 when validation loss plateaus
    trainer_params=dict(max_epochs=150),  # cap applies to training and fine-tuning
)
# Note that you can pass more options to the estimator itself and configure specifics of the training procedure by passing additional options to the `trainer_params` flag which configures a `pytorch_ligthning.Trainer`. For example, if you wanted to use a GPU, you should pass `gpus=1` as an additional option to `trainer_params`.
# ### Train the Estimator
#
# Using the defined estimator, we can simply call `fit` to train it on the data - just like for any Scikit-learn estimator! For the Sensorless Drive dataset, fitting should take a few minutes only...
estimator.fit(dm)
# Just like for any Scikit-learn estimator, the `fit` method returns the estimator such that calls to its methods can be chained.
# ### Evaluate the Performance
#
# Now that the estimator is trained, we can finally evaluate it. For this purpose, the estimator provides two methods:
#
# - `score` computes the scores on the test set that is provided by the data module. For classification, it computes accuracy and Brier score. For regression, it computes RMSE and calibration.
# - `score_ood_detection` measures the model's ability to distinguish the items in the test set from items from unrelated datasets as provided by the data module. For each such unrelated dataset (first level of the returned dictionary), it computes multiple metrics: the aleatoric and epistemic confidence, measured by both the area under the precision-recall curve (AUC-PR) and the area under the receiver operating characteristic curve (AUC-ROC).
estimator.score(dm)
estimator.score_ood_detection(dm)
# ## NatPE
#
# Using the `NaturalPosteriorNetwork` estimator to train an ensemble of models is straightforward: the only change that is required is to set `ensemble_size` to the desired number of ensemble members. For this example, we choose an ensemble size of 5.
# Same configuration as the single NatPN model, but with 5 independently
# trained ensemble members (NatPE).
estimator = NaturalPosteriorNetwork(
    ensemble_size=5,  # number of NatPN members in the ensemble
    encoder="tabular",
    flow_num_layers=16,
    learning_rate=3e-3,
    learning_rate_decay=True,
    trainer_params=dict(max_epochs=150),
)
# ### Train the Ensemble Members
#
# When the estimator is trained, it trains each ensemble member independently and one after the other.
estimator.fit(dm)
# ### Evaluate the Performance
#
# For evaluation, it then merges the individually trained ensemble members into a single model.
estimator.score(dm)
estimator.score_ood_detection(dm)
| examples/estimator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
import numpy as np
import pandas as pd
# -
# Project data layout: ~/work/data/{raw,processed}
DATA = Path.home() / 'work' / 'data'
DATA_RAW = DATA / 'raw'
DATA_PROCESSED = DATA / 'processed'
# Pre-truncated time series plus their targets (produced by an earlier step).
train_truncated = pd.read_parquet(DATA_PROCESSED / 'train_ts_truncated.parquet')
test = pd.read_parquet(DATA_PROCESSED / 'test_ts_truncated.parquet')
train_target = pd.read_parquet(DATA_PROCESSED / 'train_target.parquet')
test_target = pd.read_parquet(DATA_PROCESSED / 'test_target.parquet')
# Final hold-out set and submission scaffold from the raw competition files.
final_test = pd.read_parquet(DATA_RAW / 'test_values.parquet')
recipe_metadata = pd.read_csv(DATA_RAW / 'recipe_metadata.csv', index_col=0)
submission_format = pd.read_csv(DATA_RAW / 'submission_format.csv', index_col=0)
# # Preprocessing
# Columns removed from every frame before feature engineering.
# NOTE(review): assumes all three frames contain these columns -- confirm.
to_drop = ['tank_lsh_acid', 'tank_lsh_pre_rinse', 'target_time_period']
train_truncated = train_truncated.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)
final_test = final_test.drop(to_drop, axis=1)
# # Implementation
metadata = ['object_id', 'pipeline']
phase_order = ['pre_rinse', 'caustic', 'intermediate_rinse', 'acid']
def phase_count(timeseries):
    """One boolean column per cleaning phase: did the process include it?"""
    phases_seen = timeseries.groupby('process_id')['phase'].unique()
    flags = phases_seen.apply('|'.join).str.get_dummies().astype(bool)
    # Fixed column order; phases never observed anywhere come back as NaN.
    flags = flags.reindex(phase_order, axis=1)
    flags.columns = ['phase:' + name for name in flags.columns]
    return flags
def phase_duration(timeseries):
    """Elapsed seconds of each (process, phase), one column per phase."""
    bounds = timeseries.groupby(['process_id', 'phase'])['timestamp'].agg(['min', 'max'])
    bounds['duration'] = (bounds['max'] - bounds['min']).dt.total_seconds()
    wide = bounds[['duration']].unstack(-1)
    # Flatten the (stat, phase) MultiIndex, phase name first.
    wide.columns = [f'{phase}:{stat}' for stat, phase in wide.columns]
    return wide
def total_turbidity(timeseries):
    """Calculates the target value for all phases (for the entire duration)."""
    sums = timeseries.groupby(['process_id', 'phase'])['turbidity'].sum().unstack()
    sums.columns = [f'{phase}:total_turbidity' for phase in sums.columns]
    return sums
def get_first_and_last(data, groupby_cols, float_cols, N):
    """Per-group mean of the first N and last N rows of each float column.

    Columns come back as a ('col', 'firstN'/'lastN') MultiIndex.
    """
    subset = data[groupby_cols + float_cols]
    head_mean = subset.groupby(groupby_cols).head(N).groupby(groupby_cols).mean()
    tail_mean = subset.groupby(groupby_cols).tail(N).groupby(groupby_cols).mean()
    head_mean.columns = pd.MultiIndex.from_tuples(
        [(col, 'first%d' % N) for col in head_mean.columns])
    tail_mean.columns = pd.MultiIndex.from_tuples(
        [(col, 'last%d' % N) for col in tail_mean.columns])
    return pd.concat([head_mean, tail_mean], axis=1)
def apply_funcs(data, groupby_cols=None):
    """Aggregate every float column of ``data`` within the given groups.

    Faster than aggregating all functions at once; the hard part is
    recreating the MultiIndex columns.

    Args:
        data: frame containing the grouping columns plus float feature columns.
        groupby_cols: columns to group by; defaults to ``['process_id']``.

    Returns:
        DataFrame indexed by group with (column, statistic) MultiIndex columns.
    """
    # Avoid a mutable default argument; None keeps the original default behavior.
    if groupby_cols is None:
        groupby_cols = ['process_id']
    float_cols = data.select_dtypes('float').columns.tolist()
    # Standard aggregations in a single pass.
    standard = data.groupby(groupby_cols)[float_cols].agg(
        ['sum', 'median', 'mean', 'std', 'max', 'min'])
    # Quantiles: the quantile value becomes an extra index level; move it into
    # the columns.
    qs = [.2, .8]
    quantiles = data.groupby(groupby_cols)[float_cols].quantile(qs).unstack(-1)
    # BUG FIX: Index.get_values() was removed in pandas 1.0 -- iterate the
    # MultiIndex directly instead.
    quantiles.columns = pd.MultiIndex.from_tuples(
        [(col, 'q%d' % int(100 * q)) for col, q in quantiles.columns])
    # Average of the first and last values per group.
    first_and_last = get_first_and_last(data, groupby_cols, float_cols, 10)  # last param somewhat optimized
    return pd.concat([standard, quantiles, first_and_last], axis=1)
def groupby_and_apply(timeseries, prefix=''):
    """Feature table per process plus per (process, phase), columns flattened."""
    per_process = apply_funcs(timeseries)
    per_process.columns = prefix + per_process.columns.to_series().apply(':'.join)
    per_phase = apply_funcs(timeseries, ['process_id', 'phase'])
    per_phase = per_phase.unstack(-1)  # phase becomes a third column level
    per_phase.columns = prefix + per_phase.columns.to_series().apply(':'.join)
    return pd.concat([per_process, per_phase], axis=1)
def float_features_fast(timeseries, add_diff=True):
    """Select features of the timeseries, its derivative, all grouped by
    process or process and phase."""
    base = groupby_and_apply(timeseries)
    if not add_diff:
        return base
    float_cols = timeseries.select_dtypes('float').columns.tolist()
    window = 10  # smooth before differencing to suppress sensor noise
    diff = timeseries.groupby('process_id')[float_cols].transform(
        lambda g: g.rolling(window).mean().diff(window))
    signed = groupby_and_apply(
        pd.concat([timeseries[['process_id', 'phase']], diff], axis=1).dropna(),
        prefix='diff:')
    absolute = groupby_and_apply(
        pd.concat([timeseries[['process_id', 'phase']], diff.abs()], axis=1).dropna(),
        prefix='absdiff:')
    return pd.concat([base, signed, absolute], axis=1)
def boolean_features_fast(timeseries):
    """On-time statistics for every boolean sensor, per phase and per process."""
    bool_columns = timeseries.select_dtypes('bool').columns.tolist()
    data = timeseries[['process_id', 'phase'] + bool_columns].copy()
    data[bool_columns] = data[bool_columns].astype(int)

    def flatten(frame):
        # Collapse MultiIndex column labels into 'a:b[:c]' strings.
        frame.columns = frame.columns.to_series().apply(':'.join)
        return frame

    # Count and share of time each boolean is "on" within every phase...
    per_phase = flatten(
        data.groupby(['process_id', 'phase'])[bool_columns]
            .agg(['sum', 'mean']).unstack(-1))
    # ... and over the whole process.
    per_process = flatten(
        data.groupby('process_id')[bool_columns].agg(['sum', 'mean']))
    # Mean of the first/last 10 samples, per phase and per process.
    edges_phase = flatten(
        get_first_and_last(data, ['process_id', 'phase'], bool_columns, 10).unstack(-1))
    edges_process = flatten(
        get_first_and_last(data, ['process_id'], bool_columns, 10))
    return pd.concat([per_phase, per_process, edges_phase, edges_process], axis=1)
def harmonic_features(timeseries):
    """Calculates three maximums of the power spectrum."""
    def power_peaks(signal):
        # Squared magnitude of the DFT, three largest values (ascending).
        spectrum = np.abs(np.fft.fft(signal)) ** 2
        return np.sort(spectrum)[-3:]
    peaks = (timeseries.groupby('process_id')['turbidity']
             .apply(power_peaks).apply(pd.Series))
    peaks.columns = [f'harmonic:power_spectrum_max{i}' for i in (3, 2, 1)]
    return peaks
def join_features(timeseries):
    """Assemble the full feature table for every process in ``timeseries``.

    NOTE: mutates ``timeseries`` in place (adds derived turbidity columns)
    and relies on the module-level ``metadata`` and ``recipe_metadata``
    globals defined earlier in the notebook.
    """
    features = timeseries.groupby('process_id')[metadata].first().astype(str)
    # Encode the recipe as one concatenated string of its column values.
    features = features.join(recipe_metadata.astype(str).apply(''.join, axis=1).rename('recipe'))
    if features.isna().any().any():
        raise ValueError('Missing values for %s' % str(metadata))
    # Derived turbidity signals; flow is clipped at zero so negative return
    # flow does not produce negative turbidity mass, and logs use 1 + x.
    timeseries['log_return_turbidity'] = np.log(1 + timeseries['return_turbidity'])
    timeseries['turbidity'] = np.maximum(timeseries['return_flow'], 0) * timeseries['return_turbidity']
    timeseries['log_turbidity'] = np.log(1 + timeseries['turbidity'].clip(lower=0))
    print('Adding boolean features...')
    features = features.join(boolean_features_fast(timeseries))
    print('Adding phase count...')
    features = features.join(phase_count(timeseries))
    print('Adding phase duration...')
    features = features.join(phase_duration(timeseries))
    print('Adding total turbidity...')
    features = features.join(total_turbidity(timeseries))
    print('Adding float features (this may take a while)...')
    features = features.join(float_features_fast(timeseries, add_diff=False))
    print('Adding harmonic features...')
    features = features.join(harmonic_features(timeseries))
    return features
# # Feature calculation
# %%time
train_features = join_features(train_truncated)
# %%time
test_features = join_features(test)
# %%time
final_test_features = join_features(final_test)
# ## Sanity check
# Feature rows must line up one-to-one with the target / submission rows.
assert (train_features.index == train_target.index).all()
assert (test_features.index == test_target.index).all()
assert (final_test_features.index == submission_format.index).all()
# ## Save
train_features.to_parquet(DATA_PROCESSED / 'train_features.parquet')
test_features.to_parquet(DATA_PROCESSED / 'test_features.parquet')
final_test_features.to_parquet(DATA_PROCESSED / 'final_test_features.parquet')
| notebooks/feature_engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Continuaciรณn clase de optimizaciรณn de cรณdigo
# Paquetes a utilizar
from functools import reduce
import numpy as np
import scipy.special as sps
from itertools import cycle # Librerรญa para hacer ciclos
import matplotlib.pyplot as plt
# # Variables locales, globales y no locales
# * ## Variable global
# +
x = "global"
def foo():
    # Rebinding a module-level name requires the ``global`` declaration.
    global x
    x = x * 2  # x is now "globalglobal"
    print("x inside :", x * 2)  # NOTE: prints x doubled *again* (four copies)
foo()
print("x outside:", x)  # the rebinding done inside foo() is visible here
# -
# ยฟQuรฉ sucede si intento asignar un valor a la variables x desde la funciรณn?
# +
# Assigning to x from inside the function works because of ``global``.
x = "global"
def foo():
    global x
    x = x * 2
    print('dentro de la función,', x)
foo()
print('fuera de la función, ', x)
# -
# * ## Variable local
# Intentando acceder a una variable local
# +
def foo():
    y = "local"  # y exists only while foo() is running
foo()
# Deliberate error for the lesson: y is local to foo(), so this
# module-level access raises NameError.
print(y)
# -
# Mostrando el valor de una variable local
# +
def foo():
    y = "local"
    def local_f():
        # ``nonlocal`` binds y in the enclosing foo() scope, not a new local.
        nonlocal y
        y = y * 2
        print(y)  # "locallocal"
    local_f()
    print(y)  # the enclosing y was modified by local_f()
foo()
# -
# **Variables locales y globales en el mismo cรณdigo**
# +
# Local and global variables coexisting in the same function.
x = "global"
def foo():
    global x
    y = "local"  # local to foo()
    x = x * 2    # rebinding the module-level x
    print(y)
    print(x)
foo()
# -
# **Variables locales y globales con el mismo nombre**
# +
x = 5
def foo():
    # NOTE(review): despite the "local x" label in the print, this cell
    # declares x global, so both prints show the same rebound global (10).
    global x
    x = 10
    print("local x:", x)
foo()
print("global x:", x)
# -
# * ## Variables no locales
# Crear una variable no local
# +
y = 8
def outer():
    x = "local"
    def inner():
        global y    # rebinds the module-level y
        nonlocal x  # rebinds x in outer()'s scope
        x = "nonlocal"
        y = 10
        print("inner:", x)
        print('inner 2:', y)
    inner()
    print("outer:", x)    # "nonlocal" -- inner() changed it
    print("outer 2:", y)  # 10 -- the global was rebound too
outer()
# -
# ### Ejercicio:
# Resolver de manera eficiente la siguiente ecuaciรณn a diferencia
# $$x_n = (ax_{nโ1} + b) \mod m$$
# Haciendo uso de las variables no locales y compresiรณn de listas o funciones map
# +
# %%time
# Model parameters for x_n = (a*x_{n-1} + b) mod m: n terms, seed x0.
a,b,n,m,x0 = 1,2,5,9,1
# 1. Naive method: grow a Python list one term at a time.
xn = [x0]
for i in range(n - 1):
    xn.append((a * xn[-1] + b) % m)
xn  # display the resulting sequence
# +
# %%time
# 2. Mรฉtodo funciones anidadas
a,b,n,m,x0 = 1,2,5,9,1
def principal(a, b, n, m, x0):
    """Solve x_i = (a*x_{i-1} + b) mod m for i < n, demonstrating ``nonlocal``
    together with a list comprehension used for its side effects."""
    seq = np.zeros(n)
    seq[0] = x0
    def step(i):
        # ``nonlocal`` lets the helper touch the enclosing array (pedagogical:
        # mutation alone would not strictly require it).
        nonlocal seq
        seq[i] = (a * seq[i - 1] + b) % m
    [step(i) for i in range(1, n)]
    return seq
principal(a,b,n,m,x0)
# -
# 3. Usando funciones y variables globales
# ### Ejercicio: Vectorizar distribuciรณn de poisson
# $$\textbf{Funciรณn de densidad de probabilidad}\\p(k)=\frac{\lambda^k e^{-\lambda}}{k!},\quad k\in \mathbb{N}$$
# +
N = 30
l = [1, 3, 5]
k = np.arange(N)
# Poisson pmf: p(k; lambda) = lambda**k * exp(-lambda) / k!
p = lambda k, l: (l**k * np.exp(-l)) / sps.factorial(k)
# Plot p(k) for every lambda.  BUG FIX: p is defined as p(k, l), but the
# original called p(l, k) with the arguments swapped, which plots the wrong
# curves for the labelled lambdas.
k = np.arange(20)
[plt.plot(p(k, l), '*', label=f'$\lambda={l}$') for l in [3, 5, 2]]
plt.legend()
# TODO: build the LaTeX legend for each l
# -
plt.plot(np.array([p(k, l) for l in [2, 5, 9]]).T, '*')
# +
## Typical (loop-based) way of solving the problem -- kept for contrast
cycol = cycle('bgrcmk')
# Poisson pmf: lambda**k * exp(-lambda) / k!.  Note k! == gamma(k + 1);
# BUG FIX: the original divided by gamma(k) = (k-1)!, which shifts the whole
# distribution by one and makes p(0) evaluate to 0 instead of exp(-lambda).
p = lambda k, l: (l**k * np.exp(-l)) / sps.gamma(k + 1)
# Inefficient version: explicit loops, one lambda per pass
l = 1
# Separate subplots for the pmf and its cumulative distribution
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10));
ax1.title.set_text('Densidad de distrución de probabilidad')
ax2.title.set_text('Distrución de probabilidad acumulada')
for i in range(3):
    P = []
    c = next(cycol)
    for k in range(60):
        P.append(p(k, l))
    ax1.plot(P, 'o', c=c, label='$\lambda$=%s' % l, markersize=2)
    ax1.legend()
    ax2.plot(np.cumsum(P), 'o', c=c, label='$\lambda$=%s' % l, markersize=3)
    ax2.legend()
    # Step-style horizontal segments for the cumulative curve
    ax2.hlines(np.cumsum(P), range(len(P)), range(1, len(P) + 1), color=c)
    l += 20
plt.subplots_adjust(hspace=.4)
plt.show()
# -
# ## Resolverlo de manera vectorizada
#
# +
# Ahora crea una funciรณn tal que al llamarla cree una grรกfica de p(k) para un
# lambda dado
def plotExponential(lamb, N, cycol):
    """Plot the Poisson pmf p(k) for a single rate ``lamb`` over k = 0..N-1.

    Args:
        lamb: Poisson rate parameter (lambda).
        N: number of k values to evaluate.
        cycol: color cycle (e.g. ``cycle('bgrcmk')``) supplying plot colors.

    BUG FIX: the original stub had no body at all, which is a SyntaxError in
    Python; a placeholder body is required for the notebook to run.  The
    implementation itself is the in-class exercise.
    """
    pass
# -
# # Ejercicios
# 1. Use filter to eliminate all words that are shorter than 4 letters from a list of words
# `list='the notion of a lambda function goes all the way back to the origin of computer science'`
########### Soluciรณn
texto='the notion of a lambda function goes all the way back to the \
origin of computer science'
# 2. Use filter to determine the percentage of Fahrenheit temperatures in a list are within the range 32 to 80
########### Soluciรณn
np.random.seed(55555)
temperatures = np.random.uniform(25,110,30)
temperatures
# +
# Solucionarlo usando la funciรณn filter
# Solucionarlo usando filtro de vectores
# -
# 3. Use reduce to find the lower left corner (minimum x and minimum y value) for a list of point locations
#
# > **Hint**: Explore the command `np.minimum.reduce` [link](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduce.html)
########### Soluciรณn 5
np.random.seed(55555)
point_xy = np.random.randint(0,100,[30,2])
print(point_xy)
# +
# Soluciรณn
# -
# 4. Find all of the numbers from 1-1000 that are divisible by 7
# +
# Lista de nรบmeros
number = np.arange(1,1001)
# Resultado usando for tradicional
# Resultado usando compresiรณn de listas
# Resultado usando la funciรณn filter
# -
# 5. Find all of the numbers from 1-1000 that have a 3 in them
# +
# Lista de nรบmeros
number = np.arange(1,1001)
# Resultado usando for tradicional
# Resultado usando compresiรณn de listas
# Resultado usando la funciรณn filter
# -
# 6. Imagine una rutina contable utilizada en una librerรญa. Funciona en una lista con sublistas, que se ven asรญ:
# 
# Escriba un programa en Python, que devuelve una lista con tuplas de tamaรฑo 2. Cada tupla consiste en el nรบmero de pedido y el costo total del pedido. El producto debe aumentarse en 10โฌ si el valor de la orden es inferior a 100,00 โฌ.
# Escribe un programa Python usando unicamente las funciones **lambda y map**.
########### Soluciรณn
orders = [ ["34587", "Learning Python, <NAME>", 4, 40.95],
["98762", "Programming Python, <NAME>", 5, 56.80],
["77226", "Head First Python, <NAME>", 3,32.95],
["88112", "Einfรผhrung in Python3, <NAME>", 3, 24.99]]
# ### Forma alternativa
# +
# Creando una funciรณn que incremente en 10 euros si el pedido es menor a 100
# -
# 7. La misma librerรญa, pero esta vez trabajamos en una lista diferente. Las sublistas de nuestras listas se ven asรญ:
# [nรบmero de orden, (nรบmero de artรญculo, cantidad, precio por unidad), ... (nรบmero de artรญculo, cantidad, precio por unidad)]
#
# `orders = [[1, ("5464", 4, 9.99), ("8274",18,12.99), ("9744", 9, 44.95)],
# [2, ("5464", 9, 9.99), ("9744", 9, 44.95)],
# [3, ("5464", 9, 9.99), ("88112", 11, 24.99)],
# [4, ("8732", 7, 11.99), ("7733",11,18.99), ("88112", 5, 39.95)] ]`
#
# Escriba un programa que devuelva una lista de dos tuplas que tengan la informaciรณn de (nรบmero de orden, cantidad total de pedido). Nuevamente, tenga en cuenta que si el pedido debe aumentarse en 10โฌ si el valor de la orden es inferior a 100,00 โฌ. Utilice la funciรณn `Reduce`.
# +
########### Soluciรณn
orders = [ [1, ("5464", 4, 9.99), ("8274",18,12.99), ("9744", 9, 44.95)],
[2, ("5464", 9, 9.99), ("9744", 9, 44.95)],
[3, ("5464", 9, 9.99), ("88112", 11, 24.99)],
[4, ("8732", 7, 11.99), ("7733",11,18.99), ("88112", 5, 39.95)] ]
| TEMA-1/Clase5_ContOptimizacionProgramacion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# run_smemp_apf.ipynb
# Script to run Specmatch-Emp model on APF spectra and output results including derived stellar properties
# and residual between target and best matching spectra.
# Last modified ?? by <NAME>
#
# NOTE: This script is modified from smemp_multifile.ipynb (itself modified from C.N's smemp.py). Modifications from smemp.py include added ability to process multiple stellar
# targets from a directory, new deblazing function, option to simplistically remove cosmic rays, and new output
# files including output of residual. See the project write-up for details.
# Modifications are noted by the initials ADZ and the date.
# Input: path to a directory containing APF fits spectra files. If more than one file corresponds to a star,
# they must be grouped into a subdirectory.
# Output: Specmatch-emp derived Stellar properties in specmatch_results.csv, fits file for each star containing normalized,
# deblazed target spectrum, residual between target spectrum and linear combination of best matching spectra, and
# shifted wavelength scale. Also produces a log file. Please update file paths for these if needed before running.
# -
# ADZ: allow importing .ipynb scripts
# !pip install import-ipynb
# +
# standard imports
import import_ipynb #ADZ ADD 6/23/20
import pandas as pd #ADZ ADD 7/13/20
from astroquery.simbad import Simbad #ADZ ADD 8/6/20
import sys, os
from os import listdir
from os.path import isfile, join
import csv
from pylab import *
import pylab
import astropy.io.fits as pf
from astropy.io import fits
# Specmatch imports (relative to current directory)
sys.path.insert(0, '/mnt_home/azuckerman/BL_APF_DAP/specmatch_emp')
from specmatch_emp.specmatchemp import library
from specmatch_emp.specmatchemp import plots as smplot
from specmatch_emp.specmatchemp.spectrum import Spectrum
from specmatch_emp.specmatchemp.specmatch import SpecMatch
# Other local imports
#from deblaze import afs_deblaze # ADZ comment out
from rescale import get_rescaled_wave_soln
from rescale import resample
from optparse import OptionParser
from bstar_deblaze import bstar_deblazed2 #ADZ ADD 7/17/20
# -
'''
Teff_trend = np.poly1d(np.loadtxt('Teff_trend.csv'))
feh_trend = np.poly1d(np.loadtxt('feh_trend.csv'))
R_R_trend = np.poly1d(np.loadtxt('R_R_trend.csv'))
detrend_uncertainties = np.loadtxt('detrended_parameter_RMSE_values.csv', delimiter = ',', dtype = float, skiprows = 1, usecols = (1,2,3))
Teff_det_u = detrend_uncertainties[0]
feh_det_u = detrend_uncertainties[1]
R_det_u = detrend_uncertainties[2]
def detrend(sm):
Teff_detrended = sm.results['Teff'] + Teff_trend(sm.results['Teff'])
feh_detrended = sm.results['feh'] + feh_trend(sm.results['feh'])
if sm.results['radius'] < 1 or sm.results['radius'] > 2:
R_detrended = sm.results['radius']
else:
R_detrended = sm.results['radius'] + R_R_trend(sm.results['radius'])*sm.results['radius']
return Teff_detrended, feh_detrended, R_detrended
'''
# +
#def write_results(fd_raw, fd_detrended, my_spectrum, sm, write_new = False):
def write_results(fd, my_spectrum, sm, filenames, write_new = False):
    """
    Write to a csv the derived properties of a target following
    the SpecMatch process
    Args:
        fd(File): object for the csv file to write detrended and un-detrended stellar property results to
        my_spectrum (spectrum.Spectrum): Target spectrum
        sm (specmatch.SpecMatch): Contains results of the algorithm
        filenames (list): names of the FITS files summed for this target
        write_new (Boolean): Whether to write to a new csv file
    NOTE(review): ``best_mean_chi_squared`` is read below as a free
    (module-global) variable; calling this before that global exists raises
    NameError -- consider passing it in explicitly.
    """
    fieldnames = ['apf_name', 'filenames', 'Teff', 'u_Teff', 'Teff_detrended', 'u_Teff_detrended', 'radius','u_radius',
                  'radius_detrended', 'u_radius_detrended', 'logg', 'u_logg', 'feh','u_feh', 'feh_detrended',
                  'u_feh_detrended', 'mass', 'u_mass', 'age', 'u_age', 'best_mean_chi_squared']
    thewriter = csv.DictWriter(fd, fieldnames=fieldnames)
    # Only emit the header row when starting a fresh file.
    if (write_new): thewriter.writeheader()
    # Raw values come from sm.results_nodetrend; detrended ones from sm.results.
    thewriter.writerow({'apf_name' : my_spectrum.name,
                        'filenames': filenames,
                        'Teff' : '{0:.3f}'.format(sm.results_nodetrend['Teff']),
                        'u_Teff' : '{0:.3f}'.format(sm.results_nodetrend['u_Teff']),
                        'Teff_detrended' : '{0:.3f}'.format(sm.results['Teff']),
                        'u_Teff_detrended' : '{0:.3f}'.format(sm.results['u_Teff']),
                        'radius' : '{0:.3f}'.format(sm.results_nodetrend['radius']),
                        'u_radius' : '{0:.3f}'.format(sm.results_nodetrend['u_radius']),
                        'radius_detrended' : '{0:.3f}'.format(sm.results['radius']),
                        'u_radius_detrended' : '{0:.3f}'.format(sm.results['u_radius']),
                        'logg' : '{0:.3f}'.format(sm.results_nodetrend['logg']),
                        'u_logg' : '{0:.3f}'.format(sm.results_nodetrend['u_logg']),
                        'feh' : '{0:.3f}'.format(sm.results_nodetrend['feh']),
                        'u_feh' : '{0:.2f}'.format(sm.results_nodetrend['u_feh']),
                        'feh_detrended' : '{0:.3f}'.format(sm.results['feh']),
                        'u_feh_detrended' : '{0:.3f}'.format(sm.results['u_feh']),
                        'mass' : '{0:.3f}'.format(sm.results_nodetrend['mass']),
                        'u_mass' : '{0:.3f}'.format(sm.results_nodetrend['u_mass']),
                        'age' : '{0:.2f}'.format(sm.results_nodetrend['age']),
                        'u_age' : '{0:.2f}'.format(sm.results_nodetrend['u_age']),
                        'best_mean_chi_squared' :
                        '{0:.2f}'.format(best_mean_chi_squared)})
# Disabled legacy path: write a second, detrended-only row set.
#    Teff_detrended, feh_detrended, R_detrended = detrend(sm)
#    thewriter = csv.DictWriter(fd_detrended, fieldnames=fieldnames)
#    if (write_new): thewriter.writeheader()
#    thewriter.writerow({'apf_name' : my_spectrum.name,
#                        'Teff' : '{0:.3f}'.format(sm.results['Teff']),
#                        'u_Teff' : '{0:.3f}'.format(sm.results['u_Teff']),
#                        'radius' : '{0:.3f}'.format(sm.results['radius']),
#                        'u_radius' : '{0:.3f}'.format(sm.results_nodetrend['u_radius']),
#                        'logg' : '{0:.3f}'.format(sm.results['logg']),
#                        'u_logg' : '{0:.3f}'.format(sm.results['u_logg']),
#                        'feh' : '{0:.3f}'.format(sm.results['feh']),
#                        'u_feh' : '{0:.2f}'.format(sm.results['u_feh']),
#                        'mass' : '{0:.3f}'.format(sm.results['mass']),
#                        'u_mass' : '{0:.3f}'.format(sm.results['u_mass']),
#                        'age' : '{0:.2f}'.format(sm.results['age']),
#                        'u_age' : '{0:.2f}'.format(sm.results['u_age']),
#                        'best_mean_chi_squared' :
#                        '{0:.2f}'.format(best_mean_chi_squared)})
# -
def run_specmatch(path_name, lib, display_plots): #ADZ: made this into a function 6/29/20
print('EMPIRICAL SPECMATCH'), print()
parser = OptionParser()
parser.add_option("-f", "--file", action='store', type='string',
dest="pathname",
help="pass the path of the FITS file(s) as an argument")
parser.add_option("-o", action='store', type='string',
dest="outputpath",
help="pass the path to a csv file to write to "
"as an argument")
parser.add_option("-p", action="store_true", dest="plot",
help='plot')
parser.add_option("--all", action="store_true", dest="all",
help='plots all wavelength regions')
parser.add_option("--best", action="store_true", dest="best",
help='plots the reference, modified reference and residuals '
'for each of the best matches.')
parser.add_option("--chi", action="store_true", dest="chi",
help='plot the chi-squared surface from the pairwise \
matching procedure')
parser.add_option("--ref", action="store_true", dest="ref",
help='plot the locations of the best references used in the \
linear combination step')
parser.add_option("--sr", action="store_true", dest="ref",
help='save the residuals ')
(options, sys.argv) = parser.parse_args(sys.argv)
# if no path given in command, prompt user for a path to a file or a directory
# from which to acquire fits files
# NOTE: program currently only known to work if all the files in the directory
# are fits files and are intended targets
#ADZ 6/29/20: removed this if statement; pathname is given when this function is called.
#if (options.pathname == None):
#path_name = input('Please enter the path to the FITS file(s) of a star: ')
#print()
#else:
# path_name = options.pathname
try:
filenames = [f for f in listdir(path_name) if isfile(join(path_name, f))]
except NotADirectoryError: # path to one file
path_split = path_name.split('/')
path_split[:-1] = ['/'.join(path_split[:-1])]
filenames = []
filename = path_split[-1]
filenames.append(filename)
path_name = path_split[0]
print(filenames)
#print('got here')
# ADZ: remove test below 7/8/20 -> See function check2 in check_file_labeling.ipynb
# (I tested that all files in each dir are for same star in that func.)
# check to see if files are for the same star
# NOTE: program currently does not work if the input directory contains fits
# files for multiple stars
#names = set() #ADZ comment out
#for filename in filenames:
# file = pf.open(path_name + '/' + filename,ignore_missing_end=True) #ADZ ADD ignore_missing_end=True
# header = file[0].header
# name = header['TOBJECT']
# names.add(name)
# if (len(names) > 1):
# print('Spectra Addition Error: ')
# print('This program sums the spectra for a star.')
# print('Please only provide the path to FITS files for the same star' +
# ' for a run of this program.')
# sys.exit()
#display_plots = False
if (options.plot or options.chi or options.best or options.ref):
display_plots = True # bool var for whether or not to display plots
# Prompt for regions to plot
if ((display_plots) and (options.all == None)):
print("0 : 5000 to 5100 ร
")
print("1 : 5100 to 5200 ร
")
print("2 : 5200 to 5300 ร
")
print("3 : 5300 to 5400 ร
")
print("4 : 5400 to 5500 ร
")
print("5 : 5500 to 5600 ร
")
print("6 : 5600 to 5700 ร
")
print("7 : 5700 to 5800 ร
")
print("8 : 5800 to 5900 ร
")
print("Please enter the corresponding numbers for " +
"the wavelength regions to be plotted.")
print("Separate the numbers with spaces.")
print("Default option is only region 1. Simply press enter for " +
"default option.")
print("Enter \'all\' to plot all the regions.")
while(True):
inp = input('โ ')
try:
if (inp == ''): # Default - plot region 1
regions = [1]
elif (inp == 'all'):
regions = (list(range(9)))
else:
regions = [int(region) for region in sort(inp.split(" "))]
if (False in [(0 <= region <= 8) for region in regions]):
continue
except ValueError: continue
break
else: # plot all
regions = (list(range(9)))
# Read in data from wavelength solution
wave_soln = (pf.open('apf_wav.fits'))[0].data
# Sum all of the data files for a star
data = np.zeros((79, 4608))
ve = False
counter = 0
for filename in filenames:
file = pf.open(path_name + '/' + filename)
data_part = file[0].data
if counter == 0: #ADZ 7/26/20: get the header from the first file for this star, to use for the residual fits file
use_header = file[0].header
counter += 1
if (str(np.shape(data_part)) != '(79, 4608)'):
print(str(np.shape(data_part)) + ' is an incompatible data shape.')
print('Cannot perform shift-and-match process.')
sys.exit()
try:
data += data_part
except ValueError:
ve = True
if (ve):
print("Value Error occurred during spectra summation.")
header = file[0].header
name = header['TOBJECT']
print('Running SpecMatch-Emp on ' + name + ':')
for filename in filenames:
print(filename)
ve = False
#Deblaze the orders: 31 to 52 (range of focus in the SME library)
for order_inc in range(22):
try: #ADZ 7/17/20: use B-star deblaze instead of afs_deblaze
# data[30 + order_inc, :4607] = afs_deblaze(data[30 + order_inc],
# 30 + order_inc)[0]
data[30 + order_inc, :4600] = bstar_deblazed2(data,30 + order_inc)
except ValueError: ve = True
if (ve): print("Value Error occurred during blaze correction.")
#ADZ: option to remove cosmic rays (simplistically) from normalized, deblazed spectrum
# NOTE: set to FALSE when running this for results other than calibration
remove_cosmic_rays = True
def remove_cosmic_rays(spect): # must input a normalized, deblazed spectrum
new_spect = spect
for i in range(len(spect)):
old_value = spect[i]
if old_value > 1.4:
new_value = np.median(spect[i-3:i+3])
new_spect[i] = new_value
print('replaced value ' + str(old_value) + ' with '+ str(new_value) +' at ' + str(i))
return new_spect
if remove_cosmic_rays == True:
data = remove_cosmic_rays(data)
# Get a wavelength solution rescaled onto the scale of the library
wave_soln_ref = get_rescaled_wave_soln()
# Resample the spectrum onto the new wavelength scale
data_new = resample(wave_soln_ref, wave_soln, data)
# Create spectrum object
my_spectrum = Spectrum(np.asarray(wave_soln_ref), np.asarray(data_new))
my_spectrum.name = name
#lib = specmatchemp.library.read_hdf() ADZ 8/10/20 moved this to outer loop so can remove stars from library
sm = SpecMatch(my_spectrum, lib)
# Perform shift
sm.shift()
# Perform match
sm.match()
# Perform lincomb
# NOTE: detrend() is called within lincomb(),
# so after this sm.results() gives detrended and sm.results_nodetrend() gives non-detrended results.
sm.lincomb()
# Chi squared values of the best match
chi_squares = []
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5000'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5100'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5200'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5300'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5400'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5500'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5600'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5700'])
chi_squares.append(sm.match_results.iloc[0]['chi_squared_5800'])
best_mean_chi_squared = np.mean(np.asarray(chi_squares))
# Plot HR diagram
fig1 = figure(figsize=(12, 10))
sm.plot_references(verbose=True)
# plot target onto HR diagram
axes = fig1.axes
axes[0].plot(sm.results['Teff'], sm.results['radius'], '*', ms=15, color='red', label='Target')
axes[1].plot(sm.results['Teff'], sm.results['radius'], '*', ms=15, color='red')
axes[2].plot(sm.results['feh'], sm.results['radius'], '*', ms=15, color='red')
axes[3].plot(sm.results['feh'], sm.results['radius'], '*', ms=15, color='red')
axes[0].legend(numpoints=1, fontsize='small', loc='best')
plt.savefig(plots_out_path +'Stellar_properties/stellar_properties_'+ name)
fig1.show()
# Plot reference spectra and linear combinations
fig2 = plt.figure(figsize=(12,6))
sm.plot_lincomb()
plt.savefig(plots_out_path + 'Ref_lincomb_spectra/ref_and_lincomb_spectra_' + name)
fig2.show()
# method of plotting currently not used
if (display_plots):
# Plot figures
fignum = 0
for r in regions:
if (options.chi):
fignum += 1
plt.figure(fignum)
fig3 = pylab.gcf()
fig3.canvas.set_window_title('Chi-Squared Surface: Region ' + str(r))
sm.plot_chi_squared_surface(region=r, num_best=None)
if (options.best):
fignum += 1
plt.figure(fignum)
fig1 = pylab.gcf()
fig1.canvas.set_window_title('Best Match Spectra: Region ' + str(r))
sm.plot_best_match_spectra(region=r, wavlim='all', num_best=None)
if (options.ref):
fignum += 1
plt.figure(fignum)
fig2 = pylab.gcf()
fig2.canvas.set_window_title('References: Region ' + str(r))
sm.plot_references(region=r, num_best=None, verbose=True)
fignum += 1
plt.figure(fignum)
fig = pylab.gcf()
fig.canvas.set_window_title('Linear Combination: Region ' + str(r))
sm.plot_lincomb(region=r, wavlim='all')
plt.show()
# Plot shift
fignum += 1
plt.figure(fignum, figsize=(10,5))
fig4 = pylab.gcf()
fig4.canvas.set_window_title('Shift')
sm.target_unshifted.plot(normalize=True, plt_kw={'color':'forestgreen'},
text='Target (unshifted)')
sm.target.plot(offset=0.5, plt_kw={'color':'royalblue'},
text='Target: ' + name + ' (shifted)')
sm.shift_ref.plot(offset=1, plt_kw={'color':'firebrick'},
text='Reference: ' +sm.shift_ref.name)
plt.xlim(5160,5190)
plt.ylim(0,2.2)
plt.show()
# ADZ 7/10/20:
# return the (normalized, deblazed) target and the residual between the target spectrum and the linear combination of
# best matched spectra
mt_lincomb = sm.lincomb_matches[0:9]
residual_all_regions = np.zeros([0,0])
target_all_regions = np.zeros([0,0])
wl_all_regions = np.zeros([0,0])
for n in range(9):
residual = mt_lincomb[n].target.s - mt_lincomb[n].modified.s # Changed this 7/27, was opposite order
target = mt_lincomb[n].target.s
residual_all_regions = np.append(residual_all_regions, residual)
target_all_regions = np.append(target_all_regions, target)
wl_all_regions = np.append(wl_all_regions, mt_lincomb[n].target.w) # Added 8/7, was calculating wl scale below
#ADZ: create header-data-units to store output
obs_name = filenames[0].split('.')[0] # letters corresponding to the set of observations used for this target
# NOTE: assumes all spectra used are from single set of observations!
use_header.set('RESID', 'YES','Residual output of Specmatch-emp (HDU 1)')
use_header.set('SPECT', str(filenames).replace(']','').replace('[','').replace('\'','') ,'Spectra files used for target spectrum')
use_header.set('NDR', 'YES','Normalized, deblazed, registered spctrm (HDU 2)')
print(str(name) + ' COMPLETE') #ADZ 6/29/20: include name in statement
return target_all_regions, residual_all_regions, wl_all_regions, use_header, obs_name, my_spectrum, sm, best_mean_chi_squared, options
# +
# ----------------------------------- ALL CODE BELOW THIS LINE ADDED BY ADZ -------------------------------------
# Run-configuration flags and output locations for this calibration run.
save_SM_object = 0 # set to 1 to save SpecMatch objects to disk
display_plots = 0 # set to 1 to make plots
residuals_out_path = 'APF_spectra/NDRR_all_apf/' # directory to save residuals to
properties_out_path = 'SM_steller_properties/' # directory to save stellar property results to (on-disk path spelling kept as-is)
results_filename = 'specmatch_results_all_apf.csv' # filename for stellar property results within properties_out_path dir
plots_out_path = 'SM_output_plots_all_apf/' # directory to save plots to
# Get spectra filelist
path_to_dir = input('Enter the path to the directory that contains the spectra: ')
filelist = os.listdir(path_to_dir)
#filelist = filelist[0:20] # for testing to save time only run on this subset
# note if more than one spectra for a star, place in a subdirectory.
try:
filelist.remove('.ipynb_checkpoints') # remove hidden file in this directory
filelist.remove('HIP5643_spectra') # remove problematic spectrum; produces an error but not due to labeling (GJ54.1)
except ValueError:
pass
# send output to 'specmatch_output.txt' (this is a log file for the current run)
# CAUTION! Deletes existing log files
if os.path.exists("specmatch_output.txt"):
os.remove("specmatch_output.txt")
old_stdout = sys.stdout
sys.stdout = open('specmatch_output.txt', 'w')
# get wavelengths for residuals
#wl_regions = [[5000,5100],[5101,5200],[5201,5300],[5301,5400],[5401,5500],[5501,5600],[5601,5700],[5701,5800],[5801,5900]]
#region_lens = [5835, 5722, 5612, 5508, 5406, 5309, 5216, 5124, 5037]
#wl_all_regions = np.zeros([0,0])
#for n in range(9):
# wl = np.linspace(wl_regions[n][0], wl_regions[n][1], region_lens[n])
# wl_all_regions = np.append(wl_all_regions, wl)
#lib = specmatchemp.library.read_hdf() # Moved below so can remove stars and replace after running each star
# Run Specmatch-emp and save results, normalized, deblazed, registered target, and residuals
nameslist = []
pixel_shifts = []
empty_dirs = []
for filename in filelist:
# get HIP star name from filename
#apf_name_conversion = pd.read_csv('apf_name_conversion_updated.csv')
apf_log_file = pd.read_csv('./apf_log_full_16Aug2021.csv')
if filename.startswith('HIP'): # is a directory of spectra
HIP_name = filename.split('_')[0]
if len(os.listdir(path_to_dir + '/' + filename)) < 1:
print('Skipping ' + filename + ' due to empty directory.')
empty_dirs += [filename]
continue
elif filename.endswith('fits'): # is a single spectrum
row = apf_log_file[apf_log_file['Filename'] == (filename.split('.')[0] + '.' + filename.split('.')[1] + '.fits')]
HIP_name = row['HIP_name'].values.tolist()[0]
# Remove star from library (for use in calibration run)
lib = library.read_hdf()
idx_GL570B = lib.get_index('GL570B') # remove this one as it is in error according to S. Yee
lib.pop(idx_GL570B)
lib_names = lib.library_params['cps_name'].to_list()
result_table = Simbad.query_objectids(HIP_name)
alt_names = result_table.to_pandas()
alt_names = alt_names.iloc[:,0].str.decode('utf-8') # gets rid of weird formatting
if HIP_name == 'HIP80824': # This is the only GJ star (that we are running) that isn't listed in library as GL
lib_name = 'GJ628'
else: # get the library name
lib_name = [name.replace(' ','').replace('HD','').replace('GJ','GL') for name in alt_names.to_list() if name.replace(' ','').replace('HD','').replace('GJ','GL') in lib_names]
idx = lib.get_index(lib_name) # get the idx in the library
if idx == []:
print('Could not find star ' + HIP_name + ' in catalog in order to remove.')
else:
star = lib.pop(idx) # remove star from library
print('For ' + HIP_name + ', removing corresponding star: ' + star[0]['cps_name'])
# run Specmatch!
star_target, star_residual, wl_scale, use_header, obs_name, my_spectrum, sm,best_mean_chi_squared, options= run_specmatch(str(path_to_dir) + '/' + filename, lib, display_plots) # Run Specmatch on each star
# save residual to fits file
target_hdu = fits.PrimaryHDU(star_target, use_header)
resid_hdu = fits.ImageHDU(star_residual)
wl_hdu = fits.ImageHDU(wl_scale)
hdu = fits.HDUList([target_hdu, resid_hdu, wl_hdu])
hdu.writeto(residuals_out_path + HIP_name + '_' + obs_name + '_NDRR.fits')
# Re-derive the list of input spectrum files (for logging in the results CSV).
path_name = str(path_to_dir) + '/' + filename
try:
    filenames = [f for f in listdir(path_name) if isfile(join(path_name, f))]
except NotADirectoryError: # path to one file
    path_split = path_name.split('/')
    # NOTE(review): this assigns a plain string, not a one-element list, so
    # `filenames` has a different type in the single-file case; any consumer
    # that iterates it will see individual characters.  TODO confirm
    # write_results handles a bare string.
    filenames = path_split[-1]
# Record results to csv file
# if not provided an output file name, specmatch_results.csv is written to
# (and created if not already)
if (options.outputpath != None):
if (not options.outputpath.endswith('.csv')):
if (not options.outputpath.endswith('/')):
options.outputpath += '/'
options.outputpath += 'specmatch_results.csv'
if isfile(options.outputpath):
with open(options.outputpath,'a') as fd:
write_results(fd, my_spectrum, sm, filenames)
else:
with open(options.outputpath, 'w', newline='') as fd:
write_results(fd, my_spectrum, sm, filenames, write_new = True)
else:
if isfile(properties_out_path + results_filename):
with open(properties_out_path + results_filename,'a') as fd:
#with open(properties_out_path + 'specmatch_results_detrended_test.csv','a') as fd_detrended:
write_results(fd, my_spectrum, sm, filenames)
else:
with open(properties_out_path + results_filename, 'w', newline='') as fd:
# with open(properties_out_path + 'specmatch_results_detrended_test.csv', 'w', newline='') as fd_detrended:
write_results(fd, my_spectrum, sm, filenames, write_new = True)
# find the (approximate) pixel shift used during shifting
# NOTE: this is currently not saved anywhere, and has not yet been shown to provide the correct shfit value!
pixel_shift = np.median(sm.shift_data['lag'])
pixel_shifts = pixel_shifts + [pixel_shift]
# Can save the entire SpecMatch object using:
if save_SM_object:
save_sm_path = './Specmatch_objects/' + HIP_name + '.hdf'
sm.to_hdf(save_sm_path)
sys.stdout = old_stdout
# +
# Save pixel shifts, if desired
#HIP_names = pd.read_csv('Star_list.csv')['HIP_NAME'].to_list()
#pix_shift_array = np.array(pixel_shifts)
#df = pd.DataFrame(pix_shift_array, index = HIP_names, columns =['Pixel_shifts'])
#df.to_csv('Pixel_shifts.csv')
# +
#./APF_spectra/apf_spectra_highest_SNR/HIP93873_spectra
#./APF_spectra/apf_spectra_lite # for testing only
# +
#ranw.242.NDR.fits (GJ 244), ranw.314.NDR.fits (HIP69673), ranx.273.NDR.fits (HIP83207) and raqt.232.NDR.fits (HIP91262)
# -
lib.STAR_PROPS
inspect.getmodule(SpecMatch)
SpecMatch.shift.__path__
| Anna/SM-Emp_scripts/run_smemp_apf_editing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sqlalchemy tutorial
# ### Python's SQLAlchemy and Object-Relational Mapping
#
# A common task when programming any web service is the construction of a solid database backend. In the past, programmers would write raw SQL statements, pass them to the database engine and parse the returned results as a normal array of records. Nowadays, programmers can write Object-relational mapping (ORM) programs to remove the necessity of writing tedious and error-prone raw SQL statements that are inflexible and hard-to-maintain. This tutorial is provided by [pythoncentral](https://www.pythoncentral.io/introductory-tutorial-python-sqlalchemy/). A good explanatory example can be found [here](https://leportella.com/english/2019/01/10/sqlalchemy-basics-tutorial.html).
# <p align="center">
# <img src="https://www.pythoncentral.io/wp-content/uploads/2013/04/SQLAlchemyPersonAddress.png" alt="OSI layer model example."/></p>
# +
'''
sqlite_ex.py
This example creates a small SQLite database with a person table and an
address table (linked via address.person_id) and inserts one row into each.
'''
import os
import sqlite3

# Start from a clean slate so the script can be re-run (CREATE TABLE fails
# if the tables already exist).
if os.path.exists('Example.db'):
    os.remove('Example.db')

con = sqlite3.connect('Example.db')
cur = con.cursor()
# Bug fix: the id column type was misspelled "INEGER"; SQLite silently accepts
# any type name, but only a true INTEGER PRIMARY KEY becomes an alias for the
# rowid.
cur.execute('''
CREATE TABLE person
(id INTEGER PRIMARY KEY ASC, name varchar(250) NOT NULL)
''')
cur.execute('''
CREATE TABLE address
(id INTEGER PRIMARY KEY ASC, street_name varchar(250), street_number varchar(250), post_code varchar(250) NOT NULL, person_id INTEGER NOT NULL, FOREIGN KEY(person_id) REFERENCES person(id))
''')
cur.execute("INSERT INTO person VALUES(1, 'pythoncentral')")
cur.execute("INSERT INTO address VALUES(1, 'python road', '1', '00000', 1)")
con.commit()
con.close()
# +
'''
sqlite_query.py
This example shows how to CRUD (Create, Read, Update, Delete) data in the
database created by sqlite_ex.py.
'''
import sqlite3

con = sqlite3.connect('Example.db')
cur = con.cursor()
# Read: fetch every row of each table.
cur.execute('SELECT * FROM person')
print(cur.fetchall())
cur.execute('SELECT * FROM address')
print(cur.fetchall()[0])
# Update: rename every person (parameterized query -- never build SQL strings).
cur.execute('UPDATE person SET name = ?',('pythonmain',))
cur.execute('SELECT * FROM person')
print(cur.fetchall())
# Create: insert a second person.
cur.execute("INSERT into person VALUES(2, 'pythondivision')")
cur.execute('SELECT * FROM person')
print(cur.fetchall())
# Delete: remove the first person.
cur.execute('DELETE FROM person WHERE id=?',(1,))
cur.execute('SELECT * FROM person')
print(cur.fetchall())
# Bug fix: without an explicit commit, closing the connection rolls back the
# UPDATE/INSERT/DELETE above, so none of the changes would persist.
con.commit()
con.close()
# -
# ### Python's SQLAlchemy and Declarative
# There are three most important components in writing SQLAlchemy code:
#
# - A Table that represents a table in a database.
# - A mapper that maps a Python class to a table in a database.
# - A class object that defines how a database record maps to a normal Python object.
#
# Instead of having to write code for Table, mapper and the class object at different places, SQLAlchemy's declarative allows a Table, a mapper and a class object to be defined at once in one class definition.
# +
'''
sqlalchemy_declarative.py
This example shows how to set up a data base with sqlalchemy.
'''
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
# Base is the declarative registry: every model class derives from it, and its
# combined metadata is used below to create all tables at once.
Base = declarative_base()
class Person(Base):
    """A person row; one person can own many addresses."""
    __tablename__= 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
class Address(Base):
    """An address row, linked to its owner through the person_id foreign key."""
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    street_name = Column(String(250))
    street_number = Column(String(250))
    post_code = Column(String(250),nullable=False)
    person_id = Column(Integer, ForeignKey('person.id'))
    # ORM-level link: address.person yields the owning Person object.
    person = relationship(Person)
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Emit CREATE TABLE statements for every class registered on Base.
Base.metadata.create_all(engine)
# +
'''
sqlalchemy_insert.py
This example shows how to store data into the sqlalchemy data base.
Depends on the previous cell for `Base`, `Person` and `Address`.
'''
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# NOTE(review): module name below is misspelled -- should be
# "sqlalchemy_declarative" if this import is ever re-enabled.
#from sqlalchemy_declaritive import Address, Base, Person
engine = create_engine('sqlite:///sqlalchemy_example.db')
Base.metadata.bind = engine
# A Session is the ORM's staging area: objects added to it are written to the
# database when commit() is called.
DBSession = sessionmaker(bind=engine)
session = DBSession()
new_person = Person(name='Nawin')
session.add(new_person)
session.commit()
# The relationship lets us attach the Address directly to the Person object.
new_address = Address(post_code='48103',street_name ='West Huron Street', person=new_person)
session.add(new_address)
session.commit()
# +
'''
sqlalchemy_query.py  (the original docstring mislabeled this cell as
sqlalchemy_declarative.py)
This example shows how to query the data from the sqlalchemy data base.
'''
#from sqlalchemy import Person, Base, Address
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///sqlalchemy_example.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Iterate over every Person row.
for i in session.query(Person).all():
    print("Person Id:{} Name:{}".format(i.id, i.name))
# first() returns a single object (or None) instead of a list.
person = session.query(Person).first()
print("Person Id:{} Name:{}".format(person.id, person.name))
# Follow the relationship from an Address back to its Person.
address = session.query(Address).first()
print("Address Id:{} Person Name: {}".format(address.id, address.person.name))
# filter() adds a WHERE clause to the query.
finding = session.query(Person).filter(Person.name==address.person.name).first()
print("Address Id:{} Person Name: {}".format(finding.id, finding.name))
# -
# ### Summary of Python's SQLAlchemy
#
# In this article, we learned how to write database code using SQLAlchemy's declaratives. Compared to writing the traditional raw SQL statements using sqlite3, SQLAlchemy's code is more object-oriented and easier to read and maintain. In addition, we can easily create, read, update and delete SQLAlchemy objects like they're normal Python objects.
#
# You might be wondering that if SQLAlchemy's just a thin layer of abstraction above the raw SQL statements, then it's not very impressive and you might prefer writing raw SQL statements instead. In the following articles of this series, we're going to investigate various aspects of SQLAlchemy and compare it against raw SQL statements when they're both used to implement the same functionalities. I believe at the end of this series, you will be convinced that SQLAlchemy is superior to writing raw SQL statements.
| Designing_RESTful_APIs/Exercices/L3/Quiz5/sqlalchemy_tutorial/Sqlalchemy_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Titanic
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../ProyectoQuepo"));
BaseDatos = pd.read_csv('../ProyectoQuepo/train.csv')
# +
#BaseDatos.info();
# -
from random import choice
# Impute missing ages with the column mean.
BaseDatos['Age'] = BaseDatos['Age'].fillna(BaseDatos['Age'].mean())
# NOTE(review): choice() is evaluated once, so every missing Embarked value is
# filled with the SAME randomly chosen port, not an independent draw per row.
BaseDatos['Embarked'] = BaseDatos['Embarked'].fillna(choice(['S','C','Q']))
# First letter of the cabin code = deck; rows with missing Cabin stay NaN.
BaseDatos['Cabin2']=BaseDatos[BaseDatos['Cabin'].notnull()]['Cabin'].apply(lambda x: str(x)[0])
# +
a = BaseDatos.loc[:,['Pclass','Ticket','Cabin2']]
a[a['Cabin2'].notnull()].sort_values(by='Pclass').values
cabins_for_classes =[]
cabins_for_classes.append([a.loc[a['Pclass']==1]['Cabin2'].value_counts()])
cabins_for_classes.append([a.loc[a['Pclass']==2]['Cabin2'].value_counts()])
cabins_for_classes.append([a.loc[a['Pclass']==3]['Cabin2'].value_counts()])
cabins_for_classes
# -
PrimeraClase=['C']*59+['B']*47+['D']*29+['E']*25+['A']*15+['T']
SegundaClase=['F']*8+['E']*4+['D']*4
TerceraClase=['F']*5+['G']*4+['E']*3
# Impute missing cabin decks ('Cabin2' is NaN) by sampling from the empirical
# deck distribution of the passenger's class (lists built in the cell above).
# Bug fix: the original used chained indexing (BaseDatos['Cabin2'][i] = ...),
# which assigns through an intermediate object and triggers pandas'
# SettingWithCopyWarning; .loc assigns on the frame itself.
for i in range(len(BaseDatos)):
    if pd.isna(BaseDatos.loc[i, 'Cabin2']):
        pclass = BaseDatos.loc[i, 'Pclass']
        if pclass == 1:
            BaseDatos.loc[i, 'Cabin2'] = choice(PrimeraClase)
        elif pclass == 2:
            BaseDatos.loc[i, 'Cabin2'] = choice(SegundaClase)
        elif pclass == 3:
            BaseDatos.loc[i, 'Cabin2'] = choice(TerceraClase)
BaseDatos
# +
#BaseDatos.info()
# -
| .ipynb_checkpoints/proyecto-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# # Managing datasets - MongoClient
from PythonLib.dataset.mongodb import MongoClient
# We designed a simple wrapper around `pymongo` to facilitate the task of retrieving our datasets. This approach simplifies the process of creating, sharing, using and managing the knowledge and information by promoting simple and easy to understand **declarative code** instead of long and time consuming procedural code.
dataset = MongoClient(db = {
    # db lets us connect to the database and manipulate our dataset
"mongo_host": "mongodb",
"mongo_port": 27017,
"mongo_db_name": "NETWORK-AND-DISCRETE-LOCATION"
}, q = {
    # q lets us select precisely which dataset we want to work on
'metadata.author': '<NAME>',
'metadata.topic': 'NETWORK-AND-DISCRETE-LOCATION',
'metadata.dataset': '49-nodes'
},f=None
# f let us specify the fields to return (None means return all fields)
)
# The newly created object `dataset` lets you retrieve the first element of the specified database using the method **get**
dataset.get().head()
# # Data Vizualisation - Networks
from PythonLib.dataviz.network import Network
# Create a network object using anytype of dataset object from the `PythonLib`.
network = Network(dataset)
# You can plot the nodes of your newly created network using the function **plot_nodes**
network.plot_nodes()
# More customisations are available, such as the size and color of nodes. Provide the name of the column that represents a given property (ex: node_color="FIXED COST"); you may also provide a 2-tuple defining the name of the column that represents a given property and the scale factor associated to that property (ex: node_size=("CITY POP",1/1500))
network.plot_nodes(node_size=("CITY POP",1/1500), node_color="FIXED COST")
| DOC-How to use PythonLib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework and bake-off code: Formatting guide
__author__ = "Insop"
__version__ = "CS224u, Stanford, Spring 2021"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Original system code](#Original-system-code)
# 1. [Modifying provided code in the original notebook](#Modifying-provided-code-in-the-original-notebook)
# 1. [External imports](#External-imports)
# 1. [Custom code](#Custom-code)
# 1. [Long running test code](#Long-running-test-code)
# ## Overview
#
# This notebook provides a list of Dos and Don'ts for writing code for original systems and bake-offs.
# ## Original system code
# Our assignments need to handle specific homework questions and also very open ended original systems that can have arbitrary dependencies and data requirements, so our instructions have to be quite detailed to handle both.
#
# Here's one quick reminder/clarification of a common issue:
#
# Please be sure to include your Original System code and bake-off call within the scope of this `if` conditional:
#
# ```
# if 'IS_GRADESCOPE_ENV' not in os.environ:
# test_evaluate_pooled_bert(evaluate_pooled_bert)
# ```
#
# This ensures that the autograder **does not** attempt to run your original system code. This includes any `import` statements used in your Original System โ they should be within the `if` conditional.
#
# Overall โ please do not modify any portion of these cells other than
#
# 1. the comment spaces for system text description and peak score reporting; and
# 2. the space in the `if` conditional where you are meant to put your code.
#
# Since we encourage creativity and do not want to constrain things, your original system code will instead be awarded credit manually by CFs after the assignment due date. This is also why you will not see a full grade out of 10 until after the submission deadline, when CFs have manually awarded the original system points.
# ### Modifying provided code in the original notebook
#
# Please do not modify provided code in the original notebook, such as changing the function arguments or default parameters. The autograder will call functions to test the homework problem code, and the autograder uses the function arguments as shown in the original notebook.
# Here is an example (from [hw_colors.ipynb](hw_colors.ipynb)) where the provided code was modified to use `func(vocab, 'data/glove.6B/glove.6B.50d.txt')` instead of the original code `func(vocab, 'glove.6B.50d.txt')`. This might work fine in your local environment; however, the autograder will separately call `func` the same way as shown in the original notebook. That's why we suggest you to not modify the provided code.
def test_create_glove_embedding(func):
vocab = ['NLU', 'is', 'the', 'future', '.', '$UNK', '<s>', '</s>']
# DON'T modify functions like this!
#
# glove_embedding, glove_vocab = func(vocab, 'data/glove.6B/glove.6B.50d.txt')
# DO KEEP the code as it was, since the autograder calls functions in
# the same way shown in this line:
glove_embedding, glove_vocab = func(vocab, 'glove.6B.50d.txt')
assert isinstance(glove_embedding, np.ndarray), \
"Expected embedding type {}; got {}".format(
glove_embedding.__class__.__name__, glove_embedding.__class__.__name__)
assert glove_embedding.shape == (8, 50), \
"Expected embedding shape (8, 50); got {}".format(glove_embedding.shape)
assert glove_vocab == vocab, \
"Expected vocab {}; got {}".format(vocab, glove_vocab)
# ### External imports
# +
#
# DON'T!
#
# This will cause the autograder to fail!
pip install 'git+https://github.com/NVIDIA/dllogger'
# Directly importing external modules outside of `if 'IS_GRADESCOPE_ENV'` scope
# will also cause the autograder to fail.
# -
#
# DO!
#
# This is good!
#
if 'IS_GRADESCOPE_ENV' not in os.environ:
# You can install and import modules of your choice --
# for example:
# https://github.com/NVIDIA/dllogger/issues/1
pip install 'git+https://github.com/NVIDIA/dllogger'
# ### Custom code
#
# DON'T!
#
# This type of custom code will fail, since the autograder is not
# equipped with a GPU:
#
try:
t_gpu = torch.randn(3,3, device='cuda:0')
except AssertionError as err:
print(err)
t_gpu
#
# DO
#
# This is good!
#
if 'IS_GRADESCOPE_ENV' not in os.environ:
# This is okay since this code will not run in the autograder
# environment:
try:
t_gpu = torch.randn(3,3, device='cuda:0')
except AssertionError as err:
print(err)
t_gpu
# ### Long running test code
#
# Any long running test code should be inside the `if` conditional block.
#
# DON'T!
#
# This type of custom code will cause the autograder to time out:
#
my_test_function_runs_an_hour()
#
# DO
#
# This is good!
#
if 'IS_GRADESCOPE_ENV' not in os.environ:
# Run as many tests as you wish!
my_test_function_runs_an_hour()
| hw_formatting_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Day 1: Sonar Sweep
#
# Scan depth
from collections import Counter
import graphviz
import networkx as nx
from networkx.drawing.nx_pylab import draw_networkx
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
import re
import timeit
df = pd.read_csv('inputs/day1.txt', header=None)
x = df.iloc[:,0]
# ## Part one
#
# Shift points by one position and subtract. Count positive elements.
((x - x.shift(1).bfill()).values.flatten() > 0).sum()
# ## Part two
#
# Scan with window
wx = x.rolling(window=3).sum().dropna()
((wx - wx.shift(1).bfill()).values.flatten() > 0).sum()
# # Day 2: Dive!
#
# Move submarine, determine position
df = pd.read_csv('inputs/day2.txt', header=None, delimiter=' ')
df.columns = ['direction', 'val']
df.direction.unique()
# ## Part one
#
# Control interpretation one
forward = df[df.direction == 'forward'].val.sum()
down = df[df.direction == 'down'].val.sum()
up = df[df.direction == 'up'].val.sum()
forward * (down-up)
# ## Part two
#
# Control interpretation two
hori, aim, depth = 0, 0, 0
for cmd, val in df.values:
if cmd == 'down':
aim += val
if cmd == 'up':
aim -= val
if cmd == 'forward':
hori += val
depth += aim*val
hori*depth
# # Day 3: Binary Diagnostic
#
# Decode sensors
# Load
df = pd.read_csv('inputs/day3.txt', header=None, names=['raw'], dtype={'raw': str})
#n_bits = df.raw
n_bits = df.raw.str.len().unique()[0]
print(f'n_bits = {n_bits}')
df.head()
# ## Part one
#
# Mask bits
gamma_str = "".join([
str(int(df.raw.str.slice(start=i, stop=i+1).astype(int).sum() > len(df) / 2))
for i in range(n_bits)
])
gamma = int(gamma_str, 2)
epsilon_str = "".join(['1' if s == '0' else '0' for s in gamma_str])
epsilon = int(epsilon_str, 2)
print(f'{gamma_str} => {gamma}')
print(f'{epsilon_str} => {epsilon}')
print(f'result = {gamma} * {epsilon} = {gamma * epsilon}')
# ## Part two
#
# Filter bits
# +
oxy = df.copy()
i = 0
while len(oxy) > 1:
most_common = str(int(oxy.raw.str.slice(start=i, stop=i+1).astype(int).sum() >= len(oxy) / 2))
oxy = oxy[oxy.raw.str.slice(start=i, stop=i+1) == most_common]
i = (i + 1) % n_bits
oxy_rating_str = oxy.raw.values[0]
oxy_rating = int(oxy_rating_str, 2)
co2 = df.copy()
i = 0
while len(co2) > 1:
most_common = str(int(co2.raw.str.slice(start=i, stop=i+1).astype(int).sum() < len(co2) / 2))
co2 = co2[co2.raw.str.slice(start=i, stop=i+1) == most_common]
i = (i + 1) % n_bits
co2_rating_str = co2.raw.values[0]
co2_rating = int(co2_rating_str, 2)
print(f'{oxy_rating_str} => {oxy_rating}')
print(f'{co2_rating_str} => {co2_rating}')
print(f'life support rating = {oxy_rating} * {co2_rating} = {oxy_rating * co2_rating}')
# -
# # Day 4: Giant Squid
#
# Play bingo
# +
# Load data
with open('inputs/day4.txt') as fi:
lines = fi.readlines()
len(lines)
def get_boards(lines):
    """Yield successive 5x5 bingo boards parsed from text lines.

    Blank lines are skipped; every five non-blank lines are collected
    into one ``np.array`` of shape (5, 5).
    """
    pending = []
    for raw in lines:
        stripped = raw.strip()
        if stripped:
            pending.append([int(tok) for tok in stripped.split()])
        if len(pending) == 5:
            yield np.array(pending)
            pending = []
all_draws = [int(s) for s in lines[0].strip().split(',')]
boards = list(get_boards(lines[1:]))
print(all_draws)
print(boards[0])
print(boards[0].T)
# -
# ## Part one
#
# Find first board to win
# +
def check_full(row, draws):
    """True when every number in `row` has already been drawn."""
    return set(row) <= set(draws)

def find_board(boards, all_draws):
    """Return (draw, draws, board, board_i, row) for the FIRST winning board.

    A board wins when any full row or column is covered by the numbers
    drawn so far; columns are handled by stacking the transpose.
    """
    for round_i, draw in enumerate(all_draws):
        draws = all_draws[:round_i + 1]
        for board_i, board in enumerate(boards):
            for line in np.vstack([board, board.T]):
                if check_full(line, draws):
                    return draw, draws, board, board_i, line
draw, draws, board, board_i, row = find_board(boards, all_draws)
unmarked = list(set(board.flatten()).difference(draws))
print(f'Board = {board_i}')
print(board)
print(f'Draws = [{draw}], {draws}')
print(f'Unmarked = {unmarked}')
score = np.array(unmarked).sum() * draw
print(f'Score = {score}')
# -
# ## Part two
#
# Find last board to win
# +
def check_full(row, draws):
    """True when every number in `row` has already been drawn."""
    return set(row) <= set(draws)

def find_board(boards, all_draws):
    """Return (draw, draws, board, board_i, row) for the LAST board to win.

    Tracks which boards are still in play; once the final board completes
    a row or column, the winning details for that board are returned.
    """
    still_playing = np.ones(len(boards))
    for round_i, draw in enumerate(all_draws):
        draws = all_draws[:round_i + 1]
        for board_i, board in enumerate(boards):
            for line in np.vstack([board, board.T]):
                if not check_full(line, draws):
                    continue
                still_playing[board_i] = 0
                if not still_playing.any():
                    return draw, draws, board, board_i, line
draw, draws, board, board_i, row = find_board(boards, all_draws)
unmarked = list(set(board.flatten()).difference(draws))
print(f'Board = {board_i}')
print(board)
print(f'Winning row = {row}')
print(f'Draws = [{draw}], {draws}')
print(f'Unmarked = {unmarked}')
score = np.array(unmarked).sum() * draw
print(f'Score = {score}')
# -
# ## Day 5: Hydrothermal Venture
#
# Hydrothermal vents
with open('inputs/day5.txt') as fi:
lines = np.array([
[int(s) for s in re.match(r'^(\d+),(\d+) -> (\d+),(\d+)$', line).groups()]
for line in fi.readlines()])
# ## Analysis
lines.min(), lines.max()
# ## Part one
#
# - Interpolate between end points using np.linspace
# - Add lines together in matrix.
# - Values > 1 correspond to intersections.
# - Only horizontal and vertical lines.
# +
def interpolate(x1, y1, x2, y2):
    """Return the grid points along the segment (x1, y1) -> (x2, y2).

    Handles vertical, horizontal and 45-degree diagonal segments; both
    endpoints are included.
    """
    if x1 == x2:
        # Vertical: x fixed, walk y.
        count = abs(y1 - y2) + 1
        return [(x1, y) for y in np.linspace(y1, y2, count).astype(int)]
    if y1 == y2:
        # Horizontal: y fixed, walk x.
        count = abs(x1 - x2) + 1
        return [(x, y1) for x in np.linspace(x1, x2, count).astype(int)]
    # Diagonal: step both coordinates in lockstep.
    count = min(abs(x1 - x2) + 1, abs(y1 - y2) + 1)
    xs = np.linspace(x1, x2, count)
    ys = np.linspace(y1, y2, count)
    return list(zip(xs, ys))

def intersections(lines):
    """Rasterise all segments onto a square grid of per-cell hit counts.

    `lines` is an (n, 4) array of [x1, y1, x2, y2] rows; cells covered by
    more than one segment end up with values > 1.
    """
    size = lines.max() + 1
    grid = np.zeros((size, size))
    for segment in lines:
        for x, y in interpolate(*segment):
            # Every rasterised point must land exactly on the grid.
            assert x == int(x) and y == int(y)
            grid[int(x)][int(y)] += 1
    return grid.astype(int)
# +
# Part one counts only axis-aligned lines (same x or same y at both ends).
test_lines = lines[(lines[:,0] == lines[:,2]) | (lines[:,1] == lines[:,3])]
print(test_lines[:5], '...')
grid = intersections(test_lines)
sns.heatmap(grid.T)
print(f'Result = {len(grid[grid > 1].flatten())}')
# Result = 6822 wrong
# -
# ## Part two
#
# Can reuse the intersections function, because it was general to begin with.
# +
test_lines = lines  # part two keeps the diagonals too
grid = intersections(test_lines)
sns.heatmap(grid.T)
print(f'Result = {len(grid[grid > 1].flatten())}')
# -
# ## Day 6: Lanternfish
#
# Predict growth of fish population
# Each value is one fish's spawn timer (days until it spawns).
with open('inputs/day6.txt') as fi:
    fish = np.array([int(s) for s in fi.read().strip().split(',')])
print(np.unique(fish))
fish
# ## Part one
#
# Exponential growth, but over a short horizon, so a naive simulator will work.
def sim_naive(pop, days=1):
    """Simulate lanternfish timers directly and return the population size.

    Each day: fish at timer 0 reset to 6 and each spawns a new fish at 8;
    every other fish ticks down by one. The array grows with the population,
    so memory use is exponential — only suitable for short horizons.
    """
    pop = np.array(pop)  # copy so the caller's initial population is untouched
    for _ in range(days):
        spawning = pop == 0
        pop = np.concatenate([
            pop[spawning] + 6,   # spawners reset to 6
            pop[~spawning] - 1,  # everyone else ticks down
            pop[spawning] + 8,   # one newborn per spawner, starting at 8
        ])
    return len(pop)
# 80 days is still small enough for the direct simulation.
result = sim_naive(fish, days=80)
print(f'Result = {result}')
# ## Part two
#
# Keep an array of counters for each timer value. Roll values left each day.
# +
def fast_sim(pop, days=10):
    """Count fish per timer value and rotate the 9-slot histogram daily."""
    timer_counts = np.zeros(9).astype(int)
    for timer in pop:
        timer_counts[timer] += 1
    for _ in range(days):
        spawning = timer_counts[0]
        # Rolling left shifts every timer down by one day; slot 0 wraps to 8.
        timer_counts = np.roll(timer_counts, -1)
        timer_counts[8] = spawning   # newborns
        timer_counts[6] += spawning  # parents reset to 6
    return sum(timer_counts)
print(f'Result = {fast_sim(fish, days=256)}')
# -
# # Day 7: The Treachery of Whales
#
# Calculate crab fuel; I'm thinking earth mover algorithm of some sort.
# Each value is one crab's horizontal position.
with open('inputs/day7.txt') as fi:
    pos = np.array([int(s) for s in fi.read().strip().split(',')])
print(len(pos))
# ## Part one
#
# Constant fuel burn. I was initially surprised that the mean does not work.
#
# Why mean does not work. Imagine that you have :
# - 9 crabs on position 0
# - 1 crab on position 10
# - The mean position is 1, so 9 crabs move 1 and 1 crab moves 9 (18 total)
# - Better that 1 crab at position 10 moves 10 steps to position 0 (10 total)
#
# Brute-force every candidate meeting position and pick the cheapest.
# NOTE(review): the candidate range is 0..len(pos)-1, i.e. the *number* of
# crabs, not the maximum position — it happens to contain the optimum
# (the median) for this input, but 0..pos.max() would be the safe domain.
n_targets = range(len(pos))
fuel_costs = [np.abs(pos - target).sum() for target in n_targets]
# BUG FIX: original plotted x=targets (undefined name); the x-axis is the
# candidate target positions.
sns.lineplot(x=list(n_targets), y=fuel_costs)
print(f'Mean crab position = {pos.mean()}')
opt_target = np.argmin(fuel_costs)
print(f'Optimal crab position: {opt_target}')
print(f'Optimal fuel cost: {fuel_costs[opt_target]}')
# ## Part two
#
# Increasing fuel burn. Surprisingly, the mean almost works.
# +
def cost(pos, target):
    """Triangular fuel cost: moving d steps costs 1 + 2 + ... + d."""
    dist = np.abs(pos - target)
    return dist * (dist + 1) / 2
vcost = np.vectorize(cost)
fuel_costs = [vcost(pos, target).sum() for target in n_targets]
# BUG FIX: original plotted x=targets (undefined name); use the candidate
# positions actually searched.
sns.lineplot(x=list(n_targets), y=fuel_costs)
print(f'Mean crab position = {pos.mean()}')
opt_target = np.argmin(fuel_costs)
print(f'Optimal crab position: {opt_target}')
print(f'Optimal fuel cost: {fuel_costs[opt_target].astype(int)}')
# -
# # Day 8: Seven Segment Search
#
# Descramble the digital display (mixed wires)
# Preview the scrambled seven-segment input ("patterns | outputs" per line).
with open('inputs/day8.txt') as fi:
    lines = fi.readlines()
for line in lines[:3]:
    print(line.strip())
print('...')
# ## Part one
#
# 2, 3, 4 and 7 are uniquely identifiable. So easy peasy.
# chop off entries (left) part and only keep output (right) part
# Keep only the four output digits to the right of the '|'.
outputs = np.array([line.split('| ')[1].strip().split() for line in lines]).flatten()
# map output to lengths
lens = pd.Series([len(s) for s in outputs])
# count occurrences of 2, 3, 4, and 7 lengths, corresponding to digits 1, 4, 7 and 8
print(f'Number of 1s, 4s, 7s and 8s in output = {lens.isin([2,4,3,7]).sum()}')
# ## Part two
#
# Time to call Sherlock...
#
# ```
# aaaa
# b c
# b c
# dddd
# e f
# e f
# gggg
#
# ```
# Reference wiring: which segments each digit lights on an unscrambled display.
digits = {
    0: 'abcefg',
    1: 'cf',
    2: 'acdeg',
    3: 'acdfg',
    4: 'bcdf',
    5: 'abdfg',
    6: 'abdefg',
    7: 'acf',
    8: 'abcdefg',
    9: 'abcdfg'
}
# +
def splitline(line):
    """Split one puzzle line into (signal patterns, output digits)."""
    left, right = line.strip().split(' | ')
    return left.split(), right.split()
def decode(entries, output):
    """Decode one scrambled line into its 4-digit output value.

    *entries* are the ten unique signal patterns observed on the line;
    *output* is the list of four scrambled digit strings. Digits 1/4/7/8
    are fixed by segment count alone; the remaining six are separated by
    their overlap with the segment sets of 1 and 4. Assumes (as the
    puzzle guarantees) that the entries contain digits 1 and 4.
    """
    patterns = [frozenset(e) for e in entries]
    one = next(p for p in patterns if len(p) == 2)
    four = next(p for p in patterns if len(p) == 4)

    def to_digit(segs):
        # Map one scrambled segment set to its digit.
        n = len(segs)
        if n == 2:
            return 1
        if n == 3:
            return 7
        if n == 4:
            return 4
        if n == 7:
            return 8
        if n == 5:
            # 2, 3 or 5: only 3 contains both segments of 1;
            # 5 shares 3 segments with 4, while 2 shares only 2.
            if one <= segs:
                return 3
            return 5 if len(segs & four) == 3 else 2
        # n == 6 -> 0, 6 or 9: only 6 lacks a segment of 1;
        # only 9 contains all segments of 4.
        if not one <= segs:
            return 6
        return 9 if four <= segs else 0

    num = 0
    for word in output:
        num = num * 10 + to_digit(frozenset(word))
    return num
splitlines = [splitline(line) for line in lines]
splitlines[0]  # peek at the first (entries, outputs) pair
# bd -> cf
# dfb -> acf
# badc -> bcdf
# ac -> bd
#
# f -> a, because acf-cf = a
#
# -
# # Day 9: Smoke Basin
#
# The cave of low points and basins
# Heightmap: one float per cell, one row per input line.
with open('inputs/day9.txt') as fi:
    a = np.array([
        [float(s)
        for s in line.strip()]
        for line in fi.readlines()
    ])
# Worked example grid from the puzzle text.
ex = np.array([
    [2,1,9,9,9,4,3,2,1,0],
    [3,9,8,7,8,9,4,9,2,1],
    [9,8,5,6,7,8,9,8,9,2],
    [8,7,6,7,8,9,6,7,8,9],
    [9,8,9,9,9,6,5,6,7,8]
])
# ## Part one
#
# Create an infinite boundary around array. Makes it easier to find low points.
# +
def make_edge(src, fill=np.inf):
    """Return *src* surrounded by a one-cell border of *fill*.

    The result dtype follows the Python type of *fill*: float for the
    default infinite border, int when e.g. fill=9 is passed.
    """
    rows, cols = src.shape
    framed = np.full((rows + 2, cols + 2), -1.0)
    framed[1:-1, 1:-1] = src[:]         # original grid in the middle
    framed[1:-1, 0] = fill              # left edge
    framed[1:-1, -1] = fill             # right edge
    framed[0, :] = fill                 # top edge (incl. corners)
    framed[-1, :] = fill                # bottom edge (incl. corners)
    return framed.astype(type(fill))
def find_lowpoints(a):
    """Boolean mask of cells strictly lower than all four neighbours."""
    padded = make_edge(a)  # inf border so edge cells always win that side
    neighbour_min = np.min([
        padded[1:-1, :-2],   # left
        padded[1:-1, 2:],    # right
        padded[:-2, 1:-1],   # top
        padded[2:, 1:-1],    # bottom
    ], axis=0)
    return a < neighbour_min
# -
np.sum(a[find_lowpoints(a)]+1)  # part-one answer: sum of (1 + height) over low points
# ## Part two
#
# Use a stack for depth-first search from low points to trace basins
# +
def trace_basins(a):
    """
    Use depth-first search to trace basins.

    Returns one basin per low point; each basin is the list of (x, y)
    coordinates (in the 9-bordered world frame) reachable from the low
    point without crossing a cell of height 9.
    """
    world = make_edge(a, fill=9)  # 9-border stops the search at the map edge
    lows = find_lowpoints(world)
    low_locs = np.where(lows)
    basins = []
    for low in zip(*low_locs):
        stack = [low]   # DFS frontier of coordinates still to expand
        basin = [low]   # members in discovery order (callers use len())
        seen = {low}    # O(1) membership; `x in basin` made this quadratic
        while stack:
            x, y = stack.pop()
            for h, v in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
                neighbor = (x + h, y + v)
                if neighbor not in seen and world[neighbor] != 9:
                    stack.append(neighbor)
                    basin.append(neighbor)
                    seen.add(neighbor)
        basins.append(basin)
    return basins
#traces = trace_basins(ex)
traces = trace_basins(a)
np.prod(sorted([len(b) for b in traces])[::-1][:3])  # product of the three largest basin sizes
# -
# # Day 10: Syntax Scoring
#
# Syntax checker
# One navigation-subsystem chunk string per line.
with open('inputs/day10.txt') as fi:
    lines = [line.strip() for line in fi.readlines()]
# ## Part one
#
# Use a stack. Read line from left to right and use a stack of characters to keep track of balance
# - push on left-char
# - pop-and-check on right-char
# +
# Score table for the first illegal closer on a corrupt line (AoC day 10).
syntax_points = {')': 3, ']': 57, '}': 1197, '>': 25137}
LEFT_CHARS = '([{<'
RIGHT_CHARS = ')]}>'
def syntax_check(lines):
    """Return [(line_num, bad_char)] for the first corrupt closer per line.

    A closer is corrupt when it does not match the most recently opened
    bracket (positions in LEFT_CHARS/RIGHT_CHARS correspond). Incomplete
    lines produce no entry. Raises on characters outside the bracket sets.
    """
    errors = []
    for line_num, line in enumerate(lines):
        stack = []
        for c in line:
            if c in LEFT_CHARS:
                # push on stack
                stack.append(c)
            elif c in RIGHT_CHARS:  # BUG FIX: was `right_chars` (NameError)
                popped = stack.pop()
                if LEFT_CHARS.index(popped) != RIGHT_CHARS.index(c):
                    errors.append((line_num, c))
                    break
            else:
                raise Exception(f'Invalid character on line {line_num}: {c}')
    return errors
# Total syntax-error score: sum the point value of each first illegal closer.
errors = syntax_check(lines)
total_error = sum([syntax_points[c] for line_num, c in errors])
print(f'Total error: {total_error}')
# -
# ## Part two
#
# - Again use a stack, but a little differently this time
# - The two functions can probably be combined into one
# - Use np.median to find median
# +
# Per-character value used by the base-5 completion score.
completion_points = {')': 1, ']': 2, '}': 3, '>': 4}
def autocomplete(lines):
    """For each incomplete line, return the closers that would finish it."""
    corrupt = {line_num for line_num, _ in syntax_check(lines)}
    all_completions = []
    for line_num, line in enumerate(lines):
        if line_num in corrupt:
            continue
        open_stack = []
        for ch in line:
            if ch in LEFT_CHARS:
                open_stack.append(ch)
            elif ch in RIGHT_CHARS:
                open_stack.pop()
            else:
                raise Exception(f'Invalid character on line {line_num}: {ch}')
        # Unclosed openers, innermost first, translated to matching closers.
        completions = [RIGHT_CHARS[LEFT_CHARS.index(ch)] for ch in reversed(open_stack)]
        all_completions.append(completions)
    return all_completions
def score_completion(completion):
    """Base-5 score of a completion sequence (AoC day 10 part two rule)."""
    score = 0
    for ch in completion:
        score = score * 5 + completion_points[ch]
    return score
# The winning score is the median of all completion scores.
scores = [score_completion(completion) for completion in autocomplete(lines)]
print(f'Median score: {np.median(scores).astype(int)}')
# -
# # Day 11: Dumbo Octopus
#
# A cellular automaton
# +
# 10x10 grid of octopus energy levels (one digit per cell).
with open('inputs/day11.txt') as fi:
    init = np.array(
        [
            [int(s) for s in line.strip()]
            for line in fi.readlines()
        ]
    )
def compact(a):
    """Render the grid one row per line; energies of 10+ show as '*'."""
    rendered_rows = []
    for row in a:
        cells = ['*' if v >= 10 else str(v) for v in row]
        rendered_rows.append("".join(cells))
    return "\n".join(rendered_rows)
print(compact(init))  # render the initial octopus grid
# +
def cellular(init, steps=1):
    """Yield the octopus grid after each of *steps* rounds.

    Per round: every cell gains 1 energy; cells above 9 flash, adding 1
    to their eight neighbours (and, harmlessly, themselves), cascading
    until no new cell crosses the threshold; flashed cells reset to 0.
    """
    state = init
    for _ in range(steps):
        # STEP 1: everyone gains one energy.
        state = state + 1
        # STEP 2: cascade flashes until no new flashers appear.
        flashed = state > 9
        x, y = np.where(flashed)
        while len(x):
            # Float buffer with a -inf border: out-of-range neighbour
            # increments land on the border and are discarded later.
            buffer = np.pad(np.zeros(state.shape), 1, constant_values=-np.inf)
            x += 1; y += 1  # shift coordinates into the padded frame
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    # Each offset is a pure translation of distinct flasher
                    # coordinates, so the target indices never repeat.
                    buffer[x + dx, y + dy] += 1
            buffer = buffer[1:-1, 1:-1].astype(int)
            state += buffer
            # Only cells that newly crossed the threshold flash next.
            x, y = np.where((state > 9) & (np.invert(flashed)))
            flashed = state > 9
        # STEP 3: every flashed cell resets to zero.
        state[np.where(state > 9)] = 0
        state = state.astype(int)
        yield state
# Cells at 0 after a step are exactly the ones that flashed during it.
n_flashed = 0
for state in cellular(init, steps=100):
    n_flashed += len(state[state == 0])
print(f'Total flashed = {n_flashed}')
# -
# A fully-zero grid means every octopus flashed in the same step.
for step, state in enumerate(cellular(init, steps=1000)):
    if state.sum() == 0.0:
        print(f'First synchronous flash at step = {step+1}')
        break
# # Day 12: Passage Pathing
#
# Path finding. I'm thinking depth-first search.
# Build an undirected adjacency dict from "u-v" edge lines.
with open('inputs/day12.txt') as fi:
    edges = [l.strip().split('-') for l in fi.readlines()]
nbs = {} # neighbors dict
for u,v in edges:
    # it will add neighbors of end, but who cares
    nbs.setdefault(u, set()).add(v)
    nbs.setdefault(v, set()).add(u)
print(nbs)
g = graphviz.Graph('Cave System', graph_attr={'rankdir':'LR', 'size': '10'})
for e in edges:
    g.edge(e[0], e[1])
g
# ## Part one
#
# Use stack to implement depth-first search pushing and popping path elements, e.g. `[v0, v1, ...]`.
# Depth-first search over whole paths: big caves (uppercase) may repeat,
# small caves at most once per path.
init_path = ['start']
stack = [init_path]
found = []
while stack:
    path = stack.pop()
    last_vert = path[-1]
    for nb in nbs[last_vert]:
        extended = path + [nb]
        if nb == 'end':
            found.append(extended)
            continue
        if nb.isupper() or nb not in path:
            # push extended path
            stack.append(extended)
print(f'Found {len(found)} paths from start -> end')
# ## Part two
#
# Can visit a single small cave twice, not including the start and end
# +
# Reset the search state for the part-two DFS below.
init_path = ['start']
stack = [init_path]
found = []
def single_visit(path, nb):
    """True while no small (lowercase) cave occurs twice in *path*.

    The *nb* argument is accepted for call-site symmetry but unused.
    """
    smalls = [cave for cave in path if cave.islower()]
    return len(set(smalls)) == len(smalls)
# Same DFS, but one small cave may be visited twice (never start/end).
while stack:
    path = stack.pop()
    last_vert = path[-1]
    for nb in nbs[last_vert]:
        extended = path + [nb]
        if nb == 'start': continue
        if nb == 'end':
            found.append(extended)
            continue
        if nb.isupper() or nb not in path or single_visit(path, nb):
            # push extended path
            stack.append(extended)
print(f'Found {len(found)} paths from start -> end')
# -
# # Day 13: Transparent Origami
#
# Use numpy's flip functions, i.e. np.flipud and np.fliplr along with some np.max
# +
# Parse dot coordinates ("x,y") and fold instructions ("fold along d=n").
with open('inputs/day13.txt') as fi:
    x, y = [], []
    folds = []
    for line in fi.readlines():
        line = line.strip()
        if line == '':
            continue
        if line.startswith('fold'):
            segments = line.split(' ')
            dimension, place = segments[2].split('=')
            folds.append([dimension, int(place)])
            continue
        if line[0].isdigit():
            first, second = line.split(',')
            x.append(int(first))
            y.append(int(second))
x = np.array(x)
y = np.array(y)
print(f'x min = {x.min()}, x max = {x.max()}')
print(f'y min = {y.min()}, y max = {y.max()}')
for fold in folds:
    print(fold)
# NOTE(review): the y dimension gets +2 while x gets +1 — presumably padding
# so the later unequal-fold case lines up; confirm before changing.
paper = np.zeros((x.max()+1, y.max()+2))
paper[x,y] = 1.0
print(f'There are {paper.sum().astype(int)} dots on the paper with dimensions {paper.shape}')
# -
# ## Part one
#
# Fold paper along vertical line, i.e. first fold. Flip the "right" paper and compute max of two papers, when aligned right.
# First fold only: the fold line itself (row `place`) is dropped, the far
# half is mirrored, and dots merge with an elementwise max.
dimension, place = folds[0]
print(f'Original paper shape = {paper.shape}')
print(f'Cut paper at {dimension} = {place}')
left, right = paper[:place, :], paper[place+1:, :]
print(f'Left paper shape = {left.shape}')
print(f'Right paper shape = {right.shape}')
print(f'Papers have same size: {left.shape == right.shape}')
print(f'Flipping "right" paper up/down, because numpy arrays grow downwards in x')
right = np.flipud(right)
print(f'Smushing papers together...')
smush = np.max([left, right], axis=0)
print(f'There are {smush.sum().astype(int)} dots on smushed paper')
# ## Part two
# Do the same, but more times, and "print" the resulting ASCII art. Also, not simple folds with equal-sized sides.
# +
# Apply every fold in order; equal halves merge with max, unequal halves
# go through the "advanced" add-and-threshold path below.
smush = paper.copy()
print(f'Begin folding paper with shape {smush.shape}'); print()
for i, (dim, place) in enumerate(folds):
    #print(f'Begin fold {i+1} at {dim} = {place}')
    if dim == 'x':
        first, second = smush[:place, :], smush[place+1:, :]
        second = np.flipud(second)
        if first.shape == second.shape:
            # simple case
            smush = np.max([first, second], axis=0)
        else:
            # advanced case
            if first.shape[0] < second.shape[0]:
                # swap
                # NOTE(review): this "swap" leaves first and second bound to
                # the same array and temp unused — a real swap would need
                # `second = temp`. The notebook's final output was accepted,
                # so confirm against expected results before changing.
                temp = first
                first = second
                second = first
            smush = first
            offset = first.shape[0] - second.shape[0]
            smush[offset:,:] += second
            # NOTE(review): "> 1" keeps only cells where the halves overlap;
            # a union of dots would be ">= 1" — verify intent.
            smush = (smush > 1).astype(int)
    if dim == 'y':
        first, second = smush[:, :place], smush[:, place+1:]
        second = np.fliplr(second)
        if first.shape == second.shape:
            # simple case
            smush = np.max([first, second], axis=0)
        else:
            # advanced case
            if first.shape[1] < second.shape[1]:
                # swap
                # NOTE(review): same broken swap as the x branch (temp unused,
                # second ends up aliasing first) — likely meant `second = temp`.
                temp = first
                first = second
                second = first
            smush = first
            offset = first.shape[1] - second.shape[1]
            smush[:,offset:] += second
            smush = (smush > 1).astype(int)
    #print(f'- Smushed shapes {first.shape} <- {second.shape}')
    #print(f'- Equal size? {first.shape == second.shape}')
    #print(f'Smush now has shape {smush.shape} ...')
    #print()
def show_code(smush):
    """ASCII-art render: '#' for dots (value 1), '.' elsewhere."""
    rendered = []
    for row in smush:
        rendered.append("".join('#' if v == 1 else '.' for v in row))
    return "\n".join(rendered)
# Flip back to reading orientation and render the eight code letters.
print(show_code(np.flipud(smush.astype(int))))
# -
'RHALRCRA'
# # Day 14: Extended Polymerization
#
# Polymer synthesis
# Line 0 is the polymer template, line 1 is blank, the rest are
# "pair -> insert" rules.
with open('inputs/day14.txt') as fi:
    template = None
    rules = {}
    for i, line in enumerate(fi.readlines()):
        line = line.strip()
        if i == 0:
            template = line
            continue
        if i == 1:
            continue
        pair, insert = line.split(' -> ')
        rules[pair] = insert
# ## Part one
# +
def synthesize(template, rules):
    """Yield the fragments of one pair-insertion pass over *template*.

    Each overlapping pair yields its inserted element (if any rule matches)
    plus the pair's second character; only the first fragment also carries
    the leading character, so joining the fragments rebuilds the polymer.
    """
    for start in range(len(template) - 1):
        pair = template[start:start + 2]
        insert = rules.get(pair) or ''
        head = pair[0] if start == 0 else ''
        yield f'{head}{insert}{pair[1]}'
def create_polymer(template, rules, steps=10):
    """Apply *steps* insertion passes and return the grown polymer string."""
    polymer = template[:]
    for step in range(steps):
        print(f'Step {step}')
        fragments = synthesize(polymer, rules)
        polymer = "".join(fragments)
    return polymer
# Answer: most common element count minus least common after 10 steps.
polymer = create_polymer(template, rules, steps=10)
freq = Counter(polymer).most_common()
print(f'Answer = {freq[0][1] - freq[-1][1]}')
freq
# -
# ## Part two
#
# Below takes too long. I need a trick to solve faster. Need some insight into the problem
# +
# polymer = create_polymer(template, rules, steps=10)
# -
# # Day 15: Chiton
#
# Dijkstra's algorithm will come in handy. To construct graph from input, shift array four ways to create node pairs.
# +
# Risk grid: one digit per cell; test_nodes is a tiny all-ones sanity grid.
with open('inputs/day15.txt') as fi:
    nodes = np.array([
        [int(s) for s in line.strip()]
        for line in fi.readlines()
    ])
test_nodes = np.ones((3,3)).astype(int)
def construct_graph(nodes):
    """Build a directed grid graph; edge u->v is weighted by v's risk.

    Each cell becomes a node numbered row-major; 4-neighbours are linked
    in both directions so entering cell v always costs weights[v].
    """
    # Todo: only need one vertical and one horizontal shift... change
    G = nx.DiGraph()
    shape = nodes.shape
    n_nodes = shape[0]*shape[1]
    weights = nodes.flatten()
    idx = np.arange(0, n_nodes).reshape(shape)
    # right shift: pair each cell with its right-hand neighbour
    for u, v in zip(idx[:,:-1].flatten(), idx[:,1:].flatten()):
        G.add_edge(u, v, weight=weights[v])
        G.add_edge(v, u, weight=weights[u])
    # down shift: rotating the index grid turns vertical neighbours into
    # horizontal ones, so the same pairing trick covers the columns
    for u, v in zip(np.rot90(idx)[:,:-1].flatten(), np.rot90(idx)[:,1:].flatten()):
        G.add_edge(u,v,weight=weights[v])
        G.add_edge(v,u,weight=weights[u])
    return G
G = construct_graph(test_nodes)
nx.draw(G)
# -
# ## Part one
#
# This is the shortest-path problem. I'll use NetworkX.
# +
from networkx.classes.function import path_weight
from networkx.algorithms.shortest_paths.generic import shortest_path_length
# Dijkstra (via networkx) from the top-left to the bottom-right node.
G = construct_graph(nodes)
source = 0 # top/left
target = len(G.nodes())-1 # bottom/right
pathlen = shortest_path_length(G, source=source, target=target, weight='weight')
print(pathlen) # 458
# -
# ## Part two
#
# Reuse graph constructor, but expand raw input to more nodes
def extend_network(nodes, dims=5):
    """Tile *nodes* into a dims x dims map, adding i+j per tile.

    Risk values wrap 9 -> 1 (there is no 0), hence the mod-9 with a
    shift down and back up by one.
    """
    tile_rows = []
    for i in range(dims):
        strip = np.hstack([np.mod(nodes - 1 + (i + j), 9) + 1 for j in range(dims)])
        tile_rows.append(strip)
    return np.vstack(tile_rows)
# Sanity-check the 9->1 wraparound on an all-8 grid, then solve the 5x5 map.
local_test = np.ones((3,3)).astype(int)*8
extended_nodes = extend_network(local_test, dims=5)
print(local_test);print()
print(extended_nodes);print()
extended_nodes = extend_network(nodes, dims=5)
G = construct_graph(extended_nodes)
source = 0 # top/left
target = len(G.nodes())-1 # bottom/right
pathlen = shortest_path_length(G, source=source, target=target, weight='weight')
print(pathlen) # 2800
# # Day 16: Packet Decoder
#
# Bit wrangling... yawn
# # Day 17: Trick Shot
#
# Trick shot. I'm thinking parameter search.
# # Day 18: Snailfish
# # Day 19: Beacon Scanner
# # Day 20: Trench Map
# # Day 21: Dirac Dice
# # Day 22: Reactor Reboot
# # Day 23: Amphipod
| advent-of-code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import qutip as qt
import numpy as np
import scipy
from scipy import constants
from scipy.linalg import expm, sinm, cosm
import itertools, sys
import matplotlib.pyplot as plt
from tqdm import tqdm as tqdm
from qutip.ipynbtools import HTMLProgressBar
# Physical constants (SI) used throughout the simulation.
pi = np.pi
e = constants.e
h = constants.h
hbar = constants.hbar
ep0 = constants.epsilon_0
mu0 = constants.mu_0
Phi0 = h/(2*e)  # magnetic flux quantum
kb = constants.Boltzmann
# path ----- make the repository root importable for the cQEDSim package
from pathlib import Path
p = Path.cwd()
print(p.parent)
sys.path.append(str(p.parent))
# sys.path.append(str(p.parent)+'\\Qsystem-simulation\\src\\')
import cQEDSim.core.systemConst as scon
import cQEDSim.core.pulseShaping as ping
# -
# # Definitions of parametric drive
# ---
# +
def tunable_freq_transmon(flx, f0, d):
    """Flux-tunable transmon frequency with junction asymmetry *d*.

    See "A Quantum Engineer's Guide to Superconducting Qubits":
    f = f0 * (cos^2(flx) + d^2 sin^2(flx))^(1/4).
    """
    envelope = np.sqrt(np.cos(flx) ** 2 + (d * np.sin(flx)) ** 2)
    return f0 * np.sqrt(envelope)
def frt(t, args):
    """qutip coefficient: coupler frequency from a flux waveform args['flx'](t).

    BUG FIX: the original read flx_offset from the module-level args_td
    global instead of the *args* dict qutip passes (they were the same
    object at runtime, so behavior is unchanged, but the global coupling
    is gone).
    """
    flx_offset = args['flx_offset']
    flx = args['flx'](t) + flx_offset
    f0, d = args['f0'], args['d']
    return tunable_freq_transmon(flx, f0, d)
def frt2(t, args):
    """qutip coefficient: coupler frequency under a sinusoidal flux drive.

    flx(t) = amp * sin(freq * t) + flx_offset.
    NOTE(review): no 2*pi factor here (unlike frt3), so 'freq' is in
    angular units for this variant — confirm before comparing sweeps.
    BUG FIX: reads freq/amp/flx_offset from *args* instead of the
    module-level args_td global (same dict at runtime).
    """
    freq = args['freq']
    amp = args['amp']
    flx_offset = args['flx_offset']
    flx = np.sin(1 * freq * t) * amp + flx_offset
    f0, d = args['f0'], args['d']
    return tunable_freq_transmon(flx, f0, d)
def frt3(t, args):
    """qutip coefficient: idle coupler frequency plus a direct sinusoidal
    frequency modulation (rather than modulating the flux as frt2 does).

    BUG FIX: reads freq/amp/flx_offset from *args* instead of the
    module-level args_td global (same dict at runtime).
    """
    freq = args['freq']
    amp = args['amp']
    flx_offset = args['flx_offset']
    # flx = np.sin(2*pi*freq*t)*amp + flx_offset
    f0, d = args['f0'], args['d']
    ft_offset = tunable_freq_transmon(flx_offset, f0, d)
    return ft_offset + np.sin(2 * np.pi * freq * t) * amp
def eq(x, args):
    """fsolve objective: |f_trgt - f(x)|, zero at the matching flux bias."""
    target_freq = args['f_trgt']
    return abs(target_freq - tunable_freq_transmon(x, args['f0'], args['d']))
# flx = np.linspace(-2*pi,pi*2,1001)
# fts = tunable_freq_transmon(flx, 7, .7)
# plt.plot(flx, fts)
# -
# # Building physical system
# ---
# System : Transmon + TunableCoupler(Transmon) + Transmon
#
# See also :
# - [Analysis of a parametrically driven exchange-type gate and a two-photon excitation gate between superconducting qubits](http://arxiv.org/abs/1708.02090)
# - [Implementation of Conditional-Phase Gates based on tunable ZZ-Interactions](http://arxiv.org/abs/2005.08863)
# 
# Transmon + tunable coupler + transmon system; couplings presumably in GHz
# (same units as f01) — set by the cQEDSim package.
Nq = 3
gc1, gc2, g12 = .1, .1, .005
Q1 = scon.transmon(f01=5.0, alpha=.3, N=10, Nq=Nq)
Q2 = scon.transmon(f01=5.5, alpha=.3, N=10, Nq=Nq)
Qc = scon.transmon(f01=6.5, alpha=.2, N=10, Nq=Nq)
QQQ = scon.QQQ(Q1, Q2, Qc, gc1, gc2, g12)
QQQ.plotDressedEnergyLevels(coupler_exitation_stop=1)
# # Calculation of static-ZZ coupling
# ---
# +
# Sweep the coupler frequency and record static ZZ and effective coupling.
Nq = 3
gc1, gc2, g12 = .1, .1, .005
Q1 = scon.transmon(f01=5.0, alpha=.3, N=10, Nq=Nq)
Q2 = scon.transmon(f01=5.5, alpha=.3, N=10, Nq=Nq)
Ns = 21
fcs = np.linspace(6.8, 7.5, Ns)
sZZ = np.zeros(Ns)
eGG = np.zeros(Ns)
for i in tqdm(range(Ns)):
    _Qc = scon.transmon(f01=fcs[i], alpha=.2, N=10, Nq=Nq)
    QQQzz = scon.QQQ(Q1, Q2, _Qc, gc1, gc2, g12)
    sZZ[i] = QQQzz.staticZZ
    eGG[i] = QQQzz.effectiveCoupling
# +
# Plot effective coupling (top) and static ZZ (bottom) vs coupler frequency.
fig = plt.figure(2, dpi=100)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.set_title('Effective coupling and Static ZZ')
ax1.plot(fcs, eGG*1e3, marker='.', label=r'$g_{eff}$', color='tab:blue')
ax1.set_xticks([])
ax1.set_ylabel('Coupling [MHz]')
ax2.plot(fcs, sZZ*1e6, marker='.', label=r'$\Omega_{ZZ}$', color='tab:orange')
# NOTE(review): the label says "Min" but max(sZZ)/argmax are used — for a
# negative-valued sZZ the maximum IS the smallest magnitude; confirm sign.
ax2.hlines(max(sZZ*1e6), fcs[0], fcs[-1], linestyle='dashed', label=r'Min : $|\Omega_{ZZ}|=$'+'{:.0f}KHz'.format(abs(max(sZZ))*1e6), color='tab:green')
# ax2.set_ylim([min(sZZ*1e6)-1e1,1e1])
ax2.set_ylabel('Static ZZ [KHz]')
ax2.set_xlabel('Coupler frequency [GHz]')
ax1.legend()
ax1.grid()
ax2.legend()
ax2.grid()
idx = np.argmax(sZZ)
fc_zz0 = fcs[idx]
print('fc_zz0={}'.format(fc_zz0))
# -
# # iSWAP gate simulation with parametric drive
# ---
# +
# Solve for the DC flux bias that parks the coupler at the ZZ-optimal
# frequency, then assemble the driven three-body Hamiltonian.
fc_top, d = 8, 0
args_eq = {'f_trgt': fc_zz0, 'f0': fc_top, 'd': d}
flx_DC = scipy.optimize.fsolve(eq, pi/6, args=args_eq)[0]
Nq = 3
gc1, gc2, g12 = .1, .1, .005
Q1 = scon.transmon(f01=5.0, alpha=.3, N=10, Nq=Nq)
Q2 = scon.transmon(f01=5.5, alpha=.3, N=10, Nq=Nq)
fc = tunable_freq_transmon(flx_DC, fc_top, d)
print('f_coup_idle={} GHz'.format(fc))
Qc = scon.transmon(f01=fc, alpha=.2, N=10, Nq=Nq)
QQQ = scon.QQQ(Q1, Q2, Qc, gc1, gc2, g12)
fiSWAP = abs(QQQ.fd1 - QQQ.fd2)  # qubit-qubit detuning = iSWAP drive frequency
H0 = QQQ.Hq1 + QQQ.Hq2 + QQQ.duff_partc
Hint = QQQ.Hintc1 + QQQ.Hintc2 + QQQ.Hint12
Nbc_t = QQQ.Nbc
H = [H0+Hint, [Nbc_t, frt2]]  # time-dependent term driven by frt2(t, args)
ket000 = scon.ket_3Qsys(0, 0, 0, Nq, Nq, Nq)
ket100 = scon.ket_3Qsys(1, 0, 0, Nq, Nq, Nq)
ket010 = scon.ket_3Qsys(0, 1, 0, Nq, Nq, Nq)
ket110 = scon.ket_3Qsys(1, 1, 0, Nq, Nq, Nq)
ket001 = scon.ket_3Qsys(0, 0, 1, Nq, Nq, Nq)
e_ops = [ket100*ket100.dag(), ket010*ket010.dag(), ket001*ket001.dag(), ket110*ket110.dag()]
# +
# Drive parameters handed to frt2 via mesolve's args; evolve |100>.
args_td = {}
# args_td['flx'] = flux_pulse.wf_after
args_td['flx_offset'] = flx_DC
args_td['d'] = 0
args_td['f0'] = fc_top
args_td['freq'] = 0.492 # fiSWAP # .4
print('freq_flx : {}'.format(args_td['freq']))
args_td['amp'] = .12*pi
time_sim = np.linspace(0, 2000, 201)
res = qt.mesolve(H, ket100, time_sim, args=args_td, c_ops=[],
                 e_ops=e_ops, options=qt.Options(nsteps=1e4))
# -
# Visual check of the time-dependent coupler frequency trajectory.
t_test = np.linspace(0, 100, 1001)
ft = [frt2(tt, args_td) for tt in t_test]
plt.figure('fc', dpi=100)
plt.title('Time dependent tunable coupler frequency')
plt.xlabel('time [ns]')
plt.ylabel('freq. [GHz]')
plt.plot(t_test, ft)
# Chevron-style sweep: populations vs drive frequency and pulse length,
# starting from |010>.
drive_freq = np.linspace(0.49, 0.51, 51)
Nfreq = len(drive_freq)
Ntime = len(time_sim)
elist_Q1 = np.zeros([Nfreq, Ntime])
elist_Q2 = np.zeros([Nfreq, Ntime])
elist_Qc = np.zeros([Nfreq, Ntime])
elist_11 = np.zeros([Nfreq, Ntime])
for i in tqdm(range(Nfreq)):
    args_td['freq'] = drive_freq[i]
    res = qt.mesolve(H, ket010, time_sim, args=args_td, c_ops=[],
                     e_ops=e_ops, options=qt.Options(nsteps=1e4))
    es = res.expect
    elist_Q1[i], elist_Q2[i], elist_Qc[i], elist_11[i] = es
# +
# Heatmap of Q1 population over the (pulse length, drive frequency) sweep.
plt.figure(1, dpi=150)
plt.pcolor(time_sim, drive_freq, elist_Q1)
plt.colorbar()
plt.title('Q1 population')
plt.xlabel('Pulse length [ns]')
plt.ylabel('Flux-pulse frequency [GHz]')
# +
# Same heatmap for Q2's population.
plt.figure(1, dpi=150)
plt.pcolor(time_sim, drive_freq, elist_Q2)
plt.colorbar()
plt.title('Q2 population')
plt.xlabel('Pulse length [ns]')
plt.ylabel('Flux-pulse frequency [GHz]')
# -
| example_notebook/iSWAPgate_tunableCoupler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import numpy as np
import time
import random
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# ## Preparing dataset:
# Pickled (X, y) feature files; pick raw sequences or the augmented set.
f1 = '/home/user/Desktop/spring22/thesis/gait/features/ks20/sequences.pickle'
f2 = '/home/user/Desktop/spring22/thesis/gait/features/ks20/raw-augmented.pickle'
augmented = False
features = f2 if augmented else f1
# +
# How to read and use?
from sklearn.model_selection import train_test_split
# NOTE: pickle.load trusts the file completely — only load files you created.
with open(file=features, mode='rb+') as file:
    X, y = pickle.load(file=file)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2)
X_train.shape
# -
# ## Set Parameters:
#
# +
n_steps = 20 # timesteps per series
# Input Data
training_data_count = len(X_train)
test_data_count = len(X_test)
n_input = len(X_train[0][0])  # num input parameters per timestep
n_hidden = 64 # Hidden layer num of features
n_classes = 20
#updated for learning-rate decay
# calculated as: decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
# decaying_learning_rate = True
decaying_learning_rate = False
learning_rate = 0.0025 # used if decaying_learning_rate set to False
learning_rate = tf.convert_to_tensor(
    learning_rate, dtype=None, dtype_hint=None, name=None
)
init_learning_rate = 0.005
decay_rate = 0.96 # the base of the exponential in the decay
decay_steps = 100000 # used in decay every 60000 steps with a base of 0.96
global_step = tf.Variable(0, trainable=False)
lambda_loss_amount = 0.0015
# training_iters = training_data_count * 300 # epochs
training_iters = training_data_count * 50 # epochs
batch_size = 512
display_iter = batch_size * 8 # To show test set accuracy during training
print("(X shape, y shape, every X's mean, every X's standard deviation)")
# NOTE(review): mixes X_train.shape with y_test's shape and X_test's stats —
# probably meant X_test.shape (as in the upstream reference code); confirm.
print(X_train.shape, y_test.shape, np.mean(X_test), np.std(X_test))
print("\nThe dataset has not been preprocessed, is not normalised etc")
# -
# ## Utility functions for training:
# +
def LSTM_RNN(_X, _weights, _biases):
    """Two-layer LSTM over [batch, n_steps, n_input]; returns class logits.

    Model architecture based on "guillaume-chevalier" and "aymericdamien"
    under the MIT license.
    """
    # (batch, steps, input) -> (steps, batch, input), then flatten to 2-D.
    _X = tf.transpose(_X, [1, 0, 2])
    _X = tf.reshape(_X, [-1, n_input])
    # ReLU projection into the hidden dimension.
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # static_rnn expects a python list with one tensor per timestep.
    _X = tf.split(_X, n_steps, 0)
    # Two stacked LSTM cells (two recurrent layers deep).
    cells = [
        tf.nn.rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
        for _ in range(2)
    ]
    stacked_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    outputs, _states = tf.nn.static_rnn(stacked_cells, _X, dtype=tf.float32)
    # "Many to one": only the final timestep feeds the linear classifier;
    # see http://karpathy.github.io/2015/05/21/rnn-effectiveness/
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
    """Draw *batch_size* samples without replacement from _unsampled indices.

    Indices are removed from *_unsampled* in place (the list is also
    returned); the caller resets the pool when it runs low.
    """
    out_shape = list(_train.shape)
    out_shape[0] = batch_size
    batch_s = np.empty(out_shape)
    batch_labels = np.empty((batch_size, 1))
    for slot in range(batch_size):
        picked = random.choice(_unsampled)
        batch_s[slot] = _train[picked]
        batch_labels[slot] = _labels[picked]
        _unsampled.remove(picked)
    return batch_s, batch_labels, _unsampled
def one_hot(y_, n_values=None):
    """One-hot encode a column of integer labels.

    e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0],
    [0, 0, 0, 1, 0, 0]]. Returns floats.

    n_values: total number of classes. Defaults to max(y_) + 1 (the
    original behavior), which is why callers previously had to pad short
    batches — pass n_classes explicitly to get fixed-width encodings.
    """
    y_ = y_.reshape(len(y_))
    if n_values is None:
        n_values = int(np.max(y_)) + 1
    return np.eye(n_values)[np.array(y_, dtype=np.int32)]
# -
# ## Build the network:
# +
# Graph input/output
# Placeholders for a batch of sequences and their one-hot labels.
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
if decaying_learning_rate:
    learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
    #decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) #exponentially decayed learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# -
# ## Train the network:
# +
# Training loop: batches drawn without replacement from X_train, with the
# pool reset whenever fewer than batch_size indices remain.
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
step = 1
time_start = time.time()
unsampled_indices = list(range(0, len(X_train)))
while step * batch_size <= training_iters:
    #print (sess.run(learning_rate)) #decaying learning rate
    #print (sess.run(global_step)) # global number of iterations
    if len(unsampled_indices) < batch_size:
        unsampled_indices = list(range(0, len(X_train)))
    # BUG FIX: the returned pool was assigned to the misspelled
    # `unsampled_indicies` and never read; it only worked because
    # extract_batch_size also mutates the list in place.
    batch_xs, raw_labels, unsampled_indices = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
    batch_ys = one_hot(raw_labels)
    # one_hot sizes itself to the batch's max label; pad to n_classes.
    if len(batch_ys[0]) < n_classes:
        temp_ys = np.zeros((batch_size, n_classes))
        temp_ys[:batch_ys.shape[0], :batch_ys.shape[1]] = batch_ys
        batch_ys = temp_ys
    # Fit training using batch data
    _, loss, acc = sess.run(
        [optimizer, cost, accuracy],
        feed_dict={
            x: batch_xs,
            y: batch_ys
        }
    )
    train_losses.append(loss)
    train_accuracies.append(acc)
    # Evaluate network only at some steps for faster training:
    if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
        # To not spam console, show training accuracy/loss in this "if"
        print("Iter #" + str(step*batch_size) + \
              ": Learning rate = " + "{:.6f}".format(sess.run(learning_rate)) + \
              ": Batch Loss = " + "{:.6f}".format(loss) + \
              ", Accuracy = {}".format(acc))
        # Evaluation on the test set (no learning made here - just evaluation for diagnosis)
        loss, acc = sess.run(
            [cost, accuracy],
            feed_dict={
                x: X_test,
                y: one_hot(y_test)
            }
        )
        test_losses.append(loss)
        test_accuracies.append(acc)
        print("PERFORMANCE ON TEST SET: " + \
              "Batch Loss = {}".format(loss) + \
              ", Accuracy = {}".format(acc))
    step += 1
print("Optimization Finished!")
# Final accuracy on the test set; note `accuracy` is rebound from the
# accuracy *tensor* to its evaluated float here (intended — the plotting
# cell below reads the float).
one_hot_predictions, accuracy, final_loss = sess.run(
    [pred, accuracy, cost],
    feed_dict={
        x: X_test,
        y: one_hot(y_test)
    }
)
test_losses.append(final_loss)
test_accuracies.append(accuracy)
print("FINAL RESULT: " + \
      "Batch Loss = {}".format(final_loss) + \
      ", Accuracy = {}".format(accuracy))
time_stop = time.time()
print("TOTAL TIME: {}".format(time_stop - time_start))
# -
# ## Results:
#
#
# +
# %matplotlib inline
font = {
    'family' : 'Bitstream Vera Sans',
    'weight' : 'bold',
    'size'   : 18
}
matplotlib.rc('font', **font)
width = 12
height = 12
plt.figure(figsize=(width, height))
# Training curve: one point per training step.
indep_train_axis = np.array(range(batch_size, (len(train_losses)+1)*batch_size, batch_size))
#plt.plot(indep_train_axis, np.array(train_losses), "b--", label="Train losses")
plt.plot(indep_train_axis, np.array(train_accuracies), "g--", label="Train accuracies")
# Test curve: one point per evaluation (every display_iter), plus the final one.
indep_test_axis = np.append(
    np.array(range(batch_size, len(test_losses)*display_iter, display_iter)[:-1]),
    [training_iters]
)
#plt.plot(indep_test_axis, np.array(test_losses), "b-", linewidth=2.0, label="Test losses")
plt.plot(indep_test_axis, np.array(test_accuracies), "b-", linewidth=2.0, label="Test accuracies")
plt.title("Training session's Accuracy over Iterations")
plt.legend(loc='lower right', shadow=True)
plt.ylabel('Training Accuracy')
plt.xlabel('Training Iteration')
plt.show()
# Results
predictions = one_hot_predictions.argmax(1)
print("Testing Accuracy: {}%".format(100*accuracy))
print("")
print("Precision: {}%".format(100*metrics.precision_score(y_test, predictions, average="weighted")))
print("Recall: {}%".format(100*metrics.recall_score(y_test, predictions, average="weighted")))
print("f1_score: {}%".format(100*metrics.f1_score(y_test, predictions, average="weighted")))
print("")
print("Confusion Matrix:")
print("Created using test set of {} datapoints, normalised to % of each class in the test dataset".format(len(y_test)))
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
#print(confusion_matrix)
normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
# Plot Results:
width = 12
height = 12
plt.figure(figsize=(width, height))
plt.imshow(
    normalised_confusion_matrix,
    interpolation='nearest',
    cmap=plt.cm.Blues
)
plt.title("Confusion matrix \n(normalised to % of total test data)")
plt.colorbar()
tick_marks = np.arange(n_classes)  # NOTE(review): computed but never used (no xticks/yticks call)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# -
# ##
| notebooks/lstm/lstm-ks20-raw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 420-A52-SF - Algorithmes d'apprentissage supervisรฉ - Hiver 2020 - Spรฉcialisation technique en Intelligence Artificielle - <NAME>, M.Sc.
# <br/>
# 
# <br/>
# **Objectif:** cette sรฉance de travaux pratique propose la mise en oeuvre sous forme de code Python de l'**algorithme du gradient en rรฉgression linรฉaire simple**. Le jeu de donnรฉes utilisรฉ sera une version simplifiรฉe du jeu de donnรฉes *Advertising*
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# ### 0 - Chargement des bibliothรจques
# +
# Manipulation de donnรฉes
import numpy as np
import pandas as pd
from collections import defaultdict
# Visualisation de donnรฉes
import matplotlib.pyplot as plt
import seaborn as sns
# Outils divers
from tqdm.notebook import tqdm_notebook
from tqdm import tqdm
# -
# Configuration de la visualisation
sns.set(style="darkgrid", rc={'figure.figsize':(11.7,8.27)})
# ### 1 - Lecture du jeu de donnรฉes advertising
# **Exercice 1**: ร l'aide de la bibiothรจques *pandas*, lire le fichier `advertising-univariate.csv`
# Complรฉter le code ci-dessous ~ 1 ligne
df = None
# ### 2 - Dรฉfinition du modรจle et de la fonction de coรปt
# **Exercice 2**: complรฉter la fonction ci-dessous reprรฉsentant le modรจle de rรฉgression linรฉaire simple (hypothรจse)
#
# Pour rappel, le modรจle de rรฉgression linรฉaire simple est
#
# $h_{\theta}(x)=\theta_{0} + \theta_{1}x$
def hypothesis(x, theta_0, theta_1):
    """Simple linear regression model (hypothesis): h(x) = theta_0 + theta_1 * x.

    Exercise stub: intentionally returns None until completed by the student.
    """
    # Complete the code ~ 1 line (exercise)
    h = None
    return h
# **Exercice 3**: complรฉter la fonction ci-dessous permettant le calcul du coรปt (fonction de coรปt)
#
# Pour rappel, la fonction de coรปt en rรฉgression linรฉaire simple s'exprime sous la forme
#
# $J(\theta_{0},\theta_{1})= \frac{1}{2m}\sum\limits_{i=1}^{m}(h_{\theta}(x^{(i)}-y^{(i)}))^{2}$
def cost_function(x,y, theta_0, theta_1):
    """Mean squared error cost J(theta_0, theta_1) = 1/(2m) * sum_i (h(x_i) - y_i)^2.

    Exercise stub: intentionally returns None until completed by the student.
    """
    assert len(x) == len(y)
    # Complete the code ~ 1-4 lines (exercise)
    cost = None
    return cost
# ### 3 - Prรฉparation de la structure de donnรฉes
# Feature: TV advertising budget; target: sales.
x = df['TV'].values
y = df['sales'].values
x = x/100 # This scaling allows a better display of the results
# ### 4 - Algorithme du gradient
# **Exercice 4**: Complรฉter l'algorithme du gradient. Choisir les valeurs initiales des paramรจtres $\theta_0$ et $\theta_1$, la valeurs du **pas** ($\alpha$) et le **nombre d'itรฉrations**. Un test de convergence ne sera pas utilisรฉ ici.
#
# $
# \text{Rรฉpรฉter pendant n_iterations}
# \{\\
# \theta_{0}:= \theta_{0} - \alpha\frac{1}{m}\sum\limits_{i=1}^{m}(h_{\theta}(x^{(i)})-y^{(i)})\\
# \theta_{1}:= \theta_{1} - \alpha\frac{1}{m}\sum\limits_{i=1}^{m}(h_{\theta}(x^{(i)})-y^{(i)})\times x^{(i)}
# \\
# \}
# $
# +
# Gradient-descent hyperparameters (exercise: choose initial values, the
# learning rate alpha and the number of iterations).
theta_0 = None
theta_1 = None
alpha = None
n_iterations = None
m = len(x)
history = defaultdict(list)
for i in tqdm(range(0, n_iterations)):
    # ~ 4-6 lines of code (exercise): one simultaneous update of theta_0, theta_1
    None
    # Save intermediate values of theta_0, theta_1 and the cost
    if i%5000 == 0:
        cost = cost_function(x, y, theta_0, theta_1)
        history['theta_0'].append(theta_0)
        history['theta_1'].append(theta_1)
        history['cost'].append(cost)
print(f'Theta_0 = {theta_0}')
print(f'Theta_1 = {theta_1}')
# -
# ### 5 - Affichage du modรจle
# +
# Scatter of the data with the fitted regression line overlaid on [0, 3].
ax = sns.scatterplot(x=x, y=y, s=60)
ax.set_xlabel("Budget de publicitรฉ TV (en 100 000 USD)", fontsize=14)
ax.set_ylabel("Ventes (en millier d'unitรฉs)", fontsize=14)
X = np.linspace(0,3,100)
Y = hypothesis(X,theta_0,theta_1)
plt.plot(X,Y, color="g")
# -
# ### 6 - Affichage de l'รฉvolution de $\theta_{0}$, $\theta_{1}$ et la fonction de coรปt lors de l'optimisation
# Three side-by-side panels: theta_0, theta_1 and the cost across the
# snapshots saved every 5000 iterations.
sns.set(rc={'figure.figsize':(18.27,4.27)})
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.plot(history['theta_0'])
ax1.set_title(r'$\theta_{0}$')
ax1.set_xlabel('Itรฉrations')
ax2.plot(history['theta_1'])
ax2.set_title(r'$\theta_{1}$')
ax2.set_xlabel('Itรฉrations')
ax3.plot(history['cost'])
ax3.set_title(r'Fonction de coรปt $J(\theta_{0},\theta_{1})$')
ax3.set_xlabel('Itรฉrations')
# ### 7 - Affichage de l'รฉvolution de $\theta_{0}$ et $\theta_{1}$ sur les courbes de niveau (fonction de coรปt)
# Remarque: afin de visualiser les รฉvolutions des paramรจtres, vous devrez รฉventuellement diminuer la valuer du pas.
# +
sns.set(style="darkgrid", rc={'figure.figsize':(11.7,8.27)})
# Evaluate the cost function on a 50x50 grid of (theta_0, theta_1) pairs.
theta_0s = np.linspace(0,10,50)
theta_1s = np.linspace(0,10,50)
mesh = np.zeros((50,50))
for i, theta0 in enumerate(theta_0s):
    for j, theta1 in enumerate(theta_1s):
        mesh[i,j] = cost_function(x, y, theta0, theta1)
fig, ax = plt.subplots()
XX, YY = np.meshgrid(theta_0s,theta_1s)
CS = ax.contour(XX, YY, mesh, levels=20)
# Bug fix: the original looped `for h in history:` and re-plotted the exact
# same trajectory once per history key; a single plot call is sufficient.
ax.plot(history['theta_1'], history['theta_0'], 'bo')
ax.clabel(CS, inline=1, fontsize=10)
ax.set_xlabel(r'$\theta_{1}$')
ax.set_ylabel(r'$\theta_{0}$')
# -
# ### Fin du TP
| nbs/01-regression-lineaire-simple/01-TP2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#######################################################################
# Copyright (C) #
# 2016-2018 <NAME>(<EMAIL>) #
# 2016 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
# -
# # Windy Grid World
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# world height
WORLD_HEIGHT = 7
# world width
WORLD_WIDTH = 10
# wind strength for each column (pushes the agent upward, i.e. toward row 0)
WIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]
# possible actions
ACTION_UP = 0
ACTION_DOWN = 1
ACTION_LEFT = 2
ACTION_RIGHT = 3
# probability for exploration (epsilon-greedy)
EPSILON = 0.1
# Sarsa step size
ALPHA = 0.5
# reward for each step (constant -1 makes shorter episodes more rewarding)
REWARD = -1.0
# start and goal cells, expressed as [row, column]
START = [3, 0]
GOAL = [3, 7]
ACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]
# -
def step(state, action):
    """Apply *action* from *state* on the windy grid.

    The column's wind strength pushes the agent upward before clamping
    the result to the grid; returns the next position as [row, col].
    """
    row, col = state
    wind = WIND[col]
    if action == ACTION_UP:
        new_row, new_col = row - 1 - wind, col
    elif action == ACTION_DOWN:
        new_row, new_col = min(row + 1 - wind, WORLD_HEIGHT - 1), col
    elif action == ACTION_LEFT:
        new_row, new_col = row - wind, max(col - 1, 0)
    elif action == ACTION_RIGHT:
        new_row, new_col = row - wind, min(col + 1, WORLD_WIDTH - 1)
    else:
        assert False
    # The wind can never push the agent out through the top of the grid.
    return [max(new_row, 0), new_col]
# play for an episode
def _select_action(q_value, state):
    """Epsilon-greedy choice: explore with probability EPSILON, otherwise
    pick uniformly among the actions with maximal Q at *state*."""
    if np.random.binomial(1, EPSILON) == 1:
        return np.random.choice(ACTIONS)
    values_ = q_value[state[0], state[1], :]
    best = np.max(values_)
    return np.random.choice([a for a, v in enumerate(values_) if v == best])


def episode(q_value):
    """Run one Sarsa episode on the windy grid world.

    Mutates *q_value* in place via the Sarsa TD update and returns the
    number of time steps the episode took.
    """
    elapsed = 0
    state = START
    action = _select_action(q_value, state)
    # keep going until the goal state is reached
    while state != GOAL:
        next_state = step(state, action)
        next_action = _select_action(q_value, next_state)
        # Sarsa update: Q(s,a) += alpha * (r + Q(s',a') - Q(s,a))
        td_target = REWARD + q_value[next_state[0], next_state[1], next_action]
        q_value[state[0], state[1], action] += ALPHA * (
            td_target - q_value[state[0], state[1], action]
        )
        state, action = next_state, next_action
        elapsed += 1
    return elapsed
# +
def figure_6_3():
    """Train Sarsa for 500 episodes, plot cumulative time steps per episode,
    and print the greedy policy learned for each grid cell."""
    q_value = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
    episode_limit = 500
    steps = []
    for _ in range(episode_limit):
        steps.append(episode(q_value))
    # Cumulative sum: x = total time steps, y = episodes completed so far.
    steps = np.add.accumulate(steps)
    plt.plot(steps, np.arange(1, len(steps) + 1))
    plt.xlabel('Time steps')
    plt.ylabel('Episodes')
    plt.show()
    # display the optimal (greedy) policy, one letter per cell
    symbol = {ACTION_UP: 'U', ACTION_DOWN: 'D', ACTION_LEFT: 'L', ACTION_RIGHT: 'R'}
    optimal_policy = []
    for i in range(0, WORLD_HEIGHT):
        policy_row = []
        for j in range(0, WORLD_WIDTH):
            if [i, j] == GOAL:
                policy_row.append('G')
            else:
                policy_row.append(symbol[np.argmax(q_value[i, j, :])])
        optimal_policy.append(policy_row)
    print('Optimal policy is:')
    for row in optimal_policy:
        print(row)
    print('Wind strength for each column:\n{}'.format([str(w) for w in WIND]))
figure_6_3()
| chapter06/windy_grid_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import warnings
# NOTE(review): blanket suppression hides deprecation notices too — consider
# narrowing to specific warning categories.
warnings.filterwarnings('ignore')
# +
import numpy as np
import argparse
import cv2
from PIL import Image, ImageOps
from numpy import asarray
import os
import numpy as np
import pandas as pd
# -
# Jupyter only
from matplotlib import pyplot as plt
from IPython.display import display
# %matplotlib inline
# +
fps = '1FPS'
# Source frames, destination for cropped faces, and the Caffe model assets.
frame_dir = '../../data/DAiSEE/' + fps + '/data/'
faces_dir = '../../data/DAiSEE/' + fps + '/dataFaces/'
caffe_dir = '../../models/caffe/'
# -
# load the pre-trained SSD face detector (ResNet-10 backbone) from disk
net = cv2.dnn.readNetFromCaffe(caffe_dir + 'deploy.prototxt.txt', caffe_dir + 'res10_300x300_ssd_iter_140000.caffemodel')
# NOTE(review): assumes an OpenCV build with CUDA support — confirm, otherwise
# these calls fall back or fail depending on the OpenCV version.
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
# +
# extract a single face from a given photograph
def extract_face(image_file, required_size=(300, 300)):
    """Detect and crop the first face with confidence > 0.5 in `image_file`.

    Returns the cropped image region, or None implicitly when no detection
    clears the threshold (callers must handle this).
    NOTE(review): `required_size` is never used — the crop is returned at its
    natural size. Box coordinates are not clamped, so a detection near the
    image border could produce negative indices; confirm intended.
    """
    image = cv2.imread(image_file)
    (h, w) = image.shape[:2]
    # Resize to the 300x300 network input and subtract the model's BGR means.
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300,300)), 1.0, (300, 300), (103.93, 116.77, 123.68))
    net.setInput(blob)
    detections = net.forward()
    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            # Scale the normalised box back to the original image size.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            face = image[startY:endY, startX:endX]
            return face
# load images and extract faces for all images in a directory
def load_faces(directory, destination):
    """Run face extraction on every jpg in `directory`, writing crops to `destination`.

    Returns the number of images successfully processed. Failures (no face
    found, unreadable image) are skipped — this stays deliberately best-effort.
    """
    cnt = 0
    for filename in os.listdir(directory):
        path = directory + filename
        if filename.endswith('jpg'):
            try:
                face = extract_face(path)
                # extract_face returns None when no detection clears the
                # confidence threshold; skip instead of relying on imwrite to raise.
                if face is None:
                    continue
                cv2.imwrite(destination + filename, face)
                cnt += 1
            except Exception:
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed. Also removed the unused
                # `faces` list the original accumulated but never returned.
                continue
    return cnt
# +
# Create image file structure for faces data and grab the faces
cols = ['b0', 'b1', 'b2', 'b3']
dirs = ['train', 'test', 'validation']
for d in dirs:
    for c in cols:
        in_dir = frame_dir + d + '/' + c + '/'
        out_dir = faces_dir + d + '/' + c + '/'
        # make the faces directory if it doesn't already exist
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # get the faces and save
        # Fix: the result used to be assigned to `c`, shadowing the inner
        # loop variable; use a dedicated name instead.
        n_saved = load_faces(in_dir, out_dir)
        print(out_dir, n_saved)
# -
| src/cnn/.ipynb_checkpoints/2_get_faces_dnn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### DEMDP07
# # Stochastic Optimal Economic Growth Model
#
# Welfare maximizing social planner must decide how much society should consume and invest. Unlike the deterministic model, this model allows arbitrary constant relative risk aversion, capital depreciation, and stochastic production shocks. It lacks a known closed-form solution.
# - States
# - s stock of wealth
# - Actions
# - k capital investment
# - Parameters
# - alpha relative risk aversion
# - beta capital production elasticity
# - gammma capital survival rate
# - sigma production shock volatility
# - delta discount factor
import numpy as np
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, DPmodel, DPoptions, qnwlogn, demo
import seaborn as sns
import pandas as pd
# ### Model parameters
# Relative risk aversion, production elasticity, capital survival rate,
# shock volatility, and discount factor (see parameter list above).
ฮฑ, ฮฒ, ฮณ, ฯ, ฮด = 0.2, 0.5, 0.9, 0.1, 0.9
# ## Analytic results
#
# The deterministic steady-state values for this model are
# +
kstar = ((1 - ฮด*ฮณ)/(ฮด*ฮฒ))**(1/(ฮฒ-1)) # steady-state capital investment
sstar = ฮณ*kstar + kstar**ฮฒ # steady-state wealth
# -
# ## Numeric results
# ### State space
# The state variable is s="Wealth", which we restrict to $s\in[5, 10]$.
#
# Here, we represent it with a Chebyshev basis, with $n=10$ nodes.
# Chebyshev basis with n = 10 nodes over the wealth interval [5, 10].
n, smin, smax = 10, 5, 10
basis = BasisChebyshev(n, smin, smax, labels=['Wealth'])
# ### Continuous state shock distribution
# m quadrature nodes/weights for a lognormal shock; the -ฯ²/2 mean of the log
# makes the shock have unit expectation.
m = 5
e, w = qnwlogn(m, -ฯ**2/2,ฯ**2)
# ### Action space
# The choice variable k="Investment" must be nonnegative.
def bounds(s, i=None, j=None):
    """Feasible investment bounds for wealth `s`: k must lie in [0, 0.99*s]."""
    lower = np.zeros_like(s)
    upper = 0.99 * s
    return lower, upper
# ### Reward function
# The reward function is the utility of consumption=$s-k$.
def reward(s, k, i=None, j=None):
    """CRRA utility of consumption c = s - k.

    Returns (u, ux, uxx): the utility and its first and second derivatives
    with respect to investment k.
    """
    consumption = s - k
    u = consumption**(1-ฮฑ) / (1-ฮฑ)
    ux = -consumption**(-ฮฑ)
    uxx = -ฮฑ * consumption**(-1-ฮฑ)
    return u, ux, uxx
# ### State transition function
# Next period, wealth will be equal to production from available initial capital $k$, that is $s' = k^\beta$
def transition(s, k, i=None, j=None, in_=None, e=None):
    """Wealth transition g(k) = ฮณ k + e kแต with shock e.

    Returns (g, gx, gxx): next-period wealth and its first and second
    derivatives with respect to k.
    """
    level = ฮณ * k + e * k**ฮฒ
    slope = ฮณ + ฮฒ*e * k**(ฮฒ-1)
    curvature = ฮฒ*(ฮฒ-1)*e * k**(ฮฒ-2)
    return level, slope, curvature
# ### Model structure
# The value of wealth $s$ satisfies the Bellman equation
# \begin{equation*}
# V(s) = \max_k\left\{\log(s-k) + \delta V(k^\beta) \right\}
# \end{equation*}
#
# To solve and simulate this model,use the CompEcon class `DPmodel`
# Bundle basis, reward, transition, bounds and the shock quadrature (e, w)
# into a dynamic-programming model discounted by ฮด.
growth_model = DPmodel(basis, reward, transition, bounds,e=e,w=w,
x=['Investment'],
discount=ฮด )
# ### Solving the model
#
# Solving the growth model by collocation.
S = growth_model.solve()
# `DPmodel.solve` returns a pandas `DataFrame` with the following data:
# We are also interested in the shadow price of wealth (the first derivative of the value function).
#
# To analyze the dynamics of the model, it also helps to compute the optimal change of wealth.
# Shadow price = V'(s); D.Wealth = next-period wealth at the unit shock (e=1)
# minus current wealth.
S['shadow price'] = growth_model.Value(S['Wealth'],1)
S['D.Wealth'] = transition(S['Wealth'], S['Investment'],e=1)[0] - S['Wealth']
S.head()
# ### Solving the model by Linear-Quadratic Approximation
# The DPmodel.lqapprox solves the linear-quadratic approximation, in this case arround the steady-state. It returns a LQmodel which works similar to the DPmodel object.
# We also compute the shadow price and the approximation error to compare these results to the collocation results.
# Linear-quadratic approximation around the deterministic steady state; its
# solution is evaluated at the basis nodes so it can be compared with the
# collocation results on the same grid.
growth_lq = growth_model.lqapprox(sstar, kstar)
L = growth_lq.solution(basis.nodes)
L['shadow price'] = L['value_Wealth']
L['D.Wealth'] = L['Wealth_next']- L['Wealth']
L.head()
# ## Plotting the results
# ### Optimal Policy
# Each figure contrasts the Chebyshev-collocation solution (S) with the
# linear-quadratic approximation (L).
fig1 = demo.figure('Optimal Investment Policy', 'Wealth', 'Investment')
plt.plot(S.Investment, label='Chebychev Collocation')
plt.plot(L.Investment, label='L-Q Approximation')
demo.annotate(sstar, kstar,'$s^*$ = %.2f\n$V^*$ = %.2f' % (sstar, kstar),'bo', (10, -17),ms=10)
plt.legend(loc= 'upper left')
# ### Value Function
fig2 = demo.figure('Value Function', 'Wealth', 'Value')
plt.plot(S.value, label='Chebychev Collocation')
plt.plot(L.value, label='L-Q Approximation')
plt.legend(loc= 'upper left')
# ### Shadow Price Function
fig3 = demo.figure('Shadow Price Function', 'Wealth', 'Shadow Price')
plt.plot(S['shadow price'], label='Chebychev Collocation')
plt.plot(L['shadow price'], label='L-Q Approximation')
plt.legend(loc= 'upper right')
# ### Chebychev Collocation Residual
fig4 = demo.figure('Bellman Equation Residual', 'Wealth', 'Residual')
plt.hlines(0,smin,smax,'k',linestyles='--')
plt.plot(S[['resid']])
plt.ticklabel_format(style='sci', axis='y', scilimits=(-1,1))
# ### Wealth dynamics
#
# Notice how the steady-state is stable in the Chebyshev collocation solution, but unstable in the linear-quadratic approximation. In particular, simulated paths of wealth in the L-Q approximation will converge to zero, unless the initial states is within a small neighborhood of the steady-state.
# +
fig5 = demo.figure('Wealth dynamics', 'Wealth', 'Wealth change', figsize=[8,5])
plt.plot(S['D.Wealth'], label='Chebychev Collocation')
plt.plot(L['D.Wealth'], label='L-Q Approximation')
plt.hlines(0,smin,smax,linestyles=':')
demo.annotate(sstar, 0, f'$s^* = {sstar:.2f}$\n$\Delta s^* = {0:.2f}$', 'bo', (10, 10),ms=10,fs=11)
plt.legend(loc= 'lower left')
# -
# ### Simulating the model
#
# We simulate 21 periods of the model starting from $s=s_{\min}$
# Monte Carlo: nrep replications, all starting at the lower wealth bound.
T = 21
nrep = 50_000
data = growth_model.simulate(T, np.tile(smin,nrep))
# ### Simulated State and Policy Paths
# Show only the first three replications individually, plus the cross-sectional mean.
subdata = data[data['_rep'].isin(range(3))]
opts = dict(spec='r*', offset=(0, -15), fs=11, ha='right')
fig6 = demo.figure('Simulated and Expected Wealth','Period', 'Wealth',[0, T + 0.5])
plt.plot(data[['time','Wealth']].groupby('time').mean())
# NOTE(review): positional DataFrame.pivot arguments were removed in pandas 2.0;
# the keyword form pivot(index=..., columns=..., values=...) may be required.
plt.plot(subdata.pivot('time','_rep','Wealth'),lw=1)
demo.annotate(T, sstar, f'steady-state wealth\n = {sstar:.2f}', **opts)
fig7 = demo.figure('Simulated and Expected Investment','Period', 'Investment',[0, T + 0.5])
plt.plot(data[['time','Investment']].groupby('time').mean())
plt.plot(subdata.pivot('time','_rep','Investment'),lw=1)
demo.annotate(T, kstar, f'steady-state investment\n = {kstar:.2f}', **opts)
# ### Ergodic Wealth Distribution
# The cross-section at the final period approximates the ergodic distribution.
subdata = data[data['time']==T][['Wealth','Investment']]
stats =pd.DataFrame({'Deterministic Steady-State': [sstar, kstar],
'Ergodic Means': subdata.mean(),
'Ergodic Standard Deviations': subdata.std()}).T
stats
fig8 = demo.figure('Ergodic Wealth and Investment Distribution','Wealth','Probability',[4.5, 9.5])
sns.kdeplot(subdata['Wealth'])
sns.kdeplot(subdata['Investment'])
# +
#demo.savefig([fig1,fig2,fig3,fig4,fig5,fig6,fig7,fig8])
| notebooks/dp/07 Stochastic Optimal Economic Growth Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Traveling Salesman Problem with Reinforcement Learning
# ## Description of Problem
#
# The travelling salesman problem (TSP) is a classic algorithmic problem in the field of computer science and operations research. Given a list of cities and the distances between each pair of cities, the problem is to find the shortest possible route that visits each city and returns to the origin city.
#
# The problem is NP-complete as the number of combinations of cities grows larger as we add more cities.
#
# In the classic version of the problem, the salesman picks a city to start, travels through remaining cities and returns to the original city.
#
# In this version, we have slightly modified the problem, presenting it as a restaurant order delivery problem on a 2D gridworld. The agent (driver) starts at the restaurant, a fixed point on the grid. Then, delivery orders appear elsewhere on the grid. The driver needs to visit the orders, and return to the restaurant, to obtain rewards. Rewards are proportional to the time taken to do this (equivalent to the distance, as each timestep moves one square on the grid).
# ## Why Reinforcement Learning?
#
# For canonical Operations problems like this one, we're very interested about RL's potential to push the state of the art.
#
# There are a few reasons we think RL offers some unique value for this type of problem:
# 1. RL seems to perform well in high-dimensional spaces, when an approximate solution to a complex problem may be of greater value than an exact/optimal solution to a simpler problem.
# 2. RL can do quite well in partially observed environments. When there are aspects of a problem we don't know about and therefore can't model, which is often the case in the real-world (and we can pretend is the case with these problems), RL's ability to deal with the messiness is valuable.
# 3. RL may have things to teach us! We've seen this to be the case with Go, and Dota 2, where the RL agent came up with innovative strategies that have later been adopted by human players. What if there are clever strategies we can use to solve versions of TSP, Knapsack, Newsvendor, or extensions of any of those? RL might surprise us.
# ## Easy Version of TSP
# In the Easy Version, we are on a 5x5 grid. All orders are generated at the start of the episode. Order locations are fixed, and are invariant (non-random) from episode to episode. The objective is to visit each order location, and return to the restaurant. We have a maximum time-limit of 50 steps.
#
# ### States
# At each time step, our agent is aware of the following information:
#
# 1. For the Restuarant:
# 1. Location (x,y coordinates)
#
# 2. For the Driver
# 1. Location (x,y coordinates)
# 2. Is driver at restaurant (yes/no)
#
# 3. For each Order:
# 1. Location (x,y coordinates)
# 2. Status (Delivered or Not Delivered)
# 3. Time (Time taken to deliver reach order -- incrementing until delivered)
# 4. Miscellaneous
# 1. Time since start of episode
# 2. Time remaining until end of episode (i.e. until max time)
#
# ### Actions
# At each time step, our agent can take the following steps:
# - Up - Move one step up in the map
# - Down - Move one step down in the map
# - Right - Move one step right in the map
# - Left - Move one step left in the map
#
# ### Rewards
# Agent gets a reward of -1 for each time step. If an order is delivered in that timestep, it gets a positive reward inversely proportional to the time taken to deliver. If all the orders are delivered and the agent is back to the restaurant, it gets an additional reward inversely proportional to time since start of episode.
#
# ## Using AWS SageMaker for RL
#
# AWS SageMaker allows you to train your RL agents in cloud machines using docker containers. You do not have to worry about setting up your machines with the RL toolkits and deep learning frameworks. You can easily switch between many different machines setup for you, including powerful GPU machines that give a big speedup. You can also choose to use multiple machines in a cluster to further speedup training, often necessary for production level loads.
# ### Prerequisites
#
# #### Imports
#
# To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.
# +
import sagemaker
import boto3
import sys
import os
import glob
import re
import subprocess
from IPython.display import HTML
import time
from time import gmtime, strftime
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
# -
# #### Settings
#
# You can run this notebook from your local host or from a SageMaker notebook instance. In both of these scenarios, you can run the following in either local or SageMaker modes. The local mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set local_mode = True.
# +
# run in local mode?
local_mode = False
env_type = "tsp-easy"
# create unique job name
job_name_prefix = "rl-" + env_type
# S3 bucket
# default_bucket() creates (if needed) the account's default SageMaker bucket.
sage_session = sagemaker.session.Session()
s3_bucket = sage_session.default_bucket()
s3_output_path = "s3://{}/".format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
# -
# #### Install docker for `local` mode
#
# In order to work in `local` mode, you need to have docker installed. When running from you local machine, please make sure that you have docker or docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install dependenceis.
#
# Note, you can only run a single local notebook at one time.
# only run from SageMaker notebook instance
# NOTE(review): in the source notebook the line below is a shell magic that
# installs docker dependencies; as a plain .py script this `if` has no
# executable body — confirm the jupytext round-trip keeps the magic indented
# under the conditional.
if local_mode:
# !/bin/bash common/setup.sh
# #### Create an IAM role
# Either get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running from local machine, use utils method `role = get_execution_role('role_name')` to create an execution role.
# +
try:
    # On a SageMaker notebook instance this resolves the attached IAM role.
    role = sagemaker.get_execution_role()
except Exception:
    # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not swallowed; off-SageMaker we fall back to the local utility helper.
    role = get_execution_role()
print("Using IAM role arn: {}".format(role))
# -
# #### Setup the environment
#
# The environment is defined in a Python file called โTSP_env.pyโ and the file is uploaded on /src directory.
#
# The environment also implements the init(), step(), reset() and render() functions that describe how the environment behaves. This is consistent with Open AI Gym interfaces for defining an environment.
#
#
# 1. Init() - initialize the environment in a pre-defined state
# 2. Step() - take an action on the environment
# 3. reset()- restart the environment on a new episode
# 4. render() - get a rendered image of the environment in its current state
# #### Configure the presets for RL algorithm
#
# The presets that configure the RL training jobs are defined in the โpreset-tsp-easy.pyโ file which is also uploaded on the /src directory. Using the preset file, you can define agent parameters to select the specific agent algorithm. You can also set the environment parameters, define the schedule and visualization parameters, and define the graph manager. The schedule presets will define the number of heat up steps, periodic evaluation steps, training steps between evaluations.
#
# These can be overridden at runtime by specifying the RLCOACH_PRESET hyperparameter. Additionally, it can be used to define custom hyperparameters.
# !pygmentize src/preset-tsp-easy.py
# #### Write the Training Code
#
# The training code is written in the file โtrain-coach.pyโ which is uploaded in the /src directory.
# First import the environment files and the preset files, and then define the main() function.
# !pygmentize src/train-coach.py
# ### Train the RL model using the Python SDK Script mode
#
# If you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
#
# 1. Specify the source directory where the environment, presets and training code is uploaded.
# 2. Specify the entry point as the training code
# 3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
# 4. Define the training parameters such as the instance count, job name, S3 path for output and job name.
# 5. Specify the hyperparameters for the RL agent algorithm. The RLCOACH_PRESET or the RLRAY_PRESET can be used to specify the RL agent algorithm you want to use.
# 6. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
# +
# %%time
if local_mode:
    instance_type = "local"
else:
    instance_type = "ml.m4.4xlarge"
# RLEstimator wires the training script, Coach toolkit and TensorFlow
# framework into a SageMaker training job on the chosen instance type.
estimator = RLEstimator(
    entry_point="train-coach.py",
    source_dir="src",
    dependencies=["common/sagemaker_rl"],
    toolkit=RLToolkit.COACH,
    toolkit_version="1.0.0",
    framework=RLFramework.TENSORFLOW,
    role=role,
    instance_type=instance_type,
    instance_count=1,
    output_path=s3_output_path,
    base_job_name=job_name_prefix,
    hyperparameters={
        # expected run time 12 mins for TSP Easy
        "RLCOACH_PRESET": "preset-"
        + env_type,
    },
)
# wait=True only in local mode, so training logs stream into the notebook.
estimator.fit(wait=local_mode)
# -
# ### Store intermediate training output and model checkpoints
#
# The output from the training job above is stored on S3. The intermediate folder contains gifs and metadata of the training.
# +
job_name = estimator._current_job_name
print("Job name: {}".format(job_name))
s3_url = "s3://{}/{}".format(s3_bucket, job_name)
# Local mode writes output.tar.gz at the job root; SageMaker puts it under output/.
if local_mode:
    output_tar_key = "{}/output.tar.gz".format(job_name)
else:
    output_tar_key = "{}/output/output.tar.gz".format(job_name)
intermediate_folder_key = "{}/output/intermediate".format(job_name)
output_url = "s3://{}/{}".format(s3_bucket, output_tar_key)
intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key)
print("S3 job path: {}".format(s3_url))
print("Output.tar.gz location: {}".format(output_url))
print("Intermediate folder path: {}".format(intermediate_url))
tmp_dir = "/tmp/{}".format(job_name)
# Fix: create the scratch dir with os.makedirs instead of shelling out via
# os.system("mkdir ..."), whose non-zero exit status was silently ignored.
os.makedirs(tmp_dir, exist_ok=True)
print("Create local folder {}".format(tmp_dir))
# -
# ### Visualization
# #### Comparing against a baseline policy
# !pip install gym
# !pip install pygame
# The environment modules live in src/, so temporarily switch directories.
os.chdir("src")
# +
# Get baseline reward
from TSP_env import TSPEasyEnv
from TSP_baseline import get_mean_baseline_reward
# Heuristic baseline reward to compare the RL agent against.
baseline_mean, baseline_std_dev = get_mean_baseline_reward(env=TSPEasyEnv(), num_of_episodes=1)
print(baseline_mean, baseline_std_dev)
# -
os.chdir("../")
# #### Plot metrics for training job
# We can pull the reward metric of the training and plot it to see the performance of the model over time.
# +
import pandas as pd
import matplotlib
# %matplotlib inline
# csv_file has all the RL training metrics
# csv_file = "{}/worker_0.simple_rl_graph.main_level.main_level.agent_0.csv".format(tmp_dir)
csv_file_name = "worker_0.simple_rl_graph.main_level.main_level.agent_0.csv"
key = intermediate_folder_key + "/" + csv_file_name
# Block until the metrics CSV appears in S3, then download it to tmp_dir.
wait_for_s3_object(s3_bucket, key, tmp_dir)
csv_file = "{}/{}".format(tmp_dir, csv_file_name)
df = pd.read_csv(csv_file)
x_axis = "Episode #"
y_axis_rl = "Training Reward"
y_axis_base = "Baseline Reward"
# Smooth the RL reward with a 5-episode rolling mean; the baseline is a constant line.
df[y_axis_rl] = df[y_axis_rl].rolling(5).mean()
df[y_axis_base] = baseline_mean
y_axes = [y_axis_rl]
ax = df.plot(
    x=x_axis,
    y=[y_axis_rl, y_axis_base],
    figsize=(18, 6),
    fontsize=18,
    legend=True,
    color=["b", "r"],
)
fig = ax.get_figure()
ax.set_xlabel(x_axis, fontsize=20)
# ax.set_ylabel(y_axis,fontsize=20)
# fig.savefig('training_reward_vs_wall_clock_time.pdf')
# -
# #### Visualize the rendered gifs
# The latest gif file found in the gifs directory is displayed. You can replace the tmp.gif file below to visualize other files generated.
# +
# Pull the rendered episode gifs from S3 and display the most recent one.
key = intermediate_folder_key + "/gifs"
wait_for_s3_object(s3_bucket, key, tmp_dir)
print("Copied gifs files to {}".format(tmp_dir))
glob_pattern = os.path.join("{}/*.gif".format(tmp_dir))
gifs = [file for file in glob.iglob(glob_pattern, recursive=True)]


def extract_episode(path):
    # The episode number is encoded in the filename as "...episode-<n>_...".
    # Raw string avoids the invalid-escape warning the old "\d" produced.
    return int(re.search(r".*episode-(\d*)_.*", path, re.IGNORECASE).group(1))


gifs.sort(key=extract_episode)
print("GIFs found:\n{}".format("\n".join([os.path.basename(gif) for gif in gifs])))
# visualize a specific episode
gif_index = -1  # last entry after sorting = latest episode
gif_filepath = gifs[gif_index]
gif_filename = os.path.basename(gif_filepath)
print("Selected GIF: {}".format(gif_filename))
# Copy with stdlib calls instead of shelling out to `mkdir -p`/`cp`.
import shutil

os.makedirs("./src/tmp_render/", exist_ok=True)
# Keep the original (double ".gif") target name — the HTML below expects it.
shutil.copy(gif_filepath, "./src/tmp_render/{}.gif".format(gif_filename))
HTML('<img src="./src/tmp_render/{}.gif">'.format(gif_filename))
# -
# ### Evaluation of RL models
#
# We use the last checkpointed model to run evaluation for the RL Agent.
#
# #### Load checkpointed model
#
# Checkpointed data from the previously trained models will be passed on for evaluation / inference in the checkpoint channel. In local mode, we can simply use the local directory, whereas in the SageMaker mode, it needs to be moved to S3 first.
# +
# %%time
# Fetch the training job's output archive from S3 and unpack it. The
# checkpoint lives at different relative paths in local vs. SageMaker mode.
wait_for_s3_object(s3_bucket, output_tar_key, tmp_dir)
if not os.path.isfile("{}/output.tar.gz".format(tmp_dir)):
    raise FileNotFoundError("File output.tar.gz not found")
# Use the stdlib tarfile module instead of shelling out to `tar`, so the cell
# does not depend on a tar binary being on PATH.
import tarfile

with tarfile.open("{}/output.tar.gz".format(tmp_dir), "r:gz") as tar:
    # The archive comes from our own training job, so extractall is safe here.
    tar.extractall(path=tmp_dir)
if local_mode:
    checkpoint_dir = "{}/data/checkpoint".format(tmp_dir)
else:
    checkpoint_dir = "{}/checkpoint".format(tmp_dir)
print("Checkpoint directory {}".format(checkpoint_dir))
# +
# %%time
# Build the "checkpoint" channel for evaluation: a local file:// path in
# local mode, otherwise upload the checkpoint files to S3 and use that prefix.
if local_mode:
    checkpoint_path = "file://{}".format(checkpoint_dir)
    print("Local checkpoint file path: {}".format(checkpoint_path))
else:
    checkpoint_path = "s3://{}/{}/checkpoint/".format(s3_bucket, job_name)
    if not os.listdir(checkpoint_dir):
        raise FileNotFoundError("Checkpoint files not found under the path")
    # NOTE(review): relies on the AWS CLI being installed and on PATH in the
    # notebook image — confirm for the target environment.
    os.system("aws s3 cp --recursive {} {}".format(checkpoint_dir, checkpoint_path))
    print("S3 checkpoint file path: {}".format(checkpoint_path))
# -
# #### Run the evaluation step
#
# Use the checkpointed model to run the evaluation step.
# +
# Re-use the training image/toolkit to run evaluate-coach.py on the checkpoint.
estimator_eval = RLEstimator(
    entry_point="evaluate-coach.py",
    source_dir="src/",
    dependencies=["common/sagemaker_rl"],
    toolkit=RLToolkit.COACH,
    toolkit_version="1.0.0",
    framework=RLFramework.TENSORFLOW,
    role=role,
    instance_type=instance_type,
    instance_count=1,
    base_job_name=job_name_prefix + "-evaluation",
    hyperparameters={
        "RLCOACH_PRESET": "preset-tsp-easy",
        "evaluate_steps": 200,  # max 4 episodes
    },
)
# The checkpointed model is passed in via the "checkpoint" input channel.
estimator_eval.fit({"checkpoint": checkpoint_path})
# -
# ## Medium version of TSP <a name="TSP-Medium"></a>
# We make the problem much harder in this version by randomizing the location of destinations each episode. Hence, the RL agent has to come up with a general strategy to navigate the grid. Parameters, states, actions, and rewards are identical to the Easy version of TSP.
#
# ### States
# At each time step, our agent is aware of the following information:
#
# 1. For the Restaurant:
# 1. Location (x,y coordinates)
#
# 2. For the Driver
# 1. Location (x,y coordinates)
# 2. Is driver at restaurant (yes/no)
#
# 3. For each Order:
# 1. Location (x,y coordinates)
# 2. Status (Delivered or Not Delivered)
#     3. Time (Time taken to deliver each order -- incrementing until delivered)
# 4. Miscellaneous
# 1. Time since start of episode
# 2. Time remaining until end of episode (i.e. until max time)
#
#
# ### Actions
# At each time step, our agent can take the following steps:
# - Up - Move one step up in the map
# - Down - Move one step down in the map
# - Right - Move one step right in the map
# - Left - Move one step left in the map
#
# ### Rewards
# Agent gets a reward of -1 for each time step. If an order is delivered in that timestep, it gets a positive reward inversely proportional to the time taken to deliver. If all the orders are delivered and the agent is back to the restaurant, it gets an additional reward inversely proportional to time since start of episode.
# ## Using AWS SageMaker for RL <a name="SM-TSP-Medium"></a>
#
# ### Train the model using Python SDK/Script mode
#
# Skipping through the basic setup, assuming you did that already for the easy version. For good results, we suggest you train for at least 1,000,000 steps. You can edit this either as a hyperparameter in the cell or directly change the preset file.
# +
# %%time
# Toggle for SageMaker local mode, plus a distinct job-name prefix for this run.
local_mode = False
job_name_prefix = "rl-tsp-medium"
# +
# %%time
# Local mode trains in the notebook container; otherwise use a larger CPU host.
instance_type = "local" if local_mode else "ml.m4.4xlarge"
estimator = RLEstimator(
    role=role,
    entry_point="train-coach.py",
    source_dir="src",
    dependencies=["common/sagemaker_rl"],
    toolkit=RLToolkit.COACH,
    toolkit_version="1.0.0",
    framework=RLFramework.TENSORFLOW,
    instance_count=1,
    instance_type=instance_type,
    output_path=s3_output_path,
    base_job_name=job_name_prefix,
    hyperparameters={"RLCOACH_PRESET": "preset-tsp-medium"},
)
# Block on completion (streaming logs) only when running locally.
estimator.fit(wait=local_mode)
# -
# ## Visualize, Compare with Baseline and Evaluate
#
# You can follow the same set of code used for TSP easy version.
# ## Vehicle Routing Problem with Reinforcement Learning <a name="VRP-Easy"></a>
#
# Vehicle Routing Problem (VRP) is a similar problem where the algorithm optimizes the movement of a fleet of vehicles. Our VRP formulation is a bit different, we have a delivery driver who accepts orders from customers, picks up food from a restaurant and delivers it to the customer. The driver optimizes to increase the number of successful deliveries within a time limit.
#
# Key differences from TSP:
#
# - Pathing is now automatic. Instead of choosing "left, right, up, down", now you just select your destination as your action. The environment will get you there in the fewest steps possible.
# - Since the routing/pathing is now taken care of, we add in complexity elsewhere...
# - There can be more than one restaurant, each with a different type of order (e.g. Pizzas vs. Burritos). Each order will have a different type, and you have to visit the correct restaurant to pick up an order before dropping it off.
# - Drivers have a limited capacity; they cannot pick up an infinite number of orders. Instead, they can only have (e.g. 5) orders in the car at any given time. This means they will have to return to the restaurant(s) in between deliveries to pick up more supply.
# - Orders now come in dynamically over time, rather than all being known at time zero. Each time step, there is some probability that an order will be generated.
# - As the driver/agent, we now have the choice to fulfill an order or not -- there's no penalty associated with not accepting an order, but a potential penalty if we accept an order and fail to deliver it before Timeout.
#
# ### States
# At each time step, our agent is aware of the following information:
#
# 1. For each Restaurant:
# 1. Location (x,y coordinates)
#
# 1. For the Driver
# 1. Location (x,y coordinates)
# 2. Capacity (maximum # of orders you can carry, at one time, on the driver)
# 3. Used Capacity (# of orders you currently carry on the driver)
#
# 1. For each Order:
# 1. Location (x,y coordinates)
# 2. Status (Accepted, Not Accepted, Delivered, Not Delivered)
#     3. Type (Which restaurant the order belongs to, like Pizza or Burrito)
# 4. Time (Time since order was generated)
# 5. Promise (If you deliver the order by this time, you get a bonus reward)
# 6. Timeout (If you deliver the order after this time, you get a penalty)
#
# ### Actions
# At each time step, our agent can do ONE of the following:
# - Choose a restaurant to visit (incremental step L,R,U,D, will be auto-pathed)
# - Choose an order to visit (incremental step L,R,U,D, will be auto-pathed)
# - Accept an order (no movement will occur)
# - Do nothing
#
# ### Rewards
# - Driver gets a reward of -1 for each time step.
# - If driver delivers order, get a reward proportional to the time taken to deliver (extra bonus for beating Promise time)
# - If order expires (reaches Timeout), get a penalty
# ## Using AWS SageMaker RL <a name="SM-VRP-Easy"></a>
#
# ### Train the model using Python SDK/Script mode
#
# Skipping through the basic setup, assuming you did that already for the easy version. For good results, we suggest a minimum of 5,000,000 steps of training.
# +
# %%time
# Toggle for SageMaker local mode, plus a distinct job-name prefix for VRP.
local_mode = False
job_name_prefix = "rl-vrp-easy"
# +
# %%time
# Local mode trains in the notebook container; otherwise use a larger CPU host.
instance_type = "local" if local_mode else "ml.m4.4xlarge"
estimator = RLEstimator(
    role=role,
    entry_point="train-coach.py",
    source_dir="src",
    dependencies=["common/sagemaker_rl"],
    toolkit=RLToolkit.COACH,
    toolkit_version="1.0.0",
    framework=RLFramework.TENSORFLOW,
    instance_count=1,
    instance_type=instance_type,
    output_path=s3_output_path,
    base_job_name=job_name_prefix,
    hyperparameters={"RLCOACH_PRESET": "preset-vrp-easy"},
)
# Block on completion (streaming logs) only when running locally.
estimator.fit(wait=local_mode)
# -
# ## Visualize, Compare with Baseline and Evaluate
#
# You can follow the same set of code used for TSP easy version.
| reinforcement_learning/rl_traveling_salesman_vehicle_routing_coach/rl_traveling_salesman_vehicle_routing_coach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import Imputer
from sklearn.pipeline import FeatureUnion
from datetime import datetime
import gc
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# Load the NHL salary data. The CSVs contain Latin-1 encoded player names.
train = pd.read_csv('train.csv', encoding="ISO-8859-1")
test_x = pd.read_csv('test.csv', encoding="ISO-8859-1")
test_y = pd.read_csv('test_salaries.csv')
train.info()
# Inspect the categorical (object-dtype) columns.
# (Removed a duplicated select_dtypes/head pair from the original cell.)
obj_cols = train.select_dtypes('object')
obj_cols.head()
train.Born.head()
# Cardinality of each categorical column (nunique == len(value_counts())).
for c in obj_cols.columns:
    print('Obj Col: ', c, ' Number of Unique Values ->', obj_cols[c].nunique())
# Country distribution: bar counts and pie share side by side.
fig, ax = plt.subplots(1, 2, figsize=(18, 10))
obj_cols['Cntry'].value_counts().sort_values().plot(kind='barh', ax=ax[0])
ax[0].set_title("Counts of Hockey Players by Country");
obj_cols['Cntry'].value_counts().plot(kind='pie', autopct='%.2f', shadow=True, ax=ax[1]);
ax[1].set_title("Distribution of Hockey Players by Country");
# Team distribution.
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
obj_cols['Team'].value_counts().plot(kind='bar', ax=ax);
plt.title('Counts of Team Values');
# Peek at players earning at least $10M.
sal_gtmil = train[train.Salary >= 1e7]
sal_gtmil.head(10)
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def data_clean(x):
    """Feature-engineer the raw player frame for modeling.

    Expands ``Born`` into day-of-week/day-of-year/month/year features,
    one-hot encodes the categorical columns, integer-encodes ``Hand`` and
    drops the raw/unused columns.

    Parameters
    ----------
    x : pandas.DataFrame
        Raw frame with at least the columns ``Born``, ``Pr/St``,
        ``First Name``, ``City``, ``Cntry``, ``Nat``, ``Last Name``,
        ``Position``, ``Team`` and ``Hand``.

    Returns
    -------
    pandas.DataFrame
        A new frame; the caller's frame is left unmodified.
    """
    # Work on a copy: the original version mutated the caller's frame via
    # in-place drops and column assignments.
    x = x.copy()
    # Expand the birth date into calendar features.
    x['Born'] = pd.to_datetime(x.Born, yearfirst=True)
    x['dowBorn'] = x.Born.dt.dayofweek
    x['doyBorn'] = x.Born.dt.dayofyear
    x['monBorn'] = x.Born.dt.month
    x['yrBorn'] = x.Born.dt.year
    # Pr/St is NaN for players from outside North America; First Name is
    # not predictive.
    x.drop(['Pr/St', 'First Name'], axis=1, inplace=True)
    ocols = ['City', 'Cntry', 'Nat', 'Last Name', 'Position', 'Team']
    for oc in ocols:
        dummies = pd.get_dummies(x[oc])
        # rsuffix disambiguates one-hot columns that collide across source
        # columns (e.g. "CAN" appears in both Cntry and Nat).
        x = x.join(dummies, rsuffix='_' + oc)
    # Integer-encode handedness (first value seen -> 0, next -> 1, ...).
    x['Hand'] = pd.factorize(x.Hand)[0]
    # Drop the raw categorical columns and the datetime column in one pass.
    x.drop(ocols + ['Born'], axis=1, inplace=True)
    return x
# Re-load the raw data from scratch so this section is independent of the EDA
# above. Catch only NameError (first run: nothing to delete yet) instead of
# the original bare ``except:`` which silently swallowed everything.
try:
    del train, x0, xc, test
except NameError:
    pass
train = pd.read_csv('train.csv', encoding="ISO-8859-1")
train.head()
test_x = pd.read_csv('test.csv', encoding="ISO-8859-1")
##test_y = pd.read_csv('test_salaries.csv')
test_x.head()
# Stack train and test so one-hot encoding sees every category level once;
# train rows come first in the outer merge.
full = train.merge(test_x, how='outer')
print(train.shape, test_x.shape, full.shape)
# Model the log of salary; only the train rows carry a Salary value.
y = np.log(full.Salary.dropna())
full0 = full.drop(['Salary'], axis=1)
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
y.plot(ax=ax);
plt.title("Ln Salary");
obj_cols.columns
full_c = data_clean(full0)
print(full0.shape, full_c.shape)
full_c.head()
# Standardize all features, then split back into train/test by row count
# (replaces the hard-coded magic number 612 with the actual train size).
ss = StandardScaler()
full_cs = ss.fit_transform(full_c)
n_train = train.shape[0]
train_c = full_cs[:n_train]
test_c = full_cs[n_train:]
print(train_c.shape, y.shape, test_c.shape)
type(y)
# +
# Train a 3-fold LightGBM "bag": each fold uses a different random
# train/valid split, and the test-set predictions are averaged.
folds = 3
lgbm_params = {
    "max_depth": -1,  # unlimited depth; num_leaves bounds complexity instead
    "num_leaves": 1000,
    "learning_rate": 0.01,
    "n_estimators": 1000,
    "objective": 'regression',
    'min_data_in_leaf': 64,
    'feature_fraction': 0.8,
    'colsample_bytree': 0.8,
    "metric": ['mae', 'mse'],
    "boosting_type": "gbdt",
    "n_jobs": -1,
    "reg_lambda": 0.9,
    "random_state": 123
}
preds = 0
for f in range(folds):
    # A different random_state per fold yields a different 80/20 split.
    xt, xv, yt, yv = train_test_split(train_c, y.values, test_size=0.2, random_state=((f + 1) * 123))
    xtd = lgb.Dataset(xt, label=yt)
    xvd = lgb.Dataset(xv, label=yv)
    # NOTE(review): early_stopping_rounds/verbose_eval were removed from
    # lgb.train in lightgbm >= 4 (moved to callbacks) — confirm the pinned
    # lightgbm version for this notebook.
    mod = lgb.train(params=lgbm_params, train_set=xtd,
                    num_boost_round=1000, valid_sets=xvd, valid_names=['valset'],
                    early_stopping_rounds=20, verbose_eval=20)
    preds += mod.predict(test_c)
# Average the fold predictions (still on the log-salary scale).
preds = preds / folds
# -
# Compare predictions (back-transformed from log space via exp) against the
# actual test salaries.
acts = pd.read_csv('test_salaries.csv', encoding="ISO-8859-1")
acts['preds'] = np.exp(preds)
acts.head()
import matplotlib
from sklearn.metrics import mean_absolute_error, mean_squared_error
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
acts.plot(ax=ax, style=['b-', 'r-']);
plt.title("Comparison of Preds and Actuals");
plt.ylabel('$');
# Render the dollar axis with thousands separators.
ax.get_yaxis().set_major_formatter(
    matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.tight_layout()
# Score on the log scale — the same scale the model was trained on.
mse = mean_squared_error(np.log(acts.Salary), np.log(acts.preds))
mae = mean_absolute_error(np.log(acts.Salary), np.log(acts.preds))
print("Ln Level Mean Squared Error :", mse)
print("Ln Level Mean Absolute Error :", mae)
# Feature importances from the last trained fold, normalized to 0-100.
fi_df = pd.DataFrame(100 * mod.feature_importance() / mod.feature_importance().max(),
                     index=full_c.columns,  # mod.feature_name(),
                     columns=['importance'])
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
# Show only the 20 most important features.
fi_df.sort_values(by='importance', ascending=True).iloc[-20:].plot(kind='barh', color='C0', ax=ax)
plt.title("Normalized Feature Importances");
import statsmodels.api as sma
# +
# Sanity-check the top-10 features with a linear model on the test rows.
top10 = fi_df.sort_values(by='importance', ascending=True).iloc[-10:].index
top10
exog = pd.DataFrame(test_c, columns=full_c.columns)[list(top10)].fillna(0)
# -
# NOTE(review): no constant is added to exog, so this OLS is fit without an
# intercept — confirm that is intentional (otherwise use sma.add_constant).
ols = sma.OLS(exog=exog, endog=acts.Salary)
ols_fit = ols.fit()
print(ols_fit.summary())
# +
import matplotlib.pyplot as plt
# NOTE(review): ``w_results`` is not defined anywhere in this notebook — this
# cell appears to be pasted from an ensemble-weighting experiment and will
# raise NameError as written. Confirm where w_results should come from.
y_pos = np.arange(len(w_results))
# Labels describing each xgb/rf/svm weighting variant being compared.
weight_variant_names = ["{ 'xgb': 0.33, 'rf': 0.33, 'svm' : 0.34}",
                        "{ 'xgb': 0.9, 'rf': 0.05, 'svm' : 0.05}",
                        "{ 'xgb': 0.8, 'rf': 0.1, 'svm' : 0.1}",
                        "{ 'xgb': 0.5, 'rf': 0.3, 'svm' : 0.2}",
                        "{ 'xgb': 0.3, 'rf': 0.2, 'svm' : 0.5}",
                        "{ 'xgb': 0.3, 'rf': 0.5, 'svm' : 0.2}"]
plt.bar(y_pos, w_results, align='center', alpha=0.5)
plt.xticks(y_pos, weight_variant_names, rotation=90)
plt.ylabel('rmse')
# Zoom the y axis so differences between the variants are visible.
plt.ylim(1300000, 1450000)
plt.title('RMSE of different ensemble model weights')
plt.show()
# -
# Plot the statsmodels OLS in-sample predictions against actual salaries.
ols_preds = ols_fit.predict()
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
acts.Salary.plot(ax=ax, color='C1');
ax.plot(ols_preds, color='C0');
plt.title("Comparison of StatsModels Preds and Actuals");
plt.ylabel('$');
plt.legend(['salary actual', 'ols preds']);
# Thousands separators on the dollar axis.
ax.get_yaxis().set_major_formatter(
    matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.tight_layout()
| Salary Prediction for NHL Players.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pyprobml]
# language: python
# name: conda-env-pyprobml-py
# ---
# # Discrete Probability Distribution Plot
# +
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
try:
from probml_utils import savefig, latexify
except ModuleNotFoundError:
# %pip install -qq git+https://github.com/probml/probml-utils.git
from probml_utils import savefig, latexify
# + tags=["hide-input"]
latexify(width_scale_factor=2)
# +
# Bar graphs showing a uniform discrete distribution and another with full mass on one value.
N = 4
def make_graph(probs, N, save_name, fig=None, ax=None):
    """Bar-plot a discrete distribution over the support {1, ..., N}.

    Parameters
    ----------
    probs : array-like of length N
        Probability mass for each state.
    N : int
        Number of states (also sets the y-axis tick spacing).
    save_name : str
        Figure name for ``savefig``; the empty string disables saving
        (savefig appends the file extension itself).
    fig, ax : optional
        Existing matplotlib figure/axes to draw into; a fresh pair is
        created when no axes is supplied.

    Returns
    -------
    (fig, ax) : the figure and axes drawn into.
    """
    x = jnp.arange(1, N + 1)
    # Guard on ax (not fig): the original checked ``fig is None`` and crashed
    # with ax=None whenever a figure was passed without an axes.
    if ax is None:
        fig, ax = plt.subplots()
    ax.bar(x, probs, align="center")
    ax.set_xlim([min(x) - 0.5, max(x) + 0.5])
    ax.set_xticks(x)
    ax.set_yticks(jnp.linspace(0, 1, N + 1))
    ax.set_xlabel("$x$")
    ax.set_ylabel("$Pr(X=x)$")
    sns.despine()
    if save_name:  # truthiness instead of len(...) > 0
        savefig(save_name)
    return fig, ax
# A uniform distribution assigns probability 1/N to each of the N states.
uniform_probs = jnp.repeat(1.0 / N, N)
_, _ = make_graph(
    uniform_probs, N, "uniform_histogram_latexified"
)  # Do not add .pdf or .png as it is automatically added by savefig method
# A degenerate ("delta") distribution puts all of its mass on one state.
delta_probs = jnp.array([1, 0, 0, 0])
_, _ = make_graph(delta_probs, N, "delta_histogram_latexified");
# -
# ## Demo
#
# You can see different examples of discrete distributions by changing the seed in the following demo.
# +
from ipywidgets import interact
@interact(random_state=(1, 10), N=(2, 10))
def generate_random(random_state, N):
    """Draw and plot a random N-state distribution for the given seed."""
    rng_key = jax.random.PRNGKey(random_state)
    raw = jax.random.uniform(rng_key, shape=(N,))
    # Normalize the raw draws so the masses sum to one.
    probs = raw / jnp.sum(raw)
    fig, ax = make_graph(probs, N, "")
    # Fixed 0.1-spaced ticks regardless of N.
    ax.set_yticks(jnp.linspace(0, 1, 11))
| notebooks/book1/02/discrete_prob_dist_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise Set 8: Introduction to Web Scraping
#
# *Afternoon, August 16, 2018*
#
# In this Exercise Set we shall practice our web-scraping skills utilizing only basic Python. We shall cover variations between static and dynamic pages.
# ## Exercise Section 8.1: Scraping Jobnet.dk
#
# This exercise you get to practice locating the request that the JavaScript sends to get the job data that it builds the joblistings from. You should use the **>Network Monitor<** tool in your browser.
#
# Furthermore you practice spotting how the pagination is done, without clicking on the next page button, but instead changing a small parameter in the URL.
# > **Ex. 8.1.1:** Hit the joblisting webpage here: https://job.jobnet.dk/CV and locate the request that gets the joblisting data using a tool called the **>Network Monitor<**. To do this open the monitor tool and press the search button on the website. Go to the _network_ pane in the monitor and look at the results. Which two _methods_ does your browser use to communicate with the webserver? What does status code 200 mean?
# >
# >> _Hint:_ The network monitor is launched by pressing ctrl+shift+i in most browsers. Filter by XHR files.
# +
# [Answer to Ex. 8.1.1 here]
# -
# > **Ex. 8.1.2.:** Use the `request` module to collect the first page of job postings and unpack the relevant `json` data into a `pandas` DataFrame.
# +
# [Answer to Ex. 8.1.2 here]
# -
# > **Ex. 8.1.3.:** Store and print the 'TotalResultCount' value for later use. Also create a dataframe from the 'JobPositionPostings' field in the json.
# +
# [Answer to Ex. 8.1.3 here]
# -
# > **Ex. 8.1.4:** This exercise is about paging the results. We need to understand the websites pagination scheme. Scroll down the webpage and press the next page button. Describe how the parameters of the url changes as you turn the pages.
#
# +
# [Answer to Ex. 8.1.4 here]
# -
# > **Ex. 8.1.5:** Design a`for` loop using the `range` function that changes this paging parameter in the URL. Use the TotalResultCount parameter from before to define the limits of the range function. Store these urls in a container.
# >
# >**Bonus** Change the SortValue parameter from BestMatch to CreationDate, to make the sorting amenable to updating results daily.
# >
# >> _Hint:_ See that the parameter is an offset and that this relates to the number of results pr. call made.
# +
# [Answer to Ex. 8.1.5 here]
# -
# > **Ex. 8.1.6:** Pick 20 random links using the `choice` function in the `random` module; collect the links using the `request` module. Also use the `time.sleep()` function to limit the rate of your calls. Make sure to save the links already collected in a `set()` container to avoid having to reload links already collected. ***extra***: monitor the time left to completing the loop by using `tqdm.tqdm()` function.
# +
# [Answer to Ex. 8.1.6 here]
# -
# > **Ex. 8.1.7:** Load all the results into a DataFrame.
# +
# [Answer to Ex. 8.1.7 here]
# -
# ## Exercise Section 8.2: Scraping Trustpilot.com
# Now for a slightly more elaborate, yet still simple scraping problem. Here we want to scrape trustpilot for user reviews. This data is very nice since it provides free labeled data (rating) to train a machine learning model to understand positive and negative sentiment.
#
# Here you will practice crawling a website collecting the links to each company review page, and finally locate another behind the scenes JavaScript request that gets the review data in a neat json format.
# > **Ex. 8.2.1:** Visit the https://www.trustpilot.com/ website and locate the categories page. From this page you find links to company listings. Get the category page using the `requests` module and extract each link to a specific category page from the HTML. This can be done using the basic python `.split()` string method. Make sure only links within the ***/categories/*** section are kept, checking each string using the ```if 'pattern' in string``` condition.
# >
# >> *Hint:* The links are relative. You need to add the domain name
# >>
# >> *Hint #2:* You will need to convert the page response to a string, using the `.text` property.
#
# +
# [Answer to Ex. 8.2.1]
# -
# > **Ex. 8.2.2:** Get one of the category section links (any one will do). Write a function to extract the links to the company review pages from the HTML.
#
# +
# [Answer to Ex. 8.2.2]
# -
# > **Ex. 8.2.3:** Figure out how the pagination is done, by following how the url changes when pressing the **next page**-button to obtain more company listings. Write a function that builds links to paging all the company listing results of each category. This includes parsing the number of subpages of each category and changing the correct parameter in the url.
# >
# >> *Hint:* Find the maximum number of result pages, right before the next page button and make a loop change the page parameter of the url.
#
# +
# [Answer to Ex.8.2.3]
# -
# > **Ex. 8.2.4:** Loop through all categories and build the paging links using the above defined function.
# +
# [Answer to Ex.8.2.4]
# -
# > **Ex. 8.2.5:** Randomly pick one of category listing links you have generated, and get the links to the companies listed using the other function defined.
#
# +
# [Answer to Ex.8.2.5]
# -
# > **Ex. 8.2.6:** Visit one of these links and inspect the **>Network Monitor<** to locate the request that loads the review data. Use the requests module to retrieve this link and unpack the json results to a pandas DataFrame.
# >
# >> _Hint:_ Look for a request which sends the link to a file called `jsonId`
# +
#[Answer to Ex.8.2.6]
# -
# Congratulations on coming this far. By now you are almost ready to deploy a scraper collecting all reviews on Trustpilot — you still need to figure out how to page the reviews and how to find the company ID in the HTML.
# If you wanna see just how valuable such data could be visit the follow blogpost: https://blog.openai.com/unsupervised-sentiment-neuron/
| material/session_8/exercise_8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
from datetime import date
from datetime import timedelta
# %run Data.ipynb
# +
#Stock Information
# +
# ticker_info is built in Data.ipynb (pulled in by the %run at the top).
# Basic identity fields for the ticker.
stock_ticker = ticker_info["symbol"]
stock_name = ticker_info["shortName"]
stock_sector = ticker_info["sector"]
stock_industry = ticker_info["industry"]
# Lay the fields out as a one-column table; the transpose makes the metric
# names read down the left-hand side.
stock_information_dic = {
    "Ticker": stock_ticker,
    "Name": stock_name,
    "Sector": stock_sector,
    "Industry": stock_industry,
}
stock_information_df = pd.DataFrame(data=stock_information_dic, index=[""]).T
display(stock_information_df)
# +
#Stock Description
# +
# ticker_info is built in Data.ipynb (pulled in by the %run at the top).
# Long-form company description supplied by the data source.
stock_description = ticker_info["longBusinessSummary"]
# Wrap the text in a one-cell DataFrame purely so it renders as a table.
stock_description_dic= {"" : stock_description}
stock_description_df = pd.DataFrame(data=stock_description_dic, index=[""])
# Fix the cell width/height and center the text for display only.
# NOTE(review): '100x' looks like a typo for '100px' — confirm intended width.
stock_description_df = stock_description_df.style.set_properties(subset=[""], **{'width': '100x', "height": "300px"}, **{'text-align': 'center'})
display(stock_description_df)
# +
#Pricing Info
# +
# ticker_info is built in Data.ipynb (pulled in by the %run at the top).
# Current, previous-close and 50-day-average prices, rounded to cents.
stock_curr_price = round(ticker_info["regularMarketPrice"], 2)
stock_ystdy_price = round(ticker_info["regularMarketPreviousClose"], 2)
stock_50dyavg_price = round(ticker_info["fiftyDayAverage"], 2)
# Day-over-day move, in dollars and in percent.
change_since_ystdy = round(stock_curr_price - stock_ystdy_price, 2)
pct_change_since_ystdy = round((change_since_ystdy / stock_ystdy_price) * 100, 2)
# Table of the above; note the "Yesteday" spelling in the original label has
# been corrected.
stock_price_dic = {
    "Current Price ($)": stock_curr_price,
    "Yesterday Closing Price ($)": stock_ystdy_price,
    "Fifty Day Average Price ($)": stock_50dyavg_price,
    "Change since Yesterday ($)": change_since_ystdy,
    "Change since Yesterday (%)": pct_change_since_ystdy,
}
# One-row frame, transposed so the metric names read down the left side.
stock_price_df = pd.DataFrame(data=stock_price_dic, index=[""]).T
display(stock_price_df)
# +
#Analyst Info
# +
# ticker_info is built in Data.ipynb (pulled in by the %run at the top).
# Analyst coverage and consensus price targets.
number_of_analysts = ticker_info["numberOfAnalystOpinions"]
target_median_price = round(ticker_info["targetMedianPrice"], 2)
target_mean_price = round(ticker_info["targetMeanPrice"], 2)
# Percentage upside of each target relative to the current price.
pct_diff_target_median = round((target_median_price - stock_curr_price) / target_median_price * 100, 2)
pct_diff_target_mean = round((target_mean_price - stock_curr_price) / target_mean_price * 100, 2)
stock_analysts_dic = {
    "# of Analyst Forecasts": number_of_analysts,
    "Median Target Price": target_median_price,
    "% Difference Current Price to Median Target": pct_diff_target_median,
    "Mean Target Price": target_mean_price,
    "% Difference Current Price to Mean Target": pct_diff_target_mean,
}
# One-row frame, transposed so the metric names read down the left side.
stock_analysts_df = pd.DataFrame(data=stock_analysts_dic, index=[""]).T
display(stock_analysts_df)
# +
#Stock Evolution
# +
# The ticker_hist_*_df frames (price history per look-back window) are built
# in Data.ipynb. For each window, grab the closing price and the percentage
# move from it to the current price.
stock_1wk_price = round(ticker_hist_1wk_df["Close"][0], 2)
stock_1wk_change = round(((stock_curr_price - stock_1wk_price) / stock_1wk_price) * 100, 2)
stock_2wk_price = round(ticker_hist_2wk_df["Close"][0], 2)
stock_2wk_change = round(((stock_curr_price - stock_2wk_price) / stock_2wk_price) * 100, 2)
stock_1yr_price = round(ticker_hist_1yr_df["Close"][0], 2)
stock_1yr_change = round(((stock_curr_price - stock_1yr_price) / stock_1yr_price) * 100, 2)
stock_3yr_price = round(ticker_hist_3yr_df["Close"][0], 2)
stock_3yr_change = round(((stock_curr_price - stock_3yr_price) / stock_3yr_price) * 100, 2)
# The dates for each period become the first row of the table below.
stock_today_date = pd.Timestamp.today().date()
stock_yesterday_date = stock_today_date - timedelta(days=1)
stock_1wk_date = ticker_hist_1wk_df["Date"][0].date()
stock_2wk_date = ticker_hist_2wk_df["Date"][0].date()
stock_1yr_date = ticker_hist_1yr_df["Date"][0].date()
stock_3yr_date = ticker_hist_3yr_df["Date"][0].date()
# Bug fixes vs. the original cell:
#  * "stock_yesteday_date" was a typo for stock_yesterday_date (NameError).
#  * the yesterday entry of "% Change" used the dollar move
#    (change_since_ystdy); it now uses the percentage move.
stock_evolution_dic = {
    "Date": [stock_today_date, stock_yesterday_date, stock_1wk_date, stock_2wk_date, stock_1yr_date, stock_3yr_date],
    "Price": [stock_curr_price, stock_ystdy_price, stock_1wk_price, stock_2wk_price, stock_1yr_price, stock_3yr_price],
    "% Change": ["-", pct_change_since_ystdy, stock_1wk_change, stock_2wk_change, stock_1yr_change, stock_3yr_change],
}
# Column labels: one per look-back period.
columns = ["Actual", "Last Close", "1 Week Close", "2 Week Close", "1 Year Close", "3 Year Close"]
stock_evolution_df = pd.DataFrame.from_dict(stock_evolution_dic, orient='index', dtype=None, columns=columns)
stock_evolution_df
# +
#Stock News
# +
#Note the ticker_news_df that we will be mentioning here is located in the Data.ipynb file
#pulls in a DataFrame of ticker_news_df (containing stock news) and saves it to a variable
stock_news_df = ticker_news_df
#bare expression on the last line so the notebook cell displays the DataFrame
stock_news_df
# +
#Stock Points Calculator
#please note that the user can adjust the pointage and criteria to their own liking
#In this section we will rate the stock in six criteria, assigning for each a certain pointage. The criteria are an adaptation of William O'Neill's (How to Make Money in Stocks)
#The pointage is the following
#Size of the Company (20 points, the bigger the company the higher the points)
#Average Volume (20 points, the bigger the volume the higher the points)
#Potential Increase - Calculated by comparing current price vs target price (30 Points - the higher the potential increase the better)
#Net Income Growth - Last Quarter (10 Points - the higher the better)
#Net Income Growth - Last Year (10 Points - the higher the better)
#Institutional Ownership - (10 Points - the higher the better)
# +
#Note that some of the DataFrames that we will be mentioning here are located in the Data.ipynb file
#pulls the market cap from the ticker_info DataFrame
stock_market_cap = ticker_info["marketCap"]
#pulls in the average daily volume (10 days) from the ticker_info DataFrame
stock_avg_volume = ticker_info["averageVolume10days"]
#pct_diff_target_mean
#Calculates the % Earnings Growth since the last quarter and rounds it
#NOTE(review): the divisor here is column 0; a conventional growth percentage divides by
#the *previous* period (column 1), not the most recent one -- confirm which is intended
stock_pct_qtr_earnings = round((((ticker_financials_qt_df.iloc[:,0]["Net Income Applicable To Common Shares"]) - (ticker_financials_qt_df.iloc[:,1]["Net Income Applicable To Common Shares"])) / (ticker_financials_qt_df.iloc[:,0]["Net Income Applicable To Common Shares"]) * 100), 2)
#Calculates the % Earnings Growth in the Last year and rounds it
#NOTE(review): same divisor question as the quarterly calculation above
stock_pct_yr_earnings = round((((ticker_financials_yr_df.iloc[:,0]["Net Income Applicable To Common Shares"]) - (ticker_financials_yr_df.iloc[:,1]["Net Income Applicable To Common Shares"])) / (ticker_financials_yr_df.iloc[:,0]["Net Income Applicable To Common Shares"]) * 100), 2)
#Calculates the percentage held by institutions (heldPercentInstitutions is a 0-1 fraction)
pct_held_institutions = round((ticker_info["heldPercentInstitutions"] * 100), 2)
#Market-cap points: the bigger the company, the more points (largest visible tier: 15)
#BUG FIX: the original chain tested the smallest "greater than" threshold (1T) first,
#which made the 2.5T and 5T branches unreachable, and ended with a bare "elif:" --
#a SyntaxError. Thresholds are now checked from largest to smallest, and the final
#print is unconditional.
points_mc = 0
if stock_market_cap > 5000000000000:
    points_mc = 15
elif stock_market_cap > 2500000000000:
    points_mc = 10
elif stock_market_cap > 1000000000000:
    points_mc = 5
#companies below the smallest threshold (including < 1B) keep the default of 0 points
print(points_mc)
# -
| Tests/Resources_nc_edits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
# %matplotlib inline
# + [markdown] deletable=true editable=true
# # Probability and Statistics Review
#
# Based on "Machine Learning: A Probabilistic Perspective" by <NAME> (Chapter 2).
# + [markdown] deletable=true editable=true
# # What is probability?
#
# * At least two different interpretations:
# * **Frequentist**: probabilities are long-run frequencies of events
# * **Bayesian**: probabilities are used to quantify our **uncertainty**
#
# One advantage of the Bayesian interpretation is that it can be used to model events that do not have long-term frequencies.
#
# + [markdown] deletable=true editable=true
# # A brief review of probability theory
#
# ## Discrete random variables
#
# $p(A)$ denotes the probability that the event $A$ is true
#
# * $0 \leq p(A) \leq 1$
#
# We write $p(\bar{A})$ to denote the probability of the event not $A$
#
# * $p(\bar{A}) = 1 - p(A)$
#
# We can extend the notion of binary events by defining a **discrete random variable** $X$ which can take on any value from a finite or countably infinite set $\mathcal{X}$. We denote the probability of the event that $X = x$ by $p(X = x)$ or just $p(x)$ for short.
#
# * $0 \leq p(x) \leq 1$
# * $\sum_{x \in \mathcal{X}} p(x) = 1$
#
# Let's look at some discrete distributions:
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
# Two side-by-side bar charts of discrete pmfs over the support {1, 2, 3, 4}
fig, ax = plt.subplots(1, 2)
# Left: uniform pmf -- every outcome has probability 1/4
ax[0].bar([1, 2, 3, 4],[0.25, 0.25, 0.25, 0.25], align='center')
ax[0].set_ylim([0, 1])
_ = ax[0].set_xticks([1, 2, 3, 4])  # '_' swallows the return value so it is not echoed
ax[0].set_title('Uniform distribution')
# Right: degenerate pmf -- all of the mass on the single outcome 2
ax[1].bar([1, 2, 3, 4],[0, 1.0, 0, 0], align='center')
ax[1].set_ylim([0, 1])
_ = ax[1].set_xticks([1, 2, 3, 4])
ax[1].set_title('Degenerate distribution')
# + [markdown] deletable=true editable=true
# ## Fundamental rules
#
# ### Probability of a union of two events
#
# Given two events, $A$ and $B$, we define the probability of $A$ or $B$ as
#
# $$
# \begin{align}
# p(A \lor B) &= p(A) + p(B) - p(A \land B) \\
# &= p(A) + p(B) & \text{if $A$ and $B$ are mutually exclusive}
# \end{align}
# $$
#
# ### Joint probabilities
#
# We define the probability of the joint event $A$ and $B$ as
#
# $$
# p(A,B) = p(A \land B) = p(A|B)p(B)
# $$
#
# Given a **joint distribution** on two events p(A,B), we define the **marginal distribution** as
#
# $$
# p(A) = \sum_b p(A,B) = \sum_b p(A|B)p(B)
# $$
#
# ### Conditional probability
#
# We define the **conditional probability** of event $A$, given that event $B$ is true, as
#
# $$
# \begin{align}
# p(A|B) &= \frac{p(A,B)}{p(B)} & \text{if $p(B) > 0$}
# \end{align}
# $$
#
# ## Bayes' rule
#
# Manipulating the basic definition of conditional probability gives us one of the most important formulas in probability theory
#
# $$p(X=x|Y=y) = \frac{p(X=x,Y=y)}{P(Y=y)} = \frac{p(Y=y|X=x)p(X=x)}{\sum_{x'}p(Y=y|X=x')p(X=x')}$$
#
# ## Independence and conditional independence
#
# We say $X$ and $Y$ are **unconditionally independent** or **marginally independent**, denoted $X \perp Y$, if we can represent the joint as the product of the two marginals, i.e.,
#
# $$X \perp Y \Longleftrightarrow p(X,Y) = p(X)p(Y)$$
#
# <img width=400px src="images/pxyGrid.svg">
#
# In general, we say a **set** of variables is mutually independent if the joint can be written as a product of marginals.
#
# We say $X$ and $Y$ are **conditionally independent** given $Z$ iff the conditional joint can be written as a product of conditional marginals:
#
# $$X \perp Y|Z \Longleftrightarrow p(X,Y|Z)=p(X|Z)p(Y|Z)$$
#
# CI assumptions allow us to build large probabilistic models from small pieces.
#
# ## Continuous random variables
#
# Suppose $X$ is some uncertain continuous quantity. The probability that $X$ lies in any interval $a \leq X \leq b$ can be computed as follows. Define the events $A = (X \leq a), B = (X \leq b)$ and $W = (a < X \leq b)$. We have that $B = A \vee W$, and since $A$ and $W$ are mutually exclusive, the sum rule gives
#
# $$p(B) = p(A) + p(W)$$
#
# and hence
#
# $p(W) = p(B) - p(A)$
#
# Define the function $F(q) \triangleq p(X \leq q)$. This is called the **cumulative distribution function** or **cdf** of $X$. This is a monotonically non-decreasing function.
# + deletable=true editable=true
# CDF of Gaussian N(0,1)
import scipy.stats as stats
# Standard normal cumulative distribution function F(x) = p(X <= x)
f = lambda x : stats.norm.cdf(x, 0, 1)
x = np.arange(-3, 3, 0.1)
y = f(x)
plt.plot(x, y, 'b')
plt.title('CDF')
# + [markdown] deletable=true editable=true
# Using the above notation, we have
# $$p(a < X \leq b) = F(b) - F(a)$$
#
# Now define $f(x) = \frac{d}{dx} F(x)$ (we assume this derivative exists); this is called a **probability density function** or **pdf**. Given a pdf, we can compute the probability of a continuous variable being in a finite interval as follows:
#
# $$P(a < X \leq b) = \int_a^b f(x) dx$$
# + deletable=true editable=true
# PDF of Gaussian N(0,1)
# shaded area has 0.05 of the mass
# also written mu +/- 2 \sigma
f = lambda x : stats.norm.pdf(x, 0, 1)
x = np.arange(-4, 4, 0.1)
y = f(x)
plt.plot(x, y, 'b')
# Shade the lower tail below -1.96 (2.5% of the mass)
l_x = np.arange(-4, -1.96, 0.01)
plt.fill_between(l_x, f(l_x))
# Shade the upper tail above 1.96 (the other 2.5%)
u_x = np.arange(1.96, 4, 0.01)
plt.fill_between(u_x, f(u_x))
plt.title('PDF')
# + [markdown] deletable=true editable=true
# We require $p(x) \geq 0$, but it is possible for $p(x)>1$ for any given $x$, so long as the density integrates to 1.
# + deletable=true editable=true
# Example of p(x) > 1, Uniform distribution on (0, 0.5)
# Uniform(0, 0.5) has constant density 1/0.5 = 2 on its support, so the pdf
# exceeds 1 even though it still integrates to 1.
def f(v):
    """Density of Uniform(loc=0, scale=0.5) evaluated at v."""
    return stats.uniform.pdf(v, 0, 0.5)

x = np.arange(-0.5, 1, 0.01)
y = f(x)
plt.plot(x, y, 'b')
plt.title('Uniform PDF')
# + [markdown] deletable=true editable=true
# ## Mean and variance
#
# The most familiar property of a distribution is its **mean**, or **expected value**, denoted by $\mu$. For discrete rv's, it is defined as $\mathbb{E}[X] \triangleq \sum_{x \in \mathcal{X}} x p(x)$, and for continuous rv's, it is defined as $\mathbb{E}[X] \triangleq \int_{\mathcal{X}} x p(x) dx$.
#
# The **variance** is a measure of the "spread" of a distribution, denoted by $\sigma^2$. This is defined as follows:
#
# $$
# \begin{align}
# \text{var}[X] & \triangleq \mathbb{E}\left[ \left( X - \mu\right)^2 \right] = \int \left( x - \mu \right) ^2 p(x) dx \\\
# &= \int x^2 p(x)dx + \mu^2 \int p(x) dx - 2 \mu \int x p(x) dx = \mathbb{E}[X^2] - \mu^2
# \end{align}
# $$
#
# from which we derive the useful result
#
# $$\mathbb{E}[X^2] = \mu^2 + \sigma^2$$
#
# The **standard deviation** is defined as
#
# $$\text{std}[X] \triangleq \sqrt{\text{var}[X]}$$
# + [markdown] deletable=true editable=true
# # Some common discrete distributions
#
# ## The binomial and Bernoulli distributions
#
# Suppose we toss a coin $n$ times. Let $X \in {0, \ldots, n}$ be the number of heads. If the probability of heads is $\theta$, then we say $X$ has a **binomial** distribution, written as $X \sim \text{Bin}(n, \theta)$. The probability mass function (pmf) is given by
#
# $$\text{Bin}(k|n,\theta) \triangleq {n\choose k} \theta^k(1 - \theta)^{n-k}$$
#
# where
# $$ {n\choose k} \triangleq \frac{n!}{(n-k)!k!}$$
#
# is the number of ways to choose $k$ items from $n$.
#
# This distribution has a mean of $n\theta$ and a variance of $n\theta(1-\theta)$.
# + deletable=true editable=true
# Binomial(n=10) pmfs for two success probabilities, side by side
fig, ax = plt.subplots(1, 2)
x = np.arange(11)  # support 0..10
f = lambda x : stats.binom.pmf(x, 10, 0.25)
ax[0].bar(x, f(x), align='center')
#ax[0].set_ylim([0, 1])
_ = ax[0].set_xticks(x)
ax[0].set_title(r'$\theta$ = 0.25')
# Same support, much larger success probability -- mass shifts to the right
f = lambda x : stats.binom.pmf(x, 10, 0.9)
ax[1].bar(x, f(x), align='center')
#ax[1].set_ylim([0, 1])
_ = ax[1].set_xticks(x)
ax[1].set_title(r'$\theta$ = 0.9')
# + [markdown] deletable=true editable=true
# Now suppose we toss a coin only once. Let $X \in {0,1}$ be a binary random variable, with probability of "success" or "heads" of $\theta$. We say that $X$ has a **Bernoulli** distribution. This is written as $X \sim \text{Ber}(\theta)$, where the pmf is defined as
#
# $$\text{Ber}(x|\theta) = \theta^{\mathbb{I}(x=1)}(1-\theta)^{\mathbb{I}(x=0)}$$
#
# In other words,
#
# $$ \text{Ber}(x|\theta) = \left\{
# \begin{array}{rl}
# \theta &\mbox{ if $x=1$} \\
# 1 - \theta &\mbox{ if $x=0$}
# \end{array}
# \right. $$
#
# This is obviously just a special case of a Binomial distribution with $n=1$.
#
# ### The multinomial and multinoulli distribution
#
# To model the outcomes of tossing a $K$-sided die, we can use the **multinomial** distribution. This is defined as follows: let $\mathbf{x}=(x_1, \ldots, x_K)$ be a random vector, where $x_j$ is the number of times side $j$ of the die occurs. Then $\mathbf{x}$ has the following pmf:
#
# $$\text{Mu}(\mathbf{x}|n, \mathbf{\theta}) \triangleq {n \choose x_1,\ldots,x_K} \prod_{j=1}^K \theta_j^{x_j}$$
#
# where $\theta_j$ is the probability that side $j$ shows up, and
#
# $${n \choose x_1,\ldots,x_K} \triangleq \frac{n!}{x_1!x_2! \ldots x_K!}$$
#
# is the **multinomial coefficient** (the number of ways to divide a set of size $n=\sum_{k=1}^K x_k$ into subsets with sizes $x_1$ up to $x_K$).
# + [markdown] deletable=true editable=true
# Now suppose $n=1$. This is like rolling a $K$-sided dice once, so $\mathbf{x}$ will be a vector of 0s and 1s (a bit vector), in which only one bit can be turned on. Specifically, if the dice shows up as face $k$, then the $k$'th bit will be on. In this case, we can think of $x$ as being a scalar categorical random variable with $K$ states (values), and $\mathbf{x}$ is its **dummy encoding**, that is, $\mathbf{x} = \left[\mathbb{I}(x=1),\ldots,\mathbb{I}(x=K)\right]$. For example, if $K=3$, we encode the states 1, 2, and 3 as $(1, 0, 0), (0, 1, 0)$ and $(0, 0, 1)$. This is also called **one-hot encoding**. In this case, the pmf becomes
#
# $$\text{Mu}(\mathbf{x}|1, \mathbf{\theta}) = \prod_{j=1}^K \theta_j^{\mathbb{I}(x_j=1)}$$
#
# This very common special case is known as a **categorical** or **discrete** distribution (<NAME>'s text adopts the term **multinoulli distribution** by analogy with the binomial/Bernoulli distinction). We will use the following notation
#
# $$\text{Cat}(x|\mathbf{\theta}) \triangleq \text{Mu}(\mathbf{x}|1, \mathbf{\theta})$$
# + [markdown] deletable=true editable=true
# # Some common continuous distributions
#
# ## Gaussian (normal) distribution
#
# The most widely used distribution in statistics and machine learning is the Gaussian or normal distribution. Its pdf is given by
#
# $$\mathcal{N}(x|\mu, \sigma^2) \triangleq \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-\frac{1}{2\sigma^2}(x - \mu)^2}$$
#
# where $\mu = \mathbb{E}[X]$ is the mean (and mode), and $\sigma^2 = \text{var}[X]$ is the variance. $\frac{1}{\sqrt{2 \pi \sigma^2}}$ is the normalization constant needed to ensure the density integrates to 1.
#
# We write $X \sim \mathcal{N}(\mu, \sigma^2)$ to denote that $p(X=x) = \mathcal{N}(x|\mu, \sigma^2)$. If $X \sim \mathcal{N}(0,1)$, we say $X$ follows a **standard normal** distribution.
#
# We will sometimes talk about the **precision** of a Gaussian, by which we mean the inverse variance: $\lambda = 1/\sigma^2$.
#
# The Gaussian distribution is the most widely used distribution in statistics. Why?
#
# * It has two parameters that are easy to interpret
# * The central limit theorem tells us that sums of independent random variables have an approximately Gaussian distribution, making it a good fit for modeling residual errors or "noise"
# * The Gaussian distribution makes the least number of assumptions (i.e. has maximum entropy) which makes it a good default choice in many cases
# * It has a simple mathematical form, which results in easy to implement, but often highly effective methods
#
# ## The Student $t$ distribution
#
# One problem with the Gaussian distribution is that it is sensitive to outliers, since the log-probability only decays quadratically with distance from the centre. A more robust distribution is the **Student** $t$ **distribution**. Its pdf is as follows
#
# $$\mathcal{T}(x|\mu, \sigma^2, \nu) \propto \left[ 1 + \frac{1}{\nu} \left( \frac{x-\mu}{\sigma}\right)^2\right]^{-\left(\frac{\nu + 1}{2}\right)}$$
#
# where $\mu$ is the mean, $\sigma^2>0$ is the scale parameter, and $\nu > 0$ is called the **degrees of freedom**.
#
# The distribution has the following properties:
#
# mean = $\mu$, mode = $\mu$, var = $\frac{\nu \sigma^2}{(\nu - 2)}$
#
# The variance is only defined if $\nu > 2$. The mean is only defined if $\nu > 1$. It is common to use $\nu = 4$, which gives good performance in a range of problems. For $\nu \gg 5$, the Student distribution rapidly approaches a Gaussian distribution and loses its robustness properties.
#
# ## The Laplace distribution
#
# Another distribution with heavy tails is the **Laplace distribution**, also known as the **double sided exponential** distribution. This has the following pdf:
#
# $$\text{Lap}(x|\mu,b) \triangleq \frac{1}{2b} \exp \left( - \frac{|x - \mu|}{b}\right)$$
#
# Here $\mu$ is a location parameter and $b>0$ is a scale parameter. This distribution has the following properties:
#
# mean = $\mu$, mode = $\mu$, var = $2b^2$
#
# Not only does it have heavier tails, it puts more probability density at 0 than the Gaussian. This property is a useful way to encourage sparsity in a model, as we will see later.
# + deletable=true editable=true
# Show Gaussian, Student, Laplace pdfs and log pdfs
fig, ax = plt.subplots(2, 1, sharex=True)
g = lambda x : stats.norm.pdf(x, loc=0, scale=1)
# df=1 is the heaviest-tailed Student t (the Cauchy special case)
t = lambda x : stats.t.pdf(x, df=1, loc=0, scale=1)
# scale b = 1/sqrt(2) so the Laplace variance 2*b^2 equals 1, matching the Gaussian
l = lambda x : stats.laplace.pdf(x, loc=0, scale=1/np.sqrt(2))
x = np.arange(-4, 4, 0.1)
ax[0].plot(x, g(x), 'b-', label='Gaussian')
ax[0].plot(x, t(x), 'r.', label='Student')
ax[0].plot(x, l(x), 'g--', label='Laplace')
ax[0].legend(loc='best')
ax[0].set_title('pdfs')
# The log scale exposes tail behaviour: quadratic decay (Gaussian) vs slower decay
ax[1].plot(x, np.log(g(x)), 'b-', label='Gaussian')
ax[1].plot(x, np.log(t(x)), 'r.', label='Student')
ax[1].plot(x, np.log(l(x)), 'g--', label='Laplace')
ax[1].set_title('log pdfs')
# + deletable=true editable=true
# Demonstrate fitting Gaussian, Student, and Laplace to data
# with and without outliers
n = 30 # n data points
np.random.seed(0)  # fixed seed so the demo is reproducible
data = np.random.randn(n)
outliers = np.array([8, 8.75, 9.5])
nn = len(outliers)  # number of outliers (unused below)
nbins = 7           # unused below
# fit each of the models to the data (no outliers)
model_g = stats.norm.fit(data)
model_t = stats.t.fit(data)
model_l = stats.laplace.fit(data)
fig, ax = plt.subplots(2, 1, sharex=True)
x = np.arange(-10, 10, 0.1)
g = lambda x : stats.norm.pdf(x, loc=model_g[0], scale=model_g[1])
t = lambda x : stats.t.pdf(x, df=model_t[0], loc=model_t[1], scale=model_t[2])
l = lambda x : stats.laplace.pdf(x, loc=model_l[0], scale=model_l[1])
# NOTE(review): hist(normed=True) was removed in matplotlib >= 3.1; on a modern
# stack this must become density=True (this notebook declares a Python 2 kernel)
ax[0].hist(data, bins=25, range=(-10, 10),
           normed=True, alpha=0.25, facecolor='gray')
ax[0].plot(x, g(x), 'b-', label='Gaussian')
ax[0].plot(x, t(x), 'r.', label='Student')
ax[0].plot(x, l(x), 'g--', label='Laplace')
ax[0].legend(loc='best')
ax[0].set_title('no outliers')
# fit each of the models to the data (with outliers)
newdata = np.r_[data, outliers] # row concatenation
model_g = stats.norm.fit(newdata)
model_t = stats.t.fit(newdata)
model_l = stats.laplace.fit(newdata)
g = lambda x : stats.norm.pdf(x, loc=model_g[0], scale=model_g[1])
t = lambda x : stats.t.pdf(x, df=model_t[0], loc=model_t[1], scale=model_t[2])
l = lambda x : stats.laplace.pdf(x, loc=model_l[0], scale=model_l[1])
ax[1].hist(newdata, bins=25, range=(-10, 10),
           normed=True, alpha=0.25, facecolor='gray')
ax[1].plot(x, g(x), 'b-', label='Gaussian')
ax[1].plot(x, t(x), 'r.', label='Student')
ax[1].plot(x, l(x), 'g--', label='Laplace')
ax[1].set_title('with outliers')
# + [markdown] deletable=true editable=true
# # Joint probability distributions
#
# A **joint probability distribution** has the form $p(x_1,\ldots,x_D)$ for a set of $D>1$ variables, and models the (stochastic) relationships between the variables. If all the variables are discrete, we can represent the joint distribution as a big multi-dimensional array, with one variable per dimension. However, the number of parameters needed to define such a model is $O(K^D)$, where $K$ is the number of states for each variable.
#
# We can define high dimensional joint distributions using fewer parameters by making conditional independence assumptions. In the case of continuous distributions, an alternative approach is to restrict the form of the pdf to certain functional forms, some of which are examined below.
#
# ## Covariance and correlation
#
# The **covariance** between two rv's $X$ and $Y$ measures the degree to which $X$ and $Y$ are (linearly) related. Covariance is defined as
#
# $$\text{cov}[X,Y] \triangleq \mathbb{E}\left[\left(X - \mathbb{E}[X]\right)\left(Y - \mathbb{E}[Y]\right)\right]=\mathbb{E}[XY] - \mathbb{E}[X]\mathbb{E}[Y]$$
#
# If $\mathbf{x}$ is a $d$-dimensional random vector, its **covariance matrix** is defined to be the following symmetric, positive semi-definite matrix:
#
# $$
# \begin{align}
# \text{cov}[\mathbf{x}] & \triangleq \mathbf{E} \left[\left(\mathbf{x} - \mathbb{E}[\mathbf{x}]\right)\left(\mathbf{x} - \mathbb{E}[\mathbf{x}]\right)^T\right]\\
# & = \left( \begin{array}{ccc}
# \text{var}[X_1] & \text{cov}[X_1, X_2] & \ldots & \text{cov}[X_1, X_d] \\
# \text{cov}[X_2, X_1] & \text{var}[X_2] & \ldots & \text{cov}[X_2, X_d] \\
# \vdots & \vdots & \ddots & \vdots\\
# \text{cov}[X_d, X_1] & \text{cov}[X_d, X_2] & \ldots & \text{var}[X_d]
# \end{array} \right)
# \end{align}
# $$
#
# Covariances can be between $-\infty$ and $\infty$. Sometimes it is more convenient to work with a normalized measure, with finite bounds. The (Pearson) **correlation coefficient** between $X$ and $Y$ is defined as
#
# $$\text{corr}[X,Y] \triangleq \frac{\text{cov}[X,Y]}{\sqrt{\text{var}[X]\text{var}[Y]}}$$
#
# A **correlation matrix** has the form
#
# $$
# \mathbf{R} = \left( \begin{array}{ccc}
# \text{corr}[X_1, X_1] & \text{corr}[X_1, X_2] & \ldots & \text{corr}[X_1, X_d] \\
# \text{corr}[X_2, X_1] & \text{corr}[X_2, X_2] & \ldots & \text{corr}[X_2, X_d] \\
# \vdots & \vdots & \ddots & \vdots\\
# \text{corr}[X_d, X_1] & \text{corr}[X_d, X_2] & \ldots & \text{corr}[X_d, X_d]
# \end{array} \right)
# $$
#
# One can show that $-1 \leq \text{corr}[X,Y] \leq 1$. Hence, in a correlation matrix, each entry on the diagonal is 1, and the other entries are between -1 and 1. One can also show that $\text{corr}[X,Y]=1$ iff $Y=aX + b$ for some parameters $a$ and $b$, i.e. there is a *linear* relationship between $X$ and $Y$. A good way to think of the correlation coefficient is as a degree of linearity.
#
# If $X$ and $Y$ are independent, meaning $p(X,Y)=p(X)p(Y)$, then $\text{cov}[X,Y]=0$, and hence $\text{corr}[X,Y]=0$ so they are uncorrelated. However, the converse is not true: *uncorrelated does not imply independent*. Some striking examples are shown below.
#
# <img src="images/Correlation_examples.png">
#
#
# Source: http://upload.wikimedia.org/wikipedia/commons/0/02/Correlation_examples.png
# + [markdown] deletable=true editable=true
# ## The multivariate Gaussian
#
# The **multivariate Gaussian** or **multivariate normal (MVN)** is the most widely used joint probability density function for continuous variables. The pdf of the MVN in $D$ dimensions is defined by the following
#
# $$\mathcal{N}(\mathbf{x}|\boldsymbol\mu,\mathbf{\Sigma}) \triangleq \frac{1}{(2 \pi)^{D/2}|\mathbf{\Sigma}|^{1/2}} \exp \left[ - \frac{1}{2} \left(\mathbf{x} - \boldsymbol\mu \right)^T \mathbf{\Sigma}^{-1} \left(\mathbf{x} - \boldsymbol\mu\right)\right]$$
#
# where $\boldsymbol\mu = \mathbb{E}[\mathbf{x}] \in \mathbb{R}^D$ is the mean vector, and $\Sigma = \text{cov}[\mathbf{x}]$ is the $D \times D$ covariance matrix. Sometimes we will work in terms of the **precision matrix** or **concentration matrix** instead. This is just the inverse covariance matrix, $\Lambda = \Sigma^{-1}$. The normalization constant $(2 \pi)^{-D/2}|\Lambda|^{1/2}$ ensures that the pdf integrates to 1.
#
# The figure below plots some MVN densities in 2d for three different kinds of covariance matrices. A full covariance matrix has $D(D+1)/2$ parameters (we divide by 2 since $\Sigma$ is symmetric). A diagonal covariance matrix has $D$ parameters, and has 0s on the off-diagonal terms. A **spherical** or **isotropic** covariance, $\Sigma = \sigma^2 \mathbf{I}_D$, has one free parameter.
# + deletable=true editable=true
# plot a MVN in 2D and 3D
import matplotlib.mlab as mlab
from scipy.linalg import eig, inv
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# NOTE(review): mlab.bivariate_normal was removed in matplotlib 3.x; this cell
# needs the old matplotlib that the notebook's Python 2 kernel implies.
# NOTE(review): bivariate_normal's sigmax/sigmay are standard deviations, but the
# covariance entries S[0,0]/S[1,1] (variances) are passed -- confirm this is intended.
delta = 0.05
x = np.arange(-10.0, 10.0, delta)
y = np.arange(-10.0, 10.0, delta)
X, Y = np.meshgrid(x, y)
# A strongly correlated 2x2 covariance matrix
S = np.asarray([[2.0, 1.8],
                [1.8, 2.0]])
mu = np.asarray([0, 0])
Z = mlab.bivariate_normal(X, Y, sigmax=S[0, 0], sigmay=S[1, 1],
                          mux=mu[0], muy=mu[1], sigmaxy=S[0, 1])
#fig, ax = plt.subplots(2, 2, figsize=(10, 10),
#                       subplot_kw={'aspect': 'equal'})
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(2, 2, 1)
CS = ax.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
ax.set_xlim((-6, 6))
ax.set_ylim((-6, 6))
ax.set_title('full')
# Decorrelate: rotate into the eigenbasis of S, which diagonalizes the covariance
[D, U] = eig(S)
S1 = np.dot(np.dot(U.T, S), U)
Z = mlab.bivariate_normal(X, Y, sigmax=S1[0, 0], sigmay=S1[1, 1],
                          mux=mu[0], muy=mu[0], sigmaxy=S1[0, 1])
ax = fig.add_subplot(2, 2, 2)
CS = ax.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
ax.set_xlim((-10, 10))
ax.set_ylim((-5, 5))
ax.set_title('diagonal')
# Whiten: additionally rescale each axis by 1/sqrt(eigenvalue) so the covariance ~ I
A = np.dot(np.sqrt(np.linalg.inv(np.diag(np.real(D)))), U.T)
mu2 = np.dot(A, mu)
S2 = np.dot(np.dot(A, S), A.T) # may not be numerically equal to I
#np.testing.assert_allclose(S2, np.eye(2)) # check
# BUG FIX: "print np.allclose(...)" is Python-2-only syntax and a SyntaxError under
# Python 3; the call form print(...) behaves identically on both interpreters.
print(np.allclose(S2, np.eye(2)))
# plot centred on original mu, not shifted mu
Z = mlab.bivariate_normal(X, Y, sigmax=S2[0, 0], sigmay=S2[1, 1],
                          mux=mu[0], muy=mu[0], sigmaxy=S2[0, 1])
ax = fig.add_subplot(2, 2, 3)
CS = ax.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
ax.set_xlim((-6, 6))
ax.set_ylim((-6, 6))
ax.set_title('spherical')
# demonstration of how to do a surface plot
axx = fig.add_subplot(2, 2, 4, projection='3d')
surf = axx.plot_surface(X, Y, Z, rstride=5, cstride=5, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
axx.set_title('spherical')
| _posts/_code/RBC-DeepLearning-Notebooks/03 - Probability.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import os
import sys
# Point PySpark at the local Spark 2.4.4 install and the JDK 8 runtime
# (hard-coded EC2 paths; this cell must run before the pyspark import below)
os.environ["PYSPARK_PYTHON"] = "/home/ec2-user/spark-2.4.4-bin-hadoop2.7/python"
os.environ["JAVA_HOME"] = "/usr/java/jdk1.8.0_161/jre"
os.environ["SPARK_HOME"] = "/home/ec2-user/spark-2.4.4-bin-hadoop2.7"
os.environ["PYLIB"] = os.environ["SPARK_HOME"] + "/python/lib"
# Make py4j and pyspark importable straight from the Spark distribution
sys.path.insert(0, os.environ["PYLIB"] + "/py4j-0.10.7-src.zip")
sys.path.insert(0, os.environ["PYLIB"] + "/pyspark.zip")
from pyspark.sql import SparkSession
# One SparkSession per notebook; getOrCreate() reuses an existing session if present
spark = SparkSession.builder.appName("GradedQuestions").getOrCreate()
# Load the JSON file from the working directory into a DataFrame
df = spark.read.load("employees_1.json", format = "json")
df.show()
# The data type of the column salary and DOB is ___ and ____ respectively. Fill in the blanks with the correct options.
#
# - integer, string
#
# - string, integer
#
# - string, date
#
# - string, string
# Inspect the inferred column types (this answers the quiz question above)
df.printSchema()
# Qn: In the previous question, you read in a data file with all string columns. Now your task is to add new columns by converting the salary and date to the right data types. From the following, choose the correct code segments to complete your task.
#
# More than one option can be correct.
#
# - `df = df.withColumn('salary_int',df["salary"].cast(Integer()))`
#
# - `df = df.withColumn('salary_int',df["salary"].cast(IntegerType()))`
#
# - `df = df.withColumn('DOB_date',df["DOB"].cast(DateType()))`
#
# - `df = df.withColumn('DOB_date',df["DOB"].cast(Date()))`
#
# +
from pyspark.sql.types import IntegerType
from pyspark.sql.types import DateType
# Add typed copies of the string columns; cast() yields a new Column and
# withColumn() a new DataFrame (Spark DataFrames are immutable)
df = df.withColumn('salary_int',df["salary"].cast(IntegerType()))
df = df.withColumn('DOB_date',df["DOB"].cast(DateType()))
# -
# Qn: In the earlier question, you converted the salary and the date columns to the right data types. Now to save the dataframe with only the two corrected columns (salary_int, DOB_date) as a csv file on the EC2 instance, which of the following code snippets will be correct? More than one option can be correct.
#
#
# - df.write.csv('emplyee_corrected', Header = True)
#
# - df.select('salary_int','DOB_date').write.csv('emplyee_corrected.csv')
#
# - df.select('salary_int','DOB_date').write.option("header", "true").csv('emplyee_corrected.csv')
#
# - df.write.option("header", "true").csv('emplyee_corrected.csv')
# Write only the two corrected columns; each call creates a directory of part files.
# The second call additionally writes a header row.
df.select('salary_int','DOB_date').write.csv('emplyee_corrected.csv')
df.select('salary_int','DOB_date').write.option("header", "true").csv('emplyee_corrected1.csv')
| Course_4-Big_Data_Processing_using_Apache_Spark/Module_2-Spark_Structured_APIs/3-Structured_APIs_Usage/Graded_Questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JHORIZ-RODEL-AQUINO/CPEN-21A-CPE-1-2/blob/main/Control_Structure.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="5iL-L7HHG6Jv"
# ##If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="exevpb4GIDUo" outputId="9f176f4f-ce3b-458c-fcdb-349eadaa2b2c"
# A basic if: the indented body runs only when the condition is true
a = 12
b = 100
if b > a:
    print("b is greater than a")
# + [markdown] id="LDwv_KvQIext"
# ##Elif Statement
# + colab={"base_uri": "https://localhost:8080/"} id="wDOE_syEIfMr" outputId="1a33a855-f211-46c5-80f9-b7dd746224a6"
# elif supplies a second condition, tested only if the first one fails
a = 120
b = 100
if b > a:
    print("b is greater than a")
elif a > b:
    print("a is greater than b")
# + [markdown] id="-9eYI2DsI6S0"
# ##Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="96cPsSGNJP2O" outputId="928aa87d-278b-4e13-e9e6-ee70fdacdbad"
# else is the catch-all branch taken when no preceding condition was true
a = 100
b = 100
if b > a:
    print("b is greater than a")
elif a > b:
    print("a is greater than b")
else:
    print("a and b are equal")
# + [markdown] id="SfIvYi68JlWc"
# ##Short Hand If Statement
# + colab={"base_uri": "https://localhost:8080/"} id="JrXuMQlXJy1S" outputId="73ecfccd-fe66-4629-ee2f-1eb143691240"
# A single-statement body may share the line with the if
a = 45
b = 30
if a > b: print("a is greater than b")
# + [markdown] id="l-ut0rveKLOH"
# ##Short Hand Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="Ow2OUWiTKLnW" outputId="e8184871-6f3c-4792-e742-b8d77ac57fab"
# Conditional (ternary) expression: value_if_true if condition else value_if_false
a = 30
b = 45
print("a is greater than b") if a > b else print("b is greater than a")
# + [markdown] id="kMPnG0MZLFA2"
# ##And Logic Condition
# + colab={"base_uri": "https://localhost:8080/"} id="8BltG4swLNUt" outputId="4af29184-33e5-4040-a843-cafd9240b3b6"
# "and" requires both comparisons to hold
x = 7
if x > 5 and x > 6:
    print("Both conditions are True")
# + [markdown] id="x7makQOELZ4l"
# ##Or Logic Condition
# + colab={"base_uri": "https://localhost:8080/"} id="ibfixG_bLelr" outputId="6372b58d-1cb8-45ed-f28a-a21df3da853b"
# "or" requires at least one comparison to hold; x = 18 satisfies neither
x = 18
if x > 18 or x < 15:
    print("True")
else:
    print("False")
# + [markdown] id="XLSvgdN0NGQy"
# ##Nested If
# + colab={"base_uri": "https://localhost:8080/"} id="1v6LVNPGNiRC" outputId="12792c08-2511-4752-ccc7-7199ab3b1795"
# An if statement can contain another if statement inside its body
x = 21
if x > 10:
    print("Above 10")
    if x > 20:
        print("Above 20")
    else:
        print("below 20")
else:
    print("less than 10")
# + [markdown] id="AndB0oO4Nlwj"
# ##Example 1
# + colab={"base_uri": "https://localhost:8080/"} id="rCmVR877NsAC" outputId="7e333aaa-38d4-4a38-d230-b4e5caa34488"
# Read the age as an int; int(input(...)) raises ValueError on non-numeric input
age = int(input("Enter your age:"))
if age >= 18:
    print("You are qualified to vote")
else:
    print("You are not qualified to vote")
# + [markdown] id="XZ7TcPeXN1V_"
# ##Example 2
# + colab={"base_uri": "https://localhost:8080/"} id="NkusADz3N2zS" outputId="a01a7acf-65d5-447a-dc75-4f1b57f15ba9"
# Classify a number as zero, positive, or negative
number = float(input("Enter the number:"))
if number == 0:
    print("Zero")
elif number > 0:
    print("Positive")
else:
    print("Negative")
# + [markdown] id="eI6Hae4iSa_w"
# ##Example 3
# + colab={"base_uri": "https://localhost:8080/"} id="-_4B8bgoSdf5" outputId="9376cd06-35f9-417a-d587-42c69236776f"
# Grade bands: >= 75 passed, [74, 75) remedial, below 74 failed
grade = float(input("Enter your grade: "))
if grade >= 75:
    print("Passed")
elif grade >= 74 and grade < 75:
    print("Remedial")
else:
    print("Failed")
| Control_Structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "skip"}
# Make Jupyter display every bare expression in a cell, not just the last one.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# + [markdown] slideshow={"slide_type": "slide"}
#
# # Python 3
# ## ัััะพะบะธ, ะฑะฐะนัั, ัะฐะฑะพัะฐ ั ัะฐะนะปะฐะผะธ
#
# MIPT 2020
#
# + slideshow={"slide_type": "subslide"}
s = "string"
# CPython interns short string literals, so both literals share one object.
# NOTE(review): `is` between literals is an implementation detail (and a
# SyntaxWarning on Python >= 3.8); use == for value comparison in real code.
"string" is 'string'
id("string") == id('string')
id("string")
s[1]     # indexing yields a 1-character string
s[2:4]   # slicing copies a substring
# -
# Large run-time-built strings are generally NOT interned.
'aa' * 10000 is "aa" * 10000
s += "abadaba"   # strings are immutable: += rebinds s to a new object
a = ""
for i in range(10):
    a += "bb"
    print(id(a))   # id typically changes each iteration - a new object every time
a = ""
for i in range(10):
    a += "bb"
print(a)
print(id(a))
# + slideshow={"slide_type": "-"}
# Intentional demo: strings are immutable, item assignment raises TypeError.
s[1] = 2
# + slideshow={"slide_type": "subslide"}
s = list("string1")   # convert to a list to get a mutable sequence
s[0] = "S"
' '.join(s)
s
# -
words = {'some', 'order', 'words'}   # sets have no defined order
a = [x for x in words]
' '.join(a)
# + slideshow={"slide_type": "subslide"}
words = "A few words".split()   # split on any whitespace
words
' '.join(words)
# + slideshow={"slide_type": "subslide"}
"str" + "ing"   # concatenation
"a" * 10        # repetition
# + slideshow={"slide_type": "subslide"}
# Intentional demo: mixing str and int raises TypeError.
"str" + 10
# -
s = 'abadaba'
'abada' in s   # substring membership test
# + slideshow={"slide_type": "subslide"}
', '.join(dir("string"))   # list all str methods
# + slideshow={"slide_type": "subslide"}
"Word".lower()
"Word".upper()
"Word".swapcase()
"word lala".title()                # capitalizes every word
"word lala another".capitalize()   # capitalizes only the first character
# + slideshow={"slide_type": "subslide"}
" aaa ".rstrip()    # strip trailing whitespace
" aaa ".lstrip()    # strip leading whitespace
" aaa \t".strip()   # strip both ends, tabs included
# + slideshow={"slide_type": "subslide"}
"aaaaa".replace("aa", "2a")   # non-overlapping, left-to-right
# + slideshow={"slide_type": "subslide"}
"abc".startswith("a")
"abc".endswith("c")
# + slideshow={"slide_type": "subslide"}
"aaa\nbb\n\nc".split()        # any whitespace; empty pieces dropped
"aaa\nbb\n\nc".splitlines()   # split on line breaks; keeps empty lines
"aaa\nbb\n\nc".split('\n')    # explicit separator; keeps empty pieces
# + slideshow={"slide_type": "subslide"}
strings = ["a", "1", "1.0", " ", "\t"]
for s in strings:
    print(f"'{s}'", s.isalpha(), s.isdigit(), s.isspace(), sep="\t")
# + [markdown] slideshow={"slide_type": "slide"}
# ## String formatting
# + slideshow={"slide_type": "-"}
f"2 + 2 = {2 + 2}"         # f-string: expression evaluated in place
"2 + 2 = {}".format(2+2)   # str.format
"2 + 2 = %s" % (2+2) # very old variant
# -
# Positional indices may repeat and need not start at 0.
"{1} {2} {1} {3}".format(1, 2, 3, 4)
# + slideshow={"slide_type": "subslide"}
i = 1
s = "s"
d = {}
f"a {s} {i}"
f"{ 'inner:' + s }"   # arbitrary expressions are allowed inside the braces
f"{3.1415:.2f}"       # format spec after the colon
f"{{}}"               # doubled braces escape to literal {}
f"{'{}'}"             # ...or embed them via a string literal
# -
# Generators have no custom repr/str - both show <generator object ...>.
repr(x for x in range(5))
str(x for x in range(5))
# + slideshow={"slide_type": "subslide"}
s = "{} {}"
i = 1
s.format(i, s)
s.format(1, 2)
"{1} {0} {2} {0}".format("first", "second", "third")
"{:.2f}".format(3.1415)
"{{}} {}".format("a")
# Intentional demo: {x} has no positional or keyword match -> KeyError.
"x = {x}, y = {y}".format(1, y=3)
# + [markdown] slideshow={"slide_type": "slide"}
# ## string
# +
import string
string.ascii_letters
string.ascii_lowercase
string.ascii_uppercase
string.digits
string.hexdigits
string.octdigits
string.whitespace
# +
import sys
sys.getsizeof("abbb๐๐๐๐")
# + slideshow={"slide_type": "slide"}
smile = "๐"
'a'.encode()
'a'.encode('utf-32')
smile.encode()
smile.encode('utf-8')
smile.encode('utf-16')
smile.encode('utf-32')
smile.encode("ascii")
# + slideshow={"slide_type": "subslide"}
"string".encode("utf-8").decode("utf-8")
smile = "smile: ๐"
a = smile.encode('ascii', errors="replace") # errors='ignore'
a.decode('ascii')
# -
a = bytearray()
a.extend(b'hey there')
a[5]
ord('h'), ord('๐')
chr(128522)
# + slideshow={"slide_type": "slide"}
# Write "Hello!" encoded as UTF-16 little-endian.
with open("hello_utf16le.txt", encoding="utf-16le", mode="w") as file:
    file.write("Hello!")
# Read the raw bytes back in binary mode.
file = open("hello_utf16le.txt", "rb")
binary = file.read()
file.close()
binary
binary2 = b'\x00H\x00e\x00l\x00l\x00o\x00!'
binary.decode('utf-16le')
# The same byte pairs in the opposite byte order decode as big-endian.
binary2.decode('utf-16be')
import io  # BUG FIX: io.SEEK_END is used below, but in the original file
           # `import io` only appears in a later cell -> NameError here.

# w+b: binary read/write, truncating any existing file.
with open("hello.txt", mode="w+b") as file:
    file.write(b"Hello!")
    file.tell()                  # position after the write: 6
    file.seek(0)
    file.read()                  # b'Hello!'
    file.seek(-2, io.SEEK_END)   # seek relative to the end (binary mode only)
    file.read()                  # the last two bytes: b'o!'
# + [markdown] slideshow={"slide_type": "-"}
# modes:
#
# * w - write
# * r - read
# * a - append
# * w+ - read and write
# * x - create new file, error if exists
# * t - text mode (default)
# * b - binary mode
# +
import io
# io.open is an alias of the builtin open.
with io.open("io_hello", "w") as out:
    out.write("the same")
# +
# StringIO: an in-memory text file.
ss = io.StringIO("init value\nsecond value\n")
print(ss.readline().strip())
for line in ss.readlines():   # reads the remaining lines
    print(line)
ss.getvalue()   # the whole buffer, regardless of the read position
ss.close()
# StringIO is a context manager too.
with io.StringIO("init value\nsecond value\n") as ss:
    print(ss.readline().strip())
    for line in ss.readlines():
        print(line)
    ss.getvalue()
| seminars/05_bytes_files/Seminar_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1 - Straits Times Headlines Cleaning
# This notebook takes in the scraped URLS of all news articles from the sitemap of https://straitstimes.com and processes the data to generate a dataset of news article headlines to train a neural network to generate its own headlines. Also generates a cleaned dataset for EDA.
# +
import pandas as pd
import numpy as np
import json
# Show long URLs/headlines without truncation.
pd.options.display.max_colwidth = 150
df = pd.read_csv("../data/stsitemap_20191001-093746.csv")
# Drop the first row (presumably a non-article sitemap entry - TODO confirm
# against the raw CSV).
df = df.drop(df.index[0])
# -
df.sample(5)
# #### Drop URLs not from Straits Times
# Inspect, then drop, rows whose URL is not on the straitstimes.com domain.
df.loc[df['url'].str.contains('https://www.straitstimes.com/')==False]
df.drop(df.loc[df['url'].str.contains('https://www.straitstimes.com/')==False].index, inplace=True)
print(f'Total Number of URLS: {df.shape[0]}')
# #### Generate headlines and categories from URLs
def clean_headline(url):
    '''Derive a human-readable headline from an article URL.

    Takes the last path segment (the slug), splits it on hyphens, and
    upper-cases the first letter of each piece. On any failure the URL
    is returned unchanged after logging the error.
    '''
    try:
        slug = url.split('/')[-1]
        capitalized = []
        for word in slug.split('-'):
            capitalized.append(word[0].upper() + word[1:] if len(word) > 0 else word)
        return ' '.join(capitalized)
    except Exception as err:
        print(url, err)
        return url
def clean_category(url):
    '''Derive a category path from an article URL.

    Strips the site prefix, drops the final path segment (the article
    slug), and upper-cases the first letter of every remaining segment.
    On any failure the URL is returned unchanged after logging.
    '''
    try:
        path = url.replace('https://www.straitstimes.com/','')
        segments = path.split('/')[:-1]
        titled = [seg[0].upper() + seg[1:] if len(seg) > 0 else seg for seg in segments]
        return '/'.join(titled)
    except Exception as err:
        print(url, err)
        return url
# #### Test functions on one sample
sample = df['url'].sample(1).iloc[0]
print(f'URL: {sample}')
print(f'Headline: {clean_headline(sample)}')
print(f'Category: {clean_category(sample)}')
# #### Generate headlines for all URLs
df['Headline'] = df['url'].apply(lambda x: clean_headline(x))
df['Headline'].sample(10)
# #### Check and remove duplicates
df['Headline'].duplicated().value_counts()
# Keep the first occurrence of each headline, drop the rest.
df.drop(df['Headline'].loc[df['Headline'].duplicated()].index, inplace=True)
# #### Generate categories for all URLs
df['Category'] = df['url'].apply(lambda x: clean_category(x))
df['Category'].sample(10)
# #### Create word count and character count features
# +
def count_words(text):
    """Return the number of whitespace-separated words in *text*.

    Uses str.split() with no argument, so runs of spaces and
    leading/trailing whitespace do not produce phantom empty "words"
    (the original text.split(' ') counted those), and an empty string
    counts as 0 words.
    """
    return len(text.split())
def count_chars(text):
    """Return the length of *text* in characters, whitespace included."""
    return len(text)
df['Wordcount'] = df['Headline'].apply(lambda x: count_words(x))
df['Charcount'] = df['Headline'].apply(lambda x: count_chars(x))
# -
# #### Inspect 95th percentile for word count
df['Headline'].loc[df['Wordcount'] >= df['Wordcount'].quantile(0.95)].sample(10)
# #### Inspect Headlines with the least words (5th percentile)
df['Headline'].loc[df['Wordcount'] <= df['Wordcount'].quantile(0.05)].sample(10)
# Inspecting headlines with the least words reveals many headlines that are in fact names of recurring news segments such as 'Next 48 Hours' and 'On Facebook'. These segments were not duplicates because they are appended with an increment version number at the end of the sub-headline. They also tend to be short in word count.
# We will attempt to remove these recurring news segments from the dataset.
# Titles that end with a number are likely repetitive sub-headlines that represent
# recurring news segments such as "Next 48 Hours" and "Punchlines ..."
# Remove all these segment titles.
df['last_word_digit'] = df['Headline'].apply(lambda x: x.split(' ')[-1].isdigit()) # True if last word is digit
df['Headline'].loc[(df['Wordcount'] <= df['Wordcount'].quantile(0.05)) & (df['last_word_digit'])].sample(10)
# #### Drop the recurring news segments
# Only short headlines (<= 5th percentile word count) whose last token is a
# number are treated as segment titles.
df.drop(df['Headline'].loc[(df['Wordcount'] <= df['Wordcount'].quantile(0.05)) & (df['last_word_digit'])].index, inplace=True)
# #### Remove Other Repetitive headlines
# index[1:] keeps the first occurrence of each recurring series and drops the rest.
df.drop(df['Headline'].loc[df['Headline'].str.contains("Singapore Shares Open")].index[1:], inplace=True)
df.drop(df['Headline'].loc[df['Headline'].str.contains("The Straits Times News In A Minute ")].index[1:], inplace=True)
df.drop(df['Headline'].loc[df['Headline'].str.contains("Top Stories From The Straits Times ")].index[1:], inplace=True)
# #### Shuffle rows
df = df.sample(frac=1).reset_index(drop=True)
# #### Export cleaned dataset for training
with open('../data/st_headlines.txt', 'w') as f:
    f.write(df['Headline'].to_csv(index=False, header=False))
# #### Export mappings from text to id and vice versa
# Character-level vocabulary: every distinct character in the corpus, sorted.
with open('../data/st_headlines.txt', 'r') as f:
    vocab = sorted(set(f.read()))
with open('../data/st_char2idx.txt', 'w') as char2idx_f:
    char2idx = json.dumps({u:i for i, u in enumerate(vocab)})
    char2idx_f.write(char2idx)
with open('../data/st_idx2char.txt', 'w') as idx2char_f:
    idx2char = json.dumps(vocab)   # index -> char is just the sorted list
    idx2char_f.write(idx2char)
# #### Export cleaned dataset for EDA
df[['Headline', 'Category', 'Wordcount', 'Charcount']].to_csv("../data/st_sitemap_clean.csv", index=False)
| notebooks/1_st_headlines_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Structured dtype describing one experiment row of Param_Change.csv.
# The *_50 columns hold attack accuracies (0.5 = random guessing).
dt = np.dtype([('instance_no', int),
               ('exp_no', int),
               ('method', int), # 1 = white box, 2 = euclidean_PCA, 3 = hog, 4 = euclidean_PCA category, 5 = hog category, 6 = ais
               ('pca_n', int),
               ('percentage_of_data', float),
               ('percentile', float),
               ('mc_euclidean_no_batches', int), # presumably the MC batch count for the euclidean attack - TODO confirm
               ('mc_hog_no_batches', int),       # presumably the MC batch count for the HOG attack - TODO confirm
               ('sigma_ais', float),
               ('mc_attack_log', float),
               ('mc_attack_eps', float),
               ('mc_attack_frac', float),
               ('mc_attack_log_50', float),
               ('mc_attack_eps_50', float),
               ('mc_attack_frac_50', float),
               ('white_box_50', float),
               ('white_box_11', float),
               ('ais_50', float),
               ('ais_acc_rate_50', float)
               ])
# +
# Load the experiment table and derive membership-inference advantages:
# advantage = 2 * (attack accuracy - 0.5), so 0 means random guessing.
data = pd.DataFrame(np.loadtxt('Param_Change.csv', dtype=dt))
data = data.assign(mem_inf_adv_mc_log = lambda x: (x.mc_attack_log_50-0.5)*2)
data = data.assign(mem_inf_adv_mc_ones = lambda x: (x.mc_attack_eps_50-0.5)*2)
data = data.assign(mem_inf_adv_wb = lambda x: (x.white_box_50-0.5)*2)
data = data.assign(mem_inf_adv_ais = lambda x: (x.ais_50-0.5)*2)
def convert_to_set_prob(x):
    """Map membership-advantage values to set-membership probabilities.

    Positive advantage -> 1.0, zero -> 0.5, negative -> 0.0.
    Returns a float ndarray with one entry per element of *x*.
    """
    values = np.asarray(x)
    return np.where(values > 0, 1.0, np.where(values == 0, 0.5, 0.0))
# Set-level accuracy: 1 if the advantage is positive, 0.5 if zero, else 0.
data = data.assign(set_accuracy_mc_log = lambda x: convert_to_set_prob(x.mem_inf_adv_mc_log))
data = data.assign(set_accuracy_mc_ones = lambda x: convert_to_set_prob(x.mem_inf_adv_mc_ones))
data = data.assign(set_accuracy_wb = lambda x: convert_to_set_prob(x.mem_inf_adv_wb))
data = data.assign(set_accuracy_ais = lambda x: convert_to_set_prob(x.mem_inf_adv_ais))
# Rescale set accuracies back to advantages in [-1, 1].
data = data.assign(set_mem_inf_adv_mc_log = lambda x: 2*x.set_accuracy_mc_log-1)
data = data.assign(set_mem_inf_adv_mc_ones = lambda x: 2*x.set_accuracy_mc_ones-1)
data = data.assign(set_mem_inf_adv_wb = lambda x: 2*x.set_accuracy_wb-1)
data = data.assign(set_mem_inf_adv_ais = lambda x: 2*x.set_accuracy_ais-1)
# Per-method subsets (method codes documented in the dtype definition).
white_box = data[data.method == 1]
pca_cat = data[data.method == 4]
hog_cat = data[data.method == 5]
ais = data[data.method == 6]
# Method 42 is not in the dtype legend; per the heading below it marks the
# reconstruction attack.
rec_attack = data[data.method == 42]
# -
# # Reconstruction Attack
#
print(len(rec_attack))
print(rec_attack.mc_attack_eps_50.mean())
print(rec_attack.set_accuracy_mc_ones.mean())
# Mean / std of attack accuracy per batch count, for plotting below.
plot_rec = rec_attack[['mc_euclidean_no_batches', 'mc_attack_eps_50']].groupby(['mc_euclidean_no_batches']).mean().reset_index()
rec_attack[['mc_euclidean_no_batches', 'mc_attack_eps_50']].groupby(['mc_euclidean_no_batches']).std()
# +
# LaTeX text rendering for publication-quality figure labels.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams.update({'font.size': 20})
f = plt.figure(figsize=(14, 6))
ax = plt.subplot()
# Mean attack accuracy vs. number of MC batches.
plt.plot(plot_rec.mc_euclidean_no_batches, plot_rec.mc_attack_eps_50, linewidth=2)
plt.xlabel('n', fontsize='large')
# plt.ylabel('Accuracy (\\%)', fontsize='large')
plt.grid(True)
# plt.legend()
plt.tight_layout(w_pad=1)
ax.set_xscale("log")   # batch counts span orders of magnitude
# .pgf output for direct inclusion in a LaTeX document.
plt.savefig('Scatter_Bagging.pgf', bbox_inches="tight")
# -
| Monte-Carlo-Attacks/Monte-Carlo-MNIST_CVAE/Evaluation_Param_n/.ipynb_checkpoints/Evaluation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sjanwalkar/Sentiment-Analysis/blob/main/Sentiment_Ana.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="MqOESdCHTxTB"
# # Sentiment Analysis
# + id="JpWnpsqtUAYF"
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# + id="N5Cv-XFQUIYu"
data = pd.read_csv("/content/drive/MyDrive/Tweets.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 506} id="T8r1aYUEWLeg" outputId="3c379f03-2ff2-4f31-d804-b53c0fc5e537"
# Shuffle the rows. BUG FIX: the original called sample()/reset_index()
# without assigning the result, so the DataFrame was never actually shuffled.
data = data.sample(frac=1).reset_index(drop=True)
print(data.shape)
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="ZJtw0apCYWpg" outputId="7993696a-fcd8-4c98-d511-22f164562075"
# Just selecting airline sentiments and tweet text, removing all other columns
data = data[["airline_sentiment","text"]]
data
# + [markdown] id="jBnxhGp6kw7x"
# Data Exploratory analysis
# + colab={"base_uri": "https://localhost:8080/"} id="28nY2TTtk3iL" outputId="734558b0-4b04-4d51-eae5-16f0279344b1"
data.airline_sentiment.unique()
# + id="KI8LfdC6lAOV"
# Class distribution of the sentiment labels.
sentiment_counts = pd.DataFrame(data['airline_sentiment'].value_counts())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="D94L64HhlQtZ" outputId="5e47b734-7ba9-4d7c-e0c3-40a7c35c3aab"
# Plotting
plt.bar(sentiment_counts.index, sentiment_counts['airline_sentiment'])
# + colab={"base_uri": "https://localhost:8080/", "height": 500} id="abANNVejl7ZE" outputId="a4dc8766-1bbc-4ca3-bf0b-13bee185ea34"
# Histogram of tweet lengths in characters.
plt.figure(figsize=(10,8))
data['text'].str.len().plot.hist()
# + [markdown] id="LsvBfgWwmVGi"
# Preprocessing
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="JpgoD9_VmZVS" outputId="0a435600-21a9-4b56-86c7-264adf7b0f5a"
# Strip the airline @-handles so the model cannot key on which airline
# was mentioned.
data['text']=data['text'].str.replace('@VirginAmerica', '')
data['text']=data['text'].str.replace('@united', '')
data['text']=data['text'].str.replace('@SouthwestAir', '')
data['text']=data['text'].str.replace('@JetBlue', '')
data['text']=data['text'].str.replace('@USAirways', '')
data['text']=data['text'].str.replace('@AmericanAir', '')
data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="nMci1k-SpeJK" outputId="92ffdc4d-4f6b-4431-d767-a817f1b13141"
# Lowercase everything and keep only alphanumerics and whitespace.
data['text'] = data['text'].apply(lambda x :x.lower())
data['text'] = data['text'].apply(lambda x : re.sub('[^a-zA-Z0-9\s]','', x))
data['text'].head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="CzqvB7A-rpdX" outputId="aa0e0709-1114-4e13-c34a-aaa8526ce88d"
# Keep the 5000 most frequent tokens and zero-pad sequences to equal length.
tokenizer = Tokenizer(num_words=5000, split=" ")
tokenizer.fit_on_texts(data['text'].values)
X = tokenizer.texts_to_sequences(data['text'].values)
X = pad_sequences(X)
X[:5]
# + [markdown] id="8NJu8qhfuENo"
# Creating Model
# + id="iEBeg1MOuGm0"
# Embedding -> two stacked LSTMs -> 3-way softmax (one unit per sentiment class).
model = Sequential()
model.add(Embedding(5000, 256, input_length= X.shape[1]))
model.add(Dropout(0.3))
# return_sequences=True so the second LSTM receives the full sequence.
model.add(LSTM(256, return_sequences=True, dropout=0.3, recurrent_dropout=0.2))
model.add(LSTM(256, dropout=0.3, recurrent_dropout=0.2))
model.add(Dense(3, activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="awOIfl5huVzC" outputId="a8666cfb-5c05-4655-aef1-54946d3c3069"
# Categorical cross-entropy matches the one-hot labels built below.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# + id="4EKgzYL5vizO"
# One-hot encode the labels; get_dummies orders columns alphabetically.
y = pd.get_dummies(data['airline_sentiment'])
# + id="7hAuYW_2vq_p"
# 80/20 train/test split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=0)
# + [markdown] id="g8sktjR0v8iG"
# Training Model
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="dOfIlqO9v7e_" outputId="a11da992-6f67-41a3-dd2e-1a1ac5a050ab"
batch_size =32
epoch = 8
model.fit(X_train, y_train, epochs=epoch, batch_size=batch_size, verbose=2)
# + [markdown] id="LS6GyIhjRHnN"
# Testing model
# + id="308qoPYMRJzN"
predictions = model.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="7U4Wsq7OmQmY" outputId="a2133bda-0204-4414-e39b-052d14a1c9a9"
# Tally predicted classes. Class indices follow the one-hot column order -
# presumably 0=negative, 1=neutral, 2=positive (alphabetical); TODO confirm.
pos_count, neu_count, neg_count = 0, 0, 0
real_pos, real_neu, real_neg = 0, 0, 0
# BUG FIX: the original iterated over the undefined name `prediction`
# (NameError); the variable created above is `predictions`.
for i, pred in enumerate(predictions):
    if np.argmax(pred)==2:
        pos_count+=1
    elif np.argmax(pred)==1:
        neu_count+=1
    else:
        neg_count+=1
print("Predict Pos Count = {} ".format(pos_count))
print("Predict Neutral Count = {} ".format(neu_count))
print("Predict Negative Count = {} ".format(neg_count))
# + id="mEYEfusBrmre"
# Align y_test's index with the 0-based order of the predictions array.
y_test.reset_index(drop=True, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="uwHcP5e9r1MB" outputId="99f4d515-9b46-4b85-993b-b7b97709675d"
y_test
# + id="00hWExXcxLPv"
| Sentiment_Ana.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Python Text Basics Assessment - Solutions
#
# Welcome to your assessment! Complete the tasks described in bold below by typing the relevant code in the cells.
# ## f-Strings
# #### 1. Print an f-string that displays `NLP stands for Natural Language Processing` using the variables provided.
# +
abbr = 'NLP'
full_text = 'Natural Language Processing'
# Enter your code here:
print(f'{abbr} stands for {full_text}')
# -
# ## Files
# #### 2. Create a file in the current working directory called `contacts.txt` by running the cell below:
# %%writefile contacts.txt
First_Name Last_Name, Title, Extension, Email
# #### 3. Open the file and use .read() to save the contents of the file to a string called `fields`. Make sure the file is closed at the end.
# +
# Write your code here:
# The with-statement guarantees the file is closed on exit.
with open('contacts.txt') as c:
    fields = c.read()
# Run fields to see the contents of contacts.txt:
fields
# -
# -
# ## Working with PDF Files
# #### 4. Use PyPDF2 to open the file `Business_Proposal.pdf`. Extract the text of page 2.
# +
# Perform import
import PyPDF2
# Open the file as a binary object
f = open('Business_Proposal.pdf','rb')
# Use PyPDF2 to read the text of the file
# NOTE(review): PdfFileReader/getPage/extractText are the legacy PyPDF2 API;
# pypdf >= 3 renamed these to PdfReader / .pages[i] / extract_text().
pdf_reader = PyPDF2.PdfFileReader(f)
# Get the text from page 2 (CHALLENGE: Do this in one step!)
page_two_text = pdf_reader.getPage(1).extractText()   # page indices are 0-based
# Close the file
f.close()
# Print the contents of page_two_text
print(page_two_text)
# -
# #### 5. Open the file `contacts.txt` in append mode. Add the text of page 2 from above to `contacts.txt`.
#
# #### CHALLENGE: See if you can remove the word "AUTHORS:"
# Simple Solution:
# 'a+' opens for appending and reading; after the write, seek(0) rewinds
# so the whole file can be read back.
with open('contacts.txt','a+') as c:
    c.write(page_two_text)
    c.seek(0)
    print(c.read())
# CHALLENGE Solution (re-run the %%writefile cell above to obtain an unmodified contacts.txt file):
# Slicing off the first 8 characters drops the leading "AUTHORS:".
with open('contacts.txt','a+') as c:
    c.write(page_two_text[8:])
    c.seek(0)
    print(c.read())
# ## Regular Expressions
# #### 6. Using the `page_two_text` variable created above, extract any email addresses that were contained in the file `Business_Proposal.pdf`.
# +
import re
# Enter your regex pattern here. This may take several tries!
# BUG FIX: the dot before the TLD must be escaped - an unescaped '.'
# matches ANY character, so e.g. 'user@siteXcom' would also have matched.
pattern = r'\w+@\w+\.\w{3}'
re.findall(pattern, page_two_text)
# -
# ### Great job!
| NLP_COURSE/00-Python-Text-Basics/04-Python-Text-Basics-Assessment-Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Date+Time Basics
# **Inhalt:** Mit Zeit-Datentyp umgehen
#
# **Nรถtige Skills:** Erste Schritte mit Pandas
#
# **Lernziele:**
# - Text in Zeit konvertieren
# - Zeit in Text konvertieren
# - Zeit-Informationen extrahieren
# - Einfache Zeit-Operationen
# ## Libraries
import pandas as pd
from datetime import datetime
from datetime import timedelta
# ## Zeitformat Codes
# Extrakt, die volle Liste: http://strftime.org/. Diese Format-Codes brauchen wir, um mit Daten zu arbeiten.
# | Code | Description | *Example* |
# |--------|---------|--------|
# | **`%a`** | Weekday as localeโs abbreviated name. | *Mon* |
# | **`%A`** | Weekday as localeโs full name. | *Monday* |
# | **`%d`** | Day of the month as a zero-padded decimal number. | *30* |
# | **`%-d`** | Day of the month as a decimal number. (Platform specific) | *30* |
# | **`%b`** | Month as localeโs abbreviated name. | *Sep* |
# | **`%B`** | Month as localeโs full name. | *September* |
# | **`%m`** | Month as a zero-padded decimal number. | *09* |
# | **`%-m`** | Month as a decimal number. (Platform specific) | *9* |
# | **`%y`** | Year without century as a zero-padded decimal number. | *13* |
# | **`%Y`** | Year with century as a decimal number. | *2013* |
# | **`%H`** | Hour (24-hour clock) as a zero-padded decimal number. | *07* |
# | **`%-H`** | Hour (24-hour clock) as a decimal number. (Platform specific) | *7* |
# | **`%I`** | Hour (12-hour clock) as a zero-padded decimal number. | *07* |
# | **`%-I`** | Hour (12-hour clock) as a decimal number. (Platform specific) | *7* |
# | **`%p`** | Localeโs equivalent of either AM or PM. | *AM* |
# | **`%M`** | Minute as a zero-padded decimal number. | *06* |
# | **`%-M`** | Minute as a decimal number. (Platform specific) | *6* |
# | **`%S`** | Second as a zero-padded decimal number. | *05* |
# | **`%-S`** | Second as a decimal number. (Platform specific) | *5* |
# | **`%j`** | Day of the year as a zero-padded decimal number. | *273* |
# | **`%-j`** | Day of the year as a decimal number. (Platform specific) | *273* |
# | **`%U`** | Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0. | *39* |
# | **`%W`** | Week number of the year (Monday as the first day of the week) as a decimal number. All days in a new year preceding the first Monday are considered to be in week 0. | *39* |
# | **`%c`** | Localeโs appropriate date and time representation. | *Mon Sep 30 07:06:05 2013* |
# | **`%x`** | Localeโs appropriate date representation. | *09/30/13* |
# | **`%X`** | Localeโs appropriate time representation. | *07:06:05* |
# | **`%%`** | A literal '%' character. | *%*
# ## Text to Time
# Eine hรคufige Situation, wenn man von irgendwo Daten importiert:
# - Wir haben einen bestimmten String, zB: "1981-08-23"
# - Wir wollen den String in ein Datetime-Objekt verwandeln, um sie zu analysieren
# - Dazu benutzen wir die Pandas-Funktion `to_datetime()`
# Parse with an explicit format string (see the format-code table above).
my_birthday_date = pd.to_datetime('1981-08-23', format='%Y-%m-%d')
# The result is displayed as a pandas "Timestamp".
my_birthday_date
# The function recognizes several standard formats automatically.
my_date = pd.to_datetime('1981-08-23 08:15:25')
my_date
# **Room to experiment.** Create a datetime object from each of the following strings:
# Example: '23.08.1981' (day.month.year)
my_date = pd.to_datetime('23.08.1981', format='%d.%m.%Y')
my_date
# Do it yourself: 'Aug 23, 1981'
my_date = pd.to_datetime('Aug 23, 1981', format='%b %d, %Y')
my_date
# '18.01.2016, 18:25 Uhr' - literal text like "Uhr" goes straight into the format
my_date = pd.to_datetime('18.01.2016, 18:25 Uhr', format='%d.%m.%Y, %H:%M Uhr')
my_date
# '5. May 2014' - %B matches the full English month name
my_date = pd.to_datetime('5. May 2014', format='%d. %B %Y')
my_date
# '5. Mai 2014' - German month name: translate it first, then parse
my_date = pd.to_datetime('5. Mai 2014'.replace('Mai','May'), format='%d. %B %Y')
my_date
# ## Time to Text
# Typically needed when displaying or exporting data:
# - we already have a datetime object
# - now we want to render it according to a specific pattern
# - that is what the `strftime()` method of every datetime object is for
# We already have the date object:
my_date = pd.to_datetime('1981-08-23 08:15:25')
# As text:
my_text = my_date.strftime(format='%Y-%m-%d')
my_text
# **Quiz**: make `strftime()` produce the following text:
# Example: 'Aug 23, 1981'
my_text = my_date.strftime(format='%b %d, %Y')
my_text
# Do it yourself: #'23.8.81, 08:15:25'
my_text = my_date.strftime(format='%d.%-m.%y, %H:%M:%S')
my_text
# 'Sunday, 23. of August 1981, 8:15 AM'
# BUG FIX: the original format string omitted the year (%Y), so the output
# did not match the requested text. (%-I / %-m are platform-specific: glibc.)
my_text = my_date.strftime(format='%A, %d. of %B %Y, %-I:%M %p')
my_text
# ## Time properties
# `strftime()` ist nicht die einzige Mรถglichkeit, Daten als Text anzuzeigen.
#
# Wir kรถnnen auch direkt einzelne Eigenschaften eines Datetime-Objekts abfragen.
#
# Taken from https://pandas.pydata.org/pandas-docs/stable/timeseries.html
# | Property | Description |
# |----------|------------|
# | **`.year`** | - The year of the datetime |
# | **`.month`** | - The month of the datetime |
# | **`.day`** | - The days of the datetime |
# | **`.hour`** | - The hour of the datetime |
# | **`.minute`** | - The minutes of the datetime |
# | **`.second`** | - The seconds of the datetime |
# | **`.microsecond`** | - The microseconds of the datetime |
# | **`.nanosecond`** | - The nanoseconds of the datetime |
# | **`.date`** | - Returns datetime.date (does not contain timezone information) |
# | **`.time`** | - Returns datetime.time (does not contain timezone information) |
# | **`.dayofyear`** | - The ordinal day of year |
# | **`.weekofyear`** | - The week ordinal of the year |
# | **`.week`** | - The week ordinal of the year |
# | **`.dayofweek`** | - The number of the day of the week with Monday=0, Sunday=6 |
# | **`.weekday`** | - The number of the day of the week with Monday=0, Sunday=6 |
# | **`.weekday_name`** | - The name of the day in a week (ex: Friday) |
# | **`.quarter`** | - Quarter of the date: Jan-Mar = 1, Apr-Jun = 2, etc. |
# | **`.days_in_month`** | - The number of days in the month of the datetime |
# | **`.is_month_start`** | - Logical indicating if first day of month (defined by frequency) |
# | **`.is_month_end`** | - Logical indicating if last day of month (defined by frequency) |
# | **`.is_quarter_start`** | - Logical indicating if first day of quarter (defined by frequency) |
# | **`.is_quarter_end`** | - Logical indicating if last day of quarter (defined by frequency) |
# | **`.is_year_start`** | - Logical indicating if first day of year (defined by frequency) |
# | **`.is_year_end`** | - Logical indicating if last day of year (defined by frequency) |
# | **`.is_leap_year`** | - Logical indicating if the date belongs to a leap year |
# That then works very simply:
my_date.year
my_date.day
my_date.is_month_start
# **Quiz**:
# In which week of the year does our date `my_date` fall?
my_date.weekofyear
# Which day of the week is it (as a number, Monday=0)?
my_date.dayofweek
# ## Zeitintervalle
# "Timedelta" ist ein spezieller Datentyp, der kein Datum, sondern einen Zeitintervall modelliert.
#
# Wir kรถnnen diesen Datentyp z.B. fรผr Vergleiche zwischen zwei Daten brauchen.
# Die folgenden Intervalle stehen uns dabei zur Verfรผgung:
#
# **`weeks`** - Wochen
#
# **`days`** - Tage
#
# **`hours`** - Stunden
#
# **`minutes`** - Minuten
#
# **`seconds`** - Sekunden
#
# **`microseconds`** - Mikrosekunden
# We create an interval with the `timedelta()` function:
d = timedelta(days=2)
d
d = timedelta(hours=1)
d
# The arguments can be combined freely:
d = timedelta(days=3, hours=10, minutes=25, seconds=10)
d
# We can add a time interval to a datetime object, or subtract one from it:
my_date + d
my_date - d
# We also get a timedelta when we take the difference of two dates:
my_date_1 = pd.to_datetime('1981-08-23', format='%Y-%m-%d')
my_date_2 = pd.to_datetime('1981-08-25', format='%Y-%m-%d')
d = my_date_2 - my_date_1
d
# The information is retrieved, again, via a property:
d.days
# **Quiz:** How many days lie between the following dates?
my_string_1 = '2001/09/11'
my_string_2 = '2016/11/09'
# Answer: parse both strings, subtract, and read the .days property.
my_date_1 = pd.to_datetime(my_string_1, format='%Y/%m/%d')
my_date_2 = pd.to_datetime(my_string_2, format='%Y/%m/%d')
d = my_date_2 - my_date_1
d.days
# **Quiz:** Starting January 1, 2019 at 0:00 I will drink no alcohol for
# 685648 seconds. On which date do I reach for a glass again?
# Answer: add the interval to the start date and format the result.
newyear = pd.to_datetime('2019-01-01', format='%Y-%m-%d')
d = timedelta(seconds=685648)
my_date = newyear + d
my_date.strftime(format='%Y-%m-%d')
# ## Here and Now
# Last but not least: a function that returns the current date and time:
jetzt = datetime.today()
jetzt
# We can display this date like any other:
jetzt.strftime(format='%Y-%m-%d: %H:%M:%S')
# We can also do arithmetic with it:
d = timedelta(days=1)
(jetzt - d).strftime(format='%Y-%m-%d: %H:%M:%S')
| 11 Pandas Teil 3/Date+Time Basics L.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/woongjoonchoi/Tutorials/blob/main/Numpy/numpy_stride_tricks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="QxUpuF2QR0K7"
import numpy as np
# + [markdown] id="1hRVL0q-oO1h"
# Practice from : https://towardsdatascience.com/advanced-numpy-master-stride-tricks-with-25-illustrated-exercises-923a9393ab20#9c37
# + [markdown] id="Um-w5tcMR7aA"
# # 1D exercise
# + [markdown] id="km8OC3F_SBFC"
# 1) slice first 3 elements
# + colab={"base_uri": "https://localhost:8080/"} id="FgyMfjNxR6Vc" outputId="1ec27d84-acec-4ac6-9d8e-79ff0e897c8c"
# 5x5 int8 matrix of 1..25. itemsize is 1 byte, so strides=(1,) walks
# consecutive elements of the underlying buffer; shape=(3,) takes three.
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x,shape=(3,) , strides =(1,))
print(x)
# + [markdown] id="jCBePd6-Jv-f"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="xtb7-rAvJvjQ" outputId="b321daa6-06aa-44fa-c2eb-872414a00271"
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
print(x[0,:3])
# + [markdown] id="oPbXfAmdSe7Z"
# 2)slice first 8 elements
# + id="xxnFXl_JSeZ6" colab={"base_uri": "https://localhost:8080/"} outputId="ec4929a2-941a-4cdd-c719-98489f7a8750"
x= np.asarray(range(1,26) , np.int8).reshape(5,5)
x= np.lib.stride_tricks.as_strided(x,shape=(8,) , strides=(1,))
print(x)
# + [markdown] id="E0LIafCBJ5S0"
# similar
#
# + colab={"base_uri": "https://localhost:8080/"} id="7QSJHYAIJ6Ip" outputId="95d2e32d-3c99-41b2-8bb3-e78ca6510d5c"
x= np.asarray(range(1,26) , np.int8).reshape(5,5)
print(x[0,:8])
# + [markdown] id="eqEbv9XglKaI"
# 3)flatten 2-d array
# input item: 2bytes
# + colab={"base_uri": "https://localhost:8080/"} id="p2obvh8LlREL" outputId="10658c96-de0d-4d38-e290-eef2f2d3117c"
x = np.asarray(range(1,26) , np.int16).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x,shape=(25,) ,strides =(2,))
# a=np.array([1,3])
print(x)
# print(a is np.asarray(a))
# + [markdown] id="wJA_BL80KOaj"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="sb2fmEx5KPPe" outputId="2e015bf7-7c8e-471d-e41f-1602dde3f616"
x = np.asarray(range(1,26) , np.int16).reshape(5,5)
x.ravel()
# + [markdown] id="MxnsrnrQmMYE"
# 4) skip every other element
# input item : 1 byte
# + colab={"base_uri": "https://localhost:8080/"} id="QcPmCJaUmFvp" outputId="ddccb9cc-3031-4839-bbf0-9bec532c19b2"
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
x = np.lib.stride_tricks.as_strided(x,shape=(3,),strides=(2,))
print(x)
# + [markdown] id="TFRkURMTmleV"
# 5)slice first column
# input item : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="gZoAhV_XmkYb" outputId="bc7b3182-02f9-428f-b6ce-73976d314652"
x = np.asarray(range(1,26) , np.int64).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x,shape=(5,),strides =(40,) )
print(x)
# + [markdown] id="oBg97SYnKXEq"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="mbPFCMCTKXpP" outputId="7240d4d4-0833-4a44-c6a5-14a8f703b438"
x = np.asarray(range(1,26) , np.int64).reshape(5,5)
x[0,::2]
# + [markdown] id="CK6W8i4dnmqX"
# 6)diagonal element
# input item : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="XJFWta5enlPt" outputId="2edd7164-ae64-4154-cb27-386cb0f97484"
x= np.asarray(range(1,26) , np.int64).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x,shape=(5,),strides=(48,))
print(x)
# + [markdown] id="bOugQ7YYKjBv"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="znWBEPH9KjZ0" outputId="0060dbd3-658d-431b-ae5a-2a7f3bb8886e"
x= np.asarray(range(1,26) , np.int64).reshape(5,5)
x.diagonal()
# + [markdown] id="R1v77iPgn6io"
# 7)repeat element
# input item : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="UHi-jbAbn6BW" outputId="e585a22b-46f2-4838-eede-c41cb5ea8b9c"
x=np.asarray(range(1,26) , np.int64 ).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x , shape=(5,) , strides=(0,))
print(x)
# + [markdown] id="aFEaVxd8Kp_9"
# similar
# + id="Fz8fc508KpgB"
# + [markdown] id="4EPxLtejoMnH"
# # 2D -exercise
# + [markdown] id="lX9fJ0cNIKma"
# 8) simple 2d slicing
# + colab={"base_uri": "https://localhost:8080/"} id="z9-vTwEsoVYV" outputId="6913b1bd-8e11-45ff-db13-73c62073fd5c"
x=np.asarray(range(1,26) , np.int64).reshape(5,5)
x=np.lib.stride_tricks.as_strided(x,shape=(4,3) , strides=(40,8))
print(x)
# + [markdown] id="laOLSEAsoOCp"
# 9) Slice a zigzag
#
# Input itemsize: 8 bytes | Input strides: (40, 8)
# + colab={"base_uri": "https://localhost:8080/"} id="YceaUWXtIQ8b" outputId="a8553b84-be26-4fdd-e2fe-a157c301d8d7"
x = np.asarray(range(1,26) , np.int64).reshape(5,5)
x = np.lib.stride_tricks.as_strided(x,shape=(4,2),strides=(48,8))
print(x)
# + [markdown] id="qGII7b43JHee"
# 10) sparse striding
# input itemsize : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="FsHcdwcxJExp" outputId="ebe2e290-858c-49db-fe2f-64d1b31d6a58"
x = np.asarray(range(1,26) , np.int64 ).reshape(5,5)
x = np.lib.stride_tricks.as_strided(x, shape = (3,3) , strides = (80,16))
print(x)
# + [markdown] id="THjN_3txK_vb"
# similar
#
# + colab={"base_uri": "https://localhost:8080/"} id="w3bryvtkLAY9" outputId="20263b3e-4dd9-40d2-e5b6-eaadb42957f1"
x = np.asarray(range(1,26) , np.int64).reshape(5,5)
x[::2,::2]
# + [markdown] id="4OzNxWUVLHnE"
# 11 ) Transpose a 2d array
# input item size : 1 byte
# + colab={"base_uri": "https://localhost:8080/"} id="4DZI7mEOLFuC" outputId="4b4b2ee7-0c8f-4d6d-e911-1898fd3142f3"
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
np.lib.stride_tricks.as_strided(x,shape=(3,3),strides =(1,5) )
# + [markdown] id="aHmzADVRLnbr"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="RgiP6wPALmnx" outputId="eff414aa-13d8-48a8-ba0d-2853e4a46304"
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
x[:3,:3].T
# + [markdown] id="xPyzK_KIMD7x"
# 12 ) repeat first column 4 times
# input item size : 4 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="qVeifIX0Lvih" outputId="a1512938-1880-4ee0-8ce6-d0ee37c852be"
x = np.asarray(range(1,26) , np.int32).reshape(5,5)
np.lib.stride_tricks.as_strided(x,shape=(5,4),strides=(20,0))
# + [markdown] id="XyZgZHK5MgQo"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="CEfQDwgsMg2B" outputId="5a14c06a-85f0-4ff7-bef4-3ce39575195f"
x = np.asarray(range(1,26) , np.int32).reshape(5,5)
np.broadcast_to(x[:,0,None], (5,4))
# + [markdown] id="hAsZPqFyMuN_"
# 13 ) reshape 1d array
# nput itemsize: 8 bytes | Input strides: (8,)
# + id="3IAf5vN1Mtpz" colab={"base_uri": "https://localhost:8080/"} outputId="cb136ef1-d00d-46a7-956f-a6d46aac5a3c"
x = np.asarray(range(1,13) , np.int64 )
np.lib.stride_tricks.as_strided(x,shape=(4,3) , strides = (24,8) )
# + [markdown] id="ttLIZcyN-rZK"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="R5569h3I-rAH" outputId="721fe6e0-a4c5-4b3b-d960-5aa2da94cabb"
x.reshape(4,3)
# + [markdown] id="HuMD7BAg-wAD"
# 14) Slide a 1D window
#
# Input itemsize: 1 byte | Input strides: (1,
# + colab={"base_uri": "https://localhost:8080/"} id="a47kB7tP-tiW" outputId="d376fe22-63a0-4291-a4e3-05c8ea9b8947"
x = np.asarray(range(1,11) , np.int8)
np.lib.stride_tricks.as_strided(x , shape = (8,3) , strides = (1,1))
# + [markdown] id="XjMfmZC0_Ktj"
# similar
# + id="_n87_3L__KGk"
# (Translated) There is no built-in method that does this directly.
# + [markdown] id="4DIsFTLB_V_u"
# 15 ) Slide a 2D window then flatten
#
# nput itemsize: 1 byte | Input strides: (2, 1
#
# + colab={"base_uri": "https://localhost:8080/"} id="zS7myM8f_caE" outputId="97e16b38-97ae-4b73-b6f1-6cd905b3860d"
x = np.asarray( [0,1,10,11,20,21,30,31,40,41,50,51],np.int8).reshape(6,2)
np.lib.stride_tricks.as_strided(x , shape=(4,6) , strides = (2,1))
# + [markdown] id="CYoB9VxBAGYE"
# similar
# + id="gp1gV_FYAGyJ"
# (Translated) There is no close built-in equivalent for this.
# + [markdown] id="j1uBCZb2AJC7"
# 16 ) collapse an axis from an 3-D array
# Input itemsize: 1 byte |
# + colab={"base_uri": "https://localhost:8080/"} id="uWDsJCBIATZk" outputId="3ea39ba8-7402-4021-b590-c393159e0c4e"
x = np.asarray(range(1,13),np.int8).reshape(3,2,2)
np.lib.stride_tricks.as_strided(x, shape = (3,4) , strides = (4,1))
# + [markdown] id="Nlj9DbGyAuim"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="VH5ShCahAvFy" outputId="b1b9c97c-ecda-4feb-c6f3-eca678b3cd42"
x.reshape(3,4)
# + [markdown] id="idXSXalMAxGp"
# # 3D exercises
# + [markdown] id="uS2MxcN5A164"
# 17) 2 corners
# Input itemsize: 2 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="kEYU2FN3AwqK" outputId="c1b6a570-b3e7-40c2-eff1-f02317df588b"
x = np.asarray(range(1,26) , np.int16).reshape(5,5)
np.lib.stride_tricks.as_strided(x , shape = (2,2,2) ,strides = (30,10,2))
# + [markdown] id="LjfLqBPeBavv"
# similar
# + id="YpWw5hG5BbON"
# this may not be achieved concisely
# + [markdown] id="LE0uui9MBcVA"
# 18) Staggered slicing
# Inputitem 1bytes
# + colab={"base_uri": "https://localhost:8080/"} id="cnh54rdhBisF" outputId="45dcfbc7-824d-4de9-cc9c-cecb98b8437d"
x = np.asarray(range(1,26) , np.int8).reshape(5,5)
np.lib.stride_tricks.as_strided(x , shape=(2,2,3 ), strides = (10,6,1))
# + [markdown] id="VAON1Y60CCwr"
# similar
# + id="PueSEfggCCWH"
# (Translated) There is no similar built-in method.
# + [markdown] id="wxGAWFBACHZu"
# 19 ) Repeat a 2D array
# input item 2bytes
#
# + colab={"base_uri": "https://localhost:8080/"} id="FGNv_H0qCG9Y" outputId="d18016b5-99ba-4430-d87b-e47f2d6c399f"
x = np.asarray(range(1,26) , np.int16).reshape(5,5)
np.lib.stride_tricks.as_strided(x , shape=(3,2,4) , strides = (0,10,2))
# + [markdown] id="eoQvEzTkC-Ys"
# similar
# + id="260OE3_MC_B_"
# (Translated) There is no similar built-in method.
# + [markdown] id="pJOnY-aCDAa5"
# 20 ) 3D transpose
# Input item 4bytes
# + colab={"base_uri": "https://localhost:8080/"} id="YKFkJ0MtDE_X" outputId="3dd8974a-26de-4d26-cbcc-8248a38b7e16"
x = np.asarray(range(1,13) , np.int32).reshape(3,2,2)
np.lib.stride_tricks.as_strided(x , shape=(3,2,2) , strides = (16,4,8))
# + [markdown] id="dJzYwB99FQeD"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="YI3dAzX6FQ3b" outputId="905c4397-a5cb-4b6e-9a72-0d6cf5d39ef0"
np.swapaxes(x,1,2)
# + [markdown] id="dtd1F6AwFeWc"
# 21) slide 2d array
# Input item ; 8bytes
# + colab={"base_uri": "https://localhost:8080/"} id="34G6gugKFhH9" outputId="0fc8b1ba-4d37-4d7f-d5c4-d1e3f79fa704"
x = np.asarray(range(1,21) , np.int64).reshape(4,5)
np.lib.stride_tricks.as_strided(x, shape = (3,2,5) , strides = (40,40,8))
# + [markdown] id="LmNnETLMF_hC"
# similar
# + id="-aQtOKpwGHXI"
# (Translated, garbled original) Notes whether this is achievable with a built-in method — TODO confirm.
# + [markdown] id="6stldHByGKEo"
# 22 ) Reshape 1 d array to 3d
# input item : 1bytes
# + colab={"base_uri": "https://localhost:8080/"} id="oED8MkMuGI5l" outputId="b06905e9-5255-45f1-97cf-d9f011f7008d"
x = np.asarray(range(1,13) , np.int8)
np.lib.stride_tricks.as_strided(x,shape=(2,2,3) , strides = ( 6,3,1))
# + [markdown] id="-Wc8Khp2GmHx"
# similar
# + colab={"base_uri": "https://localhost:8080/"} id="paKp0e8XF5Ot" outputId="a72bc1c3-60bc-47d4-d687-aaaee555d380"
x.reshape(2,2,3)
# + [markdown] id="gxpJCva3GqqT"
# # 4D exercises
# + [markdown] id="jhKMLlx4GujD"
# 23) Slide a 2D receptive field for convolutions
# input item : 1bytes
# + colab={"base_uri": "https://localhost:8080/"} id="HxmJkEK2Gz1q" outputId="337d85e8-e656-4a5a-d66b-51baafdeb502"
x = np.asarray(range(1,26) , np.int8)
np.lib.stride_tricks.as_strided(x, shape =(2,2,3,3) , strides =(10,2,5,1))
# + [markdown] id="gwD3NTI-H4Ve"
# similar
# + id="_5qPDLz3H5L7"
# no simliar method
# + [markdown] id="ox6_IuTeH6qt"
# 24) Repeat a 3D tensor
#
# input item : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="YFJsU-4fH6Qe" outputId="c82b7756-3d50-4a7f-b88b-dbfd243da68c"
x = np.asarray(range(1,13) , np.int64).reshape(2,2,3)
np.lib.stride_tricks.as_strided(x, shape =(2,2,2,3) , strides = (48,0 ,24,8))
# + [markdown] id="nzPo63wMIptz"
# similar
# + id="I0lITXNCIqfC"
# no similar method
# + [markdown] id="lWrJHH6OIrqE"
# 25 ) Reshape 1D array to 4D array
# input item : 8 bytes
# + colab={"base_uri": "https://localhost:8080/"} id="3Hzb4qy-IviH" outputId="3f1d49ac-71e5-4650-e4a2-3aa4189cb407"
x = np.asarray(range(1,16) , np.int64)
np.lib.stride_tricks.as_strided(x, shape = (2,2,2,2) , strides = (64,32,16,8))
| Numpy/numpy_stride_tricks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 2 - Analyzing Taxi Time Series Data with Time Series Chains
#
# This example utilizes the main takeaways from the research paper: [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf).
#
# We will be looking at data from taxi passengers in NYC and will be seeing if we can find any chains within the time series to find trends.
# ## Getting Started
#
# Let's import the packages that we'll need to load, analyze, and plot the data.
import pandas as pd
import stumpy
import numpy as np
# ## Loading Some Data
#
# First, we'll download historical data that represents the half-hourly average of the number of NYC taxi passengers over 75 days in the Fall of 2014.
#
#
# We extract that data and insert it into a pandas dataframe, making sure the timestamps are stored as *datetime* objects and the values are of type *float64*.
# Download the half-hourly NYC taxi passenger counts and normalize dtypes.
df = pd.read_csv("https://raw.githubusercontent.com/stanford-futuredata/ASAP/master/Taxi.csv", sep=',')
df['value'] = df['value'].astype(np.float64)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.head()
# Window size of 48 half-hour readings = one day per subsequence.
m = 48
stump_results = stumpy.stump(df['value'].values, m=m)
# stump returns columns: matrix profile, profile index, left index, right index.
out_df = pd.DataFrame(stump_results, columns=['mp', 'inx', 'left', 'right'])
out_df.head()
# Compute all time series chains from the left/right profile indices;
# C is the unanchored (longest) chain.
S, C = stumpy.allc(out_df['left'].values, out_df['right'].values)
C
# ## Resources
#
# [Matrix Profile VII](https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf)
| notebooks/Tutorial_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow, keras
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
import csv
import cv2
def get_answer_list(file_name, screen_num):
    """Read ground-truth boxes for one screen from a tab-separated file.

    Each row of the file has (at least) the columns ``screen_num``, ``x1``,
    ``y1``, ``x2``, ``y2``.  Rows whose ``screen_num`` equals *screen_num*
    are returned, in file order, as ``[x1, y1, x2, y2]`` integer lists.
    """
    with open(file_name, 'r') as csv_file:
        reader = csv.DictReader(csv_file, delimiter='\t')
        return [
            [int(row['x1']), int(row['y1']), int(row['x2']), int(row['y2'])]
            for row in reader
            if int(row['screen_num']) == screen_num
        ]
# Adjust image scale
def rescale_img(img, scale_percent):
    """Return *img* resized to *scale_percent* percent of its original size.

    Uses INTER_AREA interpolation, which is the recommended choice for
    shrinking an image.
    """
    h, w = img.shape[0], img.shape[1]
    # Keep the exact original rounding: multiply first, then divide and truncate.
    new_size = (int(w * scale_percent / 100), int(h * scale_percent / 100))
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
# Display image data
def show_img(img):
    """Display *img* at 30% scale in an OpenCV window until a key is pressed."""
    preview = rescale_img(img, 30)
    cv2.imshow('mser', preview)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
test_img_url = '/home/kimsoohyun/00-Research/02-Graph/02-image_detection/04-clickable/dataset/00-screenshot/kr.co.company.hwahae/3.png'
answer_url = '/home/kimsoohyun/00-Research/02-Graph/02-image_detection/04-clickable/dataset/01-csv/clickable/kr.co.company.hwahae.csv'
answer_list = get_answer_list(answer_url, 3)
img = cv2.imread(test_img_url)
print(len(answer_list))
# Shi-Tomasi Corner Detection
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,1000,0.01,10)
corners = np.int0(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(img,(x,y),3,(255,0,0),10)
# +
# Canny Detection
def canny_detection(img):
    """Detect edge pixels with CLAHE-enhanced Canny and group them by row.

    Returns a dict mapping each row index that contains at least one edge
    pixel to the list of column indices of those pixels, in scan order.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Contrast-limited adaptive histogram equalization sharpens local
    # contrast before edge detection.
    clahe = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(16, 16))
    enhanced = clahe.apply(gray)
    edges = np.array(cv2.Canny(enhanced, 120, 125))
    # Canny output is binary; the maximum value marks edge pixels.
    rows, cols = np.where(edges == np.amax(edges))
    axis_dict = dict()
    for row_idx, col_idx in zip(rows, cols):
        axis_dict.setdefault(row_idx, []).append(col_idx)
    return axis_dict
result = canny_detection(img)
for key in result.keys():
for element in result[key]:
cv2.circle(img, (element, key), \
1, (255,0,0), 3)
show_img(img)
# -
# Harris Corner
def harris_corner(img):
    """Detect Harris corners, refine them to sub-pixel accuracy, and draw them.

    Mutates ``img`` in place: connected-component centroids are painted red
    ([0,0,255]) and the refined corner positions green ([0,255,0]).  Returns
    the stacked integer array ``res`` whose columns are
    (centroid_x, centroid_y, corner_x, corner_y).
    """
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # find Harris corners
    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray,2,3,0.04)
    # Dilate so corner responses merge into blobs before thresholding.
    dst = cv2.dilate(dst,None)
    # Keep only strong responses (>= 1% of the maximum).
    ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0)
    dst = np.uint8(dst)
    # find centroids
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
    # define the criteria to stop and refine the corners
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
    # Now draw them
    res = np.hstack((centroids,corners))
    res = np.int0(res)  # NOTE(review): np.int0 is deprecated in recent NumPy
    print(res)  # debug output left in place
    img[res[:,1],res[:,0]]=[0,0,255]
    img[res[:,3],res[:,2]] = [0,255,0]
    return res
img = harris_corner(img)
# +
#MSER Algorithm
def mser_algo(img):
    """Detect MSER regions in *img* and return their convex hulls.

    Each hull is an OpenCV contour (array of points) suitable for
    ``cv2.polylines`` / ``cv2.boundingRect``.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.MSER_create()
    regions, _ = detector.detectRegions(gray)
    return [cv2.convexHull(region.reshape(-1, 1, 2)) for region in regions]
#hulls = mser_algo(img)
#for i, contour in enumerate(hulls):
# x, y, w, h = cv2.boundingRect(contour)
# cv2.rectangle(img, (x, y+h), (x+w, y), (255, 0, 0), 3)
#cv2.polylines(img, hulls, 1, (255, 0, 0))
# +
#contouring
gray_vis = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#ret,thresh = cv2.threshold(gray_vis,127,255,0)
clahe = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(16,16))
cl1 = clahe.apply(gray_vis)
edges = cv2.Canny(cl1, 120, 125)
arr = np.array(edges)
result = np.where(arr==np.amax(arr))
contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#img = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cnt = contours[0]
for cnt in contours:
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w*h
extend = float(area) / rect_area
cv2.rectangle(img, (x, y), (x+w+int(extend), y+h+int(extend)), (255, 0, 0), 3)
print(np.array(contours))
# -
# Draw the ground-truth boxes
answer_list
for axis in answer_list:
cv2.rectangle(img, (axis[0], axis[1]), (axis[2], axis[3]), (0, 0, 255), 3)
# +
#get Orientation and magnitude
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(img.shape)
X = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
Y = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
orien = cv2.phase(X, Y, angleInDegrees=True)
mag = cv2.magnitude(X, Y)
#for i, contour in enumerate(hulls):
#x, y, w, h = cv2.boundingRect(contour)
# cv2.rectangle(img, (x, y+h), (x+w, y), (255, 0, 0), 3)
show_img(mag)
# -
nonzero_orien = np.nonzero(orien)
nonzero_orien_loc = zip(nonzero_orien[0], nonzero_orien[1])
#for x,y in nonzero_orien_loc:
# print(x, y)
nonzero_mag = np.nonzero(mag)
nonzero_mag_loc = zip(nonzero_mag[0], nonzero_mag[1])
for x,y in nonzero_mag_loc:
print(x, y)
import pandas as pd
df = pd.DataFrame(data = mag)
df.head(50)
# +
#Edge Detection์ ํตํด์ ๋ฒํผ ์์๋ค ๋ชจ๋ ์๋ผ๋ด๊ธฐ
'''
1. ์ด๋ฏธ์ง ์ฝ์ด๋ค์ด๊ธฐ ์ํ ์ด๋ฏธ์ง: abysrium pole
2. Canny Detection
3. ์ขํ๋ฆฌ์คํธ ์ ์ฅ
4. ์ขํ๋ฆฌ์คํธ ์ฝ์ด ์ด๋ฏธ์ง crop๋ฆฌ์คํธ ์ ์ฅ
'''
test_img_url = '/home/kimsoohyun/00-Research/02-Graph/02-image_detection/04-clickable/dataset/00-screenshot/kr.co.company.hwahae/3.png'
img = cv2.imread(test_img_url)
print(img.shape)
result = canny_detection(img)
#cv2.rectangle(img, (result[0][0], result[0][1]), \
# (result[0][2], result[0][3]), \
# (255,0,0), 10)
#for key in result.keys():
# if len(result[key]) < 4:
# print(key, len(result[key]))
#if cv2.waitKey(1) & 0XFF == ord('q'):
# cv2.destroyAllWindows()
#print(result)
for key in result.keys():
for element in result[key]:
cv2.circle(img, (element, key), \
1, (255,0,0), 3)
print(result.keys())
#plt.imshow(img)
# +
test_img_url = '/home/kimsoohyun/Downloads/Screenshot_20200418-220702_Abyssrium Pole.jpg'
img = cv2.imread(test_img_url)
img_gray = cv2.imread(test_img_url, cv2.COLOR_BGR2GRAY)
mser = cv2.MSER_create()
regions, _ = mser.detectRegions(img_gray)
img_clone = img.copy()
for r in regions:
(x,y,w,h) = cv2.boundingRect(np.reshape(r,(-1,1,2)))
cv2.rectangle(img_clone,(x,y),(x+w,y+h),(0,0,255),1)
print(img.shape)
#plt.imshow(img_clone)
cv2.imshow('mser', img_clone)
cv2.waitKey(0)
cv2.destroyAllWindows()
# !ls dataset/03-models/model
#๋ชจ๋ธ export
model = tensorflow.keras.models.load_model('./dataset/03-models/model/my_model.h5')
# +
# CNN predict
# +
# Find clickable elements and draw them on the image
| 02-image_detection/04-clickable/ipynb/PredictModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **.handle_comment(data)**
#
# This method is called when a comment is encountered (e.g. <!--comment-->).
# The data argument is the content inside the comment tag:
#
# from HTMLParser import HTMLParser
#
# class MyHTMLParser(HTMLParser):
# def handle_comment(self, data):
# print "Comment :", data
#
#
# **.handle_data(data)**
#
# This method is called to process arbitrary data (e.g. text nodes and the content of <script>...</script> and <style>...</style>).
#
# The data argument is the text content of HTML.
#
# from HTMLParser import HTMLParser
#
# class MyHTMLParser(HTMLParser):
# def handle_data(self, data):
# print "Data :", data
# +
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """HTML parser that reports comments and text data.

    For each comment it prints a '>>> Multi-line Comment' or
    '>>> Single-line Comment' header followed by the comment body; for each
    data node (other than a bare newline) it prints '>>> Data' followed by
    the text.
    """

    def handle_comment(self, comment):
        # A comment spanning several source lines contains a newline.
        if '\n' in comment:
            print('>>> Multi-line Comment')
        else:
            # Fix: header must carry the same '>>> ' prefix as the other
            # branches (it was missing, making the output inconsistent).
            print('>>> Single-line Comment')
        print(comment)

    def handle_data(self, data):
        # Skip the bare-newline data nodes produced between input lines.
        if data == '\n': return
        print('>>> Data')
        print(data)
html = ""
for _ in range(int(input())):
html += input().rstrip()
html += '\n'
parser = MyHTMLParser()
parser.feed(html)
parser.close()
| Python/13. regex and parsing/84. html parser part 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# cd /mnt/sda/otani/Experiments/moment_retrieval
# %matplotlib inline
import pandas as pd
import numpy as np
import spacy
import matplotlib.pyplot as plt
from src.toolbox.utils import _nms
from src.toolbox.eval import evaluate, accumulate_metrics
from src.toolbox.utils import _nms
from src.toolbox.visualization import plot_performance_per_class, plot_performance_per_duration
from src.toolbox.data_converters import CharadesSTA2Instances
import pickle as pkl
import neptune
import os
project = neptune.init("mayu-ot/moment-retrieval")
exp_id = "MOM-52"
if not os.path.exists(f"tmp/{exp_id}/TAN_vgg_rgb_test.pkl"):
exps = project.get_experiments(id=exp_id)
print("downloading results ...")
exps[0].download_artifact("TAN_vgg_rgb_test.pkl", f"tmp/{exp_id}")
print("done!")
# load ground truth testset
test_data = CharadesSTA2Instances(pd.read_csv('data/processed/test.csv'))
# evaluate R@K (IoU>m)
predictions = pkl.load(open(f"tmp/{exp_id}/TAN_vgg_rgb_test.pkl", "rb"))
preds = []
for p in predictions:
query = (p[0][0], p[0][1]+".") # 2D-TAN remove '.' in description. Add to recover original input description.
seg = p[1]
keep = _nms(np.asarray(seg), np.arange(len(seg))[::-1], thresh=0.5, top_k=5)
seg = [seg[i] for i in keep]
preds.append((query, seg, np.arange(len(seg))[::-1].tolist()))
results = evaluate(test_data, preds)
summary = accumulate_metrics(results)
summary
# Check the relation between success rates and input video durations
plt.rcParams.update({'font.size': 14})
fig = plot_performance_per_duration(results, test_data, ax=plt.gca())
# +
# per-action performance
from src.toolbox.eval import get_first_action, categorize_results, summarize_results_per_class
from src.toolbox.utils import _load_top_actions
top_actions = _load_top_actions("charade")
cat_fn = lambda x: get_first_action(x, top_actions)
categorized_results = categorize_results(results, cat_fn)
metrics_per_cls = summarize_results_per_class(categorized_results)
class_labels = list(metrics_per_cls.keys())
frequent_class = [label for label in class_labels if metrics_per_cls[label]["n_instance"] > 10]
_ = plot_performance_per_class({l: v for l, v in metrics_per_cls.items() if l in frequent_class})
| notebooks/report/2DTAN_CharadeSTA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # Data Preparation
# ---
#
# This notebook creates simulated solar and energy consumption from a real-home dataset in Australia to walk you through the process of training many models and forecasting on Azure Machine Learning.
#
# This notebook walks you through all the necessary steps to configure the data for this solution accelerator, including:
#
# 1. Generate the sample data
# 2. Split in training/forecasting sets
# 3. Connect to your workspace and upload the data to its Datastore
#
# ### Prerequisites
# If you have already run the [00_Setup_AML_Workspace](00_Setup_AML_Workspace.ipynb) notebook you are all set.
#
# ## 1.0 Generate sample data
#
# The creation of the synthetic datasets are as follows
#
# 1. We will create datasets for 5 suburbs; each suburb will have 10 homes each - total of 50 models to build/train upon
# 2. For each home; we will create shift the Solar/Temp/General Usage values with a random normal variable with the means/std of the original dataset
# 3. This would produce 50 separate files in a folder;
# + tags=[]
import os
import pandas as pd
import numpy as np
np.random.seed(0)
# +
# Read the raw dataset
df = pd.read_csv("data/sampleEnergy.csv")
# Split reference data into solar and general usage and join
general_usage_df = df[df["RateTypeDescription"] == "Generalusage"][["EndDate", "ProfileReadValue"]].reset_index(drop=True)
solar_df = df[df["RateTypeDescription"] == "Solar"][["EndDate", "ProfileReadValue", "DeviceNumber", "QualityFlag", "BOMTEMP"]].reset_index(drop=True)
pd.concat([general_usage_df, solar_df], axis=1, join="inner")
# process reference dataset
processed_df = solar_df.copy()
processed_df["Generalusage"] = general_usage_df["ProfileReadValue"]
processed_df["Solar"] = solar_df["ProfileReadValue"]
processed_df["Temp"] = solar_df["BOMTEMP"]
processed_df["NetEnergy"] = processed_df["Solar"] - processed_df["Generalusage"]
processed_df["EndDate"] = pd.to_datetime(processed_df["EndDate"], format="%d/%m/%Y %H:%M")
processed_df.drop(["ProfileReadValue", "BOMTEMP"], axis=1, inplace=True)
# Create time-based features
processed_df["Quarter"] = processed_df["EndDate"].dt.quarter
processed_df["Month"] = processed_df["EndDate"].dt.month
processed_df["Weekday"] = processed_df["EndDate"].dt.weekday
processed_df["Hour"] = processed_df["EndDate"].dt.hour
processed_df["WeekOfTheMonth"] = processed_df["EndDate"].dt.week
processed_df["Weekend"] = (processed_df["Weekday"] >= 5).astype(np.int)
processed_df["DateOfMonth"] = ((processed_df["EndDate"].dt.day // 7) + 1)
processed_df["AMPM"] = (processed_df["EndDate"].dt.hour>11).astype(np.int)
# +
# Create folder to write data
folder_path = "data/synthetic/"
os.makedirs(folder_path, exist_ok=True)
# Create simulated suburbs and homes
suburbs = ["Manly", "Bondi", "StKildas", "AlbertPark", "NorthBridge"]
homes = list(range(1, 11, 1))
# Calculate mean / std of changes for Generalusage, Solar, Temp to augment baseline data
generalusage_mean, generalusage_std = processed_df["Generalusage"].diff().mean(), processed_df["Generalusage"].diff().std()
solar_mean, solar_std = processed_df["Solar"].diff().mean(), processed_df["Solar"].diff().std()
temp_mean, temp_std = processed_df["Temp"].diff().mean(), processed_df["Temp"].diff().std()
# Generate synthetic data
for suburb_idx, suburb in enumerate(suburbs):
temp_delta = np.random.normal(temp_mean, temp_std, processed_df.shape[0])
for home_idx, home in enumerate(homes):
suburb_name = suburb
home_name = f"home{home}"
suburb_home_df = processed_df.copy()
suburb_home_df["DeviceNumber"] += suburb_idx + home_idx
suburb_home_df["Temp"] += temp_delta
suburb_home_df["Generalusage"] += temp_delta
suburb_home_df["Solar"] += temp_delta
suburb_home_df.loc[suburb_home_df["Generalusage"] < 0, "Generalusage"] = 0
suburb_home_df.loc[suburb_home_df["Solar"] < 0, "Solar"] = 0
# suburb_home_df["NetEnergy"] = suburb_home_df["Solar"] - suburb_home_df["Generalusage"]
suburb_home_df["Suburb"] = suburb_name
suburb_home_df["Home"] = home_name
print(f"Writing synthetic data for {suburb_name} {home_name}")
suburb_home_df.to_csv(folder_path + f"{suburb_name}_{home_name}.csv", index=False)
# -
# ## 2.0 Split data in two sets
#
# We will now split each dataset in two parts: one will be used for training, and the other will be used for simulating batch forecasting. The training files will contain the data records before '2020-06-01' and the last part of each series will be stored in the inferencing files.
#
# Finally, we will upload both sets of data files to the Workspace's default [Datastore](https://docs.microsoft.com/python/api/azureml-core/azureml.core.datastore(class)).
# +
from scripts.helper import split_data
# Split each file and store in corresponding directory
train_path, inference_path = split_data(folder_path, 'EndDate', '2020-06-01')
# -
# ## 3.0 Upload data to Datastore in AML Workspace
#
# In the [setup notebook](00_Setup_AML_Workspace.ipynb) you created a [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace.workspace). We are going to register the data in that environment.
# +
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
# Take a look at Workspace
ws.get_details()
# -
# We will create a new Datastore and upload data into that data store. Feel free to read the [Datastore](https://docs.microsoft.com/azure/machine-learning/how-to-access-data) documentation. Please create a container before running the code below. AzureML Datastore functions DOES NOT create a container for you; it merely registers the datastore to be used later.
#
# A Datastore is a place where data can be stored that is then made accessible for training or forecasting. Please refer to [Datastore documentation](https://docs.microsoft.com/python/api/azureml-core/azureml.core.datastore(class)) on how to access data from Datastore.
# +
## Create a new datastore
from azureml.core import Datastore
blob_datastore_name='energy' # Name of the datastore to workspace
container_name=os.getenv("BLOB_CONTAINER", "energytest") # Name of Azure blob container
account_name=os.getenv("BLOB_ACCOUNTNAME", "<StorageAccountName>") # Storage account name
account_key=os.getenv("BLOB_ACCOUNT_KEY", "<storageaccountkey>") # Storage account access key
blob_datastore = Datastore.register_azure_blob_container(workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
account_key=account_key)
# -
# If you'd like to use Azure Data Lake as a Datastore, here is a code example below. Please note that you'd need to create a ServicePrincipal to access the data on the Datastore.
# +
adlsgen2_datastore_name = 'adlsgen2datastore'
subscription_id=os.getenv("ADL_SUBSCRIPTION", "<my_subscription_id>") # subscription id of ADLS account
resource_group=os.getenv("ADL_RESOURCE_GROUP", "<my_resource_group>") # resource group of ADLS account
account_name=os.getenv("ADLSGEN2_ACCOUNTNAME", "<my_account_name>") # ADLS Gen2 account name
tenant_id=os.getenv("ADLSGEN2_TENANT", "<my_tenant_id>") # tenant id of service principal
client_id=os.getenv("ADLSGEN2_CLIENTID", "<my_client_id>") # client id of service principal
client_secret=os.getenv("ADLSGEN2_CLIENT_SECRET", "<my_client_secret>") # the secret of service principal
adlsgen2_datastore = Datastore.register_azure_data_lake_gen2(workspace=ws,
datastore_name=adlsgen2_datastore_name,
account_name=account_name, # ADLS Gen2 account name
filesystem='test', # ADLS Gen2 filesystem
tenant_id=tenant_id, # tenant id of service principal
client_id=client_id, # client id of service principal
client_secret=client_secret) # the secret of service principal
# +
# Connect to default datastore
# datastore = ws.get_default_datastore()
#or connect to the external blob_store
datastore = blob_datastore
target_path = 'energy'
# Upload train data
# NOTE(review): `train_path` / `inference_path` are local directories defined
# earlier in the notebook (outside this cell).
ds_train_path = target_path + '_train'
datastore.upload(src_dir=train_path, target_path=ds_train_path, overwrite=True)
# Upload inference data
ds_inference_path = target_path + '_inference'
datastore.upload(src_dir=inference_path, target_path=ds_inference_path, overwrite=True)
# -
# ### *[Optional]* If data is already in Azure: create Datastore from it
#
#
#
# <div style="color:red">
# If your data is already in Azure you don't need to upload it from your local machine to the default datastore. Instead, you can create a new Datastore that references that set of data.
# The following is an example of how to set up a Datastore from a container in Blob storage where the sample data is located.
#
# In this case, the orange juice data is available in a public blob container, defined by the information below. In your case, you'll need to specify the account credentials as well. For more information check [the documentation](https://docs.microsoft.com/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-blob-container-workspace--datastore-name--container-name--account-name--sas-token-none--account-key-none--protocol-none--endpoint-none--overwrite-false--create-if-not-exists-false--skip-validation-false--blob-cache-timeout-none--grant-workspace-access-false--subscription-id-none--resource-group-none-).
# </div>
# + tags=["automl"]
# blob_datastore_name = "automl_many_models"
# container_name = "automl-sample-notebook-data"
# account_name = "automlsamplenotebookdata"
# + tags=["automl"]
# from azureml.core import Datastore
# datastore = Datastore.register_azure_blob_container(
# workspace=ws,
# datastore_name=blob_datastore_name,
# container_name=container_name,
# account_name=account_name,
# create_if_not_exists=True
# )
# if 0 < dataset_maxfiles < 11973:
# ds_train_path = 'oj_data_small/'
# ds_inference_path = 'oj_inference_small/'
# else:
# ds_train_path = 'oj_data/'
# ds_inference_path = 'oj_inference/'
# -
# ## 4.0 Register dataset in AML Workspace
#
# The last step is creating and registering [datasets](https://docs.microsoft.com/azure/machine-learning/concept-data#datasets) in Azure Machine Learning for the train and inference sets.
#
# Using a [FileDataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.file_dataset.filedataset) is currently the best way to take advantage of the many models pattern, so we create FileDatasets in the next cell. We then [register](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets#register-datasets) the FileDatasets in your Workspace; this associates the train/inference sets with simple names that can be easily referred to later on when we train models and produce forecasts.
# +
from azureml.core.dataset import Dataset
# Create file datasets
# validate=False skips checking that the datastore paths exist at creation time.
ds_train = Dataset.File.from_files(path=datastore.path(ds_train_path), validate=False)
ds_inference = Dataset.File.from_files(path=datastore.path(ds_inference_path), validate=False)
# Register the file datasets
# create_new_version=True bumps the dataset version instead of failing when
# a dataset of the same name is already registered.
dataset_name = 'energy50'
train_dataset_name = dataset_name + '_train'
inference_dataset_name = dataset_name + '_inference'
ds_train.register(ws, train_dataset_name, create_new_version=True)
ds_inference.register(ws, inference_dataset_name, create_new_version=True)
# -
# ## 5.0 *[Optional]* Interact with the registered dataset
#
# After registering the data, it can be easily called using the command below. This is how the datasets will be accessed in future notebooks.
# Retrieve the registered training dataset by name from the workspace.
energy_ds = Dataset.get_by_name(ws, name=train_dataset_name)
energy_ds
# It is also possible to download the data from the registered dataset:
# take(5) limits the download to the first five files of the dataset.
download_paths = energy_ds.take(5).download()
download_paths
# Let's load one of the data files to see the format:
# +
import pandas as pd
sample_data = pd.read_csv(download_paths[0])
sample_data.head(10)
# -
# ## Next Steps
#
# Now that you have created your datasets, you are ready to move to one of the training notebooks to train and score the models:
#
# - Automated ML: please open [02_AutoML_Training_Pipeline.ipynb](Automated_ML/02_AutoML_Training_Pipeline/02_AutoML_Training_Pipeline.ipynb).
# - Custom Script: please open [02_CustomScript_Training_Pipeline.ipynb](Custom_Script/02_CustomScript_Training_Pipeline.ipynb).
| 01_Data_Preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 09:50:11 2021
@author: anwar
"""
from flask import request
from flask import Flask, jsonify
import json
import requests
import subprocess
from launchAlgo import transform
from flask_restful import Api
from flask_cors import CORS
app = Flask(__name__)
api = Api(app)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
from instance.Instance import Instance
from Problem.Instance_from_Json import createInstance
# Build the in-memory cluster model from the JSON instance description.
instance=Instance()
MyInstance=createInstance(instance)
# Prometheus base URL; filled in later by the /geturl/ endpoint.
url=''
# Name of the swarm leader node. NOTE(review): the loop has no break, so if
# several nodes report "Leader" the last one wins — presumably there is
# only ever one leader.
master=''
for node in MyInstance.nodes:
    if node.Status=="Leader":
        master=node.name
@app.route('/getjson/', methods=['GET'])
def getjson():
    """Serve the current cluster instance description (data.json) as JSON."""
    # Fixed: the original opened the file without ever closing it; a
    # context manager guarantees the handle is released.
    with open(r"instanceExamples/data.json") as f:
        data = json.load(f)
    response = app.response_class(
        response=json.dumps(data),
        mimetype='application/json'
    )
    return response
@app.route('/getenergy/', methods=['GET'])
def getcsv():
    """Serve the energy measurements file (energy.json) as JSON."""
    # Fixed: the original opened the file without ever closing it; a
    # context manager guarantees the handle is released.
    with open(r"./energy.json") as f:
        data = json.load(f)
    response = app.response_class(
        response=json.dumps(data),
        mimetype='application/json'
    )
    return response
# @app.route('/geturl/', methods=['GET'])
# def geturl():
# response = app.response_class(
# response=json.dumps(url),
# mimetype='application/json'
# )
# return response
@app.route('/geturl/', methods=['POST'])
def geturl():
    """Store the Prometheus base URL posted by the client in the module-global `url`."""
    global url
    payload = json.loads(request.get_data())
    print('content', str(payload))
    url = str(payload)
    return 'url posted'
@app.route('/update/', methods=['POST'])
def events():
    """Replace data.json with the posted instance description.

    After writing the posted content, the file is re-read and every node
    with max_power_consumption == 0 is marked as deactivated before the
    file is written back.
    """
    content = request.get_data()
    c = json.loads(content)
    with open(r"instanceExamples/data.json", 'w') as f:
        json.dump(c, f)
    with open(r"./instanceExamples/data.json", "r") as file:
        data = json.load(file)
    for node in data['nodes']:
        if node['max_power_consumption'] == 0:
            node['activated'] = "false"
    # Fixed: the original nested two `with open(..., "w")` blocks on the
    # same path, opening and truncating the file twice; one writer suffices.
    with open(r"./instanceExamples/data.json", "w") as file:
        json.dump(data, file)
    return 'JSON posted'
@app.route('/getStatus/', methods=['POST'])
def getStatus():
    """Receive node-status rows, apply them via update(), and echo them back."""
    status_rows = json.loads(request.get_data())
    print(status_rows)
    update(status_rows)
    return json.dumps(status_rows)
# with open(r"./instanceExamples/data.json", "r") as file:
# data= json.load(file)
# data['objectives']=c
# with open(r"./instanceExamples/data.json", "w") as file:
# json.dump(data, file)
# return json.dumps(c)
def update(result):
    """Apply status rows to data.json and promote reachable nodes.

    Each row is assumed to be a sequence where row[0] is the node name and
    row[2] its manager status (TODO confirm against /getStatus/ callers).
    Nodes reported 'Reachable' are promoted to swarm managers via
    docker-machine ssh on the leader.
    """
    with open(r"./instanceExamples/data.json", "r") as file:
        data= json.load(file)
    for row in result:
        for node in data['nodes']:
            if(node['name']==row[0]):
                node['Manager Status']=row[2]
                if(row[2]=='Reachable'):
                    #cmd = ('docker-machine ssh manager docker node promote '+str(node['name'])).split()
                    #cmd = ('docker-machine ssh '+str(Instance.nodes[0].name)+' docker node promote '+str(node['name'])).split()
                    cmd = ('docker-machine ssh '+str(master)+' docker node promote '+str(node['name'])).split()
                    print(cmd)
                    # Blocks until the promote command finishes.
                    p = subprocess.Popen(cmd)
                    output, errors = p.communicate()
    with open(r"./instanceExamples/data.json", "w") as file:
        json.dump(data, file)
@app.route('/getweights/', methods=['POST'])
def weights():
    """Persist the objective weights posted by the UI into data.json."""
    posted = json.loads(request.get_data())
    path = r"./instanceExamples/data.json"
    with open(path, "r") as fh:
        data = json.load(fh)
    data['objectives'] = posted
    with open(path, "w") as fh:
        json.dump(data, fh)
    return json.dumps(posted)
@app.route('/UpdateStatus/', methods=['GET'])
def get_nodes():
    """Refresh node availability from `docker node ls` run on the leader.

    Dumps the command output to ./test3.txt, parses it, toggles each
    node's 'activated' flag in data.json when its state changed, and
    re-runs the placement optimisation (new_approach) on every change.
    Returns a warning message if the original leader became unreachable,
    otherwise an empty string.
    """
    machines=[]
    print(master)
    with open(r"./test3.txt",'w') as file :
        #cmd = ('docker-machine ssh manager docker node ls').split()
        cmd = ('docker-machine ssh '+str(master)+ ' docker node ls').split()
        #cmd = ('ssh pi@'+str(master)+ ' docker node ls').split()
        p = subprocess.Popen(cmd,stdout=file)
        output, errors = p.communicate()
    info=[]
    with open(r"./test3.txt",'r') as file:
        for line in file:
            # Drop the '*' that marks the current node so the columns of
            # every row split identically.
            line=line.replace("*",'')
            groupe=line.split()
            info.append(groupe)
            machines.append(groupe[1])
    # The first parsed row is the `docker node ls` column header.
    del machines[0]
    print(info)
    del(info[0])
    for i,node_info in enumerate(info):
        # node_info[2] is the STATUS column (Ready/Down).
        # NOTE(review): assumes output row order matches MyInstance.nodes
        # order — confirm.
        print(node_info[2])
        if (node_info[2]=='Down'and MyInstance.nodes[i].activated=='true'):
            print(node_info)
            with open(r"./instanceExamples/data.json", "r") as file:
                data= json.load(file)
            data['nodes'][i]['activated']="false"
            with open(r"./instanceExamples/data.json", "w") as file:
                json.dump(data, file)
            new_approach()
        if (node_info[2]=='Ready' and MyInstance.nodes[i].activated=='false'):
            print(node_info)
            with open(r"./instanceExamples/data.json", "r") as file:
                data= json.load(file)
            data['nodes'][i]['activated']="true"
            with open(r"./instanceExamples/data.json", "w") as file:
                json.dump(data, file)
            new_approach()
    # NOTE(review): info[1][4] looks like the MANAGER STATUS column of the
    # second data row; 'Unreachable' is taken to mean the leader is down —
    # confirm the intended row/column.
    if (info[1][4]=='Unreachable'):
        msg="Leader is down, another manager has been selected!"
        with open(r"./instanceExamples/data.json", "r") as file:
            data= json.load(file)
        data['nodes'][0]['activated']="false"
        with open(r"./instanceExamples/data.json", "w") as file:
            json.dump(data, file)
        return(jsonify(msg))
    else:
        #print(groupe)
        msg=""
        #print(msg)
        return(jsonify(msg))
@app.route('/default/', methods=['GET'])
def get():
    """Redeploy the initial docker-compose stack on the leader node."""
    # Fixed: the original command was 'docker-machine <master> docker ...',
    # missing the 'ssh' subcommand that every other docker-machine call in
    # this module uses; docker-machine rejects a node name as a subcommand.
    cmd = ('docker-machine ssh '+str(master)+' docker stack deploy --compose-file initial-docker-compose.yml p').split()
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, errors = p.communicate()
    print(output)
    print(errors)
    result = jsonify("done")
    return (result)
# @app.route('/getcpu/', methods=['GET'])
# def get_cpu_per_container():
# Instance=createInstance(instance)
# for node in Instance.nodes:
# r = requests.get(url+'api/v1/query?query=sum(irate(container_cpu_usage_seconds_total%7Bcontainer_label_com_docker_swarm_node_id%3D~"'+str(node.cluster_id[0])+'"%2C%20id%3D~"%2Fdocker%2F.*"%7D%5B5m%5D))%20by%20(name)%20*%20100%20&g0.tab=1')
# for metric in json.loads(r.text)['data']['result']:
# name=(metric['metric']['name'])
# with open(r"./instanceExamples/data.json", "r") as file:
# data= json.load(file)
# for con in data['containers']:
# if (con['name'] in name):
# con['cpu_usage']=float(metric['value'][1])
# with open(r"./instanceExamples/data.json", "w") as file:
# json.dump(data, file)
# return (jsonify('done'))
@app.route('/getmem/', methods=['GET'])
def get_mem_per_container():
    """Poll Prometheus and refresh memory/CPU metrics in data.json.

    Three passes over the cluster nodes:
      1. per-container average memory usage over the last 5 min (bytes/1024/1024),
      2. per-node total memory ('Maxmem', bytes/1000/1000),
      3. per-container CPU usage (%) over the last 5 min.
    Each metric value is written back into ./instanceExamples/data.json.
    NOTE(review): data.json is re-read and re-written once per metric row;
    batching the writes would be cheaper — confirm before changing.
    """
    for node in MyInstance.nodes:
        # avg_over_time(container_memory_usage_bytes{...}[5m]) / 1024 / 1024
        r = requests.get(url+'api/v1/query?query=avg_over_time(container_memory_usage_bytes%7Bcontainer_label_com_docker_swarm_node_id%3D~"'+str(node.cluster_id[0])+'"%2C%20id%3D~"%2Fdocker%2F.*"%7D%5B5m%5D)%2F1024%2F1024&g0.tab=1')
        for metric in json.loads(r.text)['data']['result']:
            name=(metric['metric']['name'])
            with open(r"./instanceExamples/data.json", "r") as file:
                data= json.load(file)
            # Substring match: Prometheus container names embed the short name.
            for con in data['containers']:
                if (con['name'] in name):
                    con['mem_usage']=float(metric['value'][1])
            with open(r"./instanceExamples/data.json", "w") as file:
                json.dump(data, file)
    for node in MyInstance.nodes:
        #print(node.cluster_id)
        # sum(node_memory_MemTotal_bytes * on(instance) group_left node_meta{...}) / 1e6
        r = requests.get(url+'api/v1/query?query=sum(node_memory_MemTotal_bytes%20*%20on(instance)%20group_left(node_name)%20node_meta%7Bnode_id%3D~"'+str(node.cluster_id[0])+'"%7D)%2F1000%2F1000&g0.tab=1')
        #print(json.loads(r.text)['data']['result'][0]['value'][1])
        with open(r"./instanceExamples/data.json", "r") as file:
            data= json.load(file)
        for con in data['nodes']:
            if (con['name'] == node.name):
                con['Maxmem']=float(json.loads(r.text)['data']['result'][0]['value'][1])
        with open(r"./instanceExamples/data.json", "w") as file:
            json.dump(data, file)
    for node in MyInstance.nodes:
        # sum(irate(container_cpu_usage_seconds_total{...}[5m])) by (name) * 100
        r = requests.get(url+'api/v1/query?query=sum(irate(container_cpu_usage_seconds_total%7Bcontainer_label_com_docker_swarm_node_id%3D~"'+str(node.cluster_id[0])+'"%2C%20id%3D~"%2Fdocker%2F.*"%7D%5B5m%5D))%20by%20(name)%20*%20100%20&g0.tab=1')
        for metric in json.loads(r.text)['data']['result']:
            name=(metric['metric']['name'])
            with open(r"./instanceExamples/data.json", "r") as file:
                data= json.load(file)
            for con in data['containers']:
                if (con['name'] in name):
                    con['cpu_usage']=float(metric['value'][1])
            with open(r"./instanceExamples/data.json", "w") as file:
                json.dump(data, file)
    return (jsonify('done'))
# @app.route('/getMaxmem/', methods=['GET'])
# def get_Maxmem_penode():
# Instance=createInstance(instance)
# for node in Instance.nodes:
# #print(node.cluster_id)
# r = requests.get(url+'api/v1/query?query=sum(node_memory_MemTotal_bytes%20*%20on(instance)%20group_left(node_name)%20node_meta%7Bnode_id%3D~"'+str(node.cluster_id[0])+'"%7D)%2F1000%2F1000&g0.tab=1')
# #print(json.loads(r.text)['data']['result'][0]['value'][1])
# with open(r"./instanceExamples/data.json", "r") as file:
# data= json.load(file)
# for con in data['nodes']:
# if (con['name'] == node.name):
# con['Maxmem']=float(json.loads(r.text)['data']['result'][0]['value'][1])
# with open(r"./instanceExamples/data.json", "w") as file:
# json.dump(data, file)
# return (jsonify('done'))
@app.route('/newapproach/', methods=['GET'])
def new_approach():
    """Re-run the placement optimisation and redeploy the stack.

    Rebuilds the instance from the JSON description, runs transform() (the
    solver; presumably it also writes updated-docker-compose.yml — TODO
    confirm), appends the run time to exec.json for the /getexectime/
    endpoint, and redeploys the stack on the leader via ssh.
    Returns the solver execution time as JSON.
    """
    Myinst= Instance()
    Myinstance=createInstance(Myinst)
    time=transform(Myinstance)
    # Record problem size and solver runtime.
    y = {"containers":len(Myinstance.containers),
         "exectime": round(time, 2)
        }
    with open(r"./instanceExamples/exec.json", "r") as file:
        data= json.load(file)
    data["times"].append(y)
    with open(r"./instanceExamples/exec.json", "w") as file:
        json.dump(data, file)
    #cmd = ('docker-machine ssh manager docker stack deploy --compose-file updated-docker-compose.yml p ').split()
    #cmd = ('docker-machine ssh '+str(Instance.nodes[0].name)+' docker stack deploy --compose-file updated-docker-compose.yml p ').split()
    cmd = ('ssh pi@'+str(master)+' docker stack deploy --compose-file updated-docker-compose.yml p ').split()
    p = subprocess.Popen(cmd)
    output, errors = p.communicate()
    # print(output)
    # print(errors)
    # ssh.connect(hostname='ec2-54-87-55-164.compute-1.amazonaws.com', username='ubuntu',pkey=privkey)
    # #ssh.connect("ubuntu@ec2-54-87-55-164.compute-1.amazonaws.com",)
    # stdin, stdout, stderr=ssh.exec_command('rmdir test')
    # print (stdout.read())
    # print(stderr.read())
    result=jsonify(time)
    return (result)
@app.route('/getexectime/', methods=['GET'])
def gettimes():
    """Return the recorded optimisation execution times as JSON."""
    # Fixed: the original opened the file without ever closing it; a
    # context manager guarantees the handle is released.
    with open(r"./instanceExamples/exec.json", "r") as f:
        data = json.load(f)
    response = app.response_class(
        response=json.dumps(data),
        mimetype='application/json'
    )
    return response
# Development entry point: serve the API on port 5002 (default host).
if __name__ == '__main__':
    app.run(port='5002')
# -
| source-code/scheduling-container-in-python/Constrained Approach/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ky13-troj/ml/blob/main/pre_procssing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="i-rjropSh-uF"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="LZA-Ydq0K-km"
# Load the house-price data (first row is the header).
df = pd.read_csv('/content/drive/MyDrive/M.L/Data Files/1. ST Academy - Crash course and Regression files/House_Price.csv', header=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 217} id="iNslZQUMLGEX" outputId="9a44c745-0bc9-4de3-8a60-1c33ee21e725"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="-b-TRsgULHzQ" outputId="6490dd0c-0bd1-4d82-8112-5c55bba44597"
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="hu0yeHZAMVcJ" outputId="6d20fa3f-533d-42f5-8d59-69391aabc2bb"
df.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 476} id="eqJqTZmCR_uI" outputId="5a755dae-a5e7-4a94-b6db-f1ab831be151"
# Scatter + marginal distributions to spot outliers in n_hot_rooms.
sns.jointplot(x='n_hot_rooms', y='price', data=df)
'''
Here we can see most pof the value lies in range 0-20 but
there are two points that lies in betweeen 80 to 100.
and that eans they are outliers.
'''
# + colab={"base_uri": "https://localhost:8080/", "height": 476} id="sVnKZt_iTOo8" outputId="0cb3b398-2130-46ff-f3e0-7261501a9ea4"
# Same check for rainfall.
sns.jointplot(x="rainfall", y='price', data=df)
'''
most of our values lie in the range of 20 to 60 but
ther's only on epoint that lies in 0 - 5 or 6.
imples that its an outlier.
'''
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="WviFKvFxUk1Q" outputId="1ea2da72-6530-4c42-ab68-46e56f8245f6"
#catagorical variable:
sns.countplot(x="airport", data=df)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="05urmTO1VBn3" outputId="dd50feeb-0794-4960-b402-2d3ba0927bb8"
sns.countplot(x="waterbody", data=df)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="hW-tt3dZVQMu" outputId="298ac6cf-b842-40c1-be30-78579bfbbb59"
sns.countplot(x="bus_ter", data=df)
# + [markdown] id="nXWgP6VdWRKc"
# 1. Missing values in n_hos_beds
# 2. Skewness or outliers in crime_rate
# 3. outliers in n_hot_rooms and rainfall
# 4. bus_term has taken only one value
#
# > Indented block
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="dd9ILQXdVX0b" outputId="a9461dee-ad01-486e-f111-15a278c6733f"
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="l9CYIElWbFRN" outputId="09236ca6-c447-41c4-85f8-9a5b00f635c6"
# 99th percentile of n_hot_rooms = upper outlier threshold (array of one).
np.percentile(df.n_hot_rooms,[99])
# + colab={"base_uri": "https://localhost:8080/"} id="J6FPAo34bkmA" outputId="e246febb-e258-4f30-cd10-e77d8c2c5f46"
np.percentile(df.n_hot_rooms,[99])[0]
# + id="Tw850oDNbxxU"
up = np.percentile(df.n_hot_rooms,[99])[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="80On15uNb4LP" outputId="97cbb99c-e480-4d56-a00c-c49fd001d140"
# Inspect the rows above the threshold before capping them.
df[(df.n_hot_rooms > up)]
# + colab={"base_uri": "https://localhost:8080/"} id="U21pm7mncCLL" outputId="cc07066d-7df6-4df7-cd92-d360e57888e0"
# Cap the n_hot_rooms outliers at 3x the 99th percentile.
# Fixed: the original used chained indexing (df.n_hot_rooms[mask] = ...),
# which raises SettingWithCopyWarning and is not guaranteed to write back
# to df under pandas copy-on-write; .loc assigns in place unambiguously.
df.loc[df.n_hot_rooms > 3*up, 'n_hot_rooms'] = 3*up
# + colab={"base_uri": "https://localhost:8080/"} id="vw9bClK5ckda" outputId="fcf3df51-255d-4413-e221-93cfb11e8e2e"
# 1st percentile of rainfall = lower outlier threshold.
np.percentile(df.rainfall,[1])
# + id="fuCe6FO4dLj9"
lp = np.percentile(df.rainfall,[1])[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 97} id="loR33SgodRAW" outputId="8c29be8d-45e5-4c05-861f-4e064aeed352"
# Inspect the rows below the threshold before flooring them.
df[(df.rainfall<lp)]
# + colab={"base_uri": "https://localhost:8080/"} id="RAMiFPm3dYS7" outputId="7d062a5d-2c72-41e6-c954-47bfa83cdb66"
# Floor the rainfall outliers at 0.3x the 1st percentile.
# Fixed: chained indexing replaced with .loc (see pandas "view vs copy");
# the original form may silently fail to update df.
df.loc[df.rainfall < 0.3*lp, 'rainfall'] = 0.3*lp
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="YPCfRcVIduC4" outputId="205c5ed1-5b34-4cc0-dffa-6e9bc7166e99"
sns.jointplot(x="crime_rate", y="price", data = df)
# + id="CR_SV6YkeDmA" colab={"base_uri": "https://localhost:8080/", "height": 307} outputId="6bf6a7a1-d333-4855-b127-17f40d7a8e33"
df.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="E8abIb3AzS9r" outputId="d6026654-2d77-45f9-81e6-3ddc3682ff38"
df.info()
# + id="xWgbJtTh2hUE"
# Fill the missing hospital-bed counts with the column mean.
df.n_hos_beds = df.n_hos_beds.fillna(df.n_hos_beds.mean())
# + colab={"base_uri": "https://localhost:8080/"} id="nJsuAAum3H6w" outputId="495b622f-dd13-40b0-d579-bdabd19271f0"
df.info()
# + id="gZer69ti3Kp2"
#df = df.fillna(df.mean())
# + [markdown] id="EyA6w71i8KrG"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="t1o_EdczXNJ8" outputId="a82085e6-2d5e-4677-9633-1525816b9642"
sns.jointplot(x="crime_rate", y ="price", data=df)
# + id="ohTfVrnYXVVO"
# Log-transform to reduce the right skew seen in the plot above
# (1 + x keeps zero crime rates finite).
df.crime_rate = np.log(1+df.crime_rate)
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="KIplAoI7Xxzf" outputId="1b3fff00-847a-481d-a937-3b40ae2712d3"
sns.jointplot(x="crime_rate", y ="price", data=df)
# + id="9akld00mX0yV"
# Collapse the four distance columns into their mean.
df['avg_dist'] = (df.dist1+df.dist2+df.dist3+df.dist4)/4
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="hkgnInoVYdsJ" outputId="c444c478-8a4d-4c4e-c785-ca33c0cf9cb7"
df.describe()
# + id="AsdU9ivgYf-h"
del df['dist1']
# + id="7tSpjmV7jNlT"
del df['dist2']
# + id="hOYvO4Wnk8M0"
del df['dist3']
del df['dist4']
# + id="0hEjaPpylCMT"
# bus_ter carried a single value (see the countplot above), so it is useless.
del df['bus_ter']
# + colab={"base_uri": "https://localhost:8080/", "height": 217} id="8tnAjFVdlJPm" outputId="9470a863-2877-450a-f34d-d6a9fc3d8780"
df.head()
# + id="FuOImPOFlKmM"
# One-hot encode the remaining categorical columns.
df = pd.get_dummies(df)
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="dkTX0eiXd6JU" outputId="3c0b1ec0-8ebd-49e2-94af-c96f6e1c75ed"
df.head()
# + id="Y-Z2YNJ5d8Zw"
# Drop one dummy per category to avoid the dummy-variable trap.
del df['airport_NO']
del df['waterbody_None']
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="jPBpGiSteod6" outputId="82c669dc-f304-4716-97b2-e0c04b107713"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 611} id="PBv9d7jGeqGe" outputId="dc4eea83-cc7b-4383-c516-47356ab2f718"
df.corr()
# + id="jPMYSDljh6sT"
# Presumably dropped for high correlation with another feature — TODO confirm
# against the correlation matrix above.
del df['parks']
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="MGinaoeRjOC3" outputId="5bc5dea8-97fa-48f2-d2b4-92714e39d7cb"
df.head()
# + id="OiKRuq8gjQMU"
| pre_procssing.ipynb |
'''
Kรผtรผphaneleri Import Etme
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
'''
Veriyi yรผkleme
'''
df = pd.read_csv("data.csv")
'''
Veriye Genel Bakฤฑล
'''
df.head()
df.info()
df.columns
df.drop(columns=["id", "Unnamed: 32"], axis=1, inplace=True)
df = df.rename(columns={"diagnosis": "target"})
'''
Veri Gรถrselleลtirme
'''
sns.pairplot(df, vars=['radius_mean', 'texture_mean', 'perimeter_mean','area_mean', 'smoothness_mean', 'compactness_mean'], hue='target')
plt.show()
# Class balance of the target labels.
# Fixed: pass the column by keyword — seaborn >= 0.12 made countplot's data
# arguments keyword-only, and this matches the x=/data= style used for the
# other countplot calls in this codebase.
sns.countplot(x="target", data=df)
plt.show()
# Encode the diagnosis labels numerically: malignant ("M") -> 1, benign -> 0.
df.target = [1 if diagnosis == "M" else 0 for diagnosis in df.target]
# +
# Bagฤฑmlฤฑ deฤiลkenimizle sadece yรผksek kolerasyonu (iliลkisi) olan kolonlara bakalฤฑm.
treshold = 0.5 # kolerasyonu 0.75den yรผksek olanlarฤฑ grafiฤini รงizdirelim
corr_matrix=df.corr()
filtre = np.abs(corr_matrix['target'])>treshold # np.abs mutlak deฤerini alarak รงalฤฑลฤฑr
corr_feature = corr_matrix.columns[filtre].tolist() # Belirlediฤimiz treshold(eลik deฤer)'i geรงen รถzellikleri listeye รงevirdik
corr=df[corr_feature].corr()
plt.figure(figsize=(18,10))
sns.heatmap(corr, annot=True, linecolor="w", linewidths=.5, fmt=".1f")
plt.show()
# +
'''
Verimizi modelimiz iรงin baฤฤฑmlฤฑ(hedef) ve baฤฤฑmsฤฑz deฤiลkenlere ayฤฑrma
'''
x = df.drop(columns=["target"], axis = 1)
y = df["target"]
# +
'''
Verimizi eฤitim(train) ve test(test-validation) olarak ayฤฑrma
'''
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
# +
'''
Makine รฤrenmesi Modelimizi Eฤitme
'''
from sklearn.svm import SVC
svc_model = SVC()
svc_model.fit(x_train, y_train)
# -
'''
Modelimizi Doฤrulama(Validation)
'''
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
y_predictions = svc_model.predict(x_test)
y_predictions
cm = confusion_matrix(y_test, y_predictions)
print(cm)
accuracy_score(y_test, y_predictions)
print(classification_report(y_test, y_predictions))
'''
Modelimizi Geliลtirme 1. Kฤฑsฤฑm (Model Improving)
'''
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
svc_model_normalized = SVC()
svc_model_normalized.fit(x_train_scaled, y_train)
y_predict = svc_model_normalized.predict(x_test_scaled)
cm = confusion_matrix(y_test, y_predict)
print(cm)
accuracy_score(y_test, y_predict)
print(classification_report(y_test, y_predict))
# +
'''
Modelimizi Geliลtirme 2. Kฤฑsฤฑm (Model Improving)
'''
param_grid = {"C": [0.1, 1, 10, 100], "gamma": [1, 0.1, 0.01, 0.001], "kernel": ["rbf"]}
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(SVC(), param_grid, refit = True, verbose = 4)
grid.fit(x_train_scaled, y_train)
# -
grid.best_params_
grid_predict = grid.predict(x_test_scaled)
cm = confusion_matrix(y_test, grid_predict)
print(cm)
accuracy_score(y_test, grid_predict)
print(classification_report(y_test, grid_predict))
| gogus-kanseri-teshisi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Messtechnik HS2021 - Tutorial 8
# ## Aufgabe 1: Fredholmintegral und Tikhonov-Regularisierung
# ---------
# Abstandsmessungen zwischen zwei paramagnetischen Zentren (Elektronenspins mit $S = 1/2$) werden in der EPR-Spektroskopie mittels der DEER-Pulssequenz gemacht. Ein Fredholmintegral erster Ordnung beschreibt das experimentell gemessene Signal
#
# $$ S(t) = \int_0^{\rm t_{max}} K(t,r) P(r) dr $$
#
# wobei $K(t,r)$ das dipolare Kernel ist, welches das Zeitsignal mit der Abstandsverteilung $P(r)$ verbindet. In der Realität sind das experimentelle Signal $\mathbf{S}$ und die Abstandsverteilung $\mathbf{P}$ Vektoren der Länge $n$ und $m$. Das dipolare Kernel $\mathrm{K}$ ist folglich eine Matrix mit der Dimension $ m \times n $. Das Zeitsignal wird somit als lineare Matrixoperation beschrieben:
#
# $$ \mathbf{S} = \mathrm{K} \mathbf{P} $$
#
#
# ---------
# ### 1a)
# Importieren Sie das package `deerlab` und laden Sie die experimentellen Zeitachse und Daten, indem Sie die Datei `DEER_signal.npz` mit der Numpyfunktion `load()` verwenden.
# ### 1b)
# Definieren Sie eine linear Distanzachse $\mathrm{r}$ im Bereich $[1.5,10]\, {\rm nm}$ mit gleicher Anzahl Punkte wie $t$ und berechnen Sie mit der Deerlab-Funktion `dipolarkernel()` das passende dipolare Kernel $\mathbf{K}$ fรผr die Zeitachse $\mathbf{t}$ und Distanzachse $\mathbf{r}$.
#
# ### 1c)
# Berechnen Sie die Konditionszahl der dipolaren Kernelmatrix $\mathbf{K}$ und kommentieren Sie das Resultat in Bezug auf das weitere Fittingvorgehen.
# ### 1d)
# Berechnen Sie die Abstandsverteilung $\mathbf{P}$ durch Inversion der Kernelmatrix anhand der Gleichung:
#
# $$ \mathbf{P} = \mathbf{K}^{-1} \mathbf{S} $$
#
# Stellen Sie das Signal und die erhaltene Abstandsverteilung graphisch dar. Kommentieren Sie das Resultat der Abstandsverteilung und vergleichen Sie es mit der realen Abstandsverteilung ($\mathbf{r}_\text{truth}$, $\mathbf{P}_\text{truth}$), welche Sie auch in `DEER_signal.npz` als Variablen r und P finden.
# ### 1e)
# Finden Sie nun mit Hilfe der Tikhonov-Regularisierung
#
# $$ \mathbf{P}_\text{opt} = \text{argmin}\left\{ \frac{1}{2} ||\mathbf{K}\mathbf{P} - \mathbf{S}||_2^2 + \frac{\alpha^2}{2} || \mathbf{LP} ||_2^2 \right\} $$
#
# eine optimale Lรถsung fรผr den Fit des experimentelle Signal, um die richtige Abstandsverteilung herauszufinden.
# Der Regularisierungsparameter $\alpha$ wรคgt die รbereinstimmung der Daten mit dem Fit mit dem Glรคtte-Penalty
#
# ($||\mathbf{LP}||_2^2$) ab.
# Je grรถsser $\alpha$ desto mehr wird das Glรคttekriterium berรผcksichtigt und je kleiner $\alpha$ desto grรถsser die Datenรผbereinstimmung mit dem Fit.
#
# Verwenden Sie fรผr das Datenfitting und Herausfinden der Abstandsverteilung die Funktion `fitregmodel()` von `deerlab`. Probieren Sie unterschiedliche Regularisierungsparameter $\alpha$ zwischen $10^{-5}$ und $10$ aus und kommentieren Sie den Einfluss auf den Signalfit wie auch auf die resultierene Abstandsverteilung.
#
# *Hinweis*:
# Verwenden Sie `help()` um die nรถtigen Inputs fรผr die `fitregmodel`-Funktion herauszufinden. Verwenden Sie `regparam='tikhonov'` um die Tikhonov-Regularisierung zu wรคhlen und geben Sie der Funktion $\alpha$ รผber `regparam=` weiter.
# ### Zusatz:
# Um eine gute Wahl fรผr den Regularisierungsparameter $\alpha$ zu treffen gibt es unterschiedliche Auswahlkriterien wie AIC, BIC, etc.
# Hier zeigen wir Ihnen die Unterschiede vom AIC, LR und srGCV Auswahlkriterium
#
# Mehr Informationen zu Auswahlskriterien fรผr die Auswertung von DEER-Messungen in der EPR finden Sie in den folgenden Papers:
# - [Journal of Magnetic Resonance 288 (2018) 58โ68](10.1016/j.jmr.2018.01.021)
# - [Journal of Magnetic Resonance 300 (2019) 28โ40](10.1016/j.jmr.2019.01.008)
#
# ## Aufgabe 2: Lorentzverteilter stochastischer Prozess
# ---------
#
# Bei einem stochastischen Prozess ist das Ergebnis einer Messung nicht vorhersehbar. Trotzdem ist es wichtig Informationen รผber diesen stochastischen Prozess zu gewinnen, indem man charakteristische Grรถssen wie die Wahrscheinlichkeitsdichtefunktion und die spektrale Leistungsdichte betrachtet. Auch Autokorrelation und Kreuzkorrelation sind wichtige Hilfsmittel um das Signal zu charakterisieren und am Ende besser zu verstehen.
#
# ---------
# ### 2a)
# Nehmen Sie an ein stochastischer Prozess $Y$ mit den Werten $y$ sei unkorreliert und lorentzverteilt mit der Wahrscheinlichkeitsdichtefunktion:
# $$ q(y) = \frac{\beta}{\pi \left(\beta^2 + y^2 \right) } $$
#
# Generieren Sie aus $N = 10^5$ gleichverteilten Zufallszahlen $x$ (siehe `numpy.random.rand`), mit der Wahrscheinlichkeitdichtefunktion-Eigenschaft
#
# $$ p(x)dx = dx \,\,\,\,\,\,\,\,{\rm wenn} \,\,\,0 < x < 1 ,$$
#
# lorentzverteilte Zufallszahlen $y$.
#
# Stellen Sie die Zufallszahlen $x$ und $y$ in Abhรคngigkeit von $N$ graphisch dar.
#
# *Hinweis*: Verwenden Sie, um die Zufallsvariabel $y(x)$ aus $p(x)$ und $q(y)$ zu berechnen, die folgende mathematische Formel:
# $$ p(x) \frac{dx}{dy} = q(y) $$
# ### 2b)
# Zeigen Sie anhand von Histogrammen (siehe `matlibplot.pyplot.hist()`), dass die Verteilung der Zufallszahlen $x$ und $y$ wirklich den Wahrscheinlichkeitsdichtefunktionen $p(x)$ und $q(y)$ folgt.
#
# *Hinweis*: Die gleichverteilte Wahrscheinlichkeitsdichtefunktion $p(x)$ kann beschrieben werden als:
# $$ p(x) = \frac{1}{B-A} \,\,\,\,\,\,\,\,{\rm wenn} \,\,\,A < x < B .$$
# ### 2c)
# Die Autokorrelation kann als Faltung eines Signals mit sich selbst betrachtet werden. Schreiben Sie eine Funktion, die รผber das Faltungstheorem die Autokorrelation eines Zeitsignals berechnet.
# ### 2d)
# Importieren Sie die Zeitachse und das zu analysierende Signal aus dem File `timesignal.npz`, indem Sie die `numpy.load()` verwenden.
# Probieren Sie mit Hilfe der Fourier Transformation und der Autokorrelationsfunktion die 2 Frequenzen des Signals herauszufinden.
| tutorial_8_regularization_autocorr/tutorial_8_blank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Files - CSV
#
# *Creating New Files and Loading/Saving Existing Ones*
#
# ---
#
# ## Author
#
# [<NAME>](mailto:<EMAIL>), [thePort.us](http://thePort.us)<br />
# Instructor of Ancient History and Digital Humanities<br />
# Department of History<br />
# [University of South Florida](https://github.com/usf-portal)
| notebooks/concepts/files/2-csv-files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Wl9lghxHbD9-" colab_type="text"
# # Linear Regression with Python Scikit Learn
#
# In this task we will predict the percentage of marks that a student is expected to score based upon the number of hours they studied. This is a simple linear regression task as it involves just two variables.
# + [markdown] id="K7iMB1skbdV5" colab_type="text"
# ## Code
# + id="zZa5T85wbaH9" colab_type="code" colab={}
# Importing all libraries required in this notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="lCnmLwZ7bccd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 360} outputId="6e725482-a6ef-4b3f-aede-c4e6582e8b85"
# Reading data from remote link (columns: Hours studied, Scores achieved)
url = "http://bit.ly/w-data"
d1 = pd.read_csv(url)
print("Data imported successfully")
d1.head(10)
# + [markdown] id="zZ5L4yIMbs7H" colab_type="text"
# **Plotting the distribution of scores**
# + id="FicE-0eKbsHm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="5d05062a-1bc0-439c-e7b3-0ec1bc4c34c9"
# Scatter plot (red triangles) to eyeball the Hours/Scores relationship
d1.plot(x='Hours', y='Scores', style='^',color="red")
plt.title('Hours Vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# + [markdown] id="Ai09qpCYb3xZ" colab_type="text"
# **Data Preparation**
# + id="GHSCBAlibm4x" colab_type="code" colab={}
# X = all columns except the last (Hours); y = second column (Scores)
X = d1.iloc[:, :-1].values
y = d1.iloc[:, 1].values
# + id="155qkyJRcFL2" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2, random_state=0)
# + [markdown] id="Bv1ZXJBXcO_G" colab_type="text"
# **Algorithm Training**
# + id="i21juOlrcN6k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dae279e0-4e77-4c89-9128-dbf0ecc23ad1"
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
print("Training complete :)")
# + id="syJe5y-ScWyd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="8ed6b2d5-1e9d-42ec-afdc-930cd62c0318"
# Plotting the regression line (y = coef * x + intercept)
line = regressor.coef_*X+regressor.intercept_
# Plotting for the test data
plt.scatter(X, y)
plt.plot(X, line,color="red");
plt.show()
# + [markdown] id="9kwy57F2cVop" colab_type="text"
# **Predictions**
# + id="-FWgijaicdbI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="ca78d584-226a-430f-a3d3-620160488441"
print(X_test) # Testing data - In Hours
y_pred = regressor.predict(X_test) # Predicting the scores
# + id="RmunK0U_dEVJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="514e6aa4-4587-4a77-96bf-a14676fd5c8e"
# Side-by-side comparison of true vs. predicted scores
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
# + id="DKhwSDxvclI-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6a5a4f50-3c9b-4f48-f713-308c9d95504d"
# Predict the score for a single student who studied 9.25 hours
regressor.predict([[9.25]])
# + id="af8IvappcnvG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c2fa8245-4cc9-4728-80b7-653170d1e965"
from sklearn.metrics import r2_score
r2_score(y_test,y_pred)
| TSF_TASK2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## CNN model performance evaluation
def financial_performance_model_short(prices, labels, short_limit_factor = 0.2):
    """
    Backtest the CNN model's predicted labels and print a trade-by-trade report.

    Prices: dataframe with true prices (reads the `close` column)
    Labels: labels predicted by model (1 = go long, -1 = sell/short, 0 = hold)
    short_limit_factor: fraction of a liquidated long position's value that may
        be sold short on a -1 signal.
    Assumption for short-selling: short positions need to be exited at end of observation period
    """
    available_capital_lst = [10000]   # running history of free cash, starts at 10,000 euros
    available_capital = 10000         # NOTE(review): unused after initialization
    transaction_fee = 5               # flat fee per trade, in euros
    fee_sum = 0
    execution_price = 0
    investment_sum = 0
    nr_shares_purchased = 0
    nr_shares_sold = 0
    nr_shares_held = [0]              # history of long holdings (units)
    nr_shares_shorted = [0]           # history of short holdings (units)
    transaction_list = [0]            # last acted-upon signal, seeded neutral
    for i, label in enumerate(labels):
        # Hold signal, or same signal as last trade: do nothing.
        if label == 0 or label == transaction_list[-1]:
            pass
        # If label = 1: go long and exit any short positions
        elif label == 1 and available_capital_lst[-1] > 0:
            #Determine execution price --> Closing price of observation day
            execution_price = prices.close.iloc[i]
            #Determine total sum available for investment --> Total available capital - transaction fee
            investment_sum = available_capital_lst[-1] - transaction_fee
            #Adjust most recent entry in available capital list: last entry minus investment sum and transaction fee
            available_capital_lst.append(available_capital_lst[-1] - investment_sum - transaction_fee)
            # Determine number of shares purchased --> total investment sum divided by execution price
            nr_shares_purchased = investment_sum / execution_price
            # Determine number of shares held --> Total number of shares purchased minus any short position if applicable
            nr_shares_held.append(nr_shares_held[-1] + nr_shares_purchased - nr_shares_shorted[-1])
            # Track transactions in the list --> add "Long" entry
            transaction_list.append(label)
            # Generate output
            # a) if there was short position: print that short position was closed and that long position was built
            if nr_shares_shorted[-1] > 0:
                print(f"Day {i}:")
                print(f"Short position closed: repurchase of {nr_shares_shorted[-1]} shares.")
                print(f"Long position built: {round(nr_shares_held[-1],2)} units. Total value: {round(nr_shares_held[-1] * execution_price,2)} euros")
                nr_shares_shorted.append(0)
            else:
                print(f"Day {i}: purchase of {round(nr_shares_purchased,2)} units for total of {round(investment_sum,2)} euros")
            fee_sum += transaction_fee
            print("")
        # If label = -1: liquidate the long position and open a limited short.
        elif label == -1 and nr_shares_held[-1] > 0:
            execution_price = prices.close.iloc[i]
            #Set number of shares of long position that is being closed
            long_position_closed = round(nr_shares_held[-1],2)
            #Set base capital for calculation of short limit based on closed long position
            short_limit_base = long_position_closed * execution_price
            #Find number of units shorted based on short_limit_base and factor:
            nr_shares_shorted.append(round(short_limit_base * short_limit_factor / execution_price,2))
            #Find total number of units sold --> Sum of closed long position and shorted units
            nr_shares_sold = long_position_closed + nr_shares_shorted[-1]
            nr_shares_held.append(0)
            sale_sum = nr_shares_sold * execution_price - transaction_fee
            available_capital_lst.append(sale_sum)
            transaction_list.append(label)
            print(f"Day {i}:")
            print(f"Total sale:{round(nr_shares_sold,2)} units for total of {round(sale_sum,2)} euros")
            print(f"Closed long position: {long_position_closed} units")
            print(f"New short position: {nr_shares_shorted[-1]} units")
            print("")
            fee_sum += transaction_fee
        # NOTE(review): fires on the final day even when no shares are shorted,
        # printing a 0-unit close — confirm intended.
        if i == (len(labels) - 1):
            # At end of observation period, short positions need to be closed
            closing_sum = round(nr_shares_shorted[-1] * prices.close.iloc[i],2)
            available_capital_lst.append(available_capital_lst[-1] - closing_sum)
            print(f"End of observation period.")
            print(f"Short position of {nr_shares_shorted[-1]} units closed for {closing_sum} euros.")
            nr_shares_shorted.append(0)
    # Final wealth = remaining cash plus the market value of any remaining longs.
    total_final_capital = available_capital_lst[-1] + nr_shares_held[-1] * prices.close.iloc[-1]
    total_return = total_final_capital / available_capital_lst[0] - 1
    print("")
    print(f"End capital on day {len(prices)}: {round(total_final_capital,2)} euros")
    print(f"Total return: {round(total_return*100,2)}%")
    print(f"Shares held at end of period: {round(nr_shares_held[-1],2)}")
    print(f"Total fee spending: {fee_sum}")
# ## Buy & Hold Performance Evaluation
def buy_hold_evaluation(data):
    """Report the outcome of investing 10,000 euros at the first close and
    holding until the last close (no fees, no rebalancing)."""
    initial_capital = 10000
    first_close = data.close.iloc[0]
    last_close = data.close.iloc[-1]
    units_bought = initial_capital / first_close
    final_capital = round(units_bought * last_close, 2)
    pct_return = round(final_capital / initial_capital - 1, 2)
    print(f"End capital: {final_capital} euros")
    print(f"Total return through Buy & Hold: {pct_return*100}%")
# ## SMA Strategy Performance evaluation
import numpy as np
import pandas as pd
def generate_SMA_signals(sma_s, sma_l, data):
    """
    Add two simple-moving-average columns and a crossover position column to *data*.

    sma_s = Number of days for Simple Moving Average short period
    sma_l = Number of days for Simple Moving Average long period
    data = Price data of asset (must have a `close` column)

    Returns *data* (mutated in place) with columns "SMA_{sma_s}_days",
    "SMA_{sma_l}_days" and "position" (1 = long when short SMA is above long
    SMA, otherwise -1). Returns None and prints a message when sma_s >= sma_l.
    """
    # Calculate SMAs
    if sma_s < sma_l:
        data[f"SMA_{sma_s}_days"] = data.close.rolling(sma_s).mean()
        data[f"SMA_{sma_l}_days"] = data.close.rolling(sma_l).mean()
        # Check crossovers and determine positions (NaN comparisons are False -> -1)
        data["position"] = np.where(data[f"SMA_{sma_s}_days"] > data[f"SMA_{sma_l}_days"], 1, -1)
        return data
    else:
        # Fixed message: the parameters are named sma_s / sma_l, not sms_s / sms_l.
        print("Simple Moving Average short period (sma_s) needs to be smaller than Simple Moving Average long period (sma_l)")
def sma_evaluation_short(data, sma_s, sma_l, short_limit_factor = 0.2):
    """
    Backtest the SMA-crossover strategy and print a trade-by-trade report.

    data: Dataframe with price data (reads the `close` column)
    sma_s / sma_l: short / long moving-average windows in days
    short_limit_factor: fraction of a liquidated long position's value that may
        be sold short on a -1 signal.
    Assumption for short-selling: short positions need to be exited at end of observation period
    """
    # NOTE(review): if sma_s >= sma_l, generate_SMA_signals returns None and the
    # loop below raises AttributeError — consider validating here.
    data = generate_SMA_signals(sma_s,sma_l, data)
    available_capital_lst = [10000]   # running history of free cash
    available_capital = 10000         # NOTE(review): unused after initialization
    transaction_fee = 5               # flat fee per trade, in euros
    fee_sum = 0
    execution_price = 0
    investment_sum = 0
    nr_shares_purchased = 0
    nr_shares_sold = 0
    nr_shares_held = [0]              # history of long holdings
    nr_shares_shorted = [0]           # history of short holdings
    transaction_list = [0]            # last acted-upon signal
    for i, position in enumerate(data.position):
        # Same signal as last trade: record it and do nothing.
        if position == transaction_list[-1]:
            transaction_list.append(position)
            pass
        # If position = 1: go long and exit any short positions
        elif position == 1 and available_capital_lst[-1] > 0:
            #Determine execution price --> Closing price of observation day
            execution_price = data.close.iloc[i]
            #Determine total sum available for investment --> Total available capital - transaction fee
            investment_sum = available_capital_lst[-1] - transaction_fee
            #Adjust most recent entry in available capital list: last entry minus investment sum and transaction fee
            available_capital_lst.append(available_capital_lst[-1] - investment_sum - transaction_fee)
            # Determine number of shares purchased --> total investment sum divided by execution price
            nr_shares_purchased = investment_sum / execution_price
            # Determine number of shares held --> Total number of shares purchased minus any short position if applicable
            nr_shares_held.append(nr_shares_held[-1] + nr_shares_purchased - nr_shares_shorted[-1])
            # Track transactions in the list --> add "Long" entry
            transaction_list.append(position)
            # Generate output
            # a) if there was short position: print that short position was closed and that long position was built
            if nr_shares_shorted[-1] > 0:
                print(f"Day {i}:")
                print(f"Short position closed: repurchase of {nr_shares_shorted[-1]} shares.")
                print(f"Long position built: {round(nr_shares_held[-1],2)} units. Total value: {round(nr_shares_held[-1] * execution_price,2)} euros")
                nr_shares_shorted.append(0)
            else:
                print(f"Day {i}: purchase of {round(nr_shares_purchased,2)} units for total of {round(investment_sum,2)} euros")
            fee_sum += transaction_fee
            print("")
        # If position = -1: liquidate the long position and open a limited short.
        elif position == -1 and nr_shares_held[-1] > 0:
            execution_price = data.close.iloc[i]
            #Set number of shares of long position that is being closed
            long_position_closed = round(nr_shares_held[-1],2)
            #Set base capital for calculation of short limit based on closed long position
            short_limit_base = long_position_closed * execution_price
            #Find number of units shorted based on short_limit_base and factor:
            nr_shares_shorted.append(round(short_limit_base * short_limit_factor / execution_price,2))
            #Find total number of units sold --> Sum of closed long position and shorted units
            nr_shares_sold = long_position_closed + nr_shares_shorted[-1]
            nr_shares_held.append(0)
            sale_sum = nr_shares_sold * execution_price - transaction_fee
            available_capital_lst.append(sale_sum)
            transaction_list.append(position)
            print(f"Day {i}:")
            print(f"Total sale:{round(nr_shares_sold,2)} units for total of {round(sale_sum,2)} euros")
            print(f"Closed long position: {long_position_closed} units")
            print(f"New short position: {nr_shares_shorted[-1]} units")
            print("")
            fee_sum += transaction_fee
        # NOTE(review): fires on the final day even when no shares are shorted,
        # printing a 0-unit close — confirm intended.
        if i == (len(data) - 1):
            # At end of observation period, short positions need to be closed
            closing_sum = nr_shares_shorted[-1] * data.close.iloc[i]
            available_capital_lst.append(available_capital_lst[-1] - closing_sum)
            print("End of observation period")
            print(f"Short position of {nr_shares_shorted[-1]} units closed for {closing_sum} euros.")
            nr_shares_shorted.append(0)
    # Final wealth = remaining cash plus the market value of any remaining longs.
    total_final_capital = available_capital_lst[-1] + nr_shares_held[-1] * data.close.iloc[-1]
    total_return = total_final_capital / available_capital_lst[0] - 1
    print("")
    print(f"End capital on day {len(data)}: {round(total_final_capital,2)} euros")
    print(f"Total return: {round(total_return*100,2)}%")
    print(f"Shares held at end of period: {round(nr_shares_held[-1],2)}")
    print(f"Total fee spending: {fee_sum}")
# ## Mean-Reversion Strategy Performance Evaluation
def generate_mean_reversion_signals(sma, std_dev, data):
    """Annotate *data* in place with an SMA, Bollinger bands, and a mean-reversion
    position column (1 = long, -1 = short, 0 = neutral); returns *data*."""
    sma_col = f"SMA_{sma}_days"
    rolling_close = data.close.rolling(sma)
    data[sma_col] = rolling_close.mean()
    data["distance"] = data.close - data[sma_col]
    band_offset = rolling_close.std() * std_dev
    data["Lower_Bollinger"] = data[sma_col] - band_offset
    data["Upper_Bollinger"] = data[sma_col] + band_offset
    # Below the lower band -> oversold -> long (1); everything else undecided for now.
    signal = np.where(data.close < data.Lower_Bollinger, 1, np.nan)
    # Above the upper band -> overbought -> short (-1).
    signal = np.where(data.close > data.Upper_Bollinger, -1, signal)
    # A sign flip in the distance means the price crossed the SMA -> go neutral (0).
    signal = np.where(data.distance * data.distance.shift(1) < 0, 0, signal)
    data["position"] = signal
    # Carry the previous position forward through undecided days; start flat.
    data["position"] = data.position.ffill().fillna(0)
    return data
def mean_rev_evaluation(sma, std_dev, data, short_limit_factor = 0.2):
    """
    Backtest the Bollinger-band mean-reversion strategy and print a report.

    sma: window (days) for the simple moving average
    std_dev: band width in rolling standard deviations
    data: Dataframe with price data (reads the `close` column)
    short_limit_factor: fraction of a liquidated long position's value that may
        be sold short on a -1 signal.
    Assumption for short-selling: short positions need to be exited at end of observation period
    """
    data = generate_mean_reversion_signals(sma, std_dev, data)
    available_capital_lst = [10000]   # running history of free cash
    available_capital = 10000         # NOTE(review): unused after initialization
    transaction_fee = 5               # flat fee per trade, in euros
    fee_sum = 0
    execution_price = 0
    investment_sum = 0
    nr_shares_purchased = 0
    nr_shares_sold = 0
    nr_shares_held = [0]              # history of long holdings
    nr_shares_shorted = [0]           # history of short holdings
    transaction_list = [0]            # last acted-upon signal
    for i, position in enumerate(data.position):
        # Same signal as last trade: record it and do nothing.
        if position == transaction_list[-1]:
            transaction_list.append(position)
            pass
        # If position = 1: go long and exit any short positions
        elif position == 1 and available_capital_lst[-1] > 0:
            #Determine execution price --> Closing price of observation day
            execution_price = data.close.iloc[i]
            #Determine total sum available for investment --> Total available capital - transaction fee
            investment_sum = available_capital_lst[-1] - transaction_fee
            #Adjust most recent entry in available capital list: last entry minus investment sum and transaction fee
            available_capital_lst.append(available_capital_lst[-1] - investment_sum - transaction_fee)
            # Determine number of shares purchased --> total investment sum divided by execution price
            nr_shares_purchased = investment_sum / execution_price
            # Determine number of shares held --> Total number of shares purchased minus any short position if applicable
            nr_shares_held.append(nr_shares_held[-1] + nr_shares_purchased - nr_shares_shorted[-1])
            # Track transactions in the list --> add "Long" entry
            transaction_list.append(position)
            # Generate output
            # a) if there was short position: print that short position was closed and that long position was built
            if nr_shares_shorted[-1] > 0:
                print(f"Day {i}:")
                print(f"Short position closed: repurchase of {nr_shares_shorted[-1]} shares.")
                print(f"Long position built: {round(nr_shares_held[-1],2)} units. Total value: {round(nr_shares_held[-1] * execution_price,2)} euros")
                nr_shares_shorted.append(0)
            else:
                print(f"Day {i}: purchase of {round(nr_shares_purchased,2)} units for total of {round(investment_sum,2)} euros")
            fee_sum += transaction_fee
            print("")
        # If position = -1: liquidate the long position and open a limited short.
        elif position == -1 and nr_shares_held[-1] > 0:
            execution_price = data.close.iloc[i]
            #Set number of shares of long position that is being closed
            long_position_closed = round(nr_shares_held[-1],2)
            #Set base capital for calculation of short limit based on closed long position
            short_limit_base = long_position_closed * execution_price
            #Find number of units shorted based on short_limit_base and factor:
            nr_shares_shorted.append(round(short_limit_base * short_limit_factor / execution_price,2))
            #Find total number of units sold --> Sum of closed long position and shorted units
            nr_shares_sold = long_position_closed + nr_shares_shorted[-1]
            nr_shares_held.append(0)
            sale_sum = nr_shares_sold * execution_price - transaction_fee
            available_capital_lst.append(sale_sum)
            transaction_list.append(position)
            print(f"Day {i}:")
            print(f"Total sale:{round(nr_shares_sold,2)} units for total of {round(sale_sum,2)} euros")
            print(f"Closed long position: {long_position_closed} units")
            print(f"New short position: {nr_shares_shorted[-1]} units")
            print("")
            fee_sum += transaction_fee
        # If position = 0: Close any short and long positions
        elif position == 0:
            if nr_shares_held[-1] > 0:
                sale_sum = nr_shares_held[-1] * data.close.iloc[i] - transaction_fee
                available_capital_lst.append(available_capital_lst[-1] + sale_sum)
                fee_sum += transaction_fee
                print(f"Day {i}:")
                print(f"Went neutral. Long position closed - sold {round(nr_shares_held[-1],2)} units for {round(sale_sum,2)} euros.")
                print("")
                nr_shares_held.append(0)
                transaction_list.append(0)
            elif nr_shares_shorted[-1] > 0:
                # NOTE(review): the fee is added to fee_sum but not deducted from
                # available capital here, and nr_shares_shorted is never reset to 0,
                # so the end-of-period clean-up below may close the same short again — verify.
                buy_sum = nr_shares_shorted[-1] * data.close.iloc[i]
                available_capital_lst.append(available_capital_lst[-1] - buy_sum)
                fee_sum += transaction_fee
                print(f"Day {i}:")
                print(f"Went neutral. Short position closed - bought {round(nr_shares_shorted[-1],2)} units for {round(buy_sum,2)} euros.")
                print("")
                nr_shares_held.append(0)
                transaction_list.append(0)
        if i == (len(data) - 1) and nr_shares_shorted[-1] > 0:
            # At end of observation period, short positions need to be closed
            closing_sum = nr_shares_shorted[-1] * data.close.iloc[i]
            available_capital_lst.append(available_capital_lst[-1] - closing_sum)
            print("End of observation period")
            print(f"Short position of {nr_shares_shorted[-1]} units closed for {closing_sum} euros.")
            nr_shares_shorted.append(0)
    # Final wealth = remaining cash plus the market value of any remaining longs.
    total_final_capital = available_capital_lst[-1] + nr_shares_held[-1] * data.close.iloc[-1]
    total_return = total_final_capital / available_capital_lst[0] - 1
    print("")
    print(f"End capital on day {len(data)}: {round(total_final_capital,2)} euros")
    print(f"Total return: {round(total_return*100,2)}%")
    print(f"Shares held at end of period: {round(nr_shares_held[-1],2)}")
    print(f"Total fee spending: {fee_sum}")
| Financial Performance & Benchmarking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torchdyn
# language: python
# name: torchdyn
# ---
# ### Contributing to torchdyn and DiffEqML
# `torchdyn` is designed to be a community effort: we welcome all contributions of tutorials, model variants, numerical methods and applications related to continuous and implicit deep learning. We do not have specific style requirements, though we subscribe to many of Jeremy Howard's [ideas](https://docs.fast.ai/dev/style.html).
#
# We use `poetry` to manage requirements, virtual python environment creation, and packaging. To install `poetry`, refer to [the docs](https://python-poetry.org/docs/).
# To set up your dev environment, run `poetry install`. For example, `poetry run pytest` will then run all `torchdyn` tests inside your newly created env.
#
# `poetry` does not currently offer a way to select `torch` wheels based on desired `cuda` and `OS`, and will install a version without GPU support. For CUDA `torch` wheels,
# run `poetry run poe force_cuda11`, or add your version to `pyproject.toml`.
#
# If you wish to run `jupyter` notebooks within your newly created poetry environments, use `poetry run ipython kernel install --user --name=torchdyn` and switch the notebook kernel.
#
#
# **Choosing what to work on:** There is always [ongoing work](https://github.com/DiffEqML/torchdyn/issues) on new features, tests and tutorials. Contributing to any of the above is extremely valuable to us. If you wish to work on additional features not currently WIP, feel free to reach out on Slack or via email. We'll be available to discuss details.
#
# #### On the scope of `torchdyn` and missing features
#
# The scope of the library is currently quite large as it spans deep learning, numerical methods and differential equations. While we have attempted to design a general API for state-of-the-art approaches in the field, not everything has made its way into the library so far, and it is thus possible that certain methods or classes might have to be tuned for specific applications.
#
# We have used `torchdyn` extensively for our own research and publications, including:
# * Dissecting Neural ODEs [NeurIPS20, oral]
# * Hypersolvers [NeurIPS20]
# * Graph Neural ODEs [AAAI workshop]
# * Differentiable Multiple Shooting Methods
# * Neural Hybrid Automata
# * Optimal Energy Shaping
# * Learning Stochastic Optimal Policies via Gradient Descent [L-CSS]
| docs/contributing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Train a Spark ML linear regression on a CSV dataset.
from pyspark.sql import SparkSession
spark=SparkSession.builder.appName('test 5').getOrCreate()
# Read the CSV with a header row and let Spark infer column types.
df = spark.read.csv('dataset/test3.csv',header=True,inferSchema=True)
df.show(3)
df.printSchema()
df.columns
# Drop the imputed-MRP helper column before training.
training = df.drop('MRP_Imputed')
training.show(3)
training.columns
from pyspark.ml.feature import VectorAssembler
# Combine the feature columns into a single vector column for Spark ML.
# NOTE(review): "MRP" is used both as a feature here and as the label below,
# which is target leakage — confirm whether the feature should be "MRP_Imputed"
# or another column.
featureassembler=VectorAssembler(inputCols=["Weight","MRP"],outputCol="Independent Features")
output=featureassembler.transform(training)
output.show()
output.columns
finalized_data=output.select("Independent Features","MRP")
finalized_data.show()
from pyspark.ml.regression import LinearRegression
##train test split (75/25, random split)
train_data,test_data=finalized_data.randomSplit([0.75,0.25])
regressor=LinearRegression(featuresCol='Independent Features', labelCol='MRP')
regressor=regressor.fit(train_data)
### Coefficients
regressor.coefficients
### Intercepts
regressor.intercept
### Prediction on the held-out split
pred_results=regressor.evaluate(test_data)
pred_results.predictions.show()
pred_results.meanAbsoluteError,pred_results.meanSquaredError
| pyspark 5 ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py36]
# language: python
# name: conda-env-py36-py
# ---
# # Handling Datasets in PyTorch
#
# In this section we will explain some concepts including:
# - Datasets, Epoch, Iteration
# - Accessing popular vision datasets via torchvision
# - Approach to accessing custom datasets in vision
# ## Concepts:
#
# - Dataset: Is a collection of training examples
# - Epoch: One pass of the entire dataset through your model during training
# - Batch: A subset of training examples passed through your model at a time
# - Iteration: An iteration is a single pass of a batch
# - Here, a pass would involve a forward and backward propagation
#
# Following image will help you understand the concepts better.
# Display the diagram illustrating dataset/epoch/batch/iteration concepts.
from IPython.display import Image
Image("example_2.png")
# Now we will go through some exercises in the next file.
| Handling Datasets in PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Muzzamal-Hameed/Deep-Learning-Models/blob/main/Fake_image_detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="NSsgAWpinyBn"
from google.colab import drive
# + colab={"base_uri": "https://localhost:8080/"} id="FMXyEJ9yHI9Y" outputId="d4f50cdd-7943-4cc5-ac44-0891c25ccd89"
drive.mount('/content/gdrive')
# + id="9XDkrjWZHaND"
# ! mkdir ~/.kaggle
# + id="l5IxYziIHvky"
# ! cp kaggle.json ~/.kaggle/
# + id="ta_QzS8lHxdP"
# ! chmod 600 ~/.kaggle/kaggle.json
# + colab={"base_uri": "https://localhost:8080/"} id="WBVIAbL-IOMz" outputId="8d68c4dc-a419-4376-8b02-7e99dbcb1964"
# ! kaggle datasets download -d ciplab/real-and-fake-face-detection
# + colab={"base_uri": "https://localhost:8080/"} id="K42GgUKWIP_v" outputId="7c94d71b-eebb-4f32-873b-5e79f99c7e35"
# !pip install split-folders
# + colab={"base_uri": "https://localhost:8080/"} id="-WHy56oCISdI" outputId="54086f5e-0347-4236-9e01-b0fdd1d79322"
# ! unzip real-and-fake-face-detection.zip
# + id="9ZIKWFC4IVIU"
import matplotlib.pyplot as plt
import tensorflow
import numpy as np
import pandas as pd
import cv2
import os
import skimage.io as io
import skimage.transform as tf
import skimage.color as color
import torch
# + colab={"base_uri": "https://localhost:8080/"} id="4cI-bG7iIap5" outputId="352a4143-94fb-4f03-950b-004ca6767e06"
# !pip install haroun==0.1.1
# import my Library (Pytorch Framework)
from haroun import Data, Model, ConvPool
from haroun.augmentation import augmentation
from haroun.losses import rmse
# + id="IoMGu8DWIsJ_"
# path = pathlib.Path.cwd().parent / "input" / "real-and-fake-face-detection"
path = "/content/real_and_fake_face"
images = []
labels = []
for directory in os.listdir(path):
data_path = path +"/"+ directory
for im in os.listdir(data_path)[:]:
image = io.imread(f"{data_path}/{im}")
image = tf.resize(image, (64, 64))
images.append(image)
if directory == "training_fake":
labels.append("fake")
elif directory == "training_real":
labels.append("real")
# + id="VMpQtEfsIvUG"
images = np.array(images)
labels = np.array(labels)
images, labels = augmentation(images, labels, flip_y=True, flip_x=True, brightness=True)
# + colab={"base_uri": "https://localhost:8080/"} id="vPSv4jo3I21f" outputId="8e115a43-a97c-45ea-c4cd-d55af68ce56d"
classes = {'real': 0, 'fake': 1}
data = Data(loader=(images, labels), classes=classes)
data.shape()
# + colab={"base_uri": "https://localhost:8080/"} id="m9f4mGboJELb" outputId="782494a6-61c3-4bb2-97f3-aef053b80c61"
data.stat()
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="ETUHb4aBJEh6" outputId="35f97560-f613-4b0c-eb2e-07cb61284923"
data.show()
# + colab={"base_uri": "https://localhost:8080/"} id="cSJfelY7Jf9d" outputId="846182fc-9318-4712-f5d4-db73898df99e"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data.dataset(split_size=0.3, shuffle=True, random_state=35,
images_format=torch.float32, labels_format=torch.float32,
permute=True, one_hot=True, device=device)
# + id="342lHw8TJhVH"
# + id="342lHw8TJhVH"
class Network(torch.nn.Module):
    """CNN binary classifier (real vs. fake faces) built from haroun ConvPool
    stages followed by a batch-normalized fully-connected head."""
    def __init__(self):
        super(Network, self).__init__()
        # Normalize the 3-channel input; affine=False -> no learnable scale/shift.
        self.input_norm = torch.nn.BatchNorm2d(3, affine=False)
        # Six conv+pool stages doubling channels: 3 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256.
        # Assumes 64x64 inputs so the final feature map flattens to 256 values
        # (matches fc1's in_features) — TODO confirm ConvPool halves each dimension.
        self.layer1 = ConvPool(in_features=3, out_features=8)
        self.layer2 = ConvPool(in_features=8, out_features=16)
        self.layer3 = ConvPool(in_features=16, out_features=32)
        self.layer4 = ConvPool(in_features=32, out_features=64)
        self.layer5 = ConvPool(in_features=64, out_features=128)
        self.layer6 = ConvPool(in_features=128, out_features=256)
        self.net = torch.nn.Sequential(self.layer1, self.layer2, self.layer3,
                                       self.layer4, self.layer5, self.layer6)
        # Classifier head: 256 -> 128 -> 32 -> 8 -> 2 logits (real vs. fake).
        self.fc1 = torch.nn.Linear(in_features=256, out_features=128)
        self.bn1 = torch.nn.BatchNorm1d(128)
        self.fc2 = torch.nn.Linear(in_features=128, out_features=32)
        self.bn2 = torch.nn.BatchNorm1d(32)
        self.fc3 = torch.nn.Linear(in_features=32, out_features=8)
        self.bn3 = torch.nn.BatchNorm1d(8)
        self.fc4 = torch.nn.Linear(in_features=8, out_features=2)
        self.lin = torch.nn.Sequential(self.fc1, self.bn1, self.fc2, self.bn2,
                                       self.fc3, self.bn3, self.fc4)
    def forward(self, X: torch.Tensor) -> torch.Tensor:
        X = self.input_norm(X)
        X = self.net(X)
        # Flatten all feature maps per sample before the linear head.
        X = X.reshape(X.size(0), -1)
        X = self.lin(X)
        # NOTE(review): ELU applied to the final logits — unusual for a
        # classifier output; confirm the haroun loss expects this.
        X = torch.nn.functional.elu(X, alpha=1.0, inplace=False)
        return X
# + colab={"base_uri": "https://localhost:8080/"} id="PgX-PyQMJwyP" outputId="c2184caf-520f-4bcb-bd55-afac90734fb3"
# Build the model and train with Adam + RMSE loss, early stopping after 20
# epochs without improvement.
net = Network()
fakedetector = Model(net, "adam", rmse, device)
fakedetector.train(train_data=(data.train_inputs, data.train_outputs),
                   val_data=(data.val_inputs, data.val_outputs),
                   epochs=100, patience=20, batch_size=128, learning_rate=1.0E-3)
# + colab={"base_uri": "https://localhost:8080/"} id="jsafa4PfJ1a5" outputId="bdee5743-a6f9-4380-d0f9-619c80bf27f9"
fakedetector.evaluate(test_data=(data.test_inputs, data.test_outputs))
# + colab={"base_uri": "https://localhost:8080/", "height": 330} id="ZKDt0xNaKSlW" outputId="4fe918f4-b00f-4403-e163-c881c8c02ba3"
# Plot the training curves and checkpoint via the haroun helper.
fakedetector.plot()
fakedetector.save(path="./", checkpoint_name="module")
# + id="rJCbzWDSKS4Y"
# Also serialize the whole Model object with torch.save.
torch.save(fakedetector, "model_v2.h5")
# + id="npQn7u0HShBS"
# Reload the serialized model for inference. The original referenced an
# undefined name `PATH` (NameError); the preceding cell saved to "model_v2.h5".
fakedetector = torch.load("model_v2.h5")
fakedetector.eval()
| Fake_image_detection.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# Alpha name under which this blend's parameters/model are stored.
name = "CombineSignals"
residual_alphas = [];
using DataFrames
using Random
import XGBoost
using NBInclude
# Pull in shared helpers (get_indep/get_dep/metrics) and XGBoost feature builders.
@nbinclude("Alpha.ipynb");
@nbinclude("XGBoostFeatures.ipynb");
# ## Train a linear model
downcast_to_int(x) = isinteger(x) ? Int(x) : x
# Component alpha signals to blend; each entry names a previously trained alpha.
# NOTE(review): "ฮฒ" below appears to be a mojibake'd "β" — it is used
# consistently throughout this notebook, so it is left as-is.
alphas = [
    ["UserItemBiases"]
    ["ItemCF.$K" for K in downcast_to_int.([2^4, 2^6, 2^8, 2^10])]
    ["ItemCFResid.$K" for K in downcast_to_int.([2^4, 2^6, 2^8, 2^10])]
    ["MatrixFactorization.$K" for K in downcast_to_int.([10, 20, 40])]
    ["ItemCFRelated.$name" for name in ["all"]]
    # ["UserCF.1024"]
    ["ItemCFEmbed.1024"] # 0.12%
]
# Least-squares blend weights fit on the validation split.
ฮฒ = get_indep("validation", alphas) \ get_dep("validation")
# + tags=[]
# Score a linear blend: predictions clamped to the valid rating range [1, 10].
# Returns (rmse, mae, r2, mse); metric helpers come from the included notebooks.
function evaluate(X, y, ฮฒ)
    y_pred = X * ฮฒ
    y_pred = clamp.(y_pred, 1, 10)
    rmse(y, y_pred), mae(y, y_pred), r2(y, y_pred), mse(y, y_pred)
end;
# -
# Report the linear blend's metrics on the held-out test split.
evaluate(get_indep("test", alphas), get_dep("test"), ฮฒ)
# ## Train an XGBoost model
# Linear-blend features plus the extra engineered XGBoost features, column-wise.
function get_augmented_indep(split, alphas)
    hcat(get_indep(split, alphas), get_xgboost_features(split))
end;
# Split the validation set into XGBoost train/test DMatrix pairs.
# The target is the linear blend's residual, so XGBoost learns corrections
# on top of the least-squares fit. Seeded for a reproducible shuffle.
function get_xgboost_split(training_perc)
    Random.seed!(20220104)
    val_rows = length(get_dep("validation"))
    val_shuffle = shuffle(1:val_rows)
    val_train_size = Int(round(val_rows * training_perc))
    X = convert.(Float32, get_augmented_indep("validation", alphas))
    Y = convert.(Float32, get_dep("validation") - get_indep("validation", alphas) * ฮฒ)
    X_val_train = X[val_shuffle[1:val_train_size], :]
    Y_val_train = Y[val_shuffle[1:val_train_size]]
    X_val_test = X[val_shuffle[val_train_size+1:end], :]
    Y_val_test = Y[val_shuffle[val_train_size+1:end]]
    (
        XGBoost.DMatrix(X_val_train, label = Y_val_train),
        XGBoost.DMatrix(X_val_test, label = Y_val_test),
    )
end;
# Train a 200-round squared-error booster on almost all of the validation
# split (99.99%), watching train/test RMSE each round.
function fast_test_mse()
    # TODO early stopping
    dtrain, dtest = get_xgboost_split(0.9999)
    bst = XGBoost.xgboost(
        dtrain,
        200,
        watchlist = [(dtrain, "train"), (dtest, "test")],
        objective = "reg:squarederror",
        nthread = Threads.nthreads(),
    )
end;
# Test-set RMSE of the full model: linear blend plus the XGBoost residual
# correction, clamped to the rating range. Overloads evaluate(X, y, ฮฒ) above.
function evaluate(bst)
    X_test = convert.(Float32, get_augmented_indep("test", alphas))
    Y_test = convert.(Float32, get_dep("test"))
    preds = XGBoost.predict(bst, X_test)
    preds += get_indep("test", alphas) * ฮฒ
    rmse(Y_test, clamp.(preds, 1, 10))
end;
# + jupyter={"outputs_hidden": true} tags=[]
bst = fast_test_mse();
# +
# Reference numbers from a previous run:
# [200] train-rmse:1.074031 test-rmse:1.079130
# 1.080729819370984
# -
@info "XGBoost model test rmse $(evaluate(bst))"
# Persist the booster and the blend parameters for downstream use.
xgboost_model_fn = "../../data/alphas/$name/xgb.model"
XGBoost.save(bst, xgboost_model_fn)
write_params(Dict("ฮฒ" => ฮฒ, "alphas" => alphas, "bst" => xgboost_model_fn));
| notebooks/TrainingAlphas/CombineSignals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: pythonjvsc74a57bd0fcbb1ef9f507839a1a8deab4eeab6ce456c0aa3e9723d1242394540b18e281aa
# ---
# # Colombian Constitution EDA.
# Brief resume about the methodology and the results that we got.
# ## About the Document
# The Political Constitution of the Republic of Colombia of 1991 is the Magna Carta of the Republic of Colombia. It was promulgated in the Constitutional Gazette number 114 of Thursday, July 4, 1991, and is also known as the Constitution of Human Rights. It replaced the Political Constitution of 1886 and was issued during the presidency of the liberal César Gaviria.
# ## DS Pipeline
#
#
# ## Document Data Model
#
# PowerPoint graph, meanwhile we can use the hierarchy of the articles dictionary
#
# ```
# hierarchy = {
# 'TITULO' : 'h1',
# 'DISPOSICIONES' : 'h1',
# 'CAPITULO' : 'h2',
# 'ARTÍCULO' : 'p'
#
# }
# ```
# ## EDA
# ### Libraries
# +
import numpy as np
import pandas as pd
# For visualizations
import matplotlib.pyplot as plt
# For regular expressions
import re
# For handling string
import string
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
import tensorflow_text
from os import path
# from Model import *
# -
# Load the pre-computed article embeddings JSON and the locally cached
# Universal Sentence Encoder model (paths are relative to the repo root,
# two directories up from this notebook).
root_folder = "LegalSearcher/ElasticSearch"
constitution_f = f'{root_folder}/embedding.json'
filepath = path.abspath(path.join('','..', '..', constitution_f))
#import Dataset
df = pd.read_json(filepath)
root_folder = "LegalSearcher/ElasticSearch"
embbed_model = f'{root_folder}/model/'
filepath = path.abspath(path.join('','..', '..',embbed_model))
embed = hub.load(filepath)
# +
print("Downloading pre-trained embeddings from tensorflow hub...")
# embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
# embbed_model = f'{root_folder}/model/'
# filepath = path.abspath(path.join('','..', '..',embbed_model))
# embed = hub.load(filepath)
# TF1-compat graph mode: a string placeholder feeds the embedding op, and
# an explicit Session evaluates it (see embed_text below).
text_ph = tf.placeholder(tf.string)
embeddings = embed(text_ph)
print("Done.")
print("Creating tensorflow session...")
session = tf.Session()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
print("Done.")
# -
##### EMBEDDING #####
def embed_text(text):
    """Embed a batch of strings through the TF1 session.

    Returns one plain Python list (the embedding vector) per input string.
    """
    batch = session.run(embeddings, feed_dict={text_ph: text})
    return [vec.tolist() for vec in batch]
# +
# ##### EMBEDDING #####
# def embed_text(text):
# vectors = session.run(embeddings, feed_dict={text_ph: text})
# return [vector.tolist() for vector in vectors]
# -
# Keep only id + text; despite its name, the 'embedding' column here holds
# article text (it is renamed to article_list and re-embedded below).
df=df[['id','embedding']]
df.replace(r'\n','', regex=True,inplace=True)
# drop the first element of each list — presumably a header/title entry,
# TODO confirm against embedding.json
df['embedding'] = df['embedding'].apply(lambda x:x[1:])
df.rename(columns={'embedding':'article_list'},inplace=True)
df.head()
# Embed each article with the USE model (first vector of the batch).
df['embedds'] = df['article_list'].apply(lambda x:embed(x)[0])
df.head()
# art1_res = embed(art1)
# Pre-embed a set of Spanish test queries for the similarity search below.
query0 = embed_text(['Colombia es un Estado social de derecho, organizado en forma de Repรบblica unitaria, descentralizada, con autonomรญa de sus entidades territoriales, democrรกtica, participativa y pluralista, fundada en el respeto de la dignidad humana, en el trabajo y la solidaridad de las personas que la integran y en la prevalencia del interรฉs general'])[0]
query1 = embed_text(['organizacion de colombia'])[0]
query2 = embed_text(['Fines esenciales del estado'])[0]
query3 = embed_text(['Representantes del pueblo'])[0]
query4 = embed_text(['derechos humanos'])[0]
query5 = embed_text(['tratados y convenios'])[0]
query6 = embed_text(['Lorem ipsum dolor sit amet'])[0]
query7 = embed_text(['organos del poder publico'])[0]
query8 = embed_text(['Propiedad Intelectual'])[0]
query9 = embed_text(['cuidado del medio ambiente'])[0]
query10 = embed_text(['Ley de 1992'])[0]
query11 = embed_text(['pizza'])[0]
np.average(query0)
# Cosine Similarity Function
def similarity(A,B):
cos_sim = np.dot(A,B)/(np.linalg.norm(A)*np.linalg.norm(B))+1
return cos_sim
# +
# A = Article in Constitution
B = query5
count = 0
res = []
# Scan every article and keep the one with the highest shifted cosine
# similarity to the query embedding B.
for index, row in df.iterrows():
    cos_sim = similarity(row['embedds'], B)
    if cos_sim > count:
        count = cos_sim
        res = [index, row['article_list'], count]
# BUG FIX: the original tested `elif count < 1.2` inside the loop, which
# replaced the best match with the apology string on every non-improving
# row while the running maximum was still below 1.2 — even if a later row
# pushed it above the threshold the result depended on iteration order.
# The threshold is a property of the final maximum, so check it once here.
if count < 1.2:
    res = "Sorry parce we Couldn't find anything about that in the Constitution."
# print(f'Query ingresada: {B}')
print(res)
# -
# Persist the enriched dataframe (text + embeddings) to parquet.
from fastparquet import write
write('Cons.parq', df)
| SentenceEncoder/Embedds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import gc
import re
import tqdm
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import GPT2Model
from utils import *
from icd9cms.icd9 import search
import pickle
# ICD-9 procedure-code descriptions, pre-built offline (code -> text).
with open("./data/pcode_dict.txt", "rb") as fp:
    icd9_pcode_dict = pickle.load(fp)
def print_seq_dsc(seq):
    """Pretty-print a tokenized trauma record, resolving ICD-9 codes to text.

    Section tokens: <START>/<END> delimit the record, <DSTART> switches
    the mode to diagnosis codes (resolved via icd9cms.search), <PSTART>
    to procedure codes (resolved via icd9_pcode_dict after stripping the
    dot). Everything else is treated as a code in the current mode.
    """
    cds = seq.split()
    tp = 'START'
    for c in cds:
        if c == '<START>':
            print('=' * 9 + ' START ' + '=' * 9)
        elif c == '<DSTART>':
            tp = 'DX'
            print('=' * 10 + ' DXS ' + '=' * 10)
        elif c == '<PSTART>':
            tp = 'PR'
            print('=' * 10 + ' PRS ' + '=' * 10)
        elif c == '<END>':
            print('=' * 10 + ' END ' + '=' * 10)
        elif c == '<UNK>':
            print(f'{c}:Unknown Code')
        else:
            if tp == 'DX':
                # search() returns a falsy value for unknown diagnosis
                # codes, which are silently skipped here.
                d = search(c)
                if d:
                    print(d)
            if tp == 'PR':
                # procedure dictionary keys are dot-less codes
                pr_cd = re.sub(r'\.', '', c)
                if pr_cd in icd9_pcode_dict:
                    print(f"{pr_cd}:{icd9_pcode_dict[pr_cd]}")
                else:
                    print(f'{pr_cd}:Unknown Code')
# Pre-trained NTDB-GPT2 checkpoints from the Hugging Face hub: the causal
# LM head (used for generation), its tokenizer, and the bare transformer.
NTDBGPT2_lm = AutoModelForCausalLM.from_pretrained('dracoglacius/NTDB-GPT2')
NTDBGPT2_tokenizer = AutoTokenizer.from_pretrained('dracoglacius/NTDB-GPT2')
NTDBGPT2_embed = GPT2Model.from_pretrained('dracoglacius/NTDB-GPT2')
# ## ECodes
#
# * E812.0 = Other motor vehicle traffic accident involving collision with motor vehicle injuring driver of motor vehicle other than motorcycle.
# * E885.9 = Accidental fall from other slipping tripping or stumbling
# * E966.0 = Assault by cutting and piercing instrument
# * E965.4 = Assault-firearm NEC:Assault by other and unspecified firearm
# * E924.0 = Acc-hot liquid & steam - Accident caused by hot liquids and vapors, including steam
# # Adversarial Examples
#
# * E812.0 = Other motor vehicle traffic accident involving collision with motor vehicle injuring driver of motor vehicle other than motorcycle.
# * E965.4 = Assault-firearm NEC:Assault by other and unspecified firearm
# * E924.0 = Acc-hot liquid & steam - Accident caused by hot liquids and vapors, including steam
#
# 1. From the training set obtain the ECode and DCodes for E812.0, E965.4, and E924.0
# 1. Create 6 sets by mixing the stem and procedure combinations
# 1. Exclude sets with total token length > 20
# 1. Create embeddings
#
# * We count these as adversarial examples since the stems and procedures themselves come from the training data
# * The question is whether the OOD classifier can identify them as OOD based on the sequential information learned
# Training sequences for the five selected external-cause (E-)codes.
trn_seq = np.load("./data/25k_train_seqs_3_22_E8859_E8120_E9660_E9654_E9240.npy")
# ## Separate Data
#
# #### Training Data is In Domain Data
# Partition by which E-code appears anywhere in the sequence string.
e8120_trn_seq = [x for x in trn_seq if 'E812.0' in x] # 5000 items
e9654_trn_seq = [x for x in trn_seq if 'E965.4' in x] # 5000 items
e9240_trn_seq = [x for x in trn_seq if 'E924.0' in x] # 5000 items
# #### Adversarial Data
#
# * Stem: E812.0 + Procedures: E965.4
# * Stem: E812.0 + Procedures: E924.0
# * Stem: E965.4 + Procedures: E812.0
# * Stem: E965.4 + Procedures: E924.0
# * Stem: E924.0 + Procedures: E812.0
# * Stem: E924.0 + Procedures: E965.4
# +
# Split each sequence at <PSTART> into a "stem" (E-code + diagnoses) and a
# procedure suffix. The marker itself is consumed by split() and
# re-inserted when the adversarial sequences are assembled below.
e8120_trn_stem = [x.split('<PSTART>')[0] for x in e8120_trn_seq]
e8120_trn_prcs = [x.split('<PSTART>')[1] for x in e8120_trn_seq]
e9654_trn_stem = [x.split('<PSTART>')[0] for x in e9654_trn_seq]
e9654_trn_prcs = [x.split('<PSTART>')[1] for x in e9654_trn_seq]
e9240_trn_stem = [x.split('<PSTART>')[0] for x in e9240_trn_seq]
e9240_trn_prcs = [x.split('<PSTART>')[1] for x in e9240_trn_seq]
# -
# Adversarial sets: pair each stem with the procedure suffix of a
# *different* E-code (element-wise zip of the two 5000-item lists).
e8120_e9654_adv_seq = [s + '<PSTART>' + p for s,p in zip(e8120_trn_stem, e9654_trn_prcs)]
e8120_e9240_adv_seq = [s + '<PSTART>' + p for s,p in zip(e8120_trn_stem, e9240_trn_prcs)]
e9654_e8120_adv_seq = [s + '<PSTART>' + p for s,p in zip(e9654_trn_stem, e8120_trn_prcs)]
e9654_e9240_adv_seq = [s + '<PSTART>' + p for s,p in zip(e9654_trn_stem, e9240_trn_prcs)]
e9240_e8120_adv_seq = [s + '<PSTART>' + p for s,p in zip(e9240_trn_stem, e8120_trn_prcs)]
e9240_e9654_adv_seq = [s + '<PSTART>' + p for s,p in zip(e9240_trn_stem, e9654_trn_prcs)]
def get_hidden_embeddings(hidden_states, is_train=True, use_last=True):
    """Collapse GPT-2 generate() hidden states into a (tokens, layers, 768) tensor.

    With is_train=True only hidden_states[0] is used (it already covers the
    whole sequence); otherwise the per-step states from generation are
    reshaped and appended to the prompt states. With use_last=True only the
    final token position is returned, shape (layers, 768).
    """
    if is_train:
        # (layers, 1, seq, 768) -> (seq, layers, 768)
        layers = torch.stack(hidden_states[0])
        embeddings = layers.transpose(0, 2).squeeze(1)
    else:
        prompt = torch.stack(hidden_states[0]).transpose(0, 2).squeeze(1)
        generated = torch.stack(
            [torch.stack(step).reshape(13, 768) for step in hidden_states[1:]]
        )
        embeddings = torch.cat([prompt, generated])
    return embeddings[-1, :, :] if use_last else embeddings
def get_embeddings(sequences, is_train=True, use_last=True):
    """Generate from NTDB-GPT2 for each sequence and collect hidden-state embeddings.

    Sequences longer than 19 tokens are skipped, so the output can be
    shorter than `sequences`. With use_last=True the per-sequence results
    are stacked into one tensor; otherwise a list of variable-length
    tensors is returned.

    NOTE(review): do_sample=True makes generation stochastic, so embeddings
    are not reproducible across runs — confirm this is intended.
    """
    token_layer_embeddings = []
    for seq in tqdm.tqdm(sequences):
        seq_ids = NTDBGPT2_tokenizer.encode(seq, return_tensors='pt')
        # length cap keeps the six mixed sets comparable (see markdown above)
        if len(seq_ids[0]) > 19:
            continue
        out = NTDBGPT2_lm.generate(
            seq_ids,
            do_sample=True,
            #min_length=10,
            #max_length=12,
            #top_p=0.9,
            top_k=0,
            return_dict_in_generate=True,
            forced_eos_token_id=NTDBGPT2_tokenizer.eos_token_id,
            #repetition_penalty=3.0,
            #length_penalty=1.0,
            #num_return_seqs=1,
            output_hidden_states=True
        )
        token_layer_embeddings.append(get_hidden_embeddings(out.hidden_states, is_train, use_last))
    if use_last:
        return torch.stack(token_layer_embeddings)
    else:
        return token_layer_embeddings
# #### Get Sequence Embeddings of All Layers
def clean_seq(seq):
    """Collapse runs of whitespace in `seq` into single spaces."""
    # str.split() with no argument already discards empty fields, so no
    # explicit filtering is required.
    return ' '.join(seq.split())
def create_adversarial_embedding_data(ecode1, ecode2, seqs):
    """Embed the `seqs` adversarial set and save both the full and the
    end-token embeddings under ./outputs, keyed by the two E-codes."""
    all_em = get_embeddings(seqs, use_last=False)
    np.save(f"./outputs/{ecode1}_{ecode2}_adv_all_em.npy", all_em)
    end_em = torch.stack([em[-1, :, :] for em in all_em])
    np.save(f"./outputs/{ecode1}_{ecode2}_adv_end_em.npy", end_em)
    # free the large tensors before the next pairing is processed
    del all_em
    del end_em
    gc.collect()
# Build and persist embeddings, then the raw sequences, for all six
# stem/procedure cross pairings.
create_adversarial_embedding_data('e8120', 'e9654', e8120_e9654_adv_seq)
create_adversarial_embedding_data('e8120', 'e9240', e8120_e9240_adv_seq)
create_adversarial_embedding_data('e9654', 'e8120', e9654_e8120_adv_seq)
create_adversarial_embedding_data('e9654', 'e9240', e9654_e9240_adv_seq)
create_adversarial_embedding_data('e9240', 'e8120', e9240_e8120_adv_seq)
create_adversarial_embedding_data('e9240', 'e9654', e9240_e9654_adv_seq)
np.save('outputs/e8120_e9654_adv_seq.npy', e8120_e9654_adv_seq)
np.save('outputs/e8120_e9240_adv_seq.npy', e8120_e9240_adv_seq)
np.save('outputs/e9654_e8120_adv_seq.npy', e9654_e8120_adv_seq)
np.save('outputs/e9654_e9240_adv_seq.npy', e9654_e9240_adv_seq)
np.save('outputs/e9240_e8120_adv_seq.npy', e9240_e8120_adv_seq)
np.save('outputs/e9240_e9654_adv_seq.npy', e9240_e9654_adv_seq)
| sequence_embedding_adversarial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:adventofcode]
# language: python
# name: conda-env-adventofcode-py
# ---
import re
import numpy as np
import matplotlib.pyplot as plt
# +
def split(s):
    """Tokenize a puzzle-input line on the '<', '>', '=<' and ',' delimiters."""
    delimiters = '<|>|=<|,'
    return re.split(delimiters, s)
def extract(l):
    """Pull the (position, velocity) integer pairs out of a split token list."""
    position = tuple(int(tok) for tok in l[1:3])
    velocity = tuple(int(tok) for tok in l[4:6])
    return position, velocity
def parse():
    """Read input.txt via an IPython shell escape; return [(pos, vel), ...]."""
    # NOTE: the `!` line is IPython-only syntax — this cell cannot run as a
    # plain Python script. The trailing ';' produces one empty final token,
    # hence the l[:-1] slice below.
    l = ! cat input.txt | tr '\n' ';'
    l = list(map(split, l[0].split(';')))
    return list(map(extract, l[:-1]))
l = parse()
# -
# Unpack initial positions into separate x/y lists (exploratory; the cell
# below redefines this via coords()).
pos, vel = zip(*l)
pos = np.array(pos)
posx, posy = zip(*pos)
posx = list(posx)
posy = list(posy)
# +
l = parse()
def coords(pos):
    """Unzip points into an x list and a sign-flipped y list (screen coords)."""
    xs, ys = zip(*pos)
    return list(xs), [-y for y in ys]
def diameter(pos):
    """Largest Euclidean distance of any point from the origin."""
    return max(np.linalg.norm(p) for p in pos)
def draw(diam):
    """Fast-forward the points until their spread drops below `diam`, then
    scatter-plot every subsequent step while the spread stays below it.

    Prints the step counter and current diameter per plotted frame so the
    message-forming iteration can be identified by eye.
    """
    pos, vel = zip(*l)
    pos = np.array(pos)
    vel = np.array(vel)
    c = 1
    # skip ahead while the cloud is still wider than the threshold
    while diameter(pos) > diam:
        pos += vel
        c += 1
    # plot each configuration inside the tight window
    while diameter(pos) <= diam:
        pos += vel
        posx, posy = coords(pos)
        plt.scatter(posx, posy)
        plt.show()
        print(c)
        print(diameter(pos))
        c += 1
def iterate(iters):
    """Advance every point by its velocity for `iters` steps and plot the result."""
    pos, vel = zip(*l)
    pos = np.array(pos)
    vel = np.array(vel)
    for _ in range(iters):
        pos += vel
    xs, ys = coords(pos)
    plt.scatter(xs, ys)
    plt.show()
# -
# ## Solution
# 10054 is the message-forming step, presumably identified via draw() above.
iterate(10054)
| 2018/ferran/day10/the_stars_align.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy
import scipy.io.wavfile
from scipy.fftpack import dct
# sample_rate, signal = scipy.io.wavfile.read('../data/19700101_000000.WAV') # File assumed to be in the same directory
sample_rate, signal = scipy.io.wavfile.read("/home/mariussolomes/final_project/summer2019/RealCoo/data/testBird.wav")
# signal = signal[0:int(3.5 * sample_rate)] # Keep the first 3.5 seconds
# +
# with open('../../spectrogram/spectro/data/test.txt', 'w') as handle:
#     for x in signal:
#         handle.writelines(str(x) + "\n")
# -
# Pre-emphasis filter: boost high frequencies, y[t] = x[t] - 0.97*x[t-1].
pre_emphasis = 0.97
emphasized_signal = numpy.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])
# 25 ms frames with a 10 ms hop.
frame_size = 0.025
frame_stride = 0.01
# +
frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate # Convert from seconds to samples
signal_length = len(emphasized_signal)
frame_length = int(round(frame_length))
frame_step = int(round(frame_step))
num_frames = int(numpy.ceil(float(numpy.abs(signal_length - frame_length)) / frame_step)) # Make sure that we have at least 1 frame
pad_signal_length = num_frames * frame_step + frame_length
z = numpy.zeros((pad_signal_length - signal_length))
pad_signal = numpy.append(emphasized_signal, z) # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal
# Build a (num_frames, frame_length) index matrix so framing is one fancy-index.
indices = numpy.tile(numpy.arange(0, frame_length), (num_frames, 1)) + numpy.tile(numpy.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
frames = pad_signal[indices.astype(numpy.int32, copy=False)]
# -
# Hamming window each frame, then take the magnitude and power spectra.
frames *= numpy.hamming(frame_length)
# frames *= 0.54 - 0.46 * numpy.cos((2 * numpy.pi * n) / (frame_length - 1)) # Explicit Implementation **
NFFT = 1024
mag_frames = numpy.absolute(numpy.fft.rfft(frames, NFFT)) # Magnitude of the FFT
pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2)) # Power Spectrum
nfilt = 40
# +
# Build `nfilt` triangular filters spaced evenly on the mel scale between
# 0 Hz and Nyquist, then apply them to the power spectrum.
low_freq_mel = 0
high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel
mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale
hz_points = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz
bin = numpy.floor((NFFT + 1) * hz_points / sample_rate)
fbank = numpy.zeros((nfilt, int(numpy.floor(NFFT / 2 + 1))))
for m in range(1, nfilt + 1):
    f_m_minus = int(bin[m - 1]) # left
    f_m = int(bin[m]) # center
    f_m_plus = int(bin[m + 1]) # right
    # rising and falling edges of triangle m
    for k in range(f_m_minus, f_m):
        fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
    for k in range(f_m, f_m_plus):
        fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
filter_banks = numpy.dot(pow_frames, fbank.T)
# -
# Guard against log(0), then convert to decibels.
filter_banks = numpy.where(filter_banks == 0, numpy.finfo(float).eps, filter_banks) # Numerical Stability
filter_banks = 20 * numpy.log10(filter_banks) # dB
from matplotlib import pyplot as plt
plt.figure(figsize=(40, 20))
plt.imshow(filter_banks.T)
plt.show()
# Load the Bela device's spectrogram output and compare it against the
# locally computed filter banks.
# with open("", "rb") as handle:
with open("/home/mariussolomes/final_project/summer2019/Bela-Spectrogram/data/bird", "r") as handle:
    data = handle.read()
# SECURITY NOTE(review): eval() on file contents executes arbitrary code if
# the file is untrusted — float(x) would parse plain numbers safely.
data = [eval(x) for x in data.split()]
filter_banks.shape
data = numpy.array(data).reshape(filter_banks.shape)
plt.figure(figsize=(40, 20))
plt.imshow(data.T)
plt.show()
# Difference image: should be near zero if both pipelines agree.
plt.figure(figsize=(40, 20))
plt.imshow(filter_banks.T - data.T)
plt.show()
# +
# max(abs((data - filter_banks)))
# +
# filter_banks = data
num_ceps = 20 # 12
# DCT of the log filter banks; coefficient 0 is dropped, keeping 1..num_ceps.
mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1 : (num_ceps + 1)] # Keep 2-13
cep_lifter = 22
(nframes, ncoeff) = mfcc.shape
n = numpy.arange(ncoeff)
# Sinusoidal liftering de-emphasizes higher cepstral coefficients.
lift = 1 + (cep_lifter / 2) * numpy.sin(numpy.pi * n / cep_lifter)
mfcc *= lift #*
# Mean-normalize the filter banks (note: after the MFCCs were computed).
filter_banks -= (numpy.mean(filter_banks, axis=0) + 1e-8)
from matplotlib import pyplot as plt
plt.figure(figsize=(50, 5))
plt.imshow(mfcc.T)
plt.show()
mfcc.shape
| notebooks/spectrogram_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
def load_data(folder, start_idx_runs, n_runs, n_nodes):
    """Load the per-run evaluation CSVs from `folder` into one DataFrame.

    Run 0 is stored as evaluation_output.csv and runs 1..n_runs-1 as
    evaluation_output_<i>.csv. A 'run' column is added, numbered from
    start_idx_runs. `n_nodes` is unused but kept for interface
    compatibility with existing callers.
    """
    frames = []
    for i in range(n_runs):
        suffix = '' if i == 0 else '_{}'.format(i)
        run_df = pd.read_csv('{}/evaluation_output{}.csv'.format(folder, suffix))
        run_df['run'] = start_idx_runs + i
        frames.append(run_df)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat gives the same result (original row labels preserved).
    return pd.concat(frames)
n_runs = 10
n_nodes = 21
# load data where bad nodes sent a*c messages
data_ac = load_data('good20bad1C3', 0, 5, n_nodes)
data_ac['setting'] = 'ac'
# load data where bad nodes sent 1/2*a*c messages
data_05ac = load_data('good20bad1C3messages05', 6, 5, n_nodes)
data_05ac['setting'] = '05ac'
# BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
# drop-in replacement and produces the same stacked frame.
data = pd.concat([data_ac, data_05ac])
display(data)
# +
def counts(data):
    """Summarize each run: messages sent by good/bad nodes, agreement flag,
    and the experimental setting label. Uses the module-level n_runs."""
    summary = pd.DataFrame(range(n_runs), columns=['run'])
    # total messages per (run, node-goodness) group
    per_goodness = data.groupby(['run', 'good'])['seqNum'].sum().reset_index()
    summary['count_bad'] = per_goodness[per_goodness['good'] == False]['seqNum'].values
    summary['count_good'] = per_goodness[per_goodness['good'] == True]['seqNum'].values
    summary['count'] = summary['count_bad'] + summary['count_good']
    # a run reached agreement when all nodes decided the same value
    summary['agreement'] = (data.groupby('run')['value'].nunique() == 1).values
    summary['setting'] = data.groupby('run')['setting'].max().values
    return summary
counts_data = counts(data)
display(counts_data)
# +
# Paired t-tests: do good and bad nodes send different message volumes
# within the same runs, per setting?
from scipy import stats
counts_data_ac = counts_data[counts_data['setting'] == 'ac']
counts_data_05ac = counts_data[counts_data['setting'] == '05ac']
print(stats.ttest_rel(counts_data_ac['count_good'], counts_data_ac['count_bad']))
print(stats.ttest_rel(counts_data_05ac['count_good'], counts_data_05ac['count_bad']))
# +
import numpy as np
def compute_complexity(counts):
    """Assumed message-complexity bound per run: (T + n) * log(n) with
    T = min(n**2, messages sent by bad nodes). Uses module-level n_nodes."""
    O = []
    # Compute the complexity limit for number of messages sent
    for _, run in counts.iterrows():
        # BUG FIX: the original wrote n_nodes ^ 2, which is bitwise XOR in
        # Python (21 ^ 2 == 23); the intended quadratic bound is n_nodes ** 2.
        T = min(n_nodes**2, run['count_bad'])
        O.append((T + n_nodes) * np.log(n_nodes))
    return O
O = compute_complexity(counts_data)
# +
import seaborn as sns
import matplotlib.pyplot as plt
# Messages per node type and setting, with the assumed complexity bound of
# the first run drawn as a dashed reference line.
sns.catplot(x='good', y='seqNum', kind='bar', hue='setting', data=data, legend=False)
plt.axhline(O[0], linestyle='--', color='k', label='assumed complexity')
plt.ylim(0)
plt.ylabel('number of messages')
plt.legend(frameon=False)
plt.show()
# -
# Bad-node message volume split by whether the run reached agreement.
sns.catplot(x='agreement', y='count_bad', kind='bar', hue='setting', data=counts_data)
plt.ylim(0)
plt.ylabel('number of messages sent by bad nodes')
plt.show()
# +
from scipy.stats import pearsonr
# Correlation between bad-node message volume and reaching agreement.
corr_ac, _ = pearsonr(counts_data_ac['count_bad'], counts_data_ac['agreement'])
corr_05ac, _ = pearsonr(counts_data_05ac['count_bad'], counts_data_05ac['agreement'])
# BUG FIX: the original printed `corr`, a name never defined in this
# notebook (NameError at runtime); the value computed above is `corr_ac`.
print(corr_ac)
print(corr_05ac)
# -
| ByzantineProtocol/experiments/evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from table_reader import TableReader
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import mean_squared_error, r2_score
# -
# Hey folks, I'm just trying out a proof-of-concept jupyter notebook that uses our data retrieval code.
#
# I got sick of working with environment variables so I switched to a new method to store our DB password:
# 1. Create a file called config.json in the project root.
# 2. Inside, config.json should look like this:
# {
# "database_url":"database_url_goes_here"
# }
#
# TableReader's other vector methods are geodata_vector() and reviews_vector(). Be sure to call close() when you're done so it terminates the connection to the DB.
# Pull the property feature vector from the DB, then run embedded feature
# selection with two regularized linear models.
# NOTE(review): 'include_amenitites' is spelled as-is — presumably it
# matches the TableReader API; confirm before "fixing" the typo.
tr = TableReader()
df = tr.properties_vector(include_amenitites=True)
tr.close()
features = df[df.columns.drop(['price', 'listingID'])]
label = df['price']
model = ElasticNet()
esfm = SelectFromModel(model)
esfm.fit(features, label)
print(list(features.iloc[:, esfm.get_support(indices=True)]))
model = Lasso()
sfm = SelectFromModel(model)
sfm.fit(features, label)
print(list(features.iloc[:, sfm.get_support(indices=True)]))
X_train, X_test, y_train, y_test = tts(features, label, test_size=0.2)
# +
# Ridge-based selection; note this rebinds `sfm` (previously Lasso-based),
# and the rebound selector is what the fit/score cell below uses.
model = Ridge()
sfm = SelectFromModel(model)
sfm.fit(features, label)
print(list(features.iloc[:, sfm.get_support(indices=True)]))
# -
clf = Ridge(alpha=0.5)
selected = sfm.get_support(indices=True)
# BUG FIX: y_train is a Series, so the original
# y_train.iloc[:, selected] raised an indexing error — the label vector is
# used as-is. The test features are also reduced to the same selected
# columns the model was fit on; otherwise predict()/score() would see a
# different feature count than fit().
clf.fit(X_train.iloc[:, selected], y_train)
print(mean_squared_error(y_test, clf.predict(X_test.iloc[:, selected])))
print(clf.score(X_test.iloc[:, selected], y_test))
# Correlation heatmap of the ElasticNet-selected features.
elastic_data = df[list(features.iloc[:, esfm.get_support(indices=True)])]
corr = elastic_data.corr()
plt.figure(figsize=(12, 12))
ax = sns.heatmap(
    corr,
    vmin=-1, vmax=1, center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True
)
ax.set_xticklabels(
    ax.get_xticklabels(),
    rotation=45,
    horizontalalignment='right'
);
# +
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
from yellowbrick.regressor import ResidualsPlot
# Residual plots for a spread of regressor families on the full
# (unselected) feature split.
regressors = {
    "support vector machine": SVR(),
    "multilayer perceptron": MLPRegressor(),
    "nearest neighbors": KNeighborsRegressor(),
    "bayesian ridge": BayesianRidge(),
    "linear regression": LinearRegression(),
}
for _, regressor in regressors.items():
    visualizer = ResidualsPlot(regressor)
    visualizer.fit(X_train, y_train)
    visualizer.score(X_test, y_test)
    # NOTE(review): poof() was renamed show() in yellowbrick 1.0 — confirm
    # the pinned yellowbrick version before upgrading.
    visualizer.poof()
| model-evaluation-v2-with feature seletion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import statsmodels.api as sm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 2013 cross-country data: fertility rate vs. GDP.
df = pd.read_excel("Fertility Rate and Education Relationship Data 2013.xlsx")
df.head(15)
df
print(df['Fertility rate, total (births per woman) 2013'])
print(df['GDP (current US$)2013'])
X = df['GDP (current US$)2013']
y = df['Fertility rate, total (births per woman) 2013']
X
y
length = len(df['Country Name'])
country_name = df['Country Name']
country_tag = df['Country Abbreviation']
# +
# Create a plot.
plt.figure(figsize = (20, 20))
plt.scatter(X, y)
plt.title("2013")
plt.xlabel("GDP (current US$)")
plt.ylabel("Fertility rate, total (births per woman)")
# Add country name tag.
# Taiwan is highlighted in green, all other countries in red.
for i in range(length):
    if country_name[i] == "Taiwan":
        plt.text(X[i], y[i]*1.02, country_name[i], fontsize=10, color = "green", style = "italic", weight = "light", verticalalignment='center', horizontalalignment='right',rotation=0)
    else:
        plt.text(X[i], y[i]*1.02, country_name[i], fontsize=10, color = "r", style = "italic", weight = "light", verticalalignment='center', horizontalalignment='right',rotation=0)
plt.show()
# +
# Ordinary least square method.
# -
# NOTE(review): sm.OLS does not add an intercept, so this regresses through
# the origin — wrap X in sm.add_constant(X) if an intercept is intended.
mod = sm.OLS(y, X)
res = mod.fit()
print(res.summary())
# +
# Replace X with log(X)
# -
# Log-transform GDP to linearize the relationship.
X = np.log(X)
X
y
# +
# Create a plot.
plt.figure(figsize = (20, 15))
plt.scatter(X, y)
plt.title("2013")
plt.xlabel("GDP (current US$)")
plt.ylabel("Birth rate, crude (per 1,000 people)")
# Add country name tag.
for i in range(length):
    if country_name[i] == "Taiwan":
        plt.text(X[i], y[i]*1.02, country_name[i], fontsize=10, color = "green", style = "italic", weight = "light", verticalalignment='center', horizontalalignment='right',rotation=0)
    else:
        plt.text(X[i], y[i]*1.02, country_name[i], fontsize=10, color = "r", style = "italic", weight = "light", verticalalignment='center', horizontalalignment='right',rotation=0)
plt.show()
# -
# Ordinary least square method.
# NOTE(review): still no intercept term — see the note on the first fit.
mod = sm.OLS(y, X)
res = mod.fit()
print(res.summary())
| Fertility Rate Per Woman and GDP Model 2013.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example UDF Sum-of-squares
#
# This turtorial performs the following steps:
#
# 1. Create input table `udf_sos_in` with 1000 rows of random data for $x_1$ and $x_2$.
# 2. Create output table `udf_sos_out` where results will be stored.
# 2. Create a UDF that will calculate $y={x_1}^2+{x_2}^2 $ from the input table and save results to the output table.
# 4. Execute the UDF.
# 5. Compare the actual and expected results.
#
# Also See:
# * [Sum of Squares Tutorial](https://www.kinetica.com/docs/udf/python/examples/dist_noncuda_sum_of_squares/dist_noncuda_sum_of_squares.html)
# * [Running Python UDFs](https://www.kinetica.com/docs/udf/python/running.html)
# * [Python UDF API](https://www.kinetica.com/docs/udf/python/writing.html)
# * [UDF Simulator](https://www.kinetica.com/docs/udf/simulating_udfs.html)
#
# ### Import dependencies
# +
# Local libraries should automatically reload
# %reload_ext autoreload
# %autoreload 1
# to access Kinetica Jupyter I/O functions
import sys
sys.path.append('../KJIO')
import numpy as np
import pandas as pd
# %aimport kodbc_io
# %aimport kapi_io
# Table names used throughout the tutorial (input, output, and schema).
INPUT_TABLE = 'udf_example_sos_in'
OUTPUT_TABLE = 'udf_example_sos_out'
SCHEMA = 'TEST'
# -
# ### Create input data table
#
# Create a table named `udf_sos_in` with 1000 rows of random numbers in the x1 and x2 columns.
# +
NUM_ROWS = 1000
# Create a dataframe from a dict of series.
# x1/x2 are gaussian noise scaled by 10; id doubles as the join key used
# when comparing actual vs expected results at the end of the notebook.
_input_df = pd.DataFrame({
    'id' : np.array(range(NUM_ROWS), dtype='int32'),
    'x1' : pd.Series(np.random.randn(NUM_ROWS)*10, dtype='float32'),
    'x2' : pd.Series(np.random.randn(NUM_ROWS)*10, dtype='float32'),
}).set_index('id')
kapi_io.save_df(_input_df, INPUT_TABLE, SCHEMA)
# -
# ### View Input table contents
kodbc_io.get_df("""
select top 10 * from {}
""".format(INPUT_TABLE))
# ### Create an empty output table
# +
# Empty frame with the right dtypes; the UDF fills it row-by-row.
_output_df = pd.DataFrame({
    'id' : pd.Series(None, dtype='int32'),
    'y' : pd.Series(None, dtype='float32'),
}).set_index('id')
kapi_io.save_df(_output_df, OUTPUT_TABLE, SCHEMA)
# -
# ### Below is the contents of the UDF
#
# A python file named `udf_sos_proc.py` will be saved in the current folder
# +
# %%writefile udf_sos_proc.py
from kinetica_proc import ProcData

# Distributed sum-of-squares UDF: for every input row, write
# y = x1**2 + x2**2 (plus the row id) to the output table.
proc_data = ProcData()
proc_name = proc_data.request_info['proc_name']
data_segment_id = proc_data.request_info['data_segment_id']
run_id = proc_data.request_info['run_id']
print('UDF Start: {} ({}-{})'.format(proc_name, run_id, data_segment_id))
in_table = proc_data.input_data[0]
col_in_x1 = in_table['x1']
col_in_x2 = in_table['x2']
col_in_id = in_table['id']
out_table = proc_data.output_data[0]
col_out_y = out_table['y']
col_out_id = out_table['id']
# Extend the output table by the number of record entries in the input table
out_table.size = in_table.size
# BUG FIX: the original used xrange, which is Python-2-only and raises
# NameError under Python 3; range behaves identically for this use on
# both interpreter versions.
for i in range(0, in_table.size):
    col_out_y[i] = col_in_x1[i]**2 + col_in_x2[i]**2
    col_out_id[i] = col_in_id[i]
# we will get the results when the proc finishes
result_rows = str(out_table.size)
proc_data.results['result_rows'] = result_rows
proc_data.complete()
print('UDF Complete: {} rows ({}-{})'.format(result_rows, run_id, data_segment_id))
# -
# ### Execute the UDF
#
# Submit the script for execution and monitor the results.
# +
# %aimport kudf_io
kudf_io.create_proc(
_proc_name='sos_proc',
_file_paths=['./udf_sos_proc.py'])
_result = kudf_io.submit_proc(_proc_name='sos_proc',
_params={},
_input_table_names=[INPUT_TABLE],
_output_table_names=[OUTPUT_TABLE])
# -
# ### Query the results
#
# We should see that the `diff` column shows zero.
kodbc_io.get_df('''
SELECT
in_t.x1,
in_t.x2,
out_t.y AS actual_result,
FLOAT(in_t.x1 * in_t.x1 + in_t.x2 * in_t.x2) AS expected_result,
FLOAT(in_t.x1 * in_t.x1 + in_t.x2 * in_t.x2) - out_t.y AS diff
FROM {} as out_t
INNER JOIN {} AS in_t
ON in_t.id = out_t.id
LIMIT 10
'''.format(OUTPUT_TABLE, INPUT_TABLE))
| notebooks/Examples/ex_kudf_io.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CSV Splitter
# <NAME>, <NAME>
# ## Importing dataset
from datetime import datetime
from pandas import read_csv
# ### Read train data
# NOTE(review): read_csv's date_parser argument is deprecated in pandas
# 2.0 (use date_format instead) — confirm the pinned pandas version.
date_parser = lambda dates: datetime.strptime(dates, '%Y-%m-%d')
train_data = read_csv('dataset/atm_train.csv', parse_dates=['date'], date_parser=date_parser, index_col='date')
train_data.head()
# ### Read test data
# Test CSV is semicolon-delimited and uses day-first dates.
date_parser = lambda dates: datetime.strptime(dates, '%d/%m/%Y')
test_data = read_csv('dataset/atm_test.csv', delimiter=';', parse_dates=['date'], date_parser=date_parser, index_col='date')
test_data.head()
# ## Preprocessing
# +
from pandas import DataFrame
import os
# -
# ### Preprocess train data
# Drop bookkeeping columns and strip the 'K' thousands suffix from values.
x_train = train_data[train_data.columns.difference(['currency', 'unplanned_deliveries', 'unplanned_returns', 'Trips', 'deposit', 'X1', 'Pre.Withdrawals', 'Carrier.Cost', 'saldo akhir'])]
x_train = x_train.replace('K', '', regex=True)
x_train.head()
# #### Save preprocessed train data as CSV
# +
train_data_dir = 'dataset/train/'
if not os.path.exists(train_data_dir):
    os.makedirs(train_data_dir)
# +
# One CSV per ATM; the str() cast suggests 'no. ATM' holds strings —
# TODO confirm against the source file.
atm_numbers = x_train['no. ATM'].unique()
x_trains = {}
for atm_number in atm_numbers:
    x_trains[atm_number] = x_train[x_train['no. ATM'] == str(atm_number)].drop(['no. ATM'], axis=1)
    x_trains[atm_number].to_csv(train_data_dir + atm_number + '_train.csv')
# -
y_train = train_data['Withdrawals']
# ### Preprocess test data
x_test = test_data
x_test = x_test.replace('K', '', regex=True)
x_test.head()
# #### Save preprocessed test data as CSV
# +
test_data_dir = 'dataset/test/'
if not os.path.exists(test_data_dir):
    os.makedirs(test_data_dir)
# -
x_test.to_csv(test_data_dir + 'test.csv')
| csv-splitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Geometric Series for Elementary Economics
# + hide-output=false
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import sympy as sym
from sympy import init_printing
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# + hide-output=false
# True present value of a finite lease
def finite_lease_pv_true(T, g, r, x_0):
    """Exact PV of payments x_0*(1+g)^t for t = 0..T, discounted at rate r."""
    growth = (1 + g) / (1 + r)
    # geometric-series closed form with ratio (1+g)/(1+r)
    return x_0 * (1 - growth**(T + 1)) / (1 - growth)
# First approximation for our finite lease
def finite_lease_pv_approx_1(T, g, r, x_0):
    """First-order approximation: (T+1) payments plus an r*g cross term."""
    periods = T + 1
    base = x_0 * periods
    cross_term = x_0 * r * g * periods / (r - g)
    return base + cross_term
# Second approximation for our finite lease
def finite_lease_pv_approx_2(T, g, r, x_0):
    """Zeroth-order approximation: T+1 undiscounted payments of x_0 (g, r ignored)."""
    return x_0 * (T + 1)
# Infinite lease
def infinite_lease(g, r, x_0):
    """PV of a perpetual lease; the geometric sum converges only for g < r."""
    growth = (1 + g) / (1 + r)
    return x_0 / (1 - growth)
# + hide-output=false
def plot_function(axes, x_vals, func, args):
    """Plot func(*args) against x_vals on `axes`, labelling the curve with
    the function's name so it appears in the legend."""
    y_vals = func(*args)
    axes.plot(x_vals, y_vals, label=func.__name__)
T_max = 50
T = np.arange(0, T_max+1)
g = 0.02
r = 0.03
x_0 = 1
our_args = (T, g, r, x_0)
funcs = [finite_lease_pv_true,
finite_lease_pv_approx_1,
finite_lease_pv_approx_2]
## the three functions we want to compare
fig, ax = plt.subplots()
ax.set_title('Finite Lease Present Value $T$ Periods Ahead')
for f in funcs:
plot_function(ax, T, f, our_args)
ax.legend()
ax.set_xlabel('$T$ Periods Ahead')
ax.set_ylabel('Present Value, $p_0$')
plt.show()
# + hide-output=false
# Convergence of infinite and finite
T_max = 1000
T = np.arange(0, T_max+1)
fig, ax = plt.subplots()
ax.set_title('Infinite and Finite Lease Present Value $T$ Periods Ahead')
f_1 = finite_lease_pv_true(T, g, r, x_0)
f_2 = np.ones(T_max+1)*infinite_lease(g, r, x_0)
ax.plot(T, f_1, label='T-period lease PV')
ax.plot(T, f_2, '--', label='Infinite lease PV')
ax.set_xlabel('$T$ Periods Ahead')
ax.set_ylabel('Present Value, $p_0$')
ax.legend()
plt.show()
# + hide-output=false
# First view
# Changing r and g
fig, ax = plt.subplots()
ax.set_title('Value of lease of length $T$')
ax.set_ylabel('Present Value, $p_0$')
ax.set_xlabel('$T$ periods ahead')
T_max = 10
T = np.arange(0, T_max + 1)
# One (r, g) pair per curve, spanning r >> g down to r < g.
rs, gs = (0.9, 0.5, 0.4001, 0.4), (0.4, 0.4, 0.4, 0.5)
# Raw strings: '\g' and friends are invalid escape sequences and raise
# SyntaxWarning on modern Python; the LaTeX text itself is unchanged.
comparisons = (r'$\gg$', '$>$', r'$\approx$', '$<$')
for r, g, comp in zip(rs, gs, comparisons):
    ax.plot(finite_lease_pv_true(T, g, r, x_0), label=f'r(={r}) {comp} g(={g})')
ax.legend()
plt.show()
# + hide-output=false
# Second view
fig = plt.figure()
T = 3
# `Figure.gca(projection=...)` was deprecated in Matplotlib 3.4 and removed
# in 3.6; create the 3-D axes explicitly instead.
ax = fig.add_subplot(projection='3d')
r = np.arange(0.01, 0.99, 0.005)
g = np.arange(0.011, 0.991, 0.005)
rr, gg = np.meshgrid(r, g)
z = finite_lease_pv_true(T, gg, rr, x_0)
# Removes points where undefined (r == g makes the formula divide by zero)
same = (rr == gg)
z[same] = np.nan
surf = ax.plot_surface(rr, gg, z, cmap=cm.coolwarm,
                       antialiased=True, clim=(0, 15))
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('$r$')
ax.set_ylabel('$g$')
ax.set_zlabel('Present Value, $p_0$')
ax.view_init(20, 10)
ax.set_title('Three Period Lease PV with Varying $g$ and $r$')
plt.show()
# + hide-output=false
# Creates algebraic symbols that can be used in an algebraic expression
g, r, x0 = sym.symbols('g, r, x0')
G = (1 + g)
R = (1 + r)
p0 = x0 / (1 - G * R**(-1))
init_printing()
print('Our formula is:')
p0
# + hide-output=false
print('dp0 / dg is:')
dp_dg = sym.diff(p0, g)
dp_dg
# + hide-output=false
print('dp0 / dr is:')
dp_dr = sym.diff(p0, r)
dp_dr
# + hide-output=false
# Function that calculates a path of y
def calculate_y(i, b, g, T, y_init):
    """Iterate the Keynesian income recursion y_t = b*y_{t-1} + i + g for
    t = 0..T, seeded with y_init; returns the path as a NumPy array."""
    path = np.empty(T + 1)
    previous = b * y_init + i + g
    path[0] = previous
    for t in range(1, T + 1):
        previous = b * previous + i + g
        path[t] = previous
    return path
# Initial values
i_0 = 0.3
g_0 = 0.3
# 2/3 of income goes towards consumption
b = 2/3
y_init = 0
T = 100
fig, ax = plt.subplots()
ax.set_title('Path of Aggregate Output Over Time')
ax.set_xlabel('$t$')
ax.set_ylabel('$y_t$')
ax.plot(np.arange(0, T+1), calculate_y(i_0, b, g_0, T, y_init))
# Output predicted by geometric series
ax.hlines(i_0 / (1 - b) + g_0 / (1 - b), xmin=-1, xmax=101, linestyles='--')
plt.show()
# + hide-output=false
bs = (1/3, 2/3, 5/6, 0.9)
fig,ax = plt.subplots()
ax.set_title('Changing Consumption as a Fraction of Income')
ax.set_ylabel('$y_t$')
ax.set_xlabel('$t$')
x = np.arange(0, T+1)
for b in bs:
y = calculate_y(i_0, b, g_0, T, y_init)
ax.plot(x, y, label=r'$b=$'+f"{b:.2f}")
ax.legend()
plt.show()
# + hide-output=false
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 10))
fig.subplots_adjust(hspace=0.3)
x = np.arange(0, T+1)
values = [0.3, 0.4]
for i in values:
y = calculate_y(i, b, g_0, T, y_init)
ax1.plot(x, y, label=f"i={i}")
for g in values:
y = calculate_y(i_0, b, g, T, y_init)
ax2.plot(x, y, label=f"g={g}")
axes = ax1, ax2
param_labels = "Investment", "Government Spending"
for ax, param in zip(axes, param_labels):
ax.set_title(f'An Increase in {param} on Output')
ax.legend(loc ="lower right")
ax.set_ylabel('$y_t$')
ax.set_xlabel('$t$')
plt.show()
# -
| Geometric Series for Elementary Economics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Language Translation with nn.Transformer and torchtext
# ======================================================
#
# This tutorial shows how to train a translation model from scratch using
# Transformer. We will be using `Multi30k <http://www.statmt.org/wmt16/multimodal-task.html#task1>`__
# dataset to train a German to English translation model.
#
# Data Sourcing and Processing
# ----------------------------
#
# `torchtext library <https://pytorch.org/text/stable/>`__ has utilities for creating datasets that can be easily
# iterated through for the purposes of creating a language translation
# model. In this example, we show how to use torchtext's inbuilt datasets,
# tokenize a raw text sentence, build vocabulary, and numericalize tokens into tensor. We will use
# `Multi30k dataset from torchtext library <https://pytorch.org/text/stable/datasets.html#multi30k>`__
# that yields a pair of source-target raw sentences.
#
#
#
#
# +
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.datasets import Multi30k
from typing import Iterable, List
SRC_LANGUAGE = 'de'
TGT_LANGUAGE = 'en'
# Place-holders
token_transform = {}
vocab_transform = {}
# Create source and target language tokenizer. Make sure to install the dependencies.
# pip install -U spacy
# python -m spacy download en_core_web_sm
# python -m spacy download de_core_news_sm
token_transform[SRC_LANGUAGE] = get_tokenizer('spacy', language='de_core_news_sm')
token_transform[TGT_LANGUAGE] = get_tokenizer('spacy', language='en_core_web_sm')
# helper function to yield list of tokens
def yield_tokens(data_iter: Iterable, language: str) -> List[str]:
    """Tokenize every sample in `data_iter` for the given language and
    yield one token list per sentence (feeds build_vocab_from_iterator)."""
    # Each sample is a (src, tgt) pair; pick the side for this language.
    position = {SRC_LANGUAGE: 0, TGT_LANGUAGE: 1}[language]
    tokenizer = token_transform[language]
    for sample in data_iter:
        yield tokenizer(sample[position])
# Define special symbols and indices
UNK_IDX, PAD_IDX, BOS_IDX, EOS_IDX = 0, 1, 2, 3
# Make sure the tokens are in order of their indices to properly insert them in vocab
special_symbols = ['<unk>', '<pad>', '<bos>', '<eos>']
for ln in [SRC_LANGUAGE, TGT_LANGUAGE]:
# Training data Iterator
train_iter = Multi30k(split='train', language_pair=(SRC_LANGUAGE, TGT_LANGUAGE))
# Create torchtext's Vocab object
vocab_transform[ln] = build_vocab_from_iterator(yield_tokens(train_iter, ln),
min_freq=1,
specials=special_symbols,
special_first=True)
# Set UNK_IDX as the default index. This index is returned when the token is not found.
# If not set, it throws RuntimeError when the queried token is not found in the Vocabulary.
for ln in [SRC_LANGUAGE, TGT_LANGUAGE]:
vocab_transform[ln].set_default_index(UNK_IDX)
# -
# Seq2Seq Network using Transformer
# ---------------------------------
#
# Transformer is a Seq2Seq model introduced in `โAttention is all you
# needโ <https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf>`__
# paper for solving machine translation tasks.
# Below, we will create a Seq2Seq network that uses Transformer. The network
# consists of three parts. First part is the embedding layer. This layer converts tensor of input indices
# into corresponding tensor of input embeddings. These embedding are further augmented with positional
# encodings to provide position information of input tokens to the model. The second part is the
# actual `Transformer <https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html>`__ model.
# Finally, the output of Transformer model is passed through linear layer
# that give un-normalized probabilities for each token in the target language.
#
#
#
# +
from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import Transformer
import math
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# helper Module that adds positional encoding to the token embedding to introduce a notion of word order.
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information (Vaswani et al., 2017) to
    token embeddings, followed by dropout."""

    def __init__(self,
                 emb_size: int,
                 dropout: float,
                 maxlen: int = 5000):
        super(PositionalEncoding, self).__init__()
        # Frequencies 1 / 10000^(2i / d) for each even embedding index.
        freqs = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        positions = torch.arange(0, maxlen).reshape(maxlen, 1)
        table = torch.zeros((maxlen, emb_size))
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        self.dropout = nn.Dropout(dropout)
        # Shape (maxlen, 1, emb_size): a batch axis for seq-first inputs.
        self.register_buffer('pos_embedding', table.unsqueeze(-2))

    def forward(self, token_embedding: Tensor):
        seq_len = token_embedding.size(0)
        return self.dropout(token_embedding + self.pos_embedding[:seq_len, :])
# helper Module to convert tensor of input indices into corresponding tensor of token embeddings
class TokenEmbedding(nn.Module):
    """Look up token embeddings and rescale them by sqrt(emb_size), as
    prescribed in 'Attention Is All You Need'."""

    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size

    def forward(self, tokens: Tensor):
        scale = math.sqrt(self.emb_size)
        # .long() tolerates indices arriving in another integer dtype.
        return self.embedding(tokens.long()) * scale
# Seq2Seq Network
class Seq2SeqTransformer(nn.Module):
    """Encoder-decoder Transformer for translation: token embeddings plus
    positional encoding feed nn.Transformer, whose output is projected onto
    the target vocabulary by the linear `generator` head."""

    def __init__(self,
                 num_encoder_layers: int,
                 num_decoder_layers: int,
                 emb_size: int,
                 nhead: int,
                 src_vocab_size: int,
                 tgt_vocab_size: int,
                 dim_feedforward: int = 512,
                 dropout: float = 0.1):
        super(Seq2SeqTransformer, self).__init__()
        self.transformer = Transformer(d_model=emb_size,
                                       nhead=nhead,
                                       num_encoder_layers=num_encoder_layers,
                                       num_decoder_layers=num_decoder_layers,
                                       dim_feedforward=dim_feedforward,
                                       dropout=dropout)
        # Projects decoder states to unnormalized target-vocab logits.
        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        # One positional-encoding module is shared by source and target.
        self.positional_encoding = PositionalEncoding(
            emb_size, dropout=dropout)

    def forward(self,
                src: Tensor,
                trg: Tensor,
                src_mask: Tensor,
                tgt_mask: Tensor,
                src_padding_mask: Tensor,
                tgt_padding_mask: Tensor,
                memory_key_padding_mask: Tensor):
        """Full encoder-decoder pass; returns target-vocab logits."""
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
        # The positional `None` is nn.Transformer's memory_mask: encoder
        # output is fully visible to the decoder.
        outs = self.transformer(src_emb, tgt_emb, src_mask, tgt_mask, None,
                                src_padding_mask, tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)

    def encode(self, src: Tensor, src_mask: Tensor):
        # Encoder-only pass, used at inference time by greedy decoding.
        return self.transformer.encoder(self.positional_encoding(
            self.src_tok_emb(src)), src_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        # Decoder-only pass over pre-computed encoder `memory`.
        return self.transformer.decoder(self.positional_encoding(
            self.tgt_tok_emb(tgt)), memory,
            tgt_mask)
# -
# During training, we need a subsequent word mask that will prevent the model from looking into
# the future words when making predictions. We will also need masks to hide
# source and target padding tokens. Below, let's define a function that will take care of both.
#
#
#
# +
def generate_square_subsequent_mask(sz):
    """Float mask of shape (sz, sz): 0.0 at/below the diagonal and -inf
    above it, so attention cannot look at future positions."""
    visible = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
    mask = torch.zeros((sz, sz), device=DEVICE)
    return mask.masked_fill(~visible, float('-inf'))
def create_mask(src, tgt):
    """Build the four masks the Transformer needs: a causal target mask,
    an all-visible source mask, and padding masks for both sides."""
    src_len = src.shape[0]
    tgt_len = tgt.shape[0]
    tgt_mask = generate_square_subsequent_mask(tgt_len)
    # Source self-attention is unrestricted: an all-False boolean mask.
    src_mask = torch.zeros((src_len, src_len), device=DEVICE).type(torch.bool)
    # Padding masks are (batch, seq): True where the token is <pad>.
    src_padding_mask = (src == PAD_IDX).transpose(0, 1)
    tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
    return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask
# -
# Let's now define the parameters of our model and instantiate the same. Below, we also
# define our loss function which is the cross-entropy loss and the optimizer used for training.
#
#
#
# +
torch.manual_seed(0)
SRC_VOCAB_SIZE = len(vocab_transform[SRC_LANGUAGE])
TGT_VOCAB_SIZE = len(vocab_transform[TGT_LANGUAGE])
EMB_SIZE = 512
NHEAD = 8
FFN_HID_DIM = 512
BATCH_SIZE = 128
NUM_ENCODER_LAYERS = 3
NUM_DECODER_LAYERS = 3
transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
NHEAD, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, FFN_HID_DIM)
for p in transformer.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
transformer = transformer.to(DEVICE)
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)
optimizer = torch.optim.Adam(transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)
# -
# Collation
# ---------
#
# As seen in the ``Data Sourcing and Processing`` section, our data iterator yields a pair of raw strings.
# We need to convert these string pairs into the batched tensors that can be processed by our ``Seq2Seq`` network
# defined previously. Below we define our collate function that converts a batch of raw strings into batch tensors that
# can be fed directly into our model.
#
#
#
# +
from torch.nn.utils.rnn import pad_sequence
# helper function to club together sequential operations
def sequential_transforms(*transforms):
    """Chain several single-argument callables into one: the output of each
    stage feeds the next, in the order given."""
    def func(txt_input):
        value = txt_input
        for stage in transforms:
            value = stage(value)
        return value
    return func
# function to add BOS/EOS and create tensor for input sequence indices
def tensor_transform(token_ids: List[int]):
    """Wrap a token-id sequence with <bos>/<eos> markers and return the
    whole thing as one 1-D tensor."""
    bos = torch.tensor([BOS_IDX])
    eos = torch.tensor([EOS_IDX])
    return torch.cat((bos, torch.tensor(token_ids), eos))
# src and tgt language text transforms to convert raw strings into tensors indices
text_transform = {}
for ln in [SRC_LANGUAGE, TGT_LANGUAGE]:
text_transform[ln] = sequential_transforms(token_transform[ln], #Tokenization
vocab_transform[ln], #Numericalization
tensor_transform) # Add BOS/EOS and create tensor
# function to collate data samples into batch tensors
def collate_fn(batch):
    """Convert a batch of raw (src, tgt) string pairs into a pair of padded
    id tensors in seq-first layout, ready for the Seq2Seq network."""
    src_batch, tgt_batch = [], []
    for src_sample, tgt_sample in batch:
        src_batch.append(text_transform[SRC_LANGUAGE](src_sample.rstrip("\n")))
        tgt_batch.append(text_transform[TGT_LANGUAGE](tgt_sample.rstrip("\n")))
    # pad_sequence right-pads every sentence to the batch maximum length.
    return (pad_sequence(src_batch, padding_value=PAD_IDX),
            pad_sequence(tgt_batch, padding_value=PAD_IDX))
# -
# Let's define training and evaluation loop that will be called for each
# epoch.
#
#
#
# +
from torch.utils.data import DataLoader
def train_epoch(model, optimizer):
    """Run one training epoch over Multi30k 'train'; return the average
    per-batch loss."""
    model.train()
    losses = 0
    # A fresh iterator each epoch: torchtext datasets are one-shot iterables.
    train_iter = Multi30k(split='train', language_pair=(SRC_LANGUAGE, TGT_LANGUAGE))
    train_dataloader = DataLoader(train_iter, batch_size=BATCH_SIZE, collate_fn=collate_fn)

    for src, tgt in train_dataloader:
        src = src.to(DEVICE)
        tgt = tgt.to(DEVICE)

        # Teacher forcing: feed the target minus its last token; the
        # objective below is the target minus its leading <bos>.
        tgt_input = tgt[:-1, :]

        src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)

        logits = model(src, tgt_input, src_mask, tgt_mask,src_padding_mask, tgt_padding_mask, src_padding_mask)

        optimizer.zero_grad()

        tgt_out = tgt[1:, :]
        # Flatten (seq, batch, vocab) -> (seq*batch, vocab) for cross-entropy.
        loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
        loss.backward()

        optimizer.step()
        losses += loss.item()

    return losses / len(train_dataloader)
def evaluate(model):
    """Compute the average per-batch loss over the Multi30k 'valid' split."""
    model.eval()
    losses = 0

    val_iter = Multi30k(split='valid', language_pair=(SRC_LANGUAGE, TGT_LANGUAGE))
    val_dataloader = DataLoader(val_iter, batch_size=BATCH_SIZE, collate_fn=collate_fn)

    # No gradients are needed during evaluation; torch.no_grad() skips the
    # autograd graph entirely, saving memory and time.
    with torch.no_grad():
        for src, tgt in val_dataloader:
            src = src.to(DEVICE)
            tgt = tgt.to(DEVICE)

            # Same teacher-forcing shift as in train_epoch.
            tgt_input = tgt[:-1, :]

            src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)

            logits = model(src, tgt_input, src_mask, tgt_mask, src_padding_mask, tgt_padding_mask, src_padding_mask)

            tgt_out = tgt[1:, :]
            loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
            losses += loss.item()

    return losses / len(val_dataloader)
# -
# Now we have all the ingredients to train our model. Let's do it!
#
#
#
# +
from timeit import default_timer as timer

NUM_EPOCHS = 18

# Main loop: one full pass over Multi30k per epoch, then report the train
# and validation loss together with the wall-clock time per epoch.
for epoch in range(1, NUM_EPOCHS+1):
    start_time = timer()
    train_loss = train_epoch(transformer, optimizer)
    end_time = timer()
    val_loss = evaluate(transformer)
    print((f"Epoch: {epoch}, Train loss: {train_loss:.3f}, Val loss: {val_loss:.3f}, "f"Epoch time = {(end_time - start_time):.3f}s"))
# function to generate output sequence using greedy algorithm
def greedy_decode(model, src, src_mask, max_len, start_symbol):
    """Autoregressively decode `src`, always taking the arg-max token,
    until <eos> is produced or `max_len` tokens have been emitted."""
    src = src.to(DEVICE)
    src_mask = src_mask.to(DEVICE)

    # Encode once; the memory is reused at every decoding step.
    memory = model.encode(src, src_mask)
    # ys holds the generated sequence, seeded with the start symbol.
    ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
    for i in range(max_len-1):
        memory = memory.to(DEVICE)
        # Causal mask sized to the tokens generated so far.
        tgt_mask = (generate_square_subsequent_mask(ys.size(0))
                    .type(torch.bool)).to(DEVICE)
        out = model.decode(ys, memory, tgt_mask)
        out = out.transpose(0, 1)
        # Only the last position's logits matter for choosing the next token.
        prob = model.generator(out[:, -1])
        _, next_word = torch.max(prob, dim=1)
        next_word = next_word.item()

        # Append the chosen token to the growing output sequence.
        ys = torch.cat([ys,
                        torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)
        if next_word == EOS_IDX:
            break
    return ys
# actual function to translate input sentence into target language
def translate(model: torch.nn.Module, src_sentence: str):
    """Translate one raw German sentence to English via greedy decoding."""
    model.eval()
    # Shape (seq_len, 1): a single-sentence "batch" in seq-first layout.
    src = text_transform[SRC_LANGUAGE](src_sentence).view(-1, 1)
    num_tokens = src.shape[0]
    # Source attention is unrestricted: an all-False boolean mask.
    src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
    # Allow the translation to run a little longer than the input.
    tgt_tokens = greedy_decode(
        model, src, src_mask, max_len=num_tokens + 5, start_symbol=BOS_IDX).flatten()
    # Map ids back to tokens and strip the <bos>/<eos> markers.
    return " ".join(vocab_transform[TGT_LANGUAGE].lookup_tokens(list(tgt_tokens.cpu().numpy()))).replace("<bos>", "").replace("<eos>", "")
# -
print(translate(transformer, "Eine Gruppe von Menschen steht vor einem Iglu ."))
# References
# ----------
#
# 1. Attention is all you need paper.
# https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf
# 2. The annotated transformer. https://nlp.seas.harvard.edu/2018/04/03/attention.html#positional-encoding
#
#
| docs/_downloads/55438afbe9f596f4b1ed6b2c4987ebdb/translation_transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Tony607/mmdetection_object_detection_demo/blob/master/mmdetection_train_custom_coco_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="An830Vi4rMz6"
# # [How to train an object detection model with mmdetection](https://www.dlology.com/blog/how-to-train-an-object-detection-model-with-mmdetection/) | DLology blog
# + colab_type="code" id="IUB0TkVJJ751" colab={}
# You can add more model configs like below.
MODELS_CONFIG = {
'faster_rcnn_r50_fpn_1x': {
'config_file': 'configs/faster_rcnn_r50_fpn_1x.py'
},
'cascade_rcnn_r50_fpn_1x': {
'config_file': 'configs/cascade_rcnn_r50_fpn_1x.py',
},
'retinanet_r50_fpn_1x': {
'config_file': 'configs/retinanet_r50_fpn_1x.py',
}
}
# + [markdown] colab_type="text" id="Sr1Gn9OTrfKr"
# ## Your settings
# + colab_type="code" id="GGTdkzBJJ71E" colab={}
# TODO: change URL to your fork of my repository if necessary.
git_repo_url = 'https://github.com/Tony607/mmdetection_object_detection_demo'
# Pick the model you want to use
# Select a model in `MODELS_CONFIG`.
selected_model = 'faster_rcnn_r50_fpn_1x' # 'cascade_rcnn_r50_fpn_1x'
# Total training epochs.
total_epochs = 8
# Name of the config file.
config_file = MODELS_CONFIG[selected_model]['config_file']
# + [markdown] colab_type="text" id="9FZrzBIazgqm"
# ## Install Open MMLab Detection Toolbox
# Restart the runtime if you have issue importing `mmdet` later on.
#
# + colab_type="code" id="tsAkdXVP99NC" outputId="c5f34afe-c82a-45b2-88d2-a86c924d47d0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import os
from os.path import exists, join, basename, splitext
# %cd /content
project_name = os.path.abspath(splitext(basename(git_repo_url))[0])
mmdetection_dir = os.path.join(project_name, "mmdetection")
if not exists(project_name):
# clone "depth 1" will only get the latest copy of the relevant files.
# !git clone -q --recurse-submodules --depth 1 $git_repo_url
# dependencies
# !pip install -q mmcv terminaltables
# build
# !cd {mmdetection_dir} && python setup.py install
import sys
sys.path.append(mmdetection_dir)
import time
import matplotlib
import matplotlib.pylab as plt
plt.rcParams["axes.grid"] = False
# + [markdown] colab_type="text" id="JRg5LpevakhO"
# ## Stash the repo if you want to re-modify `voc.py` and config file.
# + colab_type="code" id="mxd1V4deJTBz" colab={}
# # !cd {mmdetection_dir} && git config --global user.email "<EMAIL>" && git config --global user.name "Tony607" && git stash
# + [markdown] colab_type="text" id="ONzS8Y7JJPXu"
# ### parse data classes
# + colab_type="code" id="ngMDpgzhJGRB" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="67473398-6b57-4dda-bb48-64852b69acf8"
# %cd {project_name}
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
anno_path = os.path.join(project_name, "data/VOC2007/Annotations")
classes_names = []
xml_list = []
for xml_file in glob.glob(anno_path + "/*.xml"):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall("object"):
classes_names.append(member[0].text)
classes_names = list(set(classes_names))
classes_names.sort()
classes_names
# + [markdown] id="YzphdOG_MQx2" colab_type="text"
# ## voc2coco
# [Previously](https://www.dlology.com/blog/how-to-train-an-object-detection-model-with-mmdetection/), we have trained a mmdetection model with custom annotated dataset in Pascal VOC data format. We will reuse the annotation by converting them into voco data format. Read more about it here
# [How to create custom COCO data set for object detection](https://www.dlology.com/blog/how-to-create-custom-coco-data-set-for-object-detection/)
# + id="t9ai2SjxMFpV" colab_type="code" colab={}
import sys
import os
import json
import xml.etree.ElementTree as ET
import glob
START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = None
# If necessary, pre-define category and its id
# PRE_DEFINE_CATEGORIES = {"aeroplane": 1, "bicycle": 2, "bird": 3, "boat": 4,
# "bottle":5, "bus": 6, "car": 7, "cat": 8, "chair": 9,
# "cow": 10, "diningtable": 11, "dog": 12, "horse": 13,
# "motorbike": 14, "person": 15, "pottedplant": 16,
# "sheep": 17, "sofa": 18, "train": 19, "tvmonitor": 20}
def get(root, name):
    """Return every direct child of `root` whose tag matches `name`
    (possibly an empty list)."""
    return root.findall(name)
def get_and_check(root, name, length):
    """Find children of `root` with tag `name` and assert the expected count.

    Returns the single element when length == 1, otherwise the list of
    matches.  Raises ValueError when nothing is found, or when length > 0
    and the number of matches differs from `length`.
    """
    # Renamed from `vars`, which shadowed the builtin of the same name.
    matches = root.findall(name)
    if len(matches) == 0:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if length > 0 and len(matches) != length:
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(matches))
        )
    if length == 1:
        matches = matches[0]
    return matches
def get_filename_as_int(filename):
    """Return the numeric stem of `filename` (e.g. 'imgs\\15.xml' -> 15).

    COCO image ids must be integers, so the extension-less basename has to
    parse as one; otherwise a ValueError is raised.
    """
    try:
        normalized = filename.replace("\\", "/")
        stem = os.path.splitext(os.path.basename(normalized))[0]
        return int(stem)
    except (ValueError, TypeError, AttributeError) as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the chained cause aids debugging.
        raise ValueError("Filename %s is supposed to be an integer." % (filename)) from exc
def get_categories(xml_files):
    """Generate category name to id mapping from a list of xml files.

    Arguments:
        xml_files {list} -- A list of xml file paths.

    Returns:
        dict -- category name to id mapping (ids follow sorted name order).
    """
    names = set()
    for xml_file in xml_files:
        root = ET.parse(xml_file).getroot()
        # Each <object>'s first child is its <name> element.
        for member in root.findall("object"):
            names.add(member[0].text)
    return {name: i for i, name in enumerate(sorted(names))}
def convert(xml_files, json_file):
    """Convert Pascal-VOC annotation XMLs into one COCO-format JSON file.

    Arguments:
        xml_files {list} -- paths of the VOC annotation files.
        json_file {str} -- output path; its parent directory is created.
    """
    json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
    if PRE_DEFINE_CATEGORIES is not None:
        categories = PRE_DEFINE_CATEGORIES
    else:
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    for xml_file in xml_files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # Prefer the optional <path> element; fall back to <filename>.
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))
        ## The filename must be a number
        image_id = get_filename_as_int(filename)
        size = get_and_check(root, "size", 1)
        width = int(get_and_check(size, "width", 1).text)
        height = int(get_and_check(size, "height", 1).text)
        image = {
            "file_name": os.path.basename(filename.replace("\\", "/")),
            "height": height,
            "width": width,
            "id": image_id,
        }
        json_dict["images"].append(image)
        ## Currently we do not support segmentation.
        # segmented = get_and_check(root, 'segmented', 1).text
        # assert segmented == '0'
        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            if category not in categories:
                # Unseen category (possible when PRE_DEFINE_CATEGORIES is
                # used): append it with the next free id.
                new_id = len(categories)
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC boxes are 1-based inclusive; COCO wants 0-based x/y plus
            # width/height.
            xmin = int(get_and_check(bndbox, "xmin", 1).text) - 1
            ymin = int(get_and_check(bndbox, "ymin", 1).text) - 1
            xmax = int(get_and_check(bndbox, "xmax", 1).text)
            ymax = int(get_and_check(bndbox, "ymax", 1).text)
            assert xmax > xmin
            assert ymax > ymin
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {
                "area": o_width * o_height,
                "iscrowd": 0,
                "image_id": image_id,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "id": bnd_id,
                "ignore": 0,
                "segmentation": [],
            }
            json_dict["annotations"].append(ann)
            bnd_id = bnd_id + 1
    for cate, cid in categories.items():
        cat = {"supercategory": "none", "id": cid, "name": cate}
        json_dict["categories"].append(cat)
    os.makedirs(os.path.dirname(json_file), exist_ok=True)
    # `with` guarantees the handle is closed even if serialization fails
    # (the original opened and closed the file manually).
    with open(json_file, "w") as json_fp:
        json_fp.write(json.dumps(json_dict))
# + id="ELIwzk5mMbyF" colab_type="code" colab={}
xml_dir = "data/VOC2007/Annotations/"
xml_files = glob.glob(os.path.join(xml_dir, "*.xml"))
trainval_split_file = "data/VOC2007/ImageSets/Main/trainval.txt"
test_split_file = "data/VOC2007/ImageSets/Main/test.txt"
def get_split_xml_files(xml_files, split_file):
    """Keep only the xml files whose extension-less basename is listed in
    `split_file` (one name per line, e.g. VOC's trainval.txt)."""
    with open(split_file) as f:
        wanted = {line.strip() for line in f}
    return [xml_file for xml_file in xml_files
            if os.path.splitext(os.path.basename(xml_file))[0] in wanted]
trainval_xml_files = get_split_xml_files(xml_files, trainval_split_file)
test_xml_files = get_split_xml_files(xml_files, test_split_file)
convert(trainval_xml_files, "data/coco/trainval.json")
convert(test_xml_files, "data/coco/test.json")
# + [markdown] colab_type="text" id="M3c6a77tJ_oF"
# ## Modify the object detection model config file to take our custom COCO dataset.
# + colab_type="code" id="85KCI0q7J7wE" outputId="3c0a9cf1-43c0-4d03-a1ac-02728de02564" colab={"base_uri": "https://localhost:8080/", "height": 35}
import os
config_fname = os.path.join(project_name, 'mmdetection', config_file)
assert os.path.isfile(config_fname), '`{}` not exist'.format(config_fname)
config_fname
# + colab_type="code" id="7AcgLepsKFX-" outputId="8dea4a6e-d07e-42fe-c6a4-93fd50766f9b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import re
fname = config_fname
with open(fname) as f:
s = f.read()
work_dir = re.findall(r"work_dir = \'(.*?)\'", s)[0]
# Update `num_classes` including `background` class.
s = re.sub('num_classes=.*?,',
'num_classes={},'.format(len(classes_names) + 1), s)
s = re.sub('ann_file=.*?\],',
"ann_file=data_root + 'VOC2007/ImageSets/Main/trainval.txt',", s, flags=re.S)
s = re.sub('total_epochs = \d+',
'total_epochs = {} #'.format(total_epochs), s)
if "CocoDataset" in s:
s = re.sub("data_root = 'data/coco/'",
"data_root = 'data/'", s)
s = re.sub("annotations/instances_train2017.json",
"coco/trainval.json", s)
s = re.sub("annotations/instances_val2017.json",
"coco/test.json", s)
s = re.sub("annotations/instances_val2017.json",
"coco/test.json", s)
s = re.sub("train2017", "VOC2007/JPEGImages", s)
s = re.sub("val2017", "VOC2007/JPEGImages", s)
else:
s = re.sub('img_prefix=.*?\],',
"img_prefix=data_root + 'VOC2007/JPEGImages',".format(total_epochs), s)
with open(fname, 'w') as f:
f.write(s)
# !cat {config_fname}
# + [markdown] id="OZ3bR_UX9WJ-" colab_type="text"
# ## Train the model
# + colab_type="code" id="xm-a0NkgKb4m" outputId="760908ef-3820-4394-c09f-3c93cdef3c97" colab={"base_uri": "https://localhost:8080/", "height": 274}
# %cd {project_name}
# !python mmdetection/tools/train.py {config_fname}
# + [markdown] id="kO7N_Pl99-h4" colab_type="text"
# ### Verify the checkpoint file exists.
# + colab_type="code" id="yFrN7i2Qpr1H" outputId="ce85939f-6d56-4696-94db-71c140b70a24" colab={"base_uri": "https://localhost:8080/", "height": 54}
# %cd {project_name}
checkpoint_file = os.path.join(work_dir, "latest.pth")
assert os.path.isfile(
checkpoint_file), '`{}` not exist'.format(checkpoint_file)
checkpoint_file = os.path.abspath(checkpoint_file)
checkpoint_file
# + [markdown] colab_type="text" id="4uCmYPNCVpqP"
# ## Test predict
#
# Turn down the `score_thr` if you think the model is missing any bbox.
# Turn up the `score_thr` if you see too much overlapping bboxes with low scores.
# + colab_type="code" id="FNTFhKuVVhMr" colab={}
import time
import matplotlib
import matplotlib.pylab as plt
plt.rcParams["axes.grid"] = False
import mmcv
from mmcv.runner import load_checkpoint
import mmcv.visualization.image as mmcv_image
# fix for colab
def imshow(img, win_name='', wait_time=0):
    """Drop-in replacement for mmcv's imshow that renders inline in Colab
    instead of opening a GUI window (win_name and wait_time are ignored)."""
    plt.figure(figsize=(50, 50))
    plt.imshow(img)
mmcv_image.imshow = imshow
from mmdet.models import build_detector
from mmdet.apis import inference_detector, show_result, init_detector
# + colab_type="code" id="qVJBetouno4q" colab={}
score_thr = 0.7
# build the model from a config file and a checkpoint file
model = init_detector(config_fname, checkpoint_file)
# test a single image and show the results
img = 'data/VOC2007/JPEGImages/15.jpg'
result = inference_detector(model, img)
show_result(img, result, classes_names,
score_thr=score_thr, out_file="result.jpg")
# + colab_type="code" id="smM4hrXBo9_E" outputId="ff532f19-3829-4f05-9dea-16a29952a98e" colab={"base_uri": "https://localhost:8080/", "height": 617}
from IPython.display import Image
Image(filename='result.jpg')
# + [markdown] colab_type="text" id="1xXO9qELNnOw"
# ## Download the config file
# + colab_type="code" id="ufug_6bONd9d" colab={}
from google.colab import files
files.download(config_fname)
# + [markdown] colab_type="text" id="R2YcLN7GObJZ"
# ## Download checkpoint file.
# + [markdown] colab_type="text" id="rMCrafjhN61s"
# ### Option1 : upload the checkpoint file to your Google Drive
# Then download it from your Google Drive to local file system.
#
# During this step, you will be prompted to enter the token.
# + colab_type="code" id="eiRj6p4vN5iT" colab={}
# Install the PyDrive wrapper & import libraries.
# This only needs to be done once in a notebook.
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
fname = os.path.basename(checkpoint_file)
# Create & upload a text file.
uploaded = drive.CreateFile({'title': fname})
uploaded.SetContentFile(checkpoint_file)
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# + [markdown] colab_type="text" id="bqC9kHuVOOw6"
# ### Option2 : Download the checkpoint file directly to your local file system
# This method may not be stable when downloading large files like the model checkpoint file. Try **option 1** instead if not working.
# + colab_type="code" id="AQ5RHYbTOVnN" colab={}
files.download(checkpoint_file)
| mmdetection_train_custom_coco_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Developing an AI application
#
# Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
#
# <img src='assets/Flowers.png' width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset
# * Train the image classifier on your dataset
# * Use the trained classifier to predict image content
#
# We'll lead you through each part which you'll implement in Python.
#
# When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
#
# First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
import json
# -
# ## Load the data
#
# Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
#
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# +
# DONE: Define your transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# DONE: Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=test_transforms)
# DONE: Using the image datasets and the transforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle=False)
# +
# Test out the loader to validate that it looks reasonable
import helper
data_iter = iter(test_loader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
ax = axes[ii]
helper.imshow(images[ii], ax=ax)
# -
# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
# +
# when reporting results we need to map category labels to names and need to map
# model indexes to labels. Save those parameters here.
# TODO: leaving these as global variables for now since they are characteristics of
# the data
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# grab the mapping of class labels to model indexes and the inverse
class_to_idx = train_data.class_to_idx
idx_to_class = dict([(v, k) for k, v in class_to_idx.items()])
# -
# # Building and training the classifier
#
# Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
#
# We're going to leave this part up to you. If you want to talk through it with someone, chat with your fellow students! You can also ask questions on the forums or join the instructors in office hours.
#
# Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
#
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
# * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
# * Train the classifier layers using backpropagation using the pre-trained network to get the features
# * Track the loss and accuracy on the validation set to determine the best hyperparameters
#
# We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
#
# When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
# +
# TODO: Build and train your network
# load a pre-trained network
# Define a new feed forward network as a classifier, using ReLU activations and dropout
# Train the classifier layers using backpropagation using the pre-trained network to get the features
# Track the loss and accuracy on the validation set to determine the best hyperparameters
# -
# Setup the device so that it can use cuda if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Helper class from lesson 5 to create a classifier with different paramters
class Network(nn.Module):
    """Fully-connected classifier with an arbitrary stack of hidden layers.

    Each hidden layer is followed by ReLU and dropout; the output layer
    emits log-probabilities (log_softmax), so pair it with nn.NLLLoss.
    """

    def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):
        """Build the network.

        input_size: integer, size of the input
        output_size: integer, size of the output layer
        hidden_layers: list of integers, the sizes of the hidden layers
        drop_p: float between 0 and 1, dropout probability
        """
        super().__init__()
        # Chain the layer sizes: input -> hidden[0] -> ... -> hidden[-1]
        sizes = [input_size] + list(hidden_layers)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(sizes[:-1], sizes[1:])]
        )
        self.output = nn.Linear(hidden_layers[-1], output_size)
        self.dropout = nn.Dropout(p=drop_p)

    def forward(self, x):
        """Forward pass; returns log-probabilities of shape (batch, output_size)."""
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        return F.log_softmax(self.output(x), dim=1)
# +
def get_classifier_in_features(model):
    '''Return the input feature count of the first layer of model.classifier.'''
    first_layer = list(model.classifier.children())[0]
    return first_layer.in_features
def create_network(model, params):
    '''
    Build (or restore) a classifier on top of a pretrained feature extractor.

    model  - a pretrained imagenet feature-detection model
    params - dict of construction/state parameters:
             'output_size', 'hidden_layers', 'drop_p', 'lr' are always required
             'state_dict'     - saved classifier weights (optional)
             'opt_state_dict' - saved optimizer state (optional)

    Returns (model, optimizer).
    '''
    # Freeze the feature extractor: only the new classifier will be trained.
    for weight in model.parameters():
        weight.requires_grad = False

    # Size the new classifier's input to match the model's original classifier.
    new_classifier = Network(input_size=get_classifier_in_features(model),
                             output_size=params['output_size'],
                             hidden_layers=params['hidden_layers'],
                             drop_p=params['drop_p'])
    if 'state_dict' in params:
        new_classifier.load_state_dict(params['state_dict'])

    # Swap the original classifier for ours.
    model.classifier = new_classifier

    # Optimize only the classifier parameters; restore state when resuming.
    optimizer = optim.Adam(model.classifier.parameters(), lr=params['lr'])
    if 'opt_state_dict' in params:
        optimizer.load_state_dict(params['opt_state_dict'])
    return model, optimizer
# +
model_params = {'output_size': 102,
'hidden_layers': [4000, 1000, 200],
'drop_p': 0.5,
'lr': 0.001}
model, optimizer = create_network(models.vgg16(pretrained=True), model_params)
criterion = nn.NLLLoss()
# +
def validation(model, testloader, criterion):
    '''
    Run the testloader data through the model; return (total loss, total accuracy).
    Relies on the module-level `device` global.
    Relies on the caller to turn off gradient calculation
    and to put the model in eval mode.
    '''
    loss_total = 0
    acc_total = 0
    for batch_images, batch_labels in testloader:
        batch_images = batch_images.to(device)
        batch_labels = batch_labels.to(device)
        log_ps = model.forward(batch_images)
        loss_total += criterion(log_ps, batch_labels).item()
        # predicted class = index of the highest probability
        predictions = torch.exp(log_ps).max(dim=1)[1]
        matches = (batch_labels.data == predictions)
        acc_total += matches.type(torch.FloatTensor).mean()
    return loss_total, acc_total
def train(model, train_loader, valid_loader, epochs, print_every=40, fake=False):
    '''
    Train the image classification model, periodically reporting the running
    training loss and the loss/accuracy on the validation set.

    NOTE(review): relies on the module-level `optimizer`, `criterion` and
    `device` globals rather than taking them as arguments.

    model - the image classification model
    train_loader - DataLoader for training data
    valid_loader - DataLoader for validation data
    epochs - number of epochs to train
    print_every - number of steps between progress reports
    fake - if True, run a single step of a single epoch with the validation
           pass stubbed out (smoke test of the training loop)
    '''
    print("Start model training")
    # Use cuda if available
    model.to(device)
    if fake:
        # smoke-test mode: force one reporting step
        epochs = 1
        print_every = 1
    steps = 0
    for e in range(epochs):
        # ensure that we are in training mode to start
        model.train()
        # loss to report
        running_loss = 0
        # iterate through training data
        for inputs, labels in train_loader:
            steps += 1
            # move to device cuda if available
            inputs, labels = inputs.to(device), labels.to(device)
            # training steps
            optimizer.zero_grad()
            outputs = model.forward(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # report loss and current accuracy
            running_loss += loss.item()
            if steps % print_every == 0:
                # Make sure network is in eval mode for inference
                model.eval()
                if fake:
                    test_loss, accuracy = 0.0, 0.0
                else:
                    # Turn off gradients for validation, saves memory and computations
                    with torch.no_grad():
                        test_loss, accuracy = validation(model, valid_loader, criterion)
                print("Epoch: {}/{}... ".format(e+1, epochs),
                      "Loss: {:.4f}".format(running_loss/print_every),
                      "Test Loss: {:.3f}.. ".format(test_loss/len(valid_loader)),
                      "Test Accuracy: {:.3f}".format(accuracy/len(valid_loader)))
                running_loss = 0
                # Make sure training is back on
                model.train()
            if fake:
                break
    print("End model training")
# -
# Train!
train(model, train_loader, valid_loader, epochs=3, fake=True)
# ## Testing your network
#
# It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
# +
# DONE: Do validation on the test set
model.eval()
with torch.no_grad():
test_loss, accuracy = validation(model, test_loader, criterion)
print("Test Loss: {:.3f}.. ".format(test_loss/len(test_loader)))
print("Test Accuracy: {:.3f}.. ".format(accuracy/len(test_loader)))
# -
# ## Save the checkpoint
#
# Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
def save_checkpoint(model, model_params, optimizer, filepath):
    """Serialize everything needed to rebuild the classifier to `filepath`.

    The checkpoint contains the construction params plus the classifier and
    optimizer state dicts, so `create_network` can restore it later.

    NOTE(review): relies on the module-level `train_data` for the
    class-to-index mapping.
    """
    checkpoint = dict(model_params)
    checkpoint['class_to_idx'] = train_data.class_to_idx
    checkpoint['state_dict'] = model.classifier.state_dict()
    checkpoint['opt_state_dict'] = optimizer.state_dict()
    # Bug fix: save to the `filepath` argument; the original wrote to the
    # global `path` and silently ignored its own parameter.
    torch.save(checkpoint, filepath)
path = 'checkpoint_test.pth'
# DONE: Save the checkpoint
save_checkpoint(model, model_params, optimizer, path)
# Load the state dictionary from the checkpoint path
state = torch.load(path)
state.keys()
# ## Loading the checkpoint
#
# At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
# DONE: Write a function that loads a checkpoint and rebuilds the model
# def create_network(model, params) loads an saved model as well as building a new model
model2, optimizer2 = create_network(models.vgg16(pretrained=True), state)
# # Inference for classification
#
# Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First you'll need to handle processing the input image such that it can be used in your network.
#
# ## Image Preprocessing
#
# You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model.

    image - path to the image file

    Returns a torch float64 tensor of shape (3, 224, 224) (the original
    docstring said "Numpy array", but torch.from_numpy is returned).
    '''
    im = Image.open(image)
    width_orig, height_orig = im.size
    # Resize so the SHORTEST side becomes 256 px, preserving aspect ratio
    # (mirrors torchvision.transforms.Resize(256)). Sizes are cast to int:
    # PIL expects integer pixel dimensions.
    if width_orig < height_orig:
        size_reduced = (256, int(256 * height_orig / width_orig))
    else:
        size_reduced = (int(256 * width_orig / height_orig), 256)
    im.thumbnail(size_reduced)
    # Center-crop to 224x224 using integer offsets so the crop box is
    # pixel-aligned.
    width, height = im.size
    left = (width - 224) // 2
    upper = (height - 224) // 2
    im = im.crop((left, upper, left + 224, upper + 224))
    # Scale channels to [0, 1] and apply the ImageNet normalization the
    # pretrained networks expect.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = np.array(im)
    np_image = np_image / 255.0
    np_image = np_image - mean
    np_image = np_image / std
    # PyTorch wants channels first: (H, W, C) -> (C, H, W)
    np_image = np_image.transpose((2, 0, 1))
    return torch.from_numpy(np_image)
# To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
def imshow(image, ax=None, title=None, hideTicks=False):
    """Display a normalized (C, H, W) tensor as an image; returns the axes."""
    if ax is None:
        fig, ax = plt.subplots()
    # matplotlib wants channels last, so move the color axis to the end,
    # then undo the ImageNet normalization applied during preprocessing.
    arr = image.numpy().transpose((1, 2, 0))
    arr = arr * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    # Clip to [0, 1] or rounding spill renders as noise.
    arr = np.clip(arr, 0, 1)
    ax.imshow(arr)
    if title:
        ax.set_title(title)
    if hideTicks:
        for side in ('top', 'right', 'left', 'bottom'):
            ax.spines[side].set_visible(False)
        ax.tick_params(axis='both', length=0)
        ax.set_xticklabels('')
        ax.set_yticklabels('')
    return ax
validation_image = process_image('flowers/test/87/image_05466.jpg')
ax = imshow(validation_image, hideTicks=False)
# ## Class Prediction
#
# Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
#
# To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
#
# Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
# DONE: Implement the code to predict the class from an image file
def predict(image_path, model, topk = 5):
    '''
    Return the top-k probabilities and class labels for an image.

    probs is an nparray of probabilities; classes is a list of class labels.

    Usage:
        probs, classes = predict(image_path, model)
        print(probs)
        print(classes)
        > [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
        > ['70', '3', '45', '62', '55']
    '''
    # Preprocess and add a batch dimension.
    tensor = process_image(image_path).unsqueeze(0)
    model.eval()
    with torch.no_grad():
        # The image tensor is float64, so run the model in double precision.
        log_ps = model.double().forward(tensor)
        ps = torch.exp(log_ps)
        top_ps, top_idx = ps.topk(topk)
    probs = top_ps[0].numpy()
    # Map model output indexes back to dataset class labels.
    labels = [idx_to_class[idx] for idx in top_idx[0].numpy().tolist()]
    return probs, labels
# ## Sanity Checking
#
# Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='assets/inference_example.png' width=300px>
#
# You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
sc_image_path = 'flowers/test/87/image_05466.jpg'
sc_probs, sc_classes = predict(sc_image_path, model2)
print(sc_probs)
print(sc_classes)
# +
# DONE: Display an image along with the top 5 classes
def image_path_to_labels(image_path):
    '''Derive (category, flower name) from a dataset path like flowers/test/87/x.jpg.

    The category is the directory name just above the file; the flower name
    comes from the module-level `cat_to_name` mapping.
    '''
    category = image_path.split('/')[-2]
    return category, cat_to_name[category]
sc_class_names = [cat_to_name[cat] for cat in sc_classes]
sc_cat, sc_name = image_path_to_labels(sc_image_path)
sc_image_data = process_image(sc_image_path)
ax = plt.subplot(111)
ax = imshow(sc_image_data, title=sc_name, hideTicks=True, ax=ax)
plt.show()
ax = plt.subplot(111)
plt.barh(sc_class_names, sc_probs, align='center')
plt.gca().invert_yaxis()
plt.show()
| Image Classifier Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import figure1
import figure5
import pickle
# %pylab inline
# %load_ext autoreload
# %autoreload 2
sparse, img, ANGLE, parameters = figure5.Figure5.angle(flags=[True,False,False])
imshow(img)
sparse
parameters
# ## Write image to folder containing Mask R-CNN demo script
with open('Mask_RCNN_code/samples/angle_img.pickle', 'wb') as f:
pickle.dump(img, f)
with open('Mask_RCNN_code/samples/angle_img.pickle', 'rb') as f:
loaded_img = pickle.load(f)
loaded_img
imshow(loaded_img)
| Test_Stimuli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2021년 9월 7일 화요일
# ### BaekJoon - 생태학 (Python)
# ### ๋ฌธ์ : https://www.acmicpc.net/problem/4358
# ### ๋ธ๋ก๊ทธ : https://somjang.tistory.com/entry/BaekJoon-4358%EB%B2%88-%EC%83%9D%ED%83%9C%ED%95%99-Python
# ### Solution
# +
from sys import stdin
input = stdin.readline
def ecology():
    """Read tree names from stdin until a blank line (or EOF).

    Returns:
        tree_dict  - dict mapping tree name -> occurrence count
        tree_names - alphabetically sorted list of the distinct tree names
        tree_num   - total number of trees read
    """
    tree_dict = {}
    tree_num = 0
    while True:
        tree = input().rstrip()
        # stdin.readline() returns '' at EOF; a blank line also ends input.
        if not tree:
            break
        tree_num += 1
        # dict.get replaces the membership test and the redundant name set
        # the original maintained alongside the dict.
        tree_dict[tree] = tree_dict.get(tree, 0) + 1
    # Sorting the dict keys gives the same result as sorting the old set.
    tree_names = sorted(tree_dict)
    return tree_dict, tree_names, tree_num
if __name__ == "__main__":
    tree_dict, tree_names, tree_num = ecology()
    for tree in tree_names:
        print(f"{tree} {((tree_dict[tree] / tree_num) * 100):.4f}")
| DAY 401 ~ 500/DAY476_[BaekJoon] ์ํํ (Python).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # General setting
#
# In all of the following graphs we use the transition probabilites estimated from group 4 of <NAME>'s 1987 paper. We stylize the cost parameters to (50, 400), instead if (10, 2.27). The simulation exercises are always with 1000 buses over 100000 periods. For the construction of the uncertainty sets we always use the mean number of observations 4292 / 78.
# # Observations
from data.data_reading import data_reading
from data.data_processing import data_processing
from ruspy.estimation.estimation_transitions import estimate_transitions
beta = 0.9999
init_dict = {
"groups": "group_4",
"binsize": 1000,
}
data_reading()
repl_data = data_processing(init_dict)
state_count = repl_data["state"].value_counts().sort_index()
bin_size = 10000
df_num_obs(bin_size, init_dict, state_count)
get_number_observations(bin_size, init_dict, state_count)
# # Transition probabilities
state = 15
get_probabilities(state)
get_probabilities_bar(15)
df_probability_shift(state)
get_probability_shift(state)
get_probability_shift_data(state)
# # Policy Features
#
df_maintenance_probabilties()
get_maintenance_probabilities()
# # Policy performance
# +
max_period = 50
init_dict = {
"discount_factor": 0.9999,
"buses": 1,
"periods": 90,
"seed": 16,
}
df = get_demonstration_df(init_dict)
df
# -
get_demonstration(df, max_period)
df_thresholds()
get_replacement_thresholds()
get_decision_rule_df()
get_performance_decision_rules()
df = get_difference_df()
df[df["omega"] == 0.01]
get_difference_plot()
| notebooks/03_1_ex_post.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regresion Lineal con scikit-learn
# ### _Este paquete selecciona por sí solo los rasgos más indicados para convertirse en variables predictoras_
# * Seleccion a traves del metodo hacia atras
# * **Dataset name:** mtcars
# * **URL:** https://www.kaggle.com/lavanya321/mtcars
#
# En este ejercicio decidí cambiar el dataset.
# * La relacion lineal no esta tan definida como en anteriores casos
# ## Conozcamos los datos
# +
import pandas as pd #lib dataset management
import numpy as np #number lib
import matplotlib.pyplot as plt#plots and graphics
from sklearn.feature_selection import RFE #Resourced Fisher Elimination
from sklearn.svm import SVR #super vector machine
# -
data = pd.read_csv("../datasets/mtcars.csv")
data.head()
data.shape
data.dtypes
# %matplotlib inline
data.plot(kind="scatter", x="disp", y="mpg")
# La relación entre disp y mpg no es completamente lineal, sin embargo para efectos de uso de la librería podemos usarla y ver cómo se comporta
data.corr()
plt.matshow(data.corr())#correlacion entre las variables
# * mas brillante significa mejor relacion
# ## Realizando el modelo
predictoras = ["hp","disp","drat", "cyl", "wt", "qsec", "vs","am", "gear", "carb"]#columnas que en principio seran predictoras
X = data[predictoras] #predictoras
Y = data["mpg"] #variable a predecir (Metros Por Galon)
estimator = SVR(kernel="linear")#indico estimador
selector = RFE(estimator,3, step=1) #le indico # de varaibles deseadas
selector = selector.fit(X,Y)
selector.support_ #que variables se quedaron? --> drat, cyl,wt
selector.ranking_ #las seleccionadas son 1, las otras se ordenan de acuerdo a su significatividad (como una competencia)
# Este metodo nos ahorra muchisimas lineas de codigo, evita los errores humanos.
# * Como primera impresion podemos pensar que la relacion lineal mas notable sera entre "Hp" y "mpg". Sin embargo la libreria elimino esa variable y se quedo con otras 3: drat, cyl,wt
# * Como humanos puede ser dificil ver y aceptar que el dato que segun nuestra razon consideramos mas probable sea incluso el menos preciso para el desarrollo del modelo
# * "hp" ocupo el lugar 6. Es decir un dato verdaderamente malo prediciendo los "mpg" en una relacion lineal
# * Quiza en otro tipo de relacion este dato sea mas valorado
# ### Usando las varaibles seleccionadas
from sklearn.linear_model import LinearRegression
X_pred = X[["drat", "cyl","wt"]]
lm = LinearRegression()
lm.fit(X_pred,Y)
lm.intercept_ #la alpha
lm.coef_
lm.score(X_pred,Y) #valor de R^2 ajustado
| notebooks/01-2_MultipleLinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ojU5bp3wNah2" colab_type="text"
# # swapcase()
# + [markdown] id="7TZXi4rUNeYL" colab_type="text"
# swapcase() metodu büyük harfleri küçük harflere,
#
# küçük harfleri büyük harflere çevirir
# + id="OnL77HzxNqVg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="4e56ff95-4ad7-4b11-fa58-3680d9d2d35a"
kardiz = "python"
print(kardiz.swapcase())
kardiOne = "PYTHON"
print(kardiOne.swapcase())
kardizTwo = "Python"
print(kardizTwo.swapcase())
# + [markdown] id="8Hx15UXnOU0j" colab_type="text"
# bu metodda Türkçe karakterlerle problem yaşıyoruz
#
# bu problemi çözebilmek için:
# + id="Ak9qZNxAOcNx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="c806d5d2-1847-4401-9d5d-191f899b969b"
kardiz = "istanbul"
# Turkish has dotted/dotless i pairs: str.swapcase maps 'i' to 'I', which is
# wrong for Turkish, so 'i' <-> 'İ' is handled explicitly and every other
# character falls through to swapcase. (The original literals were
# mojibake-corrupted; 'İ' restored from context.)
for i in kardiz:
    if i == "İ":
        kardiz = kardiz.replace("İ","i")
    elif i == "i":
        kardiz = kardiz.replace("i","İ")
    else:
        kardiz = kardiz.replace(i,i.swapcase())
print(kardiz)
# + [markdown] id="2LuuTP72PH4l" colab_type="text"
# # + for döngüsü sayesinde harfleri tek tek işlemden geçiriyoruz
# # + bu kodlar sayesinde sorun yaratacak yerlere ekleme ve çıkarma yapıyoruz
# # + daha sonra geri kalan harfler için ise swapcase() metodunu uyguluyoruz.
| karakterDiziMetod/swapcase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py38
# language: python
# name: py38
# ---
# # Calculating Pi with fractions
#
# - toc: false
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [algorythms, twitter]
# 
# $\pi = \frac{4}{1+\frac{1^2}{2 + recursion(n)}}$
# + tags=[]
import numpy as np
def frac_one(n: int, i: int = 0):
    """Evaluate n levels of the continued-fraction tail for pi,
    terminating the deepest level with 1:

        pi = 4 / (1 + 1^2 / (2 + 3^2 / (2 + 5^2 / ...)))

    n - remaining recursion depth; i - current level (internal).
    (The unused local `nominator` from the original was removed.)
    """
    i += 1
    n -= 1
    if n == 0:
        return 1
    # (1 + 2*i)^2 is the squared odd numerator at this level.
    return 2 + (1 + 2 * i) ** 2 / frac_one(n, i)
def frac_inf(n: int, i: int = 0):
    """Evaluate n levels of the continued-fraction tail for pi,
    terminating the deepest level with infinity (so its term vanishes):

        pi = 4 / (1 + 1^2 / (2 + 3^2 / (2 + 5^2 / ...)))

    n - remaining recursion depth; i - current level (internal).
    (The unused local `nominator` from the original was removed.)
    """
    i += 1
    n -= 1
    if n == 0:
        return np.inf
    # (1 + 2*i)^2 is the squared odd numerator at this level.
    return 2 + (1 + 2 * i) ** 2 / frac_inf(n, i)
# + tags=[]
def pi_approx(func, n_iters):
    """Approximate pi as 4 / (1 + 1/tail), where tail = func(n_iters)."""
    tail = func(n_iters)
    return 4 / (1 + 1 / tail)
# + tags=[]
def pi_mixed(n_iters):
    """Average the two tail conventions (deepest term 1 vs. infinity)."""
    approx_one = pi_approx(frac_one, n_iters)
    approx_inf = pi_approx(frac_inf, n_iters)
    return (approx_one + approx_inf) / 2
# -
# + tags=[]
# let's use our laptop's recursion limit to get maximum accuracy
import sys
max_recursion = int(sys.getrecursionlimit()*.98)
max_recursion
# + tags=[]
pi_approx(frac_one, max_recursion)
# + tags=[]
pi_approx(frac_inf, max_recursion)
# + tags=[]
print(f"""
At recursion depth of {max_recursion} the approximation error is:
* {(np.pi - pi_approx(frac_one, max_recursion))/np.pi:.3%} for frac_one
* {(np.pi - pi_approx(frac_inf, max_recursion))/np.pi:.3%} for frac_inf
* {(np.pi - pi_mixed(max_recursion))/np.pi:.12%} for pi_mixed
"""
)
# -
| _notebooks/2021-05-06 Infinite Pi Fraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Scientific Python
#
# ### Matrices
# Dealing with vectors and matrices efficiently requires the **numpy** library. For the sake of brevity we will import this with a shorter name:
import numpy as np
# The numpy supports arrays and matrices with many of the features that would be familiar to matlab users. See here quick summary of [numpy for matlab users](https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html).
#
# Apart from the convenience, the numpy methods are also much faster at performing operations on matrices or arrays than performing arithmetic with numbers stored in lists.
#
x = np.array([1,2,3,4,5])
y = np.array([2*i for i in x])  # NOTE(review): plain `2*x` would vectorise this
x+y # element wise addition (bare expression — displayed by the notebook)
X = x[:4].reshape(2,2) # reshape the first four elements into a 2x2 array
2*X # multiply by a scalar
# However watch out: array is not quite a matrix. For proper matrix operations you need to use the matrix type. Unlike **array**s that can have any number of dimensions, matrices are limited to 2 dimension. However matrix multiplication does what you would expect from a linear algebra point of view, rather than an element-wise multiplication:
# NOTE(review): np.matrix is deprecated in modern NumPy; the `@` operator on
# plain arrays gives true matrix multiplication.  Kept as-is for the demo.
Y = np.matrix(X)
print("X=Y=\n",Y)
# array `*` is element-wise; matrix `*` is linear-algebra multiplication
print("array X*X=\n",X*X,'\nmatrix Y*Y=\n',Y*Y)
# Much more information on how to use numpy is available at [quick start tutorial](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html)
# ### Plotting
# There are lots of configuration options for the **matplotlib** library that we are using here. For more information see [http://matplotlib.org/users/beginner.html]
#
# To get started we need the following bit of 'magic' to make the plotting work:
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Now we can try something simple:
# +
# A single list is treated as y-values; x defaults to the indices 0..3.
plt.plot([1,2,3,4])
plt.ylabel('some numbers')
plt.show()
# +
# A slightly more complicated plot with the help of numpy:
# sample one full period of sin and cos at 256 points.
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C)
plt.plot(X, S)
plt.show()
# -
# Annotating plots can be done with methods like **text()** to place a label and **annotate()**. For example:
t = np.arange(0.0, 5.0, 0.01)
line, = plt.plot(t, np.cos(2*np.pi*t), lw=2)  # tuple-unpack the single Line2D
# annotate() draws an arrow from the label position `xytext` to the data
# point `xy`.
plt.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
             arrowprops=dict(facecolor='black', shrink=0.05),
             )
# text can include basic LaTeX commands - but the string must be marked
# raw (r"") or have every '\' escaped (as '\\')
plt.text(1,-1.5,r"Graph of $cos(2\pi x)$")
plt.ylim(-2,2)
plt.show()
# Here is an example of how to create a basic surface contour plot.
#
# +
# Build a 2-D surface from two Gaussian bumps on a regular mesh.
# Fix: removed `import matplotlib.mlab as mlab` — it was unused (the old
# mlab.bivariate_normal helper is never called; the surface is computed
# directly with np.exp below, and bivariate_normal was removed from
# matplotlib anyway).
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)  # define mesh of points
Z1 = np.exp(-X**2 - Y**2)              # bump centred at the origin
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)  # bump centred at (1, 1)
Z = (Z1 - Z2) * 2
# -
# Create a simple contour plot with labels using default colors.  The
# `inline` argument to clabel controls whether the labels are drawn over
# the line segments of the contour, removing the lines beneath the label.
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
plt.show()
| JupyterNotebooks/2019/08-Scientific Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# ## Package Imports
# +
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, KFold, cross_val_score, \
RandomizedSearchCV, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# -
# ## Data Imports
# Oura-ring trend exports for three people, stacked into one frame.
# NOTE(review): pd.concat keeps each file's own RangeIndex, so the combined
# index has duplicates — confirm downstream code never relies on it.
df1_sleep_orig = pd.read_csv('person1_oura_2019-09-30_2020-09-30_trends.csv')
df2_sleep_orig = pd.read_csv('person2_oura_2020-07-22_2020-10-01_trends.csv')
df3_sleep_orig = pd.read_csv('person3_oura_2019-01-01_2020-09-22_trends.csv')
df_sleep = pd.concat([df1_sleep_orig, df2_sleep_orig, df3_sleep_orig])
# Quick sanity checks (values displayed by the notebook).
df_sleep.head()
df_sleep.shape
df_sleep.dtypes
# # Data Transformations
# ## Column renaming
# Normalise column names: lower-case with underscores instead of spaces.
df_sleep.rename(columns = lambda x: x.lower().replace(' ', '_'), inplace = True)
# ## Removing columns
# Keep only the sleep-related columns used downstream.
list_sleep_cols = ['date', 'sleep_score', 'total_bedtime', 'total_sleep_time',
                   'awake_time', 'rem_sleep_time', 'light_sleep_time',
                   'deep_sleep_time', 'restless_sleep', 'sleep_efficiency',
                   'sleep_latency', 'sleep_timing', 'bedtime_start',
                   'bedtime_end', 'average_resting_heart_rate',
                   'lowest_resting_heart_rate', 'average_hrv',
                   'temperature_deviation_(ยฐc)', 'respiratory_rate']
df_sleep2 = df_sleep[list_sleep_cols].copy()
df_sleep3 = df_sleep2.copy()
# ## Dropping Nans
df_sleep3.isnull().sum()
# Rows missing the target (sleep_score) or the key HRV feature are unusable.
df_sleep3.dropna(subset=['sleep_score', 'average_hrv'], inplace=True)
df_sleep3.isnull().sum()
# ## Removing Outliers
df_sleep4 = df_sleep3.copy()
df_sleep4['sleep_score'].hist()
df_sleep4.describe()
# Tukey fence: drop rows with any value outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
# NOTE(review): quantile() is called on the whole frame, which still contains
# non-numeric columns ('date', 'bedtime_start', ...) — confirm the installed
# pandas version tolerates this (newer versions need numeric_only handling).
Q1 = df_sleep4.quantile(0.25)
Q3 = df_sleep4.quantile(0.75)
IQR = Q3 - Q1
print(IQR)
df_sleep5 = df_sleep4.loc[~((df_sleep4 < (Q1 - 1.5 * IQR)) |
                            (df_sleep4 > (Q3 + 1.5 * IQR))).any(axis=1)].copy()
df_s = df_sleep5.copy()
df_s.shape
# # Random Forest
# ## Data Prep
# Raw feature columns fed to the model (everything except date/bedtime
# timestamps and the target itself).
list_raw_sleep_cols = ['total_bedtime', 'total_sleep_time', 'awake_time',
                       'rem_sleep_time', 'light_sleep_time',
                       'deep_sleep_time', 'restless_sleep', 'sleep_efficiency',
                       'sleep_latency', 'sleep_timing',
                       'average_resting_heart_rate',
                       'lowest_resting_heart_rate', 'average_hrv',
                       'temperature_deviation_(ยฐc)', 'respiratory_rate']
# Lookup table mapping each raw column to the generic x0..x14 names that
# PolynomialFeatures uses in its generated feature names.
list_col_poly = [f'x{i}' for i in range(0, len(list_raw_sleep_cols))]
df_col_names = pd.DataFrame(list_col_poly, index=list_raw_sleep_cols,
                            columns=['Poly Name'])
df_col_names;
X = df_s[list_raw_sleep_cols]
y = df_s[['sleep_score']]  # target: overall Oura sleep score
# ## Train/ CV/ Test Split
# We have split 80/20 between train/ cv and test
X_train_cv, X_test, y_train_cv, y_test = train_test_split(X, y, test_size=.20,
                                                          random_state=1)
# We will split our train/cv 60/20 into train and cv now
# (0.25 of the remaining 80% equals 20% of the full data set).
X_train, X_cv, y_train, y_cv = train_test_split(X_train_cv, y_train_cv,
                                                test_size=.25,
                                                random_state=1)
# ## Tuning Parameters
# ### Randomized CV Search
# +
# Hyper-parameter ranges to sample from in the randomized search.
n_estimators = [int(x) for x in np.linspace(start=100, stop=800, num=8)]
# NOTE(review): 'auto' for max_features is deprecated/removed in newer
# scikit-learn releases — confirm the installed version still accepts it.
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 100, num=10)]
max_depth.append(None)  # None lets trees grow until leaves are pure
min_samples_split = [2, 5, 10, 15]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
degree = [1, 2, 3, 4]  # polynomial expansion degree ('poly' pipeline step)
# Keys use the '<step>__<param>' convention to address steps inside the
# Pipeline.
random_grid = {'poly__degree': degree,
               'rf__n_estimators': n_estimators,
               'rf__max_features': max_features,
               'rf__max_depth': max_depth,
               'rf__min_samples_split': min_samples_split,
               'rf__min_samples_leaf': min_samples_leaf,
               'rf__bootstrap': bootstrap
               }
# +
# Use the random grid to search for best hyperparameters.
# First create the base model to tune.
rf_pipe = Pipeline([
    ('poly', PolynomialFeatures()),
    ('rf', RandomForestRegressor())
])
# Random search of parameters using 3-fold cross validation: sample
# 10 combinations (n_iter=10) and use all available cores.
rf_random = RandomizedSearchCV(estimator=rf_pipe,
                               param_distributions=random_grid,
                               n_iter=10, cv=3, verbose=2, random_state=1,
                               scoring='neg_mean_squared_error', n_jobs = -1)
# Fit the random search model
rf_random.fit(X_train_cv, np.ravel(y_train_cv))
# -
rf_random.best_params_
# ### Model Performance
# +
# Rebuild the pipeline with the tuned hyper-parameters from the randomized
# search and evaluate it on the train / CV / test splits.
rf_pipe_opt = Pipeline([
    # Fix: use the searched polynomial degree instead of a hard-coded 2,
    # so the evaluated model actually matches rf_random's best config.
    ('poly', PolynomialFeatures(
        degree=rf_random.best_params_['poly__degree'])
    ),
    ('rf', RandomForestRegressor(
        n_estimators=rf_random.best_params_['rf__n_estimators'],
        max_features=rf_random.best_params_['rf__max_features'],
        min_samples_split=rf_random.best_params_['rf__min_samples_split'],
        max_depth=rf_random.best_params_['rf__max_depth'],
        min_samples_leaf=rf_random.best_params_['rf__min_samples_leaf'],
        bootstrap=rf_random.best_params_['rf__bootstrap']
    ))
])
rf_pipe_opt.fit(X_train, np.ravel(y_train))
y_train_pred = rf_pipe_opt.predict(X_train)
y_cv_pred = rf_pipe_opt.predict(X_cv)
# Fix: sklearn metrics take (y_true, y_pred) in that order.  MSE is
# symmetric so its value is unchanged, but R^2 is not — the arguments
# were swapped in the original.
print('Train error:', mean_squared_error(y_train, y_train_pred))
print('CV error:', mean_squared_error(y_cv, y_cv_pred))
print('R^2 train', r2_score(y_train, y_train_pred))
print('R^2 CV', r2_score(y_cv, y_cv_pred))
# -
y_test_pred = rf_pipe_opt.predict(X_test)
print('Test error:', mean_squared_error(y_test, y_test_pred))
print('R^2 Test', r2_score(y_test, y_test_pred))
# ### Grid Search
# Narrowed grid around the randomized-search winner for an exhaustive search.
param_grid = {'poly__degree': [2],
              'rf__n_estimators': [100, 200, 300, 500],
              'rf__max_features': ['auto'],
              'rf__max_depth': [50, 60, 70],
              'rf__min_samples_split': [4, 5, 6],
              'rf__min_samples_leaf': [1, 2, 3],
              'rf__bootstrap': [True],
              }
# Create a base model
rf_pipe = Pipeline([
    ('poly', PolynomialFeatures()),
    ('rf', RandomForestRegressor())
])
# Instantiate the grid search model: exhaustive over every combination
# in param_grid, 3-fold CV, all cores.
grid_search = GridSearchCV(estimator=rf_pipe, param_grid=param_grid,
                           scoring='neg_mean_squared_error',
                           cv=3, n_jobs=-1, verbose=2)
grid_search.fit(X_train_cv, np.ravel(y_train_cv))
grid_search.best_params_
# +
# Rebuild the pipeline with the grid search's best hyper-parameters and
# evaluate on train / CV / test.
rf_pipe_opt2 = Pipeline([
    ('poly', PolynomialFeatures(
        degree=grid_search.best_params_['poly__degree'])
    ),
    ('rf', RandomForestRegressor(
        n_estimators=grid_search.best_params_['rf__n_estimators'],
        max_features=grid_search.best_params_['rf__max_features'],
        min_samples_split=grid_search.best_params_['rf__min_samples_split'],
        max_depth=grid_search.best_params_['rf__max_depth'],
        min_samples_leaf=grid_search.best_params_['rf__min_samples_leaf'],
        bootstrap=grid_search.best_params_['rf__bootstrap']
    ))
])
rf_pipe_opt2.fit(X_train, np.ravel(y_train))
y_train_pred = rf_pipe_opt2.predict(X_train)
y_cv_pred = rf_pipe_opt2.predict(X_cv)
# Fix: sklearn metrics take (y_true, y_pred) in that order.  MSE is
# symmetric so its value is unchanged, but R^2 is not — the arguments
# were swapped in the original.
print('Train error:', mean_squared_error(y_train, y_train_pred))
print('CV error:', mean_squared_error(y_cv, y_cv_pred))
print('R^2 train', r2_score(y_train, y_train_pred))
print('R^2 CV', r2_score(y_cv, y_cv_pred))
# -
y_test_pred = rf_pipe_opt2.predict(X_test)
print('Test error:', mean_squared_error(y_test, y_test_pred))
print('R^2 Test', r2_score(y_test, y_test_pred))
# Residuals on the held-out test set; a structureless cloud around zero
# indicates no obvious remaining bias versus the sleep score.
arr_resid = np.ravel(y_test) - y_test_pred
plt.scatter(y_test, arr_resid)
plt.title('Residual Error Plot for Random Forest')
plt.xlabel('Sleep Score')
plt.ylabel('Residual Error');
# # Results
# In terms of performance, here is how our model did:
#
# | Name | Test Error (MSE) | R^2 |
# |------------------------------|------------|------|
# | Random Forest | 9.22 | .777 |
# +
# Feature importances come from the fitted forest, which operates on the
# *polynomial* feature space produced by the 'poly' step — not on the raw
# columns.  Fix: the original zipped importances against X.columns (15
# names), silently truncating and mislabeling the ~136 degree-2 features.
# Label bars with the names PolynomialFeatures generates instead.
# (Requires scikit-learn >= 1.0; older versions use get_feature_names.)
fi = rf_pipe_opt2['rf'].feature_importances_
poly_names = rf_pipe_opt2['poly'].get_feature_names_out(X.columns)
df_fi = pd.DataFrame(zip(poly_names, fi),
                     columns=['Features', 'Importance'])
df_fi = df_fi.sort_values(by='Importance')
# plot
plt.subplots(figsize=(15, 10))
plt.barh(df_fi['Features'], df_fi['Importance'])
plt.xlabel('Normalized Cumulative Gini Index Drop')
plt.ylabel('Features')
plt.title('Feature Importances');
| notebooks/sleep_data_random_forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## 巴斯卡三角形 (Pascal's triangle)
# ```
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
# ```
# 解一 (solution 1)
# Print hard-coded Pascal-triangle rows for input '1'..'5'; '0' quits.
# Fix: the input() prompt literal was mojibake split across two source
# lines (a syntax error); restored to the intended Chinese text, and the
# final status string repaired from the same mojibake.
# NOTE(review): the rows' leading spaces look collapsed relative to the
# centred triangle shown in the markdown above — confirm against the
# original notebook; the literals are kept exactly as found here.
while True:
    n = input('請輸入0~5:')  # "please enter 0~5"
    if n == '0':
        break
    if n == '1':
        print('1')
    elif n == '2':
        print(' 1')
        print('1 1')
    elif n == '3':
        print(' 1')
        print(' 1 1')
        print('1 2 1')
    elif n == '4':
        print(' 1')
        print(' 1 1')
        print(' 1 2 1')
        print('1 3 3 1')
    elif n == '5':
        print(' 1')
        print(' 1 1')
        print(' 1 2 1')
        print(' 1 3 3 1')
        print('1 4 6 4 1')
print('結束')  # "finished"
# Fix: this fragment was broken in the original — the prompt literal was
# mojibake split mid-string (a syntax error), `rang` is a typo for
# `range`, and `i / j == 0` is never true for positive integers, so the
# remainder operator `%` was clearly intended.
n = int(input('請輸入1~100:'))  # "please enter 1~100"
for i in range(2, n + 1):
    for j in range(2, i):
        if i % j == 0:
            # TODO(review): the original prints a bare blank line per
            # divisor found; this loop looks like an unfinished
            # divisor/prime check — confirm the intended output.
            print()
| Python/Pascal triangle.ipynb |