code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.7 64-bit
# language: python
# name: python3
# ---
# ## Imports
# +
import cv2 as cv
import numpy as np
import os
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout, BatchNormalization
from keras.optimizers import adam_v2
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('fivethirtyeight')
# -
# ## Test GPU
# Detect GPUs and enable on-demand memory growth.
physical_devices = tf.config.list_physical_devices('GPU')
# BUG FIX: indexing physical_devices[0] unconditionally raises IndexError on
# CPU-only machines; only configure memory growth when a GPU is present.
if physical_devices:
    # Allocate GPU memory as needed instead of grabbing it all up front.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
print("GPUs available:", len(physical_devices))
# ## Prepare data
# ## Import train and test data
# +
train_dir = 'C:/Users/lucas/OneDrive/Documentos/GitHub/real-pokedex/dataset/train/'
test_dir = 'C:/Users/lucas/OneDrive/Documentos/GitHub/real-pokedex/dataset/test/'
val_split = 0.3
num_pokemons = len(os.listdir(train_dir))


def _load_split(directory, per_class_limit):
    """Load up to `per_class_limit` images per class folder from `directory`.

    Labels are integer class indices assigned in os.listdir order.

    Returns:
        (X, y): X of shape (n, 32, 32, 3) with BGR pixel data, and y of
        shape (n, 1) with the integer labels.
    """
    images, labels = [], []
    for label, pokedir in enumerate(os.listdir(directory)):
        class_path = os.path.join(directory, pokedir)
        for pokemon in os.listdir(class_path)[:per_class_limit]:
            imgdata = cv.imread(os.path.join(class_path, pokemon))
            imgdata = cv.resize(imgdata, (32, 32), 0, 0, cv.INTER_LINEAR)
            images.append(imgdata)
            labels.append(label)
    X = np.array(images).reshape((len(images), 32, 32, 3))
    y = np.array(labels).reshape((len(labels), 1))
    return X, y


# The original train/test loaders were copy-pasted and differed only in the
# directory and the per-class cap; share one helper instead.
X_train, y_train = _load_split(train_dir, 40)
X_test, y_test = _load_split(test_dir, 20)

# Scale pixel values to [0, 1].
X_train = X_train / 255.0
X_test = X_test / 255.0

# One-hot encode the labels.
# BUG FIX: num_classes was hard-coded to 166, but the model's output layer
# uses Dense(num_pokemons); use the actual class count so the label width
# always matches the network output.
y_train = to_categorical(y_train, num_classes=num_pokemons)
y_test = to_categorical(y_test, num_classes=num_pokemons)
# -
# ## Import data using `ImageDataGenerator`
# BUG FIX: batch_size was used here before being defined (it only appeared
# further down, next to model.fit), so this cell raised NameError when the
# notebook ran top-to-bottom. Define it before first use.
batch_size = 128

# Rescale pixel values so this loader matches the manual pipeline above,
# which divides by 255; without it the model would see unscaled data.
datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_it = datagen.flow_from_directory('dataset/train/', batch_size=batch_size, target_size=(32, 32))
test_it = datagen.flow_from_directory('dataset/test/', batch_size=batch_size, target_size=(32, 32))
# NOTE(review): .next() yields a SINGLE batch, overwriting the full arrays
# built in the previous cell -- confirm which data source is intended.
X_train, y_train = train_it.next()
X_test, y_test = test_it.next()
print(X_train.shape)
print(X_test.shape)
# model.fit_generator(train_it, epochs = num_epochs, steps_per_epoch=16, validation_data=test_it, validation_steps=8)
# # Setup and train model
# +
# CNN classifier: four conv blocks (32 -> 64 -> 64 -> 128 -> 128 -> 256 -> 256
# filters) with batch normalization, 2x2 max pooling and 25% dropout, followed
# by two dense layers (50% dropout) and a softmax over num_pokemons classes.
model = Sequential()
model.add(Conv2D(32, 3, padding = 'same', activation = 'relu', input_shape =(32,32,3), kernel_initializer = 'he_normal'))
model.add(BatchNormalization(axis = -1))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(Conv2D(256, 3, padding = 'same', kernel_initializer = 'he_normal', activation = 'relu'))
model.add(BatchNormalization(axis = -1))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Output layer: one unit per class folder found in the training directory.
model.add(Dense(num_pokemons, activation = 'softmax'))
# Adam with a small learning rate; categorical cross-entropy matches the
# one-hot labels produced above.
lr = 1e-4
opt = adam_v2.Adam(learning_rate=lr)
model.compile(
loss= 'categorical_crossentropy', \
optimizer= opt, \
metrics = ['accuracy']
)
num_epochs = 100
batch_size = 128
# NOTE(review): validation_split is hard-coded to 0.5 here even though
# val_split = 0.3 is defined earlier and never used -- confirm which split
# was intended. verbose=0 also hides all training progress.
hist = model.fit(X_train, y_train, batch_size = batch_size, epochs = num_epochs, verbose=0, validation_split = 0.5)
# save the model
# model.save('pokedex.model')
# -
# ## Plot loss and accuracy
# +
# Plot learning curves
# Plot learning curves: accuracy on the left panel, loss on the right.
fig = plt.figure(figsize=(17, 8))
curve_panels = [
    (121, 'accuracy', [('accuracy', 'acc'), ('val_accuracy', 'val_acc')]),
    (122, 'loss', [('loss', 'loss'), ('val_loss', 'val_loss')]),
]
for subplot_id, panel_title, series in curve_panels:
    plt.subplot(subplot_id)
    # Draw the train and validation curves for this metric.
    for history_key, series_label in series:
        plt.plot(hist.history[history_key], label=series_label)
    plt.legend()
    plt.grid()
    plt.title(panel_title)
plt.show()
# -
# ## Test model
# Inspect one test sample: predicted class vs. its true label.
pred = model.predict(X_test)
index = 60  # the index you want to test

# Predicted class index (0-based) for the chosen sample.
result = np.argmax(pred[index])
# BUG FIX: `classes` was never defined anywhere in the notebook, so this cell
# raised NameError. Labels were assigned in os.listdir order when the data
# was loaded, so rebuild the same index -> name mapping here.
classes = os.listdir(train_dir)
result_class = classes[result]
print("Predicted index: %d" % result)
print("Predicted class: %s" % result_class)
# BUG FIX: the original printed the one-hot label position + 1 (1-based),
# which could never match the 0-based predicted index above.
print("Predicted should match label: %d" % np.where(y_test[index] == 1)[0])
plt.imshow(X_test[index], interpolation='nearest')
plt.show()
model.evaluate(X_test, y_test, verbose=0)
| CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Max profit with k transactions
# Identifier used by jovian.commit() at the end to name the uploaded notebook.
project_name = "max-profit-with-k-transactions"
# ## Problem Statement
#
# You are given an array which contains integers representing the `prices` of a stock over a number of days. The index of the array represents the day on which the stock was valued at that price.
#
# For example given the array `prices` = [3, 8, 1]:
#
# on day 0 (prices[0]) the price of the stock was 3
# on day 1 (prices[1]) the price of the stock was 8
# on day 2 (prices[2]) the price of the stock was 1
#
# If you are allowed to make at most `k` numbers of transactions, find the maximum profit you can achieve.
#
# -You can only engage in one transaction at a time, if you bought then you must sell before buying again.
# -You must buy before you sell (obviously) so you cant buy on day two and sell on day one for instance.
#
# Here is an example of how to achieve this:
#
# Input: k = 2, prices = [3,2,6,5,0,3]
# Output: 7
# Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4. Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
#
# Source: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv/
# ## The Method
#
# Here's the systematic strategy we'll apply for solving problems:
#
# 1. State the problem clearly. Identify the input & output formats.
# 2. Come up with some example inputs & outputs. Try to cover all edge cases.
# 3. Come up with a correct solution for the problem. State it in plain English.
# 4. Implement the solution and test it using example inputs. Fix bugs, if any.
# 5. Analyze the algorithm's complexity and identify inefficiencies, if any.
# 6. Apply the right technique to overcome the inefficiency. Repeat steps 3 to 6.
#
# This approach is explained in detail in [Lesson 1](https://jovian.ai/learn/data-structures-and-algorithms-in-python/lesson/lesson-1-binary-search-linked-lists-and-complexity) of the course. Let's apply this approach step-by-step.
# ## Solution
#
#
# ### 1. State the problem clearly. Identify the input & output formats.
#
# While this problem is stated clearly enough, it's always useful to try and express in your own words, in a way that makes it most clear for you.
#
#
# **Problem**
#
# > We need to find the maximum amount of profit we can achieve, within a certain number of transactions, where we can only engage in a single transaction at a time, until we reach the end of the prices array.
#
# transaction profit = (price the stock is currently at) - (price at which we bought the stock)
#
# final profit = the sum of all transaction profits
#
# <br/>
#
#
# **Input**
#
# 1. `prices` = an array of stock prices ordered in a chronological sequential manner, where the index represents the day at which the stock was values at that price
# 2. `k` = maximum number of transactions we are allowed to perform
#
#
# **Output**
#
# 1. `profit` = the maximum profit we can achieve
#
#
# <br/>
#
# Based on the above, we can now create a signature of our function:
def calculate_max_profit(prices, k):
    """Return the maximum profit achievable with at most `k` transactions.

    Placeholder that fixes the function signature; the real implementation
    is developed in the solution sections below.
    """
    pass
# Save and upload your work before continuing.
# ### 2. Come up with some example inputs & outputs. Try to cover all edge cases.
#
# Our function should be able to handle any set of valid inputs we pass into it. Here's a list of some possible variations we might encounter:
#
# 1. Single transaction which result in profit
# 2. Single transaction with no way to profit
# 3. Multiple transactions which result in profit
# 4. Multiple transactions with no way to profit
# 5. Situations were the inputs don't make sense, zero or negative number of max transactions and negative stock values
#
# We'll express our test cases as dictionaries, to test them easily. Each dictionary will contain 2 keys: `input` (a dictionary itself containing one key for each argument to the function and `output` (the expected result from the function).
# Hand-written test cases: 'input' feeds the function, 'output' is expected.
test1 = {
    'input': {
        'prices': [1, 2, 1],
        'k': 1
    },
    'output': 1
}

test2 = {
    'input': {
        'prices': [3, 2, 1],
        'k': 1
    },
    'output': 0
}

test3 = {
    'input': {
        'prices': [3, 2, 6, 5, 0, 3],
        'k': 2
    },
    'output': 7
}

test4 = {
    'input': {
        'prices': [9, 7, 6, 5, 2, 1],
        'k': 2
    },
    'output': 0
}

# BUG FIX: this case was also named `test5`, so it was silently overwritten
# by the next definition and the invalid-input edge case was never kept.
# Give it its own name and include it in the test list.
test_invalid_inputs = {
    'input': {
        'prices': [-1, -2, -1],
        'k': -1
    },
    'output': 0
}

test5 = {
    'input': {
        'prices': [5, 11, 3, 50, 60, 90],
        'k': 2
    },
    'output': 93
}

tests = [test1, test2, test3, test4, test5, test_invalid_inputs]
# ### 3. Come up with a correct solution for the problem. State it in plain English.
#
# Our first goal should always be to come up with a _correct_ solution to the problem, which may not necessarily be the most _efficient_ solution.
#
# Our problem has quite a lot of dimensionality when we think about it. Not only do we need to worry about the profit we gain if we were to sell on any particular day, but we also have to take into account any potential profits we could already have acquired from selling on previous days. In order to represent all of this intricacy we will need to create a 2D array to house all of our profit combinations, where the row index represents the number of transactions we can make and the column index represents the day; each cell holds the maximum profit achievable up to that day.
#
# Lets illustrate this by going through an example
#
# prices : [3, 2, 6, 5, 0, 3]
# profits :[[0, 0, 0, 0, 0, 0],
# [0, 0, 4, 4, 4, 4],
# [0, 0, 4, 4, 4, 7]]
#
# Our 2D `profits` array will have `k + 1` rows and `len(prices)` columns.
# Each value in the array will be filled with the maximum profit we can aquire up until that day with the given number of transactions specified by the row number.
#
# When `k` = 0 we have no profits since no transactions can occur, so `profits[0] = [0, 0, 0, 0, 0, 0]`.
#
# When `k` = 1 we have:
# 1. profits[1][0] = 0 since we cant make any transactions yet
# 2. profits[1][1] = 0 since the only transaction we can make would be 2-3 which is <0 which is not profitable.
# 3. profits[1][2] = 4 since the max profit at this day would be 6-2, same goes for the rest of the days since the price of the stock never goes above 6 so 6-2 is the best option we get `profits[1] =[0, 0, 4, 4, 4, 4]`
#
# When `k` = 2 we mostly the same values as the ones when `k` = 1 since there is no combinations of transactions to get over a profit of 4, until we reach the last day where we have the combinations (6-2) + (3-0) = 7
#
#
# Now the question on everyones minds should be how do we calculate this value algorithmically. Heres how:
#
# profits[i][j] = max(profits[i][j-1], prices[j] + max(-prices[x] + profits[i-1][x])) where 0 <= x < j
#
# So say we are on day 6 (`j` = 5) and we can make two transactions `k`= 2 (`i` = 2)
#
# x = 0: -prices[0] + profits[1][0] = -3 + 0 = -3
# x = 1: -prices[1] + profits[1][1] = -2 + 0 = -2
# x = 2: -prices[2] + profits[1][2] = -6 + 4 = -2
# x = 3: -prices[3] + profits[1][3] = -5 + 4 = -1
# x = 4: -prices[4] + profits[1][4] = -0 + 4 = 4 => the max value
#
# So our equations becomes:
#
# profits[2][5] = max(profits[2][4], prices[5] + 4) = max(4, 3 + 4) = max(4, 7) = 7
#
# Here are the steps we should take:
# 1. Instantiate the `profits` array.
# 2. Iterate from 0 to k + 1 to traverse the rows of `profits`
# 3. Iterate from 0 to len(prices) to traverse the days of `profits`
# 4. Iterate from 0 to current day and calculate `max(-prices[x] + profit[i-1][x])`
# 5. Calculate `max(profits[i][j-1], prices[j] + max(-prices[x] + profit[i-1][x]))`
# 6. The largest value of profits is our final result.
# ### 4. Implement the solution and test it using example inputs. Fix bugs, if any.
# !pip install jovian --upgrade --quiet
import jovian
# +
import numpy as np
def calculate_max_profit(prices, k, display_steps=False):
    """Maximum profit from at most `k` buy/sell transactions over `prices`.

    Brute-force DP: profits[i][j] is the best profit using at most i
    transactions up to day j. O(len(prices)^2 * k) time, O(len(prices) * k)
    space.

    Args:
        prices: list of stock prices indexed by day.
        k: maximum number of transactions allowed.
        display_steps: when True, print each DP cell and the final matrix.

    Returns:
        The maximum achievable profit (0 for empty or invalid input).
    """
    # Guard the edge cases the DP cannot handle: no prices, or a
    # non-positive transaction budget (np.amax crashes on an empty matrix
    # when k < 0; the documented expected answer is 0).
    if k <= 0 or not prices:
        return 0
    # profits[i][j]: best profit with at most i transactions up to day j.
    profits = [[0] * len(prices) for _ in range(k + 1)]
    for i in range(k + 1):
        for j in range(len(prices)):
            if i == 0 or j == 0:
                # No transactions allowed, or first day: no profit possible.
                profits[i][j] = 0
            else:
                # Best "buy" position: max over x < j of the profit left
                # after buying on day x with i-1 prior transactions.
                # (The original wrapped this max in a one-element list and
                # then took max of the list -- redundant.)
                best_buy = max(-prices[x] + profits[i - 1][x] for x in range(j))
                # Either skip trading today, or sell today on top of best_buy.
                profits[i][j] = max(profits[i][j - 1], prices[j] + best_buy)
            if display_steps:
                print('k=', i, 'day=', j, 'maxprofit=', profits[i][j])
    if display_steps:
        print(np.matrix(profits))
    # Return the largest profit in the table.
    return np.amax(profits)
# -
def test_function(test, method, display_steps=False):
inputs, output = test['input'], test['output']
prices, k = inputs['prices'], inputs['k']
print('Input:', inputs)
print('Expected output:', output)
result = method(prices, k, display_steps)
print('Actual output:', result)
print('Match:', result == output)
# Exercise the brute-force solution on every hand-written case, printing the
# DP table as it fills in.
test_function(test1, calculate_max_profit, display_steps=True)
test_function(test2, calculate_max_profit, display_steps=True)
test_function(test3, calculate_max_profit, display_steps=True)
test_function(test4, calculate_max_profit, display_steps=True)
test_function(test5, calculate_max_profit, display_steps=True)
# ### 5. Analyze the algorithm's complexity and identify inefficiencies.
# Since we are creating and traversing a 2D array of profits, with size proportional to the length of our prices array and the number of transactions we are allowed to conduct, we would expect our time complexity to be `O(len(prices) * (k + 1))`.
#
# However for each iteration of our 2D array we also have to iterate over past profits gained from previous numbers of transactions, and this number can vary between `0` and `len(prices)`.
#
# Since we will only consider the worst case scenario this leads to our time complexity actually being `O(len(prices)^2 * (k+1))`
#
# Lets just call it `O(n^2 * m)`
#
# Meanwhile our space complexity will be `O(n*m)` since that is the size of the array we are working with will depend on length of `prices` and number of transactions `k`.
# Complexities of the brute-force DP, with n = len(prices) and m = k + 1.
time_complexity = 'O(n^2 * m)'
space_complexity = 'O(n * m)'
# ### 6. Apply the right technique to overcome the inefficiency.
# Our time complexity is currently quadratic, which is generally regarded as poor.
#
# Our space complexity is linear, which would generally be fair but could be better.
#
# However there are a few ways for us to improve this through the application of a DYNAMIC PROGRAMMING concept called MEMOIZATION.
#
# If we take a look at our algorithm one more time, `max(profits[i][j-1], prices[j] + max(-prices[x] + profit[i-1][x]))`, specifically at the `max(-prices[x] + profit[i-1][x])` section, and remember that `0 <= x < j`, we notice that we are repeating a lot of the same calculations. Instead of recalculating `max(-prices[x] + profit[i-1][x])` for every single day, we save the previous day's maximum, compare it to the current day's value, and take the larger of the two.
#
# This will completely get rid of the need to iterate for `0 <= x < j` and bring our time complexity down to `O(n*m)`
# ### 7. Come up with a correct solution for the problem. State it in plain English.
#
# Come with the optimized correct solution and explain it in simple words below:
#
# 1. Instantiate the `profits` array.
# 2. Iterate from 0 to k + 1 to traverse the rows of `profits`
# 3. Iterate from 0 to len(prices) to traverse the days of `profits`
# 4. Calculate `max(-prices[x] + profit[i-1][x])` and save it
# 5. Calculate `max(profits[i][j-1], prices[j] + max_so_far)`
# 6. The largest value of profits is our final result
# ### 8. Implement the solution and test it using example inputs. Fix bugs, if any.
# +
import sys
def calculate_max_profit_time_optimized(prices, k, display_steps=False):
    """Maximum profit from at most `k` transactions, memoized DP variant.

    Same contract as calculate_max_profit, but carries the running best
    "buy" value forward instead of rescanning all earlier days, cutting the
    time complexity to O(len(prices) * k).
    """
    num_days = len(prices)
    # profits[t][d]: best profit with at most t transactions up to day d.
    profits = [[0] * num_days for _ in range(k + 1)]
    for txn in range(k + 1):
        # Running maximum of (-prices[x] + profits[txn-1][x]) over past days,
        # seeded with the smallest representable integer.
        best_buy = -sys.maxsize - 1
        for day, price in enumerate(prices):
            if txn == 0 or day == 0:
                # No transactions allowed, or first day: nothing to gain.
                profits[txn][day] = 0
            else:
                best_buy = max(best_buy, -prices[day - 1] + profits[txn - 1][day - 1])
                # Either skip trading today, or sell today on top of best_buy.
                profits[txn][day] = max(profits[txn][day - 1], price + best_buy)
            if display_steps:
                print('k=', txn, 'day=', day, 'maxprofit=', profits[txn][day])
    if display_steps:
        print(np.matrix(profits))
    # Return the largest profit in the table.
    return np.amax(profits)
# -
# Re-run the same five cases against the memoized implementation; outputs
# should match the brute-force version.
test_function(test1, calculate_max_profit_time_optimized, display_steps=True)
test_function(test2, calculate_max_profit_time_optimized, display_steps=True)
test_function(test3, calculate_max_profit_time_optimized, display_steps=True)
test_function(test4, calculate_max_profit_time_optimized, display_steps=True)
test_function(test5, calculate_max_profit_time_optimized, display_steps=True)
# All looks good, now let's fix our space complexity. Our space complexity is currently `O(n * m)`, which is linear and fair. But if we consider a very large prices array where we can conduct a very large number of transactions, our `profits` array starts looking quite large and hard to manage. However, there is a way to reduce the slope of our space complexity's linearity: the smaller the slope, the more efficient our space usage is.
#
# If we take a look at our `profits` array and our algorith again, we will notice that at any moment in time we are never working with more than two rows from our `profits` array.
#
# `max_so_far = max(max_so_far, -prices[j-1] + profits[i-1][j-1])`
#
# `profits[i][j] = max(profits[i][j-1], prices[j] + max_so_far)`
#
# We are always restricted to the `i-1`th and `i`th rows. So there is no reason to create a `n*m` array, we can simply use two 1D arrays where we store the profits from the previous `k-1` transactions and the current `k` transactions and once we finished all the values for the current `k` we make `k-1` array equal to the `k` array and move on to calculate the values of the next row.
#
# Let's implement this now.
# +
import sys
def calculate_max_profit_time_and_space_optimized(prices, k, display_steps=False):
    """Maximum profit from at most `k` transactions, O(len(prices)) space.

    Keeps only two rows of the DP table: the profits for k-1 transactions
    (previous row) and for k transactions (current row). Same contract as
    calculate_max_profit.
    """
    # Guard degenerate inputs (empty prices would make max() crash below).
    if k <= 0 or not prices:
        return 0
    profits_last_k = [0] * len(prices)
    profits_current_k = [0] * len(prices)
    for i in range(1, k + 1):
        # Running maximum of (-prices[x] + profits_last_k[x]) over past days,
        # initialized to the smallest possible integer.
        max_so_far = -sys.maxsize - 1
        for j in range(len(prices)):
            if j == 0:
                # First day: no transaction can complete yet.
                profits_current_k[j] = 0
            else:
                max_so_far = max(max_so_far, -prices[j - 1] + profits_last_k[j - 1])
                # Either skip trading today, or sell today on top of max_so_far.
                profits_current_k[j] = max(profits_current_k[j - 1], prices[j] + max_so_far)
            if display_steps:
                print('k=', i, 'day=', j, 'maxprofit=', profits_current_k[j])
        # BUG FIX: the original did `profits_last_k = profits_current_k`,
        # ALIASING both names to the same list. On the next pass the code
        # then read partially-updated current-row values as if they were the
        # previous row, effectively allowing more than k transactions and
        # inflating results (e.g. [1, 2, 1, 2, 1, 2] with k=2 returned 3
        # instead of 2). Copy the row instead.
        profits_last_k = profits_current_k.copy()
    if display_steps:
        print(profits_current_k)
    # The final row's maximum is the answer.
    return max(profits_current_k)
# -
# Re-run the same five cases against the space-optimized implementation;
# outputs should match the other two versions.
test_function(test1, calculate_max_profit_time_and_space_optimized, display_steps=True)
test_function(test2, calculate_max_profit_time_and_space_optimized, display_steps=True)
test_function(test3, calculate_max_profit_time_and_space_optimized, display_steps=True)
test_function(test4, calculate_max_profit_time_and_space_optimized, display_steps=True)
test_function(test5, calculate_max_profit_time_and_space_optimized, display_steps=True)
# ### 9. Analyze the algorithm's complexity and identify inefficiencies, if any.
# We have succesfully reduced the time complexity of our algorithm to `O(n*m)` and our space complexity to `O(n)`
optimized_time_complexity = 'O(n*m)'
optimized_space_complexity = 'O(n)'
# Lets run some benchmarks and compare all three approaches to see how much each one of our optimization steps has helped.
# We will create a large array of prices with random integer numbers in order to compare all our algorithms.
# +
import random

# Build a long random price series so the three implementations can be
# benchmarked against each other.
prices = []
k = 1000
for i in range(0, k):
    n = random.randint(0, 30)
    prices.append(n)

print(prices)

# Since we can't predict the output of a random input, we won't care whether
# the test "passes" -- we are only interested in execution time.
benchmark_test = {
    'input': {
        'prices': prices,
        'k': k
    },
    'output': 0
}
# -
from jovian.pythondsa import evaluate_test_cases
# Benchmark all three implementations on the same random input.
# NOTE(review): with len(prices) = k = 1000 the brute-force version performs
# on the order of n^2 * k ~ 10^9 inner steps -- expect the first call to be
# extremely slow.
evaluate_test_cases(calculate_max_profit, [benchmark_test])
evaluate_test_cases(calculate_max_profit_time_optimized, [benchmark_test])
evaluate_test_cases(calculate_max_profit_time_and_space_optimized, [benchmark_test])
# As we can see, the difference is VERY remarkable between the unoptimized
# solution and the time-optimized one.
#
# It also seems we gained a bit of speed from the space optimization as well:
# touching a smaller amount of memory is generally faster than working
# through a large amount of memory.
jovian.commit()
| max-profit-with-k-transactions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
def dydx(x, y):
    """Right-hand side of the coupled first-order system for y'' = -y.

    With y[0] = y and y[1] = z = dy/dx, the system is
        dy/dx = z
        dz/dx = -y

    Returns a length-2 float array of derivatives [dy/dx, dz/dx].
    """
    # dtype=float matches the np.zeros(2) buffer the original filled in.
    return np.array([y[1], -y[0]], dtype=float)
def ck_mv_core(dydx, xi, yi, N, h):
    """Advance the N-variable ODE system one step of size h.

    Args:
        dydx: callable(x, y) returning the derivative array.
        xi: current value of the independent variable.
        yi: current state array (length N).
        N: number of coupled ODEs.
        h: step size.

    Returns:
        The estimated state at x = xi + h.

    NOTE(review): the six stages and the 37/378, 250/621, 125/594, 512/1771
    weights follow the Cash-Karp naming, but every intermediate stage is
    evaluated with coefficient 0.5 rather than at the Cash-Karp nodes
    (1/5, 3/10, 3/5, 1, 7/8). The step still integrates (the weights sum
    to 1), but it will not reach the full Cash-Karp order -- confirm whether
    this simplification is intentional.
    """
    # Stage increment arrays.
    k1 = np.zeros(N)
    k2 = np.zeros(N)
    k3 = np.zeros(N)
    k4 = np.zeros(N)
    k5 = np.zeros(N)
    k6 = np.zeros(N)

    # Half-step and full-step abscissae.
    x_ipoh = xi + 0.5 * h
    x_ipo = xi + h

    # Temp state for stage evaluations.
    # BUG FIX: this used the global `nv` instead of the parameter N, silently
    # coupling the function to notebook state (and crashing with NameError
    # when nv was not defined).
    y_temp = np.zeros(N)

    # k1: slope at the start of the interval.
    y_derivs = dydx(xi, yi)
    k1[:] = h * y_derivs[:]

    # k2..k5: slopes at the midpoint, each built from the previous stage.
    y_temp[:] = yi[:] + 0.5 * k1[:]
    y_derivs = dydx(x_ipoh, y_temp)
    k2[:] = h * y_derivs[:]

    y_temp[:] = yi + 0.5 * k2[:]
    y_derivs = dydx(x_ipoh, y_temp)
    k3[:] = h * y_derivs[:]

    y_temp[:] = yi + 0.5 * k3[:]
    y_derivs = dydx(x_ipoh, y_temp)
    k4[:] = h * y_derivs[:]

    y_temp[:] = yi + 0.5 * k4[:]
    y_derivs = dydx(x_ipoh, y_temp)
    k5[:] = h * y_derivs[:]

    # k6: slope at the full step, built from k5.
    y_temp[:] = yi + 0.5 * k5[:]
    y_derivs = dydx(x_ipo, y_temp)
    k6[:] = h * y_derivs[:]

    # Combine stages with the Cash-Karp 5th-order weights (k2, k5 weight 0).
    yipo = yi + 37/378*k1 + 250/621*k3 + 125/594*k4 + 512/1771*k6
    return yipo
def ck_mv_ad(dydx, xi, yi, nv, h, tol):
    """Take one adaptive step, shrinking h until the error estimate is < tol.

    The error is estimated by comparing one step of size h_step against two
    half steps from the same starting point.

    Returns:
        (y_new, h_new, h_step): the advanced state, a suggested (possibly
        larger) step for the next call, and the step actually taken.
    """
    # Safety factor applied when shrinking the step.
    SAFETY = 0.9
    # Cap on how much the next step may grow relative to this one.
    H_NEW_FAC = 2.0

    # Bail out of the shrink loop if it never converges.
    imax = 10000
    i = 0

    # Seed the error above tol so the loop body runs at least once.
    Delta = np.full(nv, 2*tol)

    h_step = h

    # Shrink h_step until the worst component error is within tolerance.
    while(Delta.max()/tol > 1.0):
        # One full step vs. two half steps.
        y_2 = ck_mv_core(dydx, xi, yi, nv, h_step)
        y_1 = ck_mv_core(dydx, xi, yi, nv, 0.5*h_step)
        y_11 = ck_mv_core(dydx, xi+0.5*h_step, y_1, nv, 0.5*h_step)

        # Componentwise error estimate.
        Delta = np.fabs(y_2 - y_11)

        if (Delta.max()/tol > 1.0):
            # Error too large: decrease the step size and try again.
            h_step *= SAFETY * (Delta.max()/tol)**(-0.25)

        if(i>=imax):
            # BUG FIX: the message previously said "rk4_mv_ad()", a leftover
            # from the RK4 version this code was adapted from.
            print("Too many iterations in ck_mv_ad()")
            raise StopIteration("Ending after i = ", i)

        i+=1

    # Suggest a (bounded) larger step for next time.
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step * H_NEW_FAC)

    # Return the answer, a new step, and the step we actually took.
    return y_2, h_new, h_step
def ck_mv(dydx,a,b,y_a,tol):
    """Integrate the ODE system from a to b with adaptive step control.

    Args:
        dydx: callable(x, y) giving the derivatives.
        a: lower bound of integration.
        b: upper bound of integration.
        y_a: boundary conditions (state at x = a).
        tol: error tolerance passed to the adaptive stepper.

    Returns:
        (x, y): the sample points and the matching (len(x), nv) array of
        solution values.
    """
    # Starting point of the integration.
    xi = a
    yi = y_a.copy()

    # Initial trial step -- deliberately tiny; the adaptive stepper grows it
    # as the error estimates allow.
    h = 1.0e-4*(b-a)

    # Hard cap on the number of steps, to catch runaway loops.
    imax = 10000
    i=0

    # Number of coupled ODEs, taken from the boundary-condition vector.
    nv = len(y_a)

    # Solution arrays, seeded with the initial condition.
    x = np.full(1,a)
    y = np.full((1,nv),y_a)  # a 1 x nv matrix

    # Loop until we land exactly on b.
    flag = 1
    while(flag):
        # Advance one adaptive step.
        yi_new, h_new, h_step = ck_mv_ad(dydx,xi,yi,nv,h,tol)

        # Adopt the suggested step for next time.
        h = h_new

        # If the next step would overshoot b, retake this step with exactly
        # the remaining distance and stop afterwards.
        # NOTE(review): the retaken adaptive step may shrink below b - xi,
        # in which case the loop still exits without reaching b exactly --
        # confirm this is acceptable for the required accuracy.
        if(xi+h_step>b):
            h = b-xi
            yi_new, h_new, h_step = ck_mv_ad(dydx,xi,yi,nv,h,tol)
            flag = 0

        # Commit the step.
        xi += h_step
        yi[:] = yi_new[:]

        # Append the step to the solution arrays by rebuilding y one row
        # larger. NOTE(review): this copy-per-step makes the driver
        # quadratic in the number of steps; fine for coursework-sized runs.
        x = np.append(x,xi)
        y_new = np.zeros((len(x),nv))  # (len(x), nv) matrix
        y_new[0:len(x)-1,:] = y
        y_new[-1,:] = yi[:]  # last row holds the newest state
        del y
        y = y_new

        # Guard against runaway integration.
        if(i>=imax):
            print("Max iterations reached.")
            raise StopIteration("iteration number: ",i)

        i+=1

        # Progress line: step index, position, step taken, and target.
        s = "i= %3d\tx = %9.8f\th = %9.8f\tb = %9.8f" % (i,xi,h_step,b)
        print(s)

        # Stop once we've landed on b.
        if(xi==b):
            flag = 0

    return x ,y
# +
# Integrate y'' = -y on [0, 2*pi] with y(0) = 0, y'(0) = 1
# (exact solution: y = sin x, dy/dx = cos x).
a = 0.0
b = 2.0 *np.pi

y_0 = np.zeros(2)
y_0[0] = 0.0  # y(0)
y_0[1] = 1.0  # dy/dx(0)
nv = 2  # number of coupled ODEs

tol = 1.0e-6  # integration tolerance

# perform integration
x, y = ck_mv(dydx,a,b,y_0,tol)
# -
# Plot the numerical solution (markers) against the analytic sin/cos curves.
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
# Dense grid for the analytic reference curves.
xx = np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(frameon=0)
# +
# Pointwise error of the numerical solution relative to the exact solution.
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x,y_error,'o',label='y(x) Error')
plt.plot(x,dydx_error,'o',label='dydx(x) Error')
plt.legend(frameon=0)
# -
| hw-5/hw-5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr>
# <td>
# <img src='./text_images/nvidia.png' width="200" height="450">
# </td>
# <td> & </td>
# <td>
# <img src='./text_images/udacity.png' width="350" height="450">
# </td>
# </tr>
# </table>
# # Deep Reinforcement Learning for Optimal Execution of Portfolio Transactions
# # Introduction
#
# This notebook demonstrates how to use Deep Reinforcement Learning (DRL) for optimizing the execution of large portfolio transactions. We begin with a brief review of reinforcement learning and actor-critic methods. Then, you will use an actor-critic method to generate optimal trading strategies that maximize profit when liquidating a block of shares.
#
# # Actor-Critic Methods
#
# In reinforcement learning, an agent makes observations and takes actions within an environment, and in return it receives rewards. Its objective is to learn to act in a way that will maximize its expected long-term rewards.
#
# <br>
# <figure>
# <img src = "./text_images/RL.png" width = 80% style = "border: thin silver solid; padding: 10px">
# <figcaption style = "text-align: center; font-style: italic">Fig 1. - Reinforcement Learning.</figcaption>
# </figure>
# <br>
#
# There are several types of RL algorithms, and they can be divided into three groups:
#
# - **Critic-Only**: Critic-Only methods, also known as Value-Based methods, first find the optimal value function and then derive an optimal policy from it.
#
#
# - **Actor-Only**: Actor-Only methods, also known as Policy-Based methods, search directly for the optimal policy in policy space. This is typically done by using a parameterized family of policies over which optimization procedures can be used directly.
#
#
# - **Actor-Critic**: Actor-Critic methods combine the advantages of actor-only and critic-only methods. In this method, the critic learns the value function and uses it to determine how the actor's policy parramerters should be changed. In this case, the actor brings the advantage of computing continuous actions without the need for optimization procedures on a value function, while the critic supplies the actor with knowledge of the performance. Actor-critic methods usually have good convergence properties, in contrast to critic-only methods. The **Deep Deterministic Policy Gradients (DDPG)** algorithm is one example of an actor-critic method.
#
# <br>
# <figure>
# <img src = "./text_images/Actor-Critic.png" width = 80% style = "border: thin silver solid; padding: 10px">
# <figcaption style = "text-align: center; font-style: italic">Fig 2. - Actor-Critic Reinforcement Learning.</figcaption>
# </figure>
# <br>
#
# In this notebook, we will use DDPG to determine the optimal execution of portfolio transactions. In other words, we will use the DDPG algorithm to solve the optimal liquidation problem. But before we can apply the DDPG algorithm we first need to formulate the optimal liquidation problem so that in can be solved using reinforcement learning. In the next section we will see how to do this.
# # Modeling Optimal Execution as a Reinforcement Learning Problem
#
# As we learned in the previous lessons, the optimal liquidation problem is a minimization problem, *i.e.* we need to find the trading list that minimizes the implementation shortfall. In order to solve this problem through reinforcement learning, we need to restate the optimal liquidation problem in terms of **States**, **Actions**, and **Rewards**. Let's start by defining our States.
#
# ### States
#
# The optimal liquidation problem entails that we sell all our shares within a given time frame. Therefore, our state vector must contain some information about the time remaining, or equivalently, the number of trades remaining. We will use the latter and use the following features to define the state vector at time $t_k$:
#
#
# $$
# [r_{k-5},\, r_{k-4},\, r_{k-3},\, r_{k-2},\, r_{k-1},\, r_{k},\, m_{k},\, i_{k}]
# $$
#
# where:
#
# - $r_{k} = \log\left(\frac{\tilde{S}_k}{\tilde{S}_{k-1}}\right)$ is the log-return at time $t_k$
#
#
# - $m_{k} = \frac{N_k}{N}$ is the number of trades remaining at time $t_k$ normalized by the total number of trades.
#
#
# - $i_{k} = \frac{x_k}{X}$ is the remaining number of shares at time $t_k$ normalized by the total number of shares.
#
# The log-returns capture information about stock prices before time $t_k$, which can be used to detect possible price trends. The number of trades and shares remaining allow the agent to learn to sell all the shares within a given time frame. It is important to note that in real world trading scenarios, this state vector can hold many more variables.
#
# ### Actions
#
# Since the optimal liquidation problem only requires us to sell stocks, it is reasonable to define the action $a_k$ to be the number of shares to sell at time $t_{k}$. However, if we start with millions of stocks, intepreting the action directly as the number of shares to sell at each time step can lead to convergence problems, because, the agent will need to produce actions with very high values. Instead, we will interpret the action $a_k$ as a **percentage**. In this case, the actions produced by the agent will only need to be between 0 and 1. Using this interpretation, we can determine the number of shares to sell at each time step using:
#
# $$
# n_k = a_k \times x_k
# $$
#
# where $x_k$ is the number of shares remaining at time $t_k$.
#
# ### Rewards
#
# Defining the rewards is trickier than defining states and actions, since the original problem is a minimization problem. One option is to use the difference between two consecutive utility functions. Remeber the utility function is given by:
#
# $$
# U(x) = E(x) + λ V(x)
# $$
#
# After each time step, we compute the utility using the equations for $E(x)$ and $V(x)$ from the Almgren and Chriss model for the remaining time and inventory while holding parameter λ constant. Denoting the optimal trading trajectory computed at time $t$ as $x^*_t$, we define the reward as:
#
# $$
# R_{t} = {{U_t(x^*_t) - U_{t+1}(x^*_{t+1})}\over{U_t(x^*_t)}}
# $$
#
# Where we have normalized the difference to train the actor-critic model easier.
# # Simulation Environment
#
# In order to train our DDPG algorithm we will use a very simple simulated trading environment. This environment simulates stock prices that follow a discrete arithmetic random walk and that the permanent and temporary market impact functions are linear functions of the rate of trading, just like in the Almgren and Chriss model. This simple trading environment serves as a starting point to create more complex trading environments. You are encouraged to extend this simple trading environment by adding more complexity to simulte real world trading dynamics, such as book orders, network latencies, trading fees, etc...
#
# The simulated enviroment is contained in the **syntheticChrissAlmgren.py** module. You are encouraged to take a look it and modify its parameters as you wish. Let's take a look at the default parameters of our simulation environment. We have set the intial stock price to be $S_0 = 50$, and the total number of shares to sell to one million. This gives an initial portfolio value of $\$50$ Million dollars. We have also set the trader's risk aversion to $\lambda = 10^{-6}$.
#
# The stock price will have 12\% annual volatility, a [bid-ask spread](https://www.investopedia.com/terms/b/bid-askspread.asp) of 1/8 and an average daily trading volume of 5 million shares. Assuming there are 250 trading days in a year, this gives a daily volatility in stock price of $0.12 / \sqrt{250} \approx 0.8\%$. We will use a liquiditation time of $T = 60$ days and we will set the number of trades $N = 60$. This means that $\tau=\frac{T}{N} = 1$ which means we will be making one trade per day.
#
# For the temporary cost function we will set the fixed cost of selling to be 1/2 of the bid-ask spread, $\epsilon = 1/16$. we will set $\eta$ such that for each one percent of the daily volume we trade, we incur a price impact equal to the bid-ask
# spread. For example, trading at a rate of $5\%$ of the daily trading volume incurs a one-time cost on each trade of 5/8. Under this assumption we have $\eta =(1/8)/(0.01 \times 5 \times 10^6) = 2.5 \times 10^{-6}$.
#
# For the permanent costs, a common rule of thumb is that price effects become significant when we sell $10\%$ of the daily volume. If we suppose that significant means that the price depression is one bid-ask spread, and that the effect is linear for smaller and larger trading rates, then we have $\gamma = (1/8)/(0.1 \times 5 \times 10^6) = 2.5 \times 10^{-7}$.
#
# The tables below summarize the default parameters of the simulation environment
# %load_ext autoreload
# %autoreload 2
# +
import os
from os import path
import sys
# Make the repository root importable so `src.*` and the top-level helper
# modules resolve. NOTE: "__file__" is deliberately a string literal —
# notebooks have no __file__, so abspath("__file__") resolves relative to
# the current working directory; two dirname() calls therefore yield the
# parent of the notebook's directory (presumably the repo root — verify).
repo_path= path.dirname(path.dirname(path.abspath("__file__")))
sys.path.append(repo_path)
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import torch
from collections import deque
from src.ac_agent import AgentDDPG
from ddpg_agent import Agent
import syntheticChrissAlmgren as sca
from ddpg_agent import OUNoise
# -
# Report library versions for reproducibility of the experiment.
pd.__version__
import statsmodels.api as sm
sm.show_versions()
# +
import utils
# Get the default financial and AC Model parameters
financial_params, ac_params = utils.get_env_param()
# -
# Display the two parameter tables (financial model / Almgren-Chriss model).
financial_params
ac_params
# # Reinforcement Learning
#
# In the code below we use DDPG to find a policy that can generate optimal trading trajectories that minimize implementation shortfall, and can be benchmarked against the Almgren and Chriss model. We will implement a typical reinforcement learning workflow to train the actor and critic using the simulation environment. We feed the states observed from our simulator to an agent. The Agent first predicts an action using the actor model and performs the action in the environment. Then, environment returns the reward and new state. This process continues for the given number of episodes. To get accurate results, you should run the code at least 10,000 episodes.
# # Todo
#
# The above code should provide you with a starting framework for incorporating more complex dynamics into our model. Here are a few things you can try out:
#
# - Incorporate your own reward function in the simulation environment to see if you can achieve an expected shortfall that is better (lower) than that produced by the Almgren and Chriss model.
#
#
# - Experiment with rewarding the agent at every step versus only giving a reward at the end.
#
#
# - Use more realistic price dynamics, such as geometric brownian motion (GBM). The equations used to model GBM can be found in section 3b of this [paper](https://ro.uow.edu.au/cgi/viewcontent.cgi?referer=https://www.google.com/&httpsredir=1&article=1705&context=aabfj)
#
#
# - Try different functions for the action. You can change the values of the actions produced by the agent by using different functions. You can choose your function depending on the interpretation you give to the action. For example, you could set the action to be a function of the trading rate.
#
#
# - Add more complex dynamics to the environment. Try incorporating trading fees, for example. This can be done by adding an extra term to the fixed cost of selling, $\epsilon$.
# +
# Draw 1000 raw samples from the Ornstein-Uhlenbeck process to visualise
# its mean-reverting behaviour before using it as exploration noise.
noise = OUNoise(size=1, seed=123)
noise_samples = [noise.sample() for _ in range(1000)]
# -
plt.plot(noise_samples)
plt.show()
def get_noise_schedulling(n_episodes, decay, noise):
    """Simulate an exponential decay schedule for exploration noise.

    Draws one noise sample per episode and multiplies it by the decay
    coefficient ``decay**i`` so the schedule can be inspected/plotted
    before committing to a training run.

    Parameters
    ----------
    n_episodes : int
        Number of episodes to simulate.
    decay : float
        Per-episode multiplicative decay factor (e.g. 0.9998).
    noise : object
        Noise process exposing ``sample()`` returning an indexable
        (e.g. OUNoise); only the first component is used.

    Returns
    -------
    pd.DataFrame
        Indexed by episode number, with columns ``decay`` (decay
        coefficient), ``noise_raw`` (raw sample) and ``noise_dec``
        (decayed sample).
    """
    # (The original allocated np.ones(n_episodes) and discarded it — dead
    # code, removed.)
    schedule = dict()
    for i in range(n_episodes):
        noise_raw = noise.sample()[0]
        decay_coef = decay**i
        schedule[i] = [decay_coef, noise_raw, noise_raw * decay_coef]
    return pd.DataFrame.from_dict(schedule, orient='index',
                                  columns=['decay', 'noise_raw', 'noise_dec'])
# Preview the decayed-noise schedule over 5000 episodes using the same
# decay factor (0.9998) applied later during training.
noise_schedulling = get_noise_schedulling(5000, decay=0.9998, noise=noise)
noise_schedulling.plot()
plt.show()
# +
# Almgren-Chriss episode parameters. These match the environment defaults
# described above: T = 60 days, N = 60 trades -> one trade per day.
# Set the liquidation time
lqt = 60
# Set the number of trades
n_trades = 60
# Set trader's risk aversion
tr = 1e-6
# -
def action_scaler_01(action):
    """Map an action from [-1, 1] to [0, 1], clipping anything outside."""
    rescaled = 0.5 * (action + 1.0)
    return np.clip(rescaled, 0, 1)
def train(env, agent, n_episodes, lqt, n_trades, tr, *args, **kwargs):
    """Run a DDPG training loop on the simulated Almgren-Chriss market.

    Parameters
    ----------
    env : simulation environment exposing ``reset``, ``start_transactions``
        and ``step`` (e.g. ``sca.MarketEnvironment``).
    agent : agent exposing ``act`` (policy + exploration noise) and
        ``step`` (store experience and learn).
    n_episodes : number of training episodes.
    lqt: liquidation time
    n_trades: number of trades
    tr: risk aversion
    *args : unused (kept for call-site compatibility).

    Keyword Arguments
    -----------------
    action_scaler_fn : callable mapping the raw actor output into the range
        the environment expects (default: identity).
    noise_decay : per-episode multiplicative decay of the exploration-noise
        weight; None keeps the weight at 1 (default: None).
    model_save_path : path handed to ``agent.save_network`` at each periodic
        report, if the agent supports saving (default: None).

    Returns
    -------
    pd.Series of the implementation shortfall recorded for each episode.
    """
    shortfall_hist = np.array([])
    shortfall_deque = deque(maxlen=100)  # rolling window for progress logging
    action_scaler_fn = kwargs.get('action_scaler_fn', lambda x: x)
    noise_decay = kwargs.get('noise_decay', None)
    model_save_path = kwargs.get('model_save_path', None)
    noise_weight = 1.  # exploration-noise multiplier, decayed per episode
    for i_episode in range(1, n_episodes+1):
        # Reset the environment (seeded per episode for reproducibility)
        cur_state = env.reset(seed = i_episode, liquid_time = lqt, num_trades = n_trades, lamb = tr)
        # set the environment to make transactions
        env.start_transactions()
        for i in range(n_trades + 1):
            # Predict the best action for the current state.
            action = agent.act(cur_state, add_noise = True, noise_weight = noise_weight)
            action = action_scaler_fn(action)
            # Action is performed and new state, reward, info are received.
            new_state, reward, done, info = env.step(action)
            # current state, action, reward, new state are stored in the experience replay
            agent.step(cur_state, action, reward, new_state, done)
            # roll over new state
            cur_state = new_state
            # NOTE(review): termination is read from info.done rather than the
            # `done` flag returned by env.step — confirm the two always agree.
            if info.done:
                shortfall_hist = np.append(shortfall_hist, info.implementation_shortfall)
                shortfall_deque.append(info.implementation_shortfall)
                break
        # schedule exploration-exploitation: exponential decay, floored at 0.1
        if noise_decay is not None:
            noise_weight = max(noise_decay**i_episode, 0.1)
        if (i_episode) % 100 == 0: # print average shortfall over last 100 episodes
            print('\rEpisode [{}/{}]\tAverage Shortfall: ${:,.2f}'.format(i_episode, n_episodes, np.mean(shortfall_deque)))
            # checkpoint alongside the periodic report, when the agent supports it
            if hasattr(agent, "save_network") and model_save_path is not None:
                agent.save_network(model_save_path)
    return pd.Series(shortfall_hist)
# Create simulation environment
env = sca.MarketEnvironment()
# # Current Agent
# +
# run train loop on vanilla agent (baseline DDPG from ddpg_agent module)
agent = Agent(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), random_seed=0)
shortfall_hist0 = train(env, agent, n_episodes=10000, lqt= lqt, n_trades= n_trades, tr= tr,
                        model_save_path=os.path.join('models', 'ddpg0'), action_scaler_fn=action_scaler_01,
                       )
# +
# Save the trained actor/critic weights manually — the vanilla Agent
# presumably lacks save_network, so train() skipped checkpointing (verify).
filename = os.path.join('models', 'ddpg0')
actor_filename = os.path.join(filename, 'actor.pth')
critic_filename = os.path.join(filename, 'critic.pth')
torch.save(agent.actor_local.state_dict(), actor_filename)
torch.save(agent.critic_local.state_dict(), critic_filename)
# -
# Learning curve: implementation shortfall per episode.
plt.plot(shortfall_hist0)
plt.show()
# # Custom Agent
# ## Testing
# +
# test custom agent: build one AgentDDPG and push a single hand-rolled
# environment step through it to sanity-check the plumbing.
ddpg_agent = AgentDDPG(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), gamma=0.99,
                       actor_hidden_layers=(48, 24), critic_hidden_layers=(24, 48),
                       batch_size=128, learning_rate=(1e-4, 1e-3), weight_decay=(0,0), soft_upd_param=1e-3, update_every=1, buffer_size=int(1e4), seed=123)
# -
# Inspect the actor network architecture.
actor_nn = ddpg_agent.actor_local
actor_nn
# Snapshot initial local/target weights (useful to verify soft updates later).
w_actor_local_init = [w for w in actor_nn.parameters()]
w_actor_targets_init = [w for w in ddpg_agent.actor_target.parameters()]
critic_nn = ddpg_agent.critic_local
critic_nn
w_critic_local_init = [w for w in critic_nn.parameters()]
w_critic_targets_init = [w for w in ddpg_agent.critic_target.parameters()]
# +
cur_state = env.reset(seed = 123, liquid_time = lqt, num_trades = n_trades, lamb = tr)
# set the environment to make transactions
env.start_transactions()
# -
cur_state
# +
# NOTE(review): keyword is spelled `noise_weigth` here but `noise_weight`
# inside train() — confirm which spelling AgentDDPG.act actually accepts.
action = ddpg_agent.act(cur_state, noise_weigth = 1.)
# scale action from [-1, 1] to [0, 1] (same mapping as action_scaler_01)
action = (action + 1.0) / 2.0
action = np.clip(action, 0, 1)
# Action is performed and new state, reward, info are received.
new_state, reward, done, info = env.step(action)
# -
action
new_state, reward, done
# Build a 1-row batch tensor from the current state.
input_state = torch.tensor(cur_state, dtype=torch.float).view(1,-1)
input_state
# (duplicate of the two lines above — kept as in the original notebook)
input_state = torch.tensor(cur_state, dtype=torch.float).view(1,-1)
input_state
# Forward passes through all four networks for the single state.
input_q = ddpg_agent.actor_local.forward(input_state)
input_q
ddpg_agent.actor_target.forward(input_state)
ddpg_agent.critic_local.forward(input_state, input_q)
ddpg_agent.critic_target.forward(input_state, input_q)
# ## Train
# +
# run train loop on custom agent — noise_decay=1. keeps the exploration
# noise weight constant at 1 for the whole run.
ddpg_agent1 = AgentDDPG(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), gamma=0.99,
                        actor_hidden_layers=(24, 48), critic_hidden_layers=(24, 48),
                        batch_size=128, learning_rate=(1e-4, 1e-3), weight_decay=(0,0), soft_upd_param=1e-3, update_every=1, buffer_size=int(1e4), seed=123)
path_ddpg_agent1 = os.path.join('models','ddpg1')
shortfall_hist1 = train(env, ddpg_agent1, n_episodes=10000, lqt= lqt, n_trades= n_trades, tr= tr, noise_decay=1.,
                        model_save_path=path_ddpg_agent1, action_scaler_fn=action_scaler_01
                       )
# +
# run train loop on custom agent — same architecture, but with exploration
# noise decayed by 0.9998 per episode.
ddpg_agent2 = AgentDDPG(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), gamma=0.99,
                        actor_hidden_layers=(24, 48), critic_hidden_layers=(24, 48),
                        batch_size=128, learning_rate=(1e-4, 1e-3), weight_decay=(0,0), soft_upd_param=1e-3, update_every=1, buffer_size=int(1e4), seed=123)
path_ddpg_agent2 = os.path.join('models', 'ddpg2')
shortfall_hist2 = train(env, ddpg_agent2, n_episodes=10000, lqt= lqt, n_trades= n_trades, tr= tr, noise_decay=.9998,
                        model_save_path=path_ddpg_agent2, action_scaler_fn=action_scaler_01
                       )
# -
# run train loop on custom agent
# NOTE(review): this rebinds ddpg_agent2 / shortfall_hist2 (different actor
# layout (48, 24) and 5000 episodes), discarding the previous run's results
# — consider renaming if both results are needed.
ddpg_agent2 = AgentDDPG(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), gamma=0.99,
                        actor_hidden_layers=(48, 24), critic_hidden_layers=(24, 48),
                        batch_size=128, learning_rate=(1e-4, 1e-3), weight_decay=(0,0), soft_upd_param=1e-3, update_every=1, buffer_size=int(1e4), seed=123)
shortfall_hist2 = train(env, ddpg_agent2, n_episodes=5000, lqt= lqt, n_trades= n_trades, tr= tr, noise_decay=.9998,
                        model_save_path=os.path.join('models', 'ddpg2'), action_scaler_fn=action_scaler_01
                       )
# +
from src.ac_agent import AgentDDPG, GaussianProcess
# NOTE(review): num_agents and RND_SEED are not defined anywhere in this
# notebook — this line raises NameError as written. Define them (e.g.
# num_agents = 1, RND_SEED = 123) before running this cell.
g_noise = GaussianProcess(num_agents, RND_SEED)
# +
# run train loop on custom agent — same setup as before but with Gaussian
# exploration noise instead of the default (presumably OU) process.
ddpg_agent3 = AgentDDPG(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), gamma=0.99,
                        actor_hidden_layers=(48, 24), critic_hidden_layers=(24, 48),
                        batch_size=128, learning_rate=(1e-4, 1e-3), weight_decay=(0,0),
                        soft_upd_param=1e-3, update_every=1, buffer_size=int(1e4), noise=g_noise, seed=123)
shortfall_hist3 = train(env, ddpg_agent3, n_episodes=5000, lqt= lqt, n_trades= n_trades, tr= tr, noise_decay=.9998,
                        model_save_path=os.path.join('models', 'ddpg3'), action_scaler_fn=action_scaler_01
                       )
| finance/DRL-exersice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Gaussian Naive Bayes on the Iris data set: load, split 80/20, fit, score.
import sklearn
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

iris = load_iris()
features, labels = iris['data'], iris['target']
feature_names, class_names = iris['feature_names'], iris['target_names']

# Hold out 20% of the samples for evaluation (split is random on each run).
X_tr, X_te, y_tr, y_te = train_test_split(features, labels, test_size=0.2)

# Fit the classifier on the training split and predict the held-out split.
classifier = GaussianNB().fit(X_tr, y_tr)
predicted = classifier.predict(X_te)

print("The accuracy of this dataset is", accuracy_score(y_te, predicted))
# -
| iris_dataset_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MLxtend: Feature Selection Tutorial
# put together by [KVS Setty ](kvssetty.com)on 24th Aug 2020 in [Data Science](https://kvssetty.com/category/data-science/)
# 
# ## What is MLxtend ?
# MLxtend is a library that should accompany any data science project. Considered as an extension of the Sci-kit learn library, MLxtend has useful automation of common data science tasks:
# * Completely automated feature extraction and selection.
# * An extension on Sci-kit learn’s existing data transformers, like mean centering and transaction encoders.
# * A vast array of evaluation metrics: a few include bias-variance decomposition (measure how bias and variance your model contains), lift tests, McNemar’s test, F-test, and many more.
# * Helpful model visualizations, including feature boundaries, learning curves, PCA correlation circles, and enrichment plots.
# * Many built-in datasets that are not included in Sci-kit Learn.
# * Helpful preprocessing functions for images and text, like a name generalizer that can identify and convert text with different naming systems (“<NAME>”, “<NAME>”, “<NAME>.”, and “<NAME>” are the same).
# 
# Or, consider its decision boundary drawing capabilities:
# 
# For more info on its features see the [**Documentation**](http://rasbt.github.io/mlxtend/)
# In this article, I present MLxtend (machine learning extensions), a Python library of useful tools for the day-to-day data science tasks. To showcase its strength I use the library to select the most important features of a dataset before feeding data into learning algorithm.Feature selection is an preprocessing step, see the fig below.
# 
# ## Tutorial Overview
# This tutorial is divided into ten parts; they are:
# * How to install it?
# * Curse of dimensionality
# * Feature Selection
# * Exhaustive search
# * Forward feature selection
# * Backward feature selection
# * Stochastic feature selection
# * Python Implementation
# * Summary
# * Further Reading
# ### How to install it?
# Just run the following command if you have conda installed in your PC:
# ```
# conda install -c conda-forge mlxtend
# ```
# Or using pip:
#
# ```
# pip install mlxtend
# ```
# MLxtend is a useful package for diverse data science-related tasks. It contains some useful wrapper methods such as:
#
# * **SequentialFeatureSelector** (supporting both Forward and Backward feature selection)
# * **ExhaustiveFeatureSelector**
# ### Curse of dimensionality
# Being in the know that adding more features is not always helpful. This is due to:
#
# Data is sparse in high dimensions
# Impractical to use all the measured data directly
# Some features may be detrimental to pattern recognition
# Some features are essentially noise
# ### 💀 Curse of dimensionality
# Being in the know that adding more features is not always helpful. This is due to:
#
# * Data is sparse in high dimensions
# * Impractical to use all the measured data directly
# * Some features may be detrimental to pattern recognition
# * Some features are essentially noise
# >But some may not be relevant to the outcome. Moreover, many of the original predictors also may not contain predictive information. For a number of models, predictive performance is degraded as the number of uninformative predictors increases. Therefore, there is a genuine need to appropriately select predictors for modeling.
#
# --page 227, Chapter 10: [Feature Engineering and selection by <NAME>](https://www.amazon.in/Feature-Engineering-Selection-Practical-Predictive/dp/1138079227/ref=sr_1_1?dchild=1&keywords=feature+engineering+for+machine+learning+by+max+kuhn&qid=1598246314&s=books&sr=1-1)
# ### 🧠 Feature Selection
# Feature Selection is the process of selecting a subset of the extracted features. This is helpful because:
#
# * Reduces dimensionality
# * Discards uninformative features
# * Discards deceptive features (Deceptive features appear to aid learning on the training set, but impair generalisation)
# * Speeds training/testing
# >The working premise here is that it is generally better to have fewer predictors in a model.[...], the goal of feature selection will be re-framed to
#
# >**Reduce the number of predictors as far as possible without compromising predictive performance.**
#
# --page 228, Chapter 10: [Feature Engineering and selection by <NAME>](https://www.amazon.in/Feature-Engineering-Selection-Practical-Predictive/dp/1138079227/ref=sr_1_1?dchild=1&keywords=feature+engineering+for+machine+learning+by+max+kuhn&qid=1598246314&s=books&sr=1-1)
# ### Types of Feature Selection Methodologies
# Feature selection methodologies fall into three general classes:
# * **Intrinsic (or implicit) methods.**
# * **Filter methods.**
# * **Wrapper methods.**
#
# Intrinsic methods have feature selection naturally incorporated with the modeling process, whereas filter and wrapper methods work to marry feature selection approaches with modeling techniques. The most important of the three classes for reducing features is the wrapper methods, and this tutorial uses and explores wrapper methods, so some explanation is worthwhile.
#
# Wrapper methods use iterative search procedures that repeatedly supply predictor subsets to the model and then use the resulting model performance estimate to guide the selection of the next subset to evaluate. If successful, a wrapper method will iterate to a smaller set of predictors that has better predictive performance than the original predictor set. Wrapper methods can take either a greedy or non-greedy approach to feature selection. A greedy search is one that chooses the search path based on the direction that seems best at the time in order to achieve the best immediate benefit. While this can be an effective strategy, it may show immediate benefits in predictive performance that stall out at a locally best setting. A non-greedy search method would re-evaluate previous feature combinations and would have the ability to move in a direction that is initially unfavorable if it appears to have a potential benefit after the current step. This allows the non-greedy approach to escape being trapped in a local optima.
# An example of a greedy wrapper method is **backwards selection (otherwise known as recursive feature elimination or RFE)**. Here, the predictors are initially ranked by some measure of importance. An initial model is created using the complete predictor set. The next model is based on a smaller set of predictors where the least important have been removed. This process continues down a prescribed path (based on the ranking generated by the importances) until a very small number of predictors are in the model. Performance estimates are used to determine when too many features have been removed; hopefully a smaller subset of predictors can result in an improvement. Notice that the RFE procedure is greedy in that it considers the variable ranking as the search direction. It does not re-evaluate the search path at any point or consider subsets of mixed levels of importance. This approach to feature selection will likely fail if there are important interactions between predictors where only one of the predictors is significant in the presence of the other(s).
# Examples of non-greedy wrapper methods (also called Stochastic feature selection) are **genetic algorithms (GA)** and **simulated annealing (SA).** The SA method is non-greedy since it incorporate randomness into the feature selection process. The random component of the process helps SA to find new search spaces that often lead to more optimal results.
# Wrappers have the potential advantage of searching a wider variety of predictor subsets than simple filters or models with built-in intrinsic(implicit) feature selection. They have the most potential to find the globally best predictor subset (if it exists). The primary drawback is the computational time required for these methods to find the optimal or near optimal subset. The additional time can be excessive to the extent of being counter-productive. The computational time problem can be further exacerbated by the type of model with which it is coupled. For example, the models that are in most need of feature selection (e.g., SVMs and neural networks) can be very computationally taxing themselves. Another disadvantage of wrappers is that they have the most potential to overfit the predictors to the training data and require external validation
# In general, there are three wrapper approach which we will analyze in more details shortly are:
#
# * Exhaustive search generally too expensive
# * Forward/backward greedy search algorithms
# * Stochastic search (Simulated Annealing and Genetic Algorithms)
# ### 🔵 Exhaustive search
# The goal is:
#
# **Given M input features, select a subset of the d most useful.**
# Try each combination of d features and assess which is most effective. Number of combinations:
#
# $$\frac{M!}{(M-d)!\,d!}$$
#
#
# Allowing subsets of size d = 1, . . . , M gives $2^M − 1$ combinations. Prohibitively expensive for M >= 20 (2^(20) ≈ 1,000,000 ). Since it is potentially too expensive. Forward and backward(RFE) are usually the preferred option.
#
#
# ## Forward Feature Selection (FFS)
# 
# The forward selection involves the below steps:
#
# * Train the model with a single feature the one which gives the better result based on the evaluation metric.
# * Select a second feature which in combination with the first gives the best performance.
# * Continue the above steps
# * Stop when no significant improvement is observed or the limit of d features is observed.
# ## Backward Feature Selection (a.k.a Recurssive Feature Elimination ,RFE)
# 
# The backward selection involves the below steps:
#
# * Train the model using all features
# * Discard the one which gives the least decrease in the performance
# * Continue the above steps
# * Stop when significant decrease of the performance is observed or the limit of d features is observed.
# Both techniques are fast but do not guarantee that another untried feature set is not better (thus it is a greedy search). They only guarantee the elimination of features whose information content is subsumed by other features.
# ## 🟠 Stochastic feature selection (Simulated Annealing and Genetic Algorithms)
# Feature selection is a combinatorial optimization problem:
#
# * Simulated annealing(SA) or genetic algorithms(GA) to locate global maximum
# * Potentially very good results
# * Potentially very expensive
# # 🐍 Python Implementation in MLxtend
# Lets see some examples of above algorithms in action implemented mlxtend:
# ## Exhaustive Feature Selector
# Implementation of an exhaustive feature selector for sampling and evaluating all possible feature combinations in a specified range.
#
# `from mlxtend.feature_selection import ExhaustiveFeatureSelector`
#
#
# Overview
# This exhaustive feature selection algorithm is a wrapper approach for brute-force evaluation of feature subsets; the best subset is selected by optimizing a specified performance metric given an arbitrary regressor or classifier. For instance, if the classifier is a logistic regression and the dataset consists of 4 features, the alogorithm will evaluate all 15 feature combinations (if `min_features=1` and `max_features=4`)
#
# - {0}
# - {1}
# - {2}
# - {3}
# - {0, 1}
# - {0, 2}
# - {0, 3}
# - {1, 2}
# - {1, 3}
# - {2, 3}
# - {0, 1, 2}
# - {0, 1, 3}
# - {0, 2, 3}
# - {1, 2, 3}
# - {0, 1, 2, 3}
#
# and select the one that results in the best performance (e.g., classification accuracy) of the logistic regression classifier.
# ### Example 1 - A simple Iris example
# Initializing a simple classifier from scikit-learn:
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
iris = load_iris()
X = iris.data
y = iris.target
# 3-nearest-neighbours classifier used to score each candidate feature subset
knn = KNeighborsClassifier(n_neighbors=3)
# Exhaustively evaluate every subset of 1..4 features (2^4 - 1 = 15
# combinations), scoring each with 5-fold cross-validated accuracy.
efs1 = EFS(knn,
           min_features=1,
           max_features=4,
           scoring='accuracy',
           print_progress=True,
           cv=5)
efs1 = efs1.fit(X, y)
print('Best accuracy score: %.2f' % efs1.best_score_)
print('Best subset (indices):', efs1.best_idx_)
print('Best subset (corresponding names):', efs1.best_feature_names_)
# -
# Note that in the example above, the 'best_feature_names_' are simply a string equivalent of the feature indices. However, we can provide custom feature names to the fit function for this mapping:
# Re-fit with human-readable feature names so best_feature_names_ reports
# them instead of stringified column indices.
feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width')
efs1 = efs1.fit(X, y, custom_feature_names=feature_names)
print('Best subset (corresponding names):', efs1.best_feature_names_)
# #### Example 02 - Working with pandas DataFrames
# Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. However, note that if custom_feature_names are provided in the fit function, these `custom_feature_names` take precedence over the DataFrame column-based feature names.
# +
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
iris = load_iris()
col_names = ('sepal length', 'sepal width',
             'petal length', 'petal width')
# With DataFrame/Series inputs the column names double as feature names.
X_df = pd.DataFrame(iris.data, columns=col_names)
y_series = pd.Series(iris.target)
knn = KNeighborsClassifier(n_neighbors=4)
# +
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
# NOTE: this rebinds knn with n_neighbors=3, replacing the n_neighbors=4
# instance created in the previous cell.
knn = KNeighborsClassifier(n_neighbors=3)
efs1 = EFS(knn,
           min_features=1,
           max_features=4,
           scoring='accuracy',
           print_progress=True,
           cv=5)
# Fitting on the DataFrame: reported feature names come from its columns.
efs1 = efs1.fit(X_df, y_series)
print('Best accuracy score: %.2f' % efs1.best_score_)
print('Best subset (indices):', efs1.best_idx_)
print('Best subset (corresponding names):', efs1.best_feature_names_)
# -
# ## Sequential Feature Selector
# Implementation of sequential feature algorithms (SFAs) -- greedy search algorithms -- that have been developed as a suboptimal solution to the computationally often not feasible exhaustive search.
#
# `from mlxtend.feature_selection import SequentialFeatureSelector`
#
#
# Sequential feature selection algorithms are a family of greedy search algorithms that are used to reduce an initial d-dimensional feature space to a k-dimensional feature subspace where k < d. The motivation behind feature selection algorithms is to automatically select a subset of features that is most relevant to the problem. The goal of feature selection is two-fold: We want to improve the computational efficiency and reduce the generalization error of the model by removing irrelevant features or noise. A wrapper approach such as sequential feature selection is especially useful if intrinsic (implicit) feature selection -- for example, a regularization penalty like LASSO -- is not applicable.
#
# In a nutshell, SFAs remove or add one feature at the time based on the classifier performance until a feature subset of the desired size k is reached. There are 4 different flavors of SFAs available in mlxtend via the `SequentialFeatureSelector`:
#
# * Sequential Forward Selection (SFS)
# * Sequential Backward Selection (SBS)
# * Sequential Forward Floating Selection (SFFS)
# * Sequential Backward Floating Selection (SBFS)
#
# The **floating** variants, SFFS and SBFS, can be considered as extensions to the simpler SFS and SBS algorithms. The floating algorithms have an additional exclusion or inclusion step to remove features once they were included (or excluded), so that a larger number of feature subset combinations can be sampled. It is important to emphasize that this step is conditional and only occurs if the resulting feature subset is assessed as "better" by the criterion function after removal (or addition) of a particular feature. Furthermore, I added an optional check to skip the conditional exclusion steps if the algorithm gets stuck in cycles.
# **Important Note:**
# How is this different from Recursive Feature Elimination (RFE) -- e.g., as implemented in `sklearn.feature_selection.RFE`? RFE is computationally less complex using the feature weight coefficients (e.g., linear models) or feature importance (tree-based algorithms) to eliminate features recursively, whereas SFSs eliminate (or add) features based on a user-defined classifier/regression performance metric.
# ### Example 1 - A simple Sequential Forward Selection example
# Initializing a simple classifier from scikit-learn:
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
# Base estimator whose accuracy guides the sequential search below.
knn = KNeighborsClassifier(n_neighbors=4)
# -
# We start by selection the "best" 3 features from the Iris dataset via Sequential Forward Selection (SFS). Here, we set `forward=True` and `floating=False`. By choosing `cv=0`, we don't perform any cross-validation, therefore, the performance (here: `accuracy`) is computed entirely on the training set.
# +
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
# Greedy forward selection of the "best" 3 features. cv=0 means no
# cross-validation: accuracy is computed entirely on the training set.
sfs1 = SFS(knn,
           k_features=3,
           forward=True,
           floating=False,
           verbose=2,
           scoring='accuracy',
           cv=0)
sfs1 = sfs1.fit(X, y)
# -
# Via the `subsets_` attribute, we can take a look at the selected feature indices at each step:
sfs1.subsets_
# Note that the 'feature_names' entry is simply a string representation of the 'feature_idx' in this case. Optionally, we can provide custom feature names via the `fit` method's `custom_feature_names` parameter:
feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width')
# Re-run the fit so subsets_ reports the provided names instead of indices.
sfs1 = sfs1.fit(X, y, custom_feature_names=feature_names)
sfs1.subsets_
# Furthermore, we can access the indices of the 3 best features directly via the `k_feature_idx_` attribute:
sfs1.k_feature_idx_
# And similarly, to obtain the names of these features, given that we provided an argument to the `custom_feature_names` parameter, we can refer to the `sfs1.k_feature_names_` attribute:
sfs1.k_feature_names_
# Finally, the prediction score for these 3 features can be accesses via `k_score_`:
sfs1.k_score_
# # Summary
#
# This brings us to the end of this article. Hopefully you are now aware of the MLxtend Python library and how it can be used for feature selection; it has a ton of other modules for everyday data science use, and it is highly recommended that you get familiar with it.
#
# In this tutorial, you discovered and specifically learned :
# * what is feature selection?
# * why we need it and what problems it solves?
# * the various methods available.
# * Implementation in the Python MLxtend library.
# * How to use them with some examples.
#
# # Further Reading
# ## 📚 For book lovers:
#
# [Python for Data Analysis](https://www.amazon.in/Python-Data-Analysis-Wes-Mckinney/dp/1491957662/ref=sr_1_fkmr0_1?dchild=1&keywords=Python+for+Data+Analysis%2C+2e%3A+Data+Wrangling+with+Pandas%2C+Numpy%2C+and+Ipython+Paperback+%E2%80%93+3+Nov.+2017&qid=1598257631&s=books&sr=1-1-fkmr0) by <NAME>, best known for creating the Pandas project.
#
# [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.amazon.in/Hands-Machine-Learning-Scikit-Learn-Tensor/dp/9352139054/ref=pd_lpo_14_img_0/257-8149962-4834767?_encoding=UTF8&pd_rd_i=9352139054&pd_rd_r=c78d3dad-fa65-4819-8e3b-725c920337d3&pd_rd_w=z5O01&pd_rd_wg=6jtrj&pf_rd_p=5a903e39-3cff-40f0-9a69-33552e242181&pf_rd_r=0F0HE57N6RVF44X0B2X0&psc=1&refRID=0F0HE57N6RVF44X0B2X0) by <NAME>, currently ranking first in the best sellers Books in AI & Machine Learning on Amazon.
#
# [Feature Engineering and selection by <NAME>](https://www.amazon.in/Feature-Engineering-Selection-Practical-Predictive/dp/1138079227/ref=sr_1_1?dchild=1&keywords=feature+engineering+for+machine+learning+by+max+kuhn&qid=1598246314&s=books&sr=1-1) by <NAME> and <NAME>
#
# [Python Machine Learning](https://www.amazon.in/Python-Machine-Learning-scikit-learn-TensorFlow/dp/1789955750/ref=sr_1_4?crid=3G4DEQC0XILTL&dchild=1&keywords=python+machine+learning+sebastian+raschka&qid=1598257497&s=books&sprefix=python+machine%2Caps%2C285&sr=1-4): Machine Learning and Deep Learning with Python, scikit-learn, and TensorFlow 2.0, 3rd Edition by <NAME>
#
#
#
# **Do you have any questions?**
#
# Ask your questions in the comments/reponses column and I will do my best to answer.
#
# Get the complete source code from my git page: https://github.com/KVSSetty/MLxtend-Tutorials
#
# or sign-up for my week-end online classes : https://www.mlanddlguru.com/b/signup
#
#
| notebooks/MLxtend - Feature Selection Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: widgets-tutorial
# language: python
# name: widgets-tutorial
# ---
# <!--NAVIGATION-->
# < [Simple Widget Introduction](03.00-Widget_Basics.ipynb) | [Contents](00.00-index.ipynb) | [Output widgets: leveraging Jupyter's display system](04.01-more-on-output-widget.ipynb) >
# # Widgets in the core ipywidgets package
# The package `ipywidgets` provides two things:
#
# + A communication framework between the front end (your browser) and back end (python or other kernel).
# + A set of fundamental user interface elements like buttons and checkboxes.
#
# The next couple of cells create a browser of the available elements. To see more detail about any of the elements click on its title. It will be easier to view both the overview and the detail if you have them open in separate tabs.
import ipywidgets as widgets
from widget_org import organized_widgets, list_overview_widget
# ## Instructions
#
# Run the cell below. Click on the name of any widget to see a more detailed example of using the widget.
groups = organized_widgets(organize_by='ui')
help_url_base='reference_guides/complete-ipywidgets-widget-list.ipynb'
list_overview_widget(groups, columns=2, min_width_single_widget=200, help_url_base=help_url_base)
# ## Exercises
#
# You may not have time to finish all of these exercises.
# ### 1. Fix the example from the first notebook
#
# The code below is taken from the first notebook of this tutorial.
#
# Run the code below then try typing a number larger than 10 or smaller than 5 into the text box.
# +
# NOTE: this example is intentionally incomplete — it is the exercise setup.
# FloatText has no min/max, so typing a value outside [5, 10] pushes an
# out-of-range value back into the slider through the link below.
slider = widgets.FloatSlider(
    value=7.5,
    min=5.0,
    max=10.0,
    step=0.1,
    description='Input:',
)
# Create text box to hold slider value (unbounded — see note above the cell)
text = widgets.FloatText(description='Value')
# Link slider value and text box value (bidirectional sync of 'value' traits)
widgets.link((slider, 'value'), (text, 'value'))
# Put them in a vertical box
widgets.VBox([slider, text])
# -
# Note the slider has the wrong value! The slider has a minimum and maximum value but the text box doesn't.
#
# **Replace the `FloatText` in the code above with a text widget that has a minimum and maximum that matches the slider.**
# ## 2. Two widgets in a box and link them
#
# Put two widgets, the `Play` widget and a widget of your choice that can hold an integer, in a horizontal box.
# +
# # %load solutions/interact-basic-list/widgets-in-a-box.py
# -
# Link the values of the two widgets in the previous exercise so that changing the value of one affects the value of the other.
widgets.link((a, 'value'), (b, 'value'))
# ## 3. Try tabs or accordions
#
# Choose two or more widgets and place them in either different tabs or accordions. Set the name of each tab or accordion to something more meaningful than the default names.
# Set which tab or accordion is selected by typing the right code in the cell below (hint, look at the `selected_index` attribute).
# <!--NAVIGATION-->
# < [Simple Widget Introduction](03.00-Widget_Basics.ipynb) | [Contents](00.00-index.ipynb) | [Output widgets: leveraging Jupyter's display system](04.01-more-on-output-widget.ipynb) >
| notebooks/04.00-widget-list.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Homework 3.1
#
# ### 3.1.a
# Load the data from http://opendata.dc.gov/datasets, which I have included in this GitHub repository,
# into a dataframe. (The file is available in the directory ./data/ccp_current_csv.csv)
import pandas as pd
Sample= pd.read_csv("data\ccp_current_csv.csv")
Sample.head()
# ### 3.1.a what is its shape and what does that mean?
Sample.shape
Sample.mean()
# ## Homework 3.2
# What are the number of rows in each 'QUADRANT' ?
# Procedure 1
from scipy.stats import itemfreq
import numpy as np
A=np.array(Sample)
itemfreq(A[:,1])
# Procedure 2: count rows per quadrant manually, one counter per label.
Temp=np.unique(A[:,1])  # sorted unique quadrant labels from column 1 (np.unique sorts)
BN=NE=NW=SE=SW = 0  # one counter per quadrant; label order follows Temp — presumably BN/NE/NW/SE/SW, verify against the data
print Temp
# Tally each row into the counter matching its quadrant value.
for x in range(len(A[:,1])):
    if A[x,1] == Temp[0]:
        BN = BN+1
    elif A[x,1] == Temp[1]:
        NE = NE+1
    elif A[x,1] == Temp[2]:
        NW = NW+1
    elif A[x,1] == Temp[3]:
        SE = SE+1
    else:
        SW=SW+1
# Report the per-quadrant totals.
print 'BN -->', BN
print 'NE -->', NE
print 'NW -->', NW
print 'SE -->', SE
print 'SW -->', SW
# ## Homework 3.3 - Array math demonstration
# For two arrarys
#
# a= [1,2,3,4] type=float
#
# b= [5,6,7,8] type=float
#
# Perform the following array operations using numpy
# (show both the operator form and the function form; e.g. for addition, the operator => + vs. the function => numpy.add())
#
# ### 3.3.1 addition a+b
import numpy as np
a = np.asarray([1,2,3,4], dtype = float)
b=np.asarray([5,6,7,8], dtype = float)
np.add(a,b)
a+b
# ### 3.3.2 subtraction a-b
np.subtract(a,b)
a-b
# ### 3.3.3 multiplication a*b
np.multiply(a,b)
list1=a*b
print list1
# ### 3.3.4 division a/b
np.divide(a,b)
a/b
# ### 3.3.5 modulo a%b
np.mod(a,b)
a%b
# ### 3.3.6 power a^b
np.power(a,b)
a**b
# ## Homework 3.4
# Find your own data and load it into a dataframe
import numpy as np
import pandas as pd
Own_data=pd.read_csv("data/googleplaystore.csv")
print Own_data.head()
A=np.array(Own_data)
print(A)
# ## Homework 3.5
# Provide an interesting analysis of the data columns ( frequency or averages )
from scipy.stats import itemfreq
itemfreq(A[:,3])
itemfreq(Own_data)
np.unique(A[:,3])
np.unique(Own_data)
# # Average
np.average(np.float64(A[:,3]))
np.mean(A[:,3])
print np.array_split(A,10)
Own_data.mean()
| Homeworks/Homework3/Rongali-HW3-pandas-numpy-Sept-2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import nltk
## To download vader files
#nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from scipy.stats import spearmanr, pearsonr
# <h3> Raw Data </h3>
webmd_raw = pd.read_csv('webmd.csv')
webmd_raw.shape
# <h3> Data preprocessing </h3>
webmd = pd.read_csv('webmd_conditions_transformed.csv',parse_dates=['Date'])
column_list = list(webmd.columns.values)
column_list_lower = map(lambda x: x.lower(), column_list)
webmd.columns = column_list_lower
webmd.shape
webmd.head()
# <h1> Exploratory Data Analysis </h1>
len(webmd.drug.unique())
len(webmd.condition.unique())
# Number of drugs available for top conditions
df = webmd.groupby(['condition'])['drug'].nunique().sort_values(ascending = False).reset_index().head(30)
plt.rcParams['figure.figsize'] = [15, 8]
plt.bar(x=df['condition'],height = df['drug'],color = 'goldenrod',alpha=0.5)
plt.xticks(rotation=90)
plt.title('Drugs Available for Top Conditions', fontsize = 15)
plt.xlabel('Conditions', fontsize = 15)
plt.ylabel('# of Drugs', fontsize = 15)
plt.show()
# +
# Number of conditions present per drug
df = webmd.groupby(['drug'])['condition'].nunique().sort_values(ascending = False).reset_index().head(30)
plt.rcParams['figure.figsize'] = [15, 8]
plt.bar(x=df['drug'],height = df['condition'],color = '#007acc',alpha=0.5)
plt.title('Conditions Present per Drug', fontsize = 15)
plt.xlabel('Drug', fontsize = 15)
plt.ylabel('# of Conditions', fontsize = 15)
plt.xticks(rotation=90)
plt.yticks(np.arange(0,45,5))
plt.show()
# -
# Most Common Conditions based on Reviews
df = webmd['condition'].value_counts().head(30).reset_index()
df.columns = ['condition','count']
plt.rcParams['figure.figsize'] = [15, 8]
plt.bar(x=df['condition'],height = df['count'],color = 'lightgreen')
plt.xticks(rotation=90)
plt.title('Most Common Conditions based on Reviews', fontsize = 15)
plt.xlabel('Condition', fontsize = 15)
plt.ylabel('# of Count', fontsize = 15)
plt.show()
# Top 10 drugs which are used for the top condition (Pain)
# Bug fix: value_counts() returns a Series, so indexing it with df['index'] /
# df['values'] raised a KeyError. reset_index() converts it to a frame with
# proper columns that seaborn can plot.
df = webmd[webmd['condition'] == 'Pain']['drug'].value_counts()[0:10].reset_index()
df.columns = ['drug', 'count']
plt.rcParams['figure.figsize'] = (15, 8)
sns.barplot(x = df['drug'], y = df['count'], palette = 'summer')
# Title fixed: the data is filtered on 'Pain', not 'Depression'.
plt.title('Top 10 Drugs used for Pain', fontsize = 15)
plt.xlabel('Drug', fontsize = 15)
plt.ylabel('Drug Count', fontsize = 15)
plt.xticks(rotation=90)
plt.show()
# +
# Converting the date into datetime format
webmd['date'] = pd.to_datetime(webmd['date'], errors = 'coerce')
# Extracting year, month and day from date
webmd['year'] = webmd['date'].dt.year
webmd['month'] = webmd['date'].dt.month
webmd['day'] = webmd['date'].dt.day
webmd.head()
# -
# Heatmap of the correlation matrix
plt.rcParams['figure.figsize'] = (10,10)
sns.set(font_scale = 1.2)
df = webmd[['easeofuse','effectiveness','satisfaction','rating']]
corr = df.corr()
sns.heatmap(corr, annot = True, vmin=-1, vmax=1, center=0.5,cmap='twilight', square=True);
plt.xticks(rotation = 45)
plt.show()
# Distribution of reviews in each year
plt.rcParams['figure.figsize'] = (15, 8)
sns.countplot(webmd['year'], palette ='Greens')
plt.title('Distribution of Reviews in each Year', fontsize = 15)
plt.xlabel('Year', fontsize = 15)
plt.ylabel('# of Reviews', fontsize = 15)
plt.show()
# Distribution of ratings in each month
plt.rcParams['figure.figsize'] = (15, 8)
sns.boxplot(x = webmd['year'], y = webmd['rating'],palette = 'Greens')
plt.title('Distribution of Ratings in each Year', fontsize = 15)
plt.xlabel('Year', fontsize = 15)
plt.ylabel('# of Reviews', fontsize = 15)
plt.show()
# Distribution of ratings in each month
plt.rcParams['figure.figsize'] = (15, 8)
sns.boxplot(x = webmd['month'], y = webmd['rating'], palette ='pastel')
plt.title('Distribution of Ratings in each Month', fontsize = 15)
plt.yticks(np.arange(0,11))
plt.xticks(np.arange(12),('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'))
plt.xlabel('Month', fontsize = 15)
plt.ylabel('Rating', fontsize = 15)
plt.show()
# Distribution of ratings on each day
plt.rcParams['figure.figsize'] = (20,8)
sns.boxplot(x = webmd['day'], y = webmd['rating'], palette ='pastel')
plt.title('Distribution of Ratings on each Day', fontsize = 15)
plt.xlabel('Day', fontsize = 15)
plt.ylabel('#Count of Reviews', fontsize = 15)
plt.show()
# <h1>Sentiment Analysis</h1>
# <h3> Sentiment analysis on user reviews. </h3>
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# <h3> Filtering missing reviews</h3>
print("Before removing missing reviews: ",webmd.shape)
webmd_df = webmd[webmd['reviews'] != " "]
print("After removing missing reviews: ",webmd_df.shape)
# Calculate sentiment polarity on each review
webmd_df['sentiment'] = webmd_df['reviews'].astype(str).apply(lambda x: sid.polarity_scores(x)['compound'])
plt.figure(figsize=(15,5))
ax = sns.distplot(webmd_df['sentiment'],bins=80,kde=False)
plt.title('Sentiment Score Distribution')
plt.xlabel('Scores')
plt.ylabel('Count')
# <h1> Statistical Analysis </h1>
# <h3> Univariate Analysis (Ex: Pie chart)
# - Distribution of review ratings </h3>
# +
# Frequency of each rating
df = webmd['rating'].value_counts().reset_index()
# Converting float rating values to int
df.columns = ['rating','count']
df = df.astype({'rating':'int'})
# Plotting user rating distribution
size = df['count']
colors = ['salmon','lavender','lightgreen','pink','wheat','azure','sienna','orange','turquoise','olive']
labels = df['rating']
my_circle = plt.Circle((0, 0), 0.7, color = 'white')
plt.rcParams['figure.figsize'] = (10, 10)
plt.pie(size,colors = colors,labels = labels, autopct = '%.2f%%')
plt.title('User Rating Distribution', fontsize = 15)
plt.legend()
p = plt.gcf()
plt.gca().add_artist(my_circle)
plt.show()
# -
# <h3> Bivariate analysis (Ex: scatter plot, joint plots) is used to show relationship between two variables.</h3>
# <h3> 1. Joint plot of Sentiment Score vs Rating </h3>
# +
# Joint plot of sentiment score vs rating.
# Count distinct 'index' values per drug — presumably 'index' is a unique
# row/review id, so this is the review count per drug (verify schema).
reviews_per_drug = webmd_df.groupby(['drug']).agg({
    'index': pd.Series.nunique
})
# Per-drug mean sentiment, mean rating, and review count.
drugs_sentiment = webmd_df.groupby(['drug'])
drugs_sentiment = drugs_sentiment.agg({
    'sentiment': np.mean,
    'rating': np.mean,
    'index': pd.Series.nunique
})
# Keep only drugs above the 75th percentile of review count so the
# regression is not dominated by drugs with a handful of reviews.
drugs_sentiment = drugs_sentiment[drugs_sentiment['index'] > reviews_per_drug.quantile(q=0.75)[0]]
sns.jointplot(x= 'sentiment', y= 'rating', data=drugs_sentiment, kind='reg', height=8, scatter_kws={'s': 20})
print('Joint plot of Sentiment Score vs Rating')
plt.xticks(np.arange(-1,1,0.2))
plt.show()
# -
# <h3> 2. Average Useful Count vs Rating </h3>
# +
#Scatter plot of average useful_count vs rating
avg_useful_count_list = []
ratings = range(1, 11)
for i in ratings:
avg_useful_count_list.append([i, np.sum(webmd[webmd['rating'] == i].usefulcount) / np.sum([webmd['rating'] == i])])
count_arr = np.asarray(avg_useful_count_list)
plt.rcParams['figure.figsize'] = (15, 8)
plt.scatter(count_arr[:, 0], count_arr[:, 1], c=count_arr[:, 0], cmap = 'coolwarm', s=400)
plt.title('Average Useful Count vs Rating',fontsize = 15)
plt.xlabel('Rating',fontsize = 15)
plt.ylabel('Average Useful Count',fontsize = 15)
plt.xticks(np.arange(1,11))
plt.yticks(np.arange(0,20,5))
plt.grid()
plt.show()
# -
# <h3> Correlation Coefficients (Between Sentiment Score and Rating) </h3>
# Spearman correlation between sentiment and rating
spearmanr(webmd_df['sentiment'], webmd_df['rating'])
# Pearson correlation between sentiment and rating
pearsonr(webmd_df['sentiment'], webmd_df['rating'])
| EDA/EDA_WebMD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mrec
# language: python
# name: mrec
# ---
# # Here I will show there are overlapping sentences in the train, validation and test set
# cd ..
import pandas as pd
from mrec.data.dataset import load_data
# +
csv_fnames = {'train': 'dataset/raw/train.csv', 'validation': 'dataset/raw/validation.csv', 'test': 'dataset/raw/test.csv'}
base_dir = '/Users/ktle2/personal_projects/mrec/models/baseline_model'
pred_csv_fnames = {'train': f'{base_dir}/train-predictions.csv',
'validation': f'{base_dir}/validation-predictions.csv',
'test': f'{base_dir}/test-predictions.csv'}
dataset = load_data(pred_csv_fnames)
# +
cols = ['_unit_id', 'relation', 'sentence', 'direction', 'term1', 'term2', 'relation_pred']
train = dataset.train[cols]
validation = dataset.validation[cols]
test= dataset.test[cols]
# -
train.head(5)
# ### Here I will show the inconsistency in labeling `relation` on sentences. I will group `_unit_id`, `relation`, `sentence`, `term1`, and `term2` and do a majority vote on `direction` to remove duplicates. Then I will show that the same sentence can have different relations.
MAJORITY_VOTE_FLAG = False
# majority vote assigned as the direction, thereby duplicates are removed
if MAJORITY_VOTE_FLAG:
group_cols = ['_unit_id', 'relation', 'sentence', 'term1', 'term2', 'relation_pred']
train_no_dup = train.groupby(group_cols)['direction'].agg(pd.Series.mode).reset_index()
val_no_dup = validation.groupby(group_cols)['direction'].agg(pd.Series.mode).reset_index()
test_no_dup = test.groupby(group_cols)['direction'].agg(pd.Series.mode).reset_index()
else:
relation_type = ['causes', 'treats']
train_no_dup = train[train['relation'].isin(relation_type)].drop_duplicates(subset='_unit_id')
val_no_dup = validation[validation['relation'].isin(relation_type)].drop_duplicates(subset='_unit_id')
test_no_dup = test[test['relation'].isin(relation_type)].drop_duplicates(subset='_unit_id')
train_no_dup['sentence'].nunique()
# +
grouped_df = train_no_dup.groupby(['sentence']).size().reset_index(name='show-up counts')
print(grouped_df.shape)
duplicated_sentences_count = grouped_df[grouped_df['show-up counts'] > 1].reset_index(drop=True)
print(f"Number of duplicated sentences within training set: {duplicated_sentences_count.shape[0]}")
print(f"Distribution of duplicated sentences:\n{duplicated_sentences_count['show-up counts'].value_counts()}")
duplicated_sentences_count.head()
# -
# __We see that we still have duplicated sentences. Let's look closely at a sentence that has 3 duplicates after doing the majority vote.__
sentence = '164 Babesiosis Treatment of BABESIOSIS + caused by BABESIA MICROTI.'
train_no_dup[train_no_dup['sentence'] == sentence]
# __This sentence has duplicates because it has different `_unit_id` and `relation` values. If we do a majority vote without grouping by `_unit_id`, we still have duplicated sentences with different relations. Hence this train dataset is inconsistent in labeling the relation for each unique sentence.__
dset_size = train_no_dup.shape[0]
duplicates = dset_size - train_no_dup['sentence'].nunique()
print('Number of rows after do majority vote:', dset_size)
print('Number of duplicate sentences:', duplicates)
print('Normalize: {:.2f}%'.format(duplicates / dset_size * 100))
# +
# Rows where the baseline model's prediction disagrees with the label.
false_train_predictions = train_no_dup[train_no_dup['relation'] != train_no_dup['relation_pred']]
misclassified_duplicated_sentences_total = false_train_predictions.shape[0]
print('Misclassified duplicated sentences: {}({:0.3f}%)\n'.format(misclassified_duplicated_sentences_total, misclassified_duplicated_sentences_total/dset_size*100))
print('Verifying that sentence is repeated within training sentence')
sample_sentence = 'Thus, the present data support the hypothesis that the therapeutic effects of CLOZAPINE in this primate model and perhaps in SCHIZOPHRENIA may be related at least in part to the restoration of DA tone in the prefrontal cortex.'
# Bug fix: the original filtered on `sentence` (a leftover variable from an
# earlier cell) instead of the `sample_sentence` defined just above.
train_no_dup[train_no_dup['sentence'] == sample_sentence]
# -
print(train_no_dup.shape)
clean_train = train_no_dup.drop(list(train_no_dup[train_no_dup['sentence'].duplicated(False)].index))
print(clean_train.shape)
clean_train.shape
clean_train[clean_train['sentence'] == sample_sentence]
# __Here is what that sentence look like in raw train set__
train[train['sentence'] == sentence]
# __Here is how severe this case is in validation set__
# +
duplicates = val_no_dup['sentence'].duplicated().sum()
dset_size = val_no_dup.shape[0]
print('Number of rows after do majority vote:', dset_size)
print('Number of duplicate sentences:', duplicates)
print('Normalize: {:.2f}%'.format(duplicates / dset_size * 100))
false_val_predictions = val_no_dup[val_no_dup['relation'] != val_no_dup['relation_pred']]
misclassified_duplicated_sentences_total = false_val_predictions.shape[0]
print('Misclassified duplicated sentences: {}({:0.3f}%)\n'.format(misclassified_duplicated_sentences_total, misclassified_duplicated_sentences_total/dset_size*100))
# -
# __Here is how severe this case is in test set__
# +
duplicates = test_no_dup['sentence'].duplicated().sum()
dset_size = test_no_dup.shape[0]
print('Number of rows after do majority vote:', dset_size)
print('Number of duplicate sentences:', duplicates)
print('Normalize: {:.2f}%'.format(duplicates / dset_size * 100))
false_test_predictions = test_no_dup[test_no_dup['relation'] != test_no_dup['relation_pred']]
misclassified_duplicated_sentences_total = false_test_predictions.shape[0]
print('Misclassified duplicated sentences: {}({:0.3f}%)\n'.format(misclassified_duplicated_sentences_total, misclassified_duplicated_sentences_total/dset_size*100))
# -
# #### In order to prove that there are overlapping sentences in the train, validation and test sets, I will do a majority vote on `direction` in each set to remove duplicates. Then I will concatenate the train and validation sets and check for duplicate sentences. I will also concatenate the train and test sets and check for duplicate sentences.
train_and_val_dfs = [train_no_dup, val_no_dup]
train_concat_val = pd.concat(train_and_val_dfs)
train_concat_val['sentence'].duplicated().sum()
data = [['tom'], ['tom'], ['tom']]
df = pd.DataFrame(data, columns=['Name'])
df
df.duplicated().sum()
| notebooks/Dataset_Error.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np
import pandas as pd
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
credits = pd.read_csv('credits.csv')
# -
credits
meta = pd.read_csv('movies_metadata.csv')
meta['release_date'] = pd.to_datetime(meta['release_date'], errors='coerce')
# +
meta['year'] = meta['release_date'].dt.year
# -
meta['year'].value_counts().sort_index()
new_meta = meta.loc[meta.year == 2017,['genres','id','title','year']]
new_meta
new_meta['id'] = new_meta['id'].astype(int)
data = pd.merge(new_meta, credits, on='id')
pd.set_option('display.max_colwidth', 75)
data
import ast
# These columns hold Python-literal strings (lists of dicts); parse each one
# back into real objects. Passing literal_eval directly avoids a needless
# lambda wrapper; the column order matches the original cell.
for column in ('genres', 'cast', 'crew'):
    data[column] = data[column].map(ast.literal_eval)
def make_genresList(x):
    """Flatten a list of TMDB genre dicts into a space-separated string.

    'Science Fiction' is abbreviated to 'Sci-Fi'; an empty genre list maps
    to NaN so those rows can later be dropped with dropna().
    """
    names = []
    for genre in x:
        name = genre.get('name')
        names.append('Sci-Fi' if name == 'Science Fiction' else name)
    if not names:
        return np.NaN
    return " ".join(names)
data['genres_list'] = data['genres'].map(lambda x: make_genresList(x))
data['genres_list']
def get_actor1(x):
    """Return the name of the first-billed cast member, or NaN when the cast list is empty."""
    names = [member.get('name') for member in x]
    return names[0] if names else np.NaN
data['actor_1_name'] = data['cast'].map(lambda x: get_actor1(x))
def get_actor2(x):
    """Return the second-billed cast member's name, or NaN when fewer than two are listed."""
    names = [member.get('name') for member in x]
    if len(names) <= 1:
        return np.NaN
    return names[1]
data['actor_2_name'] = data['cast'].map(lambda x: get_actor2(x))
data['actor_2_name']
def get_actor3(x):
    """Return the third-billed cast member's name, or NaN when fewer than three are listed."""
    names = [member.get('name') for member in x]
    if len(names) <= 2:
        return np.NaN
    return names[2]
data['actor_3_name'] = data['cast'].map(lambda x: get_actor3(x))
data['actor_3_name']
def get_directors(x):
    """Space-join the names of all crew members whose job is 'Director'.

    Returns NaN when the crew list contains no director.
    """
    directors = [member.get('name') for member in x if member.get('job') == 'Director']
    if not directors:
        return np.NaN
    return " ".join(directors)
data['director_name'] = data['crew'].map(lambda x: get_directors(x))
data['director_name']
movie = data.loc[:,['director_name','actor_1_name','actor_2_name','actor_3_name','genres_list','title']]
movie
movie.isna().sum()
movie = movie.dropna(how='any')
movie.isna().sum()
movie = movie.rename(columns={'genres_list':'genres'})
movie = movie.rename(columns={'title':'movie_title'})
movie['movie_title'] = movie['movie_title'].str.lower()
movie['comb'] = movie['actor_1_name'] + ' ' + movie['actor_2_name'] + ' '+ movie['actor_3_name'] + ' '+ movie['director_name'] +' ' + movie['genres']
movie
# Load the previously collected movie data and merge in the 2017 titles.
old = pd.read_csv('data.csv')
old
# Build the same combined feature column on the old data so both frames align.
old['comb'] = old['actor_1_name'] + ' ' + old['actor_2_name'] + ' '+ old['actor_3_name'] + ' '+ old['director_name'] +' ' + old['genres']
old
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent (same row order, indexes preserved).
new = pd.concat([old, movie])
new
# For titles present in both frames, keep the newer (2017) row.
new.drop_duplicates(subset ="movie_title", keep = 'last', inplace = True)
new
new.to_csv('new_data.csv',index=False)
| preprocessing 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0PtN05TGTmIf"
from tqdm import tqdm
import numpy as np
import re, collections
import pickle
from multiprocessing import Pool, Process
import multiprocessing
from functools import partial
from math import log
# + colab={"base_uri": "https://localhost:8080/"} id="mD5NP5jVDqwm" outputId="9307a351-9dc8-41a0-cb41-43e8a4a9e776"
# !pip install cython
# %load_ext Cython
# + id="7Bbm53OlDyv9"
a_file = open("/content/drive/MyDrive/74.pkl", "rb")
vocab = pickle.load(a_file)
# + id="04Pbi_9KLe-P" language="cython"
#
# def get_vocab(filename):
#
# vocab = {}
#
# for line in open(filename, 'r', encoding='utf-8'):
#
# line = line.strip()
#
# key = ' '.join(list(line))
#
# if key in vocab.keys():
# vocab[key] += 1
# else:
# vocab[key] = 1
#
# return vocab
#
# def get_freq(vocab, max_len):
#
# freqs = {}
# symbol_freqs = {}
# all = 0
# all_2 = 0
#
#
# for line, freq in vocab.items():
#
# symbols = line.split()
#
# all += len(symbols)
#
# for symbol_i in range(len(symbols)-1):
#
# key = symbols[symbol_i]
# if key in symbol_freqs.keys():
# symbol_freqs[key] += freq
# else:
# symbol_freqs[key] = freq
#
#
# if max_len >= len(symbols[symbol_i] + symbols[symbol_i+1]):
# all_2 += freq
#
# key = (symbols[symbol_i], symbols[symbol_i+1])
# if key in freqs.keys():
# freqs[key] += freq
# else:
# freqs[key] = freq
#
# key = symbols[-1]
# if key in symbol_freqs.keys():
# symbol_freqs[key] += freq
# else:
# symbol_freqs[key] = freq
#
# for key in freqs.keys():
# freqs[key] = (freqs[key]/all_2) / ((symbol_freqs[key[0]]/all) * (symbol_freqs[key[1]]/all))
#
# return freqs
#
#
# def merge_vocab(pair, v_in):
#
# v_out = {}
#
# bigram = ''.join(pair)
# pair = pair[0] + ' ' + pair[1]
#
# for line in v_in:
#
# w_out = line.replace(pair,bigram)
# v_out[w_out] = v_in[line]
#
# return v_out
#
# def get_tokens(vocab):
#
# tokens = set({})
#
# for line in vocab.keys():
# line = line.split()
#
# for token in line:
# tokens.add(token)
#
# return tokens
# + id="Ekcoz6IZGywe"
# Learn a BPE-style vocabulary: repeatedly merge the highest-scoring
# adjacent symbol pair (get_freq scores pairs by pair frequency divided by
# the product of the individual symbol frequencies) until no pair scores
# at least 2 or the merge budget runs out.
vocab = get_vocab('/content/sentences_for_task1.txt')
max_len = 16  # candidate merges whose merged token would exceed this length are skipped in get_freq
num_merges = 15000
t = tqdm(np.arange(num_merges))
for i in t:
    pairs = get_freq(vocab, max_len)
    if len(pairs) == 0: break  # nothing left to merge
    if max(pairs.values()) < 2: break  # remaining pairs are not associated strongly enough
    pair = max(pairs, key=pairs.get)  # best-scoring adjacent pair
    vocab = merge_vocab(pair, vocab)
    if i % 1000 == 0:
        # Periodic progress report: mean token length and vocabulary size.
        tokens = get_tokens(vocab)
        mt = np.mean([len(token) for token in tokens])
        t.set_postfix({'mean_token_len': mt, 'vocab_size': len(tokens)})
# + id="5lxykM37sEOo"
tokens = get_tokens(vocab)
# + colab={"base_uri": "https://localhost:8080/"} id="SvfcOXC4qIzK" outputId="141bdd27-427f-43a8-8d08-3b665f0a5df0"
np.mean([len(token) for token in tokens])
# + colab={"base_uri": "https://localhost:8080/"} id="Lf8prLoIH3pt" outputId="6fc98163-0953-42fa-e0cb-cc876059d190"
# Score the tokenizer on word pairs, one pair per line: +1 when only the
# first word is in the vocabulary, 0 when only the second is, +0.5 otherwise.
score = 0
num_pairs = 0
# Context manager closes the file (the original leaked the handle).
with open('/content/test_for_task1.txt', 'r') as test_file:
    for line in test_file:
        w1, w2 = line.split()
        num_pairs += 1
        if w1 in tokens and w2 not in tokens:
            score += 1
        elif w1 not in tokens and w2 in tokens:
            score += 0
        else:
            score += 0.5
# Bug fix: the original printed score/i where i was the last enumerate index
# (line count - 1), an off-by-one in the mean; divide by the line count.
print(score / num_pairs)
# + id="vunaqthkr1KL"
# Persist the learned vocabulary to disk. Removed the stray "aweaimu" token
# that made this line a SyntaxError; a context manager guarantees the file
# is closed even if pickling raises.
with open("data.pkl", "wb") as a_file:
    pickle.dump(vocab, a_file)
| Assignment4/Tokenizer_BPE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Consumer Complaints: Plotting CFPB data
# This notebook reads, analyzes and plots Consumer Financial Protection Bureau data for the month of December 2014.
import pandas as pd
import matplotlib as plt
import seaborn
# %matplotlib inline
consumer_complaints = pd.read_csv('complaints_dec_2014.csv')
# The first 15 entries are shown to give a taste of the data.
consumer_complaints.head(15)
# ### Consumer complaints by Product
products = consumer_complaints.groupby('Product')
products.size()
products.size().plot.bar(title="Number of Complaints by Product")
# ### Consumer complaints by Company
# For brevity, only the top 10 most frequent companies are provided.
consumer_complaints['Company'].value_counts()[:10]
consumer_complaints['Company'].value_counts()[:10].plot.bar(title="Consumer Complaints by Company")
# ### Number of complaints by company response
consumer_complaints['Company response'].value_counts()
consumer_complaints['Company response'].value_counts().plot.bar(title="Complaints by Company Response")
consumer_complaints['Company response'].value_counts().plot.pie(title="Complaints", autopct='%.2f')
# ### Mean number of complaints by day of week
# First, one must get the dates in the 'Date received' series (which are strings) into python's datetime objects. Rather than replace the dates, I add a new series to the data fram called 'date_of_complaint'.
consumer_complaints['date_of_complaint'] = pd.to_datetime(consumer_complaints['Date received'], format='%m/%d/%Y')
complaints_by_date = consumer_complaints['date_of_complaint'].value_counts()
complaints_by_date
# Now that the data frame has dates in datetime objects, one can create a new series called 'day_of_week' by mapping the datetime.weekday() method to complaints.
consumer_complaints['day_of_week'] = consumer_complaints['date_of_complaint'].map(lambda d: d.weekday())
# Use .head() to peek at the data and see that the two series have been added to the data frame.
consumer_complaints.head()
# Total number of complaints by day of the week
days_of_the_week = consumer_complaints.groupby('day_of_week')
days_of_the_week.size().plot.bar()
# Percentage of complaints by day of the week.
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# Bug fix: value_counts() sorts by frequency, so pairing it with a fixed
# Monday-Sunday label list mislabeled the slices. sort_index() orders the
# counts by weekday number (0 = Monday) to match the labels.
consumer_complaints['day_of_week'].value_counts().sort_index().plot.pie(title="Days of the Week", labels=days, autopct='%.2f')
| consumer-complaints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
root = 'logs/'
algos = ['a2c', 'acktr', 'ppo']
games = ['beamrider', 'breakout', 'qbert']
numTrials = 5
def getLogFileNames(trialName):
    """Return the trial's log folder path and the monitor log file names inside it."""
    folder = root + trialName
    monitorLogs = []
    for entry in os.listdir(folder):
        if 'monitor' in entry:
            monitorLogs.append(entry)
    return folder, monitorLogs
trialNames = []
for algo in algos:
for game in games:
for trialNum in range(1, numTrials + 1):
trialStr = str(trialNum)
trialName = algo + '_' + game + '_' + trialStr
trialNames.append(trialName)
def getTrialStats(trialName):
    """Parse a trial's monitor logs into parallel (timeSteps, rewards) lists.

    Each monitor file has a two-line header followed by CSV rows parsed as
    'reward,steps,time', one row per episode. Per-episode step counts are
    scaled by the number of log files to estimate the global timestep at
    which each episode finished.
    """
    folderPath, logFileNames = getLogFileNames(trialName)
    #print(folderPath)
    logs = []
    for fileName in logFileNames:
        filePath = folderPath + '/' + fileName
        log_lines = []
        with open(filePath, 'r') as log_file:
            log_lines = log_file.readlines()
        logs.append(log_lines[2:])  # drop the two header lines
    #print(logs[0][7])
    numProcesses = len(logs)  # assumes one monitor file per worker process — TODO confirm
    timeSteps = []
    rewards = []
    for log in logs:
        curTimeStep = 0  # cumulative (scaled) steps within this log
        for line in log:
            lineParts = line.split(',')
            episodeReward = float(lineParts[0])
            episodeSteps = int(lineParts[1])
            episodeTime = float(lineParts[2])  # parsed but unused
            curTimeStep += episodeSteps * numProcesses
            timeSteps.append(curTimeStep)
            rewards.append(episodeReward)
    return timeSteps, rewards
# +
import matplotlib.pyplot as plt
import numpy
def running_mean(x, N):
    """Simple moving average of x over a window of N samples.

    Returns an array of length len(x) - N + 1 where element i is the mean
    of x[i:i+N], computed with a cumulative-sum trick in O(len(x)).
    """
    padded = numpy.insert(x, 0, 0)
    totals = numpy.cumsum(padded)
    window_sums = totals[N:] - totals[:-N]
    return window_sums / float(N)
for i, trialName in enumerate(trialNames):
times, rewards = getTrialStats(trialName)
times, rewards = zip(*sorted(zip(times, rewards)))
rewards = running_mean(numpy.array(rewards), 32)
plt.figure(i)
plt.title(trialName)
plt.plot(times[:-31], rewards)
plt.show()
# -
| custom_visualize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Identificación de hongos venenosos usando regresión logística
# ===
# Construya un modelo de regresión logística que permita identificar si un hongo es venenoso o no. Para ello, utilice la muestra de datos suministrada.
#
# La base de datos contiene 8124 instancias de hongos provenientes de 23 especies de la familia Agaricus y Lepiota, los cuales han sido clasificados como comestibles, venenosos o de comestibilidad indeterminada. Por el tipo de problema en cuestión, los hongos de comestibilidad desconocida deben ser asignados a la clase de hongos venenosos, ya que no se puede correr el riesgo de dar un hongo potencialmente venenoso a una persona para su consumo.
#
# Véase https://www.kaggle.com/uciml/mushroom-classification
#
# Evalue el modelo usando la matriz de confusión.
#
# La información contenida en la muestra es la siguiente:
#
# 1. cap-shape: bell=b,conical=c,convex=x,flat=f,
# knobbed=k,sunken=s
# 2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s
# 3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r,
# pink=p,purple=u,red=e,white=w,yellow=y
# 4. bruises?: bruises=t,no=f
# 5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f,
# musty=m,none=n,pungent=p,spicy=s
# 6. gill-attachment: attached=a,descending=d,free=f,notched=n
# 7. gill-spacing: close=c,crowded=w,distant=d
# 8. gill-size: broad=b,narrow=n
# 9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g,
# green=r,orange=o,pink=p,purple=u,red=e,
# white=w,yellow=y
# 10. stalk-shape: enlarging=e,tapering=t
# 11. stalk-root: bulbous=b,club=c,cup=u,equal=e,
# rhizomorphs=z,rooted=r,missing=?
# 12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s
# 13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s
# 14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o,
# pink=p,red=e,white=w,yellow=y
# 15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o,
# pink=p,red=e,white=w,yellow=y
# 16. veil-type: partial=p,universal=u
# 17. veil-color: brown=n,orange=o,white=w,yellow=y
# 18. ring-number: none=n,one=o,two=t
# 19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l,
# none=n,pendant=p,sheathing=s,zone=z
# 20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r,
# orange=o,purple=u,white=w,yellow=y
# 21. population: abundant=a,clustered=c,numerous=n,
# scattered=s,several=v,solitary=y
# 22. habitat: grasses=g,leaves=l,meadows=m,paths=p,
# urban=u,waste=w,woods=d
# +
#
# Carga de la muestra de datos.
#
import pandas as pd

#
# Load the sample data.
#
df = pd.read_csv(
    "https://raw.githubusercontent.com/jdvelasq/datalabs/master/datasets/mushrooms.csv"
)
# +
#
# Drop the 'veil_type' column (it contains a single value) and
# one-hot encode the remaining categorical features before fitting
# a logistic-regression classifier.
#
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import OneHotEncoder

# >>> Inserte su codigo aquí >>>
import numpy as np

# Remove the constant-valued 'veil_type' column.
df.drop(labels='veil_type', axis=1, inplace=True)

# Separate the target ('type') from the feature columns.
y = df['type']
x = df.drop(labels='type', axis=1)

# One-hot encode every categorical feature into a dense array.
ohe = OneHotEncoder()
xencoded = ohe.fit_transform(x).toarray()

# Fit the classifier and predict on the same (training) sample.
clf = LogisticRegression()
clf.fit(xencoded, y)
y_pred = clf.predict(xencoded)
y_pred.shape

# Confusion matrix of the in-sample predictions.
cm = confusion_matrix(y, y_pred)
cm

# ---->>> Evaluación ---->>>
# cm es la matriz de confusion
print(cm[0][0] == 4208)
print(cm[0][1] == 0)
print(cm[1][0] == 0)
print(cm[1][1] == 3916)
| notebooks/analitica_predictiva/06-001_identificacion_de_hongos_venenosos_usando_regresion_logistica.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ak9250/Real-Time-Voice-Cloning/blob/master/Real_Time_Voice_Cloning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6yk3PMfBuZhS" colab_type="text"
# Make sure GPU is enabled
# Runtime -> Change Runtime Type -> Hardware Accelerator -> GPU
# + id="qhunyJSod_UT" colab_type="code" colab={}
# !git clone https://github.com/CorentinJ/Real-Time-Voice-Cloning.git
# + id="pE6btDZWeFV0" colab_type="code" colab={}
# cd Real-Time-Voice-Cloning/
# + id="0AVd9vLKeKm6" colab_type="code" colab={}
# !pip install -r requirements.txt
# + id="VuwgOQlPeN8a" colab_type="code" colab={}
# !gdown https://drive.google.com/uc?id=1n1sPXvT34yXFLT47QZA6FIRGrwMeSsZc
# + id="vKLpYfRkfyjX" colab_type="code" colab={}
# !unzip pretrained.zip
# + id="84mPqfQGgM6t" colab_type="code" colab={}
# !apt-get install libportaudio2
# + id="s9VYtcYliOGp" colab_type="code" colab={}
# mkdir UserAudio
# + id="PnV2xXI-jEbd" colab_type="code" colab={}
# !pip install youtube-dl
# + id="I5y58GgdiQdn" colab_type="code" colab={}
# !youtube-dl --extract-audio --audio-format wav -o "UserAudio/test.wav" <YouTube-video-URL>   (replace <YouTube-video-URL> with the link to the video whose audio you want)
# + id="YOiGYfpAf2qR" colab_type="code" colab={}
# !python demo_cli.py --no_sound
# + id="PyLdbUfks2lv" colab_type="code" colab={}
| demo_toolbox_collab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''s1-enumerator'': conda)'
# name: python3
# ---
# This is to demonstrate how to download files given the name of the deployment (e.g. ```test-dem-95-sim```).
# +
import hyp3_sdk
import os
import json
from pathlib import Path

# Name of the HyP3 deployment whose products we want to download.
job_name = 'test-dem-95-sim'
# +
# uses .netrc; add `prompt=True` to prompt for credentials;
hyp3_isce = hyp3_sdk.HyP3('https://hyp3-isce.asf.alaska.edu/')

# Look up every job submitted under `job_name` and persist the job ids to a
# JSON file so the download step below can be re-run without re-querying.
jobs = hyp3_isce.find_jobs(name=job_name)
job_ids = [job.job_id for job in jobs]

# BUGFIX: the original built the filename as `job_name + 'json'` (missing
# dot), producing e.g. 'test-dem-95-simjson'; use a proper '.json' suffix.
job_ids_path = Path(job_name + '.json')
job_ids_path.write_text(json.dumps(job_ids))
# +
# Absolute path of the JSON file that lists the datasets to be downloaded.
job_ids_file = str(job_ids_path.absolute())

# Directory the products are saved into; exist_ok makes re-runs idempotent.
savedir = os.path.join(os.path.abspath(os.getcwd()), 'deployment')
os.makedirs(savedir, exist_ok=True)
# +
import concurrent.futures
from tqdm import tqdm

# Re-read the ids (this step can run independently of the query above)
# and rebuild a Batch of job objects from them.
job_ids = json.loads(Path(job_ids_file).read_text())
jobs = hyp3_sdk.Batch()
for job_id in job_ids:
    jobs += hyp3_isce.get_job_by_id(job_id)
# -
# Download the files of all jobs in parallel (up to 10 concurrent downloads).
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
    results = list(tqdm(executor.map(lambda job: job.download_files(savedir), jobs), total=len(jobs)))
| download_gunws.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 20-CLEAN
# %load_ext autoreload
# %autoreload 2
import os
import sys
import logging

from fddc.config import Config
from fddc.annex_a.cleaner import clean

# Configure the root logger to emit INFO-and-above records.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Emitting one record up front initialises the logging handlers; without it
# later log calls are silently dropped. (Fixed typo: "loggin" -> "logging".)
logging.debug("This is just to get logging to work - it seems to refuse to log unless you log something!")
# +
# Set paths for the cleaning run.
config = Config("config/data-map.yml")

# Full path to the input file that should be cleaned
config["input_file"] = "merged.xlsx"

# Full path to the output file that will hold the cleaned data
config["output_file"] = "cleaned.xlsx"

# Full path to a file holding a report of how the matching was performed
config["matching_report_file"] = "matching_report.xlsx"
# +
# Launch cleaning with the configuration assembled above.
clean(**config)
| 20-CLEAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Start-to-Finish Example: Numerical Solution of the Scalar Wave Equation, in Curvilinear Coordinates
#
# ## Author: <NAME>
# ### Formatting improvements courtesy <NAME>
#
# ## This module solves the scalar wave equation for a plane wave in *spherical coordinates* (though other coordinates, including Cartesian, may be chosen). To make the entire code immediately visible, the [`MoLtimestepping`](Tutorial-Method_of_Lines-C_Code_Generation.ipynb) module is not used here. In addition, this module is designed to motivate the upcoming [`CurviBoundaryConditions`](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb) module.
#
# **Module Status:** <font color ="green"><b> Validated </b></font>
#
# **Validation Notes:** This module has been validated to converge at the expected order to the exact solution (see [plot](#convergence) at bottom).
#
# ### NRPy+ Source Code for this module:
# * [ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py) [\[**tutorial**\]](Tutorial-ScalarWaveCurvilinear.ipynb) Generates the right-hand side for the Scalar Wave Equation in curvilinear coordinates
# * [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb) Generating C code for plane wave initial data for the scalar wave equation
#
# ## Introduction:
# As outlined in the [previous NRPy+ tutorial module](Tutorial-ScalarWaveCurvilinear.ipynb), we first use NRPy+ to generate initial data for the scalar wave equation, and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).
#
# The entire algorithm is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>.
#
# 1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.
# 1. <font color='green'>Set gridfunction values to initial data.</font>
# 1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following:
# 1. <font color='green'>Evaluate scalar wave RHS expressions.</font>
# 1. Apply boundary conditions.
# 1. At the end of each iteration in time, output the relative error between numerical and exact solutions.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This module is organized as follows
#
# 1. [Step 1](#outputc): Call on NRPy+ to output needed C code for initial data and scalar wave RHSs
# 1. [Step 1.a](#ref_metric): Output quantities related to reference metric
# 1. [Step 2](#writec): Writing the necessary C code infrastructure to solve the scalar wave equation in curvilinear, singular coordinates
# 1. [Step 2.a](#bcs): Write needed C code for implementing boundary condition algorithm above
# 1. [Step 2.b](#cfl): Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep
# 1. [Step 3](#mainc): `ScalarWaveCurvilinear_Playground.c`: The Main C Code
# 1. [Step 4](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order
# 1. [Step 5](#latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file
# <a id='outputc'></a>
#
# # Step 1: Call on NRPy+ to output needed C code for initial data and scalar wave RHSs \[Back to [top](#toc)\]
# $$\label{outputc}$$
#
#
# We choose simple plane wave initial data, which is documented in the [Cartesian scalar wave module](Tutorial-ScalarWave.ipynb). Specifically, we implement monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction with speed $c$
# $$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$
# where $\hat{k}$ is a unit vector.
#
# The scalar wave RHSs in curvilinear coordinates (documented [in the previous module](Tutorial-ScalarWaveCurvilinear.ipynb)) are simply the right-hand sides of the scalar wave equation written in curvilinear coordinates
# \begin{align}
# \partial_t u &= v \\
# \partial_t v &= c^2 \left(\hat{g}^{ij} \partial_{i} \partial_{j} u - \hat{\Gamma}^i \partial_i u\right),
# \end{align}
# where $\hat{g}^{ij}$ is the inverse reference 3-metric (i.e., the metric corresponding to the underlying coordinate system we choose$-$spherical coordinates in our example below), and $\hat{\Gamma}^i$ is the contracted Christoffel symbol $\hat{\Gamma}^\tau = \hat{g}^{\mu\nu} \hat{\Gamma}^\tau_{\mu\nu}$.
#
# Below we generate
# + the initial data by calling `InitialData_PlaneWave()` inside the NRPy+ [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWave.ipynb)), and
# + the RHS expressions by calling `ScalarWaveCurvilinear_RHSs()` inside the NRPy+ [ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWaveCurvilinear.ipynb)).
# +
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import reference_metric as rfm
import finite_difference as fin
import loop as lp
from outputC import *
import sys

# Step P2: Set the coordinate system for the numerical grid.
par.set_parval_from_str("reference_metric::CoordSystem","SinhSpherical")

# Step 1: Import the ScalarWave.InitialData module.
#         This command only declares ScalarWave initial data
#         parameters and the InitialData_PlaneWave() function.
import ScalarWave.InitialData_PlaneWave as swid

# Step 2: Import ScalarWave_RHSs module.
#         This command only declares ScalarWave RHS parameters
#         and the ScalarWave_RHSs function (called later).
import ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs as swrhs

# Step 3: Set the spatial dimension parameter to 3,
#         and then read the parameter back as DIM.
#         (NOTE: an earlier comment said "FOUR"; the value set is 3.)
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")

# Step 4: Set the finite differencing order to 4, implying
#         NGHOSTS = 4/2 = 2 ghost zones on each face.
#         (NOTE: an earlier comment said 6; the value set is 4.)
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)

# Step 5: Call the InitialData_PlaneWave() function to set up
#         monochromatic (single frequency/wavelength) scalar
#         wave initial data.
swid.InitialData_PlaneWave()

# Step 6: Generate SymPy symbolic expressions for
#         uu_rhs and vv_rhs; the ScalarWave RHSs.
#         This function also declares the uu and vv
#         gridfunctions, which need to be declared
#         to output even the initial data to C file.
swrhs.ScalarWaveCurvilinear_RHSs()

# Step 7: Generate C code for the initial data, output to the file
#         "ScalarWaveCurvilinear/ScalarWaveCartesian_ExactSolution.h".
IDstring = fin.FD_outputC("returnstring",[lhrh(lhs=gri.gfaccess("in_gfs","uu"),rhs=swid.uu_ID),
                                          lhrh(lhs=gri.gfaccess("in_gfs","vv"),rhs=swid.vv_ID)])
with open("ScalarWaveCurvilinear/ScalarWaveCartesian_ExactSolution.h", "w") as file:
    file.write(IDstring)

# Step 8: Generate C code for the scalar wave RHSs, wrapped in a loop over
#         the grid interior, output to the file
#         "ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.h".
RHSstring = fin.FD_outputC("returnstring",[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=swrhs.uu_rhs),
                                           lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=swrhs.vv_rhs)])
with open("ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.h", "w") as file:
    file.write(lp.loop(["i2","i1","i0"],["NGHOSTS","NGHOSTS","NGHOSTS"],
                       ["NGHOSTS+Nxx[2]","NGHOSTS+Nxx[1]","NGHOSTS+Nxx[0]"],
                       ["1","1","1"],["const REAL invdx0 = 1.0/dxx[0];\n"+
                                      "const REAL invdx1 = 1.0/dxx[1];\n"+
                                      "const REAL invdx2 = 1.0/dxx[2];\n"+
                                      "#pragma omp parallel for",
                                      " const REAL xx2 = xx[2][i2];",
                                      " const REAL xx1 = xx[1][i1];"],"",
                       "const REAL xx0 = xx[0][i0];\n"+RHSstring))
# -
# <a id='ref_metric'></a>
#
# ## Step 1.a: Output quantities related to reference metric \[Back to [top](#toc)\]
# $$\label{ref_metric}$$
#
# There are several subtleties when generalizing the [start-to-finish scalar wave tutorial in Cartesian coordinates](Tutorial-Start_to_Finish-ScalarWave.ipynb) to curvilinear coordinates.
#
# Consider for example *ordinary* (as opposed to, e.g., logarithmic-radial) spherical coordinates. In these coordinates, we choose a *uniform* grid in $(x_0,x_1,x_2)=(r,\theta,\phi)$. That is to say, we choose *constant* spacing $\Delta r$, $\Delta \theta$, and $\Delta \phi$ between grid points in $r$, $\theta$, and $\phi$, respectively.
#
# + Unlike Cartesian coordinates, in which our computational domain with coordinates $(x_0,x_1,x_2)=(x,y,z)$ may extend over arbitrary coordinate ranges $x_i \in [x_{i, \rm min},x_{i, \rm max}]$, our spherical coordinate domains will satisfy
#
# + $x_0 = r \in [0,{\rm RMAX}]$,
# + $x_1 = \theta \in [0,\pi]$, and
# + $x_2 = \phi \in [-\pi,\pi]$. (Notice how we do not choose $x_2= \phi \in [0,2\pi]$ so that our conversion from Cartesian to spherical coordinates is compatible with the output range from the ${\rm atan2}(y,x)$ function: $\phi={\rm atan2}(y,x)\in[-\pi,\pi]$.)
#
# + Further, most non-Cartesian, orthogonal coordinate systems (like spherical coordinates) possess *coordinate singularities*. Coordinate singularities in spherical coordinates lie along $\theta=0$ and $\theta=\pi$; these are points where the coordinate system focuses to a single point. For example, the coordinate singularity at the North Pole is the reason why all directions are south there. Critically, these singularities manifest as points where the reference metric or its inverse crosses through zero or diverges to $\infty$. As we derived in a [previous module](Tutorial-ScalarWaveCurvilinear.ipynb), the Laplacian in spherical polar coordinates takes the form
# $$
# \nabla^2 u = \partial_r^2 u + \frac{1}{r^2} \partial_\theta^2 u + \frac{1}{r^2 \sin^2 \theta} \partial_\phi^2 u + \frac{2}{r} \partial_r u + \frac{\cos\theta}{r^2 \sin\theta} \partial_\theta u,
# $$
# which diverges at $r=0$ and $\sin\theta=0$—precisely at the $\theta=0$ and $\theta=\pi$ coordinate singularities. To avoid this divergence, we simply choose that our numerical grids be *cell-centered*. For example, if we choose a numerical grid with 3 outer ghost zone points (needed due to 6th-order-accurate centered finite differencing), and we want the grid interior to be sampled with ${\rm Nr}\times{\rm Ntheta}\times{\rm Nphi}$ grid points, then the cell-centered grid points can be indexed via three *even* integers $\left(\rm{i0}\in[0,{\rm Nr}+6),\rm{i1}\in[0,{\rm Ntheta}+6),\rm{i2}\in[0,{\rm Nphi}+6)\right)$:
# + $r_{\rm i0} = r_{\rm min} + \left({\rm i0} + \frac{1}{2}\right)\Delta r$, where $r_{\rm min}=\left(-3+\frac{1}{2}\right)\Delta r$;
# + $\theta_{\rm i1} = \theta_{\rm min} + \left({\rm i1} + \frac{1}{2}\right)\Delta \theta$, where $\theta_{\rm min}= \left(-3+\frac{1}{2}\right)\Delta \theta$; and
# + $\phi_{\rm i2} = \phi_{\rm min} + \left({\rm i2} + \frac{1}{2}\right)\Delta \phi$, where $\phi_{\rm min}=-\pi+ \left(-3+\frac{1}{2}\right)\Delta \phi$.
#
# + Unlike Cartesian coordinates, the boundaries of our grid $x_i \in [x_{i, \rm min},x_{i, \rm max}]$ in spherical coordinates are not all outer boundaries. This presents some additional challenges, as finite difference numerical grids of $N$th order accuracy generally possess $N/2$ *ghost zone points* in regions $x_i < x_{i,\rm min}$ and $x_i > x_{i, \rm max}$. While in Cartesian coordinates, these ghost zone points map to regions outside the grid domain $x_i \in [x_{i, \rm min},x_{i, \rm max}]$, in spherical coordinates, most ghost zone points map to regions *inside* the grid domain. For example, for some $\tilde{r}\in [0,{\rm RMAX}]$ and $\tilde{\theta}\in[0,\pi]$, the ghost zone point $(\tilde{r},\tilde{\theta},2\pi+\Delta \phi/2)$ would map to the interior point $(\tilde{r},\tilde{\theta},\Delta \phi/2)$ because the $\phi$ coordinate is periodic. Thus when given a ghost zone point in some non-Cartesian coordinate system, we are faced with the problem of addressing the following two questions:
# 1. Does a given ghost point map to an interior point, or is it an outer boundary point (i.e., a point exterior to the domain)?
# 1. If the ghost zone point maps to an interior point, to which interior point does it map?
#
# We handle ghost zone points outside the domain boundary in generic coordinate systems by applying the following, simple process at each ghost zone point:
# 1. Convert the coordinate $(x_0,x_1,x_2)$ for the ghost zone point to Cartesian coordinates $\left(x(x_0,x_1,x_2),y(x_0,x_1,x_2),z(x_0,x_1,x_2)\right)$. For example, if we choose ordinary spherical coordinates $(x_0,x_1,x_2)=(r,\theta,\phi)$, then
# + $x(r,\theta,\phi) = r \sin(\theta) \cos(\phi) = x_0 \sin(x_1) \cos(x_2)$
# + $y(r,\theta,\phi) = r \sin(\theta) \sin(\phi) = x_0 \sin(x_1) \sin(x_2)$
# + $z(r,\theta,\phi) = r \cos(\theta) = x_0 \cos(x_1)$
# 1. Once we have $(x,y,z)$, we then find the corresponding value $(x_0,x_1,x_2)_{\rm in/OB}=(r,\theta,\phi)_{\rm in/OB}$ *in the grid interior or outer boundary*, via the simple inverse formula:
# + $r_{\rm in/OB} = x_{0, \rm in/OB} = \sqrt{x^2+y^2+z^2} \in [0,\infty)$
# + $\theta_{\rm in/OB} = x_{1, \rm in/OB} = {\rm acos}\left(\frac{z}{\sqrt{x^2+y^2+z^2}}\right) \in [0,\pi]$
# + $\phi_{\rm in/OB} = x_{2, \rm in/OB} = {\rm atan2}(y,x) \in [-\pi,\pi]$ [Wikipedia article on atan2()](https://en.wikipedia.org/w/index.php?title=Atan2&oldid=859313982)
#
# 1. If $(x_0,x_1,x_2)_{\rm in/OB}$ is the same as the original $(x_0,x_1,x_2)$, then we know $(x_0,x_1,x_2)$ is an outer boundary point (in spherical coordinates, at $r>{\rm RMAX}$), and we store `(i0,i1,i2)`$_{\rm in/OB} = (-1,-1,-1)$. Otherwise, we know that $(x_0,x_1,x_2)$ maps to some interior point at index `(i0,i1,i2)`, which we store:
# + $\rm{i0}_{\rm in/OB}=\frac{r_{\rm in/OB} - r_{\rm min}}{\Delta r} - \frac{1}{2}$
# + $\rm{i1}_{\rm in/OB}=\frac{\theta_{\rm in/OB} - \theta_{\rm min}}{\Delta \theta} - \frac{1}{2}$
# + $\rm{i2}_{\rm in/OB}=\frac{\phi_{\rm in/OB} - \phi_{\rm min}}{\Delta \phi} - \frac{1}{2}$
#
# 1. When updating a ghost zone point `(i0,i1,i2)` in the domain exterior, if the corresponding `(i0,i1,i2)`$_{\rm in/OB}$ was set to $(-1,-1,-1)$, then we apply outer boundary conditions. Otherwise, we simply copy the data from the interior point at `(i0,i1,i2)`$_{\rm in/OB}$ to `(i0,i1,i2)`.
#
# **Exercise to student: Given the prescription above, why do the integers $\left(\rm{i0},\rm{i1},\rm{i2}\right)$ need to be even?**
#
# Next we generate the C code needed for applying boundary conditions in generic coordinate systems (and, in the case of `xxminmax.h`, to set up the numerical grid coordinates as well):
# 1. $(x_{0},x_{1},x_{2})_{\rm min}$, $(x_{0},x_{1},x_{2})_{\rm max}$, (`ScalarWaveCurvilinear/xxminmax.h`)
# 1. $\left(x(x_0,x_1,x_2),y(x_0,x_1,x_2),z(x_0,x_1,x_2)\right)$, (`ScalarWaveCurvilinear/xxCart.h`)
# 1. $\left(x_0(x,y,z),x_1(x,y,z),x_2(x,y,z)\right)$, (`ScalarWaveCurvilinear/Cart_to_xx.h`):
# Generic coordinate NRPy+ file output, Part 1: output the coordinate bounds xxmin[] and xxmax[]:
with open("ScalarWaveCurvilinear/xxminmax.h", "w") as file:
    file.write("const REAL xxmin[3] = {"+str(rfm.xxmin[0])+","+str(rfm.xxmin[1])+","+str(rfm.xxmin[2])+"};\n")
    file.write("const REAL xxmax[3] = {"+str(rfm.xxmax[0])+","+str(rfm.xxmax[1])+","+str(rfm.xxmax[2])+"};\n")

# Generic coordinate NRPy+ file output, Part 2: output the conversion from (x0,x1,x2) to Cartesian (x,y,z):
outputC([rfm.xxCart[0],rfm.xxCart[1],rfm.xxCart[2]],["xCart[0]","xCart[1]","xCart[2]"],
        "ScalarWaveCurvilinear/xxCart.h")

# Generic coordinate NRPy+ file output, Part 3: output the conversion from
# Cartesian (x,y,z) back to the in-bounds/outer-boundary (x0,x1,x2).
# First make sure that rfm.Cart_to_xx has been set. Error out if not!
# (A zero entry means the inverse map was never defined for this coordinate system.)
if rfm.Cart_to_xx[0] == 0 or rfm.Cart_to_xx[1] == 0 or rfm.Cart_to_xx[2] == 0:
    # Fixed typo in the user-facing message: "REQUiRE" -> "REQUIRE".
    print("ERROR: rfm.Cart_to_xx[], which maps Cartesian -> xx, has not been set for")
    print("       reference_metric::CoordSystem = "+par.parval_from_str("reference_metric::CoordSystem"))
    print("       Boundary conditions in curvilinear coordinates REQUIRE this be set.")
    sys.exit(1)
outputC([rfm.Cart_to_xx[0],rfm.Cart_to_xx[1],rfm.Cart_to_xx[2]],
        ["Cart_to_xx0_inbounds","Cart_to_xx1_inbounds","Cart_to_xx2_inbounds"],
        "ScalarWaveCurvilinear/Cart_to_xx.h")
# <a id='writec'></a>
#
# # Step 2: Writing the necessary C code infrastructure to solve the scalar wave equation in curvilinear, singular coordinates \[Back to [top](#toc)\]
# $$\label{writec}$$
#
# <a id='bcs'></a>
#
# ## Step 2.a: Write needed C code for implementing boundary condition algorithm above \[Back to [top](#toc)\]
# $$\label{bcs}$$
# +
# %%writefile ScalarWaveCurvilinear/curvilinear_boundary_conditions.h
// Map from a ghost-zone gridpoint (i0,i1,i2) to the interior gridpoint it
// duplicates. The sentinel (-1,-1,-1) marks a true outer-boundary point.
typedef struct ghostzone_map {
  short i0,i1,i2;
} gz_map;

// Fill bc_gz_map[] for every gridpoint (ghost zones included): convert the
// point to Cartesian coordinates, map back to the unique in-bounds
// curvilinear coordinate, and record the corresponding grid index.
void set_up_bc_gz_map(const int Nxx_plus_2NGHOSTS[3], REAL *xx[3], const REAL dxx[3], const REAL xxmin[3], const REAL xxmax[3], gz_map *bc_gz_map) {
  LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]) {
    // Step 1: curvilinear (x0,x1,x2) -> Cartesian (x,y,z).
    REAL xCart[3];
    xxCart(xx, i0,i1,i2, xCart);
    REAL Cartx = xCart[0];
    REAL Carty = xCart[1];
    REAL Cartz = xCart[2];
    // Step 2: Cartesian -> in-bounds (x0,x1,x2), via NRPy+-generated code
    //         that sets Cart_to_xx{0,1,2}_inbounds from Cartx/Carty/Cartz.
    REAL Cart_to_xx0_inbounds,Cart_to_xx1_inbounds,Cart_to_xx2_inbounds;
#include "Cart_to_xx.h"
    // Step 3: in-bounds coordinate -> nearest gridpoint index on the
    //         cell-centered grid (+0.5 rounds to nearest integer).
    int i0_inbounds = (int)( (Cart_to_xx0_inbounds - xxmin[0] - (1.0/2.0)*dxx[0] + ((REAL)NGHOSTS)*dxx[0])/dxx[0] + 0.5 );
    int i1_inbounds = (int)( (Cart_to_xx1_inbounds - xxmin[1] - (1.0/2.0)*dxx[1] + ((REAL)NGHOSTS)*dxx[1])/dxx[1] + 0.5 );
    int i2_inbounds = (int)( (Cart_to_xx2_inbounds - xxmin[2] - (1.0/2.0)*dxx[2] + ((REAL)NGHOSTS)*dxx[2])/dxx[2] + 0.5 );
    // Sanity check: the mapped index must reproduce the same Cartesian point.
    REAL xCart_orig[3]; for(int ii=0;ii<3;ii++) xCart_orig[ii] = xCart[ii];
    xxCart(xx, i0_inbounds,i1_inbounds,i2_inbounds, xCart);
    //fprintf(stderr,"Cartesian agreement: ( %.15e %.15e %.15e ) ?= ( %.15e %.15e %.15e )\n",
    //        (double)xCart_orig[0],(double)xCart_orig[1],(double)xCart_orig[2],
    //        (double)xCart[0],(double)xCart[1],(double)xCart[2]);
#define EPS_ABS 1e-8
    if(fabs( (double)(xCart_orig[0] - xCart[0]) ) > EPS_ABS ||
       fabs( (double)(xCart_orig[1] - xCart[1]) ) > EPS_ABS ||
       fabs( (double)(xCart_orig[2] - xCart[2]) ) > EPS_ABS) {
      fprintf(stderr,"Error. Cartesian disagreement: ( %.15e %.15e %.15e ) != ( %.15e %.15e %.15e )\n",
              (double)xCart_orig[0],(double)xCart_orig[1],(double)xCart_orig[2],
              (double)xCart[0],(double)xCart[1],(double)xCart[2]);
      exit(1);
    }
    // If the point maps to itself it is either an interior point or a true
    // outer-boundary point: store the (-1,-1,-1) sentinel. Otherwise store
    // the interior index this ghost-zone point mirrors.
    if(i0_inbounds-i0 == 0 && i1_inbounds-i1 == 0 && i2_inbounds-i2 == 0) {
      bc_gz_map[IDX3(i0,i1,i2)].i0=-1;
      bc_gz_map[IDX3(i0,i1,i2)].i1=-1;
      bc_gz_map[IDX3(i0,i1,i2)].i2=-1;
    } else {
      bc_gz_map[IDX3(i0,i1,i2)].i0=i0_inbounds;
      bc_gz_map[IDX3(i0,i1,i2)].i1=i1_inbounds;
      bc_gz_map[IDX3(i0,i1,i2)].i2=i2_inbounds;
    }
  }
}

// Part P6: Declare boundary condition OB_UPDATE macro,
//          which updates a single face of the 3D grid cube
//          using quadratic polynomial extrapolation.
const int MAXFACE = -1;
const int NUL     = +0;
const int MINFACE = +1;

// Outer-boundary update: quadratic extrapolation from the three points
// immediately inside the face (only where the map sentinel is -1).
#define OB_UPDATE_OUTER(which_gf, bc_gz_map, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) { \
    const int idx3 = IDX3(i0,i1,i2); \
    if(bc_gz_map[idx3].i0 == -1) { \
      gfs[IDX4(which_gf,i0,i1,i2)] = \
        +3.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
        -3.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
        +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
    } \
  }

// Inner (parity) update: copy from the interior point recorded in the map
// (only where the map sentinel is not -1).
#define OB_UPDATE_INNER(which_gf, bc_gz_map, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) { \
    const int idx3 = IDX3(i0,i1,i2); \
    if(bc_gz_map[idx3].i0 != -1) { \
      gfs[IDX4(which_gf,i0,i1,i2)] = \
        gfs[IDX4(which_gf, \
                 bc_gz_map[idx3].i0, \
                 bc_gz_map[idx3].i1, \
                 bc_gz_map[idx3].i2)]; \
    } \
  }

// Part P7: Boundary condition driver routine: Apply BCs to all six
//          boundary faces of the cube, filling in the innermost
//          ghost zone first, and moving outward.
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],gz_map *bc_gz_map,REAL *gfs) {
#pragma omp parallel for
  for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
    // imin/imax delimit the already-valid region; they grow by one point
    // per face as each successive ghost-zone layer is filled.
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
      // First apply OUTER boundary conditions,
      // in case an INNER (parity) boundary point
      // needs data at the outer boundary:
      // After updating each face, adjust imin[] and imax[]
      // to reflect the newly-updated face extents.
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
      OB_UPDATE_OUTER(which_gf, bc_gz_map, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
      // Then apply INNER (parity) boundary conditions:
      for(int ii=0;ii<3;ii++) {imin[ii]++; imax[ii]--;}
      // After updating each face, adjust imin[] and imax[]
      // to reflect the newly-updated face extents.
      OB_UPDATE_INNER(which_gf, bc_gz_map, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
      OB_UPDATE_INNER(which_gf, bc_gz_map, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
      OB_UPDATE_INNER(which_gf, bc_gz_map, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
      OB_UPDATE_INNER(which_gf, bc_gz_map, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
      OB_UPDATE_INNER(which_gf, bc_gz_map, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
      OB_UPDATE_INNER(which_gf, bc_gz_map, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
    }
  }
}
# -
# <a id='cfl'></a>
#
# ## Step 2.b: Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep \[Back to [top](#toc)\]
# $$\label{cfl}$$
#
# In order for our explicit-timestepping numerical solution to the scalar wave equation to be stable, it must satisfy the [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673) condition:
# $$
# \Delta t \le \frac{\min(ds_i)}{c},
# $$
# where $c$ is the wavespeed, and
# $$ds_i = h_i \Delta x^i$$
# is the proper distance between neighboring gridpoints in the $i$th direction (in 3D, there are 3 directions), $h_i$ is the $i$th reference metric scale factor, and $\Delta x^i$ is the uniform grid spacing in the $i$th direction:
# Generic coordinate NRPy+ file output, Part 4: output the proper distance between gridpoints in given coordinate system.
# ds_dirn[i] = h_i * dxx[i], where h_i is the i-th reference-metric scale
# factor and dxx[i] the uniform grid spacing — needed for the CFL timestep.
dxx = ixp.declarerank1("dxx",DIM=3)
ds_dirn = rfm.ds_dirn(dxx)
outputC([ds_dirn[0],ds_dirn[1],ds_dirn[2]],["ds_dirn0","ds_dirn1","ds_dirn2"],"ScalarWaveCurvilinear/ds_dirn.h")
# <a id='mainc'></a>
#
# # Step 3: `ScalarWaveCurvilinear_Playground.c`: The Main C Code \[Back to [top](#toc)\]
# $$\label{mainc}$$
#
# Just as in [the start-to-finish, solving the scalar wave equation in Cartesian coordinates module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will implement the scalar wave equation via the Method of Lines. As discussed above, the critical differences between this code and the Cartesian version are as follows:
# 1. The CFL-constrained timestep depends on the proper distance between neighboring gridpoints
# 1. The boundary conditions must account for the fact that ghost zone points lying in the domain exterior can map either to the interior of the domain, or lie on the outer boundary. In the former case, we simply copy the data from the interior. In the latter case, we apply the usual outer boundary conditions.
# 1. The numerical grids must be staggered to avoid direct evaluation of the equations on coordinate singularities.
# Part P0: Write the header that #defines NGHOSTS, half the centered
# finite-differencing order, as required by the C code below.
num_ghosts = int(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER")/2)
header_text = ("// Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER\n"
               "#define NGHOSTS "+str(num_ghosts)+"\n")
with open("ScalarWaveCurvilinear/ScalarWaveCurvilinear_NGHOSTS.h", "w") as file:
    file.write(header_text)
# +
# %%writefile ScalarWaveCurvilinear/ScalarWaveCurvilinear_Playground.c
// Step P0: Set NGHOSTS, the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
#include "ScalarWaveCurvilinear_NGHOSTS.h"
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
// Step P2: Add needed #define's to set data type, the IDX4() macro, and the gridfunctions
// Step P2a: set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits.
#define REAL double
// Step P3: Set free parameters
// Step P3a: Free parameters for the numerical grid
// Spherical coordinates parameter
// Set RMAX, the scale of the numerical domain,
// and the max radius in Spherical coordinates.
#define RMAX 10.0
// SinhSpherical coordinates parameters
const REAL AMPL = RMAX;
const REAL SINHW = 0.4;
// Cylindrical coordinates parameters
const REAL ZMIN = -RMAX;
const REAL ZMAX = RMAX;
const REAL RHOMAX = RMAX;
const REAL AMPLRHO = RMAX;
const REAL SINHWRHO= 0.4;
const REAL AMPLZ = RMAX;
const REAL SINHWZ= 0.4;
// Cartesian coordinates parameters
const REAL xmin = -RMAX, xmax = RMAX;
const REAL ymin = -RMAX, ymax = RMAX;
const REAL zmin = -RMAX, zmax = RMAX;
// SymTP coordinates parameters
const REAL bScale = 1.0;
const REAL AMAX = RMAX;
// Time coordinate parameters
const REAL t_final = 0.7*RMAX; /* Final time is set so that at t=t_final,
* data at the origin have not been corrupted
* by the approximate outer boundary condition */
const REAL CFL_FACTOR = 1.0; // Set the CFL Factor
// Step P3b: Free parameters for the scalar wave evolution
const REAL wavespeed = 1.0;
const REAL kk0 = 1.0;
const REAL kk1 = 1.0;
const REAL kk2 = 1.0;
// Step P4: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of
// data in a 1D array. In this case, consecutive values of "i"
// (all other indices held to a fixed value) are consecutive in memory, where
// consecutive values of "j" (fixing all other indices) are separated by
// Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of
// "k" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc.
#define IDX4(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) )
#define IDX3(i,j,k) ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * (k) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
// Part P5: Set UUGF and VVGF macros, as well as LOOP_REGION and xxCart()
#define NUM_EVOL_GFS 2
#define UUGF 0
#define VVGF 1
// Map grid coordinates (xx[0][i0], xx[1][i1], xx[2][i2]) to Cartesian
// coordinates, written into xCart[3].
// NOTE: the local names xx0, xx1, xx2 are read by the NRPy+-generated code
// in xxCart.h — do not rename them.
void xxCart(REAL *xx[3],const int i0,const int i1,const int i2, REAL xCart[3]) {
    REAL xx0 = xx[0][i0];
    REAL xx1 = xx[1][i1];
    REAL xx2 = xx[2][i2];
#include "xxCart.h"
}
// Step P6: Include basic functions needed to impose curvilinear
// parity and boundary conditions.
// Contains generalized boundary condition driver for curvilinear coordinate systems:
#include "curvilinear_boundary_conditions.h"
// Step P6: Find the CFL-constrained timestep.
// Scans all interior gridpoints for the minimum proper distance between
// neighboring points (the generated ds_dirn.h sets ds_dirn0/1/2 from the
// local coordinate system) and returns dt = dsmin * CFL_FACTOR / wavespeed.
REAL find_timestep(const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3],REAL *xx[3], const REAL CFL_FACTOR) {
    const REAL dxx0 = dxx[0], dxx1 = dxx[1], dxx2 = dxx[2];
    REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.
    LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS,
                NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS,
                NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) {
        // xx0..xx2 and dxx0..dxx2 are read by the generated ds_dirn.h — do not rename.
        const REAL xx0 = xx[0][i0], xx1 = xx[1][i1], xx2 = xx[2][i2];
        REAL ds_dirn0, ds_dirn1, ds_dirn2;
#include "ds_dirn.h"
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
        // Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2);
        dsmin = MIN(dsmin,MIN(ds_dirn0,MIN(ds_dirn1,ds_dirn2)));
    }
    return dsmin*CFL_FACTOR/wavespeed;
}
// Part P7: Declare the function for the exact solution. time==0 corresponds to the initial data.
// Writes the exact solution at the given time into in_gfs at every gridpoint,
// including ghost zones.
// NOTE: xx0, xx1, xx2 here hold *Cartesian* coordinates (not grid coordinates),
// as required by the generated ScalarWaveCartesian_ExactSolution.h — do not rename.
void exact_solution(const int Nxx_plus_2NGHOSTS[3],REAL time,REAL *xx[3], REAL *in_gfs) {
#pragma omp parallel for
    LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], 0,Nxx_plus_2NGHOSTS[1], 0,Nxx_plus_2NGHOSTS[2]) {
        REAL xCart[3];
        xxCart(xx, i0,i1,i2, xCart);
        REAL xx0 = xCart[0];
        REAL xx1 = xCart[1];
        REAL xx2 = xCart[2];
#include "ScalarWaveCartesian_ExactSolution.h"
    }
}
// Part P6: Declare the function to evaluate the scalar wave RHSs.
// The generated ScalarWaveCurvilinear_RHSs.h reads in_gfs and writes the
// right-hand sides into rhs_gfs at interior points.
void rhs_eval(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], const REAL *in_gfs,REAL *rhs_gfs) {
#include "ScalarWaveCurvilinear_RHSs.h"
}
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up scalar wave initial data
// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
//         applying quadratic extrapolation outer boundary conditions.
// Step 3: Output relative error between numerical and exact solution.
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
    // Step 0a: Read command-line input, error out if nonconformant
    if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) {
        printf("Error: Expected one command-line argument: ./ScalarWaveCurvilinear_Playground Nx0 Nx1 Nx2,\n");
        printf("where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
        printf("Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
        exit(1);
    }
    // Step 0b: Set up numerical grid structure, first in space...
    const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };
    if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {
        printf("Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
        printf("    For example, in case of angular directions, proper symmetry zones will not exist.\n");
        exit(1);
    }
    const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS };
    const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2];
#include "xxminmax.h"
    // Step 0c: Allocate memory for gridfunctions
    // NOTE(review): malloc return values are unchecked throughout; a failed
    // allocation will crash on first use rather than exiting cleanly.
    REAL *evol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *next_in_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *k1_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *k2_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *k3_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *k4_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    // Step 0d: Set up space and time coordinates
    // Step 0d.i: Set \Delta x^i on uniform grids.
    REAL dxx[3];
    for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]);
    // Step 0d.ii: Set up uniform coordinate grids
    REAL *xx[3];
    for(int i=0;i<3;i++) {
        xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]);
        for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) {
            xx[i][j] = xxmin[i] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*dxx[i]; // Cell-centered grid.
        }
    }
    // Step 0d.iii: Set timestep based on smallest proper distance between gridpoints and CFL factor
    REAL dt = find_timestep(Nxx_plus_2NGHOSTS, dxx,xx, CFL_FACTOR);
    //printf("# Timestep set to = %e\n",(double)dt);
    int N_final = (int)(t_final / dt + 0.5); // The number of points in time.
                                             // Add 0.5 to account for C rounding down
                                             // typecasts to integers.
    // Step 0f: Find ghostzone mappings:
    //const int num_gz_tot = Nxx_plus_2NGHOSTS_tot - Nxx[0]*Nxx[1]*Nxx[2];
    gz_map *bc_gz_map = (gz_map *)malloc(sizeof(gz_map)*Nxx_plus_2NGHOSTS_tot);
    set_up_bc_gz_map(Nxx_plus_2NGHOSTS, xx, dxx, xxmin, xxmax, bc_gz_map);
    // Step 1: Set up initial data to be exact solution at time=0:
    exact_solution(Nxx_plus_2NGHOSTS, 0.0, xx, evol_gfs);
    for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time.
        /* Step 2: Output relative error between numerical and exact solution, */
        const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2.
        const int i1mid=Nxx_plus_2NGHOSTS[1]/2;
        const int i2mid=Nxx_plus_2NGHOSTS[2]/2;
        // k1_gfs is borrowed here as scratch storage for the exact solution;
        // it is overwritten below by the RK4 stage-1 evaluation, so this is safe.
        exact_solution(Nxx_plus_2NGHOSTS,((REAL)n)*dt, xx, k1_gfs);
        const double exact = (double)k1_gfs[IDX4(0,i0MIN,i1mid,i2mid)]; // gf index 0 == UUGF
        const double numerical = (double)evol_gfs[IDX4(0,i0MIN,i1mid,i2mid)];
        // NOTE(review): relative_error is computed but never used (only
        // numerical and exact are printed below).
        const double relative_error = fabs((exact-numerical)/exact);
        // Compute log of L2 norm of difference between numerical and exact solutions:
        //   log_L2_Norm = log10( sqrt[Integral( [numerical - exact]^2 * dV)] )
        REAL integral = 0.0;
        REAL numpts = 0.0;
        // Restrict the norm to radii < 0.3*RMAX so the approximate outer
        // boundary condition does not contaminate the error measure.
        for(int i2=0;i2<Nxx_plus_2NGHOSTS[2];i2++)
            for(int i1=0;i1<Nxx_plus_2NGHOSTS[1];i1++)
                for(int i0=0;i0<Nxx_plus_2NGHOSTS[0];i0++) {
                    REAL xCart[3]; xxCart(xx,i0,i1,i2, xCart);
                    if(sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]) < RMAX*0.3) {
                        // NOTE: this inner "exact" deliberately shadows the outer const double exact.
                        double num = (double)evol_gfs[IDX4(UUGF,i0,i1,i2)];
                        double exact = (double)k1_gfs[ IDX4(UUGF,i0,i1,i2)];
                        integral += (num - exact)*(num - exact);
                        numpts += 1.0;
                        //printf("num = %e exact = %e\n",num,exact);
                    }
                }
        // Compute and output the log of the L2 norm.
        REAL log_L2_Norm = log10(sqrt(integral/numpts));
        printf("%e %e %e || %e %e %e: %e\n",((double)n)*dt,log_L2_Norm,
               (double)xx[0][i0MIN],(double)xx[1][i1mid],(double)xx[2][i2mid], numerical,exact);
        // Step 3: Evolve scalar wave initial data forward one timestep using Method of Lines with
        //         the RK4 algorithm, applying quadratic extrapolation outer boundary conditions.
        /***************************************************/
        /* Implement RK4 for Method of Lines timestepping: */
        /***************************************************/
        /* -= RK4: Step 1 of 4 =- */
        /* First evaluate k1 = RHSs expression             */
        rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,evol_gfs, k1_gfs);
        /* Next k1 -> k1*dt, and then set the input for    */
        /*    the next RHS eval call to y_n+k1/2           */
        #pragma omp parallel for
        for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
            k1_gfs[i] *= dt;
            next_in_gfs[i] = evol_gfs[i] + k1_gfs[i]*0.5;
        }
        /* Finally, apply boundary conditions to           */
        /* next_in_gfs, so its data are set everywhere.    */
        apply_bcs(Nxx,Nxx_plus_2NGHOSTS,bc_gz_map,next_in_gfs);
        /* -= RK4: Step 2 of 4 =- */
        rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k2_gfs);
        #pragma omp parallel for
        for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
            k2_gfs[i] *= dt;
            next_in_gfs[i] = evol_gfs[i] + k2_gfs[i]*0.5;
        }
        apply_bcs(Nxx,Nxx_plus_2NGHOSTS,bc_gz_map,next_in_gfs);
        /* -= RK4: Step 3 of 4 =- */
        rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k3_gfs);
        #pragma omp parallel for
        for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
            k3_gfs[i] *= dt;
            next_in_gfs[i] = evol_gfs[i] + k3_gfs[i];
        }
        apply_bcs(Nxx,Nxx_plus_2NGHOSTS,bc_gz_map,next_in_gfs);
        /* -= RK4: Step 4 of 4 =- */
        rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k4_gfs);
        #pragma omp parallel for
        for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
            k4_gfs[i] *= dt;
            // y_{n+1} = y_n + (k1 + 2 k2 + 2 k3 + k4)/6
            evol_gfs[i] += (1.0/6.0)*(k1_gfs[i] + 2.0*k2_gfs[i] + 2.0*k3_gfs[i] + k4_gfs[i]);
        }
        apply_bcs(Nxx,Nxx_plus_2NGHOSTS,bc_gz_map,evol_gfs);
    } // End main loop to progress forward in time.
    // Step 4: Free all allocated memory
    free(bc_gz_map);
    free(k4_gfs);
    free(k3_gfs);
    free(k2_gfs);
    free(k1_gfs);
    free(next_in_gfs);
    free(evol_gfs);
    for(int i=0;i<3;i++) free(xx[i]);
    return 0;
}
# +
# Compile the generated C code and run it at two resolutions so the
# convergence test below has data to compare.
import cmdline_helper as cmd
import os
outdir = os.path.join("ScalarWaveCurvilinear/")
cmd.mkdir(outdir)
cmd.delete_existing_files(os.path.join(outdir,"*"))
cmd.C_compile(os.path.join(outdir,"ScalarWaveCurvilinear_Playground.c"), "ScalarWaveCurvilinear_Playground")
# #!icc -align -qopenmp -xHost -O2 -qopt-report=5 -qopt-report-phase ipo -qopt-report-phase vec -vec-threshold1 -qopt-prefetch=4 ScalarWaveCurvilinear/ScalarWaveCurvilinear_Playground.c -o ScalarWaveCurvilinear_Playground
cmd.delete_existing_files("out-*resolution.txt")
# Cartesian grids use cubical resolutions; curvilinear grids halve the
# angular direction (Nx1) to keep costs comparable.
if par.parval_from_str("reference_metric::CoordSystem") == "Cartesian":
    cmd.Execute("ScalarWaveCurvilinear_Playground", "16 16 16", os.path.join(outdir,"out-lowresolution.txt"))
    cmd.Execute("ScalarWaveCurvilinear_Playground", "24 24 24", os.path.join(outdir,"out-medresolution.txt"))
else:
    cmd.Execute("ScalarWaveCurvilinear_Playground", "16 8 16", os.path.join(outdir,"out-lowresolution.txt"))
    cmd.Execute("ScalarWaveCurvilinear_Playground", "24 12 24", os.path.join(outdir,"out-medresolution.txt"))
# For benchmarking purposes only. 97.91s icc desktop
# cmd.Execute("ScalarWaveCurvilinear_Playground", "48 24 48", "out-hghresolution.txt")
# -
# <a id='convergence'></a>
#
# # Step 4: Code Validation: Verify that relative error in numerical solution converges to zero at the expected order \[Back to [top](#toc)\]
# $$\label{convergence}$$
# The numerical solution $u_{\rm num}(x0,x1,x2,t)$ should converge to the exact solution $u_{\rm exact}(x0,x1,x2,t)$ at fourth order, which means that
# $$
# u_{\rm num}(x0,x1,x2,t) = u_{\rm exact}(x0,x1,x2,t) + \mathcal{O}\left((\Delta x0)^4\right)+ \mathcal{O}\left((\Delta x1)^4\right)+ \mathcal{O}\left((\Delta x2)^4\right)+ \mathcal{O}\left((\Delta t)^4\right).
# $$
#
# Thus the relative error $E_{\rm rel}$ should satisfy:
# $$
# E_{\rm rel} = \left|\frac{u_{\rm num}(x0,x1,x2,t) - u_{\rm exact}(x0,x1,x2,t)}{u_{\rm exact}(x0,x1,x2,t)}\right| = \mathcal{O}\left((\Delta x0)^4\right)+ \mathcal{O}\left((\Delta x1)^4\right)+ \mathcal{O}\left((\Delta x2)^4\right)+ \mathcal{O}\left((\Delta t)^4\right).
# $$
#
# We confirm this convergence behavior by first solving the scalar wave equation at two resolutions: $16\times 8\times 16$ (or $16^3$ if `reference_metric::CoordSystem` is set to `Cartesian`), and $24\times 12\times 24$ (or $24^3$ if `reference_metric::CoordSystem` is set to `Cartesian`) and evaluating the maximum logarithmic relative error $\log_{10} E_{\rm rel,max}$ between numerical and exact solutions within a region $R < 0.1 {\rm RMAX}$ at all iterations.
#
# Since we increase the resolution uniformly over all four coordinates $(x0,x1,x2,t)$, $E_{\rm rel}$ should drop uniformly as $(\Delta x0)^4$:
# $$
# E_{\rm rel} \propto (\Delta x0)^4.
# $$
#
# So at the two resolutions, we should find that
# $$
# \frac{E_{\rm rel}(16\times 8\times 16)}{E_{\rm rel}(24\times 12\times 24)} = \frac{E_{\rm rel}(16^3)}{E_{\rm rel}(24^3)} \approx \left(\frac{(\Delta x0)_{16}}{(\Delta x0)_{24}}\right)^{4} = \left(\frac{24}{16}\right)^4 \approx 5.
# $$
#
# Since we're measuring logarithmic relative error, this should be
# $$
# \log_{10}\left(\frac{E_{\rm rel}(16\times 8\times 16)}{E_{\rm rel}(24\times 12\times 24)}\right) = \log_{10}\left(\frac{E_{\rm rel}(16^3)}{E_{\rm rel}(24^3)}\right) \approx \log_{10}(5).
# $$
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import mpmath as mp
import csv
def file_reader(filename):
    """Read a space-delimited output file; return (col0, col1) as float lists.

    Each data line is expected to start with <time> <log10(max rel. error)>;
    any further columns are ignored.  Rows with fewer than two fields are
    skipped (the original zip-transpose silently truncated *all* columns to
    the shortest row in that case).
    """
    col0 = []
    col1 = []
    with open(filename) as file:
        for row in csv.reader(file, delimiter=" "):
            if len(row) < 2:
                continue  # blank or malformed line
            col0.append(float(row[0]))
            col1.append(float(row[1]))
    return col0, col1
# Read both resolutions. The original used backslash separators
# ('ScalarWaveCurvilinear\out-...'), which only resolve on Windows (and rely
# on '\o' not being a recognized escape sequence); forward slashes work on
# every platform.
first_col16,second_col16 = file_reader('ScalarWaveCurvilinear/out-lowresolution.txt')
first_col24,second_col24 = file_reader('ScalarWaveCurvilinear/out-medresolution.txt')
second_col16_rescaled4o = []
second_col16_rescaled5o = []
for i in range(len(second_col16)):
    # data16 = data24*(16/24)**4
    # -> log10(data16) = log10(data24) + 4*log10(16/24)
    second_col16_rescaled4o.append(second_col16[i] + 4*mp.log10(16./24.))
    second_col16_rescaled5o.append(second_col16[i] + 5*mp.log10(16./24.))
# https://matplotlib.org/gallery/text_labels_and_annotations/legend.html#sphx-glr-gallery-text-labels-and-annotations-legend-py
fig, ax = plt.subplots()
plt.title("Demonstrating 4th-order Convergence: "+par.parval_from_str("reference_metric::CoordSystem")+" Coordinates")
plt.xlabel("time")
plt.ylabel("log10(Max relative error)")
ax.plot(first_col24, second_col24, 'k-', label='logErel(N0=24)')
ax.plot(first_col16, second_col16_rescaled4o, 'k--', label='logErel(N0=16) + log((16/24)^4)')
# Cylindrical coordinates empirically converge faster; overlay the 5th-order rescaling.
if par.parval_from_str("reference_metric::CoordSystem") == "Cylindrical":
    ax.plot(first_col16, second_col16_rescaled5o, 'k.', label='(Assuming 5th-order convergence)')
legend = ax.legend(loc='lower right', shadow=True, fontsize='large')
legend.get_frame().set_facecolor('C1')
plt.show()
# -
# <a id='latex_pdf_output'></a>
#
# # Step 5: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-Start_to_Finish-ScalarWaveCurvilinear.pdf](Tutorial-Start_to_Finish-ScalarWaveCurvilinear.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb
# !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex
# !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex
# !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Session 7 : Active Learning - Word2vec and Support Vector Machines
# The aim of this notebook is to explore semi-supervised learning via self training.
#
# Packages
# +
# %matplotlib inline
import os
import re
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import gensim
from gensim.models.word2vec import Word2Vec
from gensim.models import KeyedVectors
import sklearn
from sklearn.semi_supervised import LabelPropagation
from sklearn.semi_supervised import LabelSpreading
from sklearn.metrics import classification_report
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
warnings.filterwarnings('ignore')
# -
# Paths
os.getcwd() # can help to obtain the good paths
# your path to word2vec google vectors
path_to_google_vectors = '/Users/laks/Desktop/Polytechnique/Cours/Datacamp Capgemini (MAP540)/Day5/'
# ## I - Read data
# Labeled data
# If your data are not in the current directory, don't forget : PATH + 'X_HEC_label.csv'
labeled_data = pd.read_csv('X_HEC_label.csv', sep = ",", index_col = False)
labeled_data.head(3)
len(labeled_data)
# Unlabeled data
unlabeled_data = pd.read_json('english_reviews.json') #, sep = ",", index_col = False)
unlabeled_data.head(3)
len(unlabeled_data)
unlabeled_data['tokens'][0]
# ## II - Short Exploratory Analysis and Preprocessing
labeled_data.columns
labeled_data[['service', 'activities', 'cost', 'family', 'food', 'infrastructure']].hist(alpha = 0.6, figsize=(10, 7))
labeled_data.groupby('family').count()
labeled_data.groupby('infrastructure').count()
labeled_data.groupby('cost').count()
# To simplify the following procedure, we are going to remove the bad-annotated reviews with -1 and 9 for columns 'family', 'infrastructure' and 'cost'.
# Drop mis-annotated rows (-1 for infrastructure/cost, 9 for family — see the
# groupby counts above), then rebuild a contiguous 0..n-1 index so positional
# and label indexing agree downstream.
labeled_data = labeled_data[labeled_data.infrastructure != -1]
labeled_data = labeled_data[labeled_data.cost != -1]
labeled_data = labeled_data[labeled_data.family != 9]
labeled_data = labeled_data.reset_index(drop = True)
len(labeled_data)
# ## III - Active Learning and Self-Training
# Let's explain the strategy :
# 1. We split labeled data in two groups : a training labeled dataset and a test labeled dataset.
# 2. We use the training labeled dataset to train a SVM classifier (or other classifiers).
# 3. We test the trained model on our test labeled dataset and analyze performances.
# 4. If performances are good enough, we can stop the process. Otherwise, we craftily label unlabeled samples and we iterate this strategy from 2 until good enough performances.
#
# An important question : What does "we craftily label unlabeled samples" mean ?
#
# Case 1 : If a label is predicted with very high probability (or equivalently with very low probability in a binary classification case), we look carefully to confirm the label. Then, we add the labeled sample in the training labeled dataset for the next iterations.
#
# Case 2 : If a label is predicted with medium probability (around 0.5), we consider it as a local model weakness because of its indecision. Thus, we label the sample and integrate the new labeled sample in the training labeled dataset for the next iterations.
#
# NB : At each iteration, we randomly choose some samples belonging to cases 1 and 2 for manual labelling. A limit/difficulty concerns the "random choice" of samples to label, because we have no prior knowledge about the true label distribution in the unlabeled dataset.
#
# NB : You have to replace sentiments -1 by another value, 2 for instance, because we will use the value -1 to mean "unlabeled sample" in the label propagation process.
#
# Let's try with column "cost" binary classification task ! It will be the same procedure for all other labels.
#
# ### 1). Text preprocessing
# Stop words removed before embedding. Stored as a frozenset so membership
# tests in remove_stop_words are O(1); the original list contained duplicate
# entries ("above", "the"), now removed.
# NOTE(review): "good", "great", "woburn" are domain-specific additions rather
# than standard English stop words — confirm they should stay.
english_stopwords = frozenset([
    "a", "about", "above", "across", "after", "afterwards",
    "again", "against", "all", "almost", "alone", "along", "already",
    "also", "although", "always", "am", "among", "amongst", "amoungst",
    "amount", "an", "and", "another", "any", "anyhow", "anyone",
    "anything", "anyway", "anywhere", "are", "around", "as", "at",
    "back", "be", "became", "because", "become", "becomes", "becoming",
    "been", "before", "beforehand", "behind", "being", "below",
    "beside", "besides", "between", "beyond", "bill", "both",
    "bottom", "but", "by", "call", "can", "cannot", "cant", "co",
    "con", "could", "couldnt", "cry", "de", "describe", "detail",
    "do", "done", "down", "due", "during", "each", "eg", "eight",
    "either", "eleven", "else", "elsewhere", "empty", "enough", "etc",
    "even", "ever", "every", "everyone", "everything", "everywhere",
    "except", "few", "fifteen", "fify", "fill", "find", "fire", "first",
    "five", "for", "former", "formerly", "forty", "found", "four", "from",
    "front", "full", "further", "get", "give", "good", "great", "woburn", "go",
    "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
    "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
    "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest",
    "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly",
    "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might",
    "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must",
    "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next",
    "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere",
    "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others",
    "otherwise", "our", "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
    "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems",
    "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty",
    "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere",
    "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them",
    "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein",
    "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three",
    "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards",
    "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was",
    "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter",
    "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
    "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would",
    "yet", "you", "your", "yours", "yourself", "yourselves"])
def character_replacement(input_string):
    """Normalize a raw review string.

    Repairs literal "\\uXXXX" escape residue left by the JSON extraction,
    lowercases the text, and replaces punctuation/digits with spaces so the
    result can be whitespace-tokenized.
    """
    # Order matters: the bare "\\" entry strips remaining backslashes, so the
    # escape sequences that still carry one must appear before it.
    character_mapping = {"\\u00e9": "é",
                         "\\u2019": "'",
                         "\\": "",
                         "\\u00fb": "û",
                         "u00e8": "è",
                         "u00e0": "à",
                         "u00f4": "ô",
                         "u00ea": "ê",
                         "u00ee": "i",
                         "u00fb": "û",
                         "u2018": "'",
                         # BUG FIX: the original listed "u00e2" twice ("a",
                         # then "â"); only the last value ever took effect, so
                         # the dead duplicate entry has been removed.
                         "u00e2": "â",
                         "u00ab": "'",
                         "u00bb": "'",
                         "u00e7": "ç",
                         "u00f9": "ù",
                         "u00a3": "£",
                         }
    for character in character_mapping:
        input_string = input_string.replace(character, character_mapping[character])
    input_string = input_string.lower()
    # str.translate does all single-character replacements in one C-level pass.
    characters_to_remove = ["@", "/", "#", ".", ",", "!", "?", "(", ")", "-", "_", "’", "'", "\"", ":", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
    transformation_dict = {initial: " " for initial in characters_to_remove}
    no_punctuation_reviews = input_string.translate(str.maketrans(transformation_dict))
    return no_punctuation_reviews
# +
def tokenize(input_string):
    """Split a cleaned review string into word tokens via NLTK's word_tokenize."""
    return word_tokenize(input_string)
def remove_stop_words(input_tokens, english_stopwords = english_stopwords):
    """Return a new list with every stop-word token dropped."""
    return [token for token in input_tokens if token not in english_stopwords]
lemmatizer = WordNetLemmatizer()
def lemmatize(tokens, lemmatizer = lemmatizer):
    """Lemmatize each token three times: as adjective, then verb, then noun.

    The nesting order is deliberate — presumably chosen so each POS pass can
    simplify the previous one's output; do not reorder without re-testing.
    """
    tokens = [lemmatizer.lemmatize(lemmatizer.lemmatize(lemmatizer.lemmatize(token,pos='a'),pos='v'),pos='n') for token in tokens]
    return tokens
# -
# Cleaning pipeline over the labeled reviews:
# normalize characters -> tokenize -> keep tokens longer than 3 chars -> drop stop words.
labeled_data['review'] = labeled_data['review'].apply(lambda x: character_replacement(x))
labeled_data['tokens'] = labeled_data['review'].apply(lambda x: tokenize(x))
# NOTE(review): len(...) > 3 keeps only words of length >= 4, silently
# dropping short informative tokens such as "bad" — confirm this is intended.
labeled_data['tokens'] = labeled_data['tokens'].apply(lambda token_list: [meaningful_word for meaningful_word in token_list if len(meaningful_word) > 3])
labeled_data['tokens'] = labeled_data['tokens'].apply(lambda x: remove_stop_words(x))
labeled_data.shape
# ### 2). Building datasets
# +
# First 2000 labeled reviews train; the remainder are held out for testing.
# The task here is binary classification on the 'cost' column.
training_set = {'tokens' : list(labeled_data['tokens'])[:2000],
                'labels' : list(labeled_data['cost'])[:2000]}
training_set = pd.DataFrame(training_set)
test_set = {'tokens' : list(labeled_data['tokens'])[2000:],
            'labels' : list(labeled_data['cost'])[2000:]}
test_set = pd.DataFrame(test_set)
# -
training_set.head()
# +
# Stack the labeled training reviews on top of the unlabeled corpus;
# rows 0..1999 are labeled, everything after is unlabeled.
semi_supervised_data = {'tokens' : list(training_set['tokens']) + list(unlabeled_data['tokens']),
                        'labels' : list(training_set['labels']) + [-1]*len(unlabeled_data)}
# We use -1 to encode unlabeled samples
semi_supervised_data = pd.DataFrame(semi_supervised_data)
semi_supervised_data.head()
# -
# ### 3). Naïve document embeddings with word2vec
w2v = KeyedVectors.load_word2vec_format(path_to_google_vectors + 'GoogleNews-vectors-negative300.bin', binary = True)
def my_vector_getter(word, wv = None) :
    """Return the (1, 300) word2vec vector for *word*.

    Falls back to the module-level google vectors when *wv* is None, and to a
    zero vector when the word is out of vocabulary.
    """
    if wv is None:
        wv = w2v  # resolved lazily, so this cell parses even before w2v is loaded
    try:
        return wv[word].reshape(1, -1)
    except KeyError:
        # Narrowed from a bare except: only out-of-vocabulary lookups are
        # expected; real bugs (e.g. a misconfigured wv) now surface instead
        # of silently becoming zero vectors.
        return np.zeros((1, 300))
def document_embedding(text, wv = None) :
    """Naive document embedding: centroid of the tokens' word vectors."""
    # BUG FIX: the original ignored its wv argument and always used the
    # global w2v; the parameter is now threaded through.
    embeddings = np.concatenate([my_vector_getter(token, wv) for token in text])
    centroid = np.mean(embeddings, axis=0).reshape(1, -1)
    return centroid
document_embedding(semi_supervised_data['tokens'][0]).shape
# +
# Embed every document (labeled training rows first, then unlabeled) into a
# 300-d vector.
X = np.zeros((len(semi_supervised_data), 300))
for i in range(len(semi_supervised_data)) :
    X[i] = document_embedding(semi_supervised_data['tokens'][i])
# -
# X is already a plain ndarray (no .values attribute exists); keep an
# X_values alias because later cells reference that name, which was
# otherwise undefined (the original assignment was commented out).
X_values = X
X_train = X[:2000]
Y_train = training_set['labels'].values
# ### 4). Label propagation
# Explore different hyperparameters of LabelPropagation algorithms to improve performances ! Training may be too long for the course with based-word2vec representation, so you can perform PCA :
# Project the 300-d embeddings onto 50 principal components to keep SVM
# training fast in class, then fit a default (RBF-kernel) SVC on the
# labeled training rows.
pca = PCA(n_components = 50)
X_train_pca = pca.fit_transform(X_train)
svm = SVC()
svm.fit(X_train_pca, Y_train)
X_train_pca.shape
# Evaluate on test set
# +
# Embed the held-out reviews, then project them with the *already fitted*
# PCA (transform, not fit_transform, to avoid train/test leakage).
X_test = np.zeros((len(test_set), 300))
for i in range(len(test_set)) :
    X_test[i] = document_embedding(test_set['tokens'][i])
X_test_pca = pca.transform(X_test)
Y_test = test_set['labels'].values
# -
Y_pred = svm.predict(X_test_pca)
# target_names = [0.0, 1.0]
print(classification_report(Y_test, Y_pred)) #, target_names = target_names))
# if we consider that performances are not good enough, we pass to the following section !
# ### 5). Labelling and iterating
# Let's take N random reviews and observe predictions ! We will show you with 5 samples but do it with far more samples :
# Refit with all 300 components and probability estimates enabled, so that
# predict_proba is available for the active-learning step.
pca = PCA(n_components = 300)
X_train_pca = pca.fit_transform(X_train)
svm = SVC(probability = True)
svm.fit(X_train_pca, Y_train)  # the original fitted twice; once is enough
# Inspect predicted probabilities for five unlabeled reviews.
# NOTE(review): X is not PCA-projected here although the SVM was trained on
# X_train_pca; with n_components = 300 the dimensionality matches, but the
# feature space does not — confirm whether pca.transform(X[3000:3005]) was intended.
svm.predict_proba(X[3000:3005])
semi_supervised_data.iloc[3000:3005]
# Since we are satisfied about performances, we are going to label.
# Use .loc: the original chained .iloc[3000]['labels'] = ... assigns into a
# temporary copy and can silently fail to update the DataFrame.
# -1 is reserved as the "unlabeled" sentinel (see the NB above about
# replacing -1 sentiments), so the negative sentiment is recorded as 2.
semi_supervised_data.loc[3000, 'labels'] = 1
semi_supervised_data.loc[3001, 'labels'] = 2
semi_supervised_data.loc[3002, 'labels'] = 1
semi_supervised_data.loc[3003, 'labels'] = 1
semi_supervised_data.loc[3004, 'labels'] = 1
# Let's retrain with new labels :
# +
# Fold the five newly-labeled samples into the training data and retrain.
# (The original referenced the undefined name X_values; X is the ndarray.)
X_train = np.concatenate((X_train, X[3000:3005]), axis = 0)
Y_train = np.concatenate((Y_train, semi_supervised_data['labels'][3000:3005].values), axis = 0)
svm = SVC()
# Project the *enlarged* training set with the fitted PCA; fitting on the
# stale 2000-row X_train_pca would make features and labels disagree in length.
X_train_pca = pca.transform(X_train)
svm.fit(X_train_pca, Y_train)
# -
# Evaluate :
Y_pred = svm.predict(X_test_pca)
#target_names = [0.0, 1.0]
print(classification_report(Y_test, Y_pred))  # the original was missing the closing ')'
# Observe and label :
# +
# Exercise template: pick a window of unlabeled reviews, inspect the model's
# probabilities, then record labels by hand. The original cell was left
# half-filled (bare `BEGIN =`, dangling `= ` assignments, `iloc[[a:b]]`) and
# did not parse; the placeholders below keep it runnable.
BEGIN = 3005   # TODO: choose where the next labelling window starts
WINDOW = 20
svm.predict_proba(X[BEGIN:BEGIN+WINDOW])
# -
semi_supervised_data.iloc[BEGIN:BEGIN+WINDOW]
# Record your manual labels with .loc (chained .iloc[...][...] assignment
# writes to a copy). Fill in your label for each review, e.g.:
# semi_supervised_data.loc[BEGIN, 'labels'] = 1
# semi_supervised_data.loc[BEGIN + 1, 'labels'] = 0
# ...
# semi_supervised_data.loc[BEGIN + WINDOW - 1, 'labels'] = 1
# (the window covers BEGIN .. BEGIN+WINDOW-1; the original template's last
# line used BEGIN+WINDOW, one past the slice end)
# And restart procedure until good enough performances...
# +
# Fold the newly-labeled window into the training data and retrain.
# (X_values was undefined in the original; X is the embedding matrix.)
X_train = np.concatenate((X_train, X[BEGIN:BEGIN+WINDOW]), axis = 0)
Y_train = np.concatenate((Y_train, semi_supervised_data['labels'][BEGIN:BEGIN+WINDOW].values), axis = 0)
svm = SVC()
X_train_pca = pca.transform(X_train)  # keep features and labels the same length
svm.fit(X_train_pca, Y_train)
# -
# ...
# ### 6). Sentiment Analysis : How to compute a simple sentiment score ?
#
# #### (i) You have two classes : Positive and Negative sentiments
#
# For a review d, we want to define a continuous score(d) in [-1 ; 1] according to the following simple rules :
# - Near to 1 if very positive
# - Near to -1 if very negative
# - Near to 0 if it is as negative as positive
#
# Set $P_{positive}$ predicted probability to belong to Positive class, $P_{negative}$ predicted probability to belong to Negative class, a simple approach is :
#
# score(d) := $P_{positive}$ - $P_{negative}$ = $\frac{P_{positive} - P_{negative}}{P_{positive} + P_{negative}}$ = $-1$ $+$ $2 \times P_{positive}$ which is equivalent to score(d) := $P_{positive}$ if we would want to define a score in [0 ; 1].
#
# #### (ii) You have three classes : Positive, Negative and Neutral sentiments
#
# In a similar way to the previous approach with $P_{positive}$, $P_{negative}$ and $P_{neutral}$ for a score defined in [-1 ; 1] :
#
# score(d) := $P_{positive}$ - $P_{negative}$ = $\frac{P_{positive} - P_{negative}}{P_{positive} + P_{neutral} + P_{negative}}$ where $P_{neutral}$ plays a role of feelings attenuation.
#
# You can now perform the label propagation for sentiment analysis with continuous score or discrete classes. Let's try !
# ### 7). Session Report
#
# We have worked with label propagation algorithm and naïve document embeddings with word2vec, you are free to use label spreading algorithm in sklearn, S3VM and other semi-supervised learning algorithm (look at https://github.com/tmadl/semisup-learn) or explore other document/word representations to determine the better suited method for our problem.
#
# Keep in mind that data sciences need empirical approaches in application !
#
# References :
# - https://scikit-learn.org/stable/modules/label_propagation.html#semi-supervised
# - https://scikit-learn.org/stable/modules/generated/sklearn.semi_supervised.LabelPropagation.html#sklearn.semi_supervised.LabelPropagation
# - https://scikit-learn.org/stable/modules/generated/sklearn.semi_supervised.LabelSpreading.html#sklearn.semi_supervised.LabelSpreading
# - http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
# - http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
| Day7/X_HEC_Session_7_Notebook_Word2vec_SVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
import numpy as np
import plotly
from plotly import tools
import plotly.plotly as py
import plotly.graph_objs as go
import CandyCrashSimulationNXN_4 as f
# Board geometry and Monte-Carlo sample count.
width = height = 9
num_candy = 4  # NOTE(review): unused in this chunk — presumably consumed inside the simulation module; confirm
N = 10000      # simulated random boards per seed position
print("size: {} * {}".format(width,height))
# ## Horizontal Eliminate
# Monte-Carlo estimate of elimination probability per cell: seed a horizontal
# 3-in-a-row of candy 1 at columns 3..5 of each row, cascade the board to a
# static state, and average the elimination mask over N random boards.
# NOTE(review): the reshape here is (height, width, height) while the vertical
# section uses (height-2, height, width); the orderings agree only because the
# board is square — confirm the intended axis order.
heatmapH = np.zeros(width * height * height).reshape(height,width,height)
for row in range(height):
    for i in range(N):
        board = f.initialize(height,width)
        board[row][3] = 1
        board[row][3 + 1] = 1
        board[row][3 + 2] = 1
        newboard,mark,score = f.eliminate_to_static(board,height,width)
        heatmapH[row] += mark
# Normalize counts into per-cell elimination frequencies.
heatmapH /= N
#print(heatmap)
plotly.tools.set_credentials_file(username='jameschu', api_key='WBgAtN4Z5CrmPtpUagy7')
fig = tools.make_subplots(rows=2, cols=5)
for row in range(height):
trace = go.Heatmap(z=list(heatmapH[row]),x=['0','1','2','3','4','5','6','7','8'],y=['8','7','6','5','4','3','2','1','0'])
data = [trace]
fig.append_trace(trace, row//5+1, row%5+1)
fig['layout'].update(title='Horizontal Eliminate')
py.iplot(fig, filename='EliminateCandyLocationHorizontal')
#py.iplot(data,filename='EliminateCandyLocationRow{}'.format(1))
# ## Vertical Eliminate
# Same experiment with a vertical 3-match planted in the centre column (4).
heatmapV = np.zeros((height-2)*height*width).reshape((height-2),height,width)
for row in range(height-2):
    for i in range(N):
        board = f.initialize(height,width)
        board[row][4] = 1
        board[row+1][4] = 1
        board[row+2][4] = 1
        newboard,mark,score = f.eliminate_to_static(board,height,width)
        heatmapV[row] += mark
heatmapV /= N  # counts -> probabilities
#print(heatmap)
# NOTE(review): '<KEY>' looks like a redacted API-key placeholder; this call
# will fail until a real key is supplied.
plotly.tools.set_credentials_file(username='jameschu', api_key='<KEY>')
fig2 = tools.make_subplots(rows=2, cols=4)
for row in range(height-2):
    trace = go.Heatmap(z=list(heatmapV[row]),x=['0','1','2','3','4','5','6','7','8'],y=['8','7','6','5','4','3','2','1','0'])
    data = [trace]
    fig2.append_trace(trace, row//4+1, row%4+1)
fig2['layout'].update(title='Vertical Eliminate Center')
py.iplot(fig2, filename='EliminateCandyLocationVertical')
# Repeat with the vertical match in the leftmost column (0) to expose the
# board-boundary effect.
heatmapVL = np.zeros((height-2)*height*width).reshape((height-2),height,width)
for row in range(height-2):
    for i in range(N):
        board = f.initialize(height,width)
        board[row][0] = 1
        board[row+1][0] = 1
        board[row+2][0] = 1
        newboard,mark,score = f.eliminate_to_static(board,height,width)
        heatmapVL[row] += mark
heatmapVL /= N
#print(heatmapVL)
plotly.tools.set_credentials_file(username='jameschu', api_key='WBgAtN4Z5CrmPtpUagy7')
fig2L = tools.make_subplots(rows=2, cols=4)
for row in range(height-2):
    trace = go.Heatmap(z=list(heatmapVL[row]),x=['0','1','2','3','4','5','6','7','8'],y=['8','7','6','5','4','3','2','1','0'])
    data = [trace]
    fig2L.append_trace(trace, row//4+1, row%4+1)
fig2L['layout'].update(title='Vertical Eliminate Left')
py.iplot(fig2L, filename='EliminateCandyLocationVerticalLeft')
| HeatMap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cargar una serie de tiempo
# +
from model_training import LoadTimeSerie
load_ts = LoadTimeSerie()
# -
# Example for this time series: train_ID-271_cadena-4_001'
# Start/end positions in the array for X (input) and y (output).
pos = {'X': [0, 30], 'y': [30, 31]}
path_time_serie = '/mnt/atrys/DL-Pipelines/ECG/Supervised_Learning_Forecasting/time_series/numpy/ID-271_cadena-4.npy'
# Column order inside every stored time series.
features = [
    "venta_unidades_dia",
    "venta_clp_dia",
    "is_promo"
]
# **Single output**
# +
# Output column(s) of the time series to return.
idx_feature_y = features.index("venta_clp_dia")
X,y = load_ts(path_time_serie, pos, idx_feature_y)
X.shape, y.shape
# -
#
# **More than one output**
# +
# A list of column indices selects several output features at once.
idx_feature_y = [features.index("venta_clp_dia"), features.index("venta_unidades_dia")]
X, y = load_ts(path_time_serie, pos, idx_feature_y)
X.shape, y.shape
# -
# ___
# # Usar el generador de datos
from model_training import DataGenerator
# +
import json
# Labels and the list of training-series IDs produced earlier.
with open("data/labels.json") as fp:
    labels = json.load(fp)
with open("data/list_train.json") as fp:
    list_train = json.load(fp)
# +
from model_training import LoadTimeSerie
load_ts = LoadTimeSerie()
# +
# Column order inside every stored time series.
features = [
    "venta_unidades_dia",
    "venta_clp_dia",
    "is_promo"
]
idx_feature_y = [features.index("venta_unidades_dia")]
inputs_data_gen = dict(
    list_id_ts=list_train,
    labels=labels,
    idx_feature_y = idx_feature_y,
    file_loader = load_ts, # from file to array
    preprocessing = None,
    shuffle=True,
    batch_size=4,
)
train_gen = DataGenerator(**inputs_data_gen)
# -
# Fetch the first batch; indexing is the idiomatic form of the original
# explicit train_gen.__getitem__(0) call and is exactly equivalent.
X, y = train_gen[0]
X.shape
y.shape
# # Preprocesamiento
from model_training.preprocessing import Pipeline as Preprocessing
# +
# Per-feature min/max values saved at dataset-build time, used for scaling.
with open("time_series/time_series_config.json") as fp:
    ts_config = json.load(fp)
list_min = ts_config.get("list_min")
list_max = ts_config.get("list_max")
prep = Preprocessing([
    ("minmax", dict(axis_signal=0, list_min=list_min, list_max=list_max ))
])
# -
# NOTE(review): this cell rebuilds the exact same pipeline as above;
# the duplication is harmless but one of the two cells can be removed.
prep = Preprocessing([
    ("minmax", dict(axis_signal=0, list_min=list_min, list_max=list_max ))
])
# Scale one series from the batch produced by the generator above.
time_serie = X[2]
x = prep(time_serie)
x
# # TimeSerieAnalyzer
# +
from data_selection import TimeSeriesAnalyzer
# Flags series whose fraction of zero-valued timesteps exceeds the threshold.
ts_analyzer = TimeSeriesAnalyzer(
    max_zero_timesteps=0.5,
)
ts_analyzer(time_serie)
# -
import numpy as np
# Quick NaN check on the same series.
any(np.isnan(time_serie).flatten())
| Ejemplos-de-uso.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AIWintermuteAI/aXeleRate/blob/master/resources/aXeleRate_test_segnet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="hS9yMrWe02WQ" colab_type="text"
# ## Segmentation model Training and Inference
#
# In this notebook we will use axelerate Keras-based framework for AI on the edge to quickly setup model training and then after training session is completed convert it to .tflite and .kmodel formats.
#
# First, let's take care of some administrative details.
#
# 1) Before we do anything, make sure you have chosen GPU as Runtime type (in Runtime -> Change Runtime type).
#
# 2) We need to mount Google Drive for saving our model checkpoints and final converted model(s). Press on Mount Google Drive button in Files tab on your left.
#
# In the next cell we clone axelerate Github repository and import it.
#
# **It is possible to use pip install or python setup.py install, but in that case you will need to restart the environment.** Since I'm trying to make the process as streamlined as possible I'm using sys.path.append for import.
# + id="y07yAbYbjV2s" colab_type="code" colab={}
# %tensorflow_version 1.x
# !git clone https://github.com/AIWintermuteAI/aXeleRate.git
import sys
# Make the freshly cloned repo importable without pip-installing it.
sys.path.append('/content/aXeleRate')
from axelerate import setup_training,setup_inference
# + [markdown] id="5TBRMPZ83dRL" colab_type="text"
# At this step you typically need to get the dataset. You can use !wget command to download it from somewhere on the Internet or !cp to copy from My Drive as in this example
# ```
# # # !cp -r /content/drive/'My Drive'/pascal_20_segmentation.zip .
# # # !unzip --qq pascal_20_segmentation.zip
# ```
# For this notebook small test dataset is already in axelerate/sample_datasets folder, so no need to download anything.
#
# For semantic segmentation the dataset consists of RGB images and segmentation masks.
# A few things to keep in mind:
#
# - The filenames of the annotation images should be same as the filenames of the RGB images.
#
# - The dimensions of the annotation image for the corresponding RGB image should be same.
#
# - For each pixel in the RGB image, the class label of that pixel in the annotation image would be the value of the annotation image pixel.
#
# Let's visualize our semantic segmentation test dataset and see what that means in practice.
#
# + id="_tpsgkGj7d79" colab_type="code" colab={}
# %matplotlib inline
from axelerate.networks.segnet.data_utils.visualize_dataset import visualize_segmentation_dataset
# Show sample image/mask pairs from the bundled 20-class validation set.
visualize_segmentation_dataset('aXeleRate/sample_datasets/segmentation/imgs_validation', 'aXeleRate/sample_datasets/segmentation/anns_validation', n_classes=20)
# + [markdown] id="S1oqdtbr7VLB" colab_type="text"
# Next step is defining a config dictionary. Most lines are self-explanatory.
#
# Type is model frontend - Classifier, Detector or Segnet
#
# Architecture is model backend (feature extractor)
#
# - Full Yolo
# - Tiny Yolo
# - MobileNet1_0
# - MobileNet7_5
# - MobileNet5_0
# - MobileNet2_5
# - SqueezeNet
# - VGG16
# - ResNet50
#
# + id="Jw4q6_MsegD2" colab_type="code" colab={}
# Training configuration consumed by axelerate.setup_training.
config = {
    "model" : {
        "type": "SegNet",               # frontend: semantic segmentation
        "architecture": "MobileNet7_5", # backend feature extractor
        "input_size": 224,
        "n_classes" : 20
    },
    "weights" : {
        "full": "",                     # no full-model checkpoint to resume from
        "backend": "imagenet"           # initialise the backbone from ImageNet
    },
    "train" : {
        "actual_epoch": 5,
        "train_image_folder": "aXeleRate/sample_datasets/segmentation/imgs",
        "train_annot_folder": "aXeleRate/sample_datasets/segmentation/anns",
        "train_times": 4,
        "valid_image_folder": "aXeleRate/sample_datasets/segmentation/imgs_validation",
        "valid_annot_folder": "aXeleRate/sample_datasets/segmentation/anns_validation",
        "valid_times": 4,
        "valid_metric": "val_loss",
        "batch_size": 8,
        "learning_rate": 1e-4,
        "saved_folder": "segment",
        "first_trainable_layer": "",
        "ignore_zero_class": False,
        # NOTE(review): "augumentation" (sic) is the key spelling aXeleRate
        # expects -- do not "fix" it here.
        "augumentation": True
    },
    "converter" : {
        "type": ["k210","tflite"]       # export both .kmodel (K210) and .tflite
    }
}
# + [markdown] id="kobC_7gd5mEu" colab_type="text"
# Let's check what GPU we have been assigned in this Colab session, if any.
# + id="rESho_T70BWq" colab_type="code" colab={}
from tensorflow.python.client import device_lib
# List the devices TF can see (shows whether a GPU was allocated).
device_lib.list_local_devices()
# + [markdown] id="cWyKjw-b5_yp" colab_type="text"
# Finally we start the training by passing the config dictionary we defined earlier to the setup_training function. The function will start the training with Checkpoint, Reduce Learning Rate on Plateau and Early Stopping callbacks. After the training has stopped, it will convert the best model into the format you have specified in config and save it to the project folder.
# + id="deYD3cwukHsj" colab_type="code" colab={}
# Run training; returns the path of the best converted model checkpoint.
model_path = setup_training(config_dict=config)
# + [markdown] id="ypTe3GZI619O" colab_type="text"
# After training it is good to check the actual performance of your model by doing inference on your validation dataset and visualizing results. This is exactly what the next block does. Obviously, since our model has only trained on a few images, the results are far from stellar, but if you have a good dataset, you'll have better results.
# + id="jE7pTYmZN7Pi" colab_type="code" colab={}
from keras import backend as K
# Clear the training graph/session before building the inference model.
K.clear_session()
setup_inference(config, model_path)
# + [markdown] id="5YuVe2VD11cd" colab_type="text"
# Good luck and happy training! Have a look at these articles, that would allow you to get the most of Google Colab or connect to local runtime if there are no GPUs available;
#
# https://medium.com/@oribarel/getting-the-most-out-of-your-google-colab-2b0585f82403
#
# https://research.google.com/colaboratory/local-runtimes.html
| resources/aXeleRate_test_segnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import os
# NOTE(review): scipy.stats.stats is a deprecated import path; the public
# location is scipy.stats.pearsonr.
from scipy.stats.stats import pearsonr
# Chromosome / resolution configuration for the GM12878 Hi-C comparison.
chrN = 18
down_sample_ratio = 16
resolution_size = 10000
# Chromosome lengths in bp, chr1..chr22 (presumably hg19 -- TODO confirm).
chrs_length = [249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,59128983,63025520,48129895,51304566]
index_file_path = "../data/divided-data/GM12878_primary/10kb_resolution/chr18-22(down16)-index.npy"
enhanced_frames_path = "../data/enhanced-data/GM12878_primary_enhanced10kb(from16)_chr18-22.npy"
enhanced_frames2_path = "../data/enhanced-data/GM12878_primary_enhanced10kb(from16)(rep2)_chr18-22.npy"
low_res_HiC_file_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal_down16/chr" + str(chrN) + "_10kb_down.RAWobserved"
high_res_HiC_file_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal/chr" + str(chrN) + "/MAPQG0/chr" + str(chrN) + "_10kb.RAWobserved"
low_res_HiC_file2_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal_down16(rep2)/chr" + str(chrN) + "_10kb_down.RAWobserved"
low_res_HiC_matrix_file_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal_down16/chr" + str(chrN) + "_10kb_down.RAWobserved_npy_form_tmp.npy"
high_res_HiC_matrix_file_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal/chr" + str(chrN) + "/MAPQG0/chr" + str(chrN) + "_10kb.RAWobserved_npy_form_tmp.npy"
low_res_HiC_matrix_file2_path = "../data/GM12878_primary/10kb_resolution_intrachromosomal_down16(rep2)/chr" + str(chrN) + "_10kb_down.RAWobserved_npy_form_tmp.npy"
# Number of 10kb bins covering the chromosome.
total_length = int(chrs_length[chrN-1]/resolution_size) + 1
index = np.load(index_file_path)
enhanced_frames = np.load(enhanced_frames_path)
enhanced_frames2 = np.load(enhanced_frames2_path)
# Load cached dense matrices when available; otherwise parse the raw
# coordinate files.  NOTE(review): `utils` is never imported in this
# notebook, so the fallback branches raise NameError unless the cached
# .npy files already exist.
if os.path.exists(high_res_HiC_matrix_file_path):
    high_res_HiC_matrix = np.load(high_res_HiC_matrix_file_path)
else:
    high_res_HiC_matrix = utils.readSquareMatrix(high_res_HiC_file_path, total_length, resolution_size)
if os.path.exists(low_res_HiC_matrix_file_path):
    low_res_HiC_matrix = np.load(low_res_HiC_matrix_file_path)
else:
    low_res_HiC_matrix = utils.readSquareMatrix(low_res_HiC_file_path, total_length, resolution_size)
if os.path.exists(low_res_HiC_matrix_file2_path):
    low_res_HiC_matrix2 = np.load(low_res_HiC_matrix_file2_path)
else:
    low_res_HiC_matrix2 = utils.readSquareMatrix(low_res_HiC_file2_path, total_length, resolution_size)
# Rescale the down-sampled matrices back to the original read depth.
low_res_HiC_matrix = low_res_HiC_matrix * down_sample_ratio
low_res_HiC_matrix2 = low_res_HiC_matrix2 * down_sample_ratio
# -
# -
# The stored index rows are bytes; decode to str, then to ints
# (column 0 = chromosome, 1 = x bin offset, 2 = y bin offset).
decoder = np.vectorize(lambda x: x.decode('UTF-8'))
index = decoder(index[:,1:]).astype(int)
# Rows of `index` belonging to the current chromosome.
chrN_index = np.where(index[:,0]==chrN)[0]
# NOTE(review): plain assignment does not copy, but the .astype(float) on
# the next line returns a new array, so the low-res matrix stays unmodified.
enhanced_HiC_matrix = low_res_HiC_matrix
enhanced_HiC_matrix = enhanced_HiC_matrix.astype(float)
# Paste each predicted 28x28 frame into its slot, skipping a 6-bin margin
# of the input frame (presumably the model's border padding -- TODO confirm).
for i in chrN_index:
    x_pos = index[i,1]
    y_pos = index[i,2]
    enhanced_HiC_matrix[x_pos+6:x_pos+34,y_pos+6:y_pos+34] = enhanced_frames[i,:,:]
# Same substitution for the second replicate.
enhanced_HiC_matrix2 = low_res_HiC_matrix2
enhanced_HiC_matrix2 = enhanced_HiC_matrix2.astype(float)
for i in chrN_index:
    x_pos = index[i,1]
    y_pos = index[i,2]
    enhanced_HiC_matrix2[x_pos+6:x_pos+34,y_pos+6:y_pos+34] = enhanced_frames2[i,:,:]
def vec_of_dist(matrix, x):
    """Return the x-th super-diagonal of *matrix* as a plain list.

    Element i is matrix[i, i + x]; the length is matrix.shape[1] - x.
    """
    length = matrix.shape[1] - x
    diagonal = []
    for i in range(length):
        diagonal.append(matrix[i, i + x])
    return diagonal
import matplotlib.pyplot as plt
# For each genomic distance (in bins), correlate the diagonals of the
# low-res / enhanced matrices against the true high-res matrix.
highVSlow_corr_list = []
highVSenhanced_corr_list = []
highVSlow_corr_list2 = []
highVSenhanced_corr_list2 = []
for dist in range(100):
    low_res_vec = vec_of_dist(low_res_HiC_matrix, dist)
    low_res_vec2 = vec_of_dist(low_res_HiC_matrix2, dist)
    high_res_vec = vec_of_dist(high_res_HiC_matrix, dist)
    enhanced_vec = vec_of_dist(enhanced_HiC_matrix, dist)
    enhanced_vec2 = vec_of_dist(enhanced_HiC_matrix2, dist)
    # pearsonr returns (r, p-value); keep only r.
    highVSlow_corr_list.append(pearsonr(low_res_vec, high_res_vec)[0])
    highVSenhanced_corr_list.append(pearsonr(high_res_vec, enhanced_vec)[0])
    highVSlow_corr_list2.append(pearsonr(low_res_vec2, high_res_vec)[0])
    highVSenhanced_corr_list2.append(pearsonr(high_res_vec, enhanced_vec2)[0])
plt.plot(highVSlow_corr_list, label = "highVSlow")
plt.plot(highVSenhanced_corr_list, label = "highVSenhanced")
plt.plot(highVSlow_corr_list2, label = "highVSlow(rep2)")
plt.plot(highVSenhanced_corr_list2, label = "highVSenhanced(rep2)")
plt.legend(loc='upper right', prop={'size': 5})
plt.show()
# +
### creating N*3 array of coordinates list from enhanced matrix
# Export the enhanced matrix as a sparse (source_bp, target_bp, weight)
# tab-separated coordinate list.
output_file_path = "../data/enhanced-data/down180/chr" + str(chrN) + "-enhanced.txt"
nonzero_indices = np.nonzero(enhanced_HiC_matrix)
# Convert bin indices back to genomic coordinates in bp.
source = nonzero_indices[0] * resolution_size
target = nonzero_indices[1] * resolution_size
weight = enhanced_HiC_matrix[nonzero_indices]
coordinate_list = np.transpose(np.array((source, target, weight)))
np.savetxt(output_file_path, coordinate_list, delimiter='\t')
| src/notebooks/.ipynb_checkpoints/frames2HiC-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="FhJngx31jnl8" colab_type="code" outputId="3bedb964-8436-4b06-fe21-68ffcd15355c" executionInfo={"status": "ok", "timestamp": 1581854305764, "user_tz": -60, "elapsed": 6775, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 292}
# !pip install eli5
# + id="8ZzSTJirjzK2" colab_type="code" outputId="741da1c8-d4cd-46e9-dfac-aed88ea79f18" executionInfo={"status": "ok", "timestamp": 1581854316737, "user_tz": -60, "elapsed": 2878, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 185}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
from ast import literal_eval
from tqdm import tqdm_notebook
# + id="aMv7ad3kkdiv" colab_type="code" outputId="2dea5702-c5d7-41b8-9f56-18470a5d477c" executionInfo={"status": "ok", "timestamp": 1581854319660, "user_tz": -60, "elapsed": 605, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="Tlo_Cy2mklcB" colab_type="code" outputId="8f6e66b6-a97b-471b-ef5b-a788e2020bae" executionInfo={"status": "ok", "timestamp": 1581854322183, "user_tz": -60, "elapsed": 1667, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# ls
# + id="Hon8TI5hkuLh" colab_type="code" colab={}
# Raw men's shoes dataset; low_memory=False parses each column in one pass
# to avoid mixed-dtype guessing on the wide text columns.
df = pd.read_csv('data/men_shoes.csv', low_memory=False)
# + id="q0O7NsmJlN0N" colab_type="code" colab={}
def run_model(feats, model=None):
    """Cross-validate *model* on the feature columns *feats* of the global ``df``.

    Parameters
    ----------
    feats : list of str
        Column names used as model input.
    model : sklearn regressor, optional
        Defaults to a fresh ``DecisionTreeRegressor(max_depth=5)``.

    Returns
    -------
    tuple
        ``(mean, std)`` of the cross-validated negative-MAE scores
        (closer to 0 is better).
    """
    # Building the default estimator inside the call avoids the
    # mutable-default-argument pitfall: the old signature constructed one
    # shared estimator at definition time.
    if model is None:
        model = DecisionTreeRegressor(max_depth=5)
    x = df[feats].values
    y = df['prices_amountmin'].values
    scores = cross_val_score(model, x, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)
# + id="ACtvhrgLmEs0" colab_type="code" outputId="951a3271-afeb-4228-f445-bf133b29d36c" executionInfo={"status": "ok", "timestamp": 1581854348958, "user_tz": -60, "elapsed": 500, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Integer-encode the (case-folded) brand and score a decision tree on it
# alone as the baseline feature.
df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]
run_model(['brand_cat'])
# + id="Y3XIljK9vJH-" colab_type="code" outputId="5bc7ab92-fdd2-42ca-f7f5-ece4a0f6d771" executionInfo={"status": "ok", "timestamp": 1581854351404, "user_tz": -60, "elapsed": 546, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 513}
df.head()
# + id="TLW0EhPRmZGr" colab_type="code" outputId="4dd8635d-8602-4b29-f1ae-0d676a21865b" executionInfo={"status": "ok", "timestamp": 1581854356519, "user_tz": -60, "elapsed": 3577, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Same single feature, but with a random forest for comparison.
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)
# + id="l2EUbyS9m-y-" colab_type="code" colab={}
def parse_features(x):
    """Parse one raw ``features`` cell into a flat ``{key: value}`` dict.

    Each cell is a string holding a Python-literal list of
    ``{"key": ..., "value": [...]}`` dicts (missing cells arrive as the
    float ``nan``).  Keys and values are lower-cased and stripped; only
    the first entry of each ``value`` list is kept.
    """
    output_dict = {}
    # Missing cells come through as float('nan'); str() makes the check cheap.
    if str(x) == 'nan':
        return output_dict
    features = literal_eval(x.replace('\\"', '"'))
    for item in features:
        # Skip malformed entries (missing key, or empty value list) instead
        # of crashing with KeyError/IndexError as the original did.
        if 'key' not in item or not item.get('value'):
            continue
        key = item['key'].lower().strip()
        value = item['value'][0].lower().strip()
        output_dict[key] = value
    return output_dict
# Parse every raw features string into a dict column.
df['features_parsed'] = df['features'].map(parse_features)
# + id="hesVDEzH0Sq9" colab_type="code" outputId="ec6287d7-51cd-4480-a3d1-ce5f3bbf592d" executionInfo={"status": "ok", "timestamp": 1581854379852, "user_tz": -60, "elapsed": 483, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 139}
df['features_parsed'].head().values
# + id="cjmpH164wl8B" colab_type="code" outputId="8bde2a07-55a6-4c5f-fb80-79f7e2470063" executionInfo={"status": "ok", "timestamp": 1581854383445, "user_tz": -60, "elapsed": 488, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Collect the union of all feature keys seen across the dataset
# (map is used only for its side effect on `keys`).
keys = set()
df['features_parsed'].map( lambda x: keys.update(x.keys()) )
len(keys)
# + id="tuObXg9fwqVP" colab_type="code" colab={}
def get_name_feat(key):
    """Return the dataframe column name for a parsed feature *key*."""
    prefix = 'feat_'
    return prefix + key
# Expand every parsed feature into its own sparse 'feat_<key>' column
# (NaN where a row lacks that feature).
for key in keys:
    df[get_name_feat(key)] = df.features_parsed.map( lambda feats: feats[key] if key in feats else np.nan)
# + id="Svik4EbKVDln" colab_type="code" outputId="23264a35-5ffd-491a-ba1f-8df9a6ecd389" executionInfo={"status": "ok", "timestamp": 1581854392155, "user_tz": -60, "elapsed": 464, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 153}
df.columns
# + id="enM7TK9EVDvx" colab_type="code" colab={}
# Percentage of rows in which each feature key is present.
keys_stat = {}
for key in keys:
    keys_stat[key] = df[ False == df[get_name_feat(key)].isnull() ].shape[0] / df.shape[0] *100
# + id="0O4UKH0hXlPM" colab_type="code" outputId="4be5e096-543d-42f7-d571-0aa3d737cb66" executionInfo={"status": "ok", "timestamp": 1581854398507, "user_tz": -60, "elapsed": 445, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 102}
{k:v for k,v in keys_stat.items() if v > 30}
# + id="v1GOG4RiYbxx" colab_type="code" colab={}
# Build integer-encoded "_cat" versions of every parsed feature column.
# The generic loop covers brand/color/gender/manufacturer part number/
# material/sport/style as well, so the seven hand-written factorize lines
# that previously preceded it were redundant (the loop immediately
# recomputed identical columns) and have been removed.
for key in keys:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]
# + id="vEyuH3rMZVH8" colab_type="code" outputId="69e34c0a-74d6-407e-8754-a105243f772e" executionInfo={"status": "ok", "timestamp": 1581854406320, "user_tz": -60, "elapsed": 445, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Normalise catalogue brand casing, then count rows where it agrees with
# the brand parsed out of the features column.
df['brand'] = df['brand'].map(lambda x:str(x).lower() )
df[ df.brand == df.feat_brand ].shape
# + id="mzNOExm1a_Mt" colab_type="code" colab={}
# NOTE(review): this cell was left as the incomplete statement `feats =`,
# which is a SyntaxError when the script is executed top to bottom.
# Assign an empty list so the file parses; the real feature list is
# defined a few cells below and overwrites this value.
feats = []
# + id="95qzMCVQbJE7" colab_type="code" outputId="fd78d2f9-cc86-4e23-c441-c063058f5e2b" executionInfo={"status": "ok", "timestamp": 1581854508016, "user_tz": -60, "elapsed": 3649, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Re-run the brand-only random-forest baseline for reference.
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)
# + id="C_Cw6Om9687M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9295933f-572a-45ec-c06b-534522514c6e" executionInfo={"status": "ok", "timestamp": 1581855312212, "user_tz": -60, "elapsed": 495, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# List every integer-encoded column available for modelling.
feats_cat = [x for x in df.columns if 'cat' in x]
feats_cat
# + id="Nkad0kXcbiB3" colab_type="code" colab={}
# Hand-picked subset of encoded features for the multi-feature model.
feats = ['brand_cat', 'feat_metal type_cat', 'feat_brand_cat', 'feat_shape_cat', 'feat_color_cat', 'feat_gender_cat', 'feat_manufacturer part number_cat', 'feat_material_cat']
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
result = run_model(feats, model)
# + id="spS3gqIEcXTw" colab_type="code" outputId="61af6cd5-d82d-4769-f274-b8a692dfc213" executionInfo={"status": "ok", "timestamp": 1581856199481, "user_tz": -60, "elapsed": 4605, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 187}
# Fit once on the full data, then rank features by permutation importance.
x =df[ feats ].values
y = df['prices_amountmin'].values
m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
m.fit(x, y)
print(result)
perm = PermutationImportance(m, random_state=1).fit(x, y);
eli5.show_weights(perm, feature_names=feats)
# + id="lI59vN50dv1U" colab_type="code" outputId="865ce365-fb44-4154-bb90-7b2cfdd19a07" executionInfo={"status": "ok", "timestamp": 1581854692688, "user_tz": -60, "elapsed": 505, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} colab={"base_uri": "https://localhost:8080/", "height": 139}
df[ df['brand'] == 'nike'].features_parsed.sample(5).values
# + id="LiCTvWFZ-Ah5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="931cc441-dbcf-4a13-87f7-0a7160ac2e20" executionInfo={"status": "ok", "timestamp": 1581856310135, "user_tz": -60, "elapsed": 1660, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# ls matrix_one/
| matrix_one/day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import keys
import pandas as pd
import requests
from pprint import pprint
import json
import numpy as np
# Load merged, opportunities scraped
merged = pd.read_csv('merged.csv')
opportunities = pd.read_csv('opportunities_scraped.csv')
merged.head()
# Sanity counts: total rows vs rows with/without an address.
print(merged.shape)
print(merged[merged['address'].isnull()==False].shape)
print(merged[merged['address'].isnull()==True].shape)
# Same breakdown for the scraper output ('not found' marks failures).
print(opportunities.shape)
print(opportunities[opportunities['scraped_address']=='not found'].shape)
print(opportunities[opportunities['scraped_address']!='not found'].shape)
opportunities.head()
# Copy over scraped addresses
op = opportunities[opportunities['scraped_address']!='not found']
print (op.shape)
# First pass only prints each match for visual verification.
for i, row in op.iterrows():
    print (i, row['Opportunity ID'], row.scraped_address)
    print (merged.loc[merged['id'] == row['Opportunity ID']][['id','address']])
# Second pass actually writes the scraped address into `merged`.
for i, row in op.iterrows():
    merged.loc[merged['id'] == row['Opportunity ID'], 'address'] = row.scraped_address
#merged[merged.id=='0061a00000FqbhV'].address
print(merged.shape)
print(merged[merged['address'].isnull()==False].shape)
print(merged[merged['address'].isnull()==True].shape)
# get remaining lats and longs
print(merged[merged['lat'].isnull()==False].shape)
print(merged[merged['lat'].isnull()==True].shape)
key = keys.google
df = merged
# from notebook 02
counter = 0
# Pre-initialise so the failure print and the progress heartbeat below can
# never hit an unbound name when the very first request fails (previously
# `data`/`lat`/`lng` could be undefined in those prints).
data = None
lat = None
lng = None
# Loop through all records with an address but no lat/long yet.
for i, row in df[(df['lat'].isnull()==True) &
                 (df['address'].isnull()==False)
                 #& (df['id']=='0061a00000GcQdL') #for test
                 ].iterrows():
    # Get the lat and long from the Google Maps geocoding API.
    address = row.address.replace(' ','+')
    url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+address+'=&key='+key
    response = requests.get(url)
    try:
        data = json.loads(response.text)
        #print(data)
        lat = data['results'][0]['geometry']['location']['lat']
        lng = data['results'][0]['geometry']['location']['lng']
        # add it to the dataframe
        df.loc[df['id'] == row.id, 'lat'] = lat
        df.loc[df['id'] == row.id, 'lng'] = lng
        #print(address,lat,lng)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; `data` is None here if the JSON parse failed.
        print('request failure for row', i, data, row.id, row.address)
    counter += 1
    if counter % 30 == 0:
        # Progress heartbeat (last successfully geocoded address).
        print(counter, address, lat, lng)
# Final counts after geocoding, then persist.
print(merged[merged['lat'].isnull()==False].shape)
print(merged[merged['lat'].isnull()==True].shape)
# Save to csv
merged.to_csv('merged_scraped.csv',index_label=False)
# +
# Delete records missing lat and long, save again
# -
merged_cleaned = merged[merged['lat'].isnull()==False]
merged_cleaned.to_csv('merged_scraped_cleaned.csv',index_label=False)
| 05 geocode new addresses.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import pandas as pd
# USGS gauge 02026000 (Bent Creek): one row per annual peak event with
# stage/flow plus the NWS flood-stage thresholds.
df = pd.read_csv('BentCreekPeakStageFlow_02026000.csv')
print(df)
# Pull the columns used throughout the plots below.
peak_flow = df['FLOW CMS']
peak_stage = df['GAUGE M']
action_stage = df['ACTION STAGE']
flood_stage = df['FLOOD STAGE']
moderate_flood = df['MODERATE STAGE']
major_flood = df['MAJOR STAGE']
date = df['DATE']
floodclass = df['CLASS']
# Horizontal bar chart: number of events per flood class, lowest to highest.
floodclass.value_counts().loc[['ACTION', 'FLOOD', 'MODERATE','MAJOR']].plot.barh()
plt.style.use('classic')
plt.xlabel("No. of Flood Events 1935-Present")
plt.ylabel("Flood Class (Action = Lowest, Major = Highest)")
# +
import matplotlib.dates as mdates
plt.style.use('classic')
# Peak-flow time series with date ticks every 10 locator intervals.
plt.plot(date,peak_flow)
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
# NOTE(review): autofmt_xdate is called twice; the second call re-applies
# the label rotation after the locator change.
plt.gcf().autofmt_xdate()
plt.xlabel("Date")
plt.ylabel("Flow Rate (cms)")
# +
x = date
y1 = peak_flow
y2 = peak_stage
plt.style.use('classic')
fig, ax1 = plt.subplots()
# Twin y-axes: flow (cms, red, left) against stage (m, blue, right).
ax2 = ax1.twinx()
ax1.plot(x, y1,'r-')
ax2.plot(x, y2,'b-')
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
plt.gcf().autofmt_xdate()
ax1.set_xlabel('Date')
ax1.set_ylabel('Peak Flow (cms)',color='r')
ax2.set_ylabel('Peak Stage (m)',color='b')
# NWS flood-stage thresholds for this gauge, in metres.
plt.axhline(y=4.572, color='c', linestyle='--',label='Action Stage 4.572 m',lw=2)
plt.axhline(y=4.8768, color='k', linestyle='-',label='Flood Stage 4.8768 m',lw=2)
plt.axhline(y=5.4864, color='k', linestyle='--',label='Moderate Flood Stage 5.4864 m',lw=2)
plt.axhline(y=6.7056, color='r', linestyle='-',label='Major Flood Stage 6.7056 m',lw=2)
plt.legend(bbox_to_anchor=(1.1,0.5), loc="center left", borderaxespad=0)
# +
x = date
y1 = peak_flow
y2 = peak_stage
plt.style.use('classic')
fig, ax1 = plt.subplots()
#ax2 = ax1.twinx()
# Peak flow alone, with its empirical percentile levels overlaid.
ax1.plot(x, y1,'r-')
#ax2.plot(x, y2,'b-')
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
plt.gcf().autofmt_xdate()
ax1.set_xlabel('Date')
ax1.set_ylabel('Peak Flow (cms)',color='r')
#ax2.set_ylabel('Peak Stage (m)',color='b')
plt.axhline(y=np.percentile(peak_flow, 25), color='c', linestyle='--',label='25th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_flow, 50), color='k', linestyle='--',label='50th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_flow, 75), color='b', linestyle='--',label='75th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_flow, 95), color='r', linestyle='--',label='95th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_flow, 99), color='g', linestyle='--',label='99th Percentile',lw=2)
plt.legend(bbox_to_anchor=(1.1,0.5), loc="center left", borderaxespad=0)
# +
# Same percentile overlay for peak stage.
x = date
y1 = peak_flow
y2 = peak_stage
plt.style.use('classic')
fig, ax1 = plt.subplots()
#ax2 = ax1.twinx()
ax1.plot(x, y2,'b-')
#ax2.plot(x, y2,'b-')
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
plt.gcf().autofmt_xdate()
ax1.set_xlabel('Date')
ax1.set_ylabel('Peak Stage (m)',color='b')
#ax2.set_ylabel('Peak Stage (m)',color='b')
plt.axhline(y=np.percentile(peak_stage, 25), color='c', linestyle='--',label='25th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_stage, 50), color='k', linestyle='--',label='50th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_stage, 75), color='b', linestyle='--',label='75th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_stage, 95), color='r', linestyle='--',label='95th Percentile',lw=2)
plt.axhline(y=np.percentile(peak_stage, 99), color='g', linestyle='--',label='99th Percentile',lw=2)
plt.legend(bbox_to_anchor=(1.1,0.5), loc="center left", borderaxespad=0)
# -
# -
| JamesRiverPeakAnnualGaugeFlow_BentCreek_02026000.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# invisible
import pandas as pd
# Widen pandas' display limits so wide frames render fully in the book.
pd.set_option('display.max_colwidth', 65)
pd.set_option('display.max_columns', 65)
import numpy as np
# NOTE(review): this pokes a private NumPy attribute to cap printed line
# width; the public API is np.set_printoptions(linewidth=65).
np.core.arrayprint._line_width = 65
# -
# ## Aufgaben zum Kapitel "Mehrstufige Indizierung"
# ### Aufgaben:
#
# 1. Aufgabe:
# Wandeln Sie das folgende Dictionary in eine Series mit zweistufigem Index, wobei die Ländernamen den primären und die Jahreszahlen den sekundären Index bilden:
#
# ```
# growth_rates = {("Afghanistan", 2015): 1.31,
# ("Afghanistan", 2016): 2.37,
# ("Afghanistan", 2017): 2.60,
# ("Ägypten", 2015): 4.37,
# ("Ägypten", 2016): 4.35,
# ("Ägypten", 2017): 4.18,
# ("Albanien", 2015): 2.22,
# ("Albanien", 2016): 3.35,
# ("Albanien", 2017): 3.84}
# ```
#
#
# +
# prog4book
# Build the (country, year) -> growth-rate mapping, then turn it into a
# pandas Series whose two-level index has the country as the primary and
# the year as the secondary level.
growth_rates = {
    (country, year): rate
    for country, by_year in [
        ("Afghanistan", {2015: 1.31, 2016: 2.37, 2017: 2.60}),
        ("Ägypten", {2015: 4.37, 2016: 4.35, 2017: 4.18}),
        ("Albanien", {2015: 2.22, 2016: 3.35, 2017: 3.84}),
    ]
    for year, rate in by_year.items()
}
growth_rates_series = pd.Series(growth_rates)
growth_rates_series
# -
# 2. Aufgabe:
#
# Vertauschen Sie den mehrstufigen Index der in der vorigen Aufgabe erzeugten Series, d.h. dass die Jahreszahlen den primären und die Länder den sekundären bilden.
# prog4book
# Make the year the primary index level; chaining swaplevel with sort_index
# yields the same Series as the original two-step in-place version.
growth_rates_series = growth_rates_series.swaplevel().sort_index()
growth_rates_series
| 00_Original/pandas_multi_level_indexing_AUFGABEN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Aggregate one result metric across all jobs of a Schedy experiment and
# print summary statistics.
import schedy
import numpy as np
db = schedy.SchedyDB()
exp = db.get_experiment('my_experiment')
acc = []
# NOTE(review): my_value is a placeholder the user must define before running
# (the hyperparameter value to filter on).
for job in exp.all_jobs():
    if job.hyperparameters['my_hp'] == my_value:
        # BUG FIX: the original line read job.results['my_result') —
        # mismatched brackets, a SyntaxError.
        acc.append(job.results['my_result'])
print(np.mean(acc), np.std(acc), np.min(acc), np.max(acc))
# -
| notebooks/02_compute_statistics_across_experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda]
# language: python
# name: conda-env-anaconda-py
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ***
# ***
# # 计算传播与机器学习
#
# ***
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## 1、 监督式学习
#
# 工作机制:
# - 这个算法由一个目标变量或结果变量(或因变量)组成。
# - 这些变量由已知的一系列预示变量(自变量)预测而来。
# - 利用这一系列变量,我们生成一个将输入值映射到期望输出值的函数。
# - 这个训练过程会一直持续,直到模型在训练数据上获得期望的精确度。
# - 监督式学习的例子有:回归、决策树、随机森林、K – 近邻算法、逻辑回归等。
# + [markdown] slideshow={"slide_type": "subslide"}
# ## 2、非监督式学习
#
# 工作机制:
# - 在这个算法中,没有任何目标变量或结果变量要预测或估计。
# - 这个算法用在不同的组内聚类分析。
# - 这种分析方式被广泛地用来细分客户,根据干预的方式分为不同的用户组。
# - 非监督式学习的例子有:关联算法和 K–均值算法。
# + [markdown] slideshow={"slide_type": "subslide"}
# ## 3、强化学习
#
# 工作机制:
# - 这个算法训练机器进行决策。
# - 它是这样工作的:机器被放在一个能让它通过反复试错来训练自己的环境中。
# - 机器从过去的经验中进行学习,并且尝试利用了解最透彻的知识作出精确的商业判断。
# - 强化学习的例子有马尔可夫决策过程。alphago
#
# > Chess. Here, the agent decides upon a series of moves depending on the state of the board (the environment), and the
# reward can be defined as win or lose at the end of the game:
# + [markdown] slideshow={"slide_type": "slide"}
# <img src = './img/mlprocess.png' width = 800>
# + [markdown] slideshow={"slide_type": "slide"}
# - 线性回归
# - 逻辑回归
# - 决策树
# - SVM
# - 朴素贝叶斯
# ---
# - K最近邻算法
# - K均值算法
# - 随机森林算法
# - 降维算法
# - Gradient Boost 和 Adaboost 算法
#
# + [markdown] slideshow={"slide_type": "slide"}
# > # 使用sklearn做线性回归
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "subslide"}
# # 线性回归
# - 通常用于估计连续性变量的实际数值(房价、呼叫次数、总销售额等)。
# - 通过拟合最佳直线来建立自变量X和因变量Y的关系。
# - 这条最佳直线叫做回归线,并且用 $Y= \beta *X + C$ 这条线性等式来表示。
# - 系数 $\beta$ 和 C 可以通过最小二乘法获得
# + slideshow={"slide_type": "subslide"}
# %matplotlib inline
import sklearn
from sklearn import datasets
from sklearn import linear_model
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
# + slideshow={"slide_type": "subslide"}
# boston data
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2; this cell only runs on older scikit-learn versions.
boston = datasets.load_boston()
y = boston.target   # regression target (median house value)
X = boston.data     # feature matrix
# + slideshow={"slide_type": "subslide"}
# Inspect the attributes of the returned Bunch object.
' '.join(dir(boston))
# + slideshow={"slide_type": "fragment"}
boston['feature_names']
# + slideshow={"slide_type": "subslide"}
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Fit an OLS regression of the target on all features via the formula API.
# (The original comment about "the natural log of one of the regressors" was
# wrong: no log transform is applied here.)
results = smf.ols('boston.target ~ boston.data', data=boston).fit()
print(results.summary())
# + slideshow={"slide_type": "subslide"}
# Same regression with scikit-learn, fit on the full data set.
regr = linear_model.LinearRegression()
lm = regr.fit(boston.data, y)
# + slideshow={"slide_type": "subslide"}
# Intercept, per-feature coefficients, and in-sample R^2.
lm.intercept_, lm.coef_, lm.score(boston.data, y)
# + slideshow={"slide_type": "subslide"}
predicted = regr.predict(boston.data)
# + slideshow={"slide_type": "subslide"}
# Measured vs. predicted scatter; the dashed diagonal marks perfect prediction.
fig, ax = plt.subplots()
ax.scatter(y, predicted)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('$Measured$', fontsize = 20)
ax.set_ylabel('$Predicted$', fontsize = 20)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## 训练集和测试集
# -
boston.data
# + slideshow={"slide_type": "subslide"}
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20; prefer
# model_selection and fall back only for very old installations.
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split
# Hold out 20% of the samples as a test set (fixed seed for reproducibility).
Xs_train, Xs_test, y_train, y_test = train_test_split(boston.data,
                                                      boston.target,
                                                      test_size=0.2,
                                                      random_state=42)
# + slideshow={"slide_type": "subslide"}
regr = linear_model.LinearRegression()
lm = regr.fit(Xs_train, y_train)
# + slideshow={"slide_type": "subslide"}
# Intercept, per-feature coefficients, and R^2 on the training split.
lm.intercept_, lm.coef_, lm.score(Xs_train, y_train)
# + slideshow={"slide_type": "subslide"}
predicted = regr.predict(Xs_test)
# + slideshow={"slide_type": "subslide"}
# Measured vs. predicted on held-out data; the dashed line is perfect prediction.
fig, ax = plt.subplots()
ax.scatter(y_test, predicted)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('$Measured$', fontsize = 20)
ax.set_ylabel('$Predicted$', fontsize = 20)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # 交叉验证
# + [markdown] slideshow={"slide_type": "subslide"}
# # cross-validation
#
# k-fold CV, the training set is split into k smaller sets (other approaches are described below, but generally follow the same principles). The following procedure is followed for each of the k “folds”:
# - A model is trained using k-1 of the folds as training data;
# - the resulting model is validated on the remaining part of the data (i.e., it is used as a test set to compute a performance measure such as accuracy).
# + slideshow={"slide_type": "subslide"}
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20.
try:
    from sklearn.model_selection import cross_val_score
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import cross_val_score
regr = linear_model.LinearRegression()
scores = cross_val_score(regr, boston.data , boston.target, cv = 3)
scores.mean()
# -
help(cross_val_score)
# + slideshow={"slide_type": "subslide"}
# BUG FIX: data_X_scale was used here before it was assigned (NameError);
# standardize the features first, then sweep the number of folds.
data_X_scale = scale(boston.data)
scores = [cross_val_score(regr, data_X_scale,\
                          boston.target,\
                          cv = int(i)).mean() \
          for i in range(3, 50)]
plt.plot(range(3, 50), scores,'r-o')
plt.show()
# + slideshow={"slide_type": "subslide"}
# Mean R^2 over 7 folds on the standardized features.
scores = cross_val_score(regr,data_X_scale, boston.target,\
                        cv = 7)
scores.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# # 使用天涯bbs数据
# + slideshow={"slide_type": "subslide"}
import pandas as pd
# Tianya BBS thread list: tab-separated, no header row; rename the numeric
# columns to meaningful names.
df = pd.read_csv('../data/tianya_bbs_threads_list.txt', sep = "\t", header=None)
df=df.rename(columns = {0:'title', 1:'link', 2:'author',3:'author_page', 4:'click', 5:'reply', 6:'time'})
df[:2]
# + slideshow={"slide_type": "subslide"}
# The point of defining this helper is to let the reader see that
# drawing different random samples gives completely different results.
def randomSplit(dataX, dataY, num):
    """Randomly hold out `num` rows as a test set.

    Each feature value is wrapped in a one-element list so the result can be
    fed straight into scikit-learn estimators that expect 2-D X.

    Returns (dataX_train, dataX_test, dataY_train, dataY_test).
    """
    dataX_train = []
    dataX_test = []
    dataY_train = []
    dataY_test = []
    import random
    # BUG FIX: sample indices from len(dataX), not the global `df`; the old
    # version silently produced short test sets whenever len(df) != len(dataX)
    # (and crashed when df was absent). A set gives O(1) membership tests.
    test_index = set(random.sample(range(len(dataX)), num))
    for k in range(len(dataX)):
        if k in test_index:
            dataX_test.append([dataX[k]])
            dataY_test.append(dataY[k])
        else:
            dataX_train.append([dataX[k]])
            dataY_train.append(dataY[k])
    return dataX_train, dataX_test, dataY_train, dataY_test,
# + slideshow={"slide_type": "subslide"}
import numpy as np
# Use only one feature
# NOTE(review): data_X is assigned but never used below; the regression is
# actually run on log(click+1) (X) vs log(reply+1) (y) via randomSplit.
data_X = df.reply
# Split the data into training/testing sets (20 random rows held out)
data_X_train, data_X_test, data_y_train, data_y_test = randomSplit(np.log(df.click+1),
                                                                   np.log(df.reply+1), 20)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(data_X_train, data_y_train)
# R^2 on the held-out rows; 1 is perfect prediction
print('Variance score: %.2f' % regr.score(data_X_test, data_y_test))
# -
data_X_train
# + slideshow={"slide_type": "subslide"}
y_true, y_pred = data_y_test, regr.predict(data_X_test)
# + slideshow={"slide_type": "fragment"}
plt.scatter(y_pred, y_true, color='black')
plt.show()
# + slideshow={"slide_type": "subslide"}
# Plot outputs: test points and the fitted regression line
plt.scatter(data_X_test, data_y_test, color='black')
plt.plot(data_X_test, regr.predict(data_X_test), color='blue', linewidth=3)
plt.show()
# + slideshow={"slide_type": "subslide"}
# The coefficients
'Coefficients: \n', regr.coef_
# + slideshow={"slide_type": "fragment"}
# The mean square error
"Residual sum of squares: %.2f" % np.mean((regr.predict(data_X_test) - data_y_test) ** 2)
# + slideshow={"slide_type": "subslide"}
# NOTE(review): attribute assignment (df.click_log = ...) stores these lists on
# the DataFrame *instance*, not as real columns; later cells read them back the
# same way, so behavior is preserved, but df['click_log'] = ... would be the
# conventional spelling.
df.click_log = [[np.log(df.click[i]+1)] for i in range(len(df))]
df.reply_log = [[np.log(df.reply[i]+1)] for i in range(len(df))]
# + slideshow={"slide_type": "subslide"}
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20.
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split
Xs_train, Xs_test, y_train, y_test = train_test_split(df.click_log, df.reply_log,test_size=0.2, random_state=0)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(Xs_train, y_train)
# R^2 of log(reply) regressed on log(click); 1 is perfect prediction
'Variance score: %.2f' % regr.score(Xs_test, y_test)
# + slideshow={"slide_type": "subslide"}
# Plot outputs
plt.scatter(Xs_test, y_test, color='black')
plt.plot(Xs_test, regr.predict(Xs_test), color='blue', linewidth=3)
plt.show()
# + slideshow={"slide_type": "subslide"}
try:
    from sklearn.model_selection import cross_val_score
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import cross_val_score
regr = linear_model.LinearRegression()
scores = cross_val_score(regr, df.click_log, \
                         df.reply_log, cv = 3)
scores.mean()
# + slideshow={"slide_type": "subslide"}
regr = linear_model.LinearRegression()
scores = cross_val_score(regr, df.click_log,
                         df.reply_log, cv =5)
scores.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# > # 使用sklearn做logistic回归
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "subslide"}
# - logistic回归是一个分类算法而不是一个回归算法。
# - 可根据已知的一系列因变量估计离散数值(比方说二进制数值 0 或 1 ,是或否,真或假)。
# - 简单来说,它通过将数据拟合进一个逻辑函数(logistic function)来预估一个事件出现的概率。
# - 因此,它也被叫做逻辑回归。因为它预估的是概率,所以它的输出值大小在 0 和 1 之间(正如所预计的一样)。
# + [markdown] slideshow={"slide_type": "subslide"}
# $$odds= \frac{p}{1-p} = \frac{probability\: of\: event\: occurrence} {probability \:of \:not\: event\: occurrence}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ln(odds)= ln(\frac{p}{1-p})$$
# + [markdown] slideshow={"slide_type": "fragment"}
# $$logit(x) = ln(\frac{p}{1-p}) = b_0+b_1X_1+b_2X_2+b_3X_3....+b_kX_k$$
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + slideshow={"slide_type": "subslide"}
# Flag threads whose title marks them as reposts (title contains u'转载').
repost = [1 if u'转载' in title else 0 for title in df.title]
# + slideshow={"slide_type": "fragment"}
# Two-feature design matrix: [click count, reply count] per thread.
data_X = [[c, r] for c, r in zip(df.click, df.reply)]
data_X[:3]
# + slideshow={"slide_type": "subslide"}
from sklearn.linear_model import LogisticRegression
df['repost'] = repost
model = LogisticRegression()
model.fit(data_X,df.repost)
model.score(data_X,df.repost)
# + slideshow={"slide_type": "subslide"}
def randomSplitLogistic(dataX, dataY, num):
    """Randomly hold out `num` rows as a test set (features kept as-is).

    Unlike randomSplit, feature rows are appended unchanged, so dataX should
    already be 2-D (a list of feature lists).

    Returns (dataX_train, dataX_test, dataY_train, dataY_test).
    """
    dataX_train = []
    dataX_test = []
    dataY_train = []
    dataY_test = []
    import random
    # BUG FIX: sample indices from len(dataX), not the global `df`; the old
    # version silently produced short test sets whenever len(df) != len(dataX).
    # A set gives O(1) membership tests.
    test_index = set(random.sample(range(len(dataX)), num))
    for k in range(len(dataX)):
        if k in test_index:
            dataX_test.append(dataX[k])
            dataY_test.append(dataY[k])
        else:
            dataX_train.append(dataX[k])
            dataY_train.append(dataY[k])
    return dataX_train, dataX_test, dataY_train, dataY_test,
# + slideshow={"slide_type": "subslide"}
# Split the data into training/testing sets (20 random rows held out)
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
# Create logistic regression object
log_regr = LogisticRegression()
# Train the model using the training sets
log_regr.fit(data_X_train, data_y_train)
# NOTE: for classifiers score() is mean accuracy, not explained variance;
# 1 is perfect prediction
'Variance score: %.2f' % log_regr.score(data_X_test, data_y_test)
# + slideshow={"slide_type": "subslide"}
y_true, y_pred = data_y_test, log_regr.predict(data_X_test)
# + slideshow={"slide_type": "fragment"}
y_true, y_pred
# + slideshow={"slide_type": "subslide"}
# Precision/recall/F1 per class on the held-out rows.
print(classification_report(y_true, y_pred))
# + slideshow={"slide_type": "subslide"}
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20.
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split
Xs_train, Xs_test, y_train, y_test = train_test_split(data_X, df.repost, test_size=0.2, random_state=42)
# + slideshow={"slide_type": "subslide"}
# Create logistic regression object
log_regr = LogisticRegression()
# Train the model using the training sets
log_regr.fit(Xs_train, y_train)
# score() is mean accuracy for classifiers; 1 is perfect prediction
'Variance score: %.2f' % log_regr.score(Xs_test, y_test)
# + slideshow={"slide_type": "subslide"}
print('Logistic score for test set: %f' % log_regr.score(Xs_test, y_test))
print('Logistic score for training set: %f' % log_regr.score(Xs_train, y_train))
y_true, y_pred = y_test, log_regr.predict(Xs_test)
print(classification_report(y_true, y_pred))
# + slideshow={"slide_type": "subslide"}
logre = LogisticRegression()
scores = cross_val_score(logre, data_X, df.repost, cv = 3)
scores.mean()
# + slideshow={"slide_type": "subslide"}
logre = LogisticRegression()
# Standardize features before cross-validating; scaling matters for
# regularized logistic regression.
data_X_scale = scale(data_X)
scores = cross_val_score(logre, data_X_scale, df.repost, cv = 3)
scores.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# > # 使用sklearn实现贝叶斯预测
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "subslide"}
# # Naive Bayes algorithm
#
# It is a classification technique based on Bayes’ Theorem with an assumption of independence among predictors.
#
# In simple terms, a Naive Bayes classifier assumes that the presence of a particular feature in a class is unrelated to the presence of any other feature.
#
# why it is known as ‘Naive’? For example, a fruit may be considered to be an apple if it is red, round, and about 3 inches in diameter. Even if these features depend on each other or upon the existence of the other features, all of these properties independently contribute to the probability that this fruit is an apple.
# + [markdown] slideshow={"slide_type": "subslide"}
# 贝叶斯定理为使用$p(c)$, $p(x)$, $p(x|c)$ 计算后验概率$P(c|x)$提供了方法:
# + [markdown] slideshow={"slide_type": "fragment"}
# $$
# p(c|x) = \frac{p(x|c) p(c)}{p(x)}
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - P(c|x) is the posterior probability of class (c, target) given predictor (x, attributes).
# - P(c) is the prior probability of class.
# - P(x|c) is the likelihood which is the probability of predictor given class.
# - P(x) is the prior probability of predictor.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# Step 1: Convert the data set into a frequency table
#
# Step 2: Create Likelihood table by finding the probabilities like:
# - p(Overcast) = 0.29, p(rainy) = 0.36, p(sunny) = 0.36
# - p(playing) = 0.64, p(rest) = 0.36
#
# Step 3: Now, use Naive Bayesian equation to calculate the posterior probability for each class. The class with the highest posterior probability is the outcome of prediction.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Problem: Players will play if weather is sunny. Is this statement is correct?
#
# We can solve it using above discussed method of posterior probability.
#
# $P(Yes | Sunny) = \frac{P( Sunny | Yes) * P(Yes) } {P (Sunny)}$
#
# Here we have P (Sunny |Yes) = 3/9 = 0.33, P(Sunny) = 5/14 = 0.36, P( Yes)= 9/14 = 0.64
#
# Now, $P (Yes | Sunny) = \frac{0.33 * 0.64}{0.36} = 0.60$, which has higher probability.
# + slideshow={"slide_type": "subslide"}
from sklearn import naive_bayes
# List the classifiers available in the naive_bayes module.
' '.join(dir(naive_bayes))
# + [markdown] slideshow={"slide_type": "fragment"}
# - naive_bayes.GaussianNB Gaussian Naive Bayes (GaussianNB)
# - naive_bayes.MultinomialNB([alpha, ...]) Naive Bayes classifier for multinomial models
# - naive_bayes.BernoulliNB([alpha, binarize, ...]) Naive Bayes classifier for multivariate Bernoulli models.
# + slideshow={"slide_type": "subslide"}
#Import Library of Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
import numpy as np
#assigning predictor and target variables (toy 2-D points, class labels 3/4)
x= np.array([[-3,7],[1,5], [1,2], [-2,0], [2,3], [-4,0], [-1,1], [1,1], [-2,2], [2,7], [-4,1], [-2,7]])
Y = np.array([3, 3, 3, 3, 4, 3, 3, 4, 3, 4, 4, 4])
# + slideshow={"slide_type": "subslide"}
#Create a Gaussian Classifier
model = GaussianNB()
# Train the model using the first 8 samples as the training set
model.fit(x[:8], Y[:8])
#Predict Output for two new points
predicted= model.predict([[1,2],[3,4]])
predicted
# + [markdown] slideshow={"slide_type": "subslide"}
# # cross-validation
#
# k-fold CV, the training set is split into k smaller sets (other approaches are described below, but generally follow the same principles). The following procedure is followed for each of the k “folds”:
# - A model is trained using k-1 of the folds as training data;
# - the resulting model is validated on the remaining part of the data (i.e., it is used as a test set to compute a performance measure such as accuracy).
# + slideshow={"slide_type": "subslide"}
# Re-split click/reply; randomSplit wraps each click count in a one-element
# list, so the result is a valid 2-D X for the GaussianNB model above.
data_X_train, data_X_test, data_y_train, data_y_test = randomSplit(df.click, df.reply, 20)
# Train the model using the training sets
model.fit(data_X_train, data_y_train)
#Predict Output
predicted= model.predict(data_X_test)
predicted
# + slideshow={"slide_type": "subslide"}
model.score(data_X_test, data_y_test)
# + slideshow={"slide_type": "subslide"}
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20.
try:
    from sklearn.model_selection import cross_val_score
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import cross_val_score
model = GaussianNB()
scores = cross_val_score(model, [[c] for c in df.click],\
                         df.reply, cv = 7)
scores.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# > # 使用sklearn实现决策树
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "subslide"}
# # 决策树
# - 这个监督式学习算法通常被用于分类问题。
# - 它同时适用于分类变量和连续因变量。
# - 在这个算法中,我们将总体分成两个或更多的同类群。
# - 这是根据最重要的属性或者自变量来分成尽可能不同的组别。
#
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## 在上图中你可以看到,根据多种属性,人群被分成了不同的四个小组,来判断 “他们会不会去玩”。
# ### 为了把总体分成不同组别,需要用到许多技术,比如说 Gini、Information Gain、Chi-square、entropy。
# + slideshow={"slide_type": "subslide"}
from sklearn import tree
# CART decision tree using Gini impurity as the split criterion.
model = tree.DecisionTreeClassifier(criterion='gini')
# + slideshow={"slide_type": "fragment"}
# Hold out 20 random rows, fit, and report *training* accuracy.
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
model.fit(data_X_train,data_y_train)
model.score(data_X_train,data_y_train)
# + slideshow={"slide_type": "subslide"}
# Predict
model.predict(data_X_test)
# + slideshow={"slide_type": "fragment"}
# crossvalidation: mean accuracy over 3 folds
scores = cross_val_score(model, data_X, df.repost, cv = 3)
scores.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# > # 使用sklearn实现SVM支持向量机
# ***
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# - 将每个数据在N维空间中用点标出(N是你所有的特征总数),每个特征的值是一个坐标的值。
# - 举个例子,如果我们只有身高和头发长度两个特征,我们会在二维空间中标出这两个变量,每个点有两个坐标(这些坐标叫做支持向量)。
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# - 现在,我们会找到将两组不同数据分开的一条直线。
# - 两个分组中距离最近的两个点到这条线的距离同时最优化。
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## 上面示例中的黑线将数据分类优化成两个小组
# - 两组中距离最近的点(图中A、B点)到达黑线的距离满足最优条件。
# - 这条直线就是我们的分割线。接下来,测试数据落到直线的哪一边,我们就将它分到哪一类去。
# -
from sklearn import svm
# Create SVM classification object (RBF kernel by default)
model=svm.SVC()
' '.join(dir(svm))
# + slideshow={"slide_type": "subslide"}
# Hold out 20 random rows, fit, and report training accuracy.
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
model.fit(data_X_train,data_y_train)
model.score(data_X_train,data_y_train)
# + slideshow={"slide_type": "fragment"}
# Predict
model.predict(data_X_test)
# + slideshow={"slide_type": "subslide"}
# crossvalidation: mean accuracy for several fold counts
scores = []
cvs = [3, 5, 10, 25, 50, 75, 100]
for i in cvs:
    score = cross_val_score(model, data_X, df.repost,
                            cv = i)
    scores.append(score.mean() ) # Try to tune cv
# + slideshow={"slide_type": "subslide"}
# Accuracy as a function of the number of folds
plt.plot(cvs, scores, 'b-o')
plt.xlabel('$cv$', fontsize = 20)
plt.ylabel('$Score$', fontsize = 20)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
#
#
# > # 泰坦尼克号数据分析
#
# 王成军
#
# <EMAIL>
#
# 计算传播网 http://computational-communication.com
# + slideshow={"slide_type": "slide"}
#Import the Numpy library
import numpy as np
#Import 'tree' from scikit-learn library
from sklearn import tree
# + slideshow={"slide_type": "slide"}
import pandas as pd
# Titanic training set (one row per passenger)
train = pd.read_csv('../data/tatanic_train.csv', sep = ",")
# -
from sklearn.naive_bayes import GaussianNB
train["Age"] = train["Age"].fillna(train["Age"].median())
train["Fare"] = train["Fare"].fillna(train["Fare"].median())
# BUG FIX: x existed only as a commented-out line, so every fit below raised
# NameError; build the single-feature 2-D design matrix from Age.
x = [[i] for i in train['Age']]
# Target: integer-binned fares. (The original's y = train['Age'] was dead
# code, immediately overwritten by the next line.)
y = train['Fare'].astype(int)
# +
#Create a Gaussian Classifier
model = GaussianNB()
# Train the model using the first 80 rows
nb = model.fit(x[:80], y[:80])
# nb.score(x, y)
# -
help(GaussianNB)
# BUG FIX: fit() needs both X and y; the original model.fit(x) raised TypeError.
model.fit(x, y)
# + slideshow={"slide_type": "subslide"}
train.head()
# + slideshow={"slide_type": "slide"}
train["Age"] = train["Age"].fillna(train["Age"].median())
#Convert the male and female groups to integer form
# BUG FIX: use .loc instead of chained indexing (train["Sex"][mask] = ...),
# which raises SettingWithCopyWarning and may silently fail to write back.
train.loc[train["Sex"] == "male", "Sex"] = 0
train.loc[train["Sex"] == "female", "Sex"] = 1
#Impute the Embarked variable
train["Embarked"] = train["Embarked"].fillna('S')
#Convert the Embarked classes to integer form
train.loc[train["Embarked"] == "S", "Embarked"] = 0
train.loc[train["Embarked"] == "C", "Embarked"] = 1
train.loc[train["Embarked"] == "Q", "Embarked"] = 2
# + slideshow={"slide_type": "slide"}
#Create the target and features numpy arrays: target, features_one
target = train['Survived'].values
features_one = train[["Pclass", "Sex", "Age", "Fare"]].values
#Fit your first decision tree: my_tree_one
my_tree_one = tree.DecisionTreeClassifier()
my_tree_one = my_tree_one.fit(features_one, target)
#Look at the importance of the included features and print the training accuracy
print(my_tree_one.feature_importances_)
print(my_tree_one.score(features_one, target))
# + slideshow={"slide_type": "slide"}
test = pd.read_csv('../data/tatanic_test.csv', sep = ",")
# Impute the single missing Fare with the median.
# BUG FIX: write through .loc instead of chained indexing (test.Fare[152] = ...
# and test["Sex"][mask] = ...), which raises SettingWithCopyWarning and may
# silently fail to write back.
test.loc[152, "Fare"] = test.Fare.median()
test["Age"] = test["Age"].fillna(test["Age"].median())
#Convert the male and female groups to integer form
test.loc[test["Sex"] == "male", "Sex"] = 0
test.loc[test["Sex"] == "female", "Sex"] = 1
#Impute the Embarked variable
test["Embarked"] = test["Embarked"].fillna('S')
#Convert the Embarked classes to integer form
test.loc[test["Embarked"] == "S", "Embarked"] = 0
test.loc[test["Embarked"] == "C", "Embarked"] = 1
test.loc[test["Embarked"] == "Q", "Embarked"] = 2
# Extract the features from the test set: Pclass, Sex, Age, and Fare.
test_features = test[["Pclass","Sex", "Age", "Fare"]].values
# Make your prediction using the test set
my_prediction = my_tree_one.predict(test_features)
# Create a data frame with two columns: PassengerId & Survived. Survived contains your predictions
PassengerId =np.array(test['PassengerId']).astype(int)
my_solution = pd.DataFrame(my_prediction, PassengerId, columns = ["Survived"])
# + slideshow={"slide_type": "subslide"}
my_solution[:3]
# + slideshow={"slide_type": "subslide"}
# Check that your data frame has 418 entries (one per test passenger)
my_solution.shape
# + slideshow={"slide_type": "subslide"}
# Write your solution to a csv file with the name my_solution.csv
my_solution.to_csv("../data/tatanic_solution_one.csv", index_label = ["PassengerId"])
# + slideshow={"slide_type": "slide"}
# Create a new array with the added features: features_two
features_two = train[["Pclass","Age","Sex","Fare",\
                      "SibSp", "Parch", "Embarked"]].values
#Control overfitting by setting "max_depth" to 10 and "min_samples_split" to 5 : my_tree_two
max_depth = 10
min_samples_split = 5
my_tree_two = tree.DecisionTreeClassifier(max_depth = max_depth,
                                          min_samples_split = min_samples_split,
                                          random_state = 1)
my_tree_two = my_tree_two.fit(features_two, target)
#Print the training accuracy of the new decision tree
print(my_tree_two.score(features_two, target))
# + slideshow={"slide_type": "slide"}
# create a new train set with the new variable
# NOTE(review): train_two = train aliases (does not copy) the DataFrame, so
# family_size is added to `train` as well — which is why features_three can be
# taken from `train` below.
train_two = train
train_two['family_size'] = train.SibSp + train.Parch + 1
# Create a new decision tree my_tree_three
features_three = train[["Pclass", "Sex", "Age", \
                        "Fare", "SibSp", "Parch", "family_size"]].values
my_tree_three = tree.DecisionTreeClassifier()
my_tree_three = my_tree_three.fit(features_three, target)
# Print the training accuracy of this decision tree
print(my_tree_three.score(features_three, target))
# + slideshow={"slide_type": "slide"}
#Import the `RandomForestClassifier`
from sklearn.ensemble import RandomForestClassifier
#We want the Pclass, Age, Sex, Fare,SibSp, Parch, and Embarked variables
features_forest = train[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
#Building the Forest: my_forest
n_estimators = 100
forest = RandomForestClassifier(max_depth = 10, min_samples_split=2,
                                n_estimators = n_estimators, random_state = 1)
my_forest = forest.fit(features_forest, target)
#Print the training accuracy of the random forest
print(my_forest.score(features_forest, target))
#Compute predictions and print the length of the prediction vector:test_features, pred_forest
test_features = test[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
pred_forest = my_forest.predict(test_features)
print(len(test_features))
print(pred_forest[:3])
# + slideshow={"slide_type": "slide"}
#Request and print the `.feature_importances_` attribute
print(my_tree_two.feature_importances_)
print(my_forest.feature_importances_)
#Compute and print the mean accuracy score for both models
# (features_two and features_forest select the same seven columns in the same
# order, so scoring my_forest on features_two matches its training features.)
print(my_tree_two.score(features_two, target))
print(my_forest.score(features_two, target))
# + [markdown] slideshow={"slide_type": "slide"}
# # 阅读材料
# 机器学习算法的要点(附 Python 和 R 代码)http://blog.csdn.net/a6225301/article/details/50479672
#
# The "Python Machine Learning" book code repository and info resource https://github.com/rasbt/python-machine-learning-book
#
# An Introduction to Statistical Learning (<NAME>, Hastie, Tibshirani, 2013) : Python code https://github.com/JWarmenhoven/ISLR-python
#
# BuildingMachineLearningSystemsWithPython https://github.com/luispedro/BuildingMachineLearningSystemsWithPython
# + [markdown] slideshow={"slide_type": "slide"}
# # 作业
# https://www.datacamp.com/community/tutorials/the-importance-of-preprocessing-in-data-science-and-the-machine-learning-pipeline-i-centering-scaling-and-k-nearest-neighbours
| code/09.machine_learning_with_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py37]
# language: python
# name: conda-env-py37-py
# ---
# + [markdown] colab_type="text" id="cdO_RxQZLahB"
# # Demo for paper "First Order Motion Model for Image Animation"
# + [markdown] colab_type="text" id="GCDNKsEGLtR6"
# **Clone repository**
# + colab={"base_uri": "https://localhost:8080/", "height": 86} colab_type="code" id="UCMFMJV7K-ag" outputId="d4187a1d-60b7-46d5-cf05-b5b555d11138"
# # !git clone https://github.com/AliaksandrSiarohin/first-order-model
# + [markdown] colab_type="text" id="VsgVK1EURXkd"
# **sample data: https://drive.google.com/drive/folders/1kZ1gCnpfU0BnpdU47pLM_TQ6RypDDqgw?usp=sharing**
# + [markdown] colab_type="text" id="rW-ipQXPOWUo"
# **Load driving video and source image**
# + colab={"base_uri": "https://localhost:8080/", "height": 453} colab_type="code" id="Oxi6-riLOgnm" outputId="df5ef72c-6133-4607-8684-045613fd81f2"
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML
import warnings
warnings.filterwarnings("ignore")
# Source face image and driving video for the demo.
source_image = imageio.imread('./data/02.png')
driving_video = imageio.mimread('./data/04.mp4')
#Resize image and video to 256x256, keeping only the RGB channels (drops alpha)
source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
def display(source, driving, generated=None):
    """Build a side-by-side animation: source | driving frame | (generated frame)."""
    show_generated = generated is not None
    # Widen the figure when a third (generated) panel is shown.
    fig = plt.figure(figsize=(8 + 4 * show_generated, 6))
    frames = []
    for idx in range(len(driving)):
        panels = [source, driving[idx]]
        if show_generated:
            panels.append(generated[idx])
        artist = plt.imshow(np.concatenate(panels, axis=1), animated=True)
        plt.axis('off')
        frames.append([artist])
    ani = animation.ArtistAnimation(fig, frames, interval=50, repeat_delay=1000)
    plt.close()  # avoid a duplicate static figure in the notebook output
    return ani
# HTML(display(source_image, driving_video).to_html5_video())
# pip install imageio-ffmpeg
# pip install scikit-image
# conda install -c conda-forge ffmpeg
# + [markdown] colab_type="text" id="xjM7ubVfWrwT"
# **Create a model and load checkpoints**
# + colab={} colab_type="code" id="3FQiXqQPWt5B"
from model.demo import load_checkpoints
# Build generator/keypoint-detector networks from the config and load pretrained weights.
generator, kp_detector = load_checkpoints(config_path='./model/config/vox-256.yaml',
                            checkpoint_path='vox-cpk.pth.tar')
# + [markdown] colab_type="text" id="fdFdasHEj3t7"
# **Perform image animation**
# + colab={"base_uri": "https://localhost:8080/", "height": 471} colab_type="code" id="SB12II11kF4c" outputId="c7f18b37-0d41-4761-e354-5b0c36cae30d"
from model.demo import make_animation
from skimage import img_as_ubyte
# relative=True animates with keypoint *displacements* taken from the driving video.
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
#save resulting video (frames converted from float [0,1] to uint8)
imageio.mimsave('./output/generated_RelativeKeypointDisplacement.mp4', [img_as_ubyte(frame) for frame in predictions])
#video can be downloaded from /content folder
# HTML(display(source_image, driving_video, predictions).to_html5_video())
# + [markdown] colab_type="text" id="-tJN01xQCpqH"
# **In the cell above we use relative keypoint displacement to animate the objects. We can use absolute coordinates instead, but in this way all the object proportions will be inherited from the driving video. For example, Putin's haircut will be extended to match Trump's haircut.**
# + colab={"base_uri": "https://localhost:8080/", "height": 471} colab_type="code" id="aOE_W_kfC9aX" outputId="de247531-c930-45a0-df41-e19a9373df2c"
# Absolute keypoint coordinates; adapt_movement_scale compensates for size differences.
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True)
imageio.mimsave('./output/generated_AbsoluteKeypointDisplacement.mp4', [img_as_ubyte(frame) for frame in predictions])
# HTML(display(source_image, driving_video, predictions).to_html5_video())
# + [markdown] colab_type="text" id="QnXrecuX6_Kw"
# ## Running on your data
#
# **First we need to crop a face from both the source image and the video. A simple graphic editor like Paint can be used for cropping from the image; cropping from a video is more complicated. You can use ffmpeg for this.**
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="brJlA_5o72Xc" outputId="75c10f77-409d-4793-b0cc-263bb20e6f76"
# # !ffmpeg -i /content/gdrive/My\ Drive/first-order-motion-model/07.mkv -ss 00:08:57.50 -t 00:00:08 -filter:v "crop=600:600:760:50" -async 1 hinton.mp4
# + [markdown] colab_type="text" id="NSHSxV8iGybI"
# **Another possibility is to use some screen recording tool, or if you need to crop many images at once, use a face detector (https://github.com/1adrianb/face-alignment); see https://github.com/AliaksandrSiarohin/video-preprocessing for the preprocessing of VoxCeleb.**
# + colab={"base_uri": "https://localhost:8080/", "height": 471} colab_type="code" id="d8kQ3U7MHqh-" outputId="e1369c7c-8b23-4f9a-b6bc-9edd73c1f174"
# source_image = imageio.imread('./data/02.png')
# driving_video = imageio.mimread('04.mp4', memtest=False)
# Re-run the pipeline on user-supplied media.
source_image = imageio.imread('./data/02.png')
driving_video = imageio.mimread('./data/DZ1BJU.gif')
# Driving video can accept both .gif and .mp4 input
#Resize image and video to 256x256 (RGB only)
source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True,
                             adapt_movement_scale=True)
# HTML(display(source_image, driving_video, predictions).to_html5_video())
#save resulting video
imageio.mimsave('./output/generated_RelativeKeypointDisplacement.mp4', [img_as_ubyte(frame) for frame in predictions])
# +
# faces
# source_image = imageio.imread('./data/02.png')
# driving_video = imageio.mimread('./data/04.mp4')
# supposedly any image/video pairs should work as long as user cropped them already
# +
# # !python firstordermotion.py
| vision/first_order_motion/first_order_model_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Bayesian and Gaussian Process regression**
#
# Notebook version: 1.0 (Oct 16, 2015)
#
# Authors: <NAME>
# <NAME> (<EMAIL>)
#
# Changes: v.1.0 - First version. Python version
#
# Pending changes:
# +
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
from scipy import spatial
import pylab
pylab.rcParams['figure.figsize'] = 8, 5
# -
# # 1. Introduction
#
# In this exercise the student will review several key concepts of Bayesian regression and Gaussian processes.
#
# For the purpose of this exercise, the regression model is
#
# $${s}({\bf x}) = f({\bf x}) + \varepsilon$$
#
# where ${s}({\bf x})$ is the output corresponding to input ${\bf x}$, $f({\bf x})$ is the unobservable latent function, and $\varepsilon$ is white zero-mean Gaussian noise, i.e., $\varepsilon \sim {\cal N}(0,\sigma_\varepsilon^2)$.
#
# ### Practical considerations
#
# - Though sometimes unavoidable, it is recommended not to use explicit matrix inversion whenever possible. For instance, if an operation like ${\mathbf A}^{-1} {\mathbf b}$ must be performed, it is preferable to code it using python $\mbox{numpy.linalg.lstsq}$ function (see http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html), which provides the LS solution to the overdetermined system ${\mathbf A} {\mathbf w} = {\mathbf b}$.
#
# - Sometimes, the computation of $\log|{\mathbf A}|$ (where ${\mathbf A}$ is a positive definite matrix) can overflow available precision, producing incorrect results. A numerically more stable alternative, providing the same result is $2\sum_i \log([{\mathbf L}]_{ii})$, where $\mathbf L$ is the Cholesky decomposition of $\mathbf A$ (i.e., ${\mathbf A} = {\mathbf L}^\top {\mathbf L}$), and $[{\mathbf L}]_{ii}$ is the $i$th element of the diagonal of ${\mathbf L}$.
#
# - Non-degenerate covariance matrices, such as the ones in this exercise, are always positive definite. It may happen, as a consequence of chained rounding errors, that a matrix which was mathematically expected to be positive definite, turns out not to be so. This implies its Cholesky decomposition will not be available. A quick way to palliate this problem is by adding a small number (such as $10^{-6}$) to the diagonal of such matrix.
# ### Reproducibility of computations
#
# To guarantee the exact reproducibility of the experiments, it may be useful to start your code initializing the seed of the random numbers generator, so that you can compare your results with the ones given in this notebook.
# Fix the RNG seed so the sampled weights/noise (and hence all results below) are reproducible.
np.random.seed(3)
# # 2. Bayesian regression with a linear model
#
# During this section, we will assume the following parametric model for the latent function
#
# $$f({\bf x}) = {\bf x}^\top {\bf w}$$
#
# i.e., a linear model in the observations, where ${\bf w}$ contains the parameters of the model. The <i>a priori</i> distribution of ${\bf w}$ is assumed to be
#
# $${\bf w} \sim {\cal N}({\bf 0}, \sigma_0^2~{\bf I})$$
# ## 2.1. Synthetic data generation
#
# First, we are going to generate synthetic data (so that we have the ground-truth model) and use them to make sure everything works correctly and our estimations are sensible.
#
# Set parameters $\sigma_0^2 = 2$ and $\sigma_{\varepsilon}^2 = 0.2$. Generate a weight vector $\mbox{true_w}$ with two elements from the <i>a priori</i> distribution of the weights. This vector determines the regression line that we want to find (i.e., the optimum unknown solution).
#
# Generate an input matrix $\mbox{X}$ containing the constant term 1 in all elements of the first column and values between 0 and 2 (included), with a 0.1 step, in the second column.
#
# Finally, generate the output vector ${\mbox s}$ as the product $\mbox{X} \ast \mbox{true_w}$ plus Gaussian noise of pdf ${\cal N}(0,\sigma_\varepsilon^2)$ at each element.
#
# Plot the generated data. You will notice a linear behavior, but the presence of noise makes it hard to estimate precisely the original straight line that generated them (which is stored in $\mbox{true_w}$).
# +
# Parameter settings
sigma_0 = np.sqrt(2)
sigma_eps = np.sqrt(0.2)
dim_x = 2
x_min = 0
x_max = 2
n_points = 21
#Optimum solution
true_w = sigma_0 * np.random.randn(dim_x)
#Training datapoints
X = np.linspace(x_min,x_max,n_points)
col_1 = np.ones( (n_points,) )
Xe = np.vstack( (col_1,X) ).T
s = Xe.dot(true_w) + sigma_eps * np.random.randn(n_points)
#Plot training points
plt.scatter(X, s);
plt.xlabel('$x$',fontsize=14);
plt.ylabel('$s$',fontsize=14);
# -
# ## 2.2. Posterior pdf of the weight vector
#
# Let us see to which extent it is possible to determine the original straight line from observed data. Knowing that the generative model is linear (i.e., $f({\bf x}) = {\bf x}^\top{\bf w}$, and knowing also the prior pdf of weights $p({\bf w}) = {\cal N}({\bf 0},\sigma_0^2~{\bf I})$ and noise $p(\varepsilon) = {\cal N}(0,\sigma_\varepsilon^2)$, compute the posterior pdf of the weights, $p({\bf w}\mid{\bf s})$.
# Gaussian posterior of w given s:
#   Cov_w  = (X'X / sig_eps^2 + I / sig_0^2)^-1
#   mean_w = Cov_w X' s / sig_eps^2
Cov_w = np.linalg.inv( Xe.T.dot(Xe)/(sigma_eps**2) + np.eye(dim_x,dim_x)/(sigma_0**2) )
mean_w = Cov_w.dot(Xe.T).dot(s)/(sigma_eps**2)
# The result is:
# FIX: converted Python 2 `print` statements to the print() function so the
# cell runs under the notebook's declared Python 3 kernel (also valid in Python 2).
print('true_w = ' + str(true_w))
print('mean_w = ' + str(mean_w))
print('Cov_w = ' + str(Cov_w))
# ## 2.3. Sampling regression curves from the posterior
#
# Plot now the functions corresponding to different samples drawn from the posterior distribution of the weight vector. To this end, generate random vectors ${\bf w}_l$ with $l = 1,\dots, 50$, from the posterior density of the weights, $p({\bf w}\mid{\bf s})$, and use them to generate 50 straight lines, $f({\bf x}^\ast) = {{\bf x}^\ast}^\top {\bf w}_l$, with the second component of ${\bf x}^\ast$ between $-1$ and $3$, with step $0.1$.
#
# Plot the original ground-truth straight line, corresponding to $\mbox{true_w}$, along with the $50$ generated straight lines and the original samples, all in the same plot. As you can check, the Bayesian model is not providing a single answer, but instead a density over them, from which we have extracted 50 options.
# +
# Definition of the interval for representation purposes
x2_min = -1
x2_max = 3
step = 0.1
# Input values for representation of the regression curves
X2 = np.arange(x2_min,x2_max+step,step=step)
col_1 = np.ones( (len(X2),) )
X2e = np.vstack( (col_1, X2) ).T
# For drawing weights from the posterior
L = np.linalg.cholesky(Cov_w)
for l in range(50):
w_l = L.dot(np.random.randn(dim_x)) + mean_w
plt.plot(X2, X2e.dot(w_l), 'c:');
# Plot as well the training points and the true model
plt.plot(X2, X2e.dot(true_w), 'b', label='True model', linewidth=2);
plt.plot(X,s,'r.',markersize=12);
plt.xlim((x2_min,x2_max));
plt.legend(loc='best')
plt.xlabel('$x$',fontsize=14);
plt.ylabel('$s$',fontsize=14);
# -
# ## 2.4. Plotting the confidence intervals
#
# On top of the previous figure (copy here your code from the previous section), plot functions
#
# $${\mathbb E}\left\{f({\bf x}^\ast)\mid{\bf s}\right\}$$
#
# and
#
# $${\mathbb E}\left\{f({\bf x}^\ast)\mid{\bf s}\right\} \pm 2 \sqrt{{\mathbb V}\left\{f({\bf x}^\ast)\mid{\bf s}\right\}}$$
#
# (i.e., the posterior mean of $f({\bf x}^\ast)$, as well as two standard deviations above and below).
#
# It is possible to show analytically that this region comprises $95.45\%$ probability of the posterior probability $p(f({\bf x}^\ast)\mid {\bf s})$ at each ${\bf x}^\ast$.
# +
# Definition of the interval for representation purposes
x2_min = -1
x2_max = 3
step = 0.1
# Input values for representation of the regression curves
X2 = np.arange(x2_min,x2_max+step,step=step)
col_1 = np.ones( (len(X2),) )
X2e = np.vstack( (col_1, X2) ).T
# For drawing weights from the posterior
L = np.linalg.cholesky(Cov_w)
for l in range(50):
w_l = L.dot(np.random.randn(dim_x)) + mean_w
plt.plot(X2,X2e.dot(w_l),'c:');
# Plot as well the training points and the true model
plt.plot(X2,X2e.dot(true_w),'b',label='True model',linewidth=2);
plt.plot(X,s,'r.',markersize=10);
plt.xlim((x2_min,x2_max));
#Plot also the posterior mean and posterior mean \pm 2 std
mean_ast = X2e.dot(mean_w)
plt.plot(X2,mean_ast,'m',label='Predictive mean',linewidth=2);
std_ast = np.sqrt(np.diagonal(X2e.dot(Cov_w).dot(X2e.T)))
plt.plot(X2,mean_ast+2*std_ast,'m--',label='Predictive mean $\pm$ 2std',linewidth=2);
plt.plot(X2,mean_ast-2*std_ast,'m--',linewidth=3);
plt.legend(loc='best');
plt.xlabel('$x$',fontsize=14);
plt.ylabel('$s$',fontsize=14);
# -
# Plot now ${\mathbb E}\left\{s({\bf x}^\ast)\mid{\bf s}\right\} \pm 2 \sqrt{{\mathbb V}\left\{s({\bf x}^\ast)\mid{\bf s}\right\}}$ (note that the posterior means of $f({\bf x}^\ast)$ and $s({\bf x}^\ast)$ are the same, so there is no need to plot it again). Notice that $95.45\%$ of observed data lie now within the newly designated region. These new limits establish a confidence range for our predictions. See how the uncertainty grows as we move away from the interpolation region to the extrapolation areas.
# +
# Definition of the interval for representation purposes
x2_min = -1
x2_max = 3
step = 0.1
# Input values for representation of the regression curves
X2 = np.arange(x2_min,x2_max+step,step=step)
col_1 = np.ones( (len(X2),) )
X2e = np.vstack( (col_1,X2) ).T
# For drawing weights from the posterior
L = np.linalg.cholesky(Cov_w)
for l in range(50):
w_l = L.dot(np.random.randn(dim_x)) + mean_w
plt.plot(X2,X2e.dot(w_l),'c:');
# Plot as well the training points and the true model
plt.plot(X2,X2e.dot(true_w), 'b', label='True model', linewidth=2);
plt.plot(X,s,'r.', markersize=10);
plt.xlim((x2_min, x2_max));
#Plot also the posterior mean and posterior mean \pm 2 std
mean_ast = X2e.dot(mean_w)
plt.plot(X2,mean_ast, 'm', label='Predictive mean of f', linewidth=2);
std_ast = np.sqrt(np.diagonal(X2e.dot(Cov_w).dot(X2e.T)))
plt.plot(X2,mean_ast+2*std_ast, 'm--',label='Predictive mean of f $\pm$ 2std',linewidth=2);
plt.plot(X2,mean_ast-2*std_ast, 'm--',linewidth=2);
std_ast_eps = np.sqrt(np.diagonal(X2e.dot(Cov_w).dot(X2e.T))+ sigma_eps**2)
#Plot now the posterior mean and posterior mean \pm 2 std for s (i.e., adding the noise variance)
plt.plot(X2, mean_ast+2*std_ast_eps, 'm:', label='Predictive mean of s $\pm$ 2std',linewidth=2);
plt.plot(X2, mean_ast-2*std_ast_eps, 'm:', linewidth=2);
plt.legend(loc='best');
plt.xlabel('$x$',fontsize=14);
plt.ylabel('$s$',fontsize=14);
# -
# # 3. Bayesian Inference with real data. The stocks dataset.
#
#
# Once our code has been tested on synthetic data, we will use it with real data. Load and properly normalize data corresponding to the evolution of the stocks of 10 airline companies. This data set is an adaptation of the Stock dataset from http://www.dcc.fc.up.pt/~ltorgo/Regression/DataSets.html, which in turn was taken from the StatLib Repository, http://lib.stat.cmu.edu/
# +
# Load the stocks dataset (train/test inputs and targets) from the MATLAB file
matvar = scipy.io.loadmat('DatosLabReg.mat')
Xtrain = matvar['Xtrain']
Xtest = matvar['Xtest']
Ytrain = matvar['Ytrain']
Ytest = matvar['Ytest']
# Data normalization
# z-score using the TRAINING statistics only, so no test information leaks in
mean_x = np.mean(Xtrain,axis=0)
std_x = np.std(Xtrain,axis=0)
Xtrain = (Xtrain - mean_x) / std_x
Xtest = (Xtest - mean_x) / std_x
# Extend input data matrices with a column of 1's
# (the bias/intercept term is absorbed into the weight vector)
col_1 = np.ones( (Xtrain.shape[0],1) )
Xtrain_e = np.concatenate( (col_1,Xtrain), axis = 1 )
col_1 = np.ones( (Xtest.shape[0],1) )
Xtest_e = np.concatenate( (col_1,Xtest), axis = 1 )
# -
# After running this code, you will have inside matrix $\mbox{Xtrain_e}$ an initial column of ones and the evolution of (normalized) price for 9 airlines, whereas vector Ytrain will contain a single column with the price evolution of the tenth airline. The objective of the regression task is to estimate the price of the tenth airline from the prices of the other nine.
# ## 3.1. Hyperparameter selection
#
# Since the values $\sigma_0$ and $\sigma_\varepsilon$ are no longer known, a first rough estimation is needed (we will soon see how to estimate these values in a principled way).
#
# To this end, we will adjust them using the LS solution to the regression problem:
#
# - $\sigma_0^2$ will be taken as the average of the square values of ${\hat {\bf w}}_{LS}$
# - $\sigma_\varepsilon^2$ will be taken as two times the average of the square of the residuals when using ${\hat {\bf w}}_{LS}$
# LS fit used only to get rough hyperparameter estimates.
# rcond=None selects NumPy's machine-precision-based cutoff and silences the
# FutureWarning raised when rcond is left unspecified.
w_LS, residuals, rank, s = np.linalg.lstsq(Xtrain_e,Ytrain, rcond=None)
# sigma_0^2: mean squared LS weight
sigma_0 = np.sqrt(np.mean(w_LS**2))
# sigma_eps^2: twice the mean squared LS residual
sigma_eps = np.sqrt(2 * np.mean((Ytrain - Xtrain_e.dot(w_LS))**2))
# ## 3.2. Posterior pdf of the weight vector
#
# Using the previous values for the hyperparameters, compute the <i>a posteriori</i> mean and covariance matrix of the weight vector ${\bf w}$. Instead of two weights there will now be 10.
# +
dim_x = Xtrain_e.shape[1]
# Same Gaussian posterior as in Section 2.2, now with 10 weights.
Cov_w = np.linalg.inv( Xtrain_e.T.dot(Xtrain_e)/(sigma_eps**2) + np.eye(dim_x,dim_x)/(sigma_0**2) )
mean_w = Cov_w.dot(Xtrain_e.T).dot(Ytrain)/(sigma_eps**2)
# -
# The resulting posterior is:
# FIX: Python 2 `print` statements converted to print() for the Python 3 kernel.
print('mean_w = ' + str(mean_w))
print('Cov_w = ' + str(Cov_w))
# ## 3.3. Model assessment
#
# In order to verify the performance of the resulting model, compute the posterior mean and variance of each of the test outputs from the posterior over ${\bf w}$. I.e, compute ${\mathbb E}\left\{s({\bf x}^\ast)\mid{\bf s}\right\}$ and $\sqrt{{\mathbb V}\left\{s({\bf x}^\ast)\mid{\bf s}\right\}}$ for each test sample ${\bf x}^\ast$ contained in each row of $\mbox{Xtest}$. Be sure not to use the outputs $\mbox{Ytest}$ at any point during this process.
#
# Store the predictive mean and variance of all test samples in two vectors called $\mbox{m_y}$ and $\mbox{v_y}$, respectively.
# Predictive mean and variance of s(x*) for every test row;
# sigma_eps^2 adds the observation-noise variance to the latent variance.
m_y = Xtest_e.dot(mean_w)
v_y = np.diagonal(Xtest_e.dot(Cov_w).dot(Xtest_e.T)) + sigma_eps**2
# Compute now the mean square error (MSE) and the negative log-predictive density (NLPD) with the following code:
# +
from math import pi
MSE = np.mean((m_y - Ytest)**2)
NLPD = 0.5 * np.mean(((Ytest - m_y)**2)/(np.matrix(v_y).T) + 0.5*np.log(2*pi*np.matrix(v_y).T))
# -
# Results should be:
# FIX: Python 2 `print` statements converted to print() for the Python 3 kernel.
print('MSE = ' + str(MSE))
print('NLPD = ' + str(NLPD))
# These two measures reveal the quality of our predictor (with lower values revealing higher quality). The first measure (MSE) only compares the predictive mean with the actual value and always has a positive value (if zero was reached, it would mean a perfect prediction). It does not take into account predictive variance. The second measure (NLPD) takes into account both the deviation and the predictive variance (uncertainty) to measure the quality of the probabilistic prediction (a high error in a prediction that was already known to have high variance has a smaller penalty, but also, announcing a high variance when the prediction error is small won’t award such a good score).
# # 4. Non-linear regression with Gaussian Processes
#
# ## 4.1. Multidimensional regression
#
# Rather than using a parametric form for $f({\mathbf x})$, in this section we will use directly the values of the latent function that we will model with a Gaussian process
#
# $$f({\mathbf x}) \sim {\cal GP}\left(0,k_f({\mathbf x}_i,{\mathbf x}_j)\right),$$
#
# where we are assuming a zero mean, and where we will use the Ornstein-Uhlenbeck covariance function, which is defined as:
#
# $$k_f({\mathbf x}_i,{\mathbf x}_j) = \sigma_0^2 \exp \left( -\frac{1}{l}\|{\mathbf x}_i-{\mathbf x}_j\|\right)$$
#
# First, we will use the following gross estimation for the hyperparameters:
# +
# Gross initial estimates: signal std from the target spread, noise variance a
# tenth of the signal variance, and length-scale l = 8.
sigma_0 = np.std(Ytrain)
sigma_eps = sigma_0 / np.sqrt(10)
l = 8
# FIX: Python 2 `print` statements converted to print() for the Python 3 kernel.
print(sigma_0)
print(sigma_eps)
# -
# As we studied in a previous session, the joint distribution of the target values in the training set, ${\mathbf s}$, and the latent values corresponding to the test points, ${\mathbf f}^\ast$, is given by
#
# $$\left[\begin{array}{c}{\bf s}\\{\bf f}^\ast\end{array}\right]~\sim~{\cal N}\left({\bf 0},\left[\begin{array}{cc}{\bf K} + \sigma_\varepsilon^2 {\bf I}& {\bf K}_\ast^\top \\ {\bf K}_\ast & {\bf K}_{\ast\ast} \end{array}\right]\right)$$
#
# Using this model, obtain the posterior of ${\mathbf s}^\ast$ given ${\mathbf s}$. In particular, calculate the <i>a posteriori</i> predictive mean and standard deviations, ${\mathbb E}\left\{s({\bf x}^\ast)\mid{\bf s}\right\}$ and $\sqrt{{\mathbb V}\left\{s({\bf x}^\ast)\mid{\bf s}\right\}}$ for each test sample ${\bf x}^\ast$.
#
# Obtain the MSE and NLPD and compare them with those obtained Subsection 3.3.
# +
# Pairwise Euclidean distances for the OU kernel k(xi,xj) = sig0^2 exp(-||xi-xj||/l)
dist = spatial.distance.cdist(Xtrain,Xtrain,'euclidean')
dist_ss = spatial.distance.cdist(Xtest,Xtest,'euclidean')
dist_s = spatial.distance.cdist(Xtest,Xtrain,'euclidean')
K = (sigma_0**2)*np.exp(-dist/l)        # train/train
K_ss = (sigma_0**2)*np.exp(-dist_ss/l)  # test/test
K_s = (sigma_0**2)*np.exp(-dist_s/l)    # test/train
# Invert (K + sig_eps^2 I) once and reuse it for both the predictive mean and
# variance (the original computed this inverse twice).
K_reg_inv = np.linalg.inv(K + sigma_eps**2 * np.eye(K.shape[0]))
m_y = K_s.dot(K_reg_inv).dot(Ytrain)
# FIX: Python 2 `print` statements converted to print() for the Python 3 kernel.
print(m_y)
# Predictive variance of s*: GP posterior variance plus the noise variance
v_y = np.diagonal(K_ss - K_s.dot(K_reg_inv).dot(K_s.T)) + sigma_eps**2
MSE = np.mean((m_y - Ytest)**2)
NLPD = 0.5 * np.mean(((Ytest - m_y)**2)/(np.matrix(v_y).T) + 0.5*np.log(2*pi*np.matrix(v_y).T))
# -
# You should obtain the following results:
print('MSE = ' + str(MSE))
print('NLPD = ' + str(NLPD))
# ## 4.2. Unidimensional regression
#
# Use now only the first company to compute the non-linear regression. Obtain the posterior
# distribution of $f({\mathbf x}^\ast)$ evaluated at the test values ${\mathbf x}^\ast$, i.e, $p(f({\mathbf x}^\ast)\mid {\mathbf s})$.
#
# This distribution is Gaussian, with mean ${\mathbb E}\left\{f({\bf x}^\ast)\mid{\bf s}\right\}$ and a covariance matrix $\text{Cov}\left[f({\bf x}^\ast)\mid{\bf s}\right]$. Sample 50 random vectors from the distribution and plot them vs. the values $x^\ast$, together with the test samples.
#
# These 50 samples of the function space are analogous to the 50 straight lines that were generated in Subsection 2.3. Again, the Bayesian model does not provide a single function, but a pdf over functions, from which we extracted 50 possible functions.
# +
# Use only the first company's (normalized) price as the single input feature.
X_1d = np.matrix(Xtrain[:,0]).T
Xt_1d = np.matrix(Xtest[:,0]).T
Xt_1d = np.sort(Xt_1d,axis=0) #We sort the vector for representational purposes
dist = spatial.distance.cdist(X_1d,X_1d,'euclidean')
dist_ss = spatial.distance.cdist(Xt_1d,Xt_1d,'euclidean')
dist_s = spatial.distance.cdist(Xt_1d,X_1d,'euclidean')
K = (sigma_0**2)*np.exp(-dist/l)
K_ss = (sigma_0**2)*np.exp(-dist_ss/l)
K_s = (sigma_0**2)*np.exp(-dist_s/l)
# GP posterior mean and covariance of f at the test inputs
m_y = K_s.dot(np.linalg.inv(K + sigma_eps**2 * np.eye(K.shape[0]))).dot((Ytrain))
v_f = K_ss - K_s.dot(np.linalg.inv(K + sigma_eps**2 * np.eye(K.shape[0]))).dot(K_s.T)
# 1e-10 jitter keeps the covariance numerically positive definite so the
# Cholesky factorization succeeds (see the note in Section 1)
L = np.linalg.cholesky(v_f+1e-10*np.eye(v_f.shape[0]))
# Draw 50 latent functions f* = m + L z from the posterior and plot them
for iter in range(50):
    f_ast = L.dot(np.random.randn(len(Xt_1d),1)) + m_y
    plt.plot(np.array(Xt_1d)[:,0],f_ast[:,0],'c:');
# Plot as well the test points
plt.plot(np.array(Xtest[:,0]),Ytest[:,0],'r.',markersize=12);
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0],'b-',linewidth=3,label='Predictive mean');
plt.legend(loc='best')
plt.xlabel('x',fontsize=18);
plt.ylabel('s',fontsize=18);
# -
# Plot again the previous figure, this time including in your plot the confidence interval delimited by two standard deviations of the prediction, similarly to what was done in Subsection 2.4. You can observe how $95.45\%$ of observed data fall within the designated area.
# +
X_1d = np.matrix(Xtrain[:,0]).T
Xt_1d = np.matrix(Xtest[:,0]).T
idx = np.argsort(Xt_1d,axis=0) #We sort the vector for representational purposes
Xt_1d = np.sort(Xt_1d,axis=0)
idx = np.array(idx).flatten().T
# Reorder the test targets to match the sorted test inputs
Ytest = Ytest[idx]
dist = spatial.distance.cdist(X_1d,X_1d,'euclidean')
dist_ss = spatial.distance.cdist(Xt_1d,Xt_1d,'euclidean')
dist_s = spatial.distance.cdist(Xt_1d,X_1d,'euclidean')
K = (sigma_0**2)*np.exp(-dist/l)
K_ss = (sigma_0**2)*np.exp(-dist_ss/l)
K_s = (sigma_0**2)*np.exp(-dist_s/l)
# Invert (K + sig_eps^2 I) once and reuse it (the original inverted it twice)
K_reg_inv = np.linalg.inv(K + sigma_eps**2 * np.eye(K.shape[0]))
m_y = K_s.dot(K_reg_inv).dot(Ytrain)
v_f = K_ss - K_s.dot(K_reg_inv).dot(K_s.T)
v_f_diag = np.diagonal(v_f)
# FIX: the +/- 2 std bands must use the STANDARD DEVIATION; the original used
# the variance (2*v_f_diag) directly.
std_f = np.sqrt(v_f_diag)
# std of s = sqrt(var_f + sigma_eps^2); standard deviations are not additive,
# so the original's "+2*v_f_diag+2*sigma_eps" band was also wrong.
std_s = np.sqrt(v_f_diag + sigma_eps**2)
L = np.linalg.cholesky(v_f+1e-10*np.eye(v_f.shape[0]))
for iter in range(50):
    f_ast = L.dot(np.random.randn(len(Xt_1d),1)) + m_y
    plt.plot(np.array(Xt_1d)[:,0],f_ast[:,0],'c:');
# Plot the test points against the SORTED inputs: Ytest was reordered above, so
# plotting it against the unsorted Xtest[:,0] (as the original did) scrambles the pairs
plt.plot(np.array(Xt_1d)[:,0],Ytest[:,0],'r.',markersize=12);
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0],'b-',linewidth=3,label='Predictive mean');
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0]+2*std_f,'m--',label='Predictive mean of f $\pm$ 2std',linewidth=3);
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0]-2*std_f,'m--',linewidth=3);
#Plot now the posterior mean and posterior mean \pm 2 std for s (i.e., adding the noise variance)
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0]+2*std_s,'m:',label='Predictive mean of s $\pm$ 2std',linewidth=3);
plt.plot(np.array(Xt_1d)[:,0],m_y[:,0]-2*std_s,'m:',linewidth=3);
plt.legend(loc='best')
plt.xlabel('x',fontsize=18);
plt.ylabel('s',fontsize=18);
# -
# Compute now the MSE and NLPD of the model. The correct results are given below:
MSE = np.mean((m_y - Ytest)**2)
# NLPD uses the full predictive variance of s (latent variance + noise variance)
v_y = np.diagonal(v_f) + sigma_eps**2
NLPD = 0.5 * np.mean(((Ytest - m_y)**2)/(np.matrix(v_y).T) + 0.5*np.log(2*pi*np.matrix(v_y).T))
# FIX: Python 2 `print` statements converted to print() for the Python 3 kernel.
print('MSE = ' + str(MSE))
print('NLPD = ' + str(NLPD))
# #5. Model Selection
# +
from scipy.optimize import fmin
def negative_ll(x,*args):
    """Negative log marginal likelihood of a GP with an OU kernel.

    x: hyperparameters (sigma_0, l, sigma_eps).
    args: (Xtr, Ytr) -- training inputs (2-D, one row per sample) and targets.
    Returns the scalar negative log evidence, suitable for scipy minimizers.
    """
    Xtr = args[0]
    Ytr = args[1]
    dist = spatial.distance.cdist(Xtr,Xtr,'euclidean')
    K = (x[0]**2)*np.exp(-dist/x[1])
    K_reg = K + x[2]**2 * np.eye(K.shape[0])
    K_inv = np.linalg.inv(K_reg)
    # FIX: log(det(K_reg)) overflows/underflows for large matrices (see the
    # numerical-stability note in Section 1); slogdet computes log|K_reg| stably.
    sign, logdet = np.linalg.slogdet(K_reg)
    return .5 * Ytr.T.dot(K_inv).dot(Ytr) + .5 * logdet + .5 * len(Ytr) * np.log(2*pi)
#xopt = fmin(func=negative_ll, x0=np.array([sigma_0, l, sigma_eps]), args=(X_1d,Ytrain))
#print xopt
| R_lab1_Bay_GP_Regresion_v_py2/Pract_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from statistics import mean, StatisticsError

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load Data
path = "data/"
# NOTE(review): sep="\\t" is the two-character string \t, which pandas treats as
# the regex for a tab (forcing the python parser engine); presumably the files
# are tab-separated -- confirm, plain "\t" would use the faster C engine.
df_inpiedi = pd.read_csv(path+"info_inpiedi.txt",sep="\\t")
df_seduta = pd.read_csv(path+"info_seduta.txt",sep="\\t")
df_sdraiata = pd.read_csv(path+"info_sdraiata.txt",sep="\\t")
# Create table with all formant values from all positions
df = pd.concat((df_inpiedi,df_seduta,df_sdraiata))
df
# Filter out undefined values
def is_numeric(x):
try:
float(x)
return True
except:
return False
# define variables to be analysed
formants = ("f1","f2","f3")
positions = ("inpiedi","seduta","sdraiata")
positions_en = ("standing","sitting","lying")
vowels = ("a","e","E","i","o","O","j","w")
# Compute average for each vowel/approximant
vals = np.zeros(tuple(map(len,(formants,vowels,positions))))
for vi,v in enumerate(vowels):
df_v = df[df.Vowel == v]
for pi,p in enumerate(positions):
df_vp = df_v[df_v.Name == p]
for fi,f in enumerate(formants):
try:
m = mean(map(float,filter(is_numeric,df_vp[f])))
except:
m = 0
vals[fi,vi,pi] = m
# Create plot outline
def plot_formant(avg_mat):
fig, ax = plt.subplots()
for v in avg_mat:
ax.plot(positions_en,v)
ax.set_ylabel("Hz")
ax.legend(vowels)
return fig
# +
# F1 averages: one line per vowel across the three body positions
f = plot_formant(vals[0])
f.set_size_inches(9,6)
plt.title("F1")
plt.show()
# +
# F2 averages
f = plot_formant(vals[1])
f.set_size_inches(9,6)
plt.title("F2")
plt.show()
# +
# F3 averages
f = plot_formant(vals[2])
f.set_size_inches(9,6)
plt.title("F3")
plt.show()
# -
| data analysis/DataAnalysis.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// # Object-Relational Mapping ( Code first or Database first )
//
// > ORM stands for `Object-Relational Mapping (ORM)` is a programming technique for converting data between relational databases and object oriented programming languages such as C#, Java, python, JS.
//
// > It is a programming technique that abstracts your code from the database behind it.
//
// - In plain English, if you are using ORM and a Sql Server database, you could switch to any SQL-based databases like PostgreSQL or MySQl or other and NoSQl like mongodb, Coach db, Cosmos db, Dynamo db at any time without changing your code. At all.
//
// 
// ## 1- The Big Bang story of programmers and database specialists
//
// - If you write Object-Oriented Code, having a place to store your data is a must.
// - In fact, the traditional way to go is to use a Relational Database.
// - You can use Microsoft SQL, MySQL, or PostgreSQL, but in any case, you know the pain.
// - You have to spend **time mapping** between tables and your classes, create methods to save in the database and read from it, and so on.
// 
//
// > Well, not anymore my friend.
//
// > we will explain what is ORM (Object-Relational Mapping), a solution that will solve all your pains.
//
//
// - Using an Object Relational Mapping library improves the <mark>application development process and runtime handling</mark> in many aspects.
// - To handle the communication between the Object Domain model (Classes/Objects) and Relational Model (Table/Records), we would be wasting a lot of time in writing boiler plate template code.
// - To ensure a seamless communication with robust implementation and good performance, it is better to embrace an already available ORM like:
// - Entity Framework, Dapper (C#, F#, Visual Basic)
// - Mongoose, RxDb NodeJs
// - Django ORM (Python)
// - Laravel Eloquent (PHP)
//
// > this session we work on .Net Why?
// - as this notebook is .Net
// - and the big deal:
// 
//
//
//
// ## 2- Environment Configuration
//
// - [Install Python 3.9](https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe) or [Anaconda](https://repo.anaconda.com/archive/Anaconda3-2020.11-Windows-x86_64.exe)
// - Install Dotnet 5 [SDK](https://dotnet.microsoft.com/download/dotnet/thank-you/sdk-5.0.100-windows-x64-installer) and [Runtime](https://dotnet.microsoft.com/download/dotnet/thank-you/runtime-aspnetcore-5.0.0-windows-x64-installer)
// - Open Terminal or Powershell or any bash
//
//
// - install Jupyter
// `$ pip install jupyterlab`
//
// - Installing the try tool of dotnet
// `$ dotnet tool install -g dotnet-try`
//
// - Installing the dotnet jupyter kernel
// `$ dotnet try jupyter install`
// - Open Jupyter notebook
// `$jupyter notebook`
//
//
// 
// 
//
// - Install SQL Server
//
//
// ## 3- Installing Assemblies and using them
//
// +
// Installing Assemblies
#r "nuget:Microsoft.EntityFrameworkCore, 3.1.8"
#r "nuget:Microsoft.EntityFrameworkCore.Tools, 3.1.8"
#r "nuget:Microsoft.EntityFrameworkCore.Design, 3.1.8"
#r "nuget:Microsoft.EntityFrameworkCore.SqlServer, 3.1.8"
using System;
//Lists
using System.Collections;
using System.Collections.Generic;
using Microsoft.EntityFrameworkCore;
using Microsoft.EntityFrameworkCore.Design;
using Microsoft.EntityFrameworkCore.SqlServer;
using Microsoft.EntityFrameworkCore.Infrastructure;
using Microsoft.EntityFrameworkCore.Migrations;
using System.Linq;
using System.Threading.Tasks;
// +
/// <summary>EF Core entity mapped to the Students table (one row per student).</summary>
public class Student
{
    /// <summary>Primary key, detected by EF Core's "&lt;TypeName&gt;Id" naming convention.</summary>
    public int StudentId { get; set; }
    /// <summary>Student display name.</summary>
    public string Name { get; set; }
}
/// <summary>EF Core entity mapped to the Courses table (one row per course).</summary>
public class Course
{
    /// <summary>Primary key, detected by EF Core's "&lt;TypeName&gt;Id" naming convention.</summary>
    public int CourseId { get; set; }
    /// <summary>Course display name.</summary>
    public string CourseName { get; set; }
}
/// <summary>
/// EF Core database context for the demo school database: exposes the
/// Students and Courses tables and configures the SQL Server connection.
/// </summary>
public class SchoolContext : DbContext
{
    public DbSet<Student> Students { get; set; }
    public DbSet<Course> Courses { get; set; }
    protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
    {
        // NOTE(review): machine-specific, hard-coded connection string
        // (Data Source=DESKTOP-MM55CCG) -- acceptable for a demo notebook, but
        // production code should read it from configuration/secrets.
        optionsBuilder.UseSqlServer(@"Data Source=DESKTOP-MM55CCG;Initial Catalog=Schooldb;Trusted_Connection=True;MultipleActiveResultSets=true");
    }
}
// -
// Create the context and make sure the database exists.
SchoolContext db = new SchoolContext();
// EnsureCreated() builds the schema directly from the model, WITHOUT migrations.
db.Database.EnsureCreated();
// NOTE(review): EnsureCreated and MigrateAsync are normally mutually exclusive --
// a database created by EnsureCreated has no migrations history, so applying
// migrations afterwards can fail; confirm which strategy this demo intends.
var migrator = db.Database.GetService<IMigrator>();
await migrator.MigrateAsync();
// FIX: this is a CLI command, not C# -- as a bare statement it fails to compile.
// Run it in a terminal instead:
//   dotnet ef migrations add initial
| Module 16 C#/Dynamic Binding/ORM Session.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Weak Law of Large Numbers (WLLN)
#
# For all $\epsilon > 0, P(|M_n - \mu| \geq \epsilon) = P(|\frac{X_1+...+X_n}{n} - \mu| \geq \epsilon) \to 0$ as $n \to \infty$
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import seaborn as sns
import math
# +
# Empirical illustration of the WLLN: the running sample mean M_k of iid draws
# converges to the distribution mean mu as k grows.
n = 10000
rv_obs = {'beta': np.random.beta(2, 2, n), # a = b = 2
          'binomial': np.random.binomial(10, 0.5, n), # n = 10, p = 0.5
          'exponential': np.random.exponential(0.5, n), # 1/lambda = 0.5
          'geometric': np.random.geometric(0.5, n), # p = 0.5
          'uniform': np.random.uniform(1, 10, n), # a = 1, b = 10
          'normal': np.random.normal(0, 1, n), # mu = 0, var = 1
         }
true_means = {'beta': 1/2, # mu = a/(a+b)
              'binomial': 5, # mu = np = 10*0.5 = 5
              'exponential': 1/2, # mu = 1/lambda = 1/2
              'geometric': 2, # mu = 1/p = 2
              'uniform': 11/2, # mu = (a+b)/2 = 11/2
              'normal': 0, # mu = 0
             }
for dist in rv_obs:
    # Running means M_1..M_n in O(n) via a cumulative sum; the original
    # recomputed np.mean over every prefix, which was O(n^2).
    # (Also removed the unused `a = b = 2` binding.)
    sample_means = np.cumsum(rv_obs[dist]) / np.arange(1, n + 1)
    plt.figure(figsize=(10, 5))
    plt.plot(sample_means, label='sample_mean')
    plt.axhline(y=true_means[dist], color = 'green',linestyle='--', label='true_mean')
    plt.title('%s distribution' % dist)
    plt.xlabel('Number of observations')
    plt.ylabel('Mean')
    plt.legend()
    plt.show()
# +
# CLT illustration: for each sample size n, draw m independent samples and form
# the standardized statistic sqrt(n)*(M_n - mu)/sigma, which tends to N(0, 1).
n_vals = [1, 10, 100, 1000]
m = 1000
true_vars = { 'beta': 1/20, # a=2, b=2
              'binomial': 10*0.5*0.5, # n=10, p=0.5
              'exponential': 1/4, # lambda = 1/2
              'geometric': 2, # p=1/2
              'uniform': 81/12, # a=1, b=10
              'normal': 1, # sigma^2 = 1
            }
sample_means = {'beta': [],
                'binomial': [],
                'exponential': [],
                'geometric': [],
                'uniform': [],
                'normal': [],
               }
# find sample means for different values of n
# (one list of m standardized sample means per n value, per distribution)
for n_val in n_vals:
    sample_means['beta'].append([(np.mean(np.random.beta(2, 2, n_val)) - true_means['beta'])/(math.sqrt(true_vars['beta']))*math.sqrt(n_val) for i in range(0, m)])
    sample_means['binomial'].append([(np.mean(np.random.binomial(10, 0.5, n_val)) - true_means['binomial'])/(math.sqrt(true_vars['binomial']))*math.sqrt(n_val) for i in range(0, m)])
    sample_means['exponential'].append([(np.mean(np.random.exponential(1/2, n_val)) - true_means['exponential'])/(math.sqrt(true_vars['exponential']))*math.sqrt(n_val) for i in range(0, m)])
    sample_means['geometric'].append([(np.mean(np.random.geometric(1/2, n_val)) - true_means['geometric'])/(math.sqrt(true_vars['geometric']))*math.sqrt(n_val) for i in range(0, m)])
    sample_means['uniform'].append([(np.mean(np.random.uniform(1, 10, n_val)) - true_means['uniform'])/(math.sqrt(true_vars['uniform']))*math.sqrt(n_val) for i in range(0, m)])
    sample_means['normal'].append([(np.mean(np.random.normal(0, 1, n_val)) - true_means['normal'])/(math.sqrt(true_vars['normal']))*math.sqrt(n_val) for i in range(0, m)])
# 2x2 grid of histograms (one panel per n) overlaid with the standard normal pdf
for dist in sample_means:
    k = 0
    fig, ax = plt.subplots(2, 2, figsize =(15, 10))
    for i in range(0, 2):
        for j in range(0, 2):
            fig.suptitle(dist)
            ax[i, j].hist(sample_means[dist][k], density = True, label='sample mean')
            ax[i, j].set_title(label = n_vals[k])
            x_grid = np.linspace(-3, 3, 200)
            ax[i, j].plot(x_grid, norm.pdf(x_grid, scale=1), label='$N(0, \sigma^2)$', color='green')
            ax[i, j].legend()
            k = k + 1
    plt.show()
# -
| R_12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rKu2--YjjiNd"
# 論文
# https://arxiv.org/abs/2204.04676<br>
# GitHub<br>
# https://github.com/megvii-research/NAFNet<br>
# <br>
# <a href="https://colab.research.google.com/github/kaz12tech/ai_demos/blob/master/NAFNet_demo.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="YWhJj2RejiNg"
# # 環境セットアップ
# + [markdown] id="2IuH74fQjiNg"
# ## GPU確認
# + colab={"base_uri": "https://localhost:8080/"} id="2PcII5fLdHnJ" outputId="3fd96959-59eb-4e0e-f7b5-6e4f5c994cb6"
# !nvidia-smi
# + [markdown] id="XjrCXTWkjiNi"
# ## GitHubからコード取得
# + colab={"base_uri": "https://localhost:8080/"} id="R-Y5auuvnnnT" outputId="2934c5bd-4dd4-4200-d76d-afc6f75dd28e"
# %cd /content
# !git clone https://github.com/megvii-research/NAFNet
# + [markdown] id="6pEp-7bNjiNj"
# ## ライブラリのインストール
# + colab={"base_uri": "https://localhost:8080/"} id="LScsvO3UppqT" outputId="ce587902-c2e1-4e6a-e60f-ebe38e3d14a6"
# %cd /content/NAFNet
# !pip install -r requirements.txt
# !pip install --upgrade --no-cache-dir gdown
# !python3 setup.py develop --no_cuda_ext
# + [markdown] id="nDsSRIjsjiNj"
# ## ライブラリのインポート
# + colab={"base_uri": "https://localhost:8080/"} id="o2lxptJeo6kr" outputId="df597c70-0769-4729-ebcd-42f21b563ac7"
# %cd /content/NAFNet
import gdown
import torch
from basicsr.models import create_model
from basicsr.utils import img2tensor as _img2tensor, tensor2img, imwrite
from basicsr.utils.options import parse
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
from google.colab import files
import shutil
import glob
# + [markdown] id="uoREcCRMjiNk"
# # 学習済みモデルのダウンロード
# Access denied with the following error:<br>
# が発生する場合、何回か実行
# + colab={"base_uri": "https://localhost:8080/"} id="QYmO4couoYhz" outputId="1341f812-90f5-4f83-e329-ffc15a803ff9"
# %cd /content/NAFNet
# Download the three pretrained checkpoints (skipped when already present).
# NOTE(review): gdown receives a directory as the output path, so the file
# name comes from Google Drive — presumably it matches the names checked
# here; verify after the first run.
# Denoise
if not os.path.exists("./experiments/pretrained_models/NAFNet-SIDD-width64.pth"):
    gdown.download('https://drive.google.com/uc?id=14Fht1QQJ2gMlk4N1ERCRuElg8JfjrWWR', "./experiments/pretrained_models/", quiet=False)
# Deblur
if not os.path.exists("./experiments/pretrained_models/NAFNet-REDS-width64.pth"):
    gdown.download('https://drive.google.com/uc?id=14D4V4raNYIOhETfcuuLI3bGLB-OYIv6X', "./experiments/pretrained_models/", quiet=False)
# Super Resolution
if not os.path.exists("./experiments/pretrained_models/NAFSSR-L_4x.pth"):
    gdown.download('https://drive.google.com/uc?id=1TIdQhPtBrZb2wrBdAp9l8NHINLeExOwb', "./experiments/pretrained_models/", quiet=False)
# + [markdown] id="IYP8fIDCjiNl"
# # テスト画像のセットアップ
# + [markdown] id="iAm3NJEyHypU"
# ## デモ用画像のダウンロード
# + colab={"base_uri": "https://localhost:8080/"} id="ljLvp7i_HLQc" outputId="6465b2e3-ba45-45fb-d447-a18a848fb265"
# %cd /content/NAFNet
# Fetch the demo images used by the sections below (skipped when present).
# Denoise
if not os.path.exists("./demo_input/noisy-demo-0.png"):
    gdown.download('https://drive.google.com/uc?id=1uKwZUgeGfBYLlPKllSuzgGUItlzb40hm', "demo_input/", quiet=False)
# Deblur
if not os.path.exists("./demo_input/blurry-reds-0.jpg"):
    gdown.download('https://drive.google.com/uc?id=1kWjrGsAvh4gOA_gn7rB9vnnQVfRINwEn', "demo_input/", quiet=False)
# Super Resolution — a stereo pair (left/right views)
if not os.path.exists("./demo_input/Middlebury_lr_x4_sword2_l.png"):
    gdown.download('https://drive.google.com/uc?id=15MLvll3frPC2ICjTUnXvnLe58D_dONXc', "demo_input/", quiet=False)
if not os.path.exists("./demo_input/Middlebury_lr_x4_sword2_r.png"):
    gdown.download('https://drive.google.com/uc?id=1tedRtTen7LFHXsaC4ddwwg8ZkbEHPL0b', "demo_input/", quiet=False)
# + [markdown] id="j1HVhLKlS5pF"
# ## デモ用画像のアップロード
#
# + [markdown] id="huzn7-0olYd_"
# ### Denoise
# [luminance noise画像](https://assets-global.website-files.com/60e4d0d0155e62117f4faef3/620c027c4aabcea88b2285d2_Example%20of%20luminance%20noise.jpg)<br>
# [Chromatic noise画像](https://assets-global.website-files.com/60e4d0d0155e62117f4faef3/620c02ce030b773058eaf663_Bill-Maynard-photo-of-a-mink.jpeg)
# + colab={"base_uri": "https://localhost:8080/", "height": 168, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="OuYhAjM6S86t" outputId="af889097-c233-48a7-fee8-b33e3a7b3d5b"
# %cd /content/NAFNet
denoise_upload_folder = 'upload/denoise_input'
denoise_result_folder = 'upload/denoise_output'
# Start from empty folders so stale files never mix with this run's uploads.
if os.path.isdir(denoise_upload_folder):
    shutil.rmtree(denoise_upload_folder)
if os.path.isdir(denoise_result_folder):
    shutil.rmtree(denoise_result_folder)
os.makedirs(denoise_upload_folder)
os.makedirs(denoise_result_folder)
# upload images via the Colab file picker, then move them into the input folder
uploaded = files.upload()
for filename in uploaded.keys():
    dst_path = os.path.join(denoise_upload_folder, filename)
    print(f'move (unknown) to {dst_path}')
    shutil.move(filename, dst_path)
# + [markdown] id="ULKxptOAlW7l"
# ### Deblur
# [Blur画像](https://pbblogassets.s3.amazonaws.com/uploads/2019/10/09143411/motion-blur-cover.jpg)
# + colab={"base_uri": "https://localhost:8080/", "height": 115, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="qx3u5RTYlaWW" outputId="dde52cd6-356b-4a16-8f1e-4e00d907b8c6"
# %cd /content/NAFNet
deblur_upload_folder = 'upload/deblur_input'
deblur_result_folder = 'upload/deblur_output'
# Start from empty folders so stale files never mix with this run's uploads.
if os.path.isdir(deblur_upload_folder):
    shutil.rmtree(deblur_upload_folder)
if os.path.isdir(deblur_result_folder):
    shutil.rmtree(deblur_result_folder)
os.makedirs(deblur_upload_folder)
os.makedirs(deblur_result_folder)
# upload images via the Colab file picker, then move them into the input folder
uploaded = files.upload()
for filename in uploaded.keys():
    dst_path = os.path.join(deblur_upload_folder, filename)
    print(f'move (unknown) to {dst_path}')
    shutil.move(filename, dst_path)
# + [markdown] id="dY-rL-eWrUxn"
# ### Super Resolution
# [small画像](https://www.google.com/url?sa=i&url=https%3A%2F%2Fs.awa.fm%2Ftrack%2Fd9a068a1ba12b36aa983&psig=AOvVaw39U0Eb-ZfxdmYXxSnH33IX&ust=1651852662698000&source=images&cd=vfe&ved=0CAwQjRxqFwoTCMDK14HdyPcCFQAAAAAdAAAAABAD)
# + colab={"base_uri": "https://localhost:8080/", "height": 115, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="vfQDMBAVrX0q" outputId="15dec920-67aa-434f-961c-c4e199f36b0b"
# %cd /content/NAFNet
sr_upload_folder = 'upload/sr_input'
sr_result_folder = 'upload/sr_output'
# Start from empty folders so stale files never mix with this run's uploads.
if os.path.isdir(sr_upload_folder):
    shutil.rmtree(sr_upload_folder)
if os.path.isdir(sr_result_folder):
    shutil.rmtree(sr_result_folder)
os.makedirs(sr_upload_folder)
os.makedirs(sr_result_folder)
# upload images via the Colab file picker, then move them into the input folder
uploaded = files.upload()
for filename in uploaded.keys():
    dst_path = os.path.join(sr_upload_folder, filename)
    print(f'move (unknown) to {dst_path}')
    shutil.move(filename, dst_path)
# + [markdown] id="sX6G62L_nDjK"
# # 画像系Utility関数定義
# + id="b1WNTef3nG8-"
def imread(img_path):
    """Load an image from disk and return it in RGB channel order.

    OpenCV decodes images as BGR, so the channels are swapped before
    returning (matplotlib and the tensor helpers expect RGB).
    """
    bgr = cv2.imread(img_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def img2tensor(img, bgr2rgb=False, float32=True):
    """Scale a uint8 image into [0, 1] and convert it with basicsr's helper."""
    scaled = img.astype(np.float32) / 255.
    return _img2tensor(scaled, bgr2rgb=bgr2rgb, float32=float32)
def display(img1, img2):
    """Show the input image and the NAFNet output side by side."""
    fig = plt.figure(figsize=(25, 10))
    panels = ((1, 'Input image', img1), (2, 'NAFNet output', img2))
    for pos, title, image in panels:
        axis = fig.add_subplot(1, 2, pos)
        plt.title(title, fontsize=16)  # applies to the just-created axes
        axis.axis('off')
        axis.imshow(image)
def single_image_inference(model, img, save_path):
    """Run the model on one image tensor and write the result to save_path.

    `img` is a CHW tensor; a batch dimension is added before feeding.
    When the model options enable 'grids', the image is tiled before
    inference and stitched back afterwards (reduces peak memory).
    """
    model.feed_data(data={'lq': img.unsqueeze(dim=0)})
    if model.opt['val'].get('grids', False):
        model.grids()
    model.test()
    if model.opt['val'].get('grids', False):
        model.grids_inverse()
    visuals = model.get_current_visuals()
    sr_img = tensor2img([visuals['result']])
    imwrite(sr_img, save_path)
def sr_display(LR_l, LR_r, SR_l, SR_r):
    """Show the low-res stereo inputs next to the NAFSSR outputs.

    The low-res images are bicubically upscaled to the SR resolution so
    all four panels share the same pixel dimensions.
    """
    h, w = SR_l.shape[:2]
    LR_l = cv2.resize(LR_l, (w, h), interpolation=cv2.INTER_CUBIC)
    LR_r = cv2.resize(LR_r, (w, h), interpolation=cv2.INTER_CUBIC)
    fig = plt.figure(figsize=(w // 40, h // 40))
    panels = (
        (1, 'Input image (Left)', LR_l),
        (2, 'NAFSSR output (Left)', SR_l),
        (3, 'Input image (Right)', LR_r),
        (4, 'NAFSSR output (Right)', SR_r),
    )
    for pos, title, image in panels:
        axis = fig.add_subplot(2, 2, pos)
        plt.title(title, fontsize=16)  # applies to the just-created axes
        axis.axis('off')
        axis.imshow(image)
    plt.subplots_adjust(wspace=0.04, hspace=0.04)
def stereo_image_inference(model, img_l, img_r, save_path):
    """Run NAFSSR on a stereo pair and write both super-resolved views.

    Left and right CHW tensors are concatenated along the channel axis
    (6 channels total) as the model expects; the 6-channel result is
    split back into left (first 3 channels) and right (last 3).
    `save_path` must contain a '{}' placeholder, filled with 'L' / 'R'.
    """
    img = torch.cat([img_l, img_r], dim=0)
    model.feed_data(data={'lq': img.unsqueeze(dim=0)})
    if model.opt['val'].get('grids', False):
        model.grids()
    model.test()
    if model.opt['val'].get('grids', False):
        model.grids_inverse()
    visuals = model.get_current_visuals()
    img_L = visuals['result'][:,:3]
    img_R = visuals['result'][:,3:]
    img_L, img_R = tensor2img([img_L, img_R])
    imwrite(img_L, save_path.format('L'))
    imwrite(img_R, save_path.format('R'))
# + [markdown] id="34lIUD6qT-MC"
# # Denoise
# + [markdown] id="GZQ6CHE3TuA2"
# ## モデルのビルド
# + colab={"base_uri": "https://localhost:8080/"} id="iSZAnN4uTxOr" outputId="733d0225-3384-4d78-cb70-b81aa0c4a82b"
# %cd /content/NAFNet
# Build the SIDD denoising model from its test config; 'dist' is disabled
# because inference runs on a single (non-distributed) Colab GPU.
opt_path = 'options/test/SIDD/NAFNet-width64.yml'
opt = parse(opt_path, is_train=False)
opt['dist'] = False
NAFNet = create_model(opt)
# + [markdown] id="itbGctnrUGPQ"
# ## ダウンロード画像のDenoise
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="aGG3ehvGUA5J" outputId="f1576864-8d7d-41df-b3f8-fb19a6b55c7f"
# %cd /content/NAFNet
# Denoise the bundled demo image and show input vs. output.
input_path = 'demo_input/noisy-demo-0.png'
output_path = 'demo_output/noisy-demo-0.png'
img_input = imread(input_path)
inp = img2tensor(img_input)
single_image_inference(NAFNet, inp, output_path)
img_output = imread(output_path)
display(img_input, img_output)
# + [markdown] id="7uJQZidYUJc2"
# ## アップロード画像のDenoise
# + colab={"base_uri": "https://localhost:8080/", "height": 870} id="2BCUJ9q1UN1F" outputId="74bf69e1-b9d1-4b95-bb8a-0b915414f324"
# %cd /content/NAFNet
# Run the denoiser over every uploaded image, then show each input/output pair.
input_list = sorted(glob.glob(os.path.join(denoise_upload_folder, '*')))
for input_path in input_list:
    img_input = imread(input_path)
    inp = img2tensor(img_input)
    output_path = os.path.join(denoise_result_folder, os.path.basename(input_path))
    single_image_inference(NAFNet, inp, output_path)
# Sorting both lists keeps inputs and outputs aligned (same basenames).
output_list = sorted(glob.glob(os.path.join(denoise_result_folder, '*')))
for input_path, output_path in zip(input_list, output_list):
    img_input = imread(input_path)
    img_output = imread(output_path)
    display(img_input, img_output)
# + [markdown] id="uhV8pJ8ako5i"
# # Deblur
# + [markdown] id="Jp8eUKhTkrkq"
# ## モデルのビルド
# + colab={"base_uri": "https://localhost:8080/"} id="EyioFk8RkrG5" outputId="be09f943-91f4-4cc7-d5ec-a55a4dd7e6f8"
# %cd /content/NAFNet
# Rebuild NAFNet with the REDS (deblurring) config; this rebinds the
# NAFNet name, replacing the denoising model from the previous section.
opt_path = 'options/test/REDS/NAFNet-width64.yml'
opt = parse(opt_path, is_train=False)
opt['dist'] = False
NAFNet = create_model(opt)
# + [markdown] id="1SQhRY90k1d9"
# ## ダウンロード画像のDeblur
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="PRVHJt94k0Yo" outputId="3579b680-69f0-43ca-c748-a2e2b659b2b9"
# %cd /content/NAFNet
# Deblur the bundled demo image and show input vs. output.
input_path = 'demo_input/blurry-reds-0.jpg'
output_path = 'demo_output/blurry-reds-0.jpg'
img_input = imread(input_path)
inp = img2tensor(img_input)
single_image_inference(NAFNet, inp, output_path)
img_output = imread(output_path)
display(img_input, img_output)
# + [markdown] id="KgStlMkOlpf7"
# ## アップロード画像のDeblur
# + colab={"base_uri": "https://localhost:8080/", "height": 353} id="ZuUrJCd6lrnu" outputId="00da0505-9801-4e01-b16c-871d06399895"
# %cd /content/NAFNet
# Run the deblurring model over every uploaded image, then show each pair.
input_list = sorted(glob.glob(os.path.join(deblur_upload_folder, '*')))
for input_path in input_list:
    img_input = imread(input_path)
    inp = img2tensor(img_input)
    output_path = os.path.join(deblur_result_folder, os.path.basename(input_path))
    single_image_inference(NAFNet, inp, output_path)
# Sorting both lists keeps inputs and outputs aligned (same basenames).
output_list = sorted(glob.glob(os.path.join(deblur_result_folder, '*')))
for input_path, output_path in zip(input_list, output_list):
    img_input = imread(input_path)
    img_output = imread(output_path)
    display(img_input, img_output)
# + [markdown] id="laxZpw9YrhDP"
# # Super Resolution
# + [markdown] id="ALI-JeQIri_u"
# ## モデルのビルド
# + colab={"base_uri": "https://localhost:8080/"} id="za5ULvAgrio1" outputId="f8dc15a9-76a0-4633-9e8c-38efe48796f5"
# Build the NAFSSR 4x stereo super-resolution model from its test config.
# NOTE(review): unlike the other model-build cells there is no `%cd` here —
# this relies on the working directory still being /content/NAFNet.
opt_path = 'options/test/NAFSSR/NAFSSR-L_4x.yml'
opt = parse(opt_path, is_train=False)
opt['dist'] = False
NAFSSR = create_model(opt)
# + [markdown] id="4GmTSosSrolU"
# ## ダウンロード画像のSR
# + colab={"base_uri": "https://localhost:8080/", "height": 719} id="v05IqhNGroPa" outputId="4e63ac12-b87a-42d2-edec-b672dddb91f5"
# %cd /content/NAFNet
# Super-resolve the demo stereo pair; the '{}' in output_path is filled
# with 'L' / 'R' by stereo_image_inference.
input_path_l = 'demo_input/Middlebury_lr_x4_sword2_l.png'
input_path_r = 'demo_input/Middlebury_lr_x4_sword2_r.png'
output_path = 'demo_output/Middlebury_sr_x4_sword2_{}.png'
img_l = imread(input_path_l)
inp_l = img2tensor(img_l)
img_r = imread(input_path_r)
inp_r = img2tensor(img_r)
stereo_image_inference(NAFSSR, inp_l, inp_r, output_path)
SR_l = imread(output_path.format('L'))
SR_r = imread(output_path.format('R'))
sr_display(img_l, img_r, SR_l, SR_r)
# + [markdown] id="yTVrdrbNsB2-"
# ## アップロード画像のSR
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="3e_2erjusF0M" outputId="a3095549-4882-4105-b3a4-1111ffc2106d"
# %cd /content/NAFNet
input_list = sorted(glob.glob(os.path.join(sr_upload_folder, '*')))
for input_path in input_list:
    # NOTE(review): the same file is used as both the left and the right
    # view, so NAFSSR sees a degenerate stereo pair — confirm this is the
    # intended behavior for single-image uploads.
    img_l = imread(input_path)
    inp_l = img2tensor(img_l)
    img_r = imread(input_path)
    inp_r = img2tensor(img_r)
    # NOTE(review): the output name is fixed, so each iteration overwrites
    # the previous pair — only the last upload's results survive.
    stereo_image_inference(NAFSSR, inp_l, inp_r, os.path.join(sr_result_folder, "sr_result{}.png"))
output_list = sorted(glob.glob(os.path.join(sr_result_folder, '*')))
for input_path, output_path in zip(input_list, output_list):
    img_input = imread(input_path)
    img_output = imread(output_path)
    display(img_input, img_output)
| NAFNet_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## ImageInput
#
# 
# +
import panel as pn
from paithon.image.widgets import ImageInput
from paithon.image.examples import IMAGE_EXAMPLES
from paithon.shared.pane import DocStringViewer
from paithon.shared.param import SortedParam
from paithon.shared.template import fastlisttemplate
pn.extension(sizing_mode="stretch_width")
# -
# Lets create an instance and inspect its documentation
# Instantiate the widget and preload it with a bundled example image.
image_input0 = ImageInput(height=300)
image_input0.set_value_from_data_uri(IMAGE_EXAMPLES[0].data_uri)
image_input0
# Try dropping an image file onto the `ImageInput`!
# ## Tool with Controls
# A second, resizable instance used by the interactive tool below.
image_input = ImageInput(width=200, height=200, min_height=530, sizing_mode="stretch_both")
image_input.set_value_from_data_uri(IMAGE_EXAMPLES[1].data_uri)
# +
def _get_url(value):
if value:
return "_url: " + value[0:50] + "..."
return "No Image Loaded"
# Reactive preview of the widget's data URI.
iurl = pn.bind(_get_url, value=image_input.param.uri)
# Indeterminate progress bar (value=-1), kept in sync with the widget below.
# Fix: the visible label was misspelled "Progess".
progress = pn.widgets.Progress(value=-1, name="Progress", sizing_mode="stretch_width")
@pn.depends(image_input.param.progress, watch=True)
def _update_progress(value):
    """Mirror the ImageInput's upload progress onto the standalone bar."""
    progress.value = value
info = pn.Column(progress, iurl)
info
# -
# Parameter editor for the widget, limited to the parameters worth tweaking.
controls = SortedParam(
    image_input,
    parameters=[
        "accept",
        "filename",
        "mime_type",
        "fit",
        "max_size_in_mega_bytes",
        "progress",
        "height",
        "width",
        "sizing_mode",
        "visible",
        "loading",
    ],
    # Per-parameter widget overrides: taller text area for 'accept',
    # explicit slider ranges for the numeric parameters.
    widgets={
        "accept": {"height": 120},
        "height": {"start": 0, "end": 2000},
        "max_size_in_mega_bytes": {"start": 1, "end": 15},
        "width": {"start": 0, "end": 3000},
    }, sizing_mode="fixed", width=300
)
pn.Column(pn.Row(controls, image_input), info, max_height=700)
# ## Documentation
# + tags=[]
# Render the widget's docstring as a scrollable pane.
doc_string = DocStringViewer(image_input, height=600, scroll=True)
doc_string
# -
# ## Share it as an app
# Collapsible documentation card for the served app.
card = pn.layout.Card(
    doc_string,
    header="# ImageInput",
    collapsed=True,
)
# Assemble the servable page; the trailing ';' suppresses notebook output.
fastlisttemplate(
    title="ImageInput",
    sidebar=[controls],
    main=[
        card,
        pn.Column(image_input, sizing_mode="stretch_both", margin=0),
        pn.Column("### Change", progress, pn.panel(iurl, height=50)),
    ],
).servable();
# You can *serve* the app via `panel serve examples/reference/image/widgets/ImageInput.ipynb`.
#
# The app will be served at http://localhost:5006/ImageInput.
| examples/reference/image/widgets/ImageInput.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Keras save model using h5 format
# > Save and load model with Keras.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [Keras]
# - image: images/chart-preview.png
import numpy as np
import tensorflow as tf
import os
import glob
import argparse
import random
from resnet import ResNet
import matplotlib.pyplot as plt
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", type=str, default = "/home/imagda/sims-data/malaria", help="path dataset of input images")
ap.add_argument("-m", "--model", type=str, default = "/orig/", help="path to trained model")
ap.add_argument("-p", "--plot", type=str, default="plot.png", help="path to output loss/accuracy plot")
# parse_args([]) ignores sys.argv, so the defaults above are always used
# when running inside the notebook.
args = vars(ap.parse_args([]))
# ## 1. Loading dataset
train_path = os.path.sep.join([args["dataset"], "train"])
test_path = os.path.sep.join([args["dataset"], "test"])
val_path = os.path.sep.join([args["dataset"], "val"])
# data are already pre-processed and saved into corresponding folders;
# the globs count one image per "<split>/<class>/<file>" path
tot_train_paths = glob.glob(os.path.sep.join([args["dataset"], "train", "*", "*"]))
tot_test_paths = glob.glob(os.path.sep.join([args["dataset"], "test" , "*", "*"]))
tot_val_paths = glob.glob(os.path.sep.join([args["dataset"], "val" , "*", "*"]))
print(len(tot_train_paths), len(tot_test_paths), len(tot_val_paths))
# Rescaling-only generators (no augmentation beyond 1/255 normalization).
trainAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
valAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
testAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
# initialize the training generator
trainGen = trainAug.flow_from_directory(train_path, target_size = (64,64), class_mode = "categorical", \
    shuffle = True,color_mode = "rgb")
# NOTE(review): shuffle=True on the test generator breaks the alignment
# between testGen.classes and predictions from model.predict — evaluation
# below should use shuffle=False.
testGen = testAug.flow_from_directory(test_path, target_size = (64, 64),\
    shuffle = True, color_mode = "rgb", class_mode = "categorical")
valGen = valAug.flow_from_directory(val_path, target_size = (64,64), class_mode = "categorical",\
    shuffle = True, color_mode = "rgb")
# ## 2. Create model
# ResNet for 64x64 RGB input with 2 output classes.
model = ResNet.build(64, 64, 3, 2, (2, 2, 3), (32, 64, 128, 256), reg=0.0005)
# Fix: 'learning_rate' replaces the deprecated 'lr' alias (removed in newer Keras).
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
# NOTE(review): the generators use class_mode="categorical" (one-hot, 2 columns);
# CategoricalCrossentropy would be the canonical pairing — confirm before changing.
loss = tf.keras.losses.BinaryCrossentropy()
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
# ## 3. Fit model
# Fix: the original call passed `valGen` positionally after keyword arguments
# (a SyntaxError), misspelled `steps_per_epoch`, and referenced the undefined
# `tot_train_path`. Validation data must go through `validation_data=`, and
# validation_steps is derived from the validation set, not the training set.
# NOTE(review): BATCH_SIZE and NUM_EPOCHS are not defined anywhere in this
# notebook — define them in an earlier cell before running.
history = model.fit(
    trainGen,
    steps_per_epoch=len(tot_train_paths) // BATCH_SIZE,
    validation_data=valGen,
    validation_steps=len(tot_val_paths) // BATCH_SIZE,
    epochs=NUM_EPOCHS,
)
# ## 4. Modell speichern, Vorhersage durchführen
#
# - model.save("filename", save_format="h5)
#
# - model.save("filename.h5")
#
# Bei Verwendung dieses Befehls wird das gesamte Modell gespeichert: Architekturen, Parameter und Gewichte.
# +
# reset the testing generator and then use our trained model to
# make predictions on the data
# reset the testing generator and then use our trained model to
# make predictions on the data
print("[INFO] evaluating network...")
testGen.reset()
# Fix: `totalTest` and `BS` were never defined — use the counted test paths
# and the same batch-size constant as training.
predIdxs = model.predict(x=testGen, steps=(len(tot_test_paths) // BATCH_SIZE) + 1)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
from sklearn.metrics import classification_report  # was used without an import
print(classification_report(testGen.classes, predIdxs, target_names=testGen.class_indices.keys()))
# save the network to disk (architecture + weights + optimizer state, HDF5)
print("[INFO] serializing network to '{}'...".format(args["model"]))
model.save(args["model"], save_format="h5")
# plot the training loss and accuracy
# Fix: the fit result is named `history`, not `H`; deriving N from the
# recorded history also removes the undefined NUM_EPOCHS reference.
N = len(history.history["loss"])
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), history.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), history.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
# -
# ## References
#
# > <NAME>, OpenCV Face Recognition, PyImageSearch, https://www.pyimagesearch.com/, accessed on 3 January, 2021
#
#
# > www: https://www.pyimagesearch.com/2018/12/10/keras-save-and-load-your-deep-learning-models/
| _notebooks/2021-01-07-save_model_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Importing the necessary modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
plt.rcParams["figure.figsize"] = (12, 7)
import warnings
warnings.filterwarnings('ignore')
#Importing Machine Learning Libraries
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import Imputer
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# -
# Reading the data
df = pd.read_csv('marketing-data.csv')
df.info()
# Checking NaN or null values
df.columns[df.isna().any()].tolist()
df.head()
df.describe()
# ###### No NaN or Null values in any columns
# Let's check the success percent
# Class balance of the target: with ~11% "yes", accuracy alone is a weak metric.
count = df.groupby('is_success').size()
percent = count / len(df)*100
print(percent)
# ##### Data is highly imbalanced with only 11 percent yes
# checking multicollinearity via pairwise scatter plots
sns.pairplot(df)
# #### There seems to be no multicollinearity but we can clearly see some outliers in previous and pdays. We will start analyzing each variable now
# Age
sns.boxplot(x='is_success', y = 'age', data=df)
# Balance
sns.boxplot(x='is_success', y = 'balance', data=df)
# Impute outliers function
def impute_outliers(df, column, minimum, maximum):
    """Replace values of `column` outside [minimum, maximum] with the column
    mean (computed over all values, outliers included).

    Mutates `df` in place and returns it.
    """
    values = df[column].values
    outside = (values < minimum) | (values > maximum)
    df[column] = np.where(outside, values.mean(), values)
    return df
# Balance has lot of outliers let's fix it
# NOTE: df_new = df only aliases the same DataFrame, so each imputation
# below also mutates df in place.
df_new = df
min_val = df['balance'].min()
max_val = 20000  # as most values are under it
df_new = impute_outliers(df=df_new, column='balance', minimum=min_val, maximum=max_val)
# day
sns.boxplot(x='is_success', y='day', data=df)
# duration
sns.boxplot(x='is_success', y='duration', data=df)
# Fixing Duration
min_val = df_new["duration"].min()
max_val = 2000
df_new = impute_outliers(df=df_new, column='duration' , minimum=min_val, maximum=max_val)
# Campaign
sns.boxplot(x='is_success', y='campaign', data=df)
# Fixing campaign column
min_val = df_new['campaign'].min()
max_val = 20
df_new = impute_outliers(df=df_new, column='campaign', minimum=min_val, maximum=max_val)
# pdays
sns.boxplot(x='is_success', y='pdays', data=df)
# Fixing pdays column
min_val = df_new['pdays'].min()
max_val = 250
df_new = impute_outliers(df=df_new, column='pdays', minimum=min_val, maximum = max_val)
# previous
sns.boxplot(x='is_success', y='previous', data=df)
# Fixing previous
min_val = df_new['previous'].min()
max_val = 15
df_new = impute_outliers(df = df_new, column='previous', minimum=min_val, maximum=max_val)
df_new.describe()
# #### Data seems fine now
# ## Categorigcal variables have unknowns in them, let's fix them too
#Impute unknowns function
def impute_unknowns(df, column):
    """Replace the literal string 'unknown' in `column` with the column mode.

    Fix: `Series.mode()` returns a Series (several values on ties); passing
    it straight to np.where only broadcasts by accident when there is a
    single mode and raises on multi-modal columns. Use the first mode
    explicitly.

    NOTE: 'unknown' itself counts toward the mode, so an all-'unknown'
    column is left unchanged. Mutates `df` in place and returns it.
    """
    col_values = df[column].values
    fill = df[column].mode().iloc[0]
    df[column] = np.where(col_values == 'unknown', fill, col_values)
    return df
# job
job = pd.crosstab(df['job'], df['is_success'])
job.plot(kind='bar')
print(df.groupby(['job']).size()/len(df)*100)
# Fixing job
df_new = impute_unknowns(df=df_new, column='job')
# marital
marital = pd.crosstab(df['marital'], df['is_success'])
marital.plot(kind='bar')
print(df.groupby(['marital']).size()/len(df)*100)
# education
education = pd.crosstab(df['education'], df['is_success'])
education.plot(kind='bar')
print(df.groupby(['education']).size()/len(df)*100)
# Fixing education column
df_new = impute_unknowns(df=df_new, column='education')
# default
default = pd.crosstab(df['default'], df['is_success'])
default.plot(kind='bar')
print(df.groupby(['default']).size()/len(df)*100)
# highly unbalanced hence drop this
# NOTE: drops on df also affect df_new — they alias the same DataFrame.
df.drop(['default'], axis=1, inplace=True)
# housing
housing = pd.crosstab(df['housing'], df['is_success'])
housing.plot(kind='bar')
print(df.groupby(['housing']).size()/len(df)*100)
# contact
contact = pd.crosstab(df['contact'], df['is_success'])
contact.plot(kind='bar')
#print(df.groupby(["contact"])).size()/len(df)*100
df.drop(['contact'], axis=1, inplace=True) #doesn't seem like an important feature
# month
month = pd.crosstab(df['month'], df['is_success'])
month.plot(kind='bar')
print(df.groupby(['month']).size()/len(df)*100)
# poutcome
poutcome = pd.crosstab(df['poutcome'], df['is_success'])
poutcome.plot(kind='bar')
df.groupby(['poutcome']).size()/len(df)*100
df.drop(['poutcome'], axis=1, inplace=True) #most of the values of this column is missing
# Loan
loan = pd.crosstab(df['loan'], df['is_success'])
loan.plot(kind='bar')
print(df.groupby(['loan']).size()/len(df)*100)
# Updated dataset
df_new.info()
# ## Feature Engineering
# separating target variable from the dataset before creating dummy variables
y = df_new['is_success']
X = df_new[df_new.columns[0:12]]
print(X.head())
# creating dummy variables (one-hot encode the categorical columns)
X_dummy = pd.get_dummies(X)
print(X_dummy.head())
X = np.array(X_dummy.values)
Y = np.array(y.values)
# NOTE(review): Y (array) is created but the split below uses the y Series;
# both hold the same labels, so results are unaffected.
X.shape, y.shape
# splitting the validation dataset (80/20 hold-out, fixed seed)
size = 0.20
seed = 7
X_train, X_validation, y_train, Y_validation = model_selection.train_test_split(X, y, test_size=size, random_state = seed)
# +
# scaling the values (zero mean, unit variance) before PCA
X_t = scale(X_train)
# let's use all of our variables as components i.e. 39
# NOTE(review): assumes the dummy-encoded matrix has exactly 39 columns —
# verify, since the count depends on the categories present in the data.
pca = PCA(n_components=39)
pca.fit(X_t)
# Amount of variance by each principal component
var = pca.explained_variance_ratio_
# cumulative variance
cum_var = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)
# -
# let's plot the cumulative variance
plt.plot(cum_var)
# ### From the plot we can see that first 32 components are explaining 100% variability of data. Let's proceed with these 32 components
pca = PCA(n_components=32)
pca.fit(X_t)
X_train_PC = pca.fit_transform(X_t)
# ## Let's train our models
# Test options
seed = 7
scoring = 'accuracy'
# Algorithms: (label, estimator) pairs evaluated under identical CV folds
models=[]
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('K-NN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluating each model in turn
results = []
names = []
for name, model in models:
    # NOTE(review): KFold only shuffles when shuffle=True, so random_state
    # has no effect here (newer sklearn raises if it is set without shuffle).
    kfold = model_selection.KFold(n_splits=10, random_state = seed)
    cv_results = model_selection.cross_val_score(model, X_train_PC, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    # mean accuracy and its standard deviation across the 10 folds
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# ### "SVM" has highest Accuracy but is slowest while "Logistic Regression" is almost as accurate but faster.
# Comparing Algorithms: one box per model over its 10 fold scores
fig = plt.figure()
fig.suptitle('Algorithm Wars')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
# ### Logistic Regression is the best model considering accuracy and speed.
# # Let's Predict
# +
# Scale the hold-out set, then project it with the PCA already fitted on the
# training data. Fix: re-fitting PCA on the validation set (as before) leaks
# validation information and produces components inconsistent with the ones
# the models were trained on.
# NOTE(review): scale() here still standardizes with the validation set's own
# mean/std; a StandardScaler fitted on X_train would be fully leak-free.
X_val = scale(X_validation)
X_validation_PC = pca.transform(X_val)
# -
# Making Predictions: refit the winning model on the PCA-projected training
# data and score it on the hold-out set.
lr = LogisticRegression()
lr.fit(X_train_PC, y_train)
predictions = lr.predict(X_validation_PC)
print("Accuracy: ", accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| Day18/EDA/Banking_EDA/.ipynb_checkpoints/Exploratory Data Analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Box Office Predictions
#
# Applied machine learning project to predict the profitability of a movie before it's released.
#
# ## Objectives
#
# - In general, is the **profit** of a movie correlated with its user **score** on IMDb? How about its number of **votes**? What do these correlations tell you?
# - Using the data from 2014 and earlier, can you predict the **profit** of movies released in 2015 and 2016? Tip: You should only use information that would be available BEFORE a movie is released (i.e. no information on user score, number of votes, or gross revenues).
# - Let's say that you were able to show movies pre-release to a representative focus group, which accurately anticipates the **score** of a movie (but not its overall popularity), can you improve your model?
#
# ## Machine Learning Task
#
# - Apply supervised learning regression techniques to learn patterns in 2014 and earlier movies to predict the **profit** of a movies released in 2015 and 2016
# - Only use inputs that would be available before the movie's release
# - Win condition: not specified
# +
# Module imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Import model and metrics
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# Import Pipeline
from sklearn.pipeline import Pipeline
# Import preprocessing and grid search cross validation
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
# +
# Import data
df = pd.read_csv('box_office_predictions.csv')
df.head()
# +
# Write data to a .tsv file to import into SQL challenges
# df.to_csv('~/MySQLData/BoxOffice.tsv', sep='\t', header=False, index=False)
# +
# Find longest string in each column
# For SQL challenges to figure out type of each column
# for col in df.dtypes[df.dtypes == 'object'].index:
#     print(col)
#     print('Longest string length: {}'.format(df[col].str.len().max()))
#     print()
# +
# Add 'profit' column (the prediction target): gross revenue minus budget
df['profit'] = df['gross'] - df['budget']
# Split release year from name and create a film age feature, relative to 2014,
# so 2015/2016 releases get a negative age (used later to split train/test).
# NOTE(review): assumes every name ends with "(YYYY)" -- verify against the data.
df['age'] = 2014 - df['name'].str[-5:-1].astype(int)
df.head()
# -
df.info()
df.describe()
# No obvious missing values after running the `info()` method, but the `budget` minimum of zero indicates otherwise.
# Remove films where the budget is 0 (zero budget treated as missing data)
df = df.loc[df.budget > 0, :]
# See unique values in categorical columns
for col in df.dtypes[df.dtypes == 'object'].index:
    print(col)
    print('Unique Values: {}'.format(df[col].nunique()))
    print()
# A lot of unique values for several categories indicates sparse classes, which can lead to overfitting. Some of the values will need to be combined to reduce the feature space.
# Show the most common values in each categorical column
for col in df.dtypes[df.dtypes == 'object'].index:
    print(col)
    print(df[col].value_counts().head())
    print()
# Count plots for the lower-cardinality categoricals
for col in ['genre', 'rating', 'country']:
    sns.countplot(data=df, y=col)
    plt.title(col.title())
    plt.show()
# +
# Consolidate sparse categorical values to reduce the feature space and
# mitigate overfitting.
# FIX: the original mutated column views via `df['col'].replace(..., inplace=True)`,
# which is chained assignment -- it raises warnings and silently stops working
# under pandas copy-on-write. Assigning the result back to the column is safe.

def _bin_sparse_values(series, tiers):
    """Return *series* with sparse values replaced by tier labels.

    tiers: list of (upper_bound, label) pairs in increasing order of
    upper_bound. A value whose frequency falls in (previous_bound, upper_bound]
    is replaced by the corresponding label; frequent values are kept as-is.
    """
    counts = series.value_counts()
    result = series.copy()
    lower = 0
    for upper, label in tiers:
        sparse = counts[(counts > lower) & (counts <= upper)].index
        result = result.replace(sparse, label)
        lower = upper
    return result

# Consolidate ratings
df['rating'] = df['rating'].replace(['Not specified', 'UNRATED', 'NOT RATED'],
                                    'UNRATED')
df['rating'] = df['rating'].replace(['NC-17', 'TV-PG', 'B', 'TV-14', 'TV-MA', 'B15'],
                                    'Other')
# +
# Consolidate studios into tiers by number of films released
df['studio'] = _bin_sparse_values(df['studio'],
                                  [(1, 'One Timer'), (5, 'Five Timer'),
                                   (10, 'Ten Timer'), (15, 'Fifteen Timer')])
# +
# Consolidate stars into tiers by number of film appearances
df['star'] = _bin_sparse_values(df['star'],
                                [(1, 'One Timer'), (3, 'Three Timer'),
                                 (5, 'Five Timer'), (10, 'Ten Timer')])
# +
# Consolidate directors into tiers by number of films directed
df['director'] = _bin_sparse_values(df['director'],
                                    [(1, 'One Timer'), (3, 'Three Timer'),
                                     (5, 'Five Timer'), (10, 'Ten Timer')])
# -
# Convert country to binary US / Not US
df['country_isUSA'] = (df['country'] == 'USA').astype(int)
df.head()
# +
# Consolidate sparse genres into 'Other'
df['genre'] = df['genre'].replace(['Mystery', 'Sci-Fi', 'Fantasy', 'Romance', 'Thriller', 'Family', 'Western', 'Musical', 'War'],
                                  'Other')
df.genre.unique()
# +
# Correlation heatmap of numeric features
correlations = df.corr()
plt.figure(figsize=(9, 8));
# Create mask to hide duplicate (upper-triangle) correlations.
# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24 -- use the
# builtin `bool` instead.
mask = np.zeros_like(correlations, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Plot heatmap (correlations scaled to percentages for readable annotations)
sns.set_style('whitegrid')
sns.heatmap(correlations*100, annot=True, fmt='.0f', mask=mask, cmap='RdBu_r');
# -
# Univariate distributions, then bivariate scatter plots against profit
df.hist(figsize=(14, 14), xrot=-45);
sns.lmplot(x='budget', y='profit', data=df, hue='rating', fit_reg=False, scatter_kws={'alpha': 0.3});
sns.lmplot(x='budget', y='profit', data=df, hue='genre', fit_reg=False, scatter_kws={'alpha': 0.3});
sns.lmplot(x='age', y='profit', data=df, hue='genre', fit_reg=False, scatter_kws={'alpha': 0.3})
sns.lmplot(x='score', y='profit', data=df, hue='studio', fit_reg=False, scatter_kws={'alpha': 0.3})
sns.lmplot(x='score', y='profit', data=df, hue='rating', fit_reg=False, scatter_kws={'alpha': 0.3})
sns.lmplot(x='score', y='profit', data=df, hue='country_isUSA', fit_reg=False, scatter_kws={'alpha': 0.1})
df.head()
# +
# Create analytical base table for the model:
# - Drop name col and the (already replaced) country col
# - Drop cols that wouldn't be available at release (gross, votes; score is kept in one version)
# - Convert categories (director, genre, rating, star, studio) to dummy variables
# - Split train/test by age (age >= 0 means released 2014 or earlier)
abt_score = df.drop(['country', 'gross', 'name', 'votes'], axis=1)
abt_score = pd.get_dummies(abt_score, columns=['director', 'genre', 'rating', 'star', 'studio'])
# Create train and test sets.
# FIX: take explicit copies of the boolean-indexed slices -- the original
# mutated views of abt_score with inplace drops, which triggers
# SettingWithCopyWarning and is unreliable under pandas copy-on-write.
x_train_wscore = abt_score[abt_score['age'] >= 0].copy()
x_test_wscore = abt_score[abt_score['age'] < 0].copy()
y_train = x_train_wscore['profit']
y_test = x_test_wscore['profit']
# Drop the label from the train and test feature sets (non-inplace)
x_train_wscore = x_train_wscore.drop('profit', axis=1)
x_test_wscore = x_test_wscore.drop('profit', axis=1)
# Drop score column for the alternative (pre-release-only) feature set
x_train = x_train_wscore.drop(['score'], axis=1)
x_test = x_test_wscore.drop(['score'], axis=1)
# -
x_train_wscore.head()
# +
# Set up simple processing pipelines (standardize, then model) for each model
RANDOM_STATE = 42
pipelines = {}
pipelines['enet'] = Pipeline([('scaler', StandardScaler()),
                              ('enet', ElasticNet(random_state=RANDOM_STATE))])
pipelines['rf'] = Pipeline([('scaler', StandardScaler()),
                            ('rf', RandomForestRegressor(random_state=RANDOM_STATE))])
pipelines['gb'] = Pipeline([('scaler', StandardScaler()),
                            ('gb', GradientBoostingRegressor(random_state=RANDOM_STATE))])
# Setup hyperparameter grids for each model: step_name__parameter_name
enet_hp = {'enet__alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10],
           'enet__l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9]}
# FIX: max_features='auto' (all features, for regressors) was deprecated in
# scikit-learn 1.1 and removed in 1.3; the fraction 1.0 is equivalent and
# works on both old and new versions.
rf_hp = {'rf__n_estimators': [100, 200],
         'rf__max_features': [1.0, 'sqrt', 0.33],
         'rf__min_samples_leaf': [1, 3, 5, 10]}
gb_hp = {'gb__n_estimators': [100, 200],
         'gb__learning_rate': [0.05, 0.1, 0.2],
         'gb__max_depth': [1, 3, 5]}
hyperparameters = {
    'enet': enet_hp,
    'rf': rf_hp,
    'gb': gb_hp
}
# +
# Fit models WITHOUT the score feature.
# GridSearch cross validation fits each pipeline and tunes its hyperparameters.
fitted_models = {}
for name, pipeline in pipelines.items():
    model = GridSearchCV(pipeline, hyperparameters[name], cv=5, n_jobs=-1)
    model.fit(x_train, y_train)
    fitted_models[name] = model
    print(name, 'has been fitted.')
# -
def _report_models(models, x_eval):
    """Print CV best score and test metrics, and plot actual vs. predicted
    profit, for every fitted GridSearchCV in *models* evaluated on
    *x_eval* against the module-level y_test."""
    for name, model in models.items():
        pred = model.predict(x_eval)
        print(name)
        print('Model best score: {0:.4f}'.format(model.best_score_))
        print('Mean Abs Error score on test: {0:,.0f}'.format(mean_absolute_error(y_test, pred)))
        # print('Mean Squared Error score on test: {0:.4f}'.format(mean_squared_error(y_test, pred)))
        print('R2 score on test: {0:.4f}'.format(r2_score(y_test, pred)))
        print()
        # FIX: positional x/y data arguments were deprecated in seaborn 0.12 --
        # pass them as keywords.
        sns.scatterplot(x=y_test, y=pred)
        plt.title(name)
        plt.xlabel('Actual Profit')
        plt.ylabel('Predicted Profit')
        plt.show()
        print()

preds = []
_report_models(fitted_models, x_test)
# +
# Fit models WITH the score feature.
fitted_models_wscore = {}
for name, pipeline in pipelines.items():
    model = GridSearchCV(pipeline, hyperparameters[name], cv=5, n_jobs=-1)
    model.fit(x_train_wscore, y_train)
    fitted_models_wscore[name] = model
    print(name, 'has been fitted.')
# -
preds_wscore = []
_report_models(fitted_models_wscore, x_test_wscore)
# +
# View the ten most important features of the best gradient boosting model
# print(fitted_models_wscore['gb'].best_params_)
final_model = fitted_models_wscore['gb'].best_estimator_
# Second pipeline step is the fitted GradientBoostingRegressor itself
gb_regressor = final_model.steps[1][1]
imp = pd.DataFrame({'Gini-importance': gb_regressor.feature_importances_},
                   index=x_train_wscore.columns)
top_ten = imp.sort_values(by='Gini-importance').tail(10)
top_ten.plot(kind='barh', figsize=(8,8))
plt.show()
# -
# -
# ## Conclusions
#
# As the project objectives lacked a clearly-defined "win condition", it's hard to gauge exactly how well the models performed versus expectations. Out of the three trained models - elastic net, random forest, and gradient boosting - the random forest model performed the best without `score`, and gradient boosting with it, but all had low $R^2$ scores. That said, including a proxy for `score` to train the model greatly increased all model performances, therefore effort should be taken to collect that information for predictions.
#
# The dataset included both categorical and numeric features, with mixed levels of how they tied to the profit of a film (defined as `gross` less `budget`). Unfortunately, the strongest correlations of numeric features to `profit` were with `gross` and `votes` features, which wouldn't be available at the release of a film and therefore not usable to train the model. Also, `profit` correlated weakly with `score`, but adding that feature into the model improved the gradient boosting's $R^2$ score from $0.02$ to $0.23$, and `score` took second place for most important feature. The `budget` feature correlated strongly with `gross`, `gross` correlated strongly with `profit`, but the `budget`-`profit` correlation was weak. However, `budget` was the most important feature in the gradient boosting model, therefore showing that standalone correlation isn't always indicative of feature importance.
#
# One funny observation was that American movies negatively correlated with `score`, so apparently the USA is churning out some bad flicks...
#
# Looking at the categorical features, the major issue was the large number of unique values within a feature - thousands of different `stars` or `directors` in the dataset made for sparse classes. Low value counts within a feature were grouped together to reduce the feature set and help prevent overfitting. One area for improvement would be to try different approaches or thresholds to group these values to see the impact on model performance.
#
# In general, iterating over different approaches for feature engineering and applying a wider array of algorithms could lead to better model performance in predicting movie profits.
| BoxOfficeRegressor/BoxOfficePredictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sklearn
sklearn.__version__
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import seaborn as sns
import matplotlib
# Work around matplotlib rendering Chinese characters as squares.
# NOTE(review): the 'qt4agg' backend was removed in matplotlib 3.5, and this
# call is overridden by `%matplotlib inline` below anyway -- confirm it is
# still needed.
matplotlib.use('qt4agg')
# Use SimHei (a CJK-capable font) as the default font
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family']='sans-serif' # keep the minus sign '-' from rendering as a square
matplotlib.rcParams['axes.unicode_minus'] = False
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# Reproducibility setting
seed = 2020
# With very many features, consider dimensionality reduction or feature selection.
# Not every feature contributes equally to a model; feature selection keeps only
# the features that matter most for model performance.
# # Load data
# Create a binary classification dataset: 10000 samples, 50 features each
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=10000, n_features=50, n_classes=2, random_state=seed)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
# # Evaluation metrics
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
def get_socre(y_true, y_pred):
    """Print F1 and accuracy scores for the given true/predicted labels."""
    # (sic: "socre" -- the misspelled name is kept because it is called
    # throughout this notebook)
    print("[+]F1 score : ", f1_score(y_true, y_pred))
    print("[+]acc score: ", accuracy_score(y_true, y_pred))
# # Train on all features (baseline)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=seed)
clf.fit(X_train, y_train)
get_socre(y_test, clf.predict(X_test))
# # Feature selection
# The methods below only apply to classification tasks
# ## Chi-squared test
# **Caveat: feature values must be non-negative**
#
# The chi-squared test measures dependence between random variables, so it can
# drop the features that contribute little to classification.
#
# Reference: https://zhuanlan.zhihu.com/p/69888032
# +
"""
特征必须非负,我们这里加载另一个数据集
"""
# (The string above notes: features must be non-negative, so a different
# dataset -- breast cancer -- is loaded here.)
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
dataset = load_breast_cancer()
# Before feature selection: baseline fit on the first 400 samples,
# evaluated on the rest
print("没有特征选择")
clf = LogisticRegression(random_state=seed)
clf.fit(dataset.data[:400], dataset.target[:400])
get_socre(dataset.target[400:], clf.predict(dataset.data[400:]))
# After feature selection
print("保留15个重要特征")
new_data = SelectKBest(chi2, k=15).fit_transform(dataset.data, dataset.target) # keep the 15 most important features
clf = LogisticRegression(random_state=seed)
clf.fit(new_data[:400], dataset.target[:400])
get_socre(dataset.target[400:], clf.predict(new_data[400:]))
print("保留20个重要特征")
new_data = SelectKBest(chi2, k=20).fit_transform(dataset.data, dataset.target) # keep the 20 most important features
clf = LogisticRegression(random_state=seed)
clf.fit(new_data[:400], dataset.target[:400])
get_socre(dataset.target[400:], clf.predict(new_data[400:]))
# -
# ## ANOVA (analysis of variance)
# Similar to the chi-squared test: ANOVA decomposes the total variation by
# source, estimating how strongly each controllable factor influences the
# outcome.
#
# Reference: https://www.jianshu.com/p/f5f54a39cb19
# +
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
selector = SelectKBest(f_classif, k=25).fit(X_train, y_train) # keep 25 features
X_new_train = selector.transform(X_train)
X_new_test = selector.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
# -
selector = SelectKBest(f_classif, k=35).fit(X_train, y_train) # keep 35 features
X_new_train = selector.transform(X_train)
X_new_test = selector.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
# ## Mutual information
# Mutual information measures the dependence between two variables; it is 0
# when the variables are independent.
# +
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
selector = SelectKBest(mutual_info_classif, k=25).fit(X_train, y_train) # keep 25 features
X_new_train = selector.transform(X_train)
X_new_test = selector.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
# -
selector = SelectKBest(mutual_info_classif, k=35).fit(X_train, y_train) # keep 35 features
X_new_train = selector.transform(X_train)
X_new_test = selector.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
# ## Feature selection with models
# ### Recursive feature elimination (RFE)
# RFE repeatedly builds a model, each time removing the least important
# feature from the current set, and recurses until the desired number of
# features remains.
# +
from sklearn.feature_selection import RFE
clf = LogisticRegression(random_state=seed)
# FIX: pass n_features_to_select as a keyword -- positional use was deprecated
# and the parameter is keyword-only in scikit-learn >= 1.2.
selector = RFE(clf, n_features_to_select=25)  # keep 25 features
selector = selector.fit(X_train, y_train)
print(selector.support_)  # kept features are True
selector.ranking_  # kept features have rank 1
# -
clf = LogisticRegression(random_state=seed)
clf.fit(X_train[:, selector.support_], y_train)
get_socre(y_test, clf.predict(X_test[:, selector.support_]))
# ### Random forest
# Reference: https://www.jianshu.com/p/8985bc8e4a12
"""
根据 feature_importances_ 属性选择特征
"""
# (Select features based on the fitted model's feature_importances_ attribute.)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=seed)
rf.fit(X_train, y_train)
feature_importances = rf.feature_importances_
feature_importances_index = feature_importances.argsort() # indices sorted so later positions point at larger importances
keep_features = feature_importances_index[-35:] # keep the 35 most important features
keep_features
clf = LogisticRegression(random_state=seed)
clf.fit(X_train[:, keep_features], y_train)
get_socre(y_test, clf.predict(X_test[:, keep_features]))
# +
"""
可以借助SelectFromModel
SelectFromModel是一个较不健壮的解决方案。 它只是根据计算出的阈值删除不那么重要的功能(不涉及优化迭代过程)。
传入的模型必须具有 coef_ 属性或 feature_importances_ 属性
"""
# (SelectFromModel simply drops features below a computed threshold -- no
# iterative optimization; the wrapped model must expose coef_ or
# feature_importances_.)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=seed)
selector = SelectFromModel(rf).fit(X_train, y_train)
X_new_train = selector.transform(X_train)
X_new_test = selector.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
# -
X_new_train.shape
"""
SelectFromModel 根据 threshold 模型的 coef_ 属性或 feature_importances_ 属性来保留特征
默认将 均值 作为其阈值
"""
# (By default SelectFromModel uses the mean importance as its threshold --
# verified by comparing the two printed values below.)
print(feature_importances.mean())
print(selector.threshold_)
# ### Extremely randomized trees
# ExtRa Trees is short for Extremely Randomized Trees. It is an ensemble
# method that behaves more like a random forest than a single decision tree.
#
# Like a random forest, it:
# 1. bootstraps the samples, and
# 2. builds each tree on a random subset of features.
#
# Unlike a random forest:
# 1. split points are chosen differently -- an ordinary decision tree picks
#    each split by a criterion (information gain or Gini impurity), whereas an
#    extra tree picks a random split point first and then selects the best
#    feature according to the criterion.
#
# Reference: http://sofasofa.io/forum_main_post.php?postid=1000765
from sklearn.ensemble import ExtraTreesClassifier
etclf = ExtraTreesClassifier(random_state=seed)
etclf.fit(X_train, y_train)
feature_importances = etclf.feature_importances_
feature_importances_index = feature_importances.argsort() # indices sorted so later positions point at larger importances
keep_features = feature_importances_index[-35:] # keep the 35 most important features
clf = LogisticRegression(random_state=seed)
clf.fit(X_train[:, keep_features], y_train)
get_socre(y_test, clf.predict(X_test[:, keep_features]))
# ## Correlation analysis
# The Pearson correlation coefficient measures the direction and strength of
# the joint trend of two variables: it ranges from -1 to +1, 0 means
# uncorrelated, positive/negative values mean positive/negative correlation,
# and larger magnitudes mean stronger correlation.
"""
找出与类别最相关的特征
"""
# (Find the features most correlated with the class label.)
df = pd.DataFrame(X_train)
df['y'] = y_train
corr= df.corr()
corr_y = abs(corr["y"])
highest_corr = corr_y[corr_y > 0.1] # only keep correlations above 0.1
highest_corr.sort_values(ascending=True) # only 3 features correlate strongly with the label
keep_features = highest_corr.sort_values(ascending=True).index[:-1] # drop y itself
clf = LogisticRegression(random_state=seed)
# NOTE(review): the test columns [27, 34, 6] are hard-coded to match
# keep_features for this seed -- verify they stay in sync if the data changes.
clf.fit(df[keep_features], y_train)
get_socre(y_test, clf.predict(X_test[:, [27,34,6]]))
# +
"""
去除相关度高(冗余)的特征,特征较少时可以用画图的方式
我们选择与标签最相关的10特征
"""
# (Remove highly correlated -- redundant -- features; with few features a
# heatmap works well. Here: the 10 features most correlated with the label.)
df_2 = df[corr_y.sort_values(ascending=True)[-11:-1].index]
figure(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
corr_2 = df_2.corr()
sns.heatmap(corr_2, annot=True, fmt=".2g")
# +
"""
特征6 和 特征34 高度相关,特征27 和 特征34 也比较相关,我们去掉特征34
特征6 和 特征27 也比较为相关,可以尝试去除特征6
"""
# (Features 6 and 34 are highly correlated, and 27 and 34 fairly correlated,
# so feature 34 is dropped; 6 and 27 are also fairly correlated, so dropping
# feature 6 is tried as well.)
print("10个特征")
keep_features = corr_y.sort_values(ascending=True)[-11:-1].index # drop y itself
keep_features = keep_features.values.astype(np.int64) # convert to int for positional indexing
clf = LogisticRegression(random_state=seed)
clf.fit(df[keep_features], y_train)
get_socre(y_test, clf.predict(X_test[:, keep_features]))
print("9个特征")
keep_features = np.delete(keep_features, [8, 9], axis=0) # feature 34 is at position 8, feature 6 at position 9
clf = LogisticRegression(random_state=seed)
clf.fit(df[keep_features], y_train)
get_socre(y_test, clf.predict(X_test[:, keep_features]))
# -
# ## L1 regularization
# L1 regularization makes the model's solution sparser, which effectively
# performs feature selection.
"""
直接使用L1正则作为惩罚项
"""
# (Use the L1 penalty directly.)
clf = LogisticRegression(random_state=seed, solver='saga', penalty='l1') # most configs default to L2; the default solver does not support L1
clf.fit(X_train, y_train)
get_socre(y_test, clf.predict(X_test))
# Three coefficients are zero, i.e. effectively only 47 features are used
clf.coef_
clf = LogisticRegression(random_state=seed, solver='saga', penalty='l2') # L2 penalty as a control
clf.fit(X_train, y_train)
get_socre(y_test, clf.predict(X_test))
# The L1 solution is sparser than the L2 one:
# with L2 regularization there are no zero coefficients
clf.coef_
# FIX: penalty='none' (the string) was deprecated in scikit-learn 1.2 and
# removed in 1.4 -- use penalty=None instead (same behavior on all versions
# that accept it).
clf = LogisticRegression(random_state=seed, solver='saga', penalty=None) # no penalty, as a control
clf.fit(X_train, y_train)
get_socre(y_test, clf.predict(X_test))
clf.coef_
# +
"""
L1正则作特征选择,画图看重要性(系数大小)
"""
# (Use L1 regularization for feature selection and plot the importance, i.e.
# the coefficient magnitudes.)
# FIX: removed the unused `from sklearn.linear_model import LassoCV` import --
# this cell actually uses L1-penalized LogisticRegression, not Lasso.
clf = LogisticRegression(random_state=seed, solver='saga', penalty='l1')
clf.fit(X_train, y_train)
print("消除特征数:", len(clf.coef_[clf.coef_==0]))
print("保留特征数:", len(clf.coef_[clf.coef_!=0]))
# -
# Plot the non-zero coefficients, sorted, as a horizontal bar chart
model_coef = pd.Series(clf.coef_.reshape(-1))
figure(num=None, figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
top_coef = model_coef.sort_values()
top_coef[top_coef != 0].plot(kind = "barh")
plt.title("使用Lasso获得特征的重要性(不为0的系数)")
# # PCA dimensionality reduction
# Uses singular value decomposition to project the data onto a
# lower-dimensional k-space; the k resulting features are called principal
# components.
#
# Reference: https://www.jianshu.com/p/bcd196497d94
# Plot the explained variance to pick a suitable number of dimensions
from sklearn.decomposition import PCA
candidate_components = range(5, 50, 5) # with 50 features, start at 5 dimensions and step by 5
explained_ratios = []
for c in candidate_components:
    pca = PCA(n_components=c)
    pca.fit(X_train)
    explained_ratios.append(np.sum(pca.explained_variance_ratio_))
print(explained_ratios)
plt.figure(figsize=(10, 6), dpi=144)
plt.grid()
plt.plot(candidate_components, explained_ratios)
plt.xlabel('Number of PCA Components')
plt.ylabel('Explained Variance Ratio')
plt.title('Explained variance ratio for PCA')
plt.yticks(np.arange(0.5, 1, 0.1))
plt.xticks(np.arange(0, 50, 5))
"""
发现图是线性的,没有明显拐点,尽量选择方差百分比高的主成分数
40占所有特征的方差百分比为0.8607271129195249
"""
# (The curve is roughly linear with no clear elbow, so prefer a component
# count with a high explained-variance ratio; 40 components explain ~0.861.)
pca = PCA(n_components=40)
X_new_train = pca.fit_transform(X_train)
X_new_test = pca.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
"""
45占所有特征的方差百分比为0.9489175018749659
"""
# (45 components explain ~0.949 of the variance.)
pca = PCA(n_components=45)
X_new_train = pca.fit_transform(X_train)
X_new_test = pca.transform(X_test)
clf = LogisticRegression(random_state=seed)
clf.fit(X_new_train, y_train)
get_socre(y_test, clf.predict(X_new_test))
| machine_learning/feature_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Times and Dates
#
# The [astropy.time](https://docs.astropy.org/en/stable/time/index.html) sub-package in astropy provides a way to represent and manipulate times and dates. It supports a number of time scales (e.g. UTC, TAI, UT1, TDB) and formats (e.g. Julian Date, ISO 8601) and uses two floating point values for each date/time to provide extremely high precision.
#
# <section class="objectives panel panel-warning">
# <div class="panel-heading">
# <h2><span class="fa fa-certificate"></span> Objectives</h2>
# </div>
#
#
# <div class="panel-body">
#
# <ul>
# <li>Create and represent times and dates (scalars and arrays)</li>
# <li>Understand the difference between a time <em>scale</em> and a <em>format</em></li>
# <li>Convert to different scales and formats</li>
# <li>Do simple arithmetic with Time objects</li>
# </ul>
#
# </div>
#
# </section>
#
# ## Documentation
#
# This notebook only shows a subset of the functionality in astropy.time. For more information about the features presented below as well as other available features, you can read the
# [astropy.time documentation](https://docs.astropy.org/en/stable/time/).
# ## Representing times
#
# Representing time objects is done with the Time class:
from astropy.time import Time
# This class can be initialized using values in a variety of formats. The following example shows how to initialize a time from an ISO 8601 string (which is commonly used for representing dates and times):
t1 = Time('2019-10-23T13:44:21')
t1
# By default, dates/times are assumed to be in UTC, but you can also explicitly specify the scale if it is different:
t2 = Time('2019-10-23T13:44:21', scale='tdb')
t2
# If you want to initialize from a different *format*, e.g. a Julian Date, you will often need to specify the format explicitly:
t3 = Time(2458780, format='jd')
t3
# Note that the *scale* is fundamental to the meaning of the time, while the *format* is more to do with how the date/time is shown. You can find out what the current scale and current default format are:
t3.scale
t3.format
# You can convert the date/time to a different format by using ``.<format_name>``, e.g.
t3.mjd
t3.isot
# A [list of valid formats](https://docs.astropy.org/en/stable/time/index.html#time-format) can be found in the Astropy documentation. You can also change the default format by setting ``.format``:
t3.format = 'isot'
t3
# Similarly to formats, you can convert the time object to a different scale by using attribute notation:
t3
t3.tdb
# A [list of valid scales](https://docs.astropy.org/en/stable/time/index.html#time-scale) can be found in the Astropy documentation.
# ## Time arrays
#
# The above examples show scalar times, but the ``Time`` class can also be used to efficiently store arrays of values:
import numpy as np
# Build an array-valued Time: 11 evenly spaced Modified Julian Dates
t4 = Time(np.linspace(50000, 51000, 11), format='mjd')
t4
# This time object can be indexed and sliced like a Numpy array:
t4[2:5]
t4[6:1:-2]
t4[6]
# ## Arithmetic with times
#
# If you have two ``Time`` objects, you can find the difference between them - and this will automatically deal with differences in scale if relevant:
t5 = Time('2019-10-23T13:44:21', scale='utc')
t6 = Time('2019-10-22T11:21:00', scale='tdb')
# Subtracting two Time objects handles the difference in scales automatically
diff = t5 - t6
diff
# Note that the returned object is not an astropy quantity but a special object called ``TimeDelta`` which contains the difference in times but also information about the time scale in which that difference is expressed:
diff.scale
# In many cases you will probably want to get this as a quantity, which you can do with e.g.:
from astropy import units as u
diff.to(u.s)
diff.to(u.h)
# You can also go from a relative time to an absolute time by adding a ``Time`` object with a ``TimeDelta`` object or a quantity object::
t7 = t6 + [1, 2, 3] * u.s
t7
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>The answer to some of the following can be found in <a href="https://docs.astropy.org/en/stable/time/index.html">the documentation</a>!</p>
# <ol>
# <li>Construct a time object for the current time (note that there is a shortcut for this)</li>
# <li>Find a way to get an ISO 8601 string for the current time, optionally with 6 decimal places</li>
# <li>Find the number of minutes that have elapsed since the start of the course</li>
# </ol>
#
# </div>
#
# </section>
#
# +
# 1. Current time (Time.now() is the shortcut for this).
now = Time.now()
print(now)
# 2. ISO 8601 string for the current time with 6 decimal places.
now.precision = 6
print(now.isot)
# 3. Minutes elapsed since the start of the course (start given in UTC).
course_start = Time('2019-10-23T08:00:00')
elapsed = now - course_start
print(elapsed.to(u.minute))
# -
# <center><i>This notebook was written by <a href="https://aperiosoftware.com/">Aperio Software Ltd.</a> © 2019, and is licensed under a <a href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License (CC BY 4.0)</a></i></center>
#
# 
| instructor/02-times_instructor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: odqa-experiments
# language: python
# name: odqa-experiments
# ---
# +
from bert_score import score
import pandas as pd
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import time
from overlap_evaluate import get_scores, read_references, read_annotations, ANNOTATIONS, _print_score
# -
# +
# Sanity-check bert_score on a single candidate/reference pair
cands=["hi my name is caleb"]
refs = ["yo my name is Caleb"]
score(cands, refs, lang='en', verbose=True)
# -
# Load the Natural Questions test annotations and reference answers
annotations = read_annotations("data/nq-annotations.jsonl")
references = read_references("data/nq-test.qa.csv")
nq_test = pd.read_csv("data/nq-test.qa.csv", sep="\t", names=["question", "answers"])
# +
# Load the closed-book T5 QA model and its tokenizer
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
t5_qa_model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-large-ssm-nq")
t5_tok = AutoTokenizer.from_pretrained("google/t5-large-ssm-nq")
# +
def predict(question: str):
    """Generate an answer string for *question* with the closed-book T5 model."""
    encoded = t5_tok(question, return_tensors="pt")
    generated = t5_qa_model.generate(encoded.input_ids)[0]
    return t5_tok.decode(generated, skip_special_tokens=True)

# Smoke-test the helper on a single question
predict("Which quarterback threw for the most passing yards?")
# +
# Run the model over every test question, logging progress every 40 examples
predictions = []
start = time.time()
for i, question in enumerate(nq_test["question"]):
    if i % 40 == 0:
        print(f"At example: {i} after {round(time.time()-start, 3)} seconds")
    predictions.append({"id": i, "prediction": predict(question)})
print(f"Finished after {round(time.time()-start, 3)} seconds")
# -
# Peek at the first prediction, then score everything against the references
predictions[0]
scores = get_scores(predictions, references, annotations, get_bert_score=True)
for label in ANNOTATIONS:
    _print_score(label, scores[label])
| nq_evaluate_overlap_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Ejercicios
# ## Descripción general del problema
#
# Recopilamos los registros de los vuelos que ocurrieron durante un día entre aeropuertos ubicados en Colombia y los organizamos en una lista de diccionarios. Ahora queremos que usted nos ayude a identificar:
#
# 1. la aerolinea con mas vuelos durante el dia.
#
# 2. el avion con mas vuelos durante el dia.
#
# 3. la ciudad con mas vuelos (salidas o llegadas) durante el dia.
#
# 4. el avion con mayor uso durante el dia.
#
# 5. la duracion promedio de los vuelos para cada aeropuerto.
#
# 6. itinerarios posibles entre un aeropuerto de origen y destino (unicamente vuelos directos).
#
#
# El parámetro vuelos de la función es una lista de diccionarios con la información de los vuelos.
#
# Los valores en la lista no necesariamente estan ordenados.
#
# Los valores en esta lista son diccionarios con la información de un vuelo organizado de acuerdo a las siguientes llaves:
#
# aerolínea, que corresponde al nombre de la aerolínea.
#
# codigo, que corresponde al identificador unico del avion.
#
# origen, que corresponde al código de aeropuerto de origen.
#
# destino, que corresponde al aeropuerto destino del vuelo.
#
# distancia, que corresponde a la distancia entre el origen y el destino.
#
# retraso, que corresponde a la cantidad de minutos de retraso que tuvo el vuelo.
#
# duracion, que corresponde a la duración planeada del vuelo en minutos.
#
# salida, que corresponde a un entero que representa la hora de salida.
#
# La hora de salida se representa usando la hora en formato 24 horas multiplicada por 100 más la cantidad de minutos (por ejemplo, las 2007 indica que el vuelo salió a las 8:07 pm).
#
# ## Funciones requeridas
#
# Su solución debe tener una función de acuerdo a la siguientes especificaciones. Usted puede tener funciones adicionales.
#
# *Nombre de la función:* aerolinea_con_vuelos
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
#
# Tipo del retorno| Descripción del retorno
# ----|----
# tuple| nombre de la aerolinea con mas vuelos durante el dia (salida o entrada) y numero de vuelos en el dia.
# +
# Airline with the greatest number of flights in the flight list.
vuelosEjemplo = [{"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 5, "duracion": 120, "salida":600},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 2, "duracion": 115, "salida":555},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "CTG", "destino": "BOG", "distancia": 295, "retraso": 15, "duracion": 120, "salida":830},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "CTG", "destino": "PEI", "distancia": 325, "retraso": 5, "duracion": 135, "salida":800},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "BOG", "destino": "CLO", "distancia": 255, "retraso": 25, "duracion": 170, "salida":605},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "PEI", "destino": "BOG", "distancia": 220, "retraso": 5, "duracion": 60, "salida":1030},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "CLO", "destino": "CTG", "distancia": 400, "retraso": 20, "duracion": 160, "salida":1200}]


def aerolinea_con_vuelos(vuelos):
    """Return the airline with the most flights during the day.

    Parameters:
        vuelos (list): list of flight dicts, each with an "aerolinea" key.

    Returns:
        tuple: (airline name, number of flights). (None, 0) if `vuelos` is empty.
    """
    # Single pass: count occurrences of each airline (O(n) instead of the
    # nested airline-by-flight scan).
    conteo = {}
    for vuelo in vuelos:
        aerolinea = vuelo["aerolinea"]
        conteo[aerolinea] = conteo.get(aerolinea, 0) + 1
    if not conteo:
        return (None, 0)
    ganadora = max(conteo, key=conteo.get)
    return (ganadora, conteo[ganadora])


# Result tuple for the example data (same variable name as before).
miTupla = aerolinea_con_vuelos(vuelosEjemplo)
# -
# *Nombre de la función:* avion_con_vuelos
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
#
# Tipo del retorno| Descripción del retorno
# ----|----
# tuple| Codigo del avion con mas vuelos durante el dia y numero de vuelos.
# +
# Plane (aircraft code) with the greatest number of flights in the flight list.
vuelosEjemplo = [{"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 5, "duracion": 120, "salida":600},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 2, "duracion": 115, "salida":555},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "CTG", "destino": "BOG", "distancia": 295, "retraso": 15, "duracion": 120, "salida":830},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "CTG", "destino": "PEI", "distancia": 325, "retraso": 5, "duracion": 135, "salida":800},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "BOG", "destino": "CLO", "distancia": 255, "retraso": 25, "duracion": 170, "salida":605},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "PEI", "destino": "BOG", "distancia": 220, "retraso": 5, "duracion": 60, "salida":1030},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "CLO", "destino": "CTG", "distancia": 400, "retraso": 20, "duracion": 160, "salida":1200}]


def avion_con_vuelos(vuelos):
    """Return the plane with the most flights during the day.

    Parameters:
        vuelos (list): list of flight dicts, each with a "codigo" key
            (unique aircraft identifier).

    Returns:
        tuple: (aircraft code, number of flights). (None, 0) if `vuelos` is empty.
    """
    # Single pass: count flights per aircraft code (replaces the O(planes x
    # flights) nested loop).
    conteo = {}
    for vuelo in vuelos:
        codigo = vuelo["codigo"]
        conteo[codigo] = conteo.get(codigo, 0) + 1
    if not conteo:
        return (None, 0)
    ganador = max(conteo, key=conteo.get)
    return (ganador, conteo[ganador])


# Result tuple for the example data (same variable name as before).
miTupla = avion_con_vuelos(vuelosEjemplo)
print(miTupla)
# -
# *Nombre de la función:* ciudad_con_vuelos
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
#
# Tipo del retorno| Descripción del retorno
# ----|----
# tuple| Codigo del aeropuerto con mas vuelos durante el dia (salida o entrada) y numero de vuelos.
# *Nombre de la función:* avion_con_mayor_uso
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
#
# Tipo del retorno| Descripción del retorno
# ----|----
# tuple | Codigo del avion con mayor uso durante el dia (salida o entrada) y numero de minutos de uso.
#
# *Nombre de la función:* duracion_promedio_vuelos_aeropuertos
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
#
# Tipo del retorno| Descripción del retorno
# ----|----
# dict | Diccionario con los aeropuertos y duraciones promedio de los vuelos que arriban o parten de este.
# +
# Plane (aircraft code) with the greatest accumulated usage (flight minutes).
vuelosEjemplo = [{"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 5, "duracion": 120, "salida":600},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 2, "duracion": 115, "salida":555},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "CTG", "destino": "BOG", "distancia": 295, "retraso": 15, "duracion": 120, "salida":830},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "CTG", "destino": "PEI", "distancia": 325, "retraso": 5, "duracion": 135, "salida":800},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "BOG", "destino": "CLO", "distancia": 255, "retraso": 25, "duracion": 170, "salida":605},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "PEI", "destino": "BOG", "distancia": 220, "retraso": 5, "duracion": 60, "salida":1030},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "CLO", "destino": "CTG", "distancia": 400, "retraso": 20, "duracion": 160, "salida":1200}]


def avion_con_mayor_uso(vuelos):
    """Return the plane with the highest total planned flight time.

    Usage is measured as the sum of the "duracion" field (planned minutes)
    over all of that plane's flights, matching the original solution.

    Parameters:
        vuelos (list): list of flight dicts with "codigo" and "duracion" keys.

    Returns:
        tuple: (aircraft code, total minutes). (None, 0) if `vuelos` is empty.
    """
    # Single pass: accumulate minutes per aircraft code (replaces the
    # O(planes x flights) nested loop).
    uso = {}
    for vuelo in vuelos:
        codigo = vuelo["codigo"]
        uso[codigo] = uso.get(codigo, 0) + vuelo["duracion"]
    if not uso:
        return (None, 0)
    ganador = max(uso, key=uso.get)
    return (ganador, uso[ganador])


# Result tuple for the example data (same variable name as before).
miTupla = avion_con_mayor_uso(vuelosEjemplo)
# -
# *Nombre de la función:* itinerarios_posibles_entre_par_ciudades
#
# Parámetros
#
# Nombre | Tipo| Descripción
# ---- | ---- | ----
# vuelos | list | Es una lista de diccionarios con la información de los vuelos.
# origen | str | El código del aeropuerto de origen
# destino | str | El código del aeropuerto de destino
#
#
# Tipo del retorno| Descripción del retorno
# ----|----
# lista | Retorna una lista con los itinerarios posibles entre el origen y el destino. El itinerario debe ser una lista con los horarios de salida.
# Sample flight data for the itineraries exercise.
# NOTE(review): the itinerarios_posibles_entre_par_ciudades solution is not
# implemented in this cell -- only the example data is defined here.
vuelosEjemplo = [{"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 5, "duracion": 120, "salida":600},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "BOG", "destino": "CTG", "distancia": 295, "retraso": 2, "duracion": 115, "salida":555},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF21", "origen": "CTG", "destino": "BOG", "distancia": 295, "retraso": 15, "duracion": 120, "salida":830},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "CTG", "destino": "PEI", "distancia": 325, "retraso": 5, "duracion": 135, "salida":800},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "BOG", "destino": "CLO", "distancia": 255, "retraso": 25, "duracion": 170, "salida":605},
                 {"aerolinea": "VIVAAIR", 'codigo': "VVE01", "origen": "PEI", "destino": "BOG", "distancia": 220, "retraso": 5, "duracion": 60, "salida":1030},
                 {"aerolinea": "AVIANCA", 'codigo': "AHF23", "origen": "CLO", "destino": "CTG", "distancia": 400, "retraso": 20, "duracion": 160, "salida":1200}]
| Clases/Clase 18 - Ejercicios Aerolinea Solucion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Psych 81.09
# language: python
# name: psych81.09
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/franklee20/cs-for-psych/blob/master/Copy_of_intro_to_python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="FQ_j-Ru69tiX"
# # Overview
#
# This notebook provides a very high level overview of what it means to "write a computer program," and some specific examples of how to write a program in Python (using Jupyter notebooks). If you are already familiar with Python and Jupyter notebooks, you might be interested in [this video](https://youtu.be/Gin8_AITmS0) summarizing some tips and tricks for writing good, clean, effective code.
#
# Note: this tutorial is intended to introduce you to the basics-- not to provide a complete, or even fully self-contained, overview of Python programming. If this material is new to you, then after running through this notebook, I suggest that you work through a beginner Python tutorial like [this one](https://gitlab.erc.monash.edu.au/andrease/Python4Maths/tree/master/Intro-to-Python). We'll also cover this material in class, but the best way to learn how to program is through repeated and varied exposure.
#
# ### A note on how to approach learning to code
#
# Like any new skill, learning to code can be daunting. If you've never coded or taken courses on logic before, you might be about to embark on a journey towards an entirely new way of thinking and approaching problem solving. I've found four general principles to be useful:
#
# 1. The first time you go through something, don't expect to understand everything (or even anything) perfectly. Sometimes going through a new lesson or example might seem like encountering a foreign language (hint: maybe because it *is*!). Give yourself permission to relax and to *not* understand. Not "getting it" the first time doesn't reflect on your intelligence or potential.
#
# 2. If you're feeling lost, it's important to notice your points of confusion (even if you don't immediately correct or resolve them). One of the most powerful ways of approaching coding is to break highly complex tasks down into successively simpler sub-tasks. You don't always need to understand the full scope of the problem you're working on-- you just need to understand enough to break off a tiny piece of the next step in the puzzle. By the same token, approach *learning* as a problem solving journey. Try to take stock of what you're not understanding, and carefully separate the material into "things you know (sufficiently well)" and "things you don't know". Use what you know to help further sub-divide the unknowns. Your goal should be to work towards an ability to precisely formulate questions. That will help you resolve your understanding (either by searching for the answer online or by asking others for help). Particularly when you're starting out, it's also OK not to know how to break your questions down into smaller chunks. You'll pick it up as you go.
#
# 3. The best predictor of successfully learning to code is *persistence*. Simply allowing yourself to be exposed to code (by reading and writing it, and by going through tutorials, etc.) will improve your ability to understand, even if those improvements aren't immediately obvious or noticeable to you. One way to view the learning process is as a way of building up your *fluency* (through repeated exposure) in addition to adding the basic building blocks and vocabulary to your skill set. Even if you're not adding new building blocks, simply gaining more exposure will improve your fluency. Over time this will make it easier for you to add new building blocks. Each new step you take in the learning process will allow you to gain a deeper understanding of previously learned material, and will help you to learn new material that much faster.
#
# 4. The best way to learn to code is to *write code*. When you're reading through a new tutorial or about a new idea, keep a scratch notebook open and code as you learn (`control + alt + n` or `control + option + n`). And once you've learned about something new, try to apply it to a question or problem you're excited about. The potential applications of any given idea are nearly limitless. Allow yourself the freedom to get creative, make mistakes, break things, and try out new stuff!
# + [markdown] id="G_fJb2MONqkf"
# # What does it mean to 'code'?
#
# Coding, or [computer programming](https://en.wikipedia.org/wiki/Computer_programming), is the process of developing a set of instructions for a computer to run, typically with the goal of accomplishing some task (e.g. carrying out one or more calculations).
#
# In this course we'll be writing code in [Python](https://en.wikipedia.org/wiki/Python_(programming_language)). Python is considered a [high-level](https://en.wikipedia.org/wiki/High-level_programming_language) computer language. This means that, as the programmer, you can often ignore the particulars of how specifically your inputted instructions are carried out by the computer. Rather, those particulars are abstracted away. Instead, you'll be describing at a "higher" (less detailed, more goal-oriented) level. By contrast, [lower-level languages](https://en.wikipedia.org/wiki/Low-level_programming_language) such as [C](https://en.wikipedia.org/wiki/C_(programming_language)) require programmers to consider how different quantities are stored in the computer's [memory](https://en.wikipedia.org/wiki/Random-access_memory), and even lower-level languages such as [assembly code](https://en.wikipedia.org/wiki/Assembly_language) require programmers to make use of the specific set of instructions available on the [CPU](https://en.wikipedia.org/wiki/Central_processing_unit).
#
# ## How to think about coding
#
# In Python, as with most computer languages, there are only a few types of commands and instructions that you need to learn about. In fact, you'll learn about most of them in this notebook. Compared with human languages, which typically have many words and complex grammars, computer languages are relatively simple (with respect to their vocabularies and grammars). What makes Python (and other computer languages) so powerful is [compositionality](https://en.wikipedia.org/wiki/Principle_of_compositionality): the idea that you can build increasingly complicated functionality out of simpler building blocks. Everything computers do-- your operating system, the Internet, self-driving cars, computing the millionth digit of pi, etc.-- are all reducible to the same basic sets of instructions. Just like how human language words may be recombined and built-upon to communicate an infinite variety of ideas, the elements of computer languages may be recombined in similarly flexible and extensible ways. Whereas the goal of human language is to communicate ideas to other humans, the goal of computer language is to describe a set of calculations for the CPU to carry out in order to accomplish one or more tasks.
#
# Once you learn the basic set of instructions, the entire "trick" of coding is to figure out how to string them together into meaningful code that carries out the desired tasks. Analogous to how there are many ways of "saying the same thing" in human languages, there are often many ways of writing code that carries out the same set of computations. And just like there are more (or less) efficient ways of conveying information to other people using human language, there are often more (or less) efficient ways of composing computer instructions.
#
# In the remainder of this notebook you'll learn about the major "words" and "building blocks" of Python programs.
# + [markdown] id="Eq2Y6UyH9tiY"
# # Using Python like a calculator
#
# In the [Introduction and Overview notebook](https://github.com/ContextLab/cs-for-psych/blob/master/slides/module_1/introduction_and_overview.ipynb), we conceptualized thinking about Python like a calculator. For example, you can type in an equation, and when you "run" that line of code (`shift + return`) the answer is printed to the console window. Explore which operators (addition, subtraction, etc.) are supported and see if you can find some that aren't! Also try to get a sense of the [order of operations](https://en.wikipedia.org/wiki/Order_of_operations) that determines which quantities are grouped and what order they are evaluated in.
# + id="OoVt-o5z9tiZ" outputId="9947f800-60ee-4391-e424-054b0c480418" colab={"base_uri": "https://localhost:8080/", "height": 34}
3 * 5  # run the cell (shift+return) to evaluate the product; the result (15) is printed below the cell
# + id="UtOmyc-F9tic" outputId="63e14bbc-5e18-4ec8-9094-96019d2f88e6" colab={"base_uri": "https://localhost:8080/", "height": 34}
(6 / 3 + 1) * (4 * 2 - 3)  # parentheses group operations: (2.0 + 1) * (8 - 3) = 15.0; note / always yields a float
# + [markdown] id="j7Ryt1L76uHr"
# ## Scripting
#
# Python is a [scripting language](https://en.wikipedia.org/wiki/Scripting_language). This means that you could get the same functionality from a Python program by typing and running each line of the program in sequence, just like you might carry out a series of calculations by hand on a traditional calculator. (By contrast, [compiled languages](https://en.wikipedia.org/wiki/Compiled_language) do not have this property.)
#
# It can be useful to think of Python code as a script (a set of commands that are executed in sequence, from top to bottom) when you're designing a program. Your job is to come up with the set of calculations that, when carried out in sequence, accomplish your desired goal.
# + [markdown] id="cdy7zbiF9tih"
# # Variables
# A _variable_ is a named object-- i.e. a thing that Python knows has a particular value. It's often useful to write code that incorporates named variables wherever possible (rather than hard-coding in specific numerical values). This way of abstracting away the specific values from the set of operations you want to perform on those values allows you to use the same line of code to perform different functions.
#
# To define a variable, you use the _assignment_ operator, `=`. The name of your variable goes on the left side of the assignment operator, and the value you want to assign to that variable goes on the right side. Play around with the example below to see how it works. For example, change the values of `x` and `y` and see how the answers change.
# + id="Dpe2rRcj9tih" outputId="85f230dd-b127-4c80-e2ad-8982b65221a1"
x = 3  # try changing these values and re-running the cell
y = 4
x + y  # the last expression in a cell is displayed as its output (here: 7)
# + [markdown] id="QB41xezY8VMe"
# ## What are variables used for?
#
# One way to think about variables is like the [memory function](https://en.wikipedia.org/wiki/Calculator#Memory) on a traditional calculator. It gives you a way of storing some value that might be useful later on. By naming that stored quantity you (as the programmer) can keep track of what that quantity means and how it's going to be used later on in your program.
# + [markdown] id="bP0Q5C3p9tij"
# # Operators
#
# In addition to the assignment operator (`=`), you have already been using several other operators in the code above:
# - the addition operator (`+`, as in `a + b`)
# - the multiplication operator (`*`, as in `a * b`)
#
# There are a number of other useful operators:
# - the exponent operator (`**`, as in `a ** b`) raises the value of the first thing (`a`) to the power of the second thing (`b`)
# - the subtraction operator (`-`, as in `a - b`)
# - the division operator (`/`, as in `a / b`)
# - the modulus operator (`%`, as in `a % b`) computes the remainder when the first number is divided by the second number
# - the floor division operator (`//`, as in `a // b`) divides the first number by the second number and then removes any fractional parts of the result
# - the negation operator (`-`, as in `-a`) placed before a single number or variable multiplies its value by -1
# - the unary plus operator (`+`, as in `+a`) placed before a single number or variable multiplies its value by 1. This is rarely used.
# - Some Python functions treat `+a` and `a` differently. One use of the unary operator is to provide a variable with the same value but that is distinguishable in certain use cases.
# - the add and operator (`+=`, as in `a += b`) is shorthand for `a = a + b`
# - the subtract and operator (`-=`, as in `a -= b`) is shorthand for `a = a - b`
# - the multiply and operator (`*=`, as in `a *= b`) is shorthand for `a = a * b`
# - the exponent and operator (`**=`, as in `a **= b`) is shorthand for `a = a ** b`
# - the modulus and operator (`%=`, as in `a %= b`) is shorthand for `a = a % b`
# - the floor division and operator (`//=`, as in `a //= b`) is shorthand for `a = a // b`
#
# ## Logical operators
#
# We will find that it is often useful to know if a particular set of circumstances is true at a particular point in the course of running our computer program. For example, maybe we would want to run a particular set of calculations in one scenario but a different set of calculations in another scenario. _Logical_ operators are operators that work only on _Boolean_ variables-- i.e. variables that take on special values, `True` (or 1) and `False` (or 0). Logical operators always yield either `True` or `False`:
# - `or`: `a or b` is `True` if _either_ `a` is `True` _or_ if `b` is `True`, and `False` otherwise.
# - `and`: `a and b` is `True` if both `a` is `True` _and_ `b` is `True`, and is `False` otherwise.
# - `not`: `not a` is `True` if `a` is `False`, and is `False` if `a` is `True`.
#
# Logical operators play a very important role in computer programs. One common use of logical operators is in determining whether (or when) a particular computer instruction will run. Logical operators may also be combined to define complex tests. For example, you might want to know if either `a` or `b` (but not `c`) are True:
# ```
# d = (a or b) and (not c)
# ```
#
# ## Equality operator
# Sometimes we want to know whether a particular variable has a specific value. To do this, we use the _equality operator_, `==`:
# - `a == b` is `True` if `a` has the same value as `b`, and is `False` otherwise. One common use of the equality operator is in determining whether a particular condition has been satisfied. For example, you might want to execute an instruction 10 times, with a counter (`i`) that keeps track of how many times the instruction has been executed. You could include a check of whether `i == 10` to determine whether the program should execute the instruction again or not.
#
# As a shorthand for `not (a == b)`, Python also provides an inequality operator, `!=`. The statement `a != b` is `True` if `a` does *not* have the same value as `b`. (Python 2 also had a `<>` operator equivalent to `!=`, but it was removed in Python 3 and is no longer valid syntax.)
#
# ## Other comparison operators
# Python also includes operators that allow comparisons of different values:
# - `a > b` is `True` if `a` is strictly greater than `b` and `False` otherwise
# - `a >= b` is `True` if `a` is greater than or equal to `b` and `False` otherwise
# - `a < b` is `True` if `a` is strictly less than `b` and `False` otherwise
# - `a <= b` is `True` if `a` is less than or equal to `b` and `False` otherwise
#
# ## What are operators used for?
#
# Operators are how you get different quantities to "interact" so that you can compute with them. For example, the addition operator takes two quantities (variables), adds them together, and gives you the resulting sum as a new (single) quantity. You can assign this sum to a new variable so that you can use or refer to it later:
# ```
# c = a + b
# ```
# In this way, operators are the "verbs" of computer languages.
# + [markdown] id="rr1ZXazX9tik"
# # Comments and `print` statements
# Sometimes it's useful to write little notes to yourself (and other people who might want to read your code) to keep track of what the different parts of your code do. There are two ways to add notes to your code:
# - You can create an "inline comment" by adding a '#' followed by the text of your comment.
# - You can create a "block comment" by adding a set of triple quotes, followed by one or more lines of text, and then followed by a second set of of triple quotes. You can use single quotes (') or double quotes (") to denote a block comment, as long as you start and end your block comment with the same type of quote.
#
# The Python interpreter (i.e. the computing engine that turns your computer code into executed instructions) ignores all comments when it is running your code.
#
# You can also tell the interpreter to print out the value of something by using the `print` function, as illustrated below.
# + id="EfjYp_rb9tik" outputId="ce8d5aab-c92d-4d69-f497-d8b8a0dcd6e9" colab={"base_uri": "https://localhost:8080/", "height": 51}
'''
This block of text is a block comment, enclosed in triple quotes. The comment
can span multiple lines. Anything written inside of a comment will be ignored by
the computer when the program runs.
'''
pi = 3.14159265359 #ratio of a circle's circumference to its diameter
r1 = 6 #radius of circle 1
r2 = 8 #radius of circle 2
#the ** operator means "raise the value on the left to the power of the value of
#the thing on the right". Notice how order of operations comes into effect.
area1 = pi * r1 ** 2 #area of circle 1: pi * r^2 (** binds tighter than *)
area2 = pi * r2 ** 2 #area of circle 2
print(area1)
print(area2)
# + [markdown] id="PmK3_PsO9tim"
# # `if` statements
#
# Python contains a number of _keywords_ that allow you to control the flow of instructions that the computer executes. One of the main keywords is the `if` statement. It runs one or more lines of code only _if_ the quantity being evaluated is `True`:
# + id="O1EILq6i9tin"
x = 3  # change this value to any other number to see the if body get skipped
if x == 3: #notice the colon
    print('Run this line') #all lines in the body of the if statement are indented
# + [markdown] id="OA2P1pau9tio"
# # `elif` and `else` statements
# Whereas the body (indented part) of an `if` statement will simply be skipped if the evaluated function passed to an `if` statement is `False`, you can also specify what to do under other possible circumstances:
# - The `elif` statement comes right after an `if` statement. It allows you to specify an alternative set of conditions. You can use multiple `elif` statements in sequence; once any of them evaluate to `True` the body of that statement is run and the sequence is aborted (no other `elif` statements are tested).
# - The `else` keyword comes after an `if` statement and (optionally) one or more `elif` statements. The body of an `else` statement runs only if none of the preceding `if` or `elif` statements ran.
# + id="tJqq7fFg9tip" outputId="c4489db5-dd7f-406d-a41d-dff819c9bd6e" colab={"base_uri": "https://localhost:8080/", "height": 51}
# NOTE(review): the names in this cell appear redacted to the literal '<NAME>',
# so the first branch always matches; restore real names to exercise the others.
my_name = '<NAME>'
if my_name == '<NAME>':
    print('You are the course instructor.')
    print('You are also a Dartmouth professor.')
elif my_name == '<NAME>':  # only tested if the preceding branch did not run
    print('You are the current President of Dartmouth College.')
elif my_name == '<NAME>':
    print('You won two Nobel Prizes. Also, you are dead.')
else:  # runs only when no if/elif branch above matched
    print('I don\'t know you-- nice to meet you!') #note the "escape character" before the single quote
# + [markdown] id="fYpgOiHk_-CG"
# ## What are `if`, `elif`, and `else` statements used for?
#
# These statements help to control the *flow* of a program. Specifically, they allow you to specify the circumstances under which a particular instruction will be run (or skipped). In the preceding example, the value of the `my_name` variable will determine which statement gets printed out. The other statements are skipped over. Try experimenting! For example, in the previous cell you might try:
# - changing the value of `my_name`
# - adding or removing instructions within the body of the `if`, `elif`, or `else` statements
# - adding additional `elif` conditions
#
# What do you think would happen if you replaced some of the `elif` statements with `if` statements? Can you figure out what's different about how the program runs?
# + [markdown] id="B7FWLqqG9tir"
# # Datatypes
# The computational elements that Python "computes with" can take on different types of values, called _datatypes_. You've gotten to see a few different datatypes in above examples; here are some of the datatypes you'll likely encounter frequently:
# - **Integers** (`int`): non-decimal scalar values (e.g., `-50`, `326`, `0`, `2500`, etc.)
# - **Floating points numbers** (`float`): Real-valued scalars (e.g., `1.2345`, `-10.923`, `0.01`, `2.0`, etc.).
# - **Boolean** (`bool`): `True` or `False`.
# - **Strings** (`str`): sequences of characters or symbols, enclosed in single or double quotes (e.g., `'hello'`, `'This is a single quoted string!'`, `"This is a double quoted string..."`, etc.).
# - **Lists** (`list`): an ordered set of objects (each element of the set can be of any datatype, including a list!). Example; `['apple', 7, '3', [6, 'twenty five', -65.4321], 7]`
# - **Dictionary** (`dict`): an unordered set of named properties that each has an assigned value (each value can be of any datatype, including a Dictionary!). Example: `{'name': 'Introduction to Programming for Psychological Scientists', 'number': 32, 'department': 'PSYC', 'url': 'https://github.com/ContextLab/cs-for-psych'}`.
# - **Null value** (`None`): a special datatype that doesn't have any specified value. Useful as a "default" value, e.g. before you have enough information to compute the answer.
#
# ## Typecasting
# Many datatypes may be converted to other datatypes using typecasting. For example, `float(3)` converts the integer `3` into a floating point decimal. In the next cell, explore what happens when you try to convert between different datatypes. You can use the `type` function to ask Python what the datatype of a given entity is. For example, calling `print` on the output of `type` will display the datatype of whatever you pass to `type`.
# + id="XTbA-7jX9tir" outputId="3e952abe-4986-4d7c-fccd-6934a0e0e77e" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(type(3))  # prints <class 'int'>
# int(3.4) truncates to 3, float() makes 3.0, str() makes '3.0',
# and list() splits the string into its characters: ['3', '.', '0']
list(str(float(int(3.4)))) #can you figure out what's happening here?
# + [markdown] id="X8kxRqDifsqP"
# Once you figure out what's happening in the line above, try to make a prediction: what would the following line return?
# ```
# list(str(float(int(3.9))))
# ```
# Now test your prediction. What do you think is happening?
# + [markdown] id="PLkhU0frBNTu"
# ## What are datatypes used for?
#
# The computer executing your code needs to know how to handle the values you input and the answers you "get out" of the different computations in your program. Although Python will try to execute instructions without considering the datatypes of the variables, your code will crash (fail to run and output an error message) if Python can't figure out how to apply the given operators to the given data types. As a programmer, it is often useful to consider what datatypes you are expecting different variables to take on at different points in your program. For example:
# + id="Vx2MC8ADCfa0" outputId="940af7d3-d380-4c44-8fa4-0b7e22398b2d" colab={"base_uri": "https://localhost:8080/", "height": 34}
x = 'This is a test'  # try reassigning x to an int (e.g. x = 7) and re-running
# NOTE(review): outside of tutorials, isinstance(x, str) is the preferred way
# to test types; type(x) == str is used here to illustrate the type function.
if type(x) == str:
    print('x is a string')
elif type(x) == int:
    print('x is an integer')
else:
    print("I'm not sure how to handle x!") #note use of double quotes allows us to have a single quote in the printed string (without an escape character)!
# + [markdown] id="3gqqvEMJ9tit"
# # Functions
#
# We've already come across several functions, such as `print`, `type`, and various operators (e.g. `+`, `-`, `*`, `/`, `**`, `%`, etc.-- operators are a special type of function.). A _function_ is a special datatype that takes in zero or more _arguments_ (i.e. inputs) and produces zero or more actions or outputs.
#
# Sometimes a function is written so that its main purpose is to carry out some action, such as saving a file or generating a figure. In other cases, the purpose of a function is to carry out some computation on the input arguments. The syntax for defining a function is:
# ```
# def <function_name>(arg1, arg2, ..., argN):
# <instruction 1>
# <instruction 2>
# ...
# <instruction M>
# return <value>
# ```
#
# Here the `def` command tells Python that we'll be defining a new function called `<function_name>`. The function will take in a list of arguments (`arg1`, `arg2`, `arg3`, etc.), and carry out some operations. Finally, the `return` command tells Python what the value of the function itself is, given the inputs that were passed to it. If the `return` line is ommitted, then the function will evaluate to `None` by default. (Note: the body of your function definition needs to contain at least one line of code, or the interpreter will throw an error.) The next cell contains an example function definition.
#
# Note that function definitions may be nested-- in other words, the instructions specified in the body of a function definition may themselves be function definitions!
# + id="kBllRFxR9tiu" outputId="0125ef20-099c-455c-a1cb-941a260ded45"
def square(x):
    """Return x raised to the second power."""
    return x ** 2

# demonstrate the function on a few inputs (prints 1, 4, 9)
for value in (1, 2, 3):
    print(square(value))
# + [markdown] id="q12HajzJDAEf"
# ## What are functions used for?
#
# Functions are the computational construct that allows you to compose sequences of instructions together in increasingly complex ways. For example, suppose you wanted to implement the power operator (`**`) yourself, only knowing about the multiplication (`*`) operator. You could do something like this:
# + id="S1tyR3qcDldf" outputId="57b764a5-275d-4b2d-aba6-32d93b36cc5b" colab={"base_uri": "https://localhost:8080/", "height": 368}
def power(x, n):
    '''
    raise x to the nth power

    x : base (any numeric type supporting *)
    n : exponent; must be an int (negative values allowed)

    Implemented recursively: x**n == x * x**(n - 1), with n == 0 as the
    base case and negative n handled by taking the reciprocal of the
    positive power.
    '''
    if not (type(n) == int):
        raise(Exception("I don't know how to handle non-integer powers")) #prints out an error message!
    #why are each of these control flow statements needed?
    if n == 0:
        return 1  # base case: anything to the 0th power is 1
    elif n < 0:
        return 1 / power(x, -n)  # negative exponent: reciprocal of the positive power
    else:
        return x * power(x, n - 1) #this line is the main "workhorse" of the function-- why?
#test out a few examples
print(power(3, 2))
print(power(5, 3))
print(power(10, -6))
print(power(4, 1.5)) #should give an error. What happens if you run this line before the other print statements?
# + [markdown] id="n0MfmVm1EuPR"
# Notice how the `power` function calls *itself*! Function definitions that include self-references are an example of a technique called [recursion](https://en.wikipedia.org/wiki/Recursion_(computer_science)). Creating a recursive function allowed us to write "simpler" code.
#
# Despite that recursive functions may have few lines, they can nonetheless be tricky to understand. As an exercise, try going carefully through the `power` function above and attempting to understand the code in detail. For example:
# - Can you follow how many times the `power` function is called in each of the above scenarios?
# - Can you follow how and why the `power` function "works" (i.e., why it returns the first argument raised to the power of the second argument)?
#
# In exploring the above code, you may find it useful to use strategically placed `print` statements to display messages to indicate when different parts of the code are being reached and/or what the values of different variables are at different stages of the function's execution.
#
# Another example of compositionality in functions is given in the next cell:
# + id="snvSRIsmF61F" outputId="3835f638-ad91-480f-9dfa-b922eba35f87" colab={"base_uri": "https://localhost:8080/", "height": 51}
def add_and_print(a, b):
    """Add a and b, then print the sum.  Returns None."""
    total = a + b
    print('The answer is: ', total)

add_and_print(3.2, 1.4)
add_and_print(-7, 12)
# + [markdown] id="Fuj4HMbUGg4D"
# Here the `add_and_print` function includes calls to both the addition (+) operator and the `print` function. Combining these into a single function allows us to execute the same sequence of commands with only a small amount of additional code (e.g., rather than re-writing the full set of instructions multiple times). In turn, the `add_and_print` function could be incorporated into one or more other functions that carried out additional tasks.
# + [markdown] id="r-wlxcyW9tiw"
# # Loops
# It's often useful to carry out a similar operation many times. For example, you might want to read in each file in a folder and apply the same basic set of commands to each file's contents. _Loops_ provide a way of writing efficient and flexible code that involves doing an operation several times.
#
# There are two types of loops in Python: `for` loops and `while` loops.
#
# ## `for` loops
# This type of loop carries out one or more operations on each element in a given list. The syntax is:
# ```
# for i in <list of items>:
# <instruction 1>
# <instruction 2>
# ...
# <instruction N>
# ```
# where `i` is, in turn, set to the value of each element of the given list, and the instructions defined in the body of the loop are carried out. (Here `i` is just an example variable name; in practice any variable name may be used as a stand-in for `i`.) In other words, the instructions in a `for` loop are carried out _for_ each value in the given list.
#
# ## `while` loops
# This type of loop carries out one or more operations _while_ the given logic statement holds true. The syntax is:
# ```
# while <statement>:
# <instruction 1>
# <instruction 2>
# ...
# <instruction N>
# ```
# where `<statement>` (i.e. the _loop condition_) is any Python expression that can be typecast to a `bool`. These types of loops are useful when the number of repetitions needed to carry out a particular task is not known in advance.
#
# ### Infinite loops
# It is important that the statement used to determine whether the `while` loop continues with another execution or terminates is modified within the body of the loop. In other words, the parameters of the condition that is being tested for should be adjusted each time the loop executes another cycle. If the loop condition never changes its value from `True`, the `while` loop will continue looping forever; this is called an `infinite loop`. Infinite loops will freeze your computer program until they are manually halted by pressing `ctrl + c` (or by selecting "Interrupt execution" from the Runtime menu above).
#
# ### Nested loops
# Both `for` and `while` loops may themselves contain other loops (of either type). For example, nested loops can be useful when you want to carry out some sequence of operations on each _combination_ of a set of things.
# + id="cyLS3F9i9tiw" outputId="82a166c7-d72f-4a57-9547-2411e013bfaf"
for x in ['a', 'b', 'c', 'd']:  # x is bound to each list element in turn
    print(x)
# + id="F1_c_QjR9tiy" outputId="f043d6b5-c36b-4be4-f6a8-db01b3031ba0"
i = 10
# countdown: i is updated inside the body, so the loop condition eventually
# becomes False (no infinite loop); prints 9 down through 0
while i > 0:
    i = i - 1
    print(i)
# + [markdown] id="nQ351feuHXoe"
# ## What are loops used for?
#
# Loops (like `if`, `elif`, and `else` statements) are the other major way of controlling the flow of programs. To think about: under what circumstances might you want to use recursion (like the `power` example above) versus loops? For example, could you re-write the `power` function using a `for` or `while` loop?
# + [markdown] id="dX3PbwsX9tiz"
# # Importing new functions
#
# Python comes with a set of built-in functions. The `dir()` command lists the set of functions currently available to call. Starting out in this course you'll be directly told which libraries you need to import and how to import them, and which functions within those libraries might be useful. As the course progresses and you become familiar with the different libraries that are commonly used, you'll start to gain an intuition for which libraries to import and/or how to search for libraries that might provide functionality that's missing from the set of built-in functions.
#
# As an aside: if you dig deep enough, every Python library is built upon the same set of built-in functions that you have access to *without* importing any libraries. Therefore you could (in theory) accomplish exactly the same tasks without ever importing a library. In this way, Python's notion of importing is really a convenience that improves efficiency by enabling you to more easily re-use other people's (or your) code. Python's library infrastructure also makes Python an attractive language for scientists in that code may be easily shared. (Later in the course we'll discuss how to make code that you write available to the community as a shareable library.)
#
# You can import new functions that other people have written and made available to the community using an `import` statement. Knowing which _libraries_ (sets of functions) to import requires some Googling or checking in with other users. The syntax for importing a library is: `import <library name>`. For example `import math` will import Python's Mathematics library, giving you access to new Math-related functions.
#
# # The `from` keyword
# Sometimes it is useful to import only a subset of the total available functions from a library. For example, a particular function in a library may have the same name as a function that you want to write yourself. The `from` keyword allows you to select a specific set of functions from a given library using:
# ```
# from <library name> import <function1>, <function2>, ..., <functionN>
# ```
#
# You can also use the `dir` command on an imported library to determine which functions are available to import from that library.
#
# # The `as` keyword
# Sometimes it is convenient to rename an imported library or function. For example, the library might have a long name that is cumbersome to type. The `as` keyword can be used to rename libraries, functions, or both. For example:
# ```
# import math as m
# from math import degrees as deg
# ```
# + id="bii0OwGe9ti0" outputId="2c7c2bcb-c65e-4bd4-9686-08ef0b3b5dff"
import math as m
from math import degrees as deg
# sin(pi) is ~1.22e-16 (not exactly 0) and cos(pi) is -1, so this cell
# displays a value very close to, but not exactly, 1.0
m.sin(m.pi) - m.cos(m.pi)
# + [markdown] id="Q3NX3-7q9tie"
# ## Weird stuff
#
# In its "guts," Python represents all numbers in Base 2 (i.e. binary). Occassionally representing Base 10 numbers in Base 2 can lead to seemingly strange behaviors. One such example is shown in the next cell. To think about: how or when might this matter?
# + id="jG2_L8eT9tif" outputId="3421bc03-747b-4fc2-bf09-47fda632b43a"
0.3 - 0.2 - 0.1
# + [markdown] id="Ybo6-E7nsaCk"
# Another "weird" property of Python (e.g., as compared with most other programming languages) is that whitespace (spaces and tabs) at the beginning of a line carry meaning. For example, the code block:
# ```
# if a == b:
# a = 0
# b = 1
# ```
# is different from
# ```
# if a == b:
# a = 0
# b = 1
# ```
# Specifically, in the first code block (where `b = 1` is indented), `b` will be assigned a value of 1 if and only if `a` is equal to `b`. This is because the lines with the same number of leading whitespace characters are treated as "grouped" (i.e., belonging to the same statement body).
#
# In the second code block (where `b = 1` is not indented), `b` is assigned a value of 1 *regardless of whether `a` and `b` are equal*. This is because the unindented `b = 1` line is interpreted as falling outside of the body (scope) of the `if` statement.
# + [markdown] id="BttHGqxe9ti2"
# # Getting help
# The `help` function may be called for any built-in (or imported) Python function. This will display a pop-up message with instructions describing how to use the given function. For example:
# + id="-yWIWTWV9ti2" outputId="87680032-d3df-4e5e-c977-f449bc56dfd8"
help(m.sin)
# + [markdown] id="9dGoNtoO9ti5"
# When you try to perform an invalid operation (e.g. using incorrect syntax, or using the wrong data type, etc.) your program will crash and print out an error message. These error messages can help point you to where the problem in your code was.
# + id="eOSjZgi09ti5" outputId="43e06b48-e6f5-4d85-d85c-3d3195e0d763"
1 + 'test' #trying to add two data types for which the addition operation isn't supported
# + [markdown] id="BksjdHuq9ti7"
# Although they are very useful, in practice the built-in help function and error messages are often insufficient for solving tricky problems. Some great places to get help are:
# - **Python tutorials**: if you feel you require an in-depth understanding to solve your problem, there are many free tutorials available on a wide array of topics. [This one](https://gitlab.erc.monash.edu.au/andrease/Python4Maths/tree/master/Intro-to-Python) is a good place to start for basic questions.
# - **Google**: if you are trying to figure out an unfamiliar function, or if you encounter an error message that doesn't make sense, try searching for someone else's solution.
# - **Gitter**: if you're stuck on something, or curious why your code is behaving in a particular way, post your question to Gitter and someone from the class will answer in a timely manner.
# - **Office hours**: come to my regular office hours, or email me to set up an appointment, and I'll help get you unstuck.
# + [markdown] id="VVyDk_CA9ti7"
# # Happy coding!
#
# Programming is one of the most rewarding skills you can pick up. The modern world runs on computers, and learning to code well will provide you with an enormous advantage in a wide range of life venues. Have fun learning, don't be afraid to make mistakes, and learn to ask for help when you need it!
| Copy_of_intro_to_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# - - - -
# # Mechpy Tutorials
# a mechanical engineering toolbox
#
# source code - https://github.com/nagordon/mechpy
# documentation - https://nagordon.github.io/mechpy/web/
#
# - - - -
#
# <NAME>
# 2017-02-20
#
# - - - -
#
# ## Solid Mechanics Tutorial
# ## Python Initialization with module imports
# +
# setup
import numpy as np
import sympy as sp
import scipy
from pprint import pprint
sp.init_printing(use_latex='mathjax')
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (12, 8) # (width, height)
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 16
from matplotlib import patches
get_ipython().magic('matplotlib') # seperate window
get_ipython().magic('matplotlib inline') # inline plotting
# -
# ## Functions
# +
def T1(th):
    '''Plane-stress stress-transformation matrix (Voigt notation).

    th : ply angle in degrees.
    Usage: sigma1 = T1(th) @ sigmax.  Note that T1(th)**-1 == T1(-th).
    '''
    rad = np.deg2rad(th)
    c, s = np.cos(rad), np.sin(rad)
    return np.array([[c*c,   s*s,  2*c*s],
                     [s*s,   c*c, -2*c*s],
                     [-c*s,  c*s,  c*c - s*s]])
def T2(th):
    '''Plane-stress strain-transformation matrix (Voigt notation).

    th : ply angle in degrees.
    Usage: epsilon1 = T2(th) @ epsilonx.
    '''
    rad = np.deg2rad(th)
    c, s = np.cos(rad), np.sin(rad)
    return np.array([[c*c,     s*s,   c*s],
                     [s*s,     c*c,  -c*s],
                     [-2*c*s, 2*c*s,  c*c - s*s]])
# -
# # Materials
# [index](#Mechpy)
#
# ## Stress and Strain
# Stress is a tensor that can be broken into
#
# $$
# \overline{\sigma}=\begin{bmatrix}
# \sigma_{xx} & \sigma_{xy} & \sigma_{xz}\\
# \sigma_{yx} & \sigma_{yy} & \sigma_{yz}\\
# \sigma_{zx} & \sigma_{zy} & \sigma_{zz}
# \end{bmatrix}
# $$
#
#
#
# # Stress Transformations
# [index](#Mechpy)
#
# $$
# \overline{\sigma}=\begin{bmatrix}
# \sigma_{xx} & \sigma_{xy} & \sigma_{xz}\\
# \sigma_{yx} & \sigma_{yy} & \sigma_{yz}\\
# \sigma_{zx} & \sigma_{zy} & \sigma_{zz}
# \end{bmatrix}
# $$
#
# reduce to plane stress
#
# $$
# \overline{\sigma}=\begin{bmatrix}
# \sigma_{xx} & \sigma_{xy} & 0 \\
# \sigma_{yx} & \sigma_{yy} & 0\\
# 0 & 0 & \sigma_{zz}
# \end{bmatrix}
# $$
#
# or
#
# $$
# \overline{\sigma}=\begin{bmatrix}
# \sigma_{xx} & \tau_{xy} & 0 \\
# \tau_{yx} & \sigma_{yy} & 0\\
# 0 & 0 & \sigma_{zz}
# \end{bmatrix}
# $$
#
#
# $$
# \overline{\sigma}=\begin{bmatrix}
# \sigma_{x} & \sigma_{xy} \\
# \sigma_{yx} & \sigma_{y} \\
# \end{bmatrix}
# $$
#
# Transformation
#
# $$
# A=\begin{bmatrix}
# cos(\theta) & sin(\theta) \\
# -sin(\theta) & cos(\theta) \\
# \end{bmatrix}
# $$
#
# $$
# \sigma'=A \sigma A^T
# $$
#
#
# $$
# \sigma_1 , \sigma_2 = \frac{\sigma_{x}}{2} + \frac{\sigma_{y}}{2} + \sqrt{\tau_{xy}^{2} + \left(\frac{\sigma_{x}}{2} - \frac{\sigma_{y}}{2}\right)^{2}}
# $$
#
#
# $$
# T=\left[\begin{matrix}
# \sin^{2}\theta & \cos^{2}\theta & 2\sin\theta\cos\theta \\
# \cos^{2}\theta & \sin^{2}\theta & -2\sin\theta\cos\theta \\
# -\sin\theta\cos\theta & \sin\theta\cos\theta & \sin^{2}\theta-\cos^{2}\theta
# \end{matrix}\right]
# $$
import sympy as sp
from sympy.abc import tau, sigma
import numpy as np
sp.init_printing()

# principal-stress formulas, derived symbolically
sx,sy,txy,tp = sp.symbols('sigma_x,sigma_y,tau_xy,theta_p')
sp1 = (sx+sy)/2 + sp.sqrt( ((sx-sy)/2)**2 + txy**2 )
sp2 = (sx+sy)/2 - sp.sqrt( ((sx-sy)/2)**2 + txy**2 )
print(sp.latex(sp1))
sp1

# principal-plane angle, and a numeric evaluation of it
tp = sp.atan(2*txy/(sx-sy) )/2
tp
tpp = tp.evalf(subs={sx:10,sy:15,txy:10})
tpp

#s,s11,s22,s33,s12 = sp.var('s,s11,s22,s33,s12')
s,s11,s22,s33,s12,s13,t,t12 = sp.symbols('sigma, sigma11,sigma22,sigma33,sigma12,sigma13,tau,tau12')
s = sp.Matrix([[s11,t12,0],[t12,s22,0],[0,0,s33]])
s

# symbolic stress transform (here m = sin(theta), n = cos(theta))
t = sp.symbols('theta')
m = sp.sin(t)
n = sp.cos(t)
T = sp.Matrix([[m**2,n**2, 2*m*n],[n**2,m**2,-2*m*n],[-m*n,m*n,m**2-n**2]])
T

# BUG FIX: this matrix was previously bound to the name T1, shadowing the
# T1() transform *function* defined near the top of the file; the later
# cell that calls T1(ang) would then fail because a Matrix is not callable.
T45 = T.subs(t, sp.pi/4)  # transform evaluated at 45 degrees
T45

sprime = T45 * s * T45.inv()
sprime
sprime.evalf(subs={s11:10, s22:00, s33:0, t12:0})

s.eigenvals()

s2 = s.evalf(subs={s11:2.2, s22:3, s33:sp.pi, s12:7.3})
s2
# ## Stress transformation with equations
# %matplotlib inline
# cd ..
from mechpy.math import T2rot
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10,8)
# +
from IPython.html.widgets import *  # NOTE(review): deprecated import path; modern notebooks use `from ipywidgets import interact`
from mechpy.math import T2rot

# BUG FIX: `rot2` below reads the module-level polygon `xy`, but in the
# original these three lines were commented out, so moving any slider raised
# NameError.  Restore the template polygon (a closed triangle).
x = [-1, 1, 0, -1]
y = [-1, -1, 1, -1]
xy = np.array([x, y])

fig1 = plt.figure(figsize=(10,8))

def rot2(th, xt, yt, zt):
    '''Rotate the template polygon by th (radians), scale it by zt,
    translate it by (xt, yt), and draw it on fixed axes.'''
    xyR = np.dot(T2rot(th), xy*zt)
    xyR[0,:] += xt
    xyR[1,:] += yt
    plt.plot(xyR[0,:], xyR[1,:])
    plt.axis('square')
    plt.xlim([-11.1, 11.1])
    plt.ylim([-11.1, 11.1])
    plt.show()

interact(rot2, th=(0,np.pi,np.pi/90), yt=(1,10,1), xt=(1,10,1), zt=(1,10,1));
# +
# stress angle transformation
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10,8)
mpl.rcParams['font.size'] = 16
mpl.rcParams['legend.fontsize'] = 14
plt.figure(figsize=(10,8))
def mohr(sigmax, sigmay, tauxy, angle):
    '''Plot Mohr's circle for a 2D plane-stress state.

    sigmax, sigmay : applied normal stresses
    tauxy          : applied in-plane shear stress
    angle          : element rotation in degrees (rotates clockwise)

    Prints the transformed and principal stresses and draws the circle
    with the applied-stress and principal-stress points marked.
    '''
    plt.figure(figsize=(10,8))
    # angle rotates clockwise
    theta = (angle-90) * np.pi/180
    # stress transformed to any angle
    sigmaxt = (sigmax + sigmay)/2 + (sigmax-sigmay)/2 * np.cos(2*theta) + tauxy*np.sin(2*theta)
    sigmayt = (sigmax + sigmay)/2 + (sigmax-sigmay)/2 * np.cos(2*(theta + np.pi/2)) + tauxy*np.sin(2*(theta+ np.pi/2))
    tauxyt = -(sigmax-sigmay)/2*np.sin(2*theta) + tauxy*np.cos(2*theta)
    print('transformed stress')
    print([sigmaxt, sigmayt, tauxyt])
    # principal stresses
    sigma1p = (sigmaxt + sigmayt)/2 + np.sqrt( ((sigmaxt-sigmayt)/2)**2 + tauxyt**2)
    sigma2p = (sigmaxt + sigmayt)/2 - np.sqrt( ((sigmaxt-sigmayt)/2)**2 + tauxyt**2)
    tauxyp = np.sqrt( ( (sigmaxt-sigmayt)/2 )**2 + tauxyt**2 )
    sigmap = [sigma1p, sigma2p]
    # NOTE(review): arctan (not arctan2) divides by (sigmaxt-sigmayt)/2 and
    # warns/overflows when sigmaxt == sigmayt -- confirm whether np.arctan2
    # was intended here.
    thetap = -np.arctan(tauxyt/ ((sigmaxt-sigmayt)/2)) / 2 * 180 / np.pi
    sigmaavg = (sigma1p+sigma2p)/2
    R = np.sqrt(((sigmaxt-sigmayt)/2)**2 + tauxyt**2)  # circle radius
    print('---principal stresses---')
    print('sigma1p = {:.2f}'.format(sigma1p) )
    print('sigma2p = {:.2f}'.format(sigma2p) )
    print('principal plane angle = {:.2f}'.format(thetap) )
    print('---principal shear---')
    print('tauxyp = {:.2f} with avg normal stress = {:.2f}'.format(tauxyp,sigmaavg))
    r = np.linspace(-2*np.pi,2*np.pi,100)
    ## keep this for sigma3
    # x = np.cos(r) * (sigma1p/2) + sigma1p/2
    # y = np.sin(r) * (sigma1p/2)
    # plt.plot(x,y,'bo', sigmaavg,0,'bo')
    # x = np.cos(r) * (sigma2p/2) + sigma2p/2
    # y = np.sin(r) * (sigma2p/2)
    # plt.plot(x,y,'bo', sigmaavg,0,'bo')
    # the circle itself, centered at (sigmaavg, 0) with radius R
    x = np.cos(r) * R + sigmaavg
    y = np.sin(r) * R
    plt.plot(x,y,'b', sigmaavg,0,'bo')
    plt.plot([sigmaxt,sigmayt],[tauxyt, -tauxyt], 'g-o', label='applied stress');
    plt.plot([sigma1p,sigma2p],[0,0],'ro');
    plt.plot([sigmaavg,sigmaavg],[tauxyp,-tauxyp], 'ro', label='principal stress');
    plt.plot([sigmaxt,sigmaxt],[tauxyt, 0], '--g'); plt.plot([sigmaxt,sigmaxt],[tauxyt, 0], 'og');
    plt.plot([sigmayt,sigmayt],[-tauxyt, 0], '--g'); plt.plot([sigmayt,sigmayt],[-tauxyt, 0], 'og');
    plt.plot([sigmaavg, sigmaxt],[tauxyt, tauxyt], '--g'); plt.plot([sigmaavg, sigmaxt],[tauxyt, tauxyt], 'og');
    plt.plot([sigmaavg, sigmayt],[-tauxyt, -tauxyt], '--g'); plt.plot([sigmaavg, sigmayt],[-tauxyt, -tauxyt], 'og');
    plt.axis('equal') ; plt.grid();
    plt.xlabel('$\sigma_x,\sigma_y$');
    plt.ylabel('$\\tau_{xy}$');
    plt.title('Mohrs Circle 2D Plane Stress ');
    plt.legend();
interact(mohr, sigmax=(0,500,10),sigmay=(0,500,10),tauxy=(0,500,10),angle=(0,90,5));
# +
# stress angle transformation
# (non-interactive version of the mohr() widget above, run once for the
# fixed state sigmax=-20, sigmay=0, tauxy=40, angle=0)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10,8)
mpl.rcParams['font.size'] = 16
mpl.rcParams['legend.fontsize'] = 14
sigmax = -20
sigmay = 0
tauxy = 40
angle = 0
# angle rotates clockwise
theta = (angle-90) * np.pi/180
# stress transformed to any angle
sigmaxt = (sigmax + sigmay)/2 + (sigmax-sigmay)/2 * np.cos(2*theta) + tauxy*np.sin(2*theta)
sigmayt = (sigmax + sigmay)/2 + (sigmax-sigmay)/2 * np.cos(2*(theta + np.pi/2)) + tauxy*np.sin(2*(theta+ np.pi/2))
tauxyt = -(sigmax-sigmay)/2*np.sin(2*theta) + tauxy*np.cos(2*theta)
print('transformed stress')
print([sigmaxt, sigmayt, tauxyt])
# principal stresses
sigma1p = (sigmaxt + sigmayt)/2 + np.sqrt( ((sigmaxt-sigmayt)/2)**2 + tauxyt**2)
sigma2p = (sigmaxt + sigmayt)/2 - np.sqrt( ((sigmaxt-sigmayt)/2)**2 + tauxyt**2)
tauxyp = np.sqrt( ( (sigmaxt-sigmayt)/2 )**2 + tauxyt**2 )
sigmap = [sigma1p, sigma2p]
# NOTE(review): plain arctan divides by (sigmaxt-sigmayt)/2; warns when the
# transformed normal stresses are equal -- np.arctan2 would be more robust.
thetap = -np.arctan(tauxyt/ ((sigmaxt-sigmayt)/2)) / 2 * 180 / np.pi
sigmaavg = (sigma1p+sigma2p)/2
R = np.sqrt(((sigmaxt-sigmayt)/2)**2 + tauxyt**2)  # Mohr's circle radius
print('---principal stresses---')
print('sigma1p = {:.2f}'.format(sigma1p) )
print('sigma2p = {:.2f}'.format(sigma2p) )
print('principal plane angle = {:.2f}'.format(thetap) )
print('---principal shear---')
print('tauxyp = {:.2f} with avg normal stress = {:.2f}'.format(tauxyp,sigmaavg))
r = np.linspace(-2*np.pi,2*np.pi,100)
## keep this for sigma3
# x = np.cos(r) * (sigma1p/2) + sigma1p/2
# y = np.sin(r) * (sigma1p/2)
# plt.plot(x,y,'bo', sigmaavg,0,'bo')
# x = np.cos(r) * (sigma2p/2) + sigma2p/2
# y = np.sin(r) * (sigma2p/2)
# plt.plot(x,y,'bo', sigmaavg,0,'bo')
# the circle itself, centered at (sigmaavg, 0) with radius R
x = np.cos(r) * R + sigmaavg
y = np.sin(r) * R
plt.plot(x,y,'b', sigmaavg,0,'bo')
plt.plot([sigmaxt,sigmayt],[tauxyt, -tauxyt], 'g-o', label='applied stress');
plt.plot([sigma1p,sigma2p],[0,0],'ro');
plt.plot([sigmaavg,sigmaavg],[tauxyp,-tauxyp], 'ro', label='principal stress');
plt.plot([sigmaxt,sigmaxt],[tauxyt, 0], '--g'); plt.plot([sigmaxt,sigmaxt],[tauxyt, 0], 'og');
plt.plot([sigmayt,sigmayt],[-tauxyt, 0], '--g'); plt.plot([sigmayt,sigmayt],[-tauxyt, 0], 'og');
plt.plot([sigmaavg, sigmaxt],[tauxyt, tauxyt], '--g'); plt.plot([sigmaavg, sigmaxt],[tauxyt, tauxyt], 'og');
plt.plot([sigmaavg, sigmayt],[-tauxyt, -tauxyt], '--g'); plt.plot([sigmaavg, sigmayt],[-tauxyt, -tauxyt], 'og');
plt.axis('equal') ; plt.grid();
plt.xlabel('$\sigma_x,\sigma_y$');
plt.ylabel('$\\tau_{xy}$');
plt.title('Mohrs Circle 2D Plane Stress ');
plt.legend();
# -
# ## Stress Transformation with matrices
# +
# Principal plane stress via eigen-decomposition of the stress tensor
sigmax = -20  # normal stress in x
sigmay = 90   # normal stress in y
tauxy = 60    # in-plane shear

sigma = np.array([[sigmax, tauxy, 0],
                  [tauxy, sigmay, 0],
                  [0, 0, 0]])

# one eig call instead of two: it returns (eigenvalues, eigenvectors) --
# the eigenvalues are the principal stresses, the eigenvector columns are
# the principal direction cosines
sigmap, thetap = np.linalg.eig(sigma)
print('\n principal stresses')
print(sigmap)
print('\n principal plane angle')
print(np.arccos(thetap)*180/np.pi-90)
# +
# specified angle stress transformation
sigmax = -20
sigmay = 90
tauxy = 60
sigma = np.array([[sigmax, tauxy, 0],
                  [tauxy, sigmay, 0],
                  [0, 0, 0]])
ang = 23  # transformation angle in degrees
# rotate the stress tensor using the T1() transform function defined near
# the top of the file: sigma' = T1 @ sigma @ T1^T
sigmat = T1(ang) @ sigma @ np.transpose(T1(ang))
print('\n transformed stresses')
print(sigmat)
# -
# maximum in-plane shear stress
# (uses sigmax, sigmay, tauxy left over from the previous cell)
eps = 1e-16 # machine epsilon to avoid divide-by-zero error
rad_to_deg = 180/np.pi
# principal-plane angle in degrees; adding eps keeps the division finite
# when sigmax == sigmay (np.arctan2 would be the more robust alternative)
theta1 = 0.5 * np.arctan( 2*tauxy / ((sigmax-sigmay+eps))) * rad_to_deg
print(theta1)
# +
tauxy = 0 # lbs/in
sigmax = 100 # lbs/in
# NOTE(review): np.linspace(0, 1.100) produces 50 points from 0 to 1.1.
# Given the lbs/in comment and sigmax=100, (0, 1100) or (0, 1, 100) may
# have been intended -- confirm before relying on this sweep.
sigmay = np.linspace(0,1.100) # lbs/in
eps = 1e-16 # machine epsilon to avoid divide-by-zero error
rad_to_deg = 180/np.pi
# element-wise principal-plane angle over the sigmay sweep
theta1 = 0.5 * np.arctan( 2*tauxy / ((sigmax-sigmay+eps))) * rad_to_deg
print(theta1)
# sigmax = 100
# sigmay = np.linspace(0,1.100)
# tauxy = 0
# tparray = sp.atan(2*tauxy/(sigmax-sigmay) )/2
# tparray
# -
sigma
# +
# Rotate a pure-shear state by 45 degrees using a 2x2 rotation matrix.
th = np.pi/4 # 45 deg
m = np.cos(th)
n = np.sin(th)
A = np.array([ [m,n],[-n,m]])  # 2D (tensor) rotation matrix
tauxy = 1 # lbs/in
sigmax = 0 # lbs/in
sigmay = 0 # lbs/in
sigma = np.array([[sigmax, tauxy],
                  [tauxy, sigmay]])
sigmat = A @ sigma @ A.T # transformed stress
sigmat
# +
# one eig call instead of two: returns (principal stresses, direction cosines)
sigmap, thetap = np.linalg.eig(sigmat)
print(sigmap)
# NOTE(review): thetap holds direction *cosines*, so multiplying by 180/pi
# does not convert them to angles -- np.arccos(thetap)*180/np.pi would (as
# in the earlier principal-stress cell).  Output left unchanged on purpose.
print(thetap* 180/np.pi)
# -
# +
# Principal Stresses of a full 3D stress tensor
sx = 63.66   # normal stresses
sy = 0
sz = 0
txy = 63.66  # shear stresses
txz = 0
tyz = 0
# BUG FIX: the original third row was [txy, txz, sz], which breaks the
# required symmetry of the stress tensor; it must mirror the off-diagonal
# shear terms as [txz, tyz, sz].  (np.array replaces the deprecated
# np.matrix; np.linalg.eigvals below accepts either.)
S = np.array([[sx,  txy, txz],
              [txy, sy,  tyz],
              [txz, tyz, sz]])
print(S)
# -
principal_stresses = np.linalg.eigvals(S)  # eigenvalues of the stress tensor
print(principal_stresses)
import sympy as sp
from sympy.abc import tau, sigma
# symbolic stress tensor with in-plane shear only (no xz/yz terms)
#s,s11,s22,s33,s12 = sp.var('s,s11,s22,s33,s12')
s,s11,s22,s33,s12,s13 = sp.symbols('sigma, sigma11,sigma22,sigma33,sigma12,sigma13')
s = sp.Matrix([[s11,s12,0],[s12,s22,0],[0,0,s33]])
s
s**2
s.eigenvals() # hmm looks familiar
# substitute numeric values one symbol at a time (exact arithmetic)...
s1 = s.subs(s11,2.2).subs(s22,3).subs(s33,sp.pi).subs(s12,7.3)
s1
# or
# ...or all at once via evalf, which also forces floating point
s2 = s.evalf(subs={s11:2.2, s22:3, s33:sp.pi, s12:7.3})
s2
s1.eigenvals()
s2.eigenvals()
s2.inv()
# symbol range syntax: creates the 99 symbols C1 .. C99 in one call
C = sp.symbols('C1:100')
C
| tutorials/solids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# Managing kubernetes objects using common resource operations with the python client
# -----------------------------------------------------------------------------------------------
#
# Some of these operations include;
#
# - **`create_xxxx`** : create a resource object. Ex **`create_namespaced_pod`** and **`create_namespaced_deployment`**, for creation of pods and deployments respectively. This performs operations similar to **`kubectl create`**.
#
#
# - **`read_xxxx`** : read the specified resource object. Ex **`read_namespaced_pod`** and **`read_namespaced_deployment`**, to read pods and deployments respectively. This performs operations similar to **`kubectl describe`**.
#
#
# - **`list_xxxx`** : retrieve all resource objects of a specific type. Ex **`list_namespaced_pod`** and **`list_namespaced_deployment`**, to list pods and deployments respectively. This performs operations similar to **`kubectl get`**.
#
#
# - **`patch_xxxx`** : apply a change to a specific field. Ex **`patch_namespaced_pod`** and **`patch_namespaced_deployment`**, to update pods and deployments respectively. This performs operations similar to **`kubectl patch`**, **`kubectl label`**, **`kubectl annotate`** etc.
#
#
# - **`replace_xxxx`** : replacing a resource object will update the resource by replacing the existing spec with the provided one. Ex **`replace_namespaced_pod`** and **`replace_namespaced_deployment`**, to update pods and deployments respectively, by creating new replacements of the entire object. This performs operations similar to **`kubectl rolling-update`**, **`kubectl apply`** and **`kubectl replace`**.
#
#
# - **`delete_xxxx`** : delete a resource. This performs operations similar to **`kubectl delete`**.
#
#
# For Futher information see the Documentation for API Endpoints section in https://github.com/kubernetes-client/python/blob/master/kubernetes/README.md
# + deletable=true editable=true
from kubernetes import client, config
# + [markdown] deletable=true editable=true
# ### Load config from default location.
# + deletable=true editable=true
config.load_kube_config()  # read cluster credentials from the default kubeconfig (~/.kube/config)
# + [markdown] deletable=true editable=true
# ### Create API endpoint instance as well as API resource instances (body and specification).
# + deletable=true editable=true
# NOTE(review): the extensions/v1beta1 Deployment API used here is removed
# in newer Kubernetes releases (Deployments moved to apps/v1, i.e.
# client.AppsV1Api) -- confirm against the client version this notebook pins.
api_instance = client.ExtensionsV1beta1Api()
dep = client.ExtensionsV1beta1Deployment()
spec = client.ExtensionsV1beta1DeploymentSpec()
# + [markdown] deletable=true editable=true
# ### Fill required object fields (apiVersion, kind, metadata and spec).
# + deletable=true editable=true
name = "my-busybox"
dep.metadata = client.V1ObjectMeta(name=name)
spec.template = client.V1PodTemplateSpec()
spec.template.metadata = client.V1ObjectMeta(name="busybox")
spec.template.metadata.labels = {"app":"busybox"}
spec.template.spec = client.V1PodSpec()
dep.spec = spec
container = client.V1Container()
container.image = "busybox:1.26.1"
container.args = ["sleep", "3600"]  # keep the pod alive for an hour
container.name = name
spec.template.spec.containers = [container]
# + [markdown] deletable=true editable=true
# ### Create Deployment using create_xxxx command for Deployments.
# + deletable=true editable=true
api_instance.create_namespaced_deployment(namespace="default",body=dep)
# + [markdown] deletable=true editable=true
# ### Use list_xxxx command for Deployment, to list Deployments.
# + deletable=true editable=true
deps = api_instance.list_namespaced_deployment(namespace="default")
for item in deps.items:
    print("%s %s" % (item.metadata.namespace, item.metadata.name))
# + [markdown] deletable=true editable=true
# ### Use read_xxxx command for Deployment, to display the detailed state of the created Deployment resource.
# + deletable=true editable=true
api_instance.read_namespaced_deployment(namespace="default",name=name)
# + [markdown] deletable=true editable=true
# ### Use patch_xxxx command for Deployment, to make specific update to the Deployment.
# + deletable=true editable=true
dep.metadata.labels = {"key": "value"}  # a patch body only needs the changed fields
api_instance.patch_namespaced_deployment(name=name, namespace="default", body=dep)
# + [markdown] deletable=true editable=true
# ### Use replace_xxxx command for Deployment, to update Deployment with a completely new version of the object.
# + deletable=true editable=true
dep.spec.template.spec.containers[0].image = "busybox:1.26.2"  # bump the image tag
api_instance.replace_namespaced_deployment(name=name, namespace="default", body=dep)
# + [markdown] deletable=true editable=true
# ### Use delete_xxxx command for Deployment, to delete created Deployment.
# + deletable=true editable=true
# Foreground propagation deletes dependent pods before removing the Deployment.
api_instance.delete_namespaced_deployment(name=name, namespace="default", body=client.V1DeleteOptions(propagation_policy="Foreground", grace_period_seconds=5))
# + deletable=true editable=true
| examples/notebooks/intro_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Dask [shared installation]
# language: python
# name: dask
# ---
import numpy as np
import pandas as pd
import xarray as xr
import zarr
import math
import glob
import pickle
import statistics
import scipy.stats as stats
from sklearn.neighbors import KernelDensity
import dask
import seaborn as sns
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def get_files():
    """Map model name -> list of zg (geopotential height) file paths.

    Scans the CMIP5 RCP8.5 tree and keeps only models that actually have
    daily 2-degree zg files for run r1i1p1.  Models without matches are
    skipped (glob returns an empty list for them).
    """
    avail = {}
    for model in glob.glob("/terra/data/cmip5/global/rcp85/*"):
        zg = glob.glob(str(model) + "/r1i1p1/day/2deg/zg*")
        # replaces the original try/except probe of zg[0]: an empty glob
        # result simply means this model has no zg files
        if zg:
            avail[model.split('/')[-1]] = zg
    return avail
files = get_files()
def contourise(x, threshold=None):
    """Binarise a field: 1.0 where x >= threshold, NaN elsewhere.

    NaNs in the input are treated as 0 before thresholding.  ``threshold``
    defaults to the module-level ``limit`` (the per-model 90th percentile
    set in the processing loops below), preserving the original behaviour;
    passing it explicitly removes the hidden-global dependency.
    """
    if threshold is None:
        threshold = limit  # module-level global, set per model in the loops
    x = x.fillna(0)
    x = x.where(x >= threshold)
    return x / x  # 1.0 inside the contour, NaN outside
# Build per-model, per-season contour-frequency maps for the RCP8.5 runs.
results={}
for model in files.keys():
    print(model)
    # lazily open all zg files for this model as one dataset
    x = xr.open_mfdataset(files[model])
    x = x.sel(plev=85000)  # 850 hPa pressure level
    x = x.sel(time=slice('2045','2100'))  # late-century window
    x = x.load()
    x = x.sel(lat=slice(-60,0))
    x = x[['zg']]  # keep only geopotential height
    # wrap longitudes from [0, 360) to [-180, 180)
    x = x.assign_coords(lon=(((x.lon + 180) % 360) - 180))
    with dask.config.set(**{'array.slicing.split_large_chunks': True}):
        x = x.sortby(x.lon)
    x = x.sel(lon=slice(-50,20))
    # seasonal means, with seasons starting in December (DJF/MAM/JJA/SON)
    x = x.resample(time="QS-DEC").mean(dim="time",skipna=True)
    x = x.load()
    # 90th-percentile threshold; read as a module-level global by contourise()
    limit = np.nanquantile(x.zg.values,0.9)
    results[model]={}
    for seas in ['DJF','MAM','JJA','SON']:
        mean_seas = x.where(x.time.dt.season==str(seas)).dropna(dim='time')
        # fraction of seasons in which each grid point lies inside the contour
        mean_seas = contourise(mean_seas).zg.fillna(0).mean(dim='time')
        results[model][seas] = mean_seas.fillna(0)
    x.close()
rcp85 = results  # keep an in-memory alias for the RCP8.5 scenario
# use a context manager so the file handle is closed (the original passed
# open(...) directly to pickle.dump and leaked the handle)
with open("rcp85_dic.p", "wb") as fh:
    pickle.dump(results, fh)
def get_files():
    """Return {model_name: [zg file paths]} for CMIP5 historical models.

    Same scan as the RCP8.5 variant above, but over the historical scenario
    directory. Models with no matching zg files are omitted.
    """
    models = glob.glob("/terra/data/cmip5/global/historical/*")
    avail = {}
    for model in models:
        zg = glob.glob(str(model) + "/r1i1p1/day/2deg/zg*")
        # Keep only models that actually provide zg files (the original
        # probed `zg[0]` inside a bare try/except).
        if zg:
            avail[model.split('/')[-1]] = zg
    return avail
# Map of model name -> list of zg file paths for the historical scenario.
files = get_files()
results={}
# Same pipeline as the RCP8.5 loop above, applied to the historical period
# 1950-2005: seasonal frequency of 90th-percentile zg exceedance per model.
for model in files.keys():
    print(model)
    x = xr.open_mfdataset(files[model])
    x = x.sel(plev=85000)  # 85000 Pa = 850 hPa pressure level
    x = x.sel(time=slice('1950','2005'))
    x = x.load()
    x = x.sel(lat=slice(-60,0))
    x = x[['zg']]  # keep only the geopotential-height variable
    # Re-centre longitudes from [0, 360) to [-180, 180) for the box slice.
    x = x.assign_coords(lon=(((x.lon + 180) % 360) - 180))
    with dask.config.set(**{'array.slicing.split_large_chunks': True}):
        x = x.sortby(x.lon)
    x = x.sel(lon=slice(-50,20))
    # Seasonal (quarterly) means anchored at December -> DJF/MAM/JJA/SON.
    x = x.resample(time="QS-DEC").mean(dim="time",skipna=True)
    x = x.load()
    # Global threshold read by contourise(): this model's 90th percentile.
    limit = np.nanquantile(x.zg.values,0.9)
    results[model]={}
    for seas in ['DJF','MAM','JJA','SON']:
        mean_seas = x.where(x.time.dt.season==str(seas)).dropna(dim='time')
        # Time-mean of the 0/1 exceedance mask = exceedance frequency.
        mean_seas = contourise(mean_seas).zg.fillna(0).mean(dim='time')
        results[model][seas] = mean_seas.fillna(0)
    x.close()
historical = results
# BUG FIX: the original wrote the historical results to "rcp85_dic.p",
# silently overwriting the RCP8.5 output saved earlier in this script.
# Save under a distinct filename, and close the handle deterministically
# (the original bare open() leaked it).
with open("historical_dic.p", "wb") as f:
    pickle.dump(results, f)
| projections/HIGH/.ipynb_checkpoints/SASH_2D_rcp85-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Real- and Complex-Valued Sinusoids and Exponential Signals
#
# This Jupyter notebook focuses on real- and complex-valued sinusoidal and exponential signals. These families of signals play an important role in _Signals & Systems_: they surface up as solutions to ordinary linear differential equations with constant coefficients, which govern the behavior of Linear Time-Invariant (LTI) systems, which, in turn, is the sole focus of _Signals & Systems_. The overall goal of this notebook is to provide "mental pictures" of such signals and enhance one's intuition about them. The demonstrations in here include graphing members of these families and attempts to represent them as sounds. To use this notebook, run all cells and hop from demo to demo.
#
#
# **Notes:**
# - Requires Python 3.x; tested on Python 3.6.3
# - Requires `numpy`, `matplotlib`, `ipywidgets`, `mpl_toolkits`.
# - To install `ipywidgets`, use
# - `conda install ipywidgets` or `conda install -c conda-forge ipywidgets`
# - or
# - `pip install ipywidgets`
# - `jupyter nbextension enable --py --sys-prefix widgetsnbextension`
#
#
# Authored by <NAME>
# ver. 1.0 (August 2019)
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from ipywidgets import interactive
import ipywidgets as widgets
from IPython.display import Audio, display
# -
# Some global settings for figure sizes
normalFigSize = (8, 6) # (width,height) in inches
largeFigSize = (12, 9)  # used for the 3D helix plots below
xlargeFigSize = (18, 12)  # extra-large option
# ## 1. Sinusoids
#
# The general form of a _sinusoid_ (short for "sinusoidal signal") is given as
#
# \begin{align*}
# x(t) = A \ \mathrm{trig}(\omega t + \phi)
# \end{align*}
#
# where $\mathrm{trig}$ can be either $\sin$ or $\cos$, $A \in \mathbb{R}$ is referred to as its _amplitude_, $\omega \in \mathbb{R}$ is its _angular frequency_ (measured in radians per time unit) and $\phi \in \mathbb{R}$ is its _phase shift_ (measured in radians). Its _frequency_ $f$ is given as $f = \frac{\omega}{2 \pi}$ and is measured in periods/cycles (dimensionless) per unit of time; when time is measured in seconds, then $f$ is measured in Hertz (Hz). Furthermore, its period $T$ is given as $T = \frac{1}{f} = \frac{2 \pi}{\omega}$ and is measured in time units.
#
# Note that, given a sinusoid, we can always express it in its _standard cosine form_ using $\sin$/$\cos$ properties:
#
# \begin{align*}
# x(t) = A \cos(\omega t + \phi)
# \end{align*}
#
# such that $A \geq 0$, $\omega \geq 0$ and $\phi \in (-\pi, \pi]$. In that case, we refer to $A$ as the sinusoidal's _magnitude_ (instead of _amplitude_) to emphasize the fact that it is non-negative.
#
# It is not difficult to see that sinusoids like the one on the first expression are bounded signals in $[-|A|, |A|]$.
#
# ### 1.1 Interactive Graph of a Sinusoid
# +
def plotSinusoid(amplitude=1.0, frequency=1.0, phaseShift=0.0, showGrid=True):
    """Graph the sinusoid A*cos(2*pi*f*t + phi) over the window t in [-1, 1]."""
    timeAxis = np.linspace(-1.0, 1.0, 1000)
    samples = amplitude * np.cos(2 * np.pi * timeAxis * frequency + phaseShift)
    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    ax.plot(timeAxis, samples, lw=1.0, color='blue')
    ax.set_xlim(-1.0, 1.0)
    ax.set_ylim(-1.0, 1.0)
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('$x(t)$', fontsize=18)
    ax.grid(showGrid)
# Interactive sliders for amplitude, frequency and phase shift.
v = interactive(plotSinusoid, amplitude=(-1.0, 1.0), frequency=(0.5, 2.0), phaseShift=(-np.pi, np.pi))
display(v)
# -
# ### 1.2 The Sound of a Sinusoid
#
# Sinusoids sound like "pure tones". The human auditory system can typically perceive such tones in the range of 20Hz to 20,000Hz (20KHz), depending on the tone's loudness. Nevertheless, its best performance occurs in the range of 1KHz to 5KHz; that's the frequency range of human speech. A few interesting facts:
#
# - A tone's _pitch_ is determined by the sinusoid's frequency $f$, or, equivalently, by its period $T$.
# - A tone's loudness (or intensity) is determined by the absolute value $|A|$ of the sinusoid's amplitude.
# - We cannot perceive differences in constant (independent of time) phase shifts $\phi$.
# - In the playback demonstration below, do not expect
#     - your speakers to be able to reproduce tones well (or, at all) in the aforementioned audible frequency range.
# - to hear a louder sound by changing the sinusoid's amplitude, as your sound hardware typically normalizes it by default to $1$.
# - For the last two reasons, the demo below does not feature any amplitude (to affect the tone's volume) or phase shift sliders.
#
# Some references you may want to look into are:
# - [Hearing Range (@ Wikipedia)](https://en.wikipedia.org/wiki/Hearing_range)
# - [Musical Note Frequency (@ Wikipedia)](https://en.wikipedia.org/wiki/Musical_note#Note_frequency_(hertz))
#
# In the next audio demo, we'll use higher sinusoidal frequencies, so we can actually hear them.
# +
def soundSinusoidal(frequencyHz=1000.0):
    """Render and play a 3-second pure cosine tone at frequencyHz.

    Returns the generated sample array.
    """
    durationSeconds = 3
    samplingRateHz = 44100  # audio CD quality sampling rate
    sampleTimes = np.linspace(0.0, durationSeconds, samplingRateHz * durationSeconds)
    samples = np.cos(2 * np.pi * frequencyHz * sampleTimes)
    display(Audio(data=samples, rate=samplingRateHz))
    return samples
v = interactive(soundSinusoidal, frequencyHz=(1000.0, 20000.0))
display(v)
# -
# **Comment(s):**
# - If $t=0$ seconds is the time that you click the playback button above, what you are really hearing is only a 3-second chunk of a sinusoid:
#
# \begin{align*}
# x(t) = A \cos(\omega t) p(t; 0, 3)
# \end{align*}
# where, if $t_1 < t_2$,
# \begin{align*}
# p(t; t_1, t_2) \triangleq
# \begin{cases}
# 1 & t \in [t_1, t_2]
# \\
# 0 & \text{otherwise}
# \end{cases}
# \end{align*}
#
# is a _rectangular pulse_ between times $t_1$ and $t_2$.
# ### 1.3 Sums of Sinusoids
#
# Next, a demonstration of summing two sinusoidals is presented.
#
# Sums of two or more sinusoids are periodic signals only if the sinusoidal frequencies/periods are a rational multiple of some common (fundamental) frequency/period; otherwise, they are not periodic. For example, if
#
# \begin{align*}
# & x_i(t) \triangleq A_i \cos(\omega_i t + \phi_i) & i=1,2
# \\
# & x(t) \triangleq x_1(t) + x_2(t)
# \end{align*}
#
# with $\frac{\omega_1}{\omega_2} = \sqrt{2} \notin \mathbb{Q}$, then their sum $x(t)$ is aperiodic. However, when graphing such a sum, the lack of periodicity is almost impossible to discern.
#
# A final fun fact is that a sum of sinusoids of the same frequency yields a sinusoid of the very same frequency. If $\omega_1 = \omega_2 = \omega_o$, then, as defined above, the sum can be expressed as
#
# \begin{align*}
# & x(t) \triangleq x_1(t) + x_2(t) = |z| \cos(\omega_o t + \angle z)
# \end{align*}
#
# where $z \triangleq A_1 e^{j \phi_1} + A_2 e^{j \phi_2}$.
# +
def plotSumOfSinusoids(amplitude1=1.0, frequency1=1.0, phaseShift1=0.0,
                       amplitude2=1.0, frequency2=1.0, phaseShift2=0.0, showGrid=True):
    """Graph two sinusoids (blue, red) and their sum (thick green) on [-1, 1]."""
    timeAxis = np.linspace(-1.0, 1.0, 1000)

    def cosine(amp, freq, phase):
        # One standard-form sinusoid sampled on the shared time axis.
        return amp * np.cos(2 * np.pi * timeAxis * freq + phase)

    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    first = cosine(amplitude1, frequency1, phaseShift1)
    ax.plot(timeAxis, first, lw=1.0, color='blue')
    second = cosine(amplitude2, frequency2, phaseShift2)
    ax.plot(timeAxis, second, lw=1.0, color='red')
    ax.plot(timeAxis, first + second, lw=3.0, color='green')
    ax.set_xlim(-1.0, 1.0)
    ax.set_ylim(-2.0, 2.0)
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('$x_1(t), x_2(t), x(t)$', fontsize=18)
    ax.grid(showGrid)
v = interactive(plotSumOfSinusoids, amplitude1=(-1.0, 1.0), frequency1=(0.0, 2.0), phaseShift1=(-np.pi, np.pi),
                amplitude2=(-1.0, 1.0), frequency2=(0.0, 2.0), phaseShift2=(-np.pi, np.pi))
display(v)
# -
# In the figure above, the sum of sinusoids $x$ is depicted in green, while the individual sinusoids are depicted in blue ($x_1$) and red ($x_2$).
# ### 1.4 The Sound of A Sum of Sinusoids
#
# Now, let's investigate how a sum of two sinusoids might actually sound like. In general, we will be able to hear two distinct tones.
#
# However, an interesting acoustic phenomenon that can be observed is a _beat tone_, which is perceived, when $f_1$ and $f_2$ are sufficiently close to each other. In that case, our auditory system will hear a single tone, whose loudness changes periodically (with frequency $|f_1 - f_2|$; hence slowly) over time like a _tremolo_ effect.
#
# Reference that you may want to look into are
# - [Beat (acoustics) (@ Wikipedia)](https://en.wikipedia.org/wiki/Beat_(acoustics))
# - [How to calculate the perceived frequency of two sinusoidal waves added together?](https://math.stackexchange.com/questions/164369/how-to-calculate-the-perceived-frequency-of-two-sinusoidal-waves-added-together). It turns out that a sum of two sinusoids, even of different frequencies and amplitudes, can always be written as a product of a sinusoid and a periodic time-varying amplitude. This post provides some insights.
# +
def soundOfSumOfSinusoids(frequency1Hz=1000.0, frequency2Hz=1001.0):
    """Play the 4-second superposition of two unit-amplitude cosine tones.

    Returns the summed sample array (useful for hearing beat tones when the
    two frequencies are close).
    """
    durationSeconds = 4
    samplingRateHz = 44100  # audio CD quality sampling rate
    sampleTimes = np.linspace(0.0, durationSeconds, samplingRateHz * durationSeconds)
    mixture = (np.cos(2 * np.pi * frequency1Hz * sampleTimes)
               + np.cos(2 * np.pi * frequency2Hz * sampleTimes))
    display(Audio(data=mixture, rate=samplingRateHz))
    return mixture
v = interactive(soundOfSumOfSinusoids, frequency1Hz=(1000.0, 1500.0), frequency2Hz=(1000.0, 1500.0))
display(v)
# -
# ## 2. Exponential Signals
#
# Exponential signals have the form
#
# \begin{align*}
# x(t) \triangleq A e^{s t}
# \end{align*}
#
# for all $t$, where, $A$ and $s$ are either a real or complex constants. If both of them are real-valued, then this leads to a real-valued exponential signal; otherwise, it leads to a complex-valued exponential signal. In practice, we will encounter more often _right-sided_ exponential signals:
#
# \begin{align*}
# x(t) \triangleq A e^{s t} u(t)
# \end{align*}
#
# where
#
# \begin{align*}
# u(t) \triangleq
# \begin{cases}
# 0 & t < 0
# \\
# \text{undefined} & t=0
# \\
# 1 & t > 0
# \end{cases}
# \end{align*}
#
# is the _unit step signal_ that jumps at $t=0$. This implies that right-sided exponential signals equal $0$ for $t<0$. In what follows, for pure convenience, we will only consider such right-sided signals.
# ### 2.1 Real(-valued) Exponential Signals
#
# We are now going to assume that $A \in \mathbb{R}$, $s = \sigma \in \mathbb{R}$ and, therefore,
#
# \begin{align*}
# x(t) \triangleq A e^{\sigma t} u(t)
# \end{align*}
#
# Note that $\sigma$ is measured in inverse time units.
#
# For convenience, let's assume from this point that $A > 0$.
#
# It is not hard to see that
# - when $\sigma < 0$, then the real exponential signal is monotonically decreasing from the value $x(0)=A$, which is the signal's maximum value. In the limit $t \to +\infty$, they converge to $0$.
# - when $\sigma = 0$, then the real exponential signal maintains a constant value of $x(0)=A$ for $t>0$, i.e. it becomes a (scaled) step signal that jumps at $0$.
# - when $\sigma > 0$, then the real exponential signal is monotonically increasing from the value $x(0)=A$, which is the signal's minimum value. In the limit $t \to +\infty$, they diverge to $+\infty$.
#
# The two aforementioned extreme cases reverse, when $A < 0$.
# +
def plotRealExponential(A=1.0, sigma=-5.0, showGrid=True):
    """Graph the right-sided real exponential A*exp(sigma*t)*u(t) on [-0.1, 1]."""
    timeAxis = np.linspace(-0.1, 1.0, 1000)
    signal = A * np.exp(sigma * timeAxis)
    signal[timeAxis < 0] = 0.0  # unit-step gating: zero before t = 0
    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    ax.plot(timeAxis, signal, lw=1.0, color='blue')
    ax.set_xlim(-0.1, 1.0)
    ax.set_ylim(-np.exp(1), np.exp(1))
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('$x(t)$', fontsize=18)
    ax.grid(showGrid)
v = interactive(plotRealExponential, A=(-1.0, 1.0), sigma=(-5.0, 1.0))
display(v)
# -
# ### 2.2 The Sound of an Exponential Signal
#
# Unlike sinusoids, simple exponential signals like the ones we are considering at this point produce no interesting sounds. Trying to sound them, when $\sigma$ is very negative, produces a "pop" sound (like the sound speakers often make, when they are turned on) at their beginning ($t=0$), which is due to the abrupt signal value change from $0$ to $A$; in essence, one hears the step signal. The proof is in the pudding:
#
# +
def soundExponential(sigma=-10.0):
    """Play one second of the right-sided exponential exp(sigma*t)*u(t).

    Returns the sample array; mainly demonstrates the 'pop' of the step edge.
    """
    durationSeconds = 1
    samplingRateHz = 44100  # audio CD quality sampling rate
    sampleTimes = np.linspace(-0.1, durationSeconds, samplingRateHz * durationSeconds)
    samples = np.exp(sigma * sampleTimes)
    samples[sampleTimes < 0] = 0.0  # unit-step gating makes the signal right-sided
    display(Audio(data=samples, rate=samplingRateHz))
    return samples
v = interactive(soundExponential, sigma=(-10.0, 10.0))
display(v)
# -
# A couple more observations are:
# - A second "pop" sound may be heard, if $\sigma$ is increased towards $0$. This is because of a second abrupt change at the end of the clip from some value to $0$. In reality, we are not listening to a right-sided exponential; rather, we are listening to the pulse $x(t) \triangleq e^{\sigma t}p(t; 0, T)$, where $T=1$ seconds is the duration of the clip (and, hence, pulse).
# - For the same reason, only a late "pop" may be heard, mostly when $\sigma > 0$. In that case the first abrupt change (first "pop") will be imperceptible, due to the automatic gain control (volume adjustment) the sound hardware performs, which renders the second abrupt change much more pronounced than the first one.
#
# Henceforth, if there is no sinusoidal factor (in general: no periodic component) present in a signal, we will not attempt to reproduce its sound; we now know how a "pop" sounds :-)
# # 3. Complex(-valued) Sinusoids
#
# Apart from a plain (real-valued) sinusoidal signal, there is the concept of a complex-valued sinusoid, whose general form is
#
# \begin{align*}
# x(t) = A e^{j \omega t}
# \end{align*}
#
# where the $A \in \mathbb{C}$ is referred to as the signal's _complex amplitude_ and $\omega \in \mathbb{R}$ is called its _angular frequency_, since it plays the same role for this signal as it plays for a plain (real-valued) sinusoid. If $A$'s polar form is given as $A = |A| e^{j \angle A}$, then we can re-express a complex sinusoid as
#
# \begin{align*}
# x(t) \triangleq A e^{j \omega t} = |A| e^{j (\omega t + \angle A)} \quad \Rightarrow \quad
# \begin{cases}
# \mathrm{Re}\!\left\{ x(t) \right\} = |A| \cos(\omega t + \angle A)
# \\
# \mathrm{Im}\!\left\{ x(t) \right\} = |A| \sin(\omega t + \angle A) = |A| \cos\left(\omega t + \angle A - \frac{\pi}{2} \right)
# \end{cases}
# \end{align*}
#
# Without going into the details, by using trigonometry, we can always manipulate a complex sinusoid, so that $\angle A \in (-\pi, \pi]$ and $\omega \geq 0$.
#
# What we just saw is that the real and imaginary parts of a complex-valued sinusoid are real-valued sinusoids of the same amplitude (magnitude, to be more precise) and frequency, but differ in phase by 90 degrees; one can argue that the real part lags the imaginary part by 90 degrees.
#
# Below, the real and imaginary parts of a complex sinusoid are depicted in blue and red respectively.
# +
def plotRealImaginaryPartsComplexSinusoid(magnitudeA=1.0, frequency=1.0, phaseA=0.0, showGrid=True):
    """Graph Re{x(t)} (blue) and Im{x(t)} (red) of x(t) = |A| e^{j(wt + phaseA)}.

    Parameters:
        magnitudeA: magnitude |A| of the complex amplitude.
        frequency: frequency f in cycles per time unit (w = 2*pi*f).
        phaseA: phase angle of A in radians.
        showGrid: toggle the plot grid.
    """
    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    omega = 2.0 * np.pi * frequency
    t = np.linspace(-1.0, 1.0, 1000)
    trigArg = omega * t + phaseA
    Rex = magnitudeA * np.cos(trigArg)
    Imx = magnitudeA * np.sin(trigArg)
    ax.plot(t, Rex, lw=1.0, color='blue')
    ax.plot(t, Imx, lw=1.0, color='red')
    ax.set_xlim(-1.0, 1.0)
    ax.set_ylim(-1.0, 1.0)
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('Re{$x(t)$}, Im{$x(t)$}', fontsize=18)
    ax.grid(showGrid)
# BUG FIX: the original passed period=(0.5, 10.0), but the function has no
# `period` parameter, so ipywidgets could not map it to a slider and the
# frequency could never be varied; expose the `frequency` argument instead.
v = interactive(plotRealImaginaryPartsComplexSinusoid, magnitudeA=(0.0, 1.0), frequency=(0.5, 10.0), phaseA=(-np.pi, np.pi))
display(v)
# -
# In order to be able to visualize a complex sinusoid, we need to employ a 3D plot to represent it as a parametric curve $\left( t, \mathrm{Re}\!\left\{ x(t) \right\}, \mathrm{Im}\!\left\{ x(t) \right\}\right)$. It turns out that the resulting curve is a _helix_ along the time axis.
#
# Thing(s) you may want to look at:
# - [Helix (@ Wikipedia)](https://en.wikipedia.org/wiki/Helix)
# +
from mpl_toolkits import mplot3d
def plotComplexSinusoid(magnitudeA=1.0, phaseA=0.0, frequency=2.0, showRealPart=False, showImaginaryPart=False):
    """3D parametric plot (t, Re{x(t)}, Im{x(t)}) of x(t) = |A| e^{j(wt + phaseA)}.

    The curve is a helix of radius magnitudeA along the time axis. Optionally,
    the real and imaginary parts are also projected onto the bounding planes.
    """
    fig, _ = plt.subplots(1, 1, figsize=largeFigSize)
    ax = plt.axes(projection="3d")
    omega = 2 * np.pi * frequency
    t = np.linspace(-1.0, 1.0, 1000)
    trigArg = omega * t + phaseA
    # Constant offset (matching the axis limits) used to pin the 2D
    # projections onto the side planes of the plotting cube.
    dummy = np.ones_like(t)
    # plot complex-valued sinusoid
    Rex = magnitudeA * np.cos(trigArg)
    Imx = magnitudeA * np.sin(trigArg)
    ax.plot3D(t, Rex, Imx, 'blue')
    # plot its real part (projected onto the plane Im = -1)
    if showRealPart:
        ax.plot3D(t, Rex, -dummy, 'red')
    # plot its imaginary part (projected onto the plane Re = +1)
    if showImaginaryPart:
        ax.plot3D(t, dummy, Imx, 'green')
    ax.set_xlim(-1.0, 1.0)
    ax.set_ylim(-1.0, 1.0)
    ax.set_zlim(-1.0, 1.0)
    ax.set_xlabel('$t$',fontsize=18)
    ax.set_ylabel('Re{$x(t)$}', fontsize=18)
    ax.set_zlabel('Im{$x(t)$}', fontsize=18)
v = interactive(plotComplexSinusoid, magnitudeA=(0.0,1.0), phaseA=(-np.pi,np.pi), frequency=(0.0,5.0))
display(v)
# -
# We notice that the helix is of radius $|A|$ and, therefore, the entire complex-valued sinusoid fits in a cylinder of radius $|A|$, whose axis is the time axis.
# ### 3.1 The Sound of a Complex Sinusoid?
#
# It turns out that there's nothing much we can do to "hear" (represent as a sound) a complex exponential. For example, in a futile attempt below, the real and imaginary parts of a complex sinusoid are used as left and right channels respectively of a stereophonic audible signal. Even with quality headphones, one hears the same tone, whether ones chooses the stereophonic or monophonic (for which both channels play $\mathrm{Re}\!\left\{ x(t) \right\}$) options.
# +
def soundComplexSinusoidInStereo(frequencyHz=3000.0, playInStereo=False):
    """Attempt to 'hear' a complex sinusoid.

    Stereo mode routes Re{x} to the left channel and Im{x} to the right;
    mono mode plays Re{x} on both channels. Returns the 2xN sample array.
    """
    durationSeconds = 3
    samplingRateHz = 44100  # audio CD quality sampling rate
    sampleTimes = np.linspace(0.0, durationSeconds, samplingRateHz * durationSeconds)
    phase = 2 * np.pi * frequencyHz * sampleTimes
    realPart = np.cos(phase)
    if playInStereo:
        channels = np.vstack((realPart, np.sin(phase)))  # left: Re, right: Im
    else:
        channels = np.vstack((realPart, realPart))  # both channels play Re
    display(Audio(data=channels, rate=samplingRateHz))
    return channels
v = interactive(soundComplexSinusoidInStereo, frequencyHz=(1000.0, 5000.0))
display(v)
# -
# ## 4. Real(-valued) Modulated Exponential Signals
#
# When a signal is multiplied by a sinusoid (whether real or, sometimes, complex), we say that the signal is _modulated_. A right-sided, real-valued modulated exponential signal takes the general form
#
# \begin{align*}
# x(t) \triangleq A e^{\sigma t} \cos(\omega t + \phi) u(t)
# \end{align*}
#
# where $A$, $\sigma$, $\omega$ and $\phi$ are real constants. As usual, after some trigonometry, we can ensure that always $A, \omega \geq 0$ and $\phi \in (-\pi, \pi]$.
#
# We call the factor $|A| e^{\sigma t}$ _envelope_ of $x$ and it can be thought of as a time-varying magnitude of the modulating sinusoid. This is because $|x(t)| \leq |A| e^{\sigma t}$ for all $t$.
# +
def plotRealModulatedExponential(A=1.0, sigma=-5.0, frequency=3.0, phaseShift=0.0, showEnvelope=True, showGrid=True):
    """Graph the right-sided modulated exponential A e^{sigma t} cos(wt + phi) u(t).

    When showEnvelope is True, the +/- |A e^{sigma t}| envelope is drawn in gray.
    """
    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    omega = 2 * np.pi * frequency
    timeAxis = np.linspace(-0.1, 1.0, 1000)
    signedEnvelope = A * np.exp(sigma * timeAxis)  # signed envelope A e^{sigma t}
    signal = signedEnvelope * np.cos(omega * timeAxis + phaseShift)
    signal[timeAxis < 0] = 0.0  # unit-step gating: right-sided signal
    ax.plot(timeAxis, signal, lw=1.0, color='blue')
    if showEnvelope:
        bound = np.abs(signedEnvelope)
        for trace in (-bound, bound):
            ax.plot(timeAxis, trace, lw=1.0, color='gray')
    ax.set_xlim(-0.1, 1.0)
    ax.set_ylim(-np.exp(1), np.exp(1))
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('$x(t)$', fontsize=18)
    ax.grid(showGrid)
v = interactive(plotRealModulatedExponential, A=(-1.0, 1.0), sigma=(-5.0, 1.0), frequency=(0.0, 5.0), phaseShift=(-np.pi, np.pi))
display(v)
# -
# It should be apparent that,
# - if $\sigma < 0$, then the signal converges to $0$ as time goes by; the more negative $\sigma$ is, the faster it reaches $0$.
# - if $\sigma = 0$, then the signal amounts to a right-sided sinusoid, since the envelope is constant.
# - if $\sigma > 0$, then the signal diverges as time goes by; the more positive $\sigma$ is, the faster it diverges.
#
#
# Now, let's see how such signals sound.
# +
def soundRealModulatedExponential(sigma=0.0, frequencyHz=3000.0):
    """Play 3 seconds of a cosine tone shaped by the envelope exp(sigma*t).

    Returns the sample array; the envelope is perceived as a fade in/out.
    """
    durationSeconds = 3
    samplingRateHz = 44100  # audio CD quality sampling rate
    sampleTimes = np.linspace(-0.1, durationSeconds, samplingRateHz * durationSeconds)
    envelope = np.exp(sigma * sampleTimes)
    samples = envelope * np.cos(2 * np.pi * frequencyHz * sampleTimes)
    display(Audio(data=samples, rate=samplingRateHz))
    return samples
v = interactive(soundRealModulatedExponential, sigma=(-10, 10), frequencyHz=(1000.0, 5000.0))
display(v)
# -
# Above,
# - if $\sigma < 0$, then the tone fades out as time goes by; the more negative $\sigma$ is, the faster the tone fades out.
# - if $\sigma = 0$, then we hear a constant tone, since the envelope is constant.
# - if $\sigma > 0$, then the tone fades in; the more positive $\sigma$ is, the faster the tone fades in.
#
# In general, we perceive the envelope as a time-varying "volume" (loudness) of the modulating tone.
# ## 5. Complex(-valued) Exponential Signals
#
# The most general case of exponential signals is the one, when they are complex-valued. If $A \in \mathbb{C}$ (sometimes referred to as _complex amplitude_ of the signal) with polar form $A = |A| e^{j \angle A}$, $s \in \mathbb{C}$ (sometimes referred to as _complex frequency_ of the signal) with Cartesian form $s = \sigma + j \omega$ and, at least, $\omega \neq 0$ or $\phi \neq 0, \pm \pi$ to ensure that they are indeed complex-valued, then such signals take the right-sided form of
#
# \begin{align*}
# x(t) \triangleq A e^{s t} u(t) = |A| e^{\sigma t} e^{j (\omega t + \angle A)} u(t) \quad \Rightarrow \quad
# \begin{cases}
# \mathrm{Re}\!\left\{ x(t) \right\} = |A| e^{\sigma t} \cos(\omega t + \angle A) u(t)
# \\
# \mathrm{Im}\!\left\{ x(t) \right\} = |A| e^{\sigma t} \sin(\omega t + \angle A) u(t)
# \end{cases}
# \end{align*}
#
# The quantity $|A| e^{\sigma t}$ is called its _envelope_. We see that such a complex exponential signal is, in essence, a real-valued exponential signal (the envelope) that is modulated by a complex-valued sinusoid $e^{j (\omega t + \angle A)}$. Additionally, the real and imaginary part of such a signal are an exponential signal (again, the envelope) modulated by two sinusoids that are 90 degrees out of phase.
#
# It is important to note that the family of complex exponentials subsumes the complex sinusoidal and real exponential signals as special cases:
# - when $\sigma = 0$, we get the complex-valued sinusoidal family
# - when $\omega = 0$ and $\angle A = 0$ or $\pi$ (i.e., when $A$ is real), we get the real exponential family.
#
# The resulting parameteric curve $(t, \mathrm{Re}\!\left\{ x(t) \right\}, \mathrm{Im}\!\left\{ x(t) \right\})$ of a complex exponential is a wounding-in (converging to $0$) or winding-out (diverging) helix depending on whether $\sigma$ is negative or positive.
# +
def plotComplexExponential(magnitudeA=1.0, phaseA=0.0, frequency=5.0, sigma=-1.0, showRealPart=False,
                           showImaginaryPart=False, showEnvelopes=False):
    """3D parametric plot of the right-sided complex exponential
    x(t) = |A| e^{sigma t} e^{j(wt + phaseA)} u(t): a winding-in (sigma < 0)
    or winding-out (sigma > 0) helix along the time axis.

    Optional overlays: projections of the real/imaginary parts onto the side
    planes, and the +/- |A| e^{sigma t} envelopes.
    """
    fig, _ = plt.subplots(1, 1, figsize=largeFigSize)
    ax = plt.axes(projection="3d")
    omega = 2 * np.pi * frequency
    t = np.linspace(-0.1, 1.0, 1000)
    trigArg = omega * t + phaseA
    exponential = np.exp(sigma * t)
    # Constant offset (= e, matching the axis limits below) used to pin the
    # 2D projections onto the bounding planes.
    dummy = np.exp(1) * np.ones_like(t)
    # plot complex-valued exponential
    Rex = magnitudeA * np.cos(trigArg) * exponential
    Rex[t<0] = 0.0 # apply multiplication by a step function at t=0 in order to make it right-sided.
    Imx = magnitudeA * np.sin(trigArg) * exponential
    Imx[t<0] = 0.0 # apply multiplication by a step function at t=0 in order to make it right-sided.
    ax.plot3D(t, Rex, Imx, 'blue')
    if showRealPart:
        # plot its real part (projected onto the plane Im = -e)
        ax.plot3D(t, Rex, -dummy, 'red')
    if showImaginaryPart:
        # plot its imaginary part (projected onto the plane Re = +e)
        ax.plot3D(t, dummy, Imx, 'green')
    if showEnvelopes:
        # plot envelopes of real & imaginary parts
        envelope = magnitudeA * exponential
        ax.plot3D(t, envelope, -dummy, 'gray')
        ax.plot3D(t, -envelope, -dummy, 'gray')
        ax.plot3D(t, dummy, envelope, 'gray')
        ax.plot3D(t, dummy, -envelope, 'gray')
    ax.set_xlim(-0.1, 1.0)
    ax.set_ylim(-np.exp(1), np.exp(1))
    ax.set_zlim(-np.exp(1), np.exp(1))
    ax.set_xlabel('$t$',fontsize=18)
    ax.set_ylabel('Re{$x(t)$}', fontsize=18)
    ax.set_zlabel('Im{$x(t)$}', fontsize=18)
v = interactive(plotComplexExponential, magnitudeA=(0.0,1.0), phaseA=(-np.pi,np.pi), frequency=(0.0,5.0), sigma=(-5.0,1.0))
display(v)
# -
# ## 6. Products of Real Exponential Signals with Polynomials
#
# Another frequently encountered class of real-valued signals is the one where an exponential signal is multiplied by some polynomial. The right-sided variety of these signals takes the form
#
# \begin{align*}
# x(t) \triangleq A q(t) e^{\sigma t} u(t)
# \end{align*}
#
# where,
#
# \begin{align*}
# q(t) \triangleq \sum_{d=0}^D a_d t^d
# \end{align*}
#
# is some $D$-th degree polynomial of $t$ with real constant coefficients $\left\{ a_d \right\}_{d=0}^D$. As usual, $A$ and $\sigma$ are real constants. Without loss of generality, we can assume that $q$ is monic (i.e., $a_D=1$).
#
# It is straightforward to verify that
# - if $\sigma \geq 0$, $x$ diverges as $t$ grows.
# - if $\sigma < 0$, $\lim_\limits{t \to +\infty} x(t) = 0$, as can be verified by De L'Hospital's rule. Close to $t=0$, $|q(t)|$ rises quicker than the decaying exponential factor. Eventually, though, that latter factor overpowers the polynomial and the signal dies out. Due to this phenomenon, there will be one or more local maxima in between. Hence, such signals are bounded.
#
# Below we graph such a signal, when $q(t) \triangleq t^2 + a_1 t + a_o$.
# +
def plotPolynomialRealExponential(A=1.0, sigma=-5.0, a1=0.0, a0=0.0, showGrid=True):
    """Graph the right-sided signal A (t^2 + a1 t + a0) e^{sigma t} u(t)."""
    fig, ax = plt.subplots(1, 1, figsize=normalFigSize)
    timeAxis = np.linspace(-0.1, 2.0, 1000)
    quadratic = timeAxis**2 + a1 * timeAxis + a0  # monic 2nd-degree polynomial q(t)
    signal = quadratic * (A * np.exp(sigma * timeAxis))
    signal[timeAxis < 0] = 0.0  # unit-step gating: right-sided signal
    ax.plot(timeAxis, signal, lw=1.0, color='blue')
    ax.set_xlim(-0.1, 2.0)
    ax.set_xlabel('$t$', fontsize=18)
    ax.set_ylabel('$x(t)$', fontsize=18)
    ax.grid(showGrid)
v = interactive(plotPolynomialRealExponential, A=(-1.0, 1.0), sigma=(-5.0, 1.0), a1=(-5.0, 5.0), a0=(-5.0, 5.0))
display(v)
# -
# ## 7. Products of Complex Exponentials Signals with Polynomials
#
# The previous case can be generalized to complex exponentials. The right-sided form of this family looks like this:
#
# \begin{align*}
# x(t) \triangleq A q(t) e^{s t} u(t) = |A| |q(t)| e^{\sigma t} e^{j (\omega t + \angle A + \angle q(t))} u(t) \quad \Rightarrow \quad
# \begin{cases}
# \mathrm{Re}\!\left\{ x(t) \right\} = |A| |q(t)| e^{\sigma t} \cos(\omega t + \angle A + \angle q(t)) u(t)
# \\
# \mathrm{Im}\!\left\{ x(t) \right\} = |A| |q(t)| e^{\sigma t} \sin(\omega t + \angle A + \angle q(t)) u(t)
# \end{cases}
# \end{align*}
#
# where, since $q(t)$ is real-valued, $\angle q(t) = 0$, if $q(t) > 0$, and $\angle q(t) = \pi$, if $q(t) < 0$, for some $t$ ($\angle q(t)$ is arbitrary, when $q(t) = 0$ for some $t$). This implies that, when $q(t)$ changes signs, the signal's complex sinusoid undergoes a phase shift of $\pi$ radians.
#
# Hence, $A q(t)$, where $A \in \mathbb{C}$, plays the role of a time-varying complex envelope for the signal's complex sinusoid. On the other hand, $|A| |q(t)|$ plays the role of the (real-valued) envelope for the signal's sinusoids appearing in its real and imaginary parts.
#
# From the above form and the discussions we had so far, we see that
# - if $\sigma \geq 0$, then the signal diverges as $t \to +\infty$.
# - if $\sigma < 0$, then the signal eventually dies down as time progresses.
#
# Below we graph such a signal, when $q(t) \triangleq t$. We only consider $\sigma<0$, since the other case is rather uninteresting (the signal diverges; it spirals out of control from the very beginning).
# +
def plotPolynomialComplexExponential(magnitudeA=10.0, phaseA=0.0, frequency=20.0, sigma=-5.0, showRealPart=False,
                                     showImaginaryPart=False, showEnvelopes=False):
    """3D parametric plot of the right-sided signal
    x(t) = A t e^{sigma t} e^{j(wt + phaseA)} u(t), i.e. a complex exponential
    multiplied by the monic first-degree polynomial q(t) = t.

    Optional overlays: projections of the real/imaginary parts onto the side
    planes, and the +/- |A| |t| e^{sigma t} envelopes.
    """
    fig, _ = plt.subplots(1, 1, figsize=largeFigSize)
    ax = plt.axes(projection="3d")
    omega = 2 * np.pi * frequency
    t = np.linspace(-0.1, 1.0, 1000)
    trigArg = omega * t + phaseA
    exponential = np.exp(sigma * t)
    # q(t) = t: the simplest monic polynomial factor.
    polynomial = t
    # Constant offset (= e, matching the axis limits below) used to pin the
    # 2D projections onto the bounding planes.
    dummy = np.exp(1) * np.ones_like(t)
    # plot complex-valued exponential
    Rex = magnitudeA * np.cos(trigArg) * exponential * polynomial
    Rex[t<0] = 0.0 # apply multiplication by a step function at t=0 in order to make it right-sided.
    Imx = magnitudeA * np.sin(trigArg) * exponential * polynomial
    Imx[t<0] = 0.0 # apply multiplication by a step function at t=0 in order to make it right-sided.
    ax.plot3D(t, Rex, Imx, 'blue')
    if showRealPart:
        # plot its real part (projected onto the plane Im = -e)
        ax.plot3D(t, Rex, -dummy, 'red')
    if showImaginaryPart:
        # plot its imaginary part (projected onto the plane Re = +e)
        ax.plot3D(t, dummy, Imx, 'green')
    if showEnvelopes:
        # plot envelopes of real & imaginary parts
        envelope = magnitudeA * exponential * np.abs(polynomial)
        ax.plot3D(t, envelope, -dummy, 'gray')
        ax.plot3D(t, -envelope, -dummy, 'gray')
        ax.plot3D(t, dummy, envelope, 'gray')
        ax.plot3D(t, dummy, -envelope, 'gray')
    ax.set_xlim(-0.1, 1.0)
    ax.set_ylim(-np.exp(1), np.exp(1))
    ax.set_zlim(-np.exp(1), np.exp(1))
    ax.set_xlabel('$t$',fontsize=18)
    ax.set_ylabel('Re{$x(t)$}', fontsize=18)
    ax.set_zlabel('Im{$x(t)$}', fontsize=18)
v = interactive(plotPolynomialComplexExponential, magnitudeA=(0.0,10.0), phaseA=(-np.pi,np.pi),
                frequency=(10.0,20.0), sigma=(-5.0,-2.0))
display(v)
# -
# Let's take a listen to, say, the real part of the previous signal. We'll add an option for stereophonic playback of both real and imaginary parts just for the heck of it (in reality, we won't be able to "hear" both parts simultaneously).
# +
def soundComplexSinusoidInStereo(sigma=-5.0, frequencyHz=3000.0, playInStereo=False):
    """Play the polynomial-modulated complex exponential t e^{sigma t} e^{jwt}.

    Stereo mode routes Re{x} / Im{x} to the left / right channels; mono mode
    plays Re{x} on both. Returns the stacked 2xN sample array.

    NOTE(review): this redefines (shadows) the earlier function of the same
    name from Section 3.1, which has a different signature; a more accurate
    name would mention the polynomial/exponential factors.
    """
    durationSeconds = 3
    samplingRateHz = 44100 # audio CD quality sampling rate
    t = np.linspace(0.0, durationSeconds, samplingRateHz * durationSeconds)
    omega = 2 * np.pi * frequencyHz
    trigArg = omega * t
    exponential = np.exp(sigma * t)
    polynomial = t  # q(t) = t, matching the plotted signal above
    Rex = polynomial * exponential * np.cos(trigArg)
    Imx = polynomial * exponential * np.sin(trigArg)
    if playInStereo:
        x = np.vstack((Rex, Imx)) # left (right) channel plays Rex (Imx)
    else:
        x = np.vstack((Rex, Rex)) # both channels play same signal, i.e. Rex
    display(Audio(data=x, rate=samplingRateHz))
    return x
v = interactive(soundComplexSinusoidInStereo, sigma=(-5.0,-3.0), frequencyHz=(1000.0,5000.0))
display(v)
# -
# Predictably, we hear a pure tone, whose loudness increases quickly and then slowly fades away; it follows the characteristics of the signal's envelope. How quickly this transition happens depends on how negative $\sigma$ is; the smaller the sigma, the faster this transition takes place.
| Real- and Complex-Valued Sinusoids and Exponential Signals.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,jl:hydrogen
# text_representation:
# extension: .jl
# format_name: hydrogen
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# %%
using Plots
# Draw the implicit curve h(x, y) = 0 as the single zero-level contour.
h(x, y) = x^(x^y) - y
x = range(0, 0.11; length=1000)
y = range(0, 1.1; length=1000)
contour(x, y, h; levels=zeros(1), color=:red, colorbar=false)
plot!(; xlim=extrema(x), ylim=extrema(y), size=(400, 400))
# %%
using Plots
"""
    f(x, y, n)

Iterated power tower: start from `x^y`, then apply `acc ← x^acc` another
`n` times (so `n = 0` returns `x^y`).
"""
function f(x, y, n)
    acc = x^y
    for _ in 1:n
        acc = x^acc
    end
    return acc
end
x = range(0, 2.1, length=1000)
y = range(0, 10.1; length=1000)
plot(; colorbar=false)
# Overlay the zero-level set of f(x, y, n) - y for several tower heights n.
for n in 1:4
    contour!(x, y, (x, y) -> f(x, y, n) - y; levels=zeros(1), c=n, ls=:auto)
end
plot!(; xlim=extrema(x), ylim=extrema(y), size=(400, 400))
# %%
| 0022/implicit plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #! /usr/bin/env python
from pyspark import SparkConf, SparkContext
# Local Spark context with two worker threads for this notebook.
sc = SparkContext(conf=SparkConf().setAppName("MyApp").setMaster("local[2]"))
import re
# +
def parse_article(line):
    """Split a wiki dump line ("id<TAB>text") into the lowercased second
    words of bigrams whose first word is 'narodnaya'.

    Returns [] for malformed lines (no tab separator).
    """
    # `unicode` only exists on Python 2 (the notebook's kernel); fall back
    # to `str` so the same code also runs on Python 3.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    try:
        article_id, text = text_type(line.rstrip()).split('\t', 1)
    except ValueError:
        return []
    # Raw strings for the regexes so \W is a regex class, not a string escape.
    text = re.sub(r"^\W+|\W+$", "", text, flags=re.UNICODE)
    words = re.split(r"\W*\s+\W*", text, flags=re.UNICODE)
    word_pair = list(zip(words[:-1], words[1:]))
    word_pair = [word2.lower() for word1, word2 in word_pair if word1.lower() == 'narodnaya']
    return word_pair
# Count 'narodnaya <word>' bigrams across all articles, sorted by bigram key.
wiki = sc.textFile("/data/wiki/en_articles_part/articles-part", 16).flatMap(parse_article).filter(lambda x: x != []).map(lambda x: (x,1)).reduceByKey(lambda x,y: x+y).sortByKey()
result = wiki.map(lambda x: ('narodnaya_' + x[0], x[1])).collect()
# Python 2 print statement (this notebook runs on a python2 kernel).
for pair_count in result:
    print "%s\t%d" % (pair_count)
# -
| Big-Data-Essentials/Week5 - Introduction to Apache Spark - Practice/Spark+Assignment1+-+Pairs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## _*H2 ground state energy plot using different qubit mappings*_
#
# This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances with different fermionic mappings to quantum qubits.
#
# This notebook has been written to use the PYSCF chemistry driver.
# +
import numpy as np
import pylab
from qiskit import BasicAer
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit.aqua.algorithms import ExactEigensolver, VQE
from qiskit.aqua.components.optimizers import L_BFGS_B
from qiskit.aqua.components.variational_forms import RYRZ
from qiskit.chemistry.drivers import PySCFDriver
from qiskit.chemistry.core import Hamiltonian, QubitMappingType
# Two hydrogen atoms placed symmetrically about the origin; {0} is half the
# interatomic distance.
molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
algorithms = ['VQE', 'ExactEigensolver']
mappings = [QubitMappingType.JORDAN_WIGNER,
            QubitMappingType.PARITY,
            QubitMappingType.BRAVYI_KITAEV]
start = 0.5 # Start distance
by = 0.5 # How much to increase distance by
steps = 20 # Number of steps to increase by
# energies indexed as [mapping][algorithm][step].
energies = np.empty([len(mappings), len(algorithms), steps+1])
hf_energies = np.empty(steps+1)
distances = np.empty(steps+1)
aqua_globals.random_seed = 50
print('Processing step __', end='')
for i in range(steps+1):
    print('\b\b{:2d}'.format(i), end='', flush=True)
    # NOTE(review): the per-step increment is by/steps (0.025), so the scan
    # covers [0.5, 1.0]; 'by' is the total increase over the whole scan.
    d = start + i*by/steps
    for j in range(len(algorithms)):
        for k in range(len(mappings)):
            driver = PySCFDriver(molecule.format(d/2), basis='sto3g')
            qmolecule = driver.run()
            operator = Hamiltonian(qubit_mapping=mappings[k], two_qubit_reduction=False)
            qubit_op, aux_ops = operator.run(qmolecule)
            if algorithms[j] == 'ExactEigensolver':
                result = ExactEigensolver(qubit_op).run()
            else:
                optimizer = L_BFGS_B(maxfun=2500)
                var_form = RYRZ(qubit_op.num_qubits, depth=5)
                algo = VQE(qubit_op, var_form, optimizer)
                result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                                                  seed_simulator=aqua_globals.random_seed,
                                                  seed_transpiler=aqua_globals.random_seed))
            lines, result = operator.process_algorithm_result(result)
            energies[k][j][i] = result['energy']
    # Uses the `result` left over from the innermost loop's last iteration.
    hf_energies[i] = result['hf_energy'] # Independent of algorithm & mapping
    distances[i] = d
print(' --- complete')
print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
# -
pylab.rcParams['figure.figsize'] = (12, 8)
pylab.ylim(-1.14, -1.04)
pylab.plot(distances, hf_energies, label='Hartree-Fock')
# One curve per (algorithm, mapping) pair from the scan above.
for j in range(len(algorithms)):
    for k in range(len(mappings)):
        pylab.plot(distances, energies[k][j], label=algorithms[j] + ", " + mappings[k].value)
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground State Energy in different mappings')
pylab.legend(loc='upper right')
pylab.show()
pylab.rcParams['figure.figsize'] = (6, 4)
# Per-mapping figures: the energy curves, then the VQE-vs-exact difference
# on a log scale.
for k in range(len(mappings)):
    pylab.ylim(-1.14, -1.04)
    pylab.plot(distances, hf_energies, label='Hartree-Fock')
    for j in range(len(algorithms)):
        pylab.plot(distances, energies[k][j], label=algorithms[j])
    pylab.xlabel('Interatomic distance')
    pylab.ylabel('Energy')
    pylab.title('H2 Ground State Energy with {} mapping'.format(mappings[k].value))
    pylab.legend(loc='upper right')
    pylab.show()
    #pylab.plot(distances, np.subtract(hf_energies, energies[k][1]), label='Hartree-Fock')
    # energies[k][0] is VQE, energies[k][1] is ExactEigensolver (see `algorithms`).
    pylab.plot(distances, np.subtract(energies[k][0], energies[k][1]), color=[0.8500, 0.3250, 0.0980], label='VQE')
    pylab.xlabel('Interatomic distance')
    pylab.ylabel('Energy')
    pylab.yscale('log')
    pylab.title('Energy difference from ExactEigensolver with {} mapping'.format(mappings[k].value))
    pylab.legend(loc='upper right')
    pylab.show()
| chemistry/h2_mappings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inter-Class Coding Lab: Variables And Types
#
# The goals of this lab are to help you to understand:
#
# 1. Python data types
# 1. Getting input as different types
# 1. Formatting output as different types
# 1. Basic arithmetic operators
# 1. How to create a program from an idea.
# ## Variable Types
#
# Every Python variable has a **type**. The Type determines how the data is stored in the computer's memory:
# The same name can be rebound to values of different types.
a = "4"
type(a) # should be str
a = 4
type(a) # should be int
# ### Types Matter
#
# Python's built-in functions and operators work differently depending on the type of the variable:
# `+` is overloaded: numeric addition for ints, concatenation for strings.
a = 4
b = 5
a + b # this plus in this case means add so 9
a = "4"
b = "5"
a + b # the plus + in this case means concatenation, so '45'
# ### Switching Types
#
# there are built-in Python functions for switching types. For example:
# Conversion builtins return new objects; x itself stays a str.
x = "45" # x is a str
y = int(x) # y is now an int
z = float(x) # z is a float
print(x,y,z)
# ### Inputs type str
#
# When you use the `input()` function the result is of type `str`:
#
# input() always returns a str, no matter what the user types.
age = input("Enter your age: ")
type(age)
# We can use a built in Python function to convert the type from `str` to our desired type:
age = input("Enter your age: ")
age = int(age)
type(age)
# We typically combine the first two lines into one expression like this:
age = int(input("Enter your age: "))
type(age)
# ## Now Try This:
#
# Write a program to:
#
# - input your age, convert it to an int and store it in a variable
# - add one to your age, store it in another variable
# - print out your current age and your age next year.
#
# For example:
# ```
# Enter your age: 45
# Today you are 45 next year you will be 46
# ```
# Read the age, convert it to int in one expression, then report this
# year's and next year's age.
age = int(input("Enter your age: "))
print('Today you are', age, 'next year you will be', age + 1)
# ## Format Codes
#
# Python has some string format codes which allow us to control the output of our variables.
#
# - %s = format variable as str
# - %d = format variable as int
# - %f = format variable as float
#
# You can also include the number of spaces to use for example `%5.2f` prints a float with 5 spaces 2 to the right of the decimal point.
name = "Mike"
age = 45
gpa = 3.4
# %s/%d/%.3f consume the tuple values in order; %.3f prints three decimals.
print("%s is %d years old. His gpa is %.3f" % (name, age,gpa))
# ## Now Try This:
#
# Print the PI variable out 3 times. Once as a string, once as an int and once as a float to 4 decimal places:
PI = 3.1415927
# %d truncates the float to 3; %.4f rounds it to 4 decimal places.
print('%s %d %.4f' % (PI , PI , PI))
# ## Putting it all together: Fred's Fence Estimator
#
# Fred's Fence has hired you to write a program to estimate the cost of their fencing projects. For a given length and width you will calculate the number of 6 foot fence sections, and total cost of the project. Each fence section costs $23.95. Assume the posts and labor are free.
#
# Program Inputs:
#
# - Length of yard in feet
# - Width of yard in feet
#
# Program Outputs:
#
# - Perimeter of yard ( two times the sum of Length and Width )
# - Number of fence sections required ( Perimeter divided by 6 )
# - Total cost for fence ( fence sections multiplied by $23.95 )
#
# NOTE: All outputs should be formatted to 2 decimal places: e.g. 123.05
# +
#TODO:
# 1. Input length of yard as float, assign to a variable
# 2. Input Width of yard as float, assign to a variable
# 3. Calculate perimeter of yar, assign to a variable
# 4. calculate number of fence sections, assign to a variable
# 5. calculate total cost, assign to variable
# 6. print perimeter of yard
# 7. print number of fence sections
# 8. print total cost for fence.
# -
# ## Now Try This
#
# Based on the provided TODO, write the program in python in the cell below. Your solution should have 8 lines of code, one for each TODO.
#
# **HINT**: Don't try to write the program in one sitting. Instead write a line of code, run it, verify it works and fix any issues with it before writing the next line of code.
# TODO: Write your code here
# Fred's Fence estimator: read the yard dimensions, then report the
# perimeter, the number of 6-foot fence sections, and the total cost at
# $23.95 per section — all formatted to 2 decimal places as the spec
# requires (the old int() cast truncated the perimeter, breaking that).
length = float(input('Enter length of yard '))
width = float(input('Enter width of yard '))
perimeter = 2 * (length + width)  # rectangle perimeter
fence = perimeter / 6             # 6-foot sections needed
cost = fence * 23.95              # $23.95 per section
print('%.2f %.2f %.2f' % (perimeter, fence, cost))
| content/lessons/03/Class-Coding-Lab/CCL-Variables-And-Types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import folium
import json
import os
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
from collections import OrderedDict
from IPython.display import display
# Module-level colormap registry; not read by the helpers below.
cmaps = OrderedDict()
# +
#Selecting of desired file
def select_data():
    """Prompt the user to pick a JSON file from Json_data/ and load it.

    Returns a (data, heading) tuple where heading is the chosen file's
    name without its extension.
    """
    choices = os.listdir("Json_data")
    print("select one:-")
    for number, name in enumerate(choices, start=1):
        print(number, ')', name)
    selected = choices[int(input("enter number : ")) - 1]
    with open('Json_data/' + selected, 'r') as handle:
        data = json.load(handle)
    return (data, selected.split('.')[0])
#data,map_heading=select_data()
#To convert Json to dataFrame
def data2df(data):
    """Flatten {name: [address, {'lat','lng'}, [state, ...]] or None} into a
    DataFrame with columns names/State/Address/Latitude/Longitude.

    Entries whose value is None (failed geocode) are skipped.
    """
    raw_data = {"names": [], "State": [], "Address": [], "Latitude": [], "Longitude": []}
    for name in data:
        entry = data[name]
        if entry is None:  # `is None`, not `== None`; also hoists the lookup
            continue
        raw_data["names"].append(name)
        raw_data["Address"].append(entry[0])
        raw_data["Latitude"].append(entry[1]["lat"])
        raw_data["Longitude"].append(entry[1]["lng"])
        raw_data["State"].append(entry[2][0])
    return pd.DataFrame(raw_data)
#df=data2df(data)
def plot_data(data):
    """Build per-state counts for the choropleth.

    Returns {"State": [...], "values": [...]}: states present in `data`
    get their headcount; every other state in the fixed list gets the
    sentinel -total/30 so it renders in a distinct colour band.
    """
    state_data = {"State": [], "values": []}
    # NOTE(review): 'Tamil Nadu' appears twice in this list; the duplicate
    # is harmless because the second occurrence is skipped below.
    states = ['Andaman and Nicobar', 'Andhra Pradesh', 'Arunachal Pradesh', 'Assam', 'Bihar', 'Chandigarh', 'Chhattisgarh', 'Dadra and Nagar Haveli', 'Daman and Diu', 'Goa', 'Gujarat', 'Haryana', 'Himachal Pradesh', 'Jammu and Kashmir', 'Jharkhand', 'Karnataka', 'Kerala', 'Lakshadweep', 'Madhya Pradesh', 'Maharashtra', 'Manipur', 'Meghalaya', 'Mizoram', 'Nagaland', 'NCT of Delhi', 'Odisha', 'Puducherry', 'Punjab', 'Rajasthan', 'Sikkim', 'Tamil Nadu', 'Tamil Nadu', 'Telangana', 'Tripura', 'Uttar Pradesh', 'Uttarakhand', 'West Bengal']
    total = 0  # renamed from `sum`, which shadowed the builtin
    for name in data:
        if data[name] is None:
            continue
        state = data[name][2][0]
        total += 1
        if state in state_data["State"]:
            state_data["values"][state_data["State"].index(state)] += 1
        else:
            state_data["State"].append(state)
            state_data["values"].append(1)
    for state in states:
        if state not in state_data["State"]:
            state_data["State"].append(state)
            state_data["values"].append(-1*(total/30))
    return state_data
def pie_data(data):
    """Pie chart of people per state; failed geocodes are grouped as 'Other'."""
    con_data = {}
    for name in data:
        if data[name] is None:  # `is None` instead of `== None`
            point = 'Other'
        else:
            point = data[name][2][0]
        con_data[point] = con_data.get(point, 0) + 1
    labels = con_data.keys()
    sizes = con_data.values()
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=140)
    plt.axis('equal')  # equal aspect ratio keeps the pie circular
    plt.show()
def barGraph(data):
    """Horizontal bar chart of people per state ('Other' = failed geocode)."""
    fig, ax = plt.subplots()
    con_data = {}
    for name in data:
        if data[name] is None:  # `is None` instead of `== None`
            point = 'Other'
        else:
            point = data[name][2][0]
        con_data[point] = con_data.get(point, 0) + 1
    people = list(con_data.keys())
    y_pos = list(range(len(con_data)))
    performance = list(con_data.values())
    # These are already lists; the previous double list() wrapping was redundant.
    ax.barh(y_pos, performance, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(people)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('people')
    ax.set_title('')
    plt.show()
# Ask the user which dataset to analyse; map_heading is reused by MarkerMap below.
data,map_heading=select_data()
# +
#to show stateAnalysis in map
def stateAnalysisMap(raw_data, title):
    """Choropleth of per-state counts over the India shapefile, with every
    geocoded location overlaid as a green dot.

    Relies on the module-level helpers plot_data()/data2df() and the
    shapefile under shape_data/.
    """
    # Removed an unused local `cmaps = OrderedDict()` that shadowed the
    # module-level name, and a duplicated trailing plt.show().
    map_df = geopandas.read_file('shape_data/IND_adm1.shp')
    data = plot_data(raw_data)
    df1 = pd.DataFrame(data)
    merged = map_df.merge(df1, how='left', left_on="NAME_1", right_on="State")
    merged = merged[['State', 'geometry', 'values']]
    variable = 'values'
    vmin, vmax = min(data['values']), max(data['values'])
    fig, ax = plt.subplots(1)
    ax.set_title(title, fontdict={'fontsize': '25', 'fontweight' : '3'})
    ax.axis('off')
    cmap_type = 'Reds'
    sm = plt.cm.ScalarMappable(cmap=cmap_type, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    sm.set_array([])
    fig.colorbar(sm)
    df = data2df(raw_data)
    merged.plot(column=variable, cmap=cmap_type, linewidth=0.6, ax=ax, edgecolor='0.5')
    gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude))
    gdf.plot(ax=ax, color='green', marker='.', alpha=1, markersize=40)
    plt.show()
# Marker Map of India
def MarkerMap(data):
    """Folium map of India with one marker per geocoded location.

    Locations sharing the exact same (lat, lng) accumulate a comma-separated
    name list in their popup. Reads the module-level `map_heading`.
    """
    india_map=folium.Map(location=[23.1815,79.9864],zoom_start=5,tiles='Stamen Toner').add_to(folium.Figure(width=800, height=900))
    place_persons={}
    for i in data:
        if (data[i]==None):
            print('skipping ',i,'as undefined address')
            continue
        # Key on the concatenated coordinate strings to group co-located people.
        if str(data[i][1]['lat'])+str(data[i][1]['lng']) in place_persons:
            place_persons[str(data[i][1]['lat'])+str(data[i][1]['lng'])]+=i+','
        else:
            place_persons[str(data[i][1]['lat'])+str(data[i][1]['lng'])]=i+','
        folium.Marker([data[i][1]['lat'],data[i][1]['lng']],popup=data[i][0]+'----------------'+place_persons[str(data[i][1]['lat'])+str(data[i][1]['lng'])]).add_to(india_map)
    print('-'*50,'\n',map_heading,'\n','-'*50)
    return india_map
#display(MarkerMap(data))
# Tabular view of the parsed dataset.
display(data2df(data))
# -
# Render all four views of the selected dataset.
plt.rcParams['figure.figsize'] = [30, 15]
stateAnalysisMap(data,map_heading)
display(MarkerMap(data))
pie_data(data)
barGraph(data)
| Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bike Sharing Assignment
# ---
# ## Environment Setup
# ---
# +
# To get multiple outputs in the same cell
from IPython.core.interactiveshell import InteractiveShell
# Show every expression result in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# +
# Suppress warnings (sklearn/statsmodels emit many deprecation notices)
import warnings
warnings.filterwarnings('ignore')
# +
# Import the EDA required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import plotly.express as px
# Import the machine learning libraries
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Importing RFE and LinearRegression
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
# Importing VIF from statsmodels
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Import the generic utility libraries
import os
import random
import datetime as dt
#Importing the function
from pandas_profiling import ProfileReport
# +
# Set the required global options
# To display all the columns and rows in dataframe output
pd.set_option( "display.max_columns", None)
pd.set_option( "display.max_rows", None)
# Setting the display format
# pd.set_option('display.float_format', lambda x: '%.2f' % x)
#pd.reset_option('display.float_format')
sns.set(style='whitegrid')
# %matplotlib inline
# -
# ---
# ## Data Sourcing
# ---
# - **_Reading the Bike Sharing dataset csv file_**
# +
# Read the raw csv file 'day.csv' - containing the daily bike-sharing data
# encoding - The type of encoding format needs to be used for data reading
day = pd.read_csv('day.csv', low_memory=False)
# -
# Quick sanity check of the loaded frame.
day.head()
day.shape
# +
#day['dteday'] = pd.to_datetime(day['dteday'], format='%d-%m-%Y')
# +
# df = pd.DataFrame()
# df['dteday'] = day['dteday']
# df['year'] = day['dteday'].dt.year
# df['month'] = day['dteday'].dt.month
# df['day'] = day['dteday'].dt.day
# df['weekday'] = day['dteday'].dt.weekday
# df['dayname'] = day[['dteday']].apply(lambda x: dt.datetime.strftime(x['dteday'], '%A'), axis=1)
# df
# -
# #### Dataset characteristics
#
# - day.csv have the following fields:
#
# - instant: record index
# - dteday : date
# - season : season (1:spring, 2:summer, 3:fall, 4:winter)
# - yr : year (0: 2018, 1:2019)
# - mnth : month ( 1 to 12)
# - holiday : weather day is a holiday or not (extracted from http://dchr.dc.gov/page/holiday-schedule)
# - weekday : day of the week
# - workingday : if day is neither weekend nor holiday is 1, otherwise is 0.
# + weathersit :
# - 1: Clear, Few clouds, Partly cloudy, Partly cloudy
# - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
# - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
# - 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
# - temp : temperature in Celsius
# - atemp: feeling temperature in Celsius
# - hum: humidity
# - windspeed: wind speed
# - casual: count of casual users
# - registered: count of registered users
# - cnt: count of total rental bikes including both casual and registered
# - **_Checking the missing values_**
# Count columns that contain at least one missing value.
miss = day.isna().sum()
len(miss[miss > 0])
# _There are no missing values in the data as evident from **.isna()**_
day.describe()
# ---
# ## Data Cleaning
# ---
# +
# Checking the values of 'instant'
day['instant'].nunique()
print(" % 3.1f%% unique values in variable instant" %(day['instant'].nunique()/len(day)*100))
# +
# Checking the values of 'dteday'
day['dteday'].nunique()
# Fixed copy/paste bug: this message previously said 'instant'.
print(" % 3.2f%% unique values in variable dteday" %(day['dteday'].nunique()/len(day)*100))
# -
# _Also year, month, weekday, workingday, holiday are the important metrics already derived from 'dteday' variable._ <br>
# _So, the variables **'instant'** and **'dteday'** can be dropped as it contains 100% distinct values, not useful for prediction._
# +
# Dropping the variables 'instant' and 'dteday'
# Identifier-like columns: ~100% unique, and the useful calendar fields are
# already derived from dteday.
day.drop(['instant','dteday'], axis=1, inplace=True)
# +
# Dropping the variables 'casual' and 'registered' as the target variable will be 'cnt' - sum of 'casual' and 'registered'
day.drop(['casual','registered'], axis=1, inplace=True)
# -
day.head()
# Replace the integer codes with readable labels.
day['season_label'] = day['season'].map({1:'spring',2:'summer',3:'fall',4:'winter'})
day['yr_label'] = day['yr'].map({0:'2018',1:'2019'})
day['mnth_label'] = day['mnth'].map({1:'january',2:'feburary',3:'march',4:'april',5:'may',6:'june',7:'july',8:'august',9:'september',10:'october',11:'november',12:'december'})
# NOTE(review): weekday 0 mapped to 'tuesday' looks unusual — the UCI bike
# dataset maps 0 to Sunday; confirm against dteday before trusting labels.
day['weekday_label'] = day['weekday'].map({0:'tuesday',1:'wednesday',2:'thursday',3:'friday',4:'saturday',5:'sunday',6:'monday'})
day['weathersit_label'] = day['weathersit'].map({1:'clear',2:'mist',3:'rain',4:'heavy rain'})
day.drop(['season','yr','mnth','weekday','weathersit'], axis=1,inplace=True)
day.sample(5)
# - **_Checking the datatypes_**
day.info()
# +
# Converting the categorical variables to type 'category'
# NOTE(review): columns[7:] assumes the five *_label columns are the last
# ones — verify if the upstream column order changes.
category = day.columns[7:]
day['holiday'] = day['holiday'].astype('category')
day['workingday'] = day['workingday'].astype('category')
day[category] = day[category].astype('category')
# -
day.info()
# ---
# ## Data Analysis
# ---
day.corr()
plt.figure(figsize=(12,7))
sns.heatmap(day.corr(), annot=True);
# +
# num_dtype_ser = day.dtypes
# num_list = num_dtype_ser[num_dtype_ser == 'float64'].index
# num_list
# sns.pairplot(day[num_list])
# +
# Create scatterplots to visualize the relationship between quantitative/numerical variables
sns.pairplot(day.select_dtypes(include='number'))
# -
# **_Correlation coefficient for temp and atemp is 0.99, which is quite high and is also visible through scatter-plot_** <br>
# _NOT dropping the variable temp although there is a strong correlataion between temp and atemp._ <br>
# _Will be handled in Feature Selection process._
# +
# Dropping the variable temp as there is a strong correlataion between temp and atemp
# day.drop(['temp'], axis=1, inplace=True)
# -
# - **_atemp variable is somewhat having a linear relationship with cnt, the target variable_** <br>
# - **_cnt against hum and windspeed does not seem to have good correlation and the points are scattered all around in the plot._** <br>
# - _Also evident from heatmap, the Correlation coefficient is **-0.099 for hum vs cnt** and **-0.24 for windspeed vs cnt**, which is on a very low side._ <br>
#
# - _**NOT Dropping the variables hum and windspeed** although, there is a **very weak correlataion between them and cnt**, the target variable._ <br>
# - _Will be handled in Feature Selection process._
# +
# Dropping the variable temp as there is a strong correlataion between temp and atemp
# day.drop(['hum','windspeed'], axis=1, inplace=True)
# -
# Collect the category-typed columns and boxplot each against the target.
cat_dtype_ser = day.dtypes
category_list = cat_dtype_ser[cat_dtype_ser == 'category'].index
category_list
plt.figure(figsize=(18, 23))
for i,var in enumerate(category_list):
    plt.subplot(4,2,i+1)
    sns.boxplot(x=var, y='cnt', data=day);
plt.show();
# +
# sns.catplot(x="yr_label", y="cnt", data=day);
# +
# sns.catplot(x="weathersit_label", y="cnt",col="yr_label", data=day);
# +
# sns.catplot(x="weathersit_label", y="cnt", hue="yr_label",col = 'holiday', data=day);
# sns.catplot(x="weathersit_label", y="cnt", hue="yr_label",col = 'workingday', data=day);
# sns.catplot(x="weekday_label", y="cnt", hue="yr_label",col = 'weathersit_label', data=day);
# -
category_list
# +
# def bivariate(x_var, y_var = 'cnt', hue = 'yr_label', data = day):
# ax = sns.catplot(x=x_var, y= y_var, hue= hue, data=data)
# +
# plt.figure(figsize=(18, 23));
# for i,var in enumerate(category_list):
# bivariate(x_var=var);
# plt.show()
# +
# bivariate(x_var='season_label');
# bivariate(x_var='mnth_label');
# +
# bivariate(x_var='holiday');
# bivariate(x_var='weekday_label');
# bivariate(x_var='workingday');
# -
# ---
# ## Data Preparation for modelling
# ---
# ### Dummy Coding
day.head()
# +
# Create the dummy variables for the categorical features
# drop_first=True avoids the dummy-variable trap (perfect multicollinearity).
# cat_var = ['season_label','yr_label','mnth_label','weekday_label','holiday','workingday','weathersit_label']
dummy = pd.get_dummies(day[category_list], drop_first = True)
dummy.sample(4)
# +
# df.dtypes
# +
# df.describe()
# +
# Dropping the original categorical features
day.drop(category_list,axis=1,inplace=True)
# +
# Adding the dummy features to the original day dataframe
day = pd.concat([day,dummy], axis=1)
# -
day.sample(4)
# +
# Checking the shape of day dataframe
day.shape
# -
# ### Splitting the data into training and testing sets
# +
# Specify random_state so that the train and test data set always have the same rows, respectively
# 70/30 split; fixed random_state keeps the split reproducible across runs.
day_train, day_test = train_test_split(day, train_size = 0.7, random_state = 100)
# -
day_train.shape
day_test.shape
# _The train and test data shape looks good._
# ### Feature Scaling
plt.figure(figsize=(15,6))
plt.subplot(1,2,1);
sns.boxplot(x = day.atemp);
plt.subplot(1,2,2);
sns.histplot(x = day.atemp, kde=True);
# **_There are no outliers in atemp feature and so we can apply Standardized Scaling, instead of MinMax Scaling._**
scaler = StandardScaler()
# - **_Feature scaling should be done only for numerical features and not for categorical or dummy variables_**
# +
# Apply scaler() to all the variables except the 'yes-no' and 'dummy' variables.
num_vars = ['atemp', 'cnt']
# fit_transform on the training set only; the test set must reuse this fit.
day_train[num_vars] = scaler.fit_transform(day_train[num_vars])
# -
# _1. Training set should be fit as well transformed._ <br>
# _2. But the testing set should never be used to fit as this dataset is not available at the time of model building (in real world scenario)._ <br>
# _3. The testing set should only be transformed with the fit of training set._
day_train.sample(5)
# ### Splitting the targets into X and y sets for the model building
# pop() mutates day_train in place: it no longer contains 'cnt' afterwards,
# so X_train is simply the remaining frame.
y_train = day_train.pop('cnt')
X_train = day_train
# +
# y_train[10:15]
# X_train.sample(4)
# -
# ---
# ## Building a Linear Regession Model
# ---
# - **Algorithm Introduction:**
#
# - **_Linear Regression or Ordinary Least Squares Regression (OLS)_** is one of the simplest machine learning algorithms and produces both accurate and interpretable results on most types of continuous data.<br>
# - While more sophisticated algorithms like random forest will produce more accurate results, they are know as “black box” models because it’s tough for analysts to interpret the model.<br>
# - In contrast, **_OLS regression results are clearly interpretable because each predictor value (beta) is assigned a numeric value (coefficient) and a measure of significance for that variable (p-value)_**. This allows us to interpret the effect of difference predictors on the model and tune it easily.
#
# - Equation of linear regression<br>
# - $y = c + m_1x_1 + m_2x_2 + ... + m_nx_n$
#
# - $y$ is the response
# - $c$ is the intercept
# - $m_1$ is the coefficient for the first feature
# - $m_n$ is the coefficient for the nth feature<br>
#
# - The $m$ values are called the model **coefficients** or **model parameters**.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the regression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# -
# - **_Looking at the p-values, it looks like some of the variables aren't really significant (in the presence of other variables)._**
#
# - _We could simply drop the variable with the highest, non-significant p value. **A better way would be to cross-verify this with the VIF information.**_
# - **_Colinearity is the state where two variables are highly correlated and contain similiar information about the variance within a given dataset. To detect colinearity among variables, simply create a correlation matrix and find variables with large absolute values._**
#
# - **_Multicolinearity on the other hand is more troublesome to detect because it emerges when three or more variables, which are highly correlated, are included within a model. To make matters worst multicolinearity can emerge even when isolated pairs of variables are not colinear._**
#
# - _Multicollinearity will **not affect the model's output or prediction strength.**_
# - _Multicollinearity will **only affect the coefficient values** for the predictor variables by inflating their importance._
# ### Feature Selection with scikit learn RFE
# +
# Importing RFE and LinearRegression
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
# +
# Running RFE with the output number of the variable equal to 10
# Create linear regression object
sk_model = LinearRegression()
# Train the model using the training sets
sk_model.fit(X_train, y_train)
# +
# Running RFE
# Recursive Feature Elimination keeps the 14 strongest predictors.
rfe = RFE(sk_model, n_features_to_select = 14)
rfe = rfe.fit(X_train, y_train)
# +
# Features with rfe.support_ values
list(zip(X_train.columns,rfe.support_,rfe.ranking_))
# +
# Creating a list of rfe supported features
feats = X_train.columns[rfe.support_]
feats
# Creating a list of non-supported rfe features
drop_feats = X_train.columns[~rfe.support_]
drop_feats
# -
# ### Dropping the features and updating the Model
X_train.shape
# +
# Keep only RFE-selected variables; note this mutates X_train in place.
X_train.drop(drop_feats,axis=1,inplace=True)
# -
X_train.shape
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the regression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# -
# ### Checking VIF
#
# - Variance Inflation Factor or VIF, gives a basic quantitative idea about **how much the feature variables (independent/predictor) are correlated with each other.**<br>
# - It is an extremely important parameter to test our linear model. The formula for calculating `VIF` is:
#
# - $ VIF_i = \frac{1}{1 - {R_i}^2} $
# Check for the VIF values of the feature variables.
from statsmodels.stats.outliers_influence import variance_inflation_factor
# +
# Custom function to compute the Variance Inflation Factor (VIF) of every
# feature in a dataframe; VIF > 5 flags problematic multicollinearity.
def vif(df=None):
    """Return a dataframe of feature names and their VIFs, sorted descending.

    Parameters
    ----------
    df : pandas.DataFrame, optional
        Feature matrix to evaluate. Defaults to the module-level ``X_train``
        so that existing ``vif()`` calls keep working unchanged.
    """
    # Fall back to the global training matrix for backward compatibility.
    if df is None:
        df = X_train
    # Use a distinct local name so we don't shadow the function itself.
    vif_df = pd.DataFrame()
    vif_df['Features'] = df.columns
    # variance_inflation_factor expects the raw ndarray and a column index.
    vif_df['VIF'] = [variance_inflation_factor(df.values, i) for i in range(df.shape[1])]
    vif_df['VIF'] = round(vif_df['VIF'], 2)
    vif_df = vif_df.sort_values(by="VIF", ascending=False)
    return vif_df
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# **_We generally want a VIF that is less than 5. So there are clearly no variables with VIF more than 5._**
# ### Dropping the feature and updating the model
#
# As you can see from the summary, mnth_label_feburary is insignificant with p-value of `0.065`. We ll drop this variable.
X_train.shape
# +
# Dropping highly insignificant variable
X_train.drop(['mnth_label_feburary'], axis=1, inplace=True)
# -
X_train.shape
# - Looks good.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the resgression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# ### Dropping the feature and updating the model
#
# As you can see from the summary, mnth_label_may is less significant with p-value of `0.026`. We ll drop this variable.
X_train.shape
# +
# Dropping highly insignificant variable
X_train.drop(['mnth_label_may'], axis=1, inplace=True)
# -
X_train.shape
# - Looks good.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the resgression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# ### Dropping the feature and updating the model
#
# As you can see from the summary, mnth_label_january is less significant with p-value of `0.012`. We ll drop this variable.
X_train.shape
# +
# Dropping highly insignificant variable
X_train.drop(['mnth_label_january'], axis=1, inplace=True)
# -
X_train.shape
# - Looks good.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the resgression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# ### Dropping the feature and updating the model
#
# As you can see from the summary, mnth_label_december is less significant with p-value of `0.027`. We ll drop this variable.
X_train.shape
# +
# Dropping highly insignificant variable
X_train.drop(['mnth_label_december'], axis=1, inplace=True)
# -
X_train.shape
# - Looks good.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the resgression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# ### Dropping the feature and updating the model
#
# As you can see from the summary, mnth_label_november is less significant with p-value of `0.052`. We ll drop this variable.
X_train.shape
# +
# Dropping highly insignificant variable
X_train.drop(['mnth_label_november'], axis=1, inplace=True)
# -
X_train.shape
# - Looks good.
# +
# Building a Linear Model
# By default, the statsmodels library fits a line on the dataset which passes through the origin.
# But in order to have an intercept, we need to manually use the add_constant attribute of statsmodels.
# Add a constant to get an intercept
X_train_cn = sm.add_constant(X_train)
# Fit the resgression line using 'OLS'
model = sm.OLS(y_train, X_train_cn)
res = model.fit()
# Print the parameters, i.e. the intercept and the slope of the regression line fitted
res.params
# +
# Performing a summary operation lists out all the different parameters of the regression line fitted
print(res.summary())
# +
# Calling the Custom function to create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif()
# -
# ### Visualising the data with a scatter plot and the fitted regression line
# ---
# ## Residual Analysis
# ---
# - **_Residual Analysis needs to be done to validate assumptions of the model, and hence the reliability for inference._**
#
# ### Distribution of the Error terms
# - We need to check if the error terms are also normally distributed (which is one of the major assumptions of linear regression).
# - Plotting a histogram of the error terms and see what it looks like.
y_train_pred = res.predict(X_train_cn)
# y_train_pred.head()
# +
# Calculating the residuals
residuals = (y_train - y_train_pred)
# +
# Plot the histogram of the error terms/residuals
# plt.figure(figsize=(11,7))
# sns.distplot(residuals, hist=True)
# plt.title('Residuals Analysis', fontsize = 24) # Plot heading
# plt.xlabel('Errors / Residuals', fontsize = 12); # X-label
# +
# Plot the histogram of the error terms/residuals
plt.figure(figsize=(10,6))
sns.histplot(residuals, stat="density", kde=True, color='#d62728')
plt.title('Residuals Analysis', fontsize = 24) # Plot heading
plt.xlabel('Errors / Residuals', fontsize = 12); # X-label
# -
# **_We can conclude that the Error terms/Residuals follow a Normal-Distribution curve._**
# - **_Normal distribution of the residuals can be validated by plotting a q-q plot._**
#
# - **_Using the q-q plot we can infer if the data comes from a normal distribution._**
# - **_If yes, the plot would show fairly straight line. Absence of normality in the errors can be seen with deviation in the straight line._**
residuals_fit = res.resid
fig = sm.qqplot(residuals_fit, fit=True, line='45')
plt.show()
# **_The q-q plot of the bike sharing data set shows that the errors(residuals) are fairly normally distributed._**
# ### Homoscedasticity Assumption
# - _**Homoscedasticity** describes a situation in which the **error term** (that is, the “noise” or random disturbance in the relationship between the features and the target) **is the same across all values of the independent variables.**_
# - A scatter plot of residual values vs predicted values is a good way to check for homoscedasticity. There should be **no clear pattern in the distribution** and **if there is a specific pattern, the data is heteroscedastic.**
#
# 
# +
# Predicting the y_train
y_train_pred = res.predict(X_train_cn)
# Calculating the residuals
residuals = (y_train - y_train_pred)
# Visualizing the residuals and predicted value on train set
# plt.figure(figsize=(25,12))
sns.jointplot(x = y_train_pred, y = residuals, kind='reg', color='#d62728')
plt.title('Residuals of Linear Regression Model', fontsize = 20, pad = 100) # Plot heading
plt.xlabel('Predicted Value', fontsize = 12) # X-label
plt.ylabel('Residuals', fontsize = 12); # Y-label
# -
# **_Homoscedasticity Assumption holds true, as there is no clear pattern in the distribution_**
# ### Little or No autocorrelation in the residuals
#
# - Autocorrelation occurs when the residual errors are dependent on each other.The presence of correlation in error terms drastically reduces model’s accuracy.
#
# - **Autocorrelation** can be tested with the help of **Durbin-Watson test**.The null hypothesis of the test is that there is no serial correlation.
#
# - The test statistic is approximately equal to **2*(1-r)** where **r is the sample autocorrelation of the residuals**. Thus, **for r == 0, indicating no serial correlation, the test statistic equals 2**. This statistic will always be between 0 and 4. The closer to 0 the statistic, the more evidence for positive serial correlation. The closer to 4, the more evidence for negative serial correlation.
#
# - In our summary results, **Durbin-Watson is 2.065**, which tells us that the **residuals are not correlated**.
# ---
# ## Making Predictions using the Final Model
# ---
# ### Applying the scaling on the test sets
# +
# Apply scaler() to all the variables except the 'yes-no' and 'dummy' variables.
# scaler = StandardScaler() - scaler object is already instantiated while scaling train set
num_vars = ['atemp', 'cnt']
day_test[num_vars] = scaler.transform(day_test[num_vars])
# +
# day_test.head()
# -
# ### Splitting into X_test and y_test
y_test = day_test.pop('cnt')
X_test = day_test
# ### Predict the target
# +
# Now let's use our model to make predictions.
# Creating X_test dataframe by dropping variables from X_test
X_test = X_test[X_train.columns]
# Adding a constant variable
X_test_cn = sm.add_constant(X_test)
# Making predictions
y_pred = res.predict(X_test_cn)
# -
# ---
# ## Model Evaluation
# ---
# Plotting y_test and y_pred to understand the spread.
plt.figure(figsize=(12,7))
sns.scatterplot(x = y_test, y = y_pred, color='#d62728')
plt.title('y_test vs y_pred', fontsize=25, pad = 25) # Plot heading
plt.xlabel('y_test', fontsize=16) # X-label
plt.ylabel('y_pred', fontsize=16); # Y-label
# ### The scatterplot is almost a straight line which also depicts that predicted y resemble the y test.
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# ### Looking at the RMSE
#Returns the mean squared error; we'll take a square root
np.sqrt(mean_squared_error(y_test, y_pred))
# **_The closer the value of RMSE is to zero , the better is the Regression Model._**<br>
# **_In reality, we will not have RMSE equal to zero, in that case we will be checking how close the RMSE is to zero._**
# ### Checking the R-squared on the test set
# +
# R2 score on test data
r_squared = r2_score(y_test, y_pred)
r_squared
# +
# R2 score on train data
r_squared = r2_score(y_train, y_train_pred)
r_squared
# -
# ### **_R2 score of train and test data is very close. Hence we can say that the model has performed well on the test data._**
# # Summary
#
# - _Model Selected with Mix Approach_ - **RFE Technique and Manual selection guided by VIF**
# - _R-Squared_ : **0.822**
# - _Adjusted R-Squared_ : **0.818**
# - _R2_Score_ : **0.81**
# - _Root Mean Squared Error_ : **0.42**
| Bike_Sharing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Healthcare Readmissions Analysis
# + [markdown] deletable=true editable=true
# Preventing hospital readmissions is one of the best ways to reduce healthcare costs quickly and improve patient care in the process. This notebook demonstrates a method to predict whether a patient will be readmitted to a hospital within the 30 days after getting discharged. We use the data from 130 US hospitals for years 1999-2008 which is stored in the UCI Machine Learning Repository.
# + [markdown] deletable=true editable=true
# # Data Munging
# + [markdown] deletable=true editable=true
# This first section is to transform and prepare the raw data for analysis.
# + deletable=true editable=true
import pandas as pd
import numpy as np
readmissions_df = pd.read_csv('data/healthcare_data.csv')
readmissions_df.head()
# + [markdown] deletable=true editable=true
# We map the different ids in the data above to human readable values and remove all the missing values.
# + deletable=true editable=true
#There are 50 features, out of which many are unnecessary for our purpose.
#Delete features that either have large number of missing/unknown values or will not make a difference to predicting credit risk.
readmissions_df.drop(readmissions_df.columns[[0,1,2,5,10,11,15,16,17,18,19,20,22,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,42,43,44,45,46]], axis=1,inplace= True)
replacing_values = {"admission_type_id": {1:"Emergency", 2:"Urgent",3:"Elective",4: "Newborn", 5: np.NaN, 6: np.NaN, 7:"Trauma Center", 8:np.NaN },
"discharge_disposition_id": {1:"Discharged to Home",2:"Transferred to another hospital", 3:"Transferred to SNF",
4:"Transferred to ICF", 5:"Transferred to another hospital", 6:"Discharged to home with home health service",
7:"Discharged against Medical advice", 8:"Discharged to home with home health service",9:"Discharged to Home",
10:"Transferred to another hospital", 11:"Expired", 12:"Discharged to Home", 13:"Hospice",14:"Hospice",15:"Transferred within this institution",
16:"Transferred to another hospital", 17:"Transferred to another hospital", 18:np.NaN, 19:"Hospice", 20:"Hospice",21:"Hospice",
22:"Transferred to another hospital", 23:"Transferred to another hospital", 24:"Transferred to SNF",25:"Transferred to another hospital",
26:"Transferred to another hospital", 27:"Transferred to a federal health care facility", 28:"Transferred to another hospital", 29:"Transferred to another hospital"},
"admission_source_id": {1:"Physician Referral", 2:"Clinic Referral", 3:"HMO Referral", 4:"Transfer from a hospital", 5: "Transfer from a SNF", 6:"Transfer from another health care facility",
7:"Emergency Room",8:"Law Enforcement", 9:np.NaN, 10:"Transfer from a hospital", 11:"Normal Delivery", 12:"Premature Delivery", 13:"Sick Baby",
14:"Extramural Birth", 15:np.NaN, 17:np.NaN, 18:"Transfer from Home Health Agency", 19:"Transfer from Home Health Agency", 20: np.NaN, 21:np.NaN,
22:"Transfer from a hospital",23:"Normal Delivery", 24:"Sick Baby", 25:"Transfer from Ambulatory Surgery Center", 26:"Transfer from Hospice"},
"age":{"[0-10)":5,"[10-20)":15,"[20-30)":25,"[30-40)":35,"[40-50)":45,"[50-60)":55,"[60-70)":65,"[70-80)":75,"[80-90)":85,"[90-100)":95},
"A1Cresult":{"Norm":"Normal", ">8":">7"}
}
readmissions_df.replace(replacing_values,inplace = True)
readmissions_df['readmitted'] = readmissions_df['readmitted'].map({"NO":0,">30":0,"<30":1})
readmissions_df.head()
# + [markdown] deletable=true editable=true
# Now, we take a look at the number of rows with missing values and eliminate those rows from our dataframe.
# + deletable=true editable=true
print(readmissions_df.isnull().sum())
# + deletable=true editable=true
print(readmissions_df.shape)
readmissions_df.dropna(inplace=True)
print(readmissions_df.shape)
# + deletable=true editable=true
#We make a copy of the dataframe for exploratory analysis before encoding
readmissions_orig_df = readmissions_df.copy(deep=True)
# + deletable=true editable=true
#One-hot encoding on all the categorical values
readmissions_df = pd.get_dummies(readmissions_df, columns=["admission_type_id","discharge_disposition_id","admission_source_id","gender", "insulin", "change","diabetesMed","A1Cresult"])
readmissions_df.head()
# + [markdown] deletable=true editable=true
# # Exploratory Analysis
# + [markdown] deletable=true editable=true
# In this section, we analyze the main characteristics of the data set and explore different factors that may affect the readmission of a patient. This will help us learn more about the data before we begin predictive analysis.
# + deletable=true editable=true
import matplotlib.pyplot as plt
# %matplotlib inline
# + deletable=true editable=true
readmissions_orig_df.groupby('readmitted').size()
# + [markdown] deletable=true editable=true
# As we can see, this data set is quite imbalanced as almost 90% of the patients in this data set have not been readmitted within 30 days. This is helpful information to know before we measure the performance of our machine learning algorithms later on.
# Now, let us see some of the most common feature values for a patient who is readmitted.
# + deletable=true editable=true
#Mean features of a readmitted Patient
mean = readmissions_orig_df.groupby('readmitted').mean()
mean.iloc[1]
# + deletable=true editable=true
#Mode features of a readmitted Patient (most frequent value per column)
modeValue = readmissions_orig_df.groupby('readmitted').agg(lambda x: x.value_counts().index[0])
# .ix was deprecated and removed in pandas 1.0; use label-based .loc to
# select the readmitted == 1 row.
modeValue.loc[1]
# + [markdown] deletable=true editable=true
# As we can see from the above results, most of the readmitted patients are over 65 years old who were admitted as an Emergency and went through a large number of lab procedures and medications.
# + deletable=true editable=true
diabetes = readmissions_orig_df.groupby(['diabetesMed','readmitted']).size().unstack()
diabetes[1].plot(kind='bar', figsize=(8,4), fontsize=15)
plt.title("Relation between Readmitted patients and Diabetes", fontsize=15)
plt.ylabel("Number of Readmitted Patients", fontsize=15)
plt.xlabel("Diabetic Patients", fontsize=15)
# + [markdown] deletable=true editable=true
# From the above graph, we can see that majority of the patients who were readmitted were also diabetic. Now let's see how the types of admission and discharge can affect a patient's readmission.
# + deletable=true editable=true
readmitted = readmissions_orig_df.groupby(['admission_source_id', 'readmitted']).size()
admission_type = readmitted.groupby(level=0).apply(lambda x : x / x.sum() * 100).unstack()
admission_type[1].dropna(inplace = True)
admission_type[1].drop_duplicates().plot(kind='barh', figsize=(10,5), fontsize=15)
plt.title("Admission types of patients readmitted", fontsize=15)
plt.ylabel("Type of admission", fontsize=15)
plt.xlabel("% of People Readmitted", fontsize=15)
# + deletable=true editable=true
readmitted = readmissions_orig_df.groupby(['discharge_disposition_id', 'readmitted']).size()
readmitted_percentages = readmitted.groupby(level=0).apply(lambda x : x / x.sum() * 100).unstack()
readmitted_percentages[1].plot(kind='pie', figsize=(10,5), fontsize=15)
plt.xlabel("% of People Readmitted", fontsize=15)
# + [markdown] deletable=true editable=true
# # Predictive Analysis
# + [markdown] deletable=true editable=true
# Here, we classify the patients as readmitted within 30 days or not. We perform this task using various machine learning algorithms and also get a better understanding of important features that help us predict the readmission of a patient.
# + deletable=true editable=true
from sklearn.model_selection import cross_val_score
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.ensemble import ExtraTreesClassifier
# + deletable=true editable=true
readmissions_df.head()
# + deletable=true editable=true
# Create train and test dataframes.
X = readmissions_df.drop('readmitted', axis=1)
y = readmissions_df['readmitted']
# + deletable=true editable=true
#Replaces each 0/1 class label in-place with its per-sample weight, so the
#unbalanced data set can be handled through sklearn's `sample_weight` hook.
def getSampleWeights(y, weight1, weight2):
    for idx, label in enumerate(y):
        if label == 0:
            y[idx] = weight1
        elif label == 1:
            y[idx] = weight2
# + deletable=true editable=true
# prepare models
models = []
models.append(('Logistic Regression', LogisticRegression()))
models.append(('Random Forest Classifier', RFC(n_estimators=200)))
models.append(('Extra Trees Classifier', ExtraTreesClassifier(n_estimators=200, max_depth=None,min_samples_split=2, random_state=0)))
X = readmissions_df.drop('readmitted', axis=1)
y = readmissions_df['readmitted']
newY = list(y.values.flatten())
getSampleWeights(newY,1,9)
# + [markdown] deletable=true editable=true
# These last few steps take a while to compute due to the large size of the data set.
# + deletable=true editable=true
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = model_selection.KFold(n_splits=10)
cv_results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring='accuracy', fit_params={'sample_weight':newY})
results.append(cv_results)
names.append(name)
msg = "%s: %0.2f (%0.2f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# + [markdown] deletable=true editable=true
# As we see from the above results, Random Forest Classifier yields the most accuracy. Now, choosing this model, we can select only the top few features and see how it affects our accuracy.
# + deletable=true editable=true
#Perform Feature Selection
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
# Keep only the 20 best-scoring features and re-score the Random Forest.
skb = SelectKBest(k=20)
skb.fit(X,y)
print(X.shape)
newX = skb.transform(X)
print(newX.shape)
model = RFC(n_estimators=200)
scores = cross_val_score(model, newX,y, cv=10, scoring='accuracy',fit_params={'sample_weight':newY})
# The format string must consume both tuple values; the original lone
# "%0.2f" with a two-element tuple raises "not all arguments converted
# during string formatting".
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# + [markdown] deletable=true editable=true
# The above result shows us that in spite of removing more than half of the features from our data set, we still get decent accuracy while also speeding up computations. Now, using our Random Forest model, we rank the different features based on their importance in our model.
# + deletable=true editable=true
train, test = train_test_split(readmissions_df, test_size = 0.3)
model.fit(train.loc[:, train.columns != 'readmitted'], train['readmitted'])
cols = [col for col in train.columns if col not in ['readmitted']]
importances = pd.DataFrame({'features':cols,'importance_value':model.feature_importances_})
importances = importances.sort_values('importance_value',ascending=False).set_index('features')
importances = importances[(importances.importance_value > 0.007)]
importances.dropna().plot(figsize=(10,8), kind="barh", legend=False, title="Ranking Order of Importance of Features")
# + [markdown] deletable=true editable=true
# As we see from the above results, The number of lab procedures, medications and the time spent in the hospital by a patient are key features in determining whether he/she would be readmitted within 30 days.
| python/Healthcare-Readmissions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Redoing the period fits with everything I've learned from GD394 etc.
# +
#first get the python modules we need
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import os
import glob
from astropy.convolution import convolve, Box1DKernel
from astropy.table import Table
import astropy.units as u
from astropy.modeling import models, fitting
from astropy.timeseries import LombScargle
#matplotlib set up
# %matplotlib inline
from matplotlib import rcParams
rcParams["figure.figsize"] = (14, 5)
rcParams["font.size"] = 20
# -
wddvs = Table.read('dv_lists/wd_dvs.ecsv')
mddvs = Table.read('dv_lists/bd_dvs.ecsv')
wdt, wdv, wde = np.array(wddvs['MJD'])*24, np.array(wddvs['VELOCITY']), np.array(wddvs['ERROR'])
mdt, mdv, mde = np.array(mddvs['MJD'])*24, np.array(mddvs['VELOCITY']), np.array(mddvs['ERROR'])
# Fit the companion first
# Nope. Fold on m dwarf period and fit.
# +
fitter = fitting.LevMarLSQFitter()
sin_mod2 = models.Sine1D(amplitude=197.37, frequency=0.236679828, phase=0.1)+models.Const1D(26.92)
#sin_mod2 = models.Sine1D(amplitude=200, frequency=1/4.23)+models.Const1D(30)
sin_mod2.frequency_0.bounds = [0.95*(1/4.23), 1.05*(1/4.23)]
#sin_mod2.amplitude_0.bounds = [0.95*abs(sin_fit[0].amplitude), 1.05*abs(sin_fit[0].amplitude)]
#sin_mod2.amplitude_1.bounds = [0.95*abs(sin_fit[1].amplitude), 1.05*abs(sin_fit[1].amplitude)]
sin_fit2 = fitter(sin_mod2, mdt, mdv, weights= 1/mde)
fit_e2 = np.sqrt(np.diag(fitter.fit_info['param_cov']))
timefit2 = np.linspace(mdt[0]-10,mdt[-1]+10, 100000)
plt.subplot(131)
plt.errorbar(mdt, mdv, yerr=mde, marker='o', ls='none')
plt.plot(timefit2, sin_fit2(timefit2), ls='--', lw=2, zorder=-5, c='C1')
plt.plot(timefit2, sin_mod2(timefit2), ls='--', lw=2, zorder=-5, c='C2')
plt.xlim(mdt[0]-1, mdt[0]+1)
plt.subplot(132)
plt.errorbar(mdt, mdv, yerr=mde, marker='o', ls='none')
plt.plot(timefit2, sin_fit2(timefit2), ls='--', lw=2, zorder=-5, c='C1')
plt.plot(timefit2, sin_mod2(timefit2), ls='--', lw=2, zorder=-5, c='C2')
plt.xlim(mdt[2]-1, mdt[2]+1)
plt.subplot(133)
plt.errorbar(mdt, mdv, yerr=mde, marker='o', ls='none')
plt.plot(timefit2, sin_fit2(timefit2), ls='--', lw=2, zorder=5, c='C1')
plt.plot(timefit2, sin_mod2(timefit2), ls='--', lw=2, zorder=-5, c='C2')
#plt.plot(mdt, sin_fit2(mdt))
plt.xlim(mdt[-1]-5, mdt[-1]+5)
print(sin_fit2)
print(1/sin_fit2[0].frequency, fit_e2[1]/(sin_fit2[0].frequency**2) )
# -
sin_fit2[0].phase
1/period
1/4.23
fitter.fit_info
| clean_period_fit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Luis-Martinez-Bautista/daa_2021_1/blob/master/30_septiembre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="s4WMJRMSP6qb" outputId="0c517617-50f6-4318-f92b-41c37698b620" colab={"base_uri": "https://localhost:8080/", "height": 348}
import sys
# Walk every clock time from 00:00 to 23:59 and print the ones whose four
# digits read the same backwards (hour-tens == minute-units and
# hour-units == minute-tens), then report how many were found.
horaDec = 0
horaUni = 0
minutoDec = 0
minutoUni = 0
contador = 0
print("La lista de palindromos es: ")
for total in range(24 * 60):
    horaDec, horaUni = divmod(total // 60, 10)
    minutoDec, minutoUni = divmod(total % 60, 10)
    if horaDec == minutoUni and horaUni == minutoDec:
        contador += 1
        print(horaDec, horaUni, ":", minutoDec, minutoUni)
print("El numero de palindromos es: ", contador)
| 30_septiembre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pore-Scale Models
#
# Pore scale models are one of the more important facets of OpenPNM, but they can be a bit confusing at first, since they work 'behind-the-scenes'.
# They offer 3 main advantages:
#
# 1. A large library of pre-written models is included and they can be mixed together and their parameters edited to get a desired overall result.
# 2. They allow automatic regeneration of all dependent properties when something 'upstream' is changed.
# 3. The pore-scale model machinery was designed to allow easy use of custom written code for cases where a prewritten model is not available.
# The best way to explain their importance is via illustration.
#
# Consider a diffusion simulation, where the diffusive conductance is defined as:
#
# $$ g_D = D_{AB}\frac{A}{L} $$
#
# The diffusion coefficient can be predicted by the Fuller correlation:
#
# $$ D_{AB} = \frac{10^{-3}T^{1.75}(M_1^{-1} + M_2^{-1})^{0.5}}{P[(\Sigma_i V_{i,1})^{0.33} + (\Sigma_i V_{i,2})^{0.33}]^2} $$
#
# Now say you want to re-run the diffusion simulation at different temperature. This would require recalculating $D_{AB}$, followed by updating the diffusive conductance.
#
# Using pore-scale models in OpenPNM allows for simple and reliable updating of these properties, for instance within a for-loop where temperature is being varied.
# ## Using Existing Pore-Scale Models
#
# The first advantage listed above is that OpenPNM includes a library of pre-written model. In this example below we can will apply the Fuller model, without having to worry about mis-typing the equation.
import numpy as np
np.random.seed(0)
import openpnm as op
pn = op.network.Cubic(shape=[5, 5, 1], spacing=1e-4)
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
# Now we need to define the gas phase diffusivity. We can fetch the ``fuller`` model from the ``models`` library to do this, and attach it to an empty phase object:
air = op.phases.GenericPhase(network=pn)
f = op.models.phases.diffusivity.fuller
air.add_model(propname='pore.diffusivity',
model=f,
MA=0.032, MB=0.028, vA=16.6, vB=17.9)
# Note that we had to supply the molecular weights (``MA`` and ``MB``) as well as the diffusion volumes (``vA`` and ``vB``). This model also requires knowing the temperature and pressure, but by default it will look in 'pore.temperature' and 'pore.pressure'.
#
# Next we need to define a physics object with the diffusive conductance, which is also available in the model libary:
phys = op.physics.GenericPhysics(network=pn, phase=air, geometry=geo)
f = op.models.physics.diffusive_conductance.ordinary_diffusion
phys.add_model(propname='throat.diffusive_conductance',
model=f)
# Lastly we can run the Fickian diffusion simulation to get the diffusion rate across the domain:
fd = op.algorithms.FickianDiffusion(network=pn, phase=air)
fd.set_value_BC(pores=pn.pores('left'), values=1)
fd.set_value_BC(pores=pn.pores('right'), values=0)
fd.run()
print(fd.rate(pores=pn.pores('left')))
# ### Updating parameter on an existing model
#
# It's also easy to change parameters of a model since they are all stored on the object (``air`` in this case), meaning you don't have to reassign a new model get new parameters (although that would work). The models and their parameters are stored under the ``models`` attribute of each object. This is a dictionary with each model stored under the key match the ``propname`` to which is was assigned. For instance, to adjust the diffusion volumes of the Fuller model:
print('Diffusivity before changing parameter:', air['pore.diffusivity'][0])
air.models['pore.diffusivity']['vA'] = 15.9
air.regenerate_models()
print('Diffusivity after:', air['pore.diffusivity'][0])
# ### Replacing an existing model with another
#
#
# Let's say for some reason that the Fuller model is not suitable. It's easy to go 'shopping' in the models library to retrieve a new model and replace the existing one. In the cell below we grab the Chapman-Enskog model and simply assign it to the same ``propname`` that the Fuller model was previously.
f = op.models.phases.diffusivity.chapman_enskog
air.add_model(propname='pore.diffusivity',
model=f, MA=0.0032, MB=0.0028, sigma_AB=3.467, omega_AB=4.1e-6)
print('Diffusivity after:', air['pore.diffusivity'][0])
# Note that we don't need to explicitly call ``regenerate_models`` since this occurs automatically when a model is added. We do however, have to regenerate ``phys`` object so it calculates the diffusive conductance with the new diffusivity:
phys.regenerate_models()
fd.reset()
fd.run()
print(fd.rate(pores=pn.pores('left')))
# ## Changing dependent properties
#
# Now consider that you want to find the diffusion rate at higher temperature. This requires recalculating the diffusion coefficient on ``air``, then updating the diffusive conductivity on ``phys``, and finally re-running the simulation. Using pore-scale models this can be done as follows:
print('Diffusivity before changing temperaure:', air['pore.diffusivity'][0])
air['pore.temperature'] = 353.0
air.regenerate_models()
print('Diffusivity after:', air['pore.diffusivity'][0])
# We can see that the diffusivity increased with temperature as expected with the Chapman-Enskog model. We can also propagate this change to the diffusive conductance:
phys.regenerate_models()
# And lastly we can recalculate the diffusion rate:
fd.reset()
fd.run()
print(fd.rate(pores=pn.pores('left')))
# ## Creating Custom Models
#
# Lastly, let's illustrate the ease with which a custom pore-scale model can be defined and used. Let's create a very basic (and incorrect) model:
def new_diffusivity(target, A, B,
                    temperature='pore.temperature',
                    pressure='pore.pressure'):
    """Toy custom diffusivity model: D_AB = A * T**3 / (P * B).

    ``target`` is the object the model is attached to; ``temperature`` and
    ``pressure`` are dictionary keys looked up on it, so the model always
    uses the latest values when regenerated.
    """
    temp_vals = target[temperature]
    press_vals = target[pressure]
    return A * temp_vals ** 3 / (press_vals * B)
# There are a few key points to note in the above code.
#
# 1. Every model must accept a ``target`` argument since the ``regenerate_models`` mechanism assumes it is present. The ``target`` is the object to which the model will be attached. It allows for the looking up of necessary properties that should already be defined, like temperature and pressure. Even if you don't use ``target`` within the function it is still required by the pore-scale model mechanism. If it's presence annoys you, you can put a ``**kwargs`` at the end of the argument list to accept all arguments that you don't explicitly need.
# 2. The input parameters should not be arrays (like an Np-long list of temperature values). Instead you should pass the dictionary key of the values on the ``target``. This allows the model to lookup the latest values for each property when ``regenerate_models`` is called. This also enables openpnm to store the model parameters as short strings rather than large arrays.
# 3. The function should return either a scalar value or an array of Np or Nt length. In the above case it returns a DAB value for each pore, depending on its local temperature and pressure in the pore. However, if the ``temperature`` were set to ``'throat.temperature'`` and ``pressure`` to ``'throat.pressure'``, then the above function would return a DAB value for each throat and it could be used to calculate ``'throat.diffusivity'``.
# 4. This function can be placed at the top of the script in which it is used, or it can be placed in a separate file and imported into the script with ``from my_models import new_diffusivity``.
# Let's add this model to our ``air`` phase and inspect the new values:
# Register the custom model under the same propname; A and B are the free
# parameters of the toy law and are stored with the model for regeneration.
air.add_model(propname='pore.diffusivity',
              model=new_diffusivity,
              A=1e-6, B=21)
print(air['pore.diffusivity'])
| examples/notebooks/tutorials/pore_scale_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Insert code here.
import pandas as pd
import numpy as np
import random
import re
import time
import datetime
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm, neighbors
from sklearn.preprocessing import LabelEncoder
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW, BertConfig
import torch
from torch.utils.data import TensorDataset, random_split, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification
from transformers import get_linear_schedule_with_warmup
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
# from sentence_transformers import SentenceTransformer
# sent_encoder = SentenceTransformer('bert-base-nli-mean-tokens')
# +
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    # NOTE(review): the computation device is "cuda:1" but the name printed
    # below is for device 0 — confirm which GPU index is actually intended.
    device = torch.device("cuda:1")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# Release any cached GPU memory left over from a previous run.
torch.cuda.empty_cache()
# -
# Candidate pretrained checkpoints; `model_num` selects which one is fine-tuned.
models = ['bert-base-multilingual-cased', 'xlm-roberta-base', 'sagorsarker/bangla-bert-base', 'ai4bharat/indic-bert']
model_num = 1
tokenizer = AutoTokenizer.from_pretrained(models[model_num])
# Tab-separated data files; expected to contain (at least) `text` and `label` columns.
train = pd.read_csv('data/train/sub-task-1d-train-hi.tsv', sep='\t')
test = pd.read_csv('data/dev/sub-task-1d-dev-hi.tsv', sep='\t')
train.sample(15)
# Label vocabulary in first-seen order; positions in this list become class ids.
labels = list(train.label.unique())
def label_encode(val):
    """Encode a raw label as its integer position in the global ``labels`` list."""
    # list.index raises ValueError for labels unseen in the training data.
    position = labels.index(val)
    return position
# Map string labels to integer class ids.
train.label = train.label.apply(label_encode)
# +
train = train.reset_index(drop=True)
# Characters replaced by a space during cleaning.
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
# Intentionally empty: no stopword removal is applied here.
STOPWORDS = []
def clean_text(text):
    """Normalise one post.

    Coerces the input to ``str``, replaces the bracket/punctuation symbols
    matched by the module-level ``REPLACE_BY_SPACE_RE`` with spaces, and drops
    any whitespace-delimited token found in ``STOPWORDS``.
    """
    spaced = REPLACE_BY_SPACE_RE.sub(' ', str(text))
    kept_tokens = (tok for tok in spaced.split() if tok not in STOPWORDS)
    return ' '.join(kept_tokens)
# Clean the training text and strip runs of digits.
train.text = train.text.apply(clean_text)
# regex=True preserves the intended behaviour: since pandas 2.0 the default is
# regex=False, which would remove the literal substring '\d+' instead of digits.
train.text = train.text.str.replace(r'\d+', '', regex=True)
# -
# Apply the same label encoding and text cleaning to the held-out dev set.
test.label = test.label.apply(label_encode)
test = test.reset_index(drop=True)
test.text = test.text.apply(clean_text)
# regex=True preserves the intended behaviour (pandas >= 2.0 defaults to regex=False).
test.text = test.text.str.replace(r'\d+', '', regex=True)
# split the dataset into training and validation datasets
from sklearn.model_selection import train_test_split
# train_x, valid_x, train_y, valid_y = model_selection.train_test_split(train['tweet'], train['label'])
# 80/20 split; NOTE(review): no random_state, so the split differs between runs.
train_x, valid_x, train_y, valid_y = train_test_split(train.text, train.label, test_size=0.2)
def count_words(text):
    """Return the whitespace-token count of *text*, or None if it has no split().

    The bare ``except`` was narrowed to ``AttributeError`` (the failure mode for
    non-string values such as NaN floats from pandas) so that unrelated errors
    are no longer silently swallowed.
    """
    try:
        return len(text.split())
    except AttributeError:
        # Log the offending value and signal "no count" to the caller.
        print(text)
        return None
# Corpus statistics: mean token count, max token count, and how many posts
# exceed the 128-token budget used as MAX_LENGTH below.
total = 0
maxw = 0
large_count = 0
for i in train_x:
    temp = count_words(i)
    # NOTE(review): count_words can return None for non-string input, which
    # would make `total += temp` raise — assumes train_x holds only strings.
    total += temp
    maxw = temp if temp > maxw else maxw
    large_count += 1 if temp > 128 else 0
total/len(train_x), maxw, large_count
# Training hyper-parameters and input arrays.
MAX_LENGTH = 128  # tokens per post after padding/truncation
BATCH_SIZE = 16
EPOCHS = 10
posts = train_x.values
categories = train_y.values
# Checkpoint written whenever validation improves; reloaded when backtracking.
best_model_path = '/scratch/best_model_hi.pt'
posts_test = valid_x.values
categories_test = valid_y.values
def train_tokenizer(posts, categories):
    """Tokenize ``posts`` with the global ``tokenizer`` and tensorize ``categories``.

    Returns (input_ids, attention_masks, labels) as stacked torch tensors, each
    sentence padded/truncated to MAX_LENGTH.
    """
    input_ids = []
    attention_masks = []
    # For every sentence...
    for sent in posts:
        # `encode_plus` will:
        #   (1) Tokenize the sentence.
        #   (2) Prepend the `[CLS]` token to the start.
        #   (3) Append the `[SEP]` token to the end.
        #   (4) Map tokens to their IDs.
        #   (5) Pad or truncate the sentence to `max_length`.
        #   (6) Create attention masks for [PAD] tokens.
        encoded_dict = tokenizer.encode_plus(
                            sent,                           # Sentence to encode.
                            truncation=True,
                            add_special_tokens = True,      # Add '[CLS]' and '[SEP]'
                            max_length = MAX_LENGTH,        # Pad & truncate all sentences.
                            padding = 'max_length',         # replaces deprecated pad_to_max_length=True
                            return_attention_mask = True,   # Construct attn. masks.
                            return_tensors = 'pt',          # Return pytorch tensors.
                       )
        # Add the encoded sentence to the list.
        input_ids.append(encoded_dict['input_ids'])
        # And its attention mask (simply differentiates padding from non-padding).
        attention_masks.append(encoded_dict['attention_mask'])
    # Stack the per-sentence tensors into single (N, MAX_LENGTH) tensors.
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    labels = torch.tensor(categories)
    return input_ids, attention_masks, labels
def build_dataset(input_ids, attention_masks, labels):
    """Wrap the tensors in a TensorDataset and return (train, validation) loaders.

    Uses an 87.5% / 12.5% random split; training batches are drawn randomly,
    validation batches sequentially, both of size BATCH_SIZE.
    """
    full_set = TensorDataset(input_ids, attention_masks, labels)
    n_train = int(0.875 * len(full_set))
    n_val = len(full_set) - n_train
    # Divide the dataset by randomly selecting samples.
    train_split, val_split = random_split(full_set, [n_train, n_val])
    print('{:>5,} training samples'.format(n_train))
    print('{:>5,} validation samples'.format(n_val))
    train_loader = DataLoader(train_split,
                              sampler=RandomSampler(train_split),
                              batch_size=BATCH_SIZE)
    val_loader = DataLoader(val_split,
                            sampler=SequentialSampler(val_split),
                            batch_size=BATCH_SIZE)
    return train_loader, val_loader
# +
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
# model = AutoModelForSequenceClassification.from_pretrained(
# models[model_num], # Use the 12-layer BERT model, with an uncased vocab.
# num_labels = 2, # The number of output labels--2 for binary classification.
# # You can increase this for multi-class tasks.
# output_attentions = False, # Whether the model returns attentions weights.
# output_hidden_states = False, # Whether the model returns all hidden-states.
# )
def build_model(name):
    """Instantiate the selected pretrained classifier, move it to ``device``,
    and print a short summary of its named parameters."""
    net = AutoModelForSequenceClassification.from_pretrained(
        models[model_num],          # checkpoint selected at the top of the notebook
        num_labels = len(labels),   # one output unit per class
        output_attentions = False,
        output_hidden_states = False,
        return_dict=True
    )
    # Tell pytorch to run this model on the GPU.
    net.to(device)
    named = list(net.named_parameters())
    print('The BERT model has {:} different named parameters.\n'.format(len(named)))
    # Print a few parameters from the embedding layer, the first transformer
    # block, and the classification head.
    sections = [('==== Embedding Layer ====\n', named[0:5]),
                ('\n==== First Transformer ====\n', named[5:21]),
                ('\n==== Output Layer ====\n', named[-4:])]
    for header, group in sections:
        print(header)
        for pname, tensor in group:
            print("{:<55} {:>12}".format(pname, str(tuple(tensor.size()))))
    return net
# -
def build_optimizer(model, train_dataloader):
    """Create the AdamW optimizer plus a linear decay schedule with no warmup."""
    optimizer = AdamW(model.parameters(),
                      lr = 5e-5,   # default learning rate from run_glue.py
                      eps = 1e-8)  # default adam epsilon
    # One scheduler step per batch, over EPOCHS full passes.
    steps = len(train_dataloader) * EPOCHS
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps = 0,
                                                num_training_steps = steps)
    return optimizer, scheduler
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Fraction of rows in ``preds`` whose argmax matches the gold label."""
    guesses = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return np.sum(guesses == gold) / len(gold)
# +
def format_time(elapsed):
    """Render a duration given in seconds as an ``h:mm:ss`` string."""
    # Round to the nearest whole second before formatting.
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# -
def train(model, optimizer, scheduler, train_dataloader, validation_dataloader):
    """Fine-tune ``model`` for EPOCHS epochs with checkpoint backtracking.

    After each epoch the model is validated. If validation loss worsened AND
    accuracy dropped, the previous best checkpoint is reloaded from
    ``best_model_path`` and the learning rate is halved; otherwise the current
    model is saved as the new best. Returns ``(best_model, training_stats)``.
    """
    # Fix every RNG so runs are repeatable.
    seed_val = 42
    torch.cuda.empty_cache()
    random.seed(seed_val)
    np.random.seed(seed_val)
    torch.manual_seed(seed_val)
    torch.cuda.manual_seed_all(seed_val)
    # We'll store a number of quantities such as training and validation loss,
    # validation accuracy, and timings.
    training_stats = []
    # Measure the total training time for the whole run.
    total_t0 = time.time()
    # For each epoch...
    # Backtracking state: metrics of the last "good" epoch plus a first-epoch flag.
    prev_loss = 0
    prev_acc = 0
    first_epoch = 1
    for epoch_i in range(0, EPOCHS):
        # ========================================
        # Training
        # ========================================
        # Perform one full pass over the training set.
        print("")
        print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, EPOCHS))
        print('Training...')
        # Measure how long the training epoch takes.
        t0 = time.time()
        # Reset the total loss for this epoch.
        total_train_loss = 0
        # Put the model into training mode. Don't be mislead--the call to
        # `train` just changes the *mode*, it doesn't *perform* the training.
        # `dropout` and `batchnorm` layers behave differently during training
        # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
        model.train()
        # For each batch of training data...
        for step, batch in enumerate(train_dataloader):
            # Progress update every 40 batches.
            if step % 40 == 0 and not step == 0:
                # Calculate elapsed time in minutes.
                elapsed = format_time(time.time() - t0)
                # Report progress.
                print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
            # Unpack this training batch from our dataloader.
            #
            # As we unpack the batch, we'll also copy each tensor to the GPU using the
            # `to` method.
            #
            # `batch` contains three pytorch tensors:
            # [0]: input ids
            # [1]: attention masks
            # [2]: labels
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            # Always clear any previously calculated gradients before performing a
            # backward pass. PyTorch doesn't do this automatically because
            # accumulating the gradients is "convenient while training RNNs".
            # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
            model.zero_grad()
            # Perform a forward pass (evaluate the model on this training batch).
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            # It returns different numbers of parameters depending on what arguments
            # arge given and what flags are set. For our useage here, it returns
            # the loss (because we provided labels) and the "logits"--the model
            # outputs prior to activation.
            outputs = model(b_input_ids,
                            attention_mask=b_input_mask,
                            labels=b_labels)
            loss = outputs.loss
            logits = outputs.logits
            # Accumulate the training loss over all of the batches so that we can
            # calculate the average loss at the end. `loss` is a Tensor containing a
            # single value; the `.item()` function just returns the Python value
            # from the tensor.
            total_train_loss += loss.item()
            # Perform a backward pass to calculate the gradients.
            loss.backward()
            # Clip the norm of the gradients to 1.0.
            # This is to help prevent the "exploding gradients" problem.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            # Update parameters and take a step using the computed gradient.
            # The optimizer dictates the "update rule"--how the parameters are
            # modified based on their gradients, the learning rate, etc.
            optimizer.step()
            # Update the learning rate.
            scheduler.step()
        # Calculate the average loss over all of the batches.
        avg_train_loss = total_train_loss / len(train_dataloader)
        # Measure how long this epoch took.
        training_time = format_time(time.time() - t0)
        print("")
        print(" Average training loss: {0:.2f}".format(avg_train_loss))
        print(" Training epcoh took: {:}".format(training_time))
        # ========================================
        # Validation
        # ========================================
        # After the completion of each training epoch, measure our performance on
        # our validation set.
        print("")
        print("Running Validation...")
        t0 = time.time()
        # Put the model in evaluation mode--the dropout layers behave differently
        # during evaluation.
        model.eval()
        # Tracking variables
        total_eval_accuracy = 0
        total_eval_loss = 0
        nb_eval_steps = 0  # unused; kept from the original tutorial code
        # Evaluate data for one epoch
        for batch in validation_dataloader:
            # Unpack this training batch from our dataloader.
            #
            # As we unpack the batch, we'll also copy each tensor to the GPU using
            # the `to` method.
            #
            # `batch` contains three pytorch tensors:
            # [0]: input ids
            # [1]: attention masks
            # [2]: labels
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            # Tell pytorch not to bother with constructing the compute graph during
            # the forward pass, since this is only needed for backprop (training).
            with torch.no_grad():
                # Forward pass, calculate logit predictions.
                # token_type_ids is the same as the "segment ids", which
                # differentiates sentence 1 and 2 in 2-sentence tasks.
                # The documentation for this `model` function is here:
                # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
                # Get the "logits" output by the model. The "logits" are the output
                # values prior to applying an activation function like the softmax.
                outputs = model(b_input_ids,
                                attention_mask=b_input_mask,
                                labels=b_labels)
                loss = outputs.loss
                logits = outputs.logits
            # Accumulate the validation loss.
            total_eval_loss += loss.item()
            # Move logits and labels to CPU
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()
            # Calculate the accuracy for this batch of test sentences, and
            # accumulate it over all batches.
            total_eval_accuracy += flat_accuracy(logits, label_ids)
        # Report the final accuracy for this validation run.
        avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
        print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
        # Calculate the average loss over all of the batches.
        avg_val_loss = total_eval_loss / len(validation_dataloader)
        # Measure how long the validation run took.
        validation_time = format_time(time.time() - t0)
        print(" Validation Loss: {0:.2f}".format(avg_val_loss))
        print(" Validation took: {:}".format(validation_time))
        # Backtracking: only revert when BOTH loss got worse and accuracy dropped;
        # otherwise accept the epoch and checkpoint the model.
        if not first_epoch:
            if prev_loss < avg_val_loss and prev_acc > avg_val_accuracy:
                print("Backtracking")
                # NOTE(review): torch.load without map_location assumes the
                # checkpoint is loaded on the same device it was saved from.
                model = torch.load(best_model_path)
                # Halve the learning rate after reverting.
                for g in optimizer.param_groups:
                    g['lr'] /= 2
            else:
                print("Going on track")
                prev_loss = avg_val_loss
                prev_acc = avg_val_accuracy
                torch.save(model, best_model_path)
        else:
            # First epoch is always accepted and checkpointed.
            print("Going on track")
            first_epoch = 0
            prev_loss = avg_val_loss
            prev_acc = avg_val_accuracy
            torch.save(model, best_model_path)
        # Record all statistics from this epoch.
        training_stats.append(
            {
                'epoch': epoch_i + 1,
                'Training Loss': avg_train_loss,
                'Valid. Loss': avg_val_loss,
                'Valid. Accur.': avg_val_accuracy,
                'Training Time': training_time,
                'Validation Time': validation_time
            }
        )
        # if epoch_i >= 0:
        # inp = input()
        # if inp.startswith('y'):
        # break
    print("")
    print("Training complete!")
    print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
    # Return the best checkpoint, not necessarily the last epoch's weights.
    model = torch.load(best_model_path)
    return model, training_stats
def test_model(posts, categories, model):
    """Run ``model`` over ``posts`` and print a classification report.

    posts: iterable of raw text strings; categories: matching integer labels.
    Produces output via print only; nothing is returned.
    """
    input_ids = []
    attention_masks = []
    # Tokenize every sentence exactly as was done for training.
    for sent in posts:
        encoded_dict = tokenizer.encode_plus(
                            sent,                           # Sentence to encode.
                            truncation=True,
                            add_special_tokens = True,      # Add '[CLS]' and '[SEP]'
                            max_length = MAX_LENGTH,        # Pad & truncate all sentences.
                            padding = 'max_length',         # replaces deprecated pad_to_max_length=True
                            return_attention_mask = True,   # Construct attn. masks.
                            return_tensors = 'pt',          # Return pytorch tensors.
                       )
        # Add the encoded sentence to the list.
        input_ids.append(encoded_dict['input_ids'])
        # And its attention mask (simply differentiates padding from non-padding).
        attention_masks.append(encoded_dict['attention_mask'])
    # Convert the lists into tensors.
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    labels = torch.tensor(categories)
    # Set the batch size.
    batch_size = 32
    # Create the DataLoader; sequential sampling keeps prediction order aligned
    # with `categories`.
    prediction_data = TensorDataset(input_ids, attention_masks, labels)
    prediction_sampler = SequentialSampler(prediction_data)
    prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
    print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
    # Put model in evaluation mode (disables dropout).
    model.eval()
    # Tracking variables
    predictions , true_labels = [], []
    # Predict
    for batch in prediction_dataloader:
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Telling the model not to compute or store gradients, saving memory and
        # speeding up prediction
        with torch.no_grad():
            # Forward pass, calculate logit predictions
            outputs = model(b_input_ids,
                            attention_mask=b_input_mask)
            logits = outputs[0]
        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        # Store predictions and true labels
        predictions.append(logits)
        true_labels.append(label_ids)
    print(' DONE.')
    flat_predictions = np.concatenate(predictions, axis=0)
    # For each sample, pick the label with the highest score.
    flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
    # Combine the correct labels for each batch into a single list.
    flat_true_labels = np.concatenate(true_labels, axis=0)
    print(classification_report(flat_true_labels, flat_predictions))
def classifier(name):
    """End-to-end run: tokenize, build loaders/model/optimizer, train, evaluate.

    Uses the module-level train/validation splits; returns the per-epoch
    training statistics collected by ``train``.
    """
    ids, masks, gold = train_tokenizer(train_x.values, train_y.values)
    loader_train, loader_val = build_dataset(ids, masks, gold)
    net = build_model(name)
    opt, sched = build_optimizer(net, loader_train)
    net, stats = train(net, opt, sched, loader_train, loader_val)
    # Final evaluation on the held-out split.
    test_model(valid_x.values, valid_y.values, net)
    return stats
# Capture the returned statistics: the reporting cells below reference
# `training_stats`, which was previously never assigned (NameError).
training_stats = classifier(models[model_num])
# +
import pandas as pd
# Display floats with two decimal places.
# The bare 'precision' option alias was deprecated and removed in pandas 2.0;
# the fully-qualified 'display.precision' name works on all recent versions.
pd.set_option('display.precision', 2)
# Create a DataFrame from our training statistics.
df_stats = pd.DataFrame(data=training_stats)
# Use the 'epoch' as the row index.
df_stats = df_stats.set_index('epoch')
# A hack to force the column headers to wrap.
#df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])])
# Display the table.
df_stats
# +
sns.set(style='darkgrid')
# Increase the plot size and font size.
sns.set(font_scale=1.5)
plt.rcParams["figure.figsize"] = (12,6)
# Plot the learning curve.
plt.plot(df_stats['Training Loss'], 'b-o', label="Training")
plt.plot(df_stats['Valid. Loss'], 'g-o', label="Validation")
# Label the plot.
plt.title("Training & Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
# NOTE(review): only epochs 1-4 are ticked even though EPOCHS may be larger.
plt.xticks([1, 2, 3, 4])
plt.show()
# -
# Re-tokenize the validation split for the standalone prediction cell below.
posts = valid_x.values
categories = valid_y.values
# +
input_ids = []
attention_masks = []
# For every sentence...
for sent in posts:
    # `encode_plus` tokenizes, adds [CLS]/[SEP], maps tokens to IDs, pads or
    # truncates to MAX_LENGTH and builds the attention mask.
    encoded_dict = tokenizer.encode_plus(
                        sent,                           # Sentence to encode.
                        truncation=True,
                        add_special_tokens = True,      # Add '[CLS]' and '[SEP]'
                        max_length = MAX_LENGTH,        # Pad & truncate all sentences.
                        padding = 'max_length',         # replaces deprecated pad_to_max_length=True
                        return_attention_mask = True,   # Construct attn. masks.
                        return_tensors = 'pt',          # Return pytorch tensors.
                   )
    # Add the encoded sentence to the list.
    input_ids.append(encoded_dict['input_ids'])
    # And its attention mask (simply differentiates padding from non-padding).
    attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(categories)
# Set the batch size.
batch_size = 32
# Create the DataLoader (sequential so predictions stay aligned with labels).
prediction_data = TensorDataset(input_ids, attention_masks, labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
# +
print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
# Put model in evaluation mode
# NOTE(review): `model` is never assigned at module scope (classifier() returns
# only the stats), so this cell raises NameError unless the model is first
# reloaded, e.g. model = torch.load(best_model_path).
model.eval()
# Tracking variables
predictions , true_labels = [], []
# Predict
for batch in prediction_dataloader:
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Telling the model not to compute or store gradients, saving memory and
    # speeding up prediction
    with torch.no_grad():
        # Forward pass, calculate logit predictions
        outputs = model(b_input_ids,
                        attention_mask=b_input_mask)
    logits = outputs[0]
    # Move logits and labels to CPU
    logits = logits.detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    # Store predictions and true labels
    predictions.append(logits)
    true_labels.append(label_ids)
print(' DONE.')
# +
# Flatten the batch-wise outputs into single arrays for scoring.
flat_predictions = np.concatenate(predictions, axis=0)
# For each sample, pick the label (0 or 1) with the higher score.
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
# Combine the correct labels for each batch into a single list.
flat_true_labels = np.concatenate(true_labels, axis=0)
# -
from sklearn.metrics import classification_report
| .ipynb_checkpoints/hindi_tr-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook produces IMF integrated metallicity dependent net yields. As an example we produce the default and alternative tables for the single sun+ best fit (which fix the SSP parameter). This notebook is just an example showing illustratively how to produce these yield tables. If you actually want to use these for your Chemical evolution model you will probably want to increase the time_steps and the mass_steps. You could also consult me to get an idea of sensible Parameters and yield sets that one could use. Mostly those ideas will be based on: http://adsabs.harvard.edu/abs/2018ApJ...861...40P
# %pylab inline
import multiprocessing as mp
from Chempy.parameter import ModelParameters
# Default Chempy parameter set; individual fields are overridden further below.
a = ModelParameters()
# +
# Load solar abundances
from Chempy.solar_abundance import solar_abundances
basic_solar = solar_abundances()
getattr(basic_solar, 'Asplund09')()
# Initialise the SSP class with time-steps
# (521 linear steps over 13.5 Gyr; increase for production-quality tables).
time_steps = np.linspace(0.,13.5,521)
# Load the default yields
from Chempy.yields import SN2_feedback, AGB_feedback, SN1a_feedback
basic_sn2 = SN2_feedback()
getattr(basic_sn2, 'Nomoto2013_net')()
basic_1a = SN1a_feedback()
getattr(basic_1a, "Seitenzahl")()
basic_agb = AGB_feedback()
getattr(basic_agb, "Karakas_net_yield")()
# Load the alternative yields
alt_sn2 = SN2_feedback()
getattr(alt_sn2, 'chieffi04_net')()
alt_1a = SN1a_feedback()
getattr(alt_1a, "Thielemann")()
alt_agb = AGB_feedback()
getattr(alt_agb, "Ventura")()
# Use all elements that are traced
# (union of every element provided by either yield set).
elements_to_trace = list(np.unique(basic_agb.elements+basic_sn2.elements+basic_1a.elements+alt_agb.elements+alt_sn2.elements+alt_1a.elements))
#elements_to_trace = ['H','He','C','N','O','Na','Al','Mg','Si','Ca','Ti','Mn','Fe','Ba','Ne']
print('all the traced elements: ',elements_to_trace)
# Producing the SSP birth elemental fractions (here we use solar)
solar_fractions = []
elements = np.hstack(basic_solar.all_elements)
for item in elements_to_trace:
    solar_fractions.append(float(basic_solar.fractions[np.where(elements==item)]))
# +
# Chempy SSP parameters for the DEFAULT yield set; the "alternative" values are
# left commented so the same cell can reproduce the alternative tables.
# yieldset default
a.yield_table_name_sn2 = 'Nomoto2013_net'
a.yield_table_name_agb = 'Karakas_net_yield'
a.yield_table_name_1a = 'Seitenzahl'
# yieldset alternative
#a.yield_table_name_sn2 = 'chieffi04_net'
#a.yield_table_name_agb = 'Ventura'
#a.yield_table_name_1a = 'Thielemann'
# imf parameters
a.only_net_yields_in_process_tables = True
a.imf_type_name = 'Chabrier_2'
#default
a.high_mass_slope = -2.46
#alternative
#a.high_mass_slope = -2.51
a.imf_parameter = (22.8978, 716.4, 0.25, a.high_mass_slope)
a.mmin = 0.1
a.mmax = 100
# 100,000,000 mass steps are smooth enough for 1000 time steps
a.mass_steps = 200000 #2000 # 200000
a.sn2mmin = 8.
a.sn2mmax = 100.
a.bhmmin = float(a.sn2mmax) ## maximum of hypernova
a.bhmmax = float(a.mmax) ## maximum of the IMF
# sn1a delay parameters for maoz
#default
a.N_0 = np.power(10,-3.07)
a.sn1a_time_delay = np.power(10,-0.8)
#alternative
#a.N_0 = np.power(10,-3.49)
#a.sn1a_time_delay = np.power(10,-0.88)
a.sn1a_exponent = 1.12
a.dummy = 0.0
a.sn1a_parameter = [a.N_0,a.sn1a_time_delay,a.sn1a_exponent,a.dummy]
######################## END OF SETTING CHEMPY PARAMETER FOR SSP YIELD TABLE PRODUCTION
# +
# SSP tables are computed on this log-spaced metallicity grid (2 points here;
# increase for production use).
list_of_metallicities = np.logspace(-4,-2,2)
# Output container: [metallicity grid, time steps, list of per-Z tables].
list_of_SSP_tables = []
list_of_SSP_tables.append(list_of_metallicities)
list_of_SSP_tables.append(time_steps)
from Chempy.wrapper import SSP_wrap
def create_one_SSP_table(parameters):
    """Compute the SSP feedback table for a single metallicity.

    Relies on the module-level Chempy parameter object ``a`` as well as the
    globals ``elements_to_trace``, ``solar_fractions`` and ``time_steps``.
    Returns the combined per-timestep (structured) yield table.
    """
    metallicity = parameters
    # True is the default Chempy behaviour: keep per-timestep (differential) yields
    differential_table = True
    print(metallicity, a.yield_table_name_sn2)
    ssp = SSP_wrap(a)
    ssp.calculate_feedback(metallicity, list(elements_to_trace), list(solar_fractions), np.copy(time_steps))
    # Sum the contribution of all enrichment channels, field by field.
    process_tables = (ssp.agb_table, ssp.sn1a_table, ssp.sn2_table, ssp.bh_table)
    combined = np.zeros_like(process_tables[0])
    for field in combined.dtype.names:
        combined[field] = sum(tab[field] for tab in process_tables)
    table = ssp.table
    for el in elements_to_trace:
        # differential: amount per time interval; otherwise store cumulatively
        table[el] = combined[el] if differential_table else np.cumsum(combined[el])
    # The bookkeeping columns are always stored cumulatively.
    for name in ['mass_of_ms_stars_dying', 'mass_in_remnants', 'sn2', 'sn1a', 'pn', 'bh', 'hydrogen_mass_accreted_onto_white_dwarfs', 'unprocessed_ejecta']:
        table[name] = np.cumsum(table[name])
    return table
# +
# Calculate the SSP tables for all metallicities in parallel.
# NOTE(review): `mp` is not imported in this cell -- presumably
# `import multiprocessing as mp` happens earlier in the notebook; verify.
print("There are %d CPUs on this machine" % mp.cpu_count())
number_processes = max(1,mp.cpu_count() - 1)
pool = mp.Pool(number_processes)
# each worker inherits the module-level Chempy parameter object `a`
results = pool.map(create_one_SSP_table, list_of_metallicities)
pool.close()
pool.join()
list_of_SSP_tables.append(results)
np.save('data/paper_1_ssp_tables/SSP_tables_default', list_of_SSP_tables)
# -
print('the metallicities for which SSP yield tables were calculated: ',list_of_SSP_tables[0])
print('metallicities: ',len(list_of_SSP_tables[2]))
print('timesteps: ', len(list_of_SSP_tables[2][0]), '= ', len(list_of_SSP_tables[1]))
print('the data type of the SSP yield table: ',list_of_SSP_tables[2][0].dtype)
# x / z: tables for the lowest (Z=1e-4) and highest (Z=1e-2) metallicity
x = list_of_SSP_tables[2][0]
z = list_of_SSP_tables[2][1]
# +
alpha = 0.5    # transparency for the time-step marker row
factor = 1.05  # pushes element labels just right of the last data point
## Actual plotting
fig = plt.figure(figsize=(8.69,6.69), dpi=100)
ax = fig.add_subplot(111)
# Plot the cumulative net yield of each element for both metallicities and
# label each curve at its right-hand end.
# Fix: annotate() takes the text as its first positional argument; the old
# `s=` keyword was removed in Matplotlib 3.3 (the positional form works in
# both old and new versions).
for element in ["Fe", "Mg", "Al", "C"]:
    # only the first pair of curves feeds the legend
    label_low = 'Z = 0.0001' if element == "Fe" else None
    label_high = 'Z = 0.01' if element == "Fe" else None
    ax.plot(time_steps, np.cumsum(x[element]), 'b', label = label_low)
    ax.annotate(element, xy = (time_steps[-1]*factor, np.sum(x[element])*0.9), color = 'b')
    ax.plot(time_steps, np.cumsum(z[element]), 'r', label = label_high)
    ax.annotate(element, xy = (time_steps[-1]*factor, np.sum(z[element])), color = 'r')
# horizontal row of tick marks indicating the model time-steps
ax.plot(time_steps,np.ones_like(time_steps)*5e-3,marker = '|', markersize = 10, linestyle = '', color = 'k', alpha = 2*alpha)#, label = 'time-steps')
ax.annotate('model time-steps', xy = (time_steps[1],2.7e-3), color = 'k', alpha = 2*alpha)
ax.legend(loc = 'best')
ax.set_ylim(2e-5,6e-3)
ax.set_xlim(7e-3,25)
ax.set_title(r'default yield of SSP with mass = 1M$_\odot$ for different metallicies')
ax.set_ylabel(r"net yield in M$_\odot$")
ax.set_xlabel("time in Gyr")
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
# +
# Repeat the SSP table production with the ALTERNATIVE yieldset / SSP
# parameters (same machinery as above, only the Chempy parameters change).
# yieldset alternative
a.yield_table_name_sn2 = 'chieffi04_net'
a.yield_table_name_agb = 'Ventura'
a.yield_table_name_1a = 'Thielemann'
a.high_mass_slope = -2.51
a.imf_parameter = (22.8978, 716.4, 0.25, a.high_mass_slope)
a.N_0 = np.power(10,-3.49)
a.sn1a_time_delay = np.power(10,-0.88)
a.sn1a_parameter = [a.N_0,a.sn1a_time_delay,a.sn1a_exponent,a.dummy]
######################## END OF SETTING CHEMPY PARAMETER FOR SSP YIELD TABLE PRODUCTION
list_of_SSP_tables_alt = []
list_of_SSP_tables_alt.append(list_of_metallicities)
list_of_SSP_tables_alt.append(time_steps)
print("There are %d CPUs on this machine" % mp.cpu_count())
number_processes = max(1,mp.cpu_count() - 1)
# NOTE(review): the worker function reads the mutated global `a`; this relies
# on fork-style process start inheriting the updated object -- verify on
# platforms that use the 'spawn' start method.
pool = mp.Pool(number_processes)
results = pool.map(create_one_SSP_table, list_of_metallicities)
pool.close()
pool.join()
list_of_SSP_tables_alt.append(results)
np.save('data/paper_1_ssp_tables/SSP_tables_alternative', list_of_SSP_tables_alt)
# -
print('the data type of the SSP yield table: ',list_of_SSP_tables[2][0].dtype)
# table for Z = 0.01 from the ALTERNATIVE yield set (z, used below, is the
# default-yield-set table at the same metallicity)
y = list_of_SSP_tables_alt[2][1]
# +
## Actual plotting
fig = plt.figure(figsize=(8.69,6.69), dpi=100)
ax = fig.add_subplot(111)
# Compare cumulative net yields of the alternative (blue) and default (red)
# yield sets at Z = 0.01.
# Fix: annotate() takes the text as its first positional argument; the old
# `s=` keyword was removed in Matplotlib 3.3 (the positional form works in
# both old and new versions).
for element in ["Fe", "Mg", "Al", "C"]:
    # only the first pair of curves feeds the legend
    label_alt = 'alternative' if element == "Fe" else None
    label_def = 'default' if element == "Fe" else None
    ax.plot(time_steps, np.cumsum(y[element]), 'b', label = label_alt)
    ax.annotate(element, xy = (time_steps[-1]*factor, np.sum(y[element])*0.9), color = 'b')
    ax.plot(time_steps, np.cumsum(z[element]), 'r', label = label_def)
    ax.annotate(element, xy = (time_steps[-1]*factor, np.sum(z[element])), color = 'r')
# horizontal row of tick marks indicating the model time-steps
ax.plot(time_steps,np.ones_like(time_steps)*5e-3,marker = '|', markersize = 10, linestyle = '', color = 'k', alpha = 2*alpha)#, label = 'time-steps')
ax.annotate('model time-steps', xy = (time_steps[1],2.7e-3), color = 'k', alpha = 2*alpha)
ax.legend(loc = 'best')
ax.set_ylim(2e-5,6e-3)
ax.set_xlim(7e-3,25)
ax.set_title(r'yield of SSP with mass = 1M$_\odot$ for the different yieldsets for Z=0.01')
ax.set_ylabel(r"net yield in M$_\odot$")
ax.set_xlabel("time in Gyr")
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
# -
# Beware, that both yieldsets were optimized to reproduce solar abundances, so in fact they look more similar here than they actually are, because the IMF and SNIa contribution is different. If you want to see the difference for the same SSP parameters check Paper 1 figure 4.
### so what is actually in these files:
print('these are the respective metal mass fractions for which the SSP yield tables were calculated')
print(list_of_SSP_tables[0])
print('these are the %d timesteps for which the yield was calculated. It is the time after SSP birth in Gyrs starting at 0 and going up to 13.5Gyrs' %(len(list_of_SSP_tables[1])))
print(list_of_SSP_tables[1])
print('in the last index of list_of_SSP_tables the actual tables are stored. For each metallicity there is one corresponding table in the same order. Therefore we have to tables')
len(list_of_SSP_tables[2])
print("let's have a look at the table calculated for 0.01 metallicity:")
# y: the SSP table for Z = 0.01; time: the shared time axis
y = list_of_SSP_tables[2][1]
time = list_of_SSP_tables[1]
print(len(y), len(time),y.dtype.names)
print('we see that it has a length of the number of timesteps. So each entry corresponds to the respective timestep')
print('lets have a look at the different fields:')
print('as you can see the differential values are given, i.e. the amount of change per time interval, except for "mass_in_ms_stars" which gives the actual fraction')
print('note also all values are given in mass fraction, i.e. normed to 1Msun(, as if the SSP had a mass of 1Msun)')
fields = ['mass_of_ms_stars_dying', 'mass_in_ms_stars', 'mass_in_remnants', 'sn2', 'sn1a', 'pn', 'bh', 'hydrogen_mass_accreted_onto_white_dwarfs', 'unprocessed_ejecta', 'Fe']
# one quick-look plot per field (differential values)
for i,item in enumerate(fields):
    plt.plot(time,y[item])
    plt.xlabel('time in Gyr')
    plt.ylabel(item)
    plt.title(i)
    plt.show()
    plt.clf()
    plt.close()
# mass bookkeeping: dying MS stars + surviving MS stars should add up to ~1
plt.plot(time,np.cumsum(y[fields[0]]), label=fields[0])
plt.plot(time,y[fields[1]],label = fields[1])
plt.plot(time,np.cumsum(y[fields[0]])+y[fields[1]],label = 'ms + ms_dying')
plt.legend()
plt.xlabel('time in Gyr')
plt.ylabel('mass fraction of SSP')
print('beware that for "mass_in_ms_stars" we do not need to use the cumulative sum')
# where the mass of dying stars ends up: remnants, unprocessed ejecta, H onto WDs
plt.plot(time,np.cumsum(y[fields[2]]), label=fields[2])
plt.plot(time,np.cumsum(y[fields[8]]), label=fields[8])
plt.plot(time,np.cumsum(y[fields[7]]), label=fields[7])
plt.plot(time,np.cumsum(y[fields[0]]), label=fields[0])
plt.legend()
plt.title("'unprocessed_ejecta' + 'mass_in_remnants' + 'hydrogen_mass_accreted_onto_white_dwarfs' = 'mass_of_ms_stars_dying', modula numerical instabilities")
plt.xlabel('time in Gyr')
plt.ylabel('mass fraction of SSP')
print("'unprocessed_ejecta' should be everything that is not turned into remnants and dies (for net yields)")
print("'hydrogen_mass_accreted_onto_white_dwarfs', probably negligible but for mass conservation for each supernova type 1a we remove remnant mass of 0.6Msun and add the missing 0.84Msun in form of hydrogen")
# cumulative event counts of the enrichment channels (sn2, sn1a, pn, bh)
plt.plot(time,np.cumsum(y[fields[3]]),label = fields[3])
plt.plot(time,np.cumsum(y[fields[4]]),label = fields[4])
plt.plot(time,np.cumsum(y[fields[5]]),label = fields[5])
plt.plot(time,np.cumsum(y[fields[6]]),label = fields[6])
plt.title('Cumulative occurence of different enrichment processes over time')
plt.legend()
plt.xlabel('time in Gyr')
plt.ylabel('number per 1Msun of SSP')
plt.yscale('log')
print('These are no mass fractions but occurence rate per 1Msun of SSP.')
print('Over a Hubble time, per 1Msun of SSP we have approximately 0.1 Planetary nebula, 0.01 SNII, 0.001 SN1a. Black holes were not used in this SPP generation')
# all element columns present in the SSP table
elements = ['Al', 'Ar', 'As', 'B', 'Be', 'Br', 'C', 'Ca', 'Cl', 'Co', 'Cr', 'Cu', 'F', 'Fe', 'Ga', 'Ge', 'H', 'He', 'K', 'Kr', 'Li', 'Mg', 'Mn', 'Mo', 'N', 'Na', 'Nb', 'Ne', 'Ni', 'O', 'P', 'Rb', 'S', 'Sc', 'Se', 'Si', 'Sr', 'Ti', 'V', 'Y', 'Zn', 'Zr']
# +
# Sum the cumulative net yield of all metals (everything except H and He).
net_metals = np.zeros_like(y['Fe'])
for item in elements:
    if item not in ['H','He']:
        net_metals += np.cumsum(y[item])
plt.plot(time,np.cumsum(y['H'])+np.cumsum(y['He']), label = 'H+He')
plt.plot(time,net_metals,label = 'metals = all other elements summed')
plt.plot(time,net_metals+np.cumsum(y['H'])+np.cumsum(y['He']),label = 'H+He+metals')
plt.xlabel('time in Gyr')
plt.ylabel('mass fraction')
plt.legend()
plt.show()
plt.clf()
plt.close()
print('we see that all elements are synthesised from H. If we look at metals produced we see that 0.015 percent of an SSP is turned into metals within a Hubble time for a 0.01Z SSP.')
print('my guess for the small discrepancy we see are hydrogen accreted onto dwarfs, or elements that are not part of the yields tables or numerical instabilities. I can look that up upon request')
# -
# Metals produced vs. unprocessed (returned-as-was) ejecta.
plt.plot(time,net_metals,label = 'metals')
plt.plot(time,np.cumsum(y['unprocessed_ejecta']), label = 'unprocessed_ejecta')
plt.xlabel('time in Gyr')
plt.ylabel('mass fraction')
plt.legend()
plt.show()
plt.clf()
plt.close()
print('we see that most of the ejecta are truly unprocessed, i.e. it is returned what were the initial abundances of the SSP')
print('only a tiny fraction new metals are ejected')
## Just for completeness all elements over time!
for i,element in enumerate(elements):
    plt.plot(time,np.cumsum(y[element]))
    plt.xlabel('time in Gyr')
    plt.ylabel(element)
    plt.show()
    plt.clf()
    plt.close()
| tutorials/8-Yield tables for SPH simulations and comparison to other tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
#Store Data Variables
import json
with open('feature_data.json', 'r') as f:
    features = json.load(f)
from scipy.io import loadmat
# Load the CUHK03 "new protocol" configuration ONCE instead of re-parsing the
# same .mat file for every field (the original called loadmat six times).
_config = loadmat('cuhk03_new_protocol_config_labeled.mat')
train_idxs = _config['train_idx'].flatten()
query_idxs = _config['query_idx'].flatten()
labels = _config['labels'].flatten()
gallery_idxs = _config['gallery_idx'].flatten()
filelist = _config['filelist'].flatten()
camId = _config['camId'].flatten()
# +
# Convert everything to numpy arrays so fancy indexing works below.
X = np.array(features)
y = np.array(labels)
filelist = np.array(filelist)
camId = np.array(camId)
# +
mask_train = np.array(train_idxs).ravel()
mask_query = np.array(query_idxs).ravel()
mask_gallery = np.array(gallery_idxs).ravel()
# the .mat indices are 1-based (MATLAB convention); shift to 0-based for numpy
mask_train = np.subtract(mask_train, 1)
mask_query = np.subtract(mask_query, 1)
mask_gallery = np.subtract(mask_gallery, 1)
# Split features / labels / filenames / camera ids into the three protocol sets.
X_train, X_query, X_gallery = X[mask_train, :], X[mask_query, :], X[mask_gallery, :]
y_train, y_query, y_gallery = y[mask_train], y[mask_query], y[mask_gallery]
filelist_train, filelist_query, filelist_gallery = filelist[mask_train], filelist[mask_query], filelist[mask_gallery]
camId_train, camId_query, camId_gallery = camId[mask_train], camId[mask_query], camId[mask_gallery]
# -
def get_acc_score(y_valid, y_q, tot_label_occur):
    """Score one query: 11-point interpolated AP and a rank-k hit vector.

    ``y_valid`` is the gallery label array sorted by increasing distance,
    ``y_q`` the query label and ``tot_label_occur`` the number of true
    matches present in the gallery for this query.
    Returns (average_precision, rank_accuracy_vector_of_length_30).
    """
    max_rank = 30
    rank_A = np.zeros(max_rank)
    AP_arr = np.zeros(11)
    hits = 0
    recall = 0
    k = 0
    # Walk down the ranked gallery until full recall is reached AND at least
    # max_rank entries have been inspected.
    while recall < 1 or k < max_rank:
        if y_valid[k] == y_q:
            hits += 1
            recall = hits / tot_label_occur
            precision = hits / (k + 1)
            # bucket the precision at the nearest of the 11 recall levels
            AP_arr[round((recall - 0.05) * 10)] = precision
            rank_A[k:] = 1  # a hit at rank k counts for every later rank
        k += 1
    # Standard 11-point interpolation: precision at each recall level is the
    # maximum precision observed at any recall >= that level.
    best = 0
    for i in reversed(range(11)):
        best = max(best, AP_arr[i])
        AP_arr[i] = best
    return AP_arr.sum() / 11, rank_A
# +
from scipy.spatial import distance
from sklearn.metrics import pairwise
def evaluate_metric(X_query, camId_query, y_query, X_gallery, camId_gallery, y_gallery, metric = 'euclidian', parameters = None):
    """Evaluate retrieval performance of a distance metric on the query/gallery split.

    For every query, all valid gallery images are ranked by distance (images
    with the same person id AND the same camera as the query are excluded,
    per the CUHK03 protocol); rank-k accuracies and average precision are
    accumulated via get_acc_score.

    Note: 'euclidian' (sic) is the spelling callers use for the default metric.
    `parameters` is the Mahalanobis matrix when metric == 'mahalanobis'.
    Returns (rank_accuracies, mAP) and prints a per-rank accuracy table.
    """
    rank_accuracies = []
    AP = []
    # Break condition for testing
    #q = 0
    for query, camId_q, y_q in zip(X_query, camId_query, y_query):
        q_g_dists = []
        y_valid = []
        for gallery, camId_g, y_g in zip(X_gallery, camId_gallery, y_gallery):
            # skip gallery entries of the same person seen by the same camera
            if ((camId_q == camId_g) and (y_q == y_g)):
                continue
            else:
                if metric == 'euclidian':
                    dist = distance.euclidean(query, gallery)
                elif metric == 'sqeuclidean':
                    dist = distance.sqeuclidean(query, gallery)
                elif metric == 'mahalanobis':
                    dist = distance.mahalanobis(query, gallery, parameters)
                else:
                    raise NameError('Specified metric not supported')
                q_g_dists.append(dist)
                y_valid.append(y_g)
        # number of true matches available for this query (drives recall)
        tot_label_occur = y_valid.count(y_q)
        q_g_dists = np.array(q_g_dists)
        y_valid = np.array(y_valid)
        _indexes = np.argsort(q_g_dists)
        # Sorted distances and labels
        q_g_dists, y_valid = q_g_dists[_indexes], y_valid[_indexes]
        AP_, rank_A = get_acc_score(y_valid, y_q, tot_label_occur)
        AP.append(AP_)
        rank_accuracies.append(rank_A)
        #if q > 5:
        #    break
        #q = q+1
    # average the per-query rank indicator vectors into rank-k accuracies
    rank_accuracies = np.array(rank_accuracies)
    total = rank_accuracies.shape[0]
    rank_accuracies = rank_accuracies.sum(axis = 0)
    rank_accuracies = np.divide(rank_accuracies, total)
    i = 0
    print ('Accuracies by Rank:')
    # prints five ranks per row; assumes the rank count (30) is divisible by 5
    while i < rank_accuracies.shape[0]:
        print('Rank ', i+1, ' = %.2f%%' % (rank_accuracies[i] * 100), '\t',
              'Rank ', i+2, ' = %.2f%%' % (rank_accuracies[i+1] * 100), '\t',
              'Rank ', i+3, ' = %.2f%%' % (rank_accuracies[i+2] * 100), '\t',
              'Rank ', i+4, ' = %.2f%%' % (rank_accuracies[i+3] * 100), '\t',
              'Rank ', i+5, ' = %.2f%%' % (rank_accuracies[i+4] * 100))
        i = i+5
    AP = np.array(AP)
    mAP = AP.sum()/AP.shape[0]
    print('mAP = %.2f%%' % (mAP * 100))
    return rank_accuracies, mAP
# -
# Per-metric result accumulators for the final CMC plot.
rank_accuracies_l_2 = []
mAP_l_2 = []
metric_l_2 = []
# +
#Obtained from other file
# rank-1..30 accuracies (in percent) of the kNN baseline, for comparison
rank_accuracy_base = np.array([47.00, 54.57, 59.64, 63.93, 66.86, 69.29, 71.14, 72.36, 73.71, 74.93, 75.86, 76.79, 77.71, 78.50, 79.07, 79.86, 80.64, 81.57, 82.29, 83.21, 83.50, 83.71, 84.00, 84.29, 84.79, 85.29, 85.64, 85.93, 86.07, 86.36])
# +
#Mahalanobis - inverse covariance
# Use the inverse covariance of the training features as the Mahalanobis matrix.
V = np.cov(X_train.T)
print (V.shape)
VI = np.linalg.inv(V)
print (VI.shape)
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
                                       X_gallery, camId_gallery, y_gallery,
                                       metric ='mahalanobis',
                                       parameters = VI)
rank_accuracies_l_2.append(rank_accuracies)
mAP_l_2.append(mAP)
metric_l_2.append('Mahalanobis - Covariance')
# +
from metric_learn import MMC_Supervised
from sklearn.decomposition import PCA
#Mahalanobis - learnt - reduced set
# Reduce to 150 dimensions first so that metric learning stays tractable.
pca = PCA(n_components=150)
X_train_pca = pca.fit_transform(X_train)
X_query_pca = pca.transform(X_query)
X_gallery_pca = pca.transform(X_gallery)
# MMC is fitted on the first 150 training samples only (the "reduced set").
mmc = MMC_Supervised(max_iter=50)
mmc.fit(X_train_pca[0:150], y_train[0:150])
# NOTE(review): metric() is deprecated in newer metric-learn releases in
# favour of get_mahalanobis_matrix() -- confirm the installed version.
M = mmc.metric()
print ('Metric learnt')
rank_accuracies, mAP = evaluate_metric(X_query_pca, camId_query, y_query,
                                      X_gallery_pca, camId_gallery, y_gallery,
                                      metric ='mahalanobis',
                                      parameters = M)
rank_accuracies_l_2.append(rank_accuracies)
mAP_l_2.append(mAP)
metric_l_2.append('Learnt Mahalanobis (Red. Set)')
# +
from metric_learn import LMNN
# LMNN with 3 target neighbours, fitted on the full PCA-reduced training set.
lmnn = LMNN(k=3, learn_rate=1e-6, max_iter=50)
lmnn.fit(X_train_pca, y_train)
M = lmnn.metric()
print ('Metric learnt')
rank_accuracies, mAP = evaluate_metric(X_query_pca, camId_query, y_query,
                                      X_gallery_pca, camId_gallery, y_gallery,
                                      metric ='mahalanobis',
                                      parameters = M)
rank_accuracies_l_2.append(rank_accuracies)
mAP_l_2.append(mAP)
metric_l_2.append('Learnt LMNN')
# -
# -
plt.figure(figsize=(8.0, 6.0))
color_list = ['green', 'blue', 'red', 'purple', 'orange', 'magenta', 'cyan', 'black', 'indianred', 'lightseagreen', 'gold', 'lightgreen']
for i in range(len(metric_l_2)):
plt.plot(np.arange(1, 31), 100*rank_accuracies_l_2[i], color=color_list[i], linestyle='dashed', label=metric_l_2[i])
plt.plot(np.arange(1, 31), rank_accuracy_base, color='darkorange', linestyle=':', label='kNN Baseline')
plt.title('CMC Curves for various Mahalanobis based methods')
plt.xlabel('Rank')
plt.ylabel('Recogniton Accuracy / %')
plt.legend(loc='best')
| Jupyter Notebooks/KNN Mahalanobis - Learnt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="u-HE3iT6eFPC" outputId="d3f0b2d2-6e7b-4a13-83cf-d88f79f50375"
# NOTE: bare `pip install` lines only work inside IPython/Colab cells, not
# when this file is executed as a plain Python script.
pip install mljar-supervised
# + colab={"base_uri": "https://localhost:8080/"} id="_Y73CTjZeJ15" outputId="3fb80405-32af-46ca-f2d5-ec59ada7bb44"
pip install openml
# + id="xPwMUUlGeo0n"
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv("undersample_3.csv")
# columns 2..31 are features, column 1 the target label
# (assumes the fixed column layout of undersample_3.csv -- verify)
X = df.iloc[:,2:32]
y = df.iloc[:,1]
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
y = l.fit_transform(y)
# 80/20 split, then the 20% is split again into test (80%) and validation (20%)
X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.2, random_state=42)
X_test, X_valid, y_test, y_valid = train_test_split(X_tmp, y_tmp, test_size=0.2, random_state=42)
# + id="abzDfaPRgsVy"
from supervised.automl import AutoML
import os
# AutoML run no. 4: ~10000 s budget, "Perform" mode, 5-fold stratified CV
a_ens_4 = AutoML(total_time_limit=10000,mode="Perform",
                 explain_level=2, algorithms = ['Decision Tree', 'Random Forest','Neural Network', 'Nearest Neighbors','Xgboost'],
                 validation_strategy={"validation_type": "kfold","k_folds": 5, "shuffle": True, "stratify": True,
                                      "random_seed": 1230})
# + id="smjVKZpOiDfO"
# Imports for the classical-ML and Keras experiments below.
from sklearn.ensemble import RandomForestClassifier
from supervised.automl import AutoML
import os
import numpy as np
import pandas as pd
import sklearn.model_selection
from sklearn.metrics import log_loss, f1_score
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import numpy as np
np.random.seed(1337)
# for reproducibility (the next comment was garbled in the original; it read:
# "import h5py; from keras.models import Sequential")
from keras.optimizers import Adam
from keras.initializers import TruncatedNormal
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ReduceLROnPlateau
from sklearn.metrics import roc_curve, auc
# + colab={"base_uri": "https://localhost:8080/"} id="ES7etUBJjG9W" outputId="c0861cad-be7e-4a06-8acb-81ebe15fa667"
# NOTE(review): the model is fitted on the FULL data set (X, y) although a
# train/test split was created above -- the later evaluation on X_test is
# therefore not out-of-sample. Verify whether fitting on X_train was intended.
a_ens_4.fit(X,y)
# + id="-ULOKtxGqUdw"
predictions= a_ens_4.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="C4c4k4qxUM-u" outputId="5a33fb5b-611a-40e0-8060-929844c8305e"
predictions
# + colab={"base_uri": "https://localhost:8080/"} id="xamJhxUjQRFQ" outputId="7a3127ff-69d7-4111-d8f2-05ecb19f7398"
pip install plot-metric
# + colab={"base_uri": "https://localhost:8080/", "height": 384} id="kKwjBgA5j16v" outputId="634cf142-2e07-4944-caac-f3136a484290"
import matplotlib.pyplot as plt
from plot_metric.functions import BinaryClassification
from sklearn.metrics import confusion_matrix
# Visualisation with plot_metric
# NOTE(review): plot_metric's BinaryClassification signature is
# (y_true, y_pred, labels); here predictions are passed first -- confirm the
# argument order.
bc = BinaryClassification(predictions, y_test, labels=["Class 1", "Class 2"])
print(bc)# Figures
plt.figure(figsize=(5,5))
bc.plot_roc_curve()
plt.show()
# Derive accuracy / precision / recall / F1 from the confusion matrix.
tn, fp, fn, tp=confusion_matrix(y_test,predictions).ravel()
accuracy=(tn+tp)/(tn+tp+fn+fp)
prec=(tp)/(tp+fp)
rec=tp/(tp+fn)
F1=2*(prec*rec)/(prec+rec)
confusion_matrix(y_test, predictions)
print("F1 Score: ",format(F1))
# + colab={"base_uri": "https://localhost:8080/"} id="TAjSaAzGvV1_" outputId="acc09c99-50d4-467e-8dc5-0fbdf93372f8"
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from matplotlib import pyplot
# area under the ROC curve on the held-out test set
print("ROC:")
roc_auc=roc_auc_score(y_test, predictions)
print(roc_auc)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3kK7Wg7YWlZT" outputId="04e7e66f-20e0-4003-a01c-777f8357bbfb"
# Zip the AutoML results directory and download it (Colab-only helper).
# !zip -r /content/AutoML_US3.zip /content/AutoML_US3/
from google.colab import files
files.download("/content/AutoML_US3.zip")
| students_final_projects/group-g/automl_code/automl_us3_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Scratch notebook: assorted quick experiments.
print("{:.5f}".format(100))
import os
help(os)
import torch
from itertools import islice
# islice picks items 3..9 of the range without materialising it
for i in islice( range(100), 3, 10 ):
    print(i)
# !pwd
import sys
sys.path.insert(0,'/home/iacv/project/adda')
from usps import get_usps
import numpy as np
# Fix: matplotlib must be imported BEFORE the preview loop below uses plt
# (it was originally imported after the loop, which raised a NameError).
import matplotlib.pyplot as plt
a = get_usps(True)
# preview the first few USPS images of the first batch
for i in a:
    for j, image in enumerate(i[0]):
        plt.imshow(image[0])
        plt.show()
        if(j==10):
            break
    break
# NOTE(review): `a` is whatever get_usps returns (presumably a data loader);
# the reshape/indexing below look like leftover scratch code -- verify.
b = a.reshape(28,28,3)
plt.imshow(a[1])
| notebook/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Real life data
import logging
import threading
import json
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import seaborn as seabornInstance
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import base
from iotfunctions import bif
from iotfunctions import entity
from iotfunctions import metadata
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
from iotfunctions import estimator
from iotfunctions.ui import (UISingle, UIMultiItem, UIFunctionOutSingle,
UISingleItem, UIFunctionOutMulti, UIMulti, UIExpression,
UIText, UIStatusFlag, UIParameters)
from mmfunctions.anomaly import (SaliencybasedGeneralizedAnomalyScore, SpectralAnomalyScore,
FFTbasedGeneralizedAnomalyScore, KMeansAnomalyScore, Interpolator)
import datetime as dt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import scipy as sp
import scipy.fftpack
import skimage as ski
from skimage import util as skiutil # for nifty windowing
import pyod as pyod
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
# %matplotlib inline
from pandas.plotting import register_matplotlib_converters
# lets matplotlib plot pandas datetime indexes without warnings
register_matplotlib_converters()
EngineLogging.configure_console_logging(logging.INFO)
# +
# setting to make life easier
# Column-name shorthands: the input signal and the derived score columns.
Temperature='Temperature'
kmeans='TemperatureKmeansScore'
fft='TemperatureFFTScore'
fftp='TemperatureFFTPScore'  # dampened FFT score, computed later
spectral='TemperatureSpectralScore'
sal='SaliencyAnomalyScore'
gen='TemperatureGeneralizedScore'
# Columns that will hold the thresholded ("anomaly") versions of the scores.
kmeansA='kmeansAnomaly'
kmeansB='kmeansAnomalyB'
spectralA='spectralAnomaly'
fftA='fftAnomaly'
fftpA='fftpAnomaly'
salA='salAnomaly'
genA='genAnomaly'
# Per-score anomaly thresholds (chosen for this data set).
kmeans_break=1.9
spectral_break = 2.8
fft_break = 100
sal_break = 100
gen_break = 30000
# -
#
# #### What will be shown
#
# General approach is straightforward
# * read raw data in
# * transform it so that it is compatible to the Monitoring pipeline
# * add yet another anomaly detector based on computer vision technology. The point here is to show how to run pipeline anomaly functions 'locally', an important concept for automated testing.
# * simplify the dataframe - we have only one entity, no need for an entity index
# * render input data and anomaly scores properly scaled
#
# <br>
#
# We start with Microsoft's anomaly test data found here
# https://github.com/microsoft/anomalydetector/blob/master/samples/sample.csv
#
# and then proceed to applying anomaly detection to real life pump data
#
#
# <br>
#
#
# #### Current inventory of anomaly detectors by type
#
# This is the list of functions to apply
#
#
# | Detector | ML Type | Type | How does it work |
# | ------- | ------------ | ------- | ---------------- |
# | KMeans | Unsupervised | Proximity | Clusters data points in centroid buckets, small buckets are outliers, score is distance to closest other bucket |
# | Generalized | Unsupervised | Linear Model | Covariance matrix over data point vectors serves to measure multi-dimensional deviation |
# | FFT | Unsupervised | Linear Model | Run FFT before applying Generalized |
# | Spectral | Unsupervised | Linear Model | Compute signal energy to reduce dimensions |
# | Saliency | Unsupervised | Linear Model | Apply saliency transform (from computer vision) |
# | SimpleAnomaly | **Supervised** | Ensemble | Run Gradient boosting on training data, anomaly if prediction deviates from actual data |
# | --- | **Supervised** | LSTM | Train a stacked LSTM, anomaly if prediction deviates from actual data |
#
#
# +
# Run on the good pump first
# Get stuff in
# Load Microsoft's anomaly sample data and shape it like Monitoring input:
# an (entity, timestamp) MultiIndex with a single 'Temperature' column.
df_i = pd.read_csv('./samples/AzureAnomalysample.csv', index_col=False, parse_dates=['timestamp'])
df_i['entity']='MyRoom'
# shift the raw values by +20 so they resemble room temperatures
df_i['Temperature']=df_i['value'] + 20
df_i = df_i.drop(columns=['value'])
# and sort it by timestamp
df_i = df_i.sort_values(by='timestamp')
df_i = df_i.set_index(['entity','timestamp']).dropna()
df_i.head(2)
# +
# Now run the anomaly functions as if they were executed in a pipeline
#interi = Interpolator(Temperature, 12, 23.0, 'TemperatureInter')
interi = Interpolator(Temperature, 12, 22.9375, 'TemperatureInter') # drop 22.9375
# execute() needs an entity type; build a minimal one so it runs locally
et = interi._build_entity_type(columns = [Column(Temperature,Float())])
interi._entity_type = et
df_i = interi.execute(df=df_i)
df_i.head(30)
# +
# Now run the anomaly functions as if they were executed in a pipeline.
# Every scorer needs a minimal entity type attached before execute() works
# outside the Monitoring service; the pattern is identical for all four, so
# it is factored into a small helper instead of being repeated inline.
def run_scorer(scorer, df):
    """Attach a minimal entity type to *scorer* and apply it to *df*."""
    et = scorer._build_entity_type(columns=[Column(Temperature, Float())])
    scorer._entity_type = et
    return scorer.execute(df=df)

df_i = run_scorer(SpectralAnomalyScore(Temperature, 12, spectral), df_i)
df_i = run_scorer(SaliencybasedGeneralizedAnomalyScore(Temperature, 12, sal), df_i)
df_i = run_scorer(FFTbasedGeneralizedAnomalyScore(Temperature, 12, fft), df_i)
df_i = run_scorer(KMeansAnomalyScore(Temperature, 12, kmeans), df_i)
df_i.head(30)
# -
# Re-run the saliency scorer into a second column to check reproducibility.
sali = SaliencybasedGeneralizedAnomalyScore(Temperature, 12, sal+'_')
et = sali._build_entity_type(columns = [Column(Temperature,Float())])
sali._entity_type = et
df_i = sali.execute(df=df_i)
# NOTE(review): np.all over the inequality mask is True only when EVERY value
# differs; if the intent is "are there any differences", np.any would be the
# right reduction -- verify. As written, comp == False means reproducible.
comp = np.all(np.where(df_i[sal] != df_i[sal+'_'], True, False))
comp
# +
# Simplify our pandas dataframe to prepare input for plotting
EngineLogging.configure_console_logging(logging.INFO)
# only one entity ('MyRoom'): drop the entity index level
df_inputm2 = df_i.loc[['MyRoom']]
df_inputm2.reset_index(level=[0], inplace=True)
# +
# dampen gradient and reconstruct anomaly function
dampening = 0.8 # gradient dampening
fftp_break = fft_break * dampening
# TODO error testing for arrays of size <= 1
# Fix: `fftval` was referenced further down without ever being defined
# (NameError); bind the raw FFT score values first.
fftval = df_inputm2[fft].values
fftgradN = np.gradient(fftval)
print (fftgradN)
# dampen the gradient magnitude while keeping its sign
fftgradS = np.float_power(abs(fftgradN), dampening) * np.sign(fftgradN) # dampening
# reconstruct (dampened) anomaly score by discrete integration, starting from
# the first raw score value (cumsum replaces the original element-wise loop)
fftI = fftval[0] + np.cumsum(fftgradS)
# shift array slightly to the right to position anomaly score
fftI = np.roll(fftI, 1)
fftI[0] = fftI[1]
# normalize
df_inputm2[fftp] = fftI / dampening / 2
df_inputm2.describe()
# +
# Threshold each score column into its "anomaly" counterpart: values below
# the break become NaN (so they are not drawn), values above are clipped to
# the break value -- the overlay then marks anomalous stretches only.
# NOTE(review): the `df[col].values[mask] = ...` pattern writes through the
# underlying numpy array, bypassing pandas' assignment checks; it relies on
# the column being a plain (non-copied) block -- verify on newer pandas with
# copy-on-write enabled.
# df_inputm2[spectral].values[df_inputm2[spectral] > 0.001] = 0.001
# df_inputm2[fft].values[df_inputm2[fft] < -1] = -1
df_inputm2[kmeansA] = df_inputm2[kmeans]
df_inputm2[kmeansA].values[df_inputm2[kmeansA] < kmeans_break] = np.nan
df_inputm2[kmeansA].values[df_inputm2[kmeansA] > kmeans_break] = kmeans_break
# kmeansB: binary-style indicator, 4 = anomalous, 3 = normal
df_inputm2[kmeansB] = df_inputm2[kmeans]
df_inputm2[kmeansB].values[df_inputm2[kmeansB] >= kmeans_break] = 4
df_inputm2[kmeansB].values[df_inputm2[kmeansB] < kmeans_break] = 3
df_inputm2[fftA] = df_inputm2[fft]
df_inputm2[fftA].values[df_inputm2[fftA] < fft_break] = np.nan
df_inputm2[fftA].values[df_inputm2[fftA] > fft_break] = fft_break
df_inputm2[fftpA] = df_inputm2[fftp]
#df_inputm2[fftp].values[df_inputm2[fft] > fftp_break] = fftp_break
df_inputm2[fftpA].values[df_inputm2[fftpA] < fftp_break] = np.nan
df_inputm2[fftpA].values[df_inputm2[fftpA] > fftp_break] = fftp_break
df_inputm2[spectralA] = df_inputm2[spectral]
df_inputm2[spectralA].values[df_inputm2[spectralA] < spectral_break] = np.nan
df_inputm2[spectralA].values[df_inputm2[spectralA] > spectral_break] = spectral_break
df_inputm2[salA] = df_inputm2[sal]
df_inputm2[salA].values[df_inputm2[salA] < sal_break] = np.nan
df_inputm2[salA].values[df_inputm2[salA] > sal_break] = sal_break
#df_inputm2[genA] = df_inputm2[gen]
#df_inputm2[genA].values[df_inputm2[genA] < gen_break] = np.nan
#df_inputm2[genA].values[df_inputm2[genA] > gen_break] = gen_break
# NOTE(review): plots=2 creates two axes but only ax[0] is populated below
# (the second drawing block was commented out); ax[1] stays empty.
plots = 2
# x1/x2: index window to display; the later assignments override the first
x1=0
x2=4000
x1=3350
x2=3450
#x1 = 3550, x2 = 3650
fig, ax = plt.subplots(plots, 1, figsize=(20, 8 * plots))
cnt = 0
#ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[Temperature][x1:x2]-20,linewidth=1,color='black',label=Temperature)
#ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
#ax[cnt].set_ylabel('Input Temperature - 20',fontsize=14,weight="bold")
cnt = 0
# input signal plus raw and dampened FFT scores (scaled by their thresholds),
# with the thresholded anomaly overlays drawn on top
ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[Temperature][x1:x2]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[fft][x1:x2]/fft_break, linewidth=2,color='darkgreen',label=fft)
ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[fftp][x1:x2]/fftp_break, linewidth=2,color='darkblue',label=fftp)
ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[fftA][x1:x2]/fft_break, linewidth=4, color='red', zorder=2)
ax[cnt].plot(df_inputm2.index[x1:x2], df_inputm2[fftpA][x1:x2]/fftp_break + 0.5, linewidth=4, color='orange', zorder=2)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('FFT \n detects frequency changes', fontsize=13)
for i in range(plots):
    ax[i].grid(True, color='white')
    ax[i].set_facecolor('lightgrey')
# -
# #### Results
#
# Clear **winners** are
# * **KMeans** and
# * **FFT**.
#
# Spectral is way too sensitive while Saliency
# doesn't detect the negative peak at 10/10 midnight
#
| DampenAnomalyScore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # AutoML 05: Blacklisting Models, Early Termination, and Handling Missing Data
#
# In this example we use scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for handling missing values in data. We also provide a stopping metric indicating a target for the primary metric so that AutoML can terminate the run without necessarily going through all the iterations. Finally, if you want to avoid a certain pipeline, we allow you to specify a blacklist of algorithms that AutoML will ignore for this run.
#
# Make sure you have executed the [00.configuration](00.configuration.ipynb) before running this notebook.
#
# In this notebook you will learn how to:
# 1. Create an `Experiment` in an existing `Workspace`.
# 2. Configure AutoML using `AutoMLConfig`.
# 4. Train the model.
# 5. Explore the results.
# 6. Test the best fitted model.
#
# In addition this notebook showcases the following features
# - **Blacklisting** certain pipelines
# - Specifying **target metrics** to indicate stopping criteria
# - Handling **missing data** in the input
#
# ## Create an Experiment
#
# As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# +
import logging
import os
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
# +
# Connect to the Azure ML workspace described by the local config file and
# create (or reuse) the named experiment.
ws = Workspace.from_config()
# Choose a name for the experiment.
experiment_name = 'automl-local-missing-data'
project_folder = './sample_projects/automl-local-missing-data'
experiment = Experiment(ws, experiment_name)
# Summary table of the environment this notebook runs against.
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
# Show full cell contents without truncation.
# FIX: use None -- the legacy -1 spelling was deprecated in pandas 1.0 and
# removed in pandas 2.0; None means "no limit" on every pandas version.
pd.set_option('display.max_colwidth', None)
pd.DataFrame(data=output, index=['']).T
# -
# ## Diagnostics
#
# Opt-in diagnostics for better experience, quality, and security of future releases.
# Opt in to sending SDK telemetry to Microsoft for this session.
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
# ### Creating missing data
# +
from scipy import sparse  # NOTE(review): unused in this cell; may be used further down the notebook
# Load the digits dataset; the first 10 samples are held out for the final
# prediction demo, so train on the rest.
digits = datasets.load_digits()
X_train = digits.data[10:,:]
y_train = digits.target[10:]
# Add missing values in 75% of the lines.
missing_rate = 0.75
n_missing_samples = int(np.floor(X_train.shape[0] * missing_rate))
# Boolean mask with exactly n_missing_samples True entries.
# FIX: use the builtin `bool` -- the `np.bool` alias was deprecated in
# NumPy 1.20 and removed in NumPy 1.24.
missing_samples = np.hstack((np.zeros(X_train.shape[0] - n_missing_samples, dtype=bool), np.ones(n_missing_samples, dtype=bool)))
rng = np.random.RandomState(0)
rng.shuffle(missing_samples)
# For each masked row, knock out one randomly chosen feature.
missing_features = rng.randint(0, X_train.shape[1], n_missing_samples)
X_train[np.where(missing_samples)[0], missing_features] = np.nan
# -
# Preview the training matrix with its injected NaNs plus the label column.
df = pd.DataFrame(data = X_train)
df['Label'] = pd.Series(y_train, index=df.index)
df.head()
# ## Configure AutoML
#
# Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment. This includes setting `exit_score`, which should cause the run to complete before the `iterations` count is reached.
#
# |Property|Description|
# |-|-|
# |**task**|classification or regression|
# |**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>balanced_accuracy</i><br><i>average_precision_score_weighted</i><br><i>precision_score_weighted</i>|
# |**max_time_sec**|Time limit in seconds for each iteration.|
# |**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
# |**n_cross_validations**|Number of cross validation splits.|
# |**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|
# |**exit_score**|*double* value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|
# |**blacklist_algos**|*List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run.<br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGDClassifierWrapper</i><br><i>NBWrapper</i><br><i>BernoulliNB</i><br><i>SVCWrapper</i><br><i>LinearSVMWrapper</i><br><i>KNeighborsClassifier</i><br><i>DecisionTreeClassifier</i><br><i>RandomForestClassifier</i><br><i>ExtraTreesClassifier</i><br><i>LightGBMClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet<i><br><i>GradientBoostingRegressor<i><br><i>DecisionTreeRegressor<i><br><i>KNeighborsRegressor<i><br><i>LassoLars<i><br><i>SGDRegressor<i><br><i>RandomForestRegressor<i><br><i>ExtraTreesRegressor<i>|
# |**X**|(sparse) array-like, shape = [n_samples, n_features]|
# |**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|
# |**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
# AutoML sweep: at most 20 candidate pipelines, 5-fold CV, optimizing
# AUC_weighted.  preprocess=True lets AutoML impute the NaNs injected above;
# exit_score stops the sweep early once the primary metric passes 0.9984;
# blacklist_algos excludes the two listed learners from consideration.
automl_config = AutoMLConfig(task = 'classification',
                             debug_log = 'automl_errors.log',
                             primary_metric = 'AUC_weighted',
                             max_time_sec = 3600,
                             iterations = 20,
                             n_cross_validations = 5,
                             preprocess = True,
                             exit_score = 0.9984,
                             blacklist_algos = ['KNeighborsClassifier','LinearSVMWrapper'],
                             verbosity = logging.INFO,
                             X = X_train,
                             y = y_train,
                             path = project_folder)
# ## Train the Models
#
# Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
# In this example, we specify `show_output = True` to print currently running iterations to the console.
# Synchronous local run; prints per-iteration progress to the console.
local_run = experiment.submit(automl_config, show_output = True)
# ## Explore the Results
# #### Widget for Monitoring Runs
#
# The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
#
# **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
from azureml.train.widgets import RunDetails
RunDetails(local_run).show()
#
# #### Retrieve All Child Runs
# You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
# +
# Collect per-iteration metrics from every child run into one DataFrame
# (columns = iteration numbers, rows = metric names).
children = list(local_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    # Keep only scalar (float) metrics so the DataFrame stays rectangular.
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics
# FIX: pass the axis explicitly -- positional `sort_index(1)` was deprecated
# in pandas 1.0 and removed in pandas 2.0.
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
# -
# ### Retrieve the Best Model
#
# Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
# With no arguments get_output() returns the run/model that scored best on the
# primary metric (AUC_weighted for this experiment).
best_run, fitted_model = local_run.get_output()
# #### Best Model Based on Any Other Metric
# Show the run and the model which has the smallest `accuracy` value:
# +
# lookup_metric = "accuracy"
# best_run, fitted_model = local_run.get_output(metric = lookup_metric)
# -
# #### Model from a Specific Iteration
# Show the run and the model from the third iteration:
# +
# iteration = 3
# best_run, fitted_model = local_run.get_output(iteration = iteration)
# -
# ### Testing the best Fitted Model
# +
# The first 10 samples were held out of training (see the [10:] slicing when
# X_train was built), so they serve as unseen test digits here.
digits = datasets.load_digits()
X_test = digits.data[:10, :]
y_test = digits.target[:10]
images = digits.images[:10]
# Randomly select digits and test.
for index in np.random.choice(len(y_test), 2, replace = False):
    print(index)
    # predict() expects a 2-D array, hence the one-row slice.
    predicted = fitted_model.predict(X_test[index:index + 1])[0]
    label = y_test[index]
    title = "Label value = %d Predicted value = %d " % (label, predicted)
    fig = plt.figure(1, figsize=(3,3))
    ax1 = fig.add_axes((0,0,.8,.8))
    ax1.set_title(title)
    plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')
    plt.show()
| automl/05.auto-ml-missing-data-Blacklist-Early-Termination.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="I95S5GeUMsc-"
# # the imports in this cell are required when running on local device
# import os, sys
# sys.path.append(os.path.join('..', '..'))
# from utils.applyML_util import train_classification, eval_classification
# from utils.featureSelection_util import (pearson_correlation_fs,
# seleckKBest_fs, selectSequential_fs)
# + id="oxumaP1_Oxv4"
# the imports in this cell are required when running from Cloud (Colab/Kaggle)
# before running on cloud you need to upload the .py files
# from 'Notebooks/utils' directory
from applyML_util import train_classification, eval_classification, showEvalutationGraph_classification
from featureSelection_util import (pearson_correlation_fs,
seleckKBest_fs, selectSequential_fs)
# + [markdown] id="UJ4-Bu_iQt6r"
# **SVC Documentation link:** https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
#
# + id="1tVvVKjkQsmG"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
# + id="K0b41I0RRkkE"
# global random seed
RAND_SEED = 42
# initial model with only the random seed and no hyper-parameters set
initial_model = SVC(random_state=RAND_SEED)
# hyper-parameter grid
# NOTE(review): with kernel fixed to 'linear', SVC ignores gamma, so the grid
# is 4x larger than necessary -- confirm before trimming.
kernel = ['linear']
gamma = [0.1, 1, 10, 100]
c_values = [0.1, 1, 10, 100, 1000]
# dictionary of all hyperparameters
param_grid = {'C': c_values, 'gamma': gamma, 'kernel':kernel }
# variables needed for the showEvalutationGraph_classification() function
MODEL_CLASS = SVC
class_label = 'Rainfall'
x_axis_param_name = 'C'
x_axis_param_vals = c_values
# + [markdown] id="sMvRYb6AQLj9"
# ## 1. Experimentation on the Weather Daily Dataset
# + id="bNmGnNV1QPE8"
# Train/test splits are fetched straight from the project's GitHub raw URLs.
# Load the train dataset
weather_daily_train_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/train/brri-weather_train_classification.csv')
# Load the test set
weather_daily_test_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/test/brri-weather_test_classification.csv')
# + [markdown] id="z0BMd6EBK6PQ"
# ### 1.0 No technique
# + id="N8lrGQEjMmhO" colab={"base_uri": "https://localhost:8080/"} outputId="aef2ba2f-3763-433a-8da8-5ba4085f25fc"
# train model (grid search + fit; returns fitted model, chosen params, train scores)
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="_D5Pz_IM2dUj" colab={"base_uri": "https://localhost:8080/", "height": 567} outputId="7df4138c-d29d-43f6-a235-9cf7f93e9b07"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
# NOTE(review): this aliases the dict (no copy) -- adding 'random_state' below
# also mutates selected_hyperparams.
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="fj1I73i2WBYF" colab={"base_uri": "https://localhost:8080/"} outputId="7b3da70a-fe6d-4fd1-9b1c-1916ec93e129"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="VcGsKgTkDS60"
# ### 1.1 Apply Pearson Feature Selection to Daily Weather Dataset
# + id="xUGFfg6FDSB3" colab={"base_uri": "https://localhost:8080/"} outputId="6f9cd4e5-ff4d-4534-8af8-829675f8162b"
# select features from the train dataset (drop columns flagged by Pearson correlation)
weather_daily_fs1_train_df, cols_to_drop = pearson_correlation_fs(weather_daily_train_df, class_label)
# keep only selected features on the test dataset
weather_daily_fs1_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
# + id="Z4Aj4bDCEBFE" colab={"base_uri": "https://localhost:8080/"} outputId="668dc714-57dd-48b9-e3a8-8e61f2add576"
# train model on the reduced feature set
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="Wnlx9lRX6cOT" colab={"base_uri": "https://localhost:8080/", "height": 567} outputId="0ddd4465-1ee1-46c7-f8c1-3bda7e56a6da"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="Q56t8VALEOLV" colab={"base_uri": "https://localhost:8080/"} outputId="c1681694-025d-493b-ce0c-788a22a20444"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_fs1_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="r0f0shnaTaEd"
# ### 1.2 Apply SelectKBest Feature Selection to Daily Weather Dataset
# + id="aP0zT8cDTaEe" colab={"base_uri": "https://localhost:8080/"} outputId="26d61571-79e7-48d0-9b35-ecac9b3b59b8"
# select features from the train dataset
# NOTE(review): is_regression=True inside a classification notebook looks like
# a copy-paste slip -- confirm against seleckKBest_fs's intended scoring mode.
weather_daily_fs2_train_df, cols_to_drop = seleckKBest_fs(weather_daily_train_df, class_label, is_regression=True)
print('features dropped:', cols_to_drop)
# keep only selected features on the test dataset
weather_daily_fs2_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
# + id="obBlfL4DTaEg" colab={"base_uri": "https://localhost:8080/"} outputId="954ddfda-b634-4576-8fd8-e4e0cb18d972"
# train model on the reduced feature set
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="FmIkmj59TaEh" colab={"base_uri": "https://localhost:8080/", "height": 567} outputId="02666972-3f6f-41fe-dba2-f516e39556d2"
# accuracy/f1 graph on the train set
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="DFeHpfLrTaEh" colab={"base_uri": "https://localhost:8080/"} outputId="7520dba3-5069-469f-8f1e-9248ac5d2129"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_fs2_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="uPWtfgzLLYJI"
# ### 1.3 SMOTE on Daily Dataset
# + id="9Q9UhID9LYJI" colab={"base_uri": "https://localhost:8080/"} outputId="7b5154e0-c635-4bcf-8e42-13441769180b"
# train model (SMOTE oversampling is applied inside train_classification)
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df,
                                                                             cls=class_label, sampling_technique='smote')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="sdm9243wLYJJ"
# accuracy/f1 graph on the train set
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="y8RoOykDLYJK"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="tCwFqnH6Lq17"
# ### 1.4 Random Undersampling + SMOTE on Daily Dataset
# + id="lTdi5bbLLq17" colab={"base_uri": "https://localhost:8080/"} outputId="36fc9d41-0e5a-41ae-ba74-a25632bf5cba"
# train model (random undersampling + SMOTE, handled inside train_classification)
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="76m-rcuvLq18" colab={"base_uri": "https://localhost:8080/", "height": 540} outputId="59546752-667e-40aa-a55b-553e5e2ce6e9"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="Z5o9BQOHLq18" colab={"base_uri": "https://localhost:8080/"} outputId="97262746-69d6-46f5-e166-46f289a05adf"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="XVMCfgbKahtE"
# ### 1.5 Pearson Feature Selection + Hybrid Sampling to Daily Weather Dataset
# + id="ck48Q9VQahtN" colab={"base_uri": "https://localhost:8080/"} outputId="4d27a540-38c5-417c-89cd-b9ed4f172efd"
# train model: Pearson-selected features combined with hybrid sampling
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="ioTeYyewahtN" colab={"base_uri": "https://localhost:8080/", "height": 540} outputId="e170d5ca-a9c1-44d3-f301-31f81df05b4f"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="yKfAZyE_ahtN" colab={"base_uri": "https://localhost:8080/"} outputId="cf4f9e79-f1c4-4a90-b5e9-6e1bb3400782"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_fs1_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
# + [markdown] id="FSPsOKznhZlT"
# ### 1.6 SelecKBest Feature Selection + Hybrid Sampling to Daily Weather Dataset
# + id="i-ZIHmixhZlV" colab={"base_uri": "https://localhost:8080/"} outputId="bde69999-8dad-4d9d-e560-f32260c5454e"
# train model: SelectKBest-selected features combined with hybrid sampling
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="d0CpcJB_hZlV" colab={"base_uri": "https://localhost:8080/", "height": 540} outputId="7f4f9fdf-b515-49d4-94c8-c9153ce99f5a"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="sBlMerD3hZlW" colab={"base_uri": "https://localhost:8080/"} outputId="07045043-eaac-4af0-b6cf-4ab9265e5632"
# test model
test_accuracy, test_f1 = eval_classification(model, weather_daily_fs2_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}')
| Notebooks/brri-dataset/experimentations/classification/others/svm_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: analysis
# language: python
# name: analysis
# ---
# # Univariate Outliers Detection
# %matplotlib inline
import warnings
# NOTE(review): blanket suppression hides *all* warnings, including useful
# deprecation notices -- consider narrowing the filter.
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
# ## load data
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
target = iris.target
names = iris.target_names
# Tidy frame: the four measurements plus a human-readable species column.
data = pd.DataFrame(X, columns=iris.feature_names)
data['species'] = iris.target
data['species'] = data['species'].replace(to_replace= [0, 1, 2], value = ['setosa', 'versicolor', 'virginica'])
data.shape
# # Univariate OUTLIERS detection for a df
# +
## remove outliers of a 1D array according to the Inter Quartile Range (IQR)
def remove_outliers_IQR(v:np.array, verbose:bool = False)->np.array:
    """
    Replace outliers of a 1D array with NaN using Tukey's IQR fences.
    v -- array of values to be analyzed (modified in place).
    verbose -- display extra information (default, False).
    return -- the same array, with outliers replaced by NaN.
    """
    # Tukey fences: 1.5 * IQR beyond the first/third quartiles.
    q1, q3 = np.quantile(v, [0.25, 0.75])
    spread = 1.5 * (q3 - q1)
    t_lower, t_upper = q1 - spread, q3 + spread
    if verbose:
        print('Thresholds: lower = %.5f / upper = %.5f'%(t_lower, t_upper))
    # Mask everything outside the fences, in place.
    outside_low = v < t_lower
    outside_high = v > t_upper
    v[outside_low] = np.nan
    v[outside_high] = np.nan
    return v
## remove outliers of a 1D array according to standard deviation rule of Normal Distribution
def remove_outliers_Z(v:np.array, threshold:int = 3, verbose:bool = False)->np.array:
    """
    Replace outliers of a 1D array with NaN using the z-score rule of the
    Normal distribution (a value is an outlier when |z| > threshold).
    v -- array of values to be analyzed (modified in place).
    threshold -- number of standard deviations used to flag outliers (default, 3 sigmas).
    verbose -- display extra information (default, False).
    return -- the same array, with outliers replaced by NaN.
    """
    # standardize: z = (x - mean) / std
    v_mean = np.mean(v)
    v_std = np.std(v)
    z_scores = (v - v_mean) / v_std
    if verbose:
        print('mean = %.5f / std = %.5f' % (v_mean, v_std))
    # BUG FIX: the original iterated an undefined name (`var`), and then
    # compared the *raw* values -- not the z-scores -- against the threshold.
    # The comparison must be on the standardized values.
    v[z_scores < -threshold] = np.nan
    v[z_scores > threshold] = np.nan
    return v
## univariante outliers detection for all numerical variables in a df
def univariate_outliers_detection(data:pd.DataFrame,
                                  is_remove:bool = True,
                                  methodology:'function' = remove_outliers_IQR,
                                  verbose:bool = False)->pd.DataFrame:
    """
    Univariante outliers detection for all numerical variables in a df.
    data -- dataframe to be analyzed.
    is_remove -- if removing outliers or just detect (default, True).
    methodology -- function of method to be used to remove / detect outliers (default, remove_outliers_IQR()).
    verbose -- display extra information (default, False).
    return -- df of values without outliers or a mask with detected outliers.
    """
    # copy data (the caller's frame is never modified; the methodology writes
    # NaNs in place, but only into this private copy)
    df = data.copy()
    # columns of numerical variables
    cols_num = df.select_dtypes(include=['float64', 'int64']).columns.values
    # initialize if just detection
    if not is_remove:
        df_mask = pd.DataFrame(np.zeros(df.shape, dtype=bool), columns = df.columns)
    # loop of numerical columns
    for col in cols_num:
        # get data
        v = df[col].values
        ni = np.sum(np.isnan(v))
        # outliers detection: the methodology marks outliers by writing NaN
        # (it may mutate `v` and return the same array)
        v_cleaned = methodology(v)
        nf = np.sum(np.isnan(v_cleaned))
        # count detected outliers (newly introduced NaNs only)
        noutliers = nf - ni
        # validate if outliers was found
        if noutliers > 0:
            # display
            if verbose:
                print(f'In "{col}" was detected {noutliers} outliers.')
            # if removing
            if is_remove:
                df[col] = v_cleaned
            # if just detection
            # NOTE(review): .loc with positional indices assumes a default
            # RangeIndex -- confirm for frames with a custom index.
            else:
                i_outliers = np.where(np.isnan(v_cleaned))[0]
                df_mask.loc[i_outliers,col] = True
        # clean
        del v, v_cleaned
    # return
    if is_remove:
        return df
    else:
        return df_mask
# -
# Remove (not just flag) IQR outliers from every numeric iris column, verbosely.
_ = univariate_outliers_detection(data, is_remove = True, verbose = True)
| notebooks/analysis/analysis_anomalies/notebook-univariate_outliers_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from graphene import ObjectType, String, Schema
# Minimal schema: a single field whose value comes from the execution context.
class Query(ObjectType):
    name = String()
    # graphene resolvers take the root value first; info.context is the dict
    # passed to schema.execute() below.
    def resolve_name(root, info):
        return info.context.get('name')
schema = Schema(Query)
result = schema.execute('{ name }', context={'name': 'Syrus'})
assert result.data['name'] == 'Syrus'
result.data
# +
import graphene
class Query(graphene.ObjectType):
    # Field with one argument; default_value is used when the query omits it.
    hello = graphene.String(argument=graphene.String(default_value="stranger"))
    def resolve_hello(self, info, argument):
        return 'Hello ' + argument
schema = graphene.Schema(query=Query)
# +
result = schema.execute('{ hello }')
print(result.data['hello']) # "Hello stranger"
# or passing the argument in the query
result = schema.execute('{ hello (argument: "graph") }')
print(result.data['hello']) # "Hello graph"
# + active=""
# class Query(graphene.ObjectType):
# user = graphene.Field(User)
#
# def resolve_user(self, info):
# return info.context.get('user')
#
# schema = graphene.Schema(Query)
# result = schema.execute(
# '''query getUser($id: ID) {
# user(id: $id) {
# id
# firstName
# lastName
# }
# }''',
# variables={'id': 12},
# )
# +
import graphene
# Object type returned by the query below.
class Patron(graphene.ObjectType):
    id = graphene.ID()
    name = graphene.String()
    age = graphene.Int()
class Query(graphene.ObjectType):
    patron = graphene.Field(Patron)
    def resolve_patron(self, info):
        return Patron(id=1, name="Syrus", age=27)
schema = graphene.Schema(query=Query)
query = """
    query something{
      patron {
        id
        name
        age
      }
    }
"""
# Note: the ID scalar serializes to a string, hence "id": "1" below.
def test_query():
    result = schema.execute(query)
    assert not result.errors
    assert result.data == {"patron": {"id": "1", "name": "Syrus", "age": 27}}
result = schema.execute(query)
print(result.data["patron"])
# +
import graphene
class User(graphene.ObjectType):
    id = graphene.ID()
    name = graphene.String()
class Query(graphene.ObjectType):
    me = graphene.Field(User)
    def resolve_me(self, info):
        # The "current user" is injected through the execution context.
        return info.context["user"]
schema = graphene.Schema(query=Query)
query = """
    query something{
      me {
        id
        name
      }
    }
"""
def test_query():
    result = schema.execute(query, context={"user": User(id="1", name="Syrus")})
    assert not result.errors
    assert result.data == {"me": {"id": "1", "name": "Syrus"}}
result = schema.execute(query, context={"user": User(id="X", name="Console")})
print(result.data["me"])
# -
# Inspect the User instance before and after mutating an attribute.
from var_dump import var_dump
user=User(id="1", name="Syrus")
var_dump(user)
setattr(user, "name", "Tom")
var_dump(user)
| notebook/procs-graphene.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from embed import Embedding
# +
import os
# Expose only GPU 1 to CUDA (it becomes device ordinal 0 inside the process).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Dimensionality of the pretrained word vectors loaded by Embed below.
EMBED_DIM = 300
def is_valid(seg):
    """Return True when every character of `seg` lies in the CJK Unified
    Ideographs block (U+4E00..U+9FFF); an empty string is considered valid."""
    return all('\u4e00' <= ch <= '\u9fff' for ch in seg)
class Embed:
    """Vocabulary + embedding table loaded from a word2vec-style text file.

    Row 0 is the all-zero 'unk' vector.  idx_seg (index -> segment),
    seg_idx (segment -> index) and idx_emb (index -> vector) are kept
    aligned so that idx_emb[seg_idx[s]] is the vector for segment s.
    """
    def __init__(self, file_path='sgns.sogou.word'):
        self.idx_seg = ['unk']
        self.seg_idx = {'unk': 0}
        self.idx_emb = [[0.0] * EMBED_DIM]
        # Pretrained Chinese vector files are UTF-8 encoded; be explicit so the
        # platform default encoding cannot break the load.
        with open(file_path, 'r', encoding='utf-8') as f:
            # Stream line by line instead of readlines() to avoid holding the
            # whole (large) file in memory.
            for line in f:
                parts = line.split()
                seg, emb = parts[0], parts[1:]
                if is_valid(seg) and (seg not in self.seg_idx):
                    # BUG FIX: the original stored the file line number
                    # (enumerate index) as the segment id, but skipped lines
                    # (non-CJK or duplicate segments) de-synchronize that from
                    # the position in idx_seg / idx_emb.  The id must be the
                    # list position at insertion time.
                    self.seg_idx[seg] = len(self.idx_seg)
                    self.idx_seg.append(seg)
                    self.idx_emb.append([float(x) for x in emb])
    def embed(self, seg):
        """Return the vocabulary index of `seg`, or the 'unk' index (0)."""
        if seg in self.seg_idx:
            return self.seg_idx[seg]
        return self.seg_idx['unk']
# -
# Build the vocabulary / embedding table from the pretrained vector file
# (slow: reads the whole file once).
s = Embed()
# +
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
import json
from torch.utils.data import Dataset, DataLoader
from scipy.stats import pearsonr
from sklearn.metrics import f1_score
import random
# Training hyper-parameters and dataset locations.
weightFile='weight'  # prefix for per-epoch checkpoint files
train_file='../git/train_dic.json'
test_file='../git/test_dic.json'
EPOCH=2
BATCH_SIZE=64
lr=0.001
with open(train_file,'r') as f:
    train_dic = json.load(f)
with open(test_file,'r') as f:
    test_dic=json.load(f)
# NOTE(review): length is taken from the 'label' key while the datasets below
# read 'indexed_text' / 'emo' -- confirm the JSON schema is consistent.
max_len=len(train_dic['label'])
class trainset(Dataset):
    """Training split: pre-indexed token-id sequences and emotion labels,
    held fully in memory as LongTensors (read from the module-level train_dic)."""
    def __init__(self):
        self.textdata = torch.LongTensor(train_dic['indexed_text'])
        self.labeldata = torch.LongTensor(train_dic['emo'])
    def __len__(self):
        return self.textdata.shape[0]
    def __getitem__(self, index):
        sample = self.textdata[index]
        target = self.labeldata[index]
        return sample, target
class validset(Dataset):
    """Validation split: pre-indexed token-id sequences and emotion labels,
    held fully in memory as LongTensors (read from the module-level test_dic)."""
    def __init__(self):
        self.textdata = torch.LongTensor(test_dic['indexed_text'])
        self.labeldata = torch.LongTensor(test_dic['emo'])
    def __len__(self):
        return self.textdata.shape[0]
    def __getitem__(self, index):
        sample = self.textdata[index]
        target = self.labeldata[index]
        return sample, target
# Dataset plus a shuffled mini-batch loader over the training split.
text = trainset()
textloader = DataLoader(dataset=text,batch_size=BATCH_SIZE,shuffle=True)
# +
# Number of rows in the embedding table (vocabulary size of the pretrained vectors).
VOCAB_SIZE = 364182
class SeqRNN(nn.Module):
    """Single-layer RNN sentence classifier.

    vocab_size  -- embedding dimension per token (name kept for backward
                   compatibility; it is NOT the vocabulary size, which is
                   the module-level VOCAB_SIZE).
    hidden_size -- number of hidden units of the RNN.
    output_size -- number of output classes (softmax over 8 emotions).
    pretrained_embed -- optional VOCAB_SIZE x vocab_size list/array of
                   pretrained vectors copied into the embedding table.

    NOTE(review): forward() returns softmax probabilities while the training
    loop feeds them to CrossEntropyLoss, which expects raw logits -- see the
    review note at the training cell before changing either side.
    """
    def __init__(self, vocab_size=300, hidden_size=10, output_size=8, pretrained_embed=None):
        # BUG FIX: the default used to be `Embed().idx_emb`, which is evaluated
        # once at class-definition time and reads the entire vector file from
        # disk even though the argument was never used.  `None` keeps the
        # effective behavior without the hidden I/O.
        super(SeqRNN, self).__init__()
        self.embed_dim = vocab_size
        self.embed = nn.Embedding(VOCAB_SIZE, self.embed_dim)
        if pretrained_embed is not None:
            # Replace the random initialization with the provided vectors.
            with torch.no_grad():
                self.embed.weight.copy_(torch.as_tensor(pretrained_embed, dtype=torch.float))
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # dropout dropped: nn.RNN only applies it *between* stacked layers, so
        # with a single layer it was a warning-emitting no-op.
        self.rnn = nn.RNN(self.vocab_size, self.hidden_size, batch_first=True)
        self.linear = nn.Linear(self.hidden_size, self.output_size)
    def forward(self, input):
        # input: LongTensor of token ids, shape (batch, seq_len)
        input = self.embed(input)  # -> (batch, seq_len, embed_dim)
        # BUG FIX: h0 was hard-coded to batch size 1 and moved to a global
        # `device`; size it from the actual batch and allocate it on the
        # input's device so any batch size / device works.
        h0 = torch.zeros(1, input.size(0), self.hidden_size, device=input.device)
        output, hidden = self.rnn(input, h0)
        output = output[:, -1, :]  # last time step of each sequence
        output = self.linear(output)
        output = torch.nn.functional.softmax(output, dim=1)
        return output
# rnn_model = SeqRNN()
# cnn_model = TextCNN()
# +
from tqdm import tqdm

# Train SeqRNN on the GPU.  batch_size=1 matches the hard-coded batch
# dimension of the h0 tensor in SeqRNN.forward above.
device = torch.device("cuda")
textloader = DataLoader(dataset=text, batch_size=1, shuffle=True)
model = SeqRNN()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
model = model.to(device)
cnt = 0
calloss = nn.CrossEntropyLoss()
# NOTE(review): range(2) duplicates the EPOCH constant defined above —
# consider range(EPOCH).
for epoch in range(2):
    aveloss = 0
    batchnum = 0
    # NOTE(review): the loop variable `text` shadows the dataset object
    # `text` created earlier; harmless here because textloader was already
    # built from it, but confusing.
    for text, label in tqdm(textloader):
        text = text.to(device)
        label = label.to(device)
        batchnum += 1
        optimizer.zero_grad()
        out = model(text)
        loss = calloss(out, label)
        loss.backward()
        aveloss += loss.item()
        optimizer.step()
    # Report the mean batch loss and checkpoint once per epoch.
    aveloss /= batchnum
    print('Epoch:', epoch, 'aveloss:', aveloss)
    torch.save(model.state_dict(), weightFile+str(epoch)+'.pkl')
# +
# NOTE(review): this cell duplicates the imports, hyper-parameters and
# data loading of the first cell verbatim so it can be run independently.
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
import json
from torch.utils.data import Dataset, DataLoader
from scipy.stats import pearsonr
from sklearn.metrics import f1_score
import random
weightFile='weight'  # filename prefix for saved model checkpoints
train_file='../git/train_dic.json'
test_file='../git/test_dic.json'
EPOCH=2
BATCH_SIZE=64
lr=0.001
# Load the pre-tokenised train/test dictionaries.
with open(train_file,'r') as f:
    train_dic = json.load(f)
with open(test_file,'r') as f:
    test_dic=json.load(f)
# NOTE(review): number of labelled samples, not a sequence length.
max_len=len(train_dic['label'])
class trainset(Dataset):
    """Training split wrapped as a torch Dataset (duplicate definition)."""

    def __init__(self):
        # Convert the JSON lists to LongTensors a single time.
        self.textdata = torch.LongTensor(train_dic['indexed_text'])
        self.labeldata = torch.LongTensor(train_dic['emo'])

    def __len__(self):
        return int(self.textdata.size(0))

    def __getitem__(self, index):
        # (token-index sequence, emotion label)
        return self.textdata[index], self.labeldata[index]
class validset(Dataset):
    """Validation split wrapped as a torch Dataset (duplicate definition)."""

    def __init__(self):
        # Convert the JSON lists to LongTensors a single time.
        self.textdata = torch.LongTensor(test_dic['indexed_text'])
        self.labeldata = torch.LongTensor(test_dic['emo'])

    def __len__(self):
        return int(self.textdata.size(0))

    def __getitem__(self, index):
        # (token-index sequence, emotion label)
        return self.textdata[index], self.labeldata[index]
text = trainset()
textloader = DataLoader(dataset=text,batch_size=BATCH_SIZE,shuffle=True)
# NOTE(review): TextCNN is not defined anywhere in this file (it only
# appears in a comment above); this cell presumably relies on a class
# defined in another notebook/session and raises NameError as-is.
model = TextCNN()
optimizer = torch.optim.Adam(model.parameters(),lr=lr)
cnt=0
calloss = nn.CrossEntropyLoss()
# CPU training loop: average the per-batch loss each epoch and checkpoint.
for epoch in range(EPOCH):
    aveloss=0
    batchnum=0
    for text,label in textloader:
        batchnum+=1
        optimizer.zero_grad()
        out=model(text)
        loss=calloss(out,label)
        loss.backward()
        aveloss+=loss.item()
        optimizer.step()
    aveloss/=batchnum
    print('Epoch:',epoch,'aveloss:',aveloss)
    torch.save(model.state_dict(), weightFile+str(epoch)+'.pkl')
# +
# Evaluate checkpoint 1 on the held-out split: accuracy, Pearson
# correlation against the annotated intensity vectors, and macro F1.
test = validset()
testloader = DataLoader(dataset=test, batch_size=BATCH_SIZE, shuffle=False)
testmodel = TextCNN()
# opt=torch.optim.Adam(testmodel.parameters(),lr=LR)
correct = 0
total = 0
epoch = 8
coef = 0
ground = list()  # per-batch ground-truth label tensors
pred = list()    # per-batch predicted label tensors
testmodel.load_state_dict(torch.load(weightFile+str(1)+'.pkl'))
for text, label in testloader:
    # opt.zero_grad()
    testmodel.eval()
    out = testmodel(text)
    # Pearson correlation between each predicted class distribution and
    # the annotated row in test_dic['label'].
    # NOTE(review): `ind` is the index WITHIN the current batch, yet it
    # indexes test_dic['label'] globally, so every batch is compared
    # against the first BATCH_SIZE annotation rows — likely a bug.
    for ind in range(len(out)):
        v0 = test_dic['label'][ind][1:]
        ol = []
        for i in range(len(out[ind])):
            ol.append(float(out[ind][i]))
        c = pearsonr(ol, v0)
        coef += c[0]
    prediction = torch.argmax(out, 1)
    ground.append(label)
    pred.append(prediction)
    correct += (prediction == label).sum().float()
    total += len(label)
v = np.array(test_dic['emo'])
print(correct)
print(total)
print('acc:', correct.item()/total)
print(coef)
print('Coef:', coef/total)
# Macro F1 computed per batch, then averaged across batches.
tot = 0
cnt = 0
for i, j in zip(ground, pred):
    print('F-score:', f1_score(i.data, j.data, average='macro'))
    tot += f1_score(i.data, j.data, average='macro')
    cnt += 1
print(tot / cnt)
# some logs
# tensor(1217.)
# 2228
# acc: 0.546229802513465
# 717.9179559345431
# Coef: 0.3222252944050912
# F-score: 0.18830698287220027
# F-score: 0.29171621217657023
# F-score: 0.24558080808080807
# F-score: 0.17189314750290358
# F-score: 0.23976608187134504
# F-score: 0.21186521120075932
# F-score: 0.20497154836777481
# F-score: 0.23169482846902203
# F-score: 0.21553586984805803
# F-score: 0.16167247386759584
# F-score: 0.26652014652014655
# F-score: 0.19197994987468672
# F-score: 0.14716242661448145
# F-score: 0.1794213557205301
# F-score: 0.375312518169661
# F-score: 0.16726190476190478
# F-score: 0.16849529780564265
# F-score: 0.2399525027402265
# F-score: 0.14369747899159663
# F-score: 0.1473485946102579
# F-score: 0.23508691147691954
# F-score: 0.21349080172609586
# F-score: 0.15907184791724907
# F-score: 0.20887445887445888
# F-score: 0.13934713934713933
# F-score: 0.19055598779101082
# F-score: 0.1446312410239081
# F-score: 0.20155348363195658
# F-score: 0.19544740973312402
# F-score: 0.26449248073108883
# F-score: 0.21944721944721943
# F-score: 0.1875
# F-score: 0.1971957671957672
# F-score: 0.24056695992179858
# F-score: 0.13852813852813853
# 0.2035984339260584
# -
# Micro- and macro-averaged F1 over the per-batch predictions.
# NOTE(review): `tot` and `cnt` are NOT reset before either loop, so these
# averages accumulate on top of the values left over from the macro-F1
# loop in the previous cell — the printed means mix averaging modes.
for i, j in zip(ground, pred):
    print('F-score:',f1_score(i.data,j.data,average='micro'))
    tot += f1_score(i.data,j.data,average='micro')
    cnt += 1
print(tot / cnt)
for i, j in zip(ground, pred):
    print('F-score:',f1_score(i.data,j.data,average='macro'))
    tot += f1_score(i.data,j.data,average='macro')
    cnt += 1
print(tot / cnt)
# --- end of file: SentimentAnalysis/rnn/rnn_cuda_new.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Importing Libraries
#
# NumPy is a package in Python used for Scientific Computing. NumPy package is used to perform different operations.
# Pandas is used for data manipulation and analysis. In particular, it offers data structures and operations for manipulating numerical tables and time series.
# Matplotlib is a plotting library for the Python programming language and its numerical mathematics extension NumPy.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ### Importing the training dataset
# Load the training CSV and keep only the 'Open' price column (column 1),
# sliced as 1:2 so the result stays 2-D as MinMaxScaler expects.
dataset_train = pd.read_csv('/Users/rounakbose/Git Local/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
# ### Data Preprocessing
#
# Sklearn is a simple and efficient tool for data mining and data analysis built on numpy, scipy and matplotlib.
#
# Here first we are going to perform feature scaling.
# We are using the MinMaxScaler - transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one.
from sklearn.preprocessing import MinMaxScaler
# Scale prices into [0, 1]; the same fitted scaler is reused later to
# transform the test inputs and to invert the predictions.
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# ### Creating a custom data structure
#
# The new data structure that we are going to make will have 60 timesteps and will provide only 1 output.
# Build supervised samples with a sliding window: each sample X is the 60
# previous scaled prices and the target y is the next price.
X_train = []
y_train = []
# Iterate to the end of the data instead of hard-coding 1258 (the row
# count of this particular CSV), so the cell works for any dataset size.
for i in range(60, len(training_set_scaled)):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# ### Reshaping using Pandas
# Add the trailing feature axis Keras LSTMs expect: (samples, timesteps, 1).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Now we have finally concluded the part of preprocessing the data.
#
# ### Building the RNN
#
# First, import the Keras library and its necessary packages.
# The Sequential model is a linear stack of layers.
# A dense layer represents a matrix vector multiplication. So you get a m dimensional vector as output. A dense layer thus is used to change the dimensions of your vector. Mathematically speaking, it applies a rotation, scaling, translation transform to your vector.
# An LSTM layer is used for speech recognition, language modeling, sentiment analysis and text prediction.
# A dropout layer is used for regularization where you randomly set some of the dimensions of your input vector to be zero with probability. A dropout layer does not have any trainable parameters i.e. nothing gets updated during backward pass of backpropagation.
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# ### Initialising the RNN
#
# We are using the Sequential model.
# Then define the 4 LSTM layers and adding some Dropout regularisation.
# Finally, add the output dense layer.
# +
# Four stacked LSTM layers of 50 units, each followed by 20% dropout.
# Intermediate layers return full sequences so the next LSTM sees every
# time step; the final LSTM returns only its last state, and a single
# dense unit emits the predicted (scaled) price.
regressor = Sequential()
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
# -
# ### Compiling and training the RNN
#
# Then we are fitting our model to the training dataset for 100 epochs.
# Note: The greater the number of epochs:
# a. the better will be the performance of the RNN, but
# b. the more time it will take to compute.
# +
# MSE regression objective with the Adam optimizer.
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# NOTE(review): the markdown above mentions 100 epochs but 200 are
# trained here (the conclusion section confirms 200 was intentional).
regressor.fit(X_train, y_train, epochs = 200, batch_size = 32)
# -
# ### Making the predictions
#
# First, get the real stock prices from Google for the year 2017.
# Then, make the predictions with respect to the original stock prices for 2017.
# +
# Predict the test-period prices: each test window needs the 60 prior
# days, so prepend the tail of the training series, scale with the scaler
# fitted on the training data, then invert the scaling on the output.
dataset_test = pd.read_csv('/Users/rounakbose/Git Local/Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
# One 60-step window per test row, instead of hard-coding 20 test rows
# via range(60, 80) — works for any test-set length.
for i in range(60, 60 + len(dataset_test)):
    X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# -
# ### Plotting the Results
#
# The actual stock prices from Google for the year 2017 are in red.
# The predicted stock prices for the same period of time are in blue, in the same plot.
# Visualising the results: real (red) vs predicted (blue) 2017 prices.
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
# ### Conclusion
#
# From the plot above, we can conclude that for 200 epochs, we have quite a big improvement over the results obtained from training the same dataset over 100 epochs.
# This, consequently, proves that, higher the epochs, higher will be the success rate of the predictions (until ofcourse it reaches a saturation value, or starts overfitting, whichever comes first), and inevitably, higher will be the computational time and the necessary computational power for the training and predicting.
#
# Stock market people can, thus, rely to some extent on the use of Recurrent Neural Networks to predict short-term (>= 1 month and <=1 year, approximately) stock market movements and the general price trends, almost as same as the RBF-kernel models.
# --- end of file: Short_term-RNN.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Introduction
#
# A very important property that needs to be estimated when using Structure Based Models (SBMs) is the folding temperature. In order to study a physical change with molecular simulations we need to focus the sampling over the region of interest in the potential energy surface. When studying protein folding this can be achieved by setting the folding free energy to zero, which happens when the system has no preference of being in either the folded or unfolded state (i.e. it visits the two states with equal probability during the simulation). One way of achieving this is by rising the temperature until we observe the above property at the obtained emsemble. However, an indirect and easier way of estimating the folding temperature is by noting that the heat capacity has a maximum at this temperature. Thus, by estimating the heat capacity in a range of temperatures around the folding temperature we can easily estimate its value.
#
# The heat capacity is related to the variations in the potential energy (V) by:
#
# $Cv = \frac{\partial V}{\partial T}$
#
# This value can be estimated from an ensemble of simulated states with:
#
# $Cv = \frac{ \langle V^2 \rangle - \langle V \rangle^2}{k_BT^2}$
#
# Where, $ \langle X \rangle $ means an ensemble-averaged value of the property X, $k_B$ is the Boltzmann constant and T the temperature of the simulation.
#
# In this tutorial we employ the WHAM method using the PyWham program to improve and facilitate the estimation of the heat capacity values in a more thorough theoretical framework.
# ### Requirements
#
# - [OpenMM python API](http://openmm.org/)
# - [sbmOpenMM library](https://bitbucket.org/compbiochbiophlab/sbm-openmm/src)
# - [Numpy](https://numpy.org/)
# - [Matplotlib](https://matplotlib.org/)
# - [PyWham](http://pywham.net/)
# ### Estimating the folding temperature using the CA model
#
# In this tutorial we will estimate the folding temperature of a small protein system using the CA model. First, we will run several short simulations at different temperatures to estimate roughly the location of the folding temperature. After this we focus the sampling in this region to have a better estimate. We start by loading OpenMM and the sbmOpenMM library:
# +
#Import OpenMM library
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
#Import sbmOpenMM library
import sbmOpenMM
# -
# We load the input pdb and the contact file for our system:
#Set the input locations as variables.
pdb_file = 'inputs/1YPA_I.pdb'
contact_file = 'inputs/1YPA_I_CA.contacts'
# We create the SBM CA model object:
#Create an sbmOpenMM.system() object (coarse-grained C-alpha model built
#from the PDB structure and its native-contact map).
sbmCAModel = sbmOpenMM.models.getCAModel(pdb_file, contact_file)
# Let's now generate a for loop to run several simulations at different temperatures. In each iteration we will redefine the integrator and simulation context to set up, each time, a simulation from the same starting coordinates. We also change the name of the energy file to be written so we can store them and read them later for analysis. We span the range between 100 and 200 RTU with a step of 5 RTU.
# +
import time
#Iterate over the temperature range of interest (100-200 RTU, step 5).
for temperature in range(100,201,5):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'_rough.data'
    #Rebuild the integrator and context so every temperature starts a
    #fresh simulation from the same initial coordinates.
    integrator = LangevinIntegrator(temperature, 1/picosecond, 0.5*femtoseconds)
    simulation = Simulation(sbmCAModel.topology, sbmCAModel.system, integrator)
    #Set the initial coordinates
    simulation.context.setPositions(sbmCAModel.positions)
    #Add a SBM reporter that writes energies every 1 picosecond = 2000 steps (at 0.5 fs timestep).
    simulation.reporters.append(sbmOpenMM.sbmReporter(energy_file, 2000, sbmObject=sbmCAModel,
                                step=True, potentialEnergy=True, temperature=True))
    #Run each simulation for 1 ns = 2 million steps, timing the wall clock.
    start_time = time.time()
    simulation.step(2000000)
    print("--- Finished simlation at T=%s in %s seconds ---" % (temperature, (time.time() - start_time)))
# -
# In order to analyse the simulations we need to read each energy file created above and extract the potential energy from it. To facilitate this we write a function that reads the comma-separated-value energy file into a dictionary that contains all the energy values as numpy arrays.
# +
import numpy as np


def readOpenMMReporterFile(reporter_file):
    """Parse a comma-separated OpenMM reporter file into numpy arrays.

    The first line is a header such as '#"Step","Potential Energy (kJ/mole)"';
    the '#' and quote characters are stripped to produce the keys.

    Returns a dict mapping each column name to a 1-D float numpy array,
    in the file's column order.
    """
    with open(reporter_file, 'r') as ef:
        lines = ef.readlines()
    # Column names, in file order, from the header line.
    names = [c.replace('#', '').replace('"', '').strip()
             for c in lines[0].split(',')]
    # Parse each data row exactly once and collect values column-wise
    # (the previous version re-split every line once per column, doing
    # O(rows * cols) splits).
    columns = [[] for _ in names]
    for line in lines[1:]:
        for col, value in zip(columns, line.strip().split(',')):
            col.append(float(value))
    # Convert each column to a numpy array, preserving header order.
    return {name: np.array(col) for name, col in zip(names, columns)}
# -
# First, we take a peek at the returned dictionary from the previous function to understand how to call the energy values. It contains all the values of each energy term in the energy file as numpy arrays. To see the name of the keys of the dictionary we load an arbitrary file with the function and then we print its keys:
#Read an arbitrary energy file with the function readOpenMMReporterFile
simulationData = readOpenMMReporterFile('energy_100_rough.data')
#Print the keys of the returned dictionary
print(simulationData.keys())
# We see that the potential energy can be called from the dictionary by using the string 'Potential Energy (kJ/mole)'. We can now read all the simulated files and plot the energy progress of each trajectory to have an idea of how the simulations are behaving.
import matplotlib.pyplot as plt
# +
#Iterate over the temperature range of interest.
for temperature in range(100,201,5):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'_rough.data'
    #Read the energy data from each energy file
    simulationData = readOpenMMReporterFile(energy_file)
    #For easy reading we store the potential energy numpy array into a variable
    V = simulationData['Potential Energy (kJ/mole)']
    #Plot the potential energy (one trace per temperature, shared axes)
    plt.plot(V)
plt.xlabel('Time [ps]')
plt.ylabel('Potential Energy (kJ/mole)')
# -
# We observe a discontinuity in the potential energy plots. We see roughly two populations of simulations, one at high (actually positive) values of potential energy and the other one at low (or negative) values. We expect this two populations to be correlated with the temperatures of the simulations. To confirm this we are going to repeat our plot but using a color gradient to plot each temperature. We import colors and cm (color maps) from matplotlib to create our color palette.
# +
import matplotlib.colors as mcol
import matplotlib.cm as cm
#Create a blue-to-red color map normalized over the explored
#temperature range (100-200 RTU).
cm1 = mcol.LinearSegmentedColormap.from_list("MyCmapName", ["b","r"])
cnorm = mcol.Normalize(vmin=100, vmax=200)
cpick = cm.ScalarMappable(norm=cnorm, cmap=cm1)
cpick.set_array([])
# -
# We repeat the plot using a gradient that is going from cold (blue) to hot (red) simulations.
# +
#Iterate over the temperature range of interest.
for temperature in range(100,201,5):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'_rough.data'
    #Read the energy data from each energy file
    simulationData = readOpenMMReporterFile(energy_file)
    #For easy reading we store the potential energy numpy array into a variable
    V = simulationData['Potential Energy (kJ/mole)']
    #Plot the potential energy using the previously defined color gradient
    #(blue = cold, red = hot); thinner lines for better visualization.
    plt.plot(V, color=cpick.to_rgba(temperature), lw=0.5)
#Add labels to the plot
plt.xlabel('Time [ps]')
plt.ylabel('Potential Energy (kJ/mole)')
#Add a colorbar to the plot
plt.colorbar(cpick,label="Temperature (RTU)")
# -
# Now is more clear that the two groups correspond to high and low temperature simulations. This happens because most of the potential energy change comes from the rupture of native contacts (highly negative term in the force field), and, at the folding temperature, this happens abruptly and cooperatively, hence the discontinuity in the potential energy plot.
#
# This plot aids us into see that we are spanning a range contaiining the folding temperature for the protein system. We expect it to be below 150 RTU given that we have some blue plots in the high temperature population. To have a more quantitative estimation of the unfolding temperature we will plot the heat capacity as a function of the simulated temeperature.
# To facilitate the calculation of the heat capacity and other relevant quantities from the SBM simulations, we are going to employ the PyWham program. PyWham is a flexible implementation of the Weighted Histogram Analysis Method ([WHAM](https://onlinelibrary.wiley.com/doi/abs/10.1002/jcc.540130812)) implemented in Python 2, that can be executed thorugh the command line as a separate program.
#
# First, we need to create the files with the correct format to be read by PyWham. In this case we are only concerned with the heat capacity, which can be obtained from the potential energy only. We write the potential energy values for each simulated temperature as separated files in a folder we name 'heatCapacityData'. We use numpy to write the potential energy arrays as columns into the output files.
# +
import os
#Create the output folder if it does not exist
folderName = 'heatCapacityData'
if not os.path.exists(folderName):
    os.mkdir(folderName)
#Iterate over the temperature range of interest.
for temperature in range(100,201,5):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'_rough.data'
    #Read the energy data from each energy file
    simulationData = readOpenMMReporterFile(energy_file)
    #For easy reading we store the potential energy numpy array into a variable
    V = simulationData['Potential Energy (kJ/mole)']
    #We define the path name of the output file (one column of potential
    #energies per temperature, the format PyWham expects)
    fileName = folderName+'/'+str(temperature)+'_rough.data'
    #Save the potential energies into a file using numpy.savetxt method
    np.savetxt(fileName, V, newline="\n")
# -
# PyWham uses an xml file to specify any calculation to be carried out. An example input file, 'pywham_hc_rough_estimate.xml', is stored in the input folder. The file includes all the paths to the files generated by our procedure. For more information about how to set up and execute PyWham please read the [PyWham manual](http://pywham.net/documentation/index.html).
#
# A note of caution: PyWham uses pyhton2, so please be sure that you are using the correct python interpreter to execute the program.
# The ouput file from PyWham heat capacity calculation is called 'pywham_hc_rough_estimate.out'. The file contains just two columns; the temeperature and the heat capacity. We use matlplotlib to plot the calculated values:
# +
#Create lists to store the calculated values.
temperature = []
heat_capacity = []
#Read the PyWham heat capacity output file
with open('pywham_hc_rough_estimate.out', 'r') as hcf:
    #Iterate over the lines and store the values
    for line in hcf:
        ls = line.strip().split() #line split into columns
        temperature.append(float(ls[0])) #column 1: temperature
        heat_capacity.append(float(ls[1])) #column 2: heat capacity
#Plot the heat capacity dependence on temperature
plt.plot(temperature, heat_capacity)
plt.xlabel('Temperature [RTU]')
plt.ylabel('$C_v$ [ (kj/mol)/RTU ]')
#Mark the rough folding-temperature estimate (the Cv maximum, ~140 RTU)
plt.axvline(140, ls='--')
# -
# The estimate above is a bit rough for our purpose, a better estimate will come by increasing the simulation time and decreasing the step size in the temperature range. With the above result we now focus on exploring nearby a temperature of 140 RTU. We explore the system every one degree and, this time, using 10 ns of simulation time.
#Iterate over the refined temperature range (135-145 RTU, step 1).
for temperature in range(135,145+1):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'.data'
    #Rebuild the integrator and context so every temperature starts a
    #fresh simulation from the same initial coordinates.
    integrator = LangevinIntegrator(temperature, 1/picosecond, 0.5*femtoseconds)
    simulation = Simulation(sbmCAModel.topology, sbmCAModel.system, integrator)
    #Set the initial coordinates
    simulation.context.setPositions(sbmCAModel.positions)
    #Add a SBM reporter that writes energies every 1 picosecond = 2000 steps (at 0.5 fs timestep).
    simulation.reporters.append(sbmOpenMM.sbmReporter(energy_file, 2000, sbmObject=sbmCAModel,
                                step=True, potentialEnergy=True, temperature=True))
    #Run each simulation for 10 ns = 20 million steps, timing the wall clock.
    start_time = time.time()
    simulation.step(20000000)
    print("--- Finished simlation at T=%s in %s seconds ---" % (temperature, (time.time() - start_time)))
# We write again the PyWahm input files using the newly created energy files. We run PyWham using the input script in the inputs folder: 'pywham_hc_better_estimate.xml'
# +
#Create a list to store the paths of the output files
output_files = []
#Create the output folder if it does not exist
folderName = 'heatCapacityData'
if not os.path.exists(folderName):
    os.mkdir(folderName)
#Iterate over the refined temperature range.
for temperature in range(135,145+1):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'.data'
    #Read the energy data from each energy file
    simulationData = readOpenMMReporterFile(energy_file)
    #For easy reading we store the potential energy numpy array into a variable
    V = simulationData['Potential Energy (kJ/mole)']
    #We define the path name of the output file
    fileName = folderName+'/'+str(temperature)+'.data'
    #Save the potential energies into a file using numpy.savetxt method
    np.savetxt(fileName, V, newline="\n")
    output_files.append(fileName)
# -
# Finally we plot the newly estimated heat capacity plot from the output simulation.
# +
#Create lists to store the calculated values.
temperature = []
heat_capacity = []
#Read the PyWham heat capacity output file computed from the 10 ns runs
with open('pywham_hc_better_estimate.out', 'r') as hcf:
    #Iterate over the lines and store the two columns
    for line in hcf:
        ls = line.strip().split()
        temperature.append(float(ls[0]))  #column 1: temperature
        heat_capacity.append(float(ls[1]))  #column 2: heat capacity
#Plot the heat capacity dependence on temperature
plt.plot(temperature, heat_capacity)
plt.xlabel('Temperature [RTU]')
plt.ylabel('$C_v$ [ (kj/mol)/RTU ]')
#Mark the refined folding-temperature estimate (~136 RTU)
plt.axvline(136, ls='--')
# -
# We see a shift of 4 degrees in the estimated folding temperature, but we can expect this estimate to be better than the previous one.
#
# Finally, we are going to plot how the prediction of the folding temperature depends on the length of the simulation. For this we use the 10ns simulations and we run PyWham using incresing simulation data. Iteratively, we are going to write data files each with extra 1 ns-data points. From these we generate 10 plots of heat capacity vs temperature and plot them using a color gradient representing the incresing simulation time.
# +
#Create a dict mapping iteration index -> list of written file paths
output_files = {}
for i in range(10):
    output_files[i] = []
#Create the output folder if it does not exist
folderName = 'iterativeHeatCapacityData'
if not os.path.exists(folderName):
    os.mkdir(folderName)
#Iterate over the refined temperature range.
for temperature in range(135,145+1):
    #Define the name of the energy file for each temperature
    energy_file = 'energy_'+str(temperature)+'.data'
    #Read the energy data from each energy file
    simulationData = readOpenMMReporterFile(energy_file)
    #Second iteration to trim the data, adding 1 ns extra each iteration
    for i in range(10):
        #Slice the energy numpy array using 1000 frames = 1000 ps = 1 ns extra each iteration.
        V = simulationData['Potential Energy (kJ/mole)'][:1000*(i+1)]
        #We define the path name of the output file
        fileName = folderName+'/energy_'+str(temperature)+'_time_'+str(i+1).zfill(2)+'.data'
        #Save the potential energies into a file using numpy.savetxt method
        np.savetxt(fileName, V, newline="\n")
        output_files[i].append(fileName)
# -
# Now we need to execute PyWham iteratively for each set of input files generated. To assist this in the 'inputs' folder there is 10 script, 'pywham_iterative_"$iteration_number".xml', that reads the corresponding set of input files. After generating the PyWham output files we proceede to plot them:
#Re-normalize the blue-to-red color map for the iteration range (1-10 ns).
cm1 = mcol.LinearSegmentedColormap.from_list("MyCmapName", ["b","r"])
cnorm = mcol.Normalize(vmin=1, vmax=10)
cpick = cm.ScalarMappable(norm=cnorm, cmap=cm1)
cpick.set_array([])
# +
# Overlay the heat-capacity curves computed from increasing amounts of
# simulation data (1-10 ns), colored from blue (short) to red (long).
for i in range(10):
    #Lists for the values read from this iteration's output file.
    temperature = []
    heat_capacity = []
    #Read the iterative PyWham heat capacity output file
    with open('pywham_iterative_'+str(i+1).zfill(2)+'.out', 'r') as hcf:
        #Iterate over the lines and store the two columns
        for line in hcf:
            ls = line.strip().split()
            temperature.append(float(ls[0]))
            heat_capacity.append(float(ls[1]))
    #Plot the heat capacity dependence on temperature using the previously
    #defined color gradient.  (The original also plotted the same curve a
    #second time in the default color cycle, which obscured the gradient —
    #that redundant call is removed.)
    plt.plot(temperature, heat_capacity, color=cpick.to_rgba(i+1))
#Add labels
plt.xlabel('Temperature [RTU]')
plt.ylabel('$C_v$ [ (kj/mol)/RTU ]')
plt.axvline(136, ls='--', color='k', lw=0.1)
#Add a colorbar to the plot
plt.colorbar(cpick,label="Simulated Time (ns)")
# -
# For the studied system we see that the prediction of the folding temperature convergences around 7 ns of simulated time. This gives us confidence that the 10 ns of simulation was enough for giving a reasonable estimate of the folding temperature at short simulation times.
# ### Summary
#
# In this tutorial we focused on estimating the folding temperature by looking at the maximum value of the heat capacity vs temperature plot. We first did a rough estimate to locate the range of values where to focus the search for the folding temperature. A more thorough simulation revealed that the first estimation was off by 3 degrees. The procedure was applied using a coarse grained forcefield, but it should be generalized to any forcefield derived data.
# --- end of file: tutorials/basic/03-FoldingTemperature/foldingTemperature.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-AM_BERT]
# language: python
# name: conda-env-.conda-AM_BERT-py
# ---
# +
#USING NEURAL NETWORK + BERT
import numpy as np
import pandas as pd
import torch
import transformers as ppb # pytorch transformers
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import svm, datasets
from sklearn.metrics import plot_confusion_matrix
import seaborn as sns
from sklearn import metrics
# -
# Load the hand-labeled dataset and binarise the YN_INNOVATION label
# ('yes' -> 1, anything else -> 0).
df = pd.read_excel(r"Data_600_Labeled_Final.xlsx")
df['innovation_num']=df.YN_INNOVATION.eq('yes').mul(1)
# Sort so 'yes' rows come first, then drop the last 154 rows — presumably to
# balance the classes; confirm against the label counts.
df = df.sort_values(by='YN_INNOVATION', ascending=False)
df.drop(df.tail(154).index, inplace = True)
df
# +
## Want BERT instead of distilBERT? Uncomment the following line:
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')

# Load pretrained model/tokenizer
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
# -

# Tokenize every document, truncated to BERT's 512-token limit.
# NOTE(review): `padding=True` has no visible effect when encoding one text
# at a time — padding is done manually in the next cell; confirm intent.
tokenized = df['full_text'].apply((lambda x: tokenizer.encode(x, add_special_tokens=True, padding=True,
                                                              truncation=True,max_length=512)))
# +
# Manually right-pad every tokenized document to the length of the longest
# one so they can be stacked into a single (n_docs, max_len) array.
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)

padded = np.array([i + [0]*(max_len-len(i)) for i in tokenized.values])
# -

# `padded` is already an ndarray; no need to re-wrap it with np.array().
padded.shape

# Attention mask: 1 for real tokens, 0 for the padding added above.
attention_mask = np.where(padded != 0, 1, 0)
attention_mask.shape

# +
input_ids = torch.tensor(padded)
attention_mask = torch.tensor(attention_mask)

# BUG FIX: the original called print(input_ids) *before* input_ids was
# defined, which raised a NameError; the debug print now runs after the
# tensor exists.
print(input_ids)

# Run the frozen BERT model once; no gradients are needed since BERT is
# used purely as a feature extractor here.
with torch.no_grad():
    last_hidden_states = model(input_ids, attention_mask=attention_mask)
# -

# Keep only the [CLS] token embedding of each document as its feature vector.
features = last_hidden_states[0][:,0,:].numpy()
# Binary labels aligned row-for-row with the BERT feature matrix.
labels = df['innovation_num']

# Default split: 75% train / 25% test, unstratified and unseeded — scores
# will vary between runs.
train_features, test_features, train_labels, test_labels = train_test_split(features, labels)

# +
from sklearn.neural_network import MLPClassifier

# Small two-hidden-layer MLP trained with SGD on the BERT [CLS] features.
nn_clf = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
# -

nn_clf.fit(train_features, train_labels)

# Mean accuracy on the held-out split.
nn_clf.score(test_features, test_labels)
| src/classification/aditi/BERT Model Testing/06-7-2021_NN_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
# %matplotlib inline
# -
# Download the CAC 40 index ("^FCHI") price history via pyaf's benchmark
# dataset helpers.
cac40_symbol = "^FCHI"
b = tsds.load_yahoo_stock_price(cac40_symbol)
df = b.mPastData

df.sample(4)

df.info()

# +
# Train a pyaf signal-decomposition model to forecast H=12 future points.
lEngine = autof.cForecastEngine()
lEngine

H = 12;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , 'Date' , cac40_symbol, H);

# +
lEngine.getModelInfo();

# +
# Per-candidate-model performance details from the decomposition search.
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());

# +
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
# -

# NOTE(review): "standrdPlots" (sic) is the method name pyaf actually
# exposes — do not "fix" the spelling; confirm against the installed
# pyaf version.
lEngine.standrdPlots();
| notebooks_sandbox/Yahoo_Stocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="myuQZdFV-5yx"
# # Importing necessary libraries
# + id="sAprmm6I-iS3"
import urllib.request, sys, time
from bs4 import BeautifulSoup
import requests
import pandas as pd
# + [markdown] id="gTmZt-gU--eN"
#
# # Specifying number of pages to get
# + id="US-2c3IO_Bpm"
# Number of PolitiFact listing pages to scrape.
pagesToGet = 1
# + [markdown] id="E4KYhT3t_HNm"
# # Creating an empty array
# + id="PSuH6b3N_F6E"
# Accumulates one (Statement, Link, Date, Source, Label) tuple per fact-check.
upperframe = []
# + colab={"base_uri": "https://localhost:8080/"} id="P95Hki6w-oR_" outputId="a345938b-a7f6-4cd3-98ae-cccc41dc160e"
# iterating from 1 to the number of pages to get +1
# BUG FIX: the output file was previously (re)opened in "w" mode *inside*
# the page loop, so every page truncated the rows written for the page
# before it and rewrote the header.  The file is now opened once, before
# the loop (it is closed in a later cell).
filename = "NEWS.csv"
f = open(filename, "w", encoding='utf-8')
headers = "Statement,Link,Date, Source, Label\n"
f.write(headers)

for page in range(1, pagesToGet + 1):
    print('processing page :', page)
    # enter the URL of the Website
    url = 'https://www.politifact.com/factchecks/list/?page=' + str(page)
    # Print that URL
    print(url)
    # need to throw an exception, so the code should be in a try-except block
    try:
        # fetch the listing page; bound to `response` so it no longer
        # shadows the loop variable `page`
        response = requests.get(url)
    # this describes what to do if an exception is thrown
    except Exception as e:
        error_type, error_obj, error_info = sys.exc_info()  # get the exception information
        print('ERROR FOR LINK:', url)  # print the link that cause the problem
        print(error_type, 'Line:', error_info.tb_lineno)  # print error info and line that threw the exception
        continue  # ignore this page. Abandon this and go back.
    # be polite to the server between requests
    time.sleep(2)
    # Use the Beautiful Soup Library
    soup = BeautifulSoup(response.text, 'html.parser')
    frame = []
    # Find all the links in the <li> that is the list directory with the following class name
    links = soup.find_all('li', attrs={'class': 'o-listicle__item'})
    # Print the size
    print(len(links))
    # iterate another loop in links to find other class of data
    for j in links:
        Statement = j.find("div", attrs={'class': 'm-statement__quote'}).text.strip()
        Link = "https://www.politifact.com"
        Link += j.find("div", attrs={'class': 'm-statement__quote'}).find('a')['href'].strip()
        # the footer text ends with the publication date
        Date = j.find('div', attrs={'class': 'm-statement__body'}).find('footer').text[-14:-1].strip()
        Source = j.find('div', attrs={'class': 'm-statement__meta'}).find('a').text.strip()
        Label = j.find('div', attrs={'class': 'm-statement__content'}).find('img',
                                                                            attrs={'class': 'c-image__original'}).get(
            'alt').strip()
        frame.append((Statement, Link, Date, Source, Label))
        # commas inside fields are replaced with '^' to keep the CSV columns aligned
        f.write(Statement.replace(",", "^") + "," + Link + "," + Date.replace(",", "^") + "," + Source.replace(",", "^") + "," + Label.replace(",", "^") + "\n")
    upperframe.extend(frame)
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="HOhpNPCR-q19" outputId="478a7f88-e28e-4f6d-8ed5-e8469fe569d3"
# Close the CSV written during scraping and build a DataFrame from the
# accumulated rows.
f.close()
data = pd.DataFrame(upperframe, columns=['Statement', 'Link', 'Date', 'Source', 'Label'])
data.head()
| Recommender Systems/News_Web_Scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data):
        # Payload carried by this node.
        self.data = data
        # Reference to the next node; None marks the tail of the list.
        self.address_of_next_node = None
def Take_Input():
    """Read space-separated integers from stdin and build a linked list.

    Reading stops at the first -1 (which is not stored).  Returns a
    (head, count) pair: the head Node (or None for an empty list) and the
    number of nodes created.
    """
    values = [int(token) for token in input().split()]
    head = None
    tail = None
    count = 0
    for value in values:
        if value == -1:
            break
        node = Node(value)
        count += 1
        if head is None:
            # First node becomes both head and tail.
            head = node
            tail = node
        else:
            # Append at the tail and advance it.
            tail.address_of_next_node = node
            tail = node
    return head, count
def Print_LL(head):
    """Print the list starting at `head` as 'Linked List: a -> b -> None'."""
    print("Linked List: ", end="")
    node = head
    while node is not None:
        print(f"{node.data} -> ", end="")
        node = node.address_of_next_node
    # Terminate the chain visually with the sentinel.
    print("None")
    return
def Print_Length_of_LL(head):
    """Return the node count from the (head, count) pair built by Take_Input."""
    # The second element of the pair is the number of nodes.
    return head[1]
def Print_ith_element_in_LL(head):
    """Prompt for an index i and print the i-th (0-based) node's data.

    `head` is the (head_node, count) pair returned by Take_Input.  Returns
    silently when i exceeds the last valid index (count - 1).
    """
    input_of_ith = int(input("Enter the ith element: "))
    if input_of_ith > (Print_Length_of_LL(head) - 1):
        return
    else:
        current = head[0]
        count = 0
        # BUG FIX: the original condition was
        #     while current is None or count < input_of_ith:
        # which would dereference a None node (AttributeError / infinite
        # loop) if the bounds guard above were ever bypassed.  Walking
        # while the node exists AND the target index is not yet reached
        # is the intended traversal.
        while current is not None and count < input_of_ith:
            count += 1
            current = current.address_of_next_node
        print(f'{input_of_ith}th element is: {current.data}')
# Build the list from stdin and exercise the helpers above.
head_of_ll = Take_Input()
Print_LL(head_of_ll[0])
x = Print_Length_of_LL(head_of_ll)
# BUG FIX: corrected the "Lenght" typo in the user-facing message.
print(f'Length of Linked List is: {x}')
Print_ith_element_in_LL(head_of_ll)
| 11. Linked List-1/6.Find_ith_element_in_LL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Teams Calendar
Use schedule_and_results table in GCP to extract a complete calendar for each team where they stand in their points & stats
Save results in Google Big Query
"""

# Absolute path to the GCP service-account key used for BigQuery access.
credential_keys = '/Users/antoinetl/Documents/nhl_prediction/nhl_prediction/account_keys/hockey-prediction-qc-9c75aa8a78f9.json'
# +
# Import the important packages
import pandas as pd
import numpy as np
import datetime as dt
# Connexion GBQ
import pandas_gbq
from google.oauth2 import service_account
# Authenticate pandas-gbq once, globally, so the later read_gbq/to_gbq
# calls need no explicit credentials argument.
credentials = service_account.Credentials.from_service_account_file(credential_keys,)
pandas_gbq.context.credentials = credentials
# https://pandas-gbq.readthedocs.io/en/latest/intro.html
# +
# Set project_id to your Google Cloud Platform project ID.
project_id = "hockey-prediction-qc"

# Completed ('Final') regular-season ('R') games only, in chronological order.
sql = """
SELECT *
from `hockey_prediction_qc.historical_games_detailed`
where gameType = 'R'
and status_abstractGameState = 'Final'
order by date;
"""

df_raw = pandas_gbq.read_gbq(sql, project_id=project_id)
# +
# Build, for every (team, season, calendar day), the team's cumulative
# record and per-game stat averages up to and including that day.
df = df_raw.sort_values(by=['season', 'date'])

#cols = ['team', 'season', 'date', 'games_played']
d = []

# get seasons in dataset - Loop over seasons
seasons = df['season'].explode().unique()
for season in seasons:
    df_season = df[df['season'] == season]
    # get teams who played in season - Loop over teams
    teams = df_season['teams_home_team_id'].explode().unique()
    for team in teams:
        # only select team's games (home or away); .copy() avoids pandas
        # SettingWithCopy warnings on the date conversion below
        df_team = df_season[(df_season['teams_home_team_id'] == team) | (df_season['teams_away_team_id'] == team)].copy()
        df_team['date'] = df_team['date'].astype('datetime64[ns]')
        # Beginning and end of this team's season
        start_date = df_team['date'].min()
        end_date = df_team['date'].max()
        delta = dt.timedelta(days=1)
        # Loop over days in season (the original also called
        # df_team.sort_values('date') here and discarded the result — a
        # no-op that has been removed)
        while start_date <= end_date:
            # all of the team's games up to and including the current day
            df_team_day = df_team[df_team['date'] <= start_date]

            # stats accumulated while playing at home
            df_team_day_home = df_team_day[df_team_day['teams_home_team_id'] == team]
            # games played
            index = df_team_day_home.index
            games_played_home = len(index)
            home_shots_for = df_team_day_home['home_teamStats_teamSkaterStats_shots'].sum()
            home_shots_against = df_team_day_home['away_teamStats_teamSkaterStats_shots'].sum()
            home_shots_blocked = df_team_day_home['home_teamStats_teamSkaterStats_blocked'].sum()
            home_goals_for = df_team_day_home['home_teamStats_teamSkaterStats_goals'].sum()
            home_goals_against = df_team_day_home['away_teamStats_teamSkaterStats_goals'].sum()
            home_hits = df_team_day_home['home_teamStats_teamSkaterStats_hits'].sum()
            home_pim = df_team_day_home['home_teamStats_teamSkaterStats_pim'].sum() # penality minutes
            home_powerPlayGoals_for = df_team_day_home['home_teamStats_teamSkaterStats_powerPlayGoals'].sum() #power play goals
            home_powerPlayGoals_against = df_team_day_home['away_teamStats_teamSkaterStats_powerPlayGoals'].sum()
            home_powerPlayOpportunities = df_team_day_home['home_teamStats_teamSkaterStats_powerPlayOpportunities'].sum()
            home_takeaways = df_team_day_home['home_teamStats_teamSkaterStats_takeaways'].sum()
            home_giveaways = df_team_day_home['home_teamStats_teamSkaterStats_giveaways'].sum()

            # stats accumulated while playing away
            df_team_day_away = df_team_day[df_team_day['teams_away_team_id'] == team]
            # games played
            index = df_team_day_away.index
            games_played_away = len(index)
            away_shots_for = df_team_day_away['away_teamStats_teamSkaterStats_shots'].sum()
            away_shots_against = df_team_day_away['home_teamStats_teamSkaterStats_shots'].sum()
            away_shots_blocked = df_team_day_away['away_teamStats_teamSkaterStats_blocked'].sum()
            away_goals_for = df_team_day_away['away_teamStats_teamSkaterStats_goals'].sum()
            # BUG FIX: goals against while playing away are the HOME team's
            # goals; the original summed the away team's own goals here,
            # duplicating away_goals_for.
            away_goals_against = df_team_day_away['home_teamStats_teamSkaterStats_goals'].sum()
            away_hits = df_team_day_away['away_teamStats_teamSkaterStats_hits'].sum()
            away_pim = df_team_day_away['away_teamStats_teamSkaterStats_pim'].sum() # penality minutes
            away_powerPlayGoals_for = df_team_day_away['away_teamStats_teamSkaterStats_powerPlayGoals'].sum() #power play goals
            away_powerPlayGoals_against = df_team_day_away['home_teamStats_teamSkaterStats_powerPlayGoals'].sum()
            away_powerPlayOpportunities = df_team_day_away['away_teamStats_teamSkaterStats_powerPlayOpportunities'].sum()
            away_takeaways = df_team_day_away['away_teamStats_teamSkaterStats_takeaways'].sum()
            away_giveaways = df_team_day_away['away_teamStats_teamSkaterStats_giveaways'].sum()

            # season-to-date totals (home + away)
            games_played = games_played_home + games_played_away
            shots_for = home_shots_for + away_shots_for
            shots_against = home_shots_against + away_shots_against
            shots_blocked = home_shots_blocked + away_shots_blocked
            goals_for = home_goals_for + away_goals_for
            goals_against = home_goals_against + away_goals_against
            hits = home_hits + away_hits
            pim = home_pim + away_pim
            powerPlayGoals_for = home_powerPlayGoals_for + away_powerPlayGoals_for
            powerPlayGoals_against = home_powerPlayGoals_against + away_powerPlayGoals_against
            powerPlayOpportunities = home_powerPlayOpportunities + away_powerPlayOpportunities
            takeaways = home_takeaways + away_takeaways
            giveaways = home_giveaways + away_giveaways

            # per-game averages (games_played >= 1 here since start_date
            # begins on the date of the team's first game)
            shots_for_avg = (home_shots_for + away_shots_for) / games_played
            shots_against_avg = (home_shots_against + away_shots_against) / games_played
            shots_blocked_avg = (home_shots_blocked + away_shots_blocked) / games_played
            goals_for_avg = (home_goals_for + away_goals_for) / games_played
            goals_against_avg = (home_goals_against + away_goals_against) / games_played
            hits_avg = (home_hits + away_hits) / games_played
            pim_avg = (home_pim + away_pim) / games_played
            powerPlayGoals_for_avg = (home_powerPlayGoals_for + away_powerPlayGoals_for) / games_played
            powerPlayGoals_against_avg = (home_powerPlayGoals_against + away_powerPlayGoals_against) / games_played
            powerPlayOpportunities_avg = (home_powerPlayOpportunities + away_powerPlayOpportunities) / games_played
            takeaways_avg = (home_takeaways + away_takeaways) / games_played
            giveaways_avg = (home_giveaways + away_giveaways) / games_played

            # team_stats
            # get last night's game id (second-to-last row of the two most
            # recent games; with a single game played this is the same game,
            # which a later cell blanks out for games_played == 1)
            df_stats_max_1 = pd.concat([df_team_day.tail(2)])
            last_game_id = df_stats_max_1['gameID'].values[0]
            # create df with only last game by team to date
            df_stats_max = pd.concat([df_team_day.tail(1)])
            tonights_game_id = df_stats_max['gameID'].values[0]
            if df_stats_max['teams_home_team_id'].values[0] == team:
                n_wins = df_stats_max['teams_home_leagueRecord_wins'].values[0]
                n_losses = df_stats_max['teams_home_leagueRecord_losses'].values[0]
                n_ot = df_stats_max['teams_home_leagueRecord_ot'].values[0]
            else:
                n_wins = df_stats_max['teams_away_leagueRecord_wins'].values[0]
                n_losses = df_stats_max['teams_away_leagueRecord_losses'].values[0]
                n_ot = df_stats_max['teams_away_leagueRecord_ot'].values[0]

            # NOTE(review): this scores a win as 3 points (football-style)
            # rather than the NHL's 2 — presumably intentional; confirm.
            points = (3 * n_wins) + (1 * n_ot)
            ppg = points / games_played

            d.append(
                {
                    'team': team,
                    'season': season,
                    'date': start_date,
                    'games_played': games_played,
                    'tonights_game_id': tonights_game_id,
                    'last_game_id': last_game_id,
                    'wins': n_wins,
                    'losses': n_losses,
                    'ot': n_ot,
                    'points' : points,
                    'ppg' : ppg,
                    'shots_for_avg' : shots_for_avg,
                    'shots_against_avg' : shots_against_avg,
                    'shots_blocked_avg' : shots_blocked_avg,
                    'goals_for_avg' : goals_for_avg,
                    'goals_against_avg' : goals_against_avg,
                    'hits_avg' : hits_avg,
                    'pim_avg' : pim_avg,
                    'powerPlayGoals_for_avg' : powerPlayGoals_for_avg,
                    'powerPlayGoals_against_avg' : powerPlayGoals_against_avg,
                    'powerPlayOpportunities_avg' : powerPlayOpportunities_avg,
                    'takeaways_avg' : takeaways_avg,
                    'giveaways_avg' : giveaways_avg
                }
            )

            start_date += delta

        print("it's done for team " , team, " for season ", season)

calendar_stats = pd.DataFrame(d)
# +
# A team's first game of the season has no previous game, so blank the
# last_game_id that the loop above copied from the single available row.
calendar_stats.loc[calendar_stats['games_played'] == 1, 'last_game_id'] = ""

pd.set_option('display.max_columns', None)
calendar_stats.head()
# -
def create_lag_wlotp(df, p):
    """Add lagged wins/losses/OT/points columns to a team-calendar DataFrame.

    For every row, the team's cumulative wins, losses, overtime losses and
    points are compared with the values the same team had `p` games earlier
    in the same season; the differences go into new columns
    `wins_last_p`, `loss_last_p`, `ot_last_p` and `points_last_p`, and the
    points-per-game over that window into `ppg_last_p`.  Rows with fewer
    than `p` prior games are compared against zero.  The DataFrame is
    mutated in place and also returned.
    """
    for prefix in ('wins', 'loss', 'ot', 'points', 'ppg'):
        df[f'{prefix}_last_{p}'] = 0

    for idx, row in df.iterrows():
        prior_game = row['games_played'] - p
        if prior_game >= 1:
            # Same team and season, exactly p games earlier.
            earlier = df[(df['games_played'] == prior_game)
                         & (df['team'] == row['team'])
                         & (df['season'] == row['season'])]
            wins_then = earlier['wins'].mean()
            loss_then = earlier['losses'].mean()
            ot_then = earlier['ot'].mean()
            points_then = earlier['points'].mean()
        else:
            # Not enough history: lag against the start of the season.
            wins_then = loss_then = ot_then = points_then = 0

        points_delta = row['points'] - points_then
        df.loc[idx, f'wins_last_{p}'] = row['wins'] - wins_then
        df.loc[idx, f'loss_last_{p}'] = row['losses'] - loss_then
        df.loc[idx, f'ot_last_{p}'] = row['ot'] - ot_then
        df.loc[idx, f'points_last_{p}'] = points_delta
        df.loc[idx, f'ppg_last_{p}'] = points_delta / p
    return df
# Lagged form over the last game and the last 10 games.
calendar_stats = create_lag_wlotp(calendar_stats, 1)

calendar_stats = create_lag_wlotp(calendar_stats, 10)

# +
# TODO: Set project_id to your Google Cloud Platform project ID.
project_id = "hockey-prediction-qc"

# TODO: Set table_id to the full destination table ID (including the dataset ID).
table_id = 'hockey_prediction_qc.teams_calendar'

# Overwrite the destination table with the freshly computed calendar.
pandas_gbq.to_gbq(calendar_stats, table_id, project_id=project_id, if_exists='replace')
# -
| notebooks/teams_calendar_table.ipynb |
# # Random forest
#
# In this notebook, we will present the random forest models and
# show the differences with the bagging classifiers.
#
# Random forests are a popular model in machine learning. They are a
# modification of the bagging algorithm. In bagging, any classifier or
# regressor can be used. In random forests, the base classifier or regressor
# must be a decision tree. In our previous example, we used a decision tree but
# we could have used a linear model as the regressor for our bagging algorithm.
#
# In addition, random forests are different from bagging when used with
# classifiers: when searching for the best split, only a subset of the original
# features are used. By default, this subset of features is equal to the square
# root of the total number of features. In regression, the total number of
# available features will be used.
#
# We will illustrate the usage of a random forest and compare it with the
# bagging regressor on the "California housing" dataset.
# +
from sklearn.datasets import fetch_california_housing

# Load the California housing regression dataset as pandas objects.
data, target = fetch_california_housing(return_X_y=True, as_frame=True)
target *= 100  # rescale the target in k$
# -
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor

# Two 100-tree ensembles fitted in parallel on all cores: a random forest
# and a bagging ensemble built from the same kind of decision tree.
tree = DecisionTreeRegressor(random_state=0)
random_forest = RandomForestRegressor(
    n_estimators=100, random_state=0, n_jobs=-1)
bagging = BaggingRegressor(
    base_estimator=tree, n_estimators=100, n_jobs=-1)

# Cross-validated scores for both models on the housing data.
scores_random_forest = cross_val_score(random_forest, data, target)
scores_bagging = cross_val_score(bagging, data, target)

print(f"Statistical performance of random forest: "
      f"{scores_random_forest.mean():.3f} +/- {scores_random_forest.std():.3f}")
print(f"Statistical performance of bagging: "
      f"{scores_bagging.mean():.3f} +/- {scores_bagging.std():.3f}")
# -
# Notice that we don't need to provide a `base_estimator` parameter to
# `RandomForestRegressor`: the base estimator is always a decision tree. Also note that the
# scores are almost identical. This is because our problem is a regression
# problem and therefore, the number of features used in random forest and
# bagging is the same.
#
# For classification problems, we would need to pass a tree model instance
# with the parameter `max_features="sqrt"` to `BaggingClassifier` if we wanted
# it to have the same behaviour as the random forest classifier.
#
# ## Classifiers details
#
# Until now, we have focused on regression problems. There are some
# differences between regression and classification.
#
# First, the `base_estimator` should be chosen depending on the problem that
# needs to be solved: use a classifier for a classification problem and a
# regressor for a regression problem.
#
# Secondly, the aggregation method is different:
#
# - in regression, the average prediction is computed. For instance, if
# three learners predict 0.4, 0.3 and 0.31, the aggregation will output 0.33;
# - while in classification, the class with the highest probability (after
# averaging the predicted probabilities) is predicted. For instance, if three
# learners predict (for two classes) the probability (0.4, 0.6), (0.3, 0.7)
# and (0.31, 0.69), the aggregation probability is (0.33, 0.67) and the
# second class would be predicted.
#
# ## Summary
#
# We saw in this section two algorithms that use bootstrap samples to create
# an ensemble of classifiers or regressors. These algorithms train several
# learners on different bootstrap samples. The predictions are then
# aggregated. This operation can be done in a very efficient manner since the
# training of each learner can be done in parallel.
| notebooks/ensemble_random_forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from numpy import *
from numpy.random import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.basemap import Basemap
from PlotFuncs import *
from LabFuncs import *
from Params import *
from HaloFuncs import *
from scipy.stats import norm
import pandas
import cmocean
from scipy.stats import gaussian_kde
from copy import copy
# Load data
# Full Gaia-SDSS halo sample plus the "Shards" (substructure) subsample.
df = pandas.read_csv('../data/Gaia-SDSS.csv')
df_S = pandas.read_csv('../data/GAIA_SDSS_Shards.csv')
names = df_S.group_id.unique()
# Fit
# Two-component Gaussian mixture in (v_R, v_phi, v_z, [Fe/H]); the means
# are initialised so one component starts at Sausage-like and the other at
# halo-like metallicity.
# NOTE(review): `mixture` is not imported in this cell — presumably it is
# re-exported by one of the wildcard imports above (confirm).
means_init = zeros(shape=(2,4))
means_init[0,:] = array([0.0,0.0,0.0,-1.4]) # Sausage
means_init[1,:] = array([0.0,0.0,0.0,-1.95]) # Halo
covariance_type = 'full'
data = array([df.GalRVel,df.GalphiVel,df.GalzVel,df.feh]).T
clf = mixture.GaussianMixture(n_components=2, covariance_type=covariance_type,means_init=means_init)
clf.fit(data)
meens = clf.means_
covs = clf.covariances_
ws = clf.weights_
if covariance_type=='diag':
    covs_diag = clf.covariances_
    # Expand the per-component variance vectors into full diagonal matrices.
    # (FIX: the original allocated this zeros array twice in a row; once
    # is enough.)
    covs = zeros(shape=(2,4,4))
    covs[0,:,:] = diag(covs_diag[0,:])
    covs[1,:,:] = diag(covs_diag[1,:])

# Velocity anisotropy beta = 1 - (sigma_phi^2 + sigma_z^2)/(2 sigma_r^2):
# the radially biased (large-beta) component is labelled as the Sausage.
betas = 1 - (covs[:,1,1]+covs[:,2,2])/(2*covs[:,0,0])
labs = ['Halo','Halo']
lstys = ['--','--']
lstys[argmax(betas)] = ':'
labs[argmax(betas)] = 'Sausage'
# Alternative sample cuts; only the "Sausage cut" at the bottom is active.
# No cuts:
# dfc = df.loc[:]
# pltname = 'fv-feh2D'
# txt = array([r'{\bf Cut}: None'])
# nbins_1D = 50
# nbins_2D = 40
# fehnorm = 1.0

#Radial action/energy cut
# cut = (df.E/1.0e5)>((df.JR/5000)**(1.0/3.0)-1.8)
# dfc = df.loc[cut]
# pltname = 'fv-feh2D-ractioncut'
# txt = array([r'{\bf Cut}: $E>\left(\frac{J_R}{5000}\right)^{1/3} - 1.9\times10^5$ km$^2$ s$^{-2}$'])
# nbins_1D = 50
# nbins_2D = 40
# fehnorm = 1.0

# Energy + metallicity cut
# dfc = df.loc[(df['E'] > -1.37e5)]
# pltname = 'fv-feh2D-wmetallicitycut'
# txt = array([r'{\bf Cuts}: $E>-1.37\times 10^5$ [km$^2$/s$^2$]', '[Fe/H]$<-2.0$'])
# nbins_1D = 30
# nbins_2D = 30
# dfc1 = df.loc[df['feh'] < -1.9]
# fehnorm = (shape(dfc)[0])/(1.0*shape(dfc1)[0])

# Sausage cut (active): drop stars confidently assigned to the Sausage
# component of the mixture fit and keep only high-energy stars.
Ps = clf.predict_proba(data)
saus_mask = Ps[:,1]<Ps[:,0]
cut = (~(Ps[:,0]>0.75))&(df['E'] > -1.41e5)
dfc = df.loc[cut]
pltname = 'fv-feh2D-sausagecut'
txt = array([r'{\bf Cuts}: Sausage stars removed',r'$E>-1.41\times 10^5$ [km$^2$/s$^2$]'])
nbins_1D = 50
nbins_2D = 40
fehnorm = 1.0
#######
# Data
# Velocity components and metallicity for the cut sample...
vx = dfc.GalRVel
vy = dfc.GalphiVel
vz = dfc.GalzVel
feh = dfc.feh

# ...and for the Shards subsample.
# NOTE(review): the Shards tangential column is GalTVel while the main
# sample uses GalphiVel — presumably the same quantity; confirm.
vxS = df_S.GalRVel
vyS = df_S.GalTVel
vzS = df_S.GalzVel
fehS = df_S.feh

# INPUT PLOT
# Axis ranges, grid resolution and contour levels (log-density levels).
vmin=-595.0
vmax=595.0
nfine=500
#levels=[-6.2,-2.3,0]
levels = [-2.3,0]
tit_fontsize=30
col_an = 'k'
point_size = 8
lblsize = 31
xlblsize = 35
def_alph = 0.2
col_shards = 'green'
# Colormap and edge/face colours drawn from cmocean's 'matter' palette;
# values below vmin render as white.
cmap = cmocean.cm.matter
col_ex = (cmocean.cm.matter(linspace(0,1,10)))
col_edge = col_ex[5,:]
col_face = col_ex[0,:]
cmap = copy(plt.get_cmap(cmap))
cmap.set_under('white', 1.0)

# Range
fehmin = -3.0
fehmax = -0.6
vfine = linspace(vmin,vmax,nfine)
fehfine = linspace(fehmin,fehmax,nfine)
V1,V2 = meshgrid(vfine,vfine)
VF1,FEH = meshgrid(vfine,fehfine)
def fv_1D_an(covs,meens,ws,vfine,clf,i):
    """Analytic 1D marginal of a (mixture of) multivariate Gaussian(s).

    Marginalises the component(s) described by `covs`/`meens`/`ws` onto
    coordinate `i` by Schur-complementing the inverse covariance, sums the
    weighted components on the grid `vfine`, and returns the result
    normalised to unit integral.  `covs` may be a stack of matrices (one
    per mixture component) or a single matrix.  `clf` is unused.
    """
    fv = zeros(shape=shape(vfine))
    if ndim(covs)>2:
        nm = shape(covs)[0]
    else:
        nm = 1
    # BUG FIX: the original looped over range(shape(covs)[0]), which for a
    # single 2D covariance matrix iterates over its *rows* (adding the
    # same component several times) rather than over the nm components.
    # The repetition happened to cancel in the final normalisation, but
    # the loop now iterates exactly nm times, matching fv_2D_an.
    for k in range(0,nm):
        if nm>1:
            U = squeeze(linalg.inv(covs[k,:,:]))
            v0 = meens[k,i]
            w0 = ws[k]
        else:
            U = squeeze(linalg.inv(covs[:,:]))
            v0 = meens[i]
            w0 = ws
        # Schur complement of the (i,i) entry of the precision matrix:
        # the precision of the 1D marginal along coordinate i.
        U0 = U[i,i]
        V = U[i,:]
        V = delete(V, i, axis=0)
        W = delete(U, i, axis=0)
        W = delete(W, i, axis=1)
        U = U0 - linalg.multi_dot([V, linalg.inv(W), V.T])
        fv += w0*exp(-0.5*(vfine-v0)*U*(vfine-v0))
    # Normalise to unit area on the supplied grid.
    fv /= trapz(fv,vfine)
    return fv
def fv_2D_an(covs,meens,ws,V1,V2,clf,i,j):
    """Analytic 2D marginal (peak-normalised log-density) of the mixture.

    Marginalises the Gaussian component(s) onto the coordinate pair
    (i, j) via a 2x2 Schur complement of the precision matrix, evaluates
    the (unnormalised) weighted sum on the meshgrid (V1, V2), and returns
    log(density) shifted so its maximum is 0 — convenient for contouring
    at fixed log-levels.  `clf` is unused.
    """
    fv = zeros(shape=shape(V1))
    nm = shape(covs)[0] if ndim(covs)>2 else 1
    for k in range(0,nm):
        # Precision matrix, marginal means and weight of component k.
        if nm>1:
            U = squeeze(linalg.inv(covs[k,:,:]))
            v10, v20, w0 = meens[k,i], meens[k,j], ws[k]
        else:
            U = squeeze(linalg.inv(covs[:,:]))
            v10, v20, w0 = meens[i], meens[j], ws
        # 2x2 Schur complement: the precision of the (i, j) marginal.
        U0 = array([[U[i,i],U[i,j]],[U[j,i],U[j,j]]])
        V = vstack((U[i,:],U[j,:]))
        V = delete(V, (i,j), axis=1)
        W = delete(U, (i,j), axis=0)
        W = delete(W, (i,j), axis=1)
        Ut = U0 - linalg.multi_dot([V, linalg.inv(W), V.T])
        dv1 = V1-v10
        dv2 = V2-v20
        # Unnormalised Gaussian; the overall constant drops out with the
        # peak subtraction below.
        fv += w0*exp(-0.5*(dv1**2.0*Ut[0,0]+dv2**2.0*Ut[1,1]+2*dv1*dv2*Ut[1,0]))
    fv = log(fv)
    fv = fv-amax(fv)
    return fv
# Set plot rc params
plt.rcParams['axes.linewidth'] = 2.5
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

# 4x4 corner-style grid: diagonal panels hold 1D histograms of
# (v_r, v_phi, v_z, [Fe/H]); the lower triangle holds the 2D panels.
# NOTE(review): `gridspec` is not imported in this cell — presumably it
# comes from one of the wildcard imports above (confirm).
fig, axarr = plt.subplots(4, 4,figsize=(15,15))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.0,hspace=0.0)

ax_x = plt.subplot(gs[0,0])
ax_y = plt.subplot(gs[1,1])
ax_z = plt.subplot(gs[2,2])
ax_feh = plt.subplot(gs[3,3])
ax_yx = plt.subplot(gs[1,0])
ax_zx = plt.subplot(gs[2,0])
ax_zy = plt.subplot(gs[2,1])
ax_xfeh = plt.subplot(gs[3,0])
ax_yfeh = plt.subplot(gs[3,1])
ax_zfeh = plt.subplot(gs[3,2])

# Remove the unused upper-triangle axes.
fig.delaxes(plt.subplot(gs[0,1]))
fig.delaxes(plt.subplot(gs[0,2]))
fig.delaxes(plt.subplot(gs[0,3]))
fig.delaxes(plt.subplot(gs[1,2]))
fig.delaxes(plt.subplot(gs[1,3]))
fig.delaxes(plt.subplot(gs[2,3]))

# 1D plots
# Each diagonal panel overlays: analytic per-component marginals (dashed/
# dotted), the total mixture (solid), the data histogram (filled + edge)
# and the Shards histogram (green step).
# NOTE(review): `normed=1` was removed from matplotlib hist in 3.1 (use
# `density=True`) — this cell requires an older matplotlib; confirm the
# pinned version before changing it.
ax_x.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,0),linestyle=lstys[0],linewidth=3,color=col_an,zorder=5)
ax_x.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,0),linestyle=lstys[1],linewidth=3,color=col_an,zorder=5)
ax_x.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,0),'-',linewidth=3,color=col_an,zorder=5)
ax_x.hist(vx,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,normed=1,label=None)
ax_x.hist(vxS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',normed=1,label=None,zorder=5)
ax_x.hist(vx,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',normed=1,label=None)
# Off-screen patch used only to register the filled-histogram legend style.
ax_x.fill_between([-900,-900],[0.0001,0.0001],lw=3,edgecolor=col_edge,facecolor=col_face,label=None)
ax_x.set_ylabel(r'$v_r$ [km s$^{-1}$]',fontsize=xlblsize)

ax_y.hist(vy,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,normed=1)
ax_y.hist(vy,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',normed=1)
ax_y.hist(vyS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',normed=1,label='Shards',zorder=5)
ax_y.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,1),linestyle=lstys[0],linewidth=3,color=col_an)
ax_y.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,1),linestyle=lstys[1],linewidth=3,color=col_an)
ax_y.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,1),'-',linewidth=3,color=col_an,zorder=5)

ax_z.hist(vz,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,normed=1)
ax_z.hist(vz,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',normed=1)
ax_z.hist(vzS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',normed=1,label='Shards',zorder=5)
ax_z.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,2),linestyle=lstys[0],linewidth=3,color=col_an)
ax_z.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,2),linestyle=lstys[1],linewidth=3,color=col_an)
ax_z.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,2),'-',linewidth=3,color=col_an,zorder=5)

# Metallicity panel: manual histogram so the counts can be rescaled by
# fehnorm before drawing.
feh_hist,fehv = histogram(feh,range=[fehmin,fehmax],bins=nbins_1D,normed=1)
feh0 = (fehv[1:]+fehv[0:-1])/2.0
ax_feh.fill_between(feh0,feh_hist*fehnorm,color=col_face,label=None,step='pre')
ax_feh.step(feh0,feh_hist*fehnorm,color=col_edge,linewidth=3,label=None)
ax_feh.hist(fehS,range=[fehmin,fehmax],bins=30,color=col_shards,linewidth=3,histtype='step',normed=1,label='Shards',zorder=5)
#ax_feh.plot(1000*fehfine,fv_1D_an(fehfine-feh_iso,sig_iso,3),'-',linewidth=3,color=col_an,label=r'SHM$^{++}$',zorder=0)
ax_feh.plot(fehfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],fehfine,clf,3),linestyle=lstys[0],linewidth=3,color=col_an,label=labs[0],zorder=5)
ax_feh.plot(fehfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],fehfine,clf,3),linestyle=lstys[1],linewidth=3,color=col_an,label=labs[1],zorder=5)
ax_feh.plot(fehfine,fv_1D_an(covs,meens,ws,fehfine,clf,3),'-',linewidth=3,color=col_an,zorder=5,label='Total')
# Another off-screen patch, only to register the 'Stars' legend entry.
ax_feh.fill_between(1000*feh[0:1],1000*feh[0:1],y2=-100.0,facecolor=col_face,edgecolor=col_edge,lw=3,label='Stars')
ax_feh.legend(fontsize=lblsize-2,frameon=False,bbox_to_anchor=(0.0, 2.0), loc=2, borderaxespad=0.)
# 2D plots
ax_yx.hexbin(vx,vy,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yx.hexbin(vx,vy,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yx.hexbin(vx,vy,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yx.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,0,1),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_yx.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,0,1),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_yx.set_ylabel(r'$v_\phi$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zx.hexbin(vx,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zx.hexbin(vx,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zx.hexbin(vx,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zx.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,0,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zx.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,0,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_zx.set_ylabel(r'$v_z$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zy.hexbin(vy,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zy.hexbin(vy,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zy.hexbin(vy,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zy.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,1,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zy.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,1,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_xfeh.hexbin(vx,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_xfeh.hexbin(vx,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_xfeh.hexbin(vx,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_xfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,0,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_xfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,0,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_yfeh.hexbin(vy,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yfeh.hexbin(vy,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yfeh.hexbin(vy,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,1,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_yfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,1,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_yfeh.fill_between(vfine,-0.0016*vfine-0.9,y2=0.0,color='dodgerblue',alpha=0.3)
ax_yfeh.text(200,-1.0,'Disk',color='dodgerblue',fontsize=25,rotation=-45)
ax_zfeh.hexbin(vz,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zfeh.hexbin(vz,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zfeh.hexbin(vz,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,2,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,2,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
vtx = array([-400,-200,0,200,400])
ftx = arange(-3.0,-0.6,0.4)
ax_xfeh.set_xticks(vtx)
ax_yfeh.set_xticks(vtx)
ax_zfeh.set_xticks(vtx)
ax_xfeh.set_yticks(ftx)
ax_yfeh.set_yticks(ftx)
ax_zfeh.set_yticks(ftx)
ax_feh.set_xticks(ftx)
ax_yx.set_xticks(vtx)
ax_zx.set_xticks(vtx)
ax_zy.set_xticks(vtx)
ax_yx.set_yticks(vtx)
ax_zx.set_yticks(vtx)
ax_zy.set_yticks(vtx)
# Tick params
ax_x.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_y.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_z.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_zx.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_yx.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_zy.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_xfeh.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_yfeh.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_zfeh.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
ax_feh.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
for i in range(0,size(txt)):
plt.gcf().text(0.9,0.88-i*0.05,txt[i],fontsize=35,horizontalalignment='right',verticalalignment='top')
ax_yx.set_xlim([vmin,vmax])
ax_yx.set_ylim([vmin,vmax])
ax_zx.set_xlim([vmin,vmax])
ax_zx.set_ylim([vmin,vmax])
ax_zy.set_xlim([vmin,vmax])
ax_zy.set_ylim([vmin,vmax])
ax_x.set_xlim([vmin,vmax])
ax_y.set_xlim([vmin,vmax])
ax_z.set_xlim([vmin,vmax])
ax_x.set_yticks([])
ax_y.set_yticks([])
ax_z.set_yticks([])
ax_feh.set_yticks([])
ax_x.set_yticklabels([])
ax_x.set_xticklabels([])
ax_y.set_yticklabels([])
ax_y.set_xticklabels([])
ax_z.set_yticklabels([])
ax_yx.set_xticklabels([])
ax_zy.set_yticklabels([])
ax_xfeh.set_ylim([fehmin,fehmax])
ax_yfeh.set_ylim([fehmin,fehmax])
ax_zfeh.set_ylim([fehmin,fehmax])
ax_yfeh.set_yticklabels([])
ax_zfeh.set_yticklabels([])
ax_feh.set_xlim([fehmin,fehmax])
ax_feh.set_ylim(bottom=0.0,top=1.7)
ax_xfeh.set_xlim([vmin,vmax])
ax_yfeh.set_xlim([vmin,vmax])
ax_zfeh.set_xlim([vmin,vmax])
ax_xfeh.set_ylabel(r'[Fe/H]',fontsize=xlblsize)
ax_xfeh.set_xlabel(r'$v_r$ [km s$^{-1}$]',fontsize=xlblsize)
ax_yfeh.set_xlabel(r'$v_\phi$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zfeh.set_xlabel(r'$v_z$ [km s$^{-1}$]',fontsize=xlblsize)
ax_feh.set_xlabel(r'[Fe/H]',fontsize=xlblsize)
plt.sca(ax_feh)
plt.xticks(rotation=50)
plt.sca(ax_xfeh)
plt.xticks(rotation=50)
plt.sca(ax_yfeh)
plt.xticks(rotation=50)
plt.sca(ax_zfeh)
plt.xticks(rotation=50)
plt.show()
fig.savefig('../plots/'+pltname+'.pdf',bbox_inches='tight')
fig.savefig('../plots/plots_png/'+pltname+'.png',bbox_inches='tight')
# +
# 3 component fit
# Initial guesses for the Gaussian-mixture means in (v_r, v_phi, v_z, [Fe/H])
# space, one row per component, to steer the EM fit towards the populations.
means_init = zeros(shape=(3,4))
means_init[0,:] = array([0.0,0.0,0.0,-1.4]) # Sausage
means_init[1,:] = array([0.0,0.0,0.0,-2.0]) # Halo
means_init[2,:] = array([0.0,126.0,0.0,-1.8]) # Prograde (comment previously mislabelled this row 'Halo')
weights_init = array([0.5,0.34,0.16])
covariance_type = 'full'
# Stack the fitted quantities into an (n_stars, 4) array
data = array([df.GalRVel,df.GalphiVel,df.GalzVel,df.feh]).T
clf = mixture.GaussianMixture(n_components=3, covariance_type=covariance_type,
                              means_init=means_init,
                              weights_init=weights_init,
                              tol=1e-12,
                              reg_covar=1.0e-8,
                              max_iter=10000)
clf.fit(data)
meens = clf.means_
covs = clf.covariances_
ws = clf.weights_
# For a diagonal covariance model sklearn returns per-component variance
# vectors; expand them into full (4,4) matrices so the plotting code can
# treat both cases identically.
# Bug fix: this branch previously allocated a (2,4,4) array (twice, a
# duplicated line) and filled only components 0 and 1, although this is a
# 3-component fit.
if covariance_type=='diag':
    covs_diag = clf.covariances_
    covs = zeros(shape=(3,4,4))
    for k in range(3):
        covs[k,:,:] = diag(covs_diag[k,:])
# Velocity anisotropy of each component from its covariance diagonal
betas = 1 - (covs[:,1,1]+covs[:,2,2])/(2*covs[:,0,0])
labs = ['Sausage','Halo','Prograde']
lstys = [':','--','-.']
# --- Sample selection: pick ONE of the configurations below ---
#No cuts:
dfc = df.loc[:]
pltname = 'fv-feh2D_3components'
txt = array([r'{\bf Cut}: None'])
nbins_1D = 50
nbins_2D = 40
fehnorm = 1.0
# Sausage/Prograde cut
# (alternative configuration: uncomment to plot only the stars NOT assigned
# to the Sausage/Prograde components by the mixture model)
# Ps = clf.predict_proba(data)
# imod = argmax(Ps,axis=1)
# dfc = df.loc[(imod==1)]#&(df['E'] > -1.41e5)]
# pltname = 'fv-feh2D_3components_SausageProgradeCut'
# txt = array([r'{\bf Cuts}: Sausage+Prograde stars removed',r'$E>-1.41\times 10^5$ [km$^2$/s$^2$]'])
# nbins_1D = 50
# nbins_2D = 40
# fehnorm = 1.0
# Data
# Galactocentric velocity components and metallicity of the selected stars
vx = dfc.GalRVel
vy = dfc.GalphiVel
vz = dfc.GalzVel
feh = dfc.feh
# Corner-style 4x4 grid: 1D marginals on the diagonal, pairwise 2D panels
# below the diagonal; unused upper-triangle axes are deleted.
fig, axarr = plt.subplots(4, 4,figsize=(15,15))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.0,hspace=0.0)
ax_x = plt.subplot(gs[0,0])
ax_y = plt.subplot(gs[1,1])
ax_z = plt.subplot(gs[2,2])
ax_feh = plt.subplot(gs[3,3])
ax_yx = plt.subplot(gs[1,0])
ax_zx = plt.subplot(gs[2,0])
ax_zy = plt.subplot(gs[2,1])
ax_xfeh = plt.subplot(gs[3,0])
ax_yfeh = plt.subplot(gs[3,1])
ax_zfeh = plt.subplot(gs[3,2])
fig.delaxes(plt.subplot(gs[0,1]))
fig.delaxes(plt.subplot(gs[0,2]))
fig.delaxes(plt.subplot(gs[0,3]))
fig.delaxes(plt.subplot(gs[1,2]))
fig.delaxes(plt.subplot(gs[1,3]))
fig.delaxes(plt.subplot(gs[2,3]))
# 1D plots
# Normalised data histograms with per-component (dotted/dashed/dash-dot) and
# total (solid) analytic GMM marginals overlaid.  Fix: 'density=True'
# replaces the 'normed' keyword removed in matplotlib 3.1 / numpy 1.24.
ax_x.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,0),linestyle=lstys[0],linewidth=3,color=col_an,zorder=5)
ax_x.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,0),linestyle=lstys[1],linewidth=3,color=col_an,zorder=5)
ax_x.plot(vfine,ws[2]*fv_1D_an(covs[2,:,:],meens[2,:],ws[2],vfine,clf,0),linestyle=lstys[2],linewidth=3,color=col_an,zorder=5)
ax_x.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,0),'-',linewidth=3,color=col_an,zorder=5)
ax_x.hist(vx,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,density=True,label=None)
ax_x.hist(vxS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',density=True,label=None,zorder=5)
ax_x.hist(vx,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',density=True,label=None)
ax_x.fill_between([-900,-900],[0.0001,0.0001],lw=3,edgecolor=col_edge,facecolor=col_face,label=None)
ax_x.set_ylabel(r'$v_r$ [km s$^{-1}$]',fontsize=xlblsize)
ax_y.hist(vy,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,density=True)
ax_y.hist(vy,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',density=True)
ax_y.hist(vyS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',density=True,label='Shards',zorder=5)
ax_y.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,1),linestyle=lstys[0],linewidth=3,color=col_an)
ax_y.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,1),linestyle=lstys[1],linewidth=3,color=col_an)
ax_y.plot(vfine,ws[2]*fv_1D_an(covs[2,:,:],meens[2,:],ws[2],vfine,clf,1),linestyle=lstys[2],linewidth=3,color=col_an)
ax_y.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,1),'-',linewidth=3,color=col_an,zorder=5)
ax_z.hist(vz,range=[vmin,vmax],bins=nbins_1D,color=col_face,linewidth=3,density=True)
ax_z.hist(vz,range=[vmin,vmax],bins=nbins_1D,color=col_edge,linewidth=3,histtype='step',density=True)
ax_z.hist(vzS,range=[vmin,vmax],bins=30,color=col_shards,linewidth=3,histtype='step',density=True,label='Shards',zorder=5)
ax_z.plot(vfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],vfine,clf,2),linestyle=lstys[0],linewidth=3,color=col_an)
ax_z.plot(vfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],vfine,clf,2),linestyle=lstys[1],linewidth=3,color=col_an)
ax_z.plot(vfine,ws[2]*fv_1D_an(covs[2,:,:],meens[2,:],ws[2],vfine,clf,2),linestyle=lstys[2],linewidth=3,color=col_an)
ax_z.plot(vfine,fv_1D_an(covs,meens,ws,vfine,clf,2),'-',linewidth=3,color=col_an,zorder=5)
# Metallicity marginal: histogram built by hand so it can be rescaled by
# fehnorm before drawing
feh_hist,fehv = histogram(feh,range=[fehmin,fehmax],bins=nbins_1D,density=True)
feh0 = (fehv[1:]+fehv[0:-1])/2.0
ax_feh.fill_between(feh0,feh_hist*fehnorm,color=col_face,label=None,step='pre')
ax_feh.step(feh0,feh_hist*fehnorm,color=col_edge,linewidth=3,label=None)
ax_feh.hist(fehS,range=[fehmin,fehmax],bins=30,color=col_shards,linewidth=3,histtype='step',density=True,label='Shards',zorder=5)
#ax_feh.plot(1000*fehfine,fv_1D_an(fehfine-feh_iso,sig_iso,3),'-',linewidth=3,color=col_an,label=r'SHM$^{++}$',zorder=0)
ax_feh.plot(fehfine,ws[0]*fv_1D_an(covs[0,:,:],meens[0,:],ws[0],fehfine,clf,3),linestyle=lstys[0],linewidth=3,color=col_an,label=labs[0],zorder=5)
ax_feh.plot(fehfine,ws[1]*fv_1D_an(covs[1,:,:],meens[1,:],ws[1],fehfine,clf,3),linestyle=lstys[1],linewidth=3,color=col_an,label=labs[1],zorder=5)
ax_feh.plot(fehfine,ws[2]*fv_1D_an(covs[2,:,:],meens[2,:],ws[2],fehfine,clf,3),linestyle=lstys[2],linewidth=3,color=col_an,label=labs[2],zorder=5)
ax_feh.plot(fehfine,fv_1D_an(covs,meens,ws,fehfine,clf,3),'-',linewidth=3,color=col_an,zorder=5,label='Total')
# Off-screen patch drawn only to create the filled 'Stars' legend entry
ax_feh.fill_between(1000*feh[0:1],1000*feh[0:1],y2=-100.0,facecolor=col_face,edgecolor=col_edge,lw=3,label='Stars')
ax_feh.legend(fontsize=lblsize-2,frameon=False,bbox_to_anchor=(0.0, 2.2), loc=2, borderaxespad=0.)
# 2D plots
# Hexbin density maps of the data with the analytic per-component contours
# overlaid.  Fix: each hexbin was previously drawn three times with byte-
# identical arguments; a single call produces the same image.
ax_yx.hexbin(vx,vy,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yx.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,0,1),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_yx.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,0,1),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_yx.contour(vfine,vfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],V1,V2,clf,0,1),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
ax_yx.set_ylabel(r'$v_\phi$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zx.hexbin(vx,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zx.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,0,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zx.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,0,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_zx.contour(vfine,vfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],V1,V2,clf,0,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
ax_zx.set_ylabel(r'$v_z$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zy.hexbin(vy,vz,extent=(vmin,vmax,vmin,vmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zy.contour(vfine,vfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],V1,V2,clf,1,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zy.contour(vfine,vfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],V1,V2,clf,1,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_zy.contour(vfine,vfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],V1,V2,clf,1,2),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
# Velocity-metallicity panels
ax_xfeh.hexbin(vx,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_xfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,0,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_xfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,0,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_xfeh.contour(vfine,fehfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],VF1,FEH,clf,0,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
ax_yfeh.hexbin(vy,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_yfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,1,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_yfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,1,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_yfeh.contour(vfine,fehfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],VF1,FEH,clf,1,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
# Shade the region of the v_phi-[Fe/H] plane annotated as the disk
ax_yfeh.fill_between(vfine,-0.0016*vfine-0.9,y2=0.0,color='dodgerblue',alpha=0.3)
ax_yfeh.text(200,-1.0,'Disk',color='dodgerblue',fontsize=25,rotation=-45)
ax_zfeh.hexbin(vz,feh,extent=(vmin,vmax,fehmin,fehmax),gridsize=nbins_2D,cmap=cmap,vmin=0.001,linewidths=0.2)
ax_zfeh.contour(vfine,fehfine,fv_2D_an(covs[0,:,:],meens[0,:],ws[0],VF1,FEH,clf,2,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[0])
ax_zfeh.contour(vfine,fehfine,fv_2D_an(covs[1,:,:],meens[1,:],ws[1],VF1,FEH,clf,2,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[1])
ax_zfeh.contour(vfine,fehfine,fv_2D_an(covs[2,:,:],meens[2,:],ws[2],VF1,FEH,clf,2,3),levels=levels,colors=col_an,linewidths=3,linestyles=lstys[2])
# Tick locations: velocities every 200 km/s, metallicities every 0.4 dex
vtx = array([-400,-200,0,200,400])
ftx = arange(-3.0,-0.6,0.4)
# Velocity-metallicity panels: velocity on x, [Fe/H] on y
for panel in (ax_xfeh, ax_yfeh, ax_zfeh):
    panel.set_xticks(vtx)
    panel.set_yticks(ftx)
ax_feh.set_xticks(ftx)
# Velocity-velocity panels: velocity ticks on both axes
for panel in (ax_yx, ax_zx, ax_zy):
    panel.set_xticks(vtx)
    panel.set_yticks(vtx)
# Tick params
# One common tick style for every panel in the grid
for panel in (ax_x, ax_y, ax_z, ax_zx, ax_yx, ax_zy,
              ax_xfeh, ax_yfeh, ax_zfeh, ax_feh):
    panel.tick_params(which='major',direction='in',width=2,length=10,right=True,top=True,pad=7,labelsize=24)
# Print the cut description in the top-right corner of the figure
for i in range(0,size(txt)):
    plt.gcf().text(0.9,0.88-i*0.05,txt[i],fontsize=35,horizontalalignment='right',verticalalignment='top')
# Common velocity limits for all velocity panels
ax_yx.set_xlim([vmin,vmax])
ax_yx.set_ylim([vmin,vmax])
ax_zx.set_xlim([vmin,vmax])
ax_zx.set_ylim([vmin,vmax])
ax_zy.set_xlim([vmin,vmax])
ax_zy.set_ylim([vmin,vmax])
ax_x.set_xlim([vmin,vmax])
ax_y.set_xlim([vmin,vmax])
ax_z.set_xlim([vmin,vmax])
# The 1D marginals are normalised, so their y ticks carry no information;
# remove them, and drop tick labels that duplicate a neighbouring panel's
ax_x.set_yticks([])
ax_y.set_yticks([])
ax_z.set_yticks([])
ax_feh.set_yticks([])
ax_x.set_yticklabels([])
ax_x.set_xticklabels([])
ax_y.set_yticklabels([])
ax_y.set_xticklabels([])
ax_z.set_yticklabels([])
ax_yx.set_xticklabels([])
ax_zy.set_yticklabels([])
# Metallicity limits on the bottom-row panels
ax_xfeh.set_ylim([fehmin,fehmax])
ax_yfeh.set_ylim([fehmin,fehmax])
ax_zfeh.set_ylim([fehmin,fehmax])
ax_yfeh.set_yticklabels([])
ax_zfeh.set_yticklabels([])
ax_feh.set_xlim([fehmin,fehmax])
ax_feh.set_ylim(bottom=0.0,top=1.7)
ax_xfeh.set_xlim([vmin,vmax])
ax_yfeh.set_xlim([vmin,vmax])
ax_zfeh.set_xlim([vmin,vmax])
# Axis labels only on the outer (bottom/left) edges of the grid
ax_xfeh.set_ylabel(r'[Fe/H]',fontsize=xlblsize)
ax_xfeh.set_xlabel(r'$v_r$ [km s$^{-1}$]',fontsize=xlblsize)
ax_yfeh.set_xlabel(r'$v_\phi$ [km s$^{-1}$]',fontsize=xlblsize)
ax_zfeh.set_xlabel(r'$v_z$ [km s$^{-1}$]',fontsize=xlblsize)
ax_feh.set_xlabel(r'[Fe/H]',fontsize=xlblsize)
# Rotate the crowded bottom-row tick labels
plt.sca(ax_feh)
plt.xticks(rotation=50)
plt.sca(ax_xfeh)
plt.xticks(rotation=50)
plt.sca(ax_yfeh)
plt.xticks(rotation=50)
plt.sca(ax_zfeh)
plt.xticks(rotation=50)
plt.show()
# Save vector (paper) and raster (web) versions of the figure
fig.savefig('../plots/'+pltname+'.pdf',bbox_inches='tight')
fig.savefig('../plots/plots_png/'+pltname+'.png',bbox_inches='tight')
# -
| code/Plot_VelocityMetallicityTriangle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dependencies: Import Splinter and BeautifulSoup
import time
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
# Set the executable path and initialize the chrome browser in splinter
def init_browser():
    """Create and return a splinter Chrome browser (visible, not headless).

    Fix: uses webdriver_manager (already imported above but previously
    unused) to download and cache a chromedriver matching the installed
    Chrome, instead of a hard-coded path that exists on only one machine.
    """
    executable_path = {'executable_path': ChromeDriverManager().install()}
    return Browser('chrome', **executable_path, headless=False)
#browser
# ## NASA Mars News
# +
# Using python to scrape website
browser = init_browser()
# Dictionary collecting all scraped Mars data
mars_data = {}
# Open the Nasa Mars Webpage (must be open to code)
url = 'https://mars.nasa.gov/news'
browser.visit(url)
# Give the JavaScript-rendered page time to load before grabbing the HTML
time.sleep(2)
# HTML object
html = browser.html
# Parse ('lxml') HTML with Beautiful Soup
soup = bs(html, 'html.parser')
# Retrieve the latest news title and paragraph
# Use 0 bc retrieval is a list and starts at 0, not one
news_t = soup.find_all('div', class_='content_title')[0].text
news_p = soup.find_all('div', class_='article_teaser_body')[0].text
print(news_t)
print("--------------------------------------------------------------------")
print(news_p)
# -
# ## JPL Mars Space Images - Featured Image
# Mars Image to be scraped
mars_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(mars_image_url)
# Click through to the full-size image (note: the button text on the page
# starts with a space)
browser.find_by_text(' FULL IMAGE').click()
# +
#HTML object
image_html = browser.html
#Parse HTML with Beautiful Soup
image_soup = bs(image_html, 'html.parser')
#find first Mars image url
img_path = image_soup.find('img', class_='fancybox-image')['src']
#combine url to get image path
featured_image_url = f'https://www.jpl.nasa.gov{img_path}'
print(f'featured_image_url = {featured_image_url}')
# -
# ### Mars Facts
#Visit Mars facts page and use Pandas to scrape the table
facts_url = 'https://space-facts.com/mars/'
browser.visit(facts_url)
time.sleep(1)
# +
# HTML object
# (renamed from 'mars_facts' so the raw HTML no longer shadows the
# DataFrame of the same name built below)
facts_html = browser.html
#Parse HTML with Beautiful Soup
soup_f = bs(facts_html, 'html.parser')
fact_table = soup_f.find('section', class_='sidebar widget-area clearfix')
column1 = fact_table.find_all('td', class_='column-1')
column2 = fact_table.find_all('td', class_='column-2')
# Scrape the two table columns; strip() removes leading/trailing whitespace
descriptions = [row.text.strip() for row in column1]
values = [row.text.strip() for row in column2]
# Convert scraped lists to a pandas DF
mars_facts = pd.DataFrame({
    "Description":descriptions,
    "Value":values
})
# Convert DF to html
mars_facts_html = mars_facts.to_html(header=False, index=False)
mars_facts
# -
# ### Mars Hemispheres
# Visit the USGS Astrogeology site
mars_hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(mars_hemi_url)
time.sleep(1)
# +
# Find the image url and title of each hemisphere
# HTML object
hemi_html = browser.html
#Parse HTML with Beautiful Soup
soup_h = bs(hemi_html, 'html.parser')
#Retrieve all items
items = soup_h.find_all('div', class_='item')
# Create an empty list
hemi_image_urls = []
# store the main url
hemi_url = 'https://astrogeology.usgs.gov'
mars_data = {}
#loop through items
for i, iv in enumerate(items):
    #store title
    title = iv.find('h3').text
    # store the link to full image from thumbnail page
    # NOTE(review): this reassigns hemi_url to the search-results page,
    # shadowing the base url set above (which is never used) -- confirm
    hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    # link for the full image website
    browser.visit(hemi_url)
    # click the i-th hemisphere heading on the freshly reloaded results page
    browser.find_by_tag('h3')[i].click()
    # HTML object for individual hemisphere sites
    image_url = browser.html
    # Parse HTML with Beautiful Soup for each hemisphere
    image_soup = bs(image_url, 'html.parser')
    # Full image path
    hemi_full_path = image_soup.find('img',class_='wide-image')['src']
    # retrieve full image source
    img_url = f'https://astrogeology.usgs.gov{hemi_full_path}'
    # append title and urls to list
    hemi_image_urls.append({"title": title,"image_url": img_url})
    mars_data['hemisphere_images'] = hemi_image_urls
    print(img_url)
# -
browser.quit()
| Mission_to_Mars/Mission_to_Mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import the modules
import datetime
import pathlib
import urllib
import os
import numpy as np
import spiceypy
# +
# Load the SPICE kernels via a meta file
spiceypy.furnsh('kernel_meta.txt')
# Create an initial date-time object that is converted to a string
# (UTC midnight on the day of the Orpheus fly-by discussed below)
datetime_utc = datetime.datetime(year=2021, month=11, day=21).strftime('%Y-%m-%dT%H:%M:%S')
# Convert to Ephemeris Time (ET) using the SPICE function utc2et
datetime_et = spiceypy.utc2et(datetime_utc)
# +
# Get the G*M value for the Sun
_, gm_sun_pre = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1)
GM_SUN = gm_sun_pre[0]
# +
# On spaceweather.com we can see that an asteroid has a close Earth fly-by:
# Orpheus on 2021-November-21.
#
# Will the encounter alter the orbit of the asteroid? Let's have a first look
# on the so-called sphere of influence (SOI) of our planet.
# A simple model assumes that the SOI is a sphere. The semi major axis is set
# to 1 AU:
# 1 AU in km
ONE_AU = spiceypy.convrt(x=1, inunit='AU', outunit='km')
# Set the G*M parameter of our planet
_, gm_earth_pre = spiceypy.bodvcd(bodyid=399, item='GM', maxn=1)
GM_EARTH = gm_earth_pre[0]
# Compute the SOI radius of the Earth
SOI_EARTH_R = ONE_AU * (GM_EARTH/GM_SUN) ** (2.0/5.0)
# Set one Lunar Distance (LD) in km (value from spaceweather.com)
ONE_LD = 384401.0
print(f'SOI of the Earth in LD: {SOI_EARTH_R/ONE_LD}')
# +
# Let's obtain the orbit elements data of 3361 Orpheus from
# https://ssd.jpl.nasa.gov/tools/sbdb_lookup.html#/?sstr=3361&view=OPD
# Before we compute a state vector of the asteroid and the current distance
# to our home planet we need to define a function to round the data. A common
# convention for scientific work is to round the data to one significant
# digit. We create a lambda function that rounds the values based on the
# provided measurement error
def round_sig(value, err):
    """Round *value* to the decimal place of the leading significant digit
    of its measurement error *err* (one-significant-digit convention for
    reporting measured quantities).

    E.g. err = 4.396e-8 -> floor(log10(err)) = -8 -> keep 8 decimal places.

    Converted from a lambda assignment to a proper ``def`` (PEP 8
    discourages binding lambdas to names); behaviour is unchanged.
    """
    return np.round(value, -1 * int(np.floor(np.log10(err))))
# +
# Set now the perihelion in km
# (orbital elements of 3361 Orpheus from JPL's Small-Body Database; every
# value is rounded to the decimal place of its quoted uncertainty)
neo_orpheus_perihelion_km = spiceypy.convrt(round_sig(0.8193931144261904, \
                                                      4.396E-8), \
                                            inunit='AU', outunit='km')
# Set the eccentricity
neo_orpheus_ecc = round_sig(0.3231489803944947, 3.6326E-8)
# Set the inclination, longitude of ascending node and argument of periapsis
# in radians
neo_orpheus_inc_rad = np.radians(round_sig(2.661237238614012, 3.5526E-6))
neo_orpheus_lnode_rad = np.radians(round_sig(188.6885422918818, 3.8154E-5))
neo_orpheus_argp_rad = np.radians(round_sig(302.3633807683478, 3.7866E-5))
# Set the mean anomaly and corresponding epoch in Julian Date (JD)
neo_orpheus_m0_at_t0_rad = np.radians(round_sig(4.38004009432731, 5.0726E-6))
neo_orpheus_t0 = spiceypy.utc2et('2459600.5 JD')
# +
# Set the orbital elements array
# (ordering as required by spiceypy.conics: rp, ecc, inc, lnode, argp,
# m0, t0, mu)
neo_orpheus_orbital_elements = [neo_orpheus_perihelion_km, \
                                neo_orpheus_ecc, \
                                neo_orpheus_inc_rad, \
                                neo_orpheus_lnode_rad, \
                                neo_orpheus_argp_rad, \
                                neo_orpheus_m0_at_t0_rad, \
                                neo_orpheus_t0, \
                                GM_SUN]
# Compute the state vector
neo_orpheus_state_vector = spiceypy.conics(neo_orpheus_orbital_elements, datetime_et)
print(f'Current state vector of Orpheus in km and km/s ({datetime_utc})):\n' \
      f'{neo_orpheus_state_vector}')
# +
# Now compute the state vector of the Earth:
# (geometric state of Earth (id 399) relative to the Sun (id 10))
earth_state_vector, _ = spiceypy.spkgeo(targ=399, \
                                        et=datetime_et, \
                                        ref='ECLIPJ2000',
                                        obs=10)
# Compute the current distance of the Earth and the asteroids in LD
# (norm of the difference of the position parts of the state vectors)
earth_orpheus_dist_km = spiceypy.vnorm(earth_state_vector[:3] \
                                       - neo_orpheus_state_vector[:3])
print(f'Current distance between the Earth and Orpheus ({datetime_utc}):\n' \
      f'{earth_orpheus_dist_km / ONE_LD} LD')
| [13]-A-Close-Visitor/neo_orpheus_2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # pypercolate HPC performance metrics
# + active=""
# In this Section, we compute basic performance metrics of the *percolate.hpc* module.
# Namely, we time the execution and measure memory consumption of graph creation with :meth:`~percolate.percolate.spanning_2d_grid`, of computing the convolution coefficients with :meth:`~percolate.percolate._binomial_pmf`, and of performing a single run with :meth:`~percolate.hpc.bond_microcanonical_statistics`.
# -
# ## Preamble
# +
# configure plotting
# %config InlineBackend.rc = {'figure.dpi': 300, 'savefig.dpi': 300, \
# 'figure.figsize': (6, 3), 'font.size': 12, \
# 'figure.facecolor': (1, 1, 1, 0)}
# %matplotlib inline
import timeit
import matplotlib.pyplot as plt
import memory_profiler
import numpy as np
import percolate
plt.style.use('ggplot')
# -
# ## System sizes
# We determine performance metrics for the following system sizes.
# Linear system sizes L = 8, 16, ..., 1024 (powers of two), persisted to
# disk because the measurement scripts below run in separate processes.
dimensions = np.power(2, np.arange(3, 11))
np.save('dimensions.npy', dimensions)
print(dimensions)
# ## System information
# This Section details the hardware, the operating system, and the Python environment that collects the performance metrics.
# $ pip install py-cpuinfo
from cpuinfo import cpuinfo
# Display a brief CPU summary (notebook cell output); keep only the
# identifying fields from the full cpuinfo report
{
    key: value
    for key, value in cpuinfo.get_cpu_info().items()
    if key in ['arch', 'brand', 'count', 'hz_advertised', 'l2_cache_size', 'vendor_id']
}
import psutil
# Total physical memory in GiB
print("Total memory: {:.1f} GiB".format(psutil.virtual_memory().total / 1024 ** 3))
# +
import os
import platform
import sys
import pip
import pprint
# -
print(platform.platform())
print(
    platform.python_version(),
    platform.python_implementation(),
    platform.python_compiler(),
)
# Report the versions of the packages relevant to the benchmark.
# Fix: pip.get_installed_distributions() was removed in pip 10; use the
# supported stdlib API importlib.metadata instead.
import importlib.metadata
pprint.pprint({
    dist.metadata['Name'].lower(): dist.version
    for dist in importlib.metadata.distributions()
    if dist.metadata['Name'] is not None and dist.metadata['Name'].lower() in [
        'cython', 'future', 'ipython', 'matplotlib', 'memory-profiler',
        'networkx', 'numpy', 'percolate', 'pip', 'scipy', 'simoa',
    ]
})
np.show_config()
# ## Scripts
# For each performance metric, we define and run a script in an independent process.
# Each script reads the system sizes from disk and writes the performance metrics back to disk.
# We use independent scripts and processes here as especially memory usage measurements seem to be volatile if executed in the same process, IPython session, or notebook.
# + language="python"
import memory_profiler
import numpy as np
import percolate
def get_graph_memory(dimension):
    """Return the memory growth caused by building the spanning grid graph.

    Measured as the difference in this process's memory footprint before
    and after the call; ``-1`` selects the current process in
    memory_profiler (units are presumably MiB, memory_profiler's default
    -- the plots below divide by 2**10 and label GiB, which is consistent).
    """
    mem_before = memory_profiler._get_memory(-1)
    # The reference is kept in a local so the graph stays alive for the
    # second measurement.
    graph = percolate.spanning_2d_grid(dimension)
    mem_after = memory_profiler._get_memory(-1)
    return mem_after - mem_before
dimensions = np.load('dimensions.npy')
# Graph-construction memory for every system size.
# dtype=float replaces the alias np.float, which was removed in NumPy 1.24;
# the builtin float is the equivalent 64-bit scalar type for np.fromiter.
graph_mem = np.fromiter(
    (
        get_graph_memory(dimension)
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('graph_mem.npy', graph_mem)
# + language="python"
import memory_profiler
import numpy as np
import percolate
def get_convolution_memory(dimension):
    """Return the memory growth caused by computing the convolution
    coefficients for one system size.

    n = 2 * L * (L - 1) -- presumably the number of bonds of an L x L
    square grid (the same formula is used in the timing script below).
    """
    mem_before = memory_profiler._get_memory(-1)
    # Kept in a local so the coefficient array stays alive for the
    # second measurement.
    convolution_factors = percolate.percolate._binomial_pmf(
        n=2 * dimension * (dimension - 1), p=0.5,
    )
    mem_after = memory_profiler._get_memory(-1)
    return mem_after - mem_before
dimensions = np.load('dimensions.npy')
# Memory of the convolution coefficients for every system size.
# dtype=float replaces the alias np.float, removed in NumPy 1.24.
convolution_mem = np.fromiter(
    (
        get_convolution_memory(dimension)
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('convolution_mem.npy', convolution_mem)
# + language="python"
import timeit
import numpy as np
import percolate
dimensions = np.load('dimensions.npy')
# Wall-clock time of a single graph construction per system size
# (number=1: graph building is too slow to repeat at large sizes).
# dtype=float replaces the alias np.float, removed in NumPy 1.24.
graph_times = np.fromiter(
    (
        timeit.timeit(
            stmt='percolate.spanning_2d_grid({})'.format(dimension),
            setup='import percolate',
            number=1,
        )
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('graph_times.npy', graph_times)
# + language="python"
import timeit
import numpy as np
import percolate
dimensions = np.load('dimensions.npy')
# The timed statement computes the convolution coefficients; num=1 keeps a
# single value of p, so the reported time is "per p".
convolution_stmt = """\
[
    percolate.percolate._binomial_pmf(n={}, p=p)
    for p in np.linspace(0.4, 0.6, num=1)
]
"""
# dtype=float replaces the alias np.float, removed in NumPy 1.24.
convolution_times = np.fromiter(
    (
        timeit.timeit(
            stmt=convolution_stmt.format(2 * dimension * (dimension - 1)),
            setup='import percolate; import numpy as np',
            number=1,
        )
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('convolution_times.npy', convolution_times)
# + language="python"
import timeit
import numpy as np
dimensions = np.load('dimensions.npy')
# The setup builds the percolation graph once (outside the timed region),
# so only the single microcanonical run itself is timed.
run_stmt = """\
percolate.hpc.bond_microcanonical_statistics(
    seed=42, **perc_graph
)
"""
run_setup = """\
import percolate
import percolate.hpc
perc_graph = percolate.percolate.percolation_graph(
    graph=percolate.spanning_2d_grid({}),
    spanning_cluster=True,
)
"""
# dtype=float replaces the alias np.float, removed in NumPy 1.24.
run_times = np.fromiter(
    (
        timeit.timeit(
            stmt=run_stmt,
            setup=run_setup.format(dimension),
            number=1,
        )
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('run_times.npy', run_times)
# + language="python"
import memory_profiler
import numpy as np
import percolate
import percolate.hpc
def get_run_memory(dimension):
    """Return the peak additional memory of one microcanonical run.

    The percolation graph is built *before* the baseline measurement so
    that only the run itself is accounted for. memory_profiler.memory_usage
    executes the (callable, args, kwargs) tuple in-process, sampling memory
    every interval=0.01 seconds; the maximum sample minus the baseline is
    the peak growth.
    """
    perc_graph = percolate.percolate.percolation_graph(
        graph=percolate.spanning_2d_grid(dimension),
        spanning_cluster=True,
    )
    mem_before = memory_profiler._get_memory(-1)
    return (
        max(memory_profiler.memory_usage(
            (
                percolate.hpc.bond_microcanonical_statistics,
                [],
                dict(seed=42, **perc_graph)
            ),
            interval=.01,
        )) - mem_before
    )
dimensions = np.load('dimensions.npy')
# Peak per-run memory for every system size.
# dtype=float replaces the alias np.float, removed in NumPy 1.24.
run_mem = np.fromiter(
    (
        get_run_memory(dimension)
        for dimension in dimensions
    ),
    dtype=float,
)
np.save('run_mem.npy', run_mem)
# -
# Read every performance metric back from the files written by the
# independent measurement scripts above.
(graph_mem, convolution_mem, graph_times,
 convolution_times, run_times, run_mem) = (
    np.load(metric + '.npy')
    for metric in ('graph_mem', 'convolution_mem', 'graph_times',
                   'convolution_times', 'run_times', 'run_mem')
)
# ## Performance plots
# Log-log scaling reveals the asymptotic (power-law) behaviour.
# Every label containing '\_' is now a raw string: in a plain string the
# backslash starts an invalid escape sequence (a SyntaxWarning on modern
# Python); r'...' passes the backslash through to matplotlib unchanged.
plt.loglog(dimensions ** 2, graph_times, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'execution time (s)')
plt.title(r'percolate.spanning\_2d\_grid')
plt.show()
# memory_profiler reports MiB; / 2 ** 10 converts to GiB.
plt.loglog(dimensions ** 2, graph_mem / 2 ** 10, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'memory consumption (GiB)')
plt.title(r'percolate.spanning\_2d\_grid')
plt.show()
plt.loglog(dimensions ** 2, convolution_times, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'execution time per $p$ (s)')
plt.title(r'percolate.percolate.\_binomial\_pmf')
plt.show()
plt.loglog(dimensions ** 2, convolution_mem / 2 ** 10, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'memory consumption per $p$ (GiB)')
plt.title(r'percolate.percolate.\_binomial\_pmf')
plt.show()
plt.loglog(dimensions ** 2, run_times, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'execution time (s)')
plt.title(r'percolate.hpc.bond\_microcanonical\_statistics')
plt.show()
plt.loglog(dimensions ** 2, run_mem / 2 ** 10, 'x')
plt.xlabel(r'number of nodes')
plt.ylabel(r'memory consumption (GiB)')
plt.title(r'percolate.hpc.bond\_microcanonical\_statistics')
plt.show()
| docs/hpc-performance/pypercolate-hpc-performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Major and minor ticks
#
#
# Demonstrate how to use major and minor tickers.
#
# The two relevant classes are `.Locator`\s and `.Formatter`\s. Locators
# determine where the ticks are, and formatters control the formatting of tick
# labels.
#
# Minor ticks are off by default (using `.NullLocator` and `.NullFormatter`).
# Minor ticks can be turned on without labels by setting the minor locator.
# Minor tick labels can be turned on by setting the minor formatter.
#
# `.MultipleLocator` places ticks on multiples of some base.
# `.FormatStrFormatter` uses a format string (e.g., ``'%d'`` or ``'%1.2f'`` or
# ``'%1.1f cm'``) to format the tick labels.
#
# `.pyplot.grid` changes the grid settings of the major ticks of the x and y axis
# together. If you want to control the grid of the minor ticks for a given axis,
# use for example ::
#
# ax.xaxis.grid(True, which='minor')
#
# Note that a given locator or formatter instance can only be used on a single
# axis (because the locator stores references to the axis data and view limits).
#
# +
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
                               AutoMinorLocator)

# Damped sine wave sampled every 0.1 on [0, 100).
t = np.arange(0.0, 100.0, 0.1)
s = np.exp(-t * 0.01) * np.sin(0.1 * np.pi * t)

fig, ax = plt.subplots()
ax.plot(t, s)

# Major ticks every 20 units, labelled as plain integers.
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# Minor ticks every 5 units; they keep the default NullFormatter, so
# they are drawn without labels.
ax.xaxis.set_minor_locator(MultipleLocator(5))
plt.show()
# -
# Automatic tick selection for major and minor ticks.
#
# Use interactive pan and zoom to see how the tick intervals change. There will
# be either 4 or 5 minor tick intervals per major interval, depending on the
# major interval.
#
# One can supply an argument to `.AutoMinorLocator` to specify a fixed number
# of minor intervals per major interval, e.g. ``AutoMinorLocator(2)`` would
# lead to a single minor tick between major ticks.
#
#
# +
# Higher-frequency damped sine for the automatic-tick example.
t = np.arange(0.0, 100.0, 0.01)
s = np.exp(-t * 0.01) * np.sin(2 * np.pi * t)

fig, ax = plt.subplots()
ax.plot(t, s)

# Let matplotlib choose the minor tick spacing automatically.
ax.xaxis.set_minor_locator(AutoMinorLocator())
# Style major and minor ticks differently so both remain visible.
for which, params in (('both', dict(width=2)),
                      ('major', dict(length=7)),
                      ('minor', dict(length=4, color='r'))):
    ax.tick_params(which=which, **params)
plt.show()
| matplotlib/gallery_jupyter/ticks_and_spines/major_minor_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# At the moment, every time you run a notebook - even if it's just plain Python and not PySpark - a long-running PySpark process is started which will occupy one (1) CPU Core.
#
# Because you have a limited number of CPU Cores (4, 8, etc), you will quickly run out of CPU Cores - causing notebooks to hang.
#
# Make sure you "Shutdown" notebooks that you are not using to free up the valuable CPU Cores.
# 
| myapps/jupyter/README_FIRST/README_FIRST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the small auxiliary files eagerly.
destinations = pd.read_csv('../Raw data/destinations.csv')
short_train = pd.read_csv('../Raw data/short_train.csv')
short_train.head()
# The full training file is large, so read it lazily in 1000-row chunks.
test_chunksize = pd.read_csv('../Raw data/train.csv', chunksize=1000)
# The original cell ended with the truncated fragment `te`, which raised a
# NameError; preview the first chunk instead.
next(test_chunksize).head()
| Exploration/.ipynb_checkpoints/Hands on the data-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Permutations Notebook
#
# +
import permutations as p
# NOTE(review): `permutations` is a project-local module (not
# itertools.permutations); DIMENSION and ENTRIES are presumably
# module-level constants controlling vector size/sparsity -- confirm.
vector1 = p.get_random_vector(p.DIMENSION, p.ENTRIES)
p.normalize(vector1)
vector2 = p.get_random_vector(p.DIMENSION, p.ENTRIES)
p.normalize(vector2)
# Inner product of the normalized vectors, i.e. their cosine similarity.
print("Similarity before sorting:", sum(vector1 * vector2))
# Apply each vector's own sort permutation, then compare again -- sorting
# presumably aligns the large entries and increases the similarity.
perm_vector1 = p.permute_vector(p.get_sort_permutation(vector1), vector1)
perm_vector2 = p.permute_vector(p.get_sort_permutation(vector2), vector2)
print("Similarity after sorting:", sum(perm_vector1 * perm_vector2))
| notebooks/permutations_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# %matplotlib inline
# %config InlineBackend.figure_format='svg'
# -
# data
# Load the iris data set into a DataFrame.
iris = load_iris()
df = pd.DataFrame(iris.data,columns=iris.feature_names)
df['label'] = iris.target
# Rename columns to short, human-readable names.
df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
df.head()
# +
x = df['sepal length']
y = df['petal width']
# The first 50 rows of the iris data are setosa.
setosa_x = x[:50]
setosa_y = y[:50]
# NOTE(review): x[50:] spans rows 50 through the end, so the
# "versicolor" series below presumably mixes versicolor AND virginica
# samples -- verify intent (versicolor alone would be x[50:100]).
versicolor_x = x[50:]
versicolor_y = y[50:]
plt.figure(figsize=(8,6))
plt.scatter(setosa_x,setosa_y,marker='+',color='green')
plt.scatter(versicolor_x,versicolor_y,marker='_',color='red')
plt.tick_params(direction='in')
plt.show()
# +
# Keep only the two features plotted above; separate labels from features.
df = df.drop(['sepal width','petal length'],axis = 1)
Y = df['label']
df = df.drop(['label'],axis=1)
X = df.values.tolist()
X, Y = shuffle(X, Y)
# The four empty-list initialisations that used to precede this call were
# dead code: train_test_split rebinds all four names immediately.
x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.9)
# Convert to arrays for the classifier below.
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
# +
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Fit a linear-kernel support vector classifier on the two kept features.
clf = SVC(kernel='linear')
clf.fit(x_train,y_train)
y_pred = clf.predict(x_test)
# Fraction of correctly classified test samples.
print(accuracy_score(y_test,y_pred))
# -
| Labs/第7章 支持向量机/Labs/svm_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:nn]
# language: python
# name: conda-env-nn-py
# ---
# # Model Export
#
# - In the future, the plan is to export the entire model to ONNX for later standalone use in production within different frameworks and/or languages. See https://github.com/facebookresearch/maskrcnn-benchmark/issues/116.
#
# - At the moment we only serialize the model with `torch.save()`. During model loading, the Python runtime will require both PyTorch and `maskrcnn-benchmark` as dependencies.
#
# - During standalone inference we try to avoid the usage of the `maskflow` library.
# +
from pathlib import Path
import sys; sys.path.append("../")
import torch
import maskflow
# Project layout: trained checkpoints live under Models/; exported
# artifacts are written to "Exported Models/".
root_dir = Path("/home/hadim/.data/Neural_Network/Maskflow/Microtubule")
model_dir = root_dir / "Models"
exported_model_dir = root_dir / "Exported Models"
exported_model_dir.mkdir(parents=True, exist_ok=True)
# Load the model configuration
config = maskflow.config.load_config(root_dir / "config.yaml")
# Select the model (checkpoint directories are named by training timestamp)
model_name = '2018.11.20-12:15:32'
model_path = model_dir / model_name
exported_name = "microtubules-1.0.0"
# Load model
model = maskflow.inference.build_model(config, model_path)
# Export model
# NOTE(review): per the notebook text this presumably wraps torch.save();
# the maskflow API is not visible here -- confirm the exported format.
maskflow.model.export_model(model, model_path, exported_model_dir, exported_name)
# -
# # ONNX Export (not working)
#
# ```python
# import torch
#
# dummy_input = torch.autograd.Variable(torch.randn(1, 3, 224, 224))
# input_names = ["image"]
# output_names = [ "predictions" ]
# torch.onnx.export(model.to('cpu'), dummy_input, model_path.with_suffix('.onnx'), verbose=True, input_names=input_names, output_names=output_names)
# ```
| notebooks/Detection/6_Model_Export.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Avoiding construction sites dynamically
# > Note: All notebooks need the [environment dependencies](https://github.com/GIScience/openrouteservice-examples#local-installation)
# > as well as an [openrouteservice API key](https://openrouteservice.org/dev/#/signup) to run
# In this example, we'd like to showcase how to use the [directions API][directions] and to avoid a number of
# construction sites while routing.
#
# The challenge here is to prepare the data appropriately and construct a reasonable GET request.
#
# [directions]: https://openrouteservice.org/documentation/#/reference/directions/directions/directions-service
import folium
import pyproj
import requests
from openrouteservice import client
from shapely import geometry
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
# Rostock is beautiful, but, as in most other pan-European cities, there are a lot of construction sites.
# Wouldn't it be great if we could plan our trip avoiding these sites and consequently save lots of time!?
# ## Construction sites in Rostock
# We take the [open data](https://www.opendata-hro.de/de/dataset/baustellen) from the Rostock authorities.
# It's hard (to impossible) to find construction site polygons, so these are points, and we need to buffer them to
# a polygon to be able to avoid them when they cross a street.
#
# For the investigatory in you: yes, no CRS is specified on the link (shame on you, Rostock!).
# It's fair enough to assume it comes in WGS84 lat/long though (my reasoning:
# they show Leaflet maps plus GeoJSON is generally a web exchange format, and many web clients (Google Maps, Leaflet)
# won't take CRS other than WGS84).
# Since degrees are not the most convenient unit to work with, let's first define a function which does the buffering job
# with UTM32N projected coordinates:
# +
url = 'https://geo.sv.rostock.de/download/opendata/baustellen/baustellen.json'
def create_buffer_polygon(point_in, resolution=10, radius=10):
    """Buffer a WGS84 point by *radius* metres and return its exterior ring.

    The point is projected to UTM 32N so the buffer can be expressed in
    metres, buffered there, and the ring transformed back to WGS84.

    :param point_in: coordinate pair of the site (from GeoJSON, i.e. lon/lat)
    :param resolution: segments per quarter circle of the buffer
    :param radius: buffer radius in metres
    :returns: list of coordinate pairs of the buffer's exterior ring

    NOTE(review): pyproj 2+ Transformers honour the CRS's native axis
    order by default (lat/lon for EPSG:4326), while GeoJSON supplies
    (lon, lat); verify the axis order or pass always_xy=True.
    """
    convert = pyproj.Transformer.from_crs("epsg:4326", 'epsg:32632') # WGS84 to UTM32N
    convert_back = pyproj.Transformer.from_crs('epsg:32632', "epsg:4326") # UTM32N to WGS84
    point_in_proj = convert.transform(*point_in)
    point_buffer_proj = Point(point_in_proj).buffer(radius, resolution=resolution) # 10 m buffer
    # Iterate over all points in buffer and build polygon
    poly_wgs = []
    for point in point_buffer_proj.exterior.coords:
        poly_wgs.append(convert_back.transform(*point)) # Transform back to WGS84
    return poly_wgs
# +
# Set up the fundamentals
api_key = 'your_key' # Individual api key
ors = client.Client(key=api_key) # Create client with api key
rostock_json = requests.get(url).json() # Get data as JSON
# Folium map centred on Rostock.
map_params = {'tiles': 'Stamen Toner',
              'location': ([54.13207, 12.101612]),
              'zoom_start': 12}
map1 = folium.Map(**map_params)
# Populate a construction site buffer polygon list
sites_poly = []
for site_data in rostock_json['features']:
    site_coords = site_data['geometry']['coordinates']
    # GeoJSON stores (lon, lat); folium wants (lat, lon), hence reversed().
    folium.features.Marker(list(reversed(site_coords)),
                           popup='Construction point<br>{0}'.format(site_coords)).add_to(map1)
    # Create buffer polygons around construction sites with 10 m radius and low resolution
    site_poly_coords = create_buffer_polygon(site_coords,
                                             resolution=2, # low resolution to keep polygons lean
                                             radius=10)
    sites_poly.append(site_poly_coords)
    site_poly_coords = [(y, x) for x, y in site_poly_coords] # Reverse coords for folium/Leaflet
    folium.vector_layers.Polygon(locations=site_poly_coords,
                                 color='#ffd699',
                                 fill_color='#ffd699',
                                 fill_opacity=0.2,
                                 weight=3).add_to(map1)
map1
# -
# That's a lot of construction sites in Rostock! If you dig into the `properties` of the JSON, you'll see that those
# are kept up-to-date though. Seems like an annoying place to ride a car...
#
# Anyways, as you might know, a GET request can only contain so many characters. Unfortunately, > 80 polygons are more
# than a GET can take (that's why we set `resolution = 2`).
# Because there's no POST endpoint available currently, we'll have to work around it:
#
# One sensible thing one could do, is to eliminate construction zones which are not in the immediate surrounding of the
# route of interest.
# Hence, we can request a route without construction sites, take a reasonable buffer,
# filter construction sites within the buffer and try again.
#
# Let's try this:
# +
# GeoJSON style function
def style_function(color):
    """Return a folium/GeoJSON style callback that paints features in *color*
    with a fixed weight of 3 and 50% opacity."""
    def styler(feature):
        return {'color': color, 'weight': 3, 'opacity': 0.5}
    return styler
# Create new map to start from scratch
map_params.update({'location': ([54.091389, 12.096686]),
                   'zoom_start': 13})
map2 = folium.Map(**map_params)
# Request normal route between appropriate locations without construction sites
request_params = {'coordinates': [[12.108259, 54.081919],
                                  [12.072063, 54.103684]],
                  'format_out': 'geojson',
                  'profile': 'driving-car',
                  'preference': 'shortest',
                  'instructions': 'false', }
route_normal = ors.directions(**request_params)
folium.features.GeoJson(data=route_normal,
                        name='Route without construction sites',
                        style_function=style_function('#FF0000'),
                        overlay=True).add_to(map2)
# Buffer route with 0.009 degrees (really, just too lazy to project again...)
route_buffer = LineString(route_normal['features'][0]['geometry']['coordinates']).buffer(0.009)
folium.features.GeoJson(data=geometry.mapping(route_buffer),
                        name='Route Buffer',
                        style_function=style_function('#FFFF00'),
                        overlay=True).add_to(map2)
# Plot which construction sites fall into the buffer Polygon
sites_buffer_poly = []
for site_poly in sites_poly:
    poly = Polygon(site_poly)
    # Keep only the sites whose buffer intersects the route corridor,
    # shrinking the polygon list so it fits into a GET request.
    if route_buffer.intersects(poly):
        folium.features.Marker(list(reversed(poly.centroid.coords[0]))).add_to(map2)
        sites_buffer_poly.append(poly)
map2
# -
# Finally, we can try to request a route using `avoid_polygons`, which conveniently takes a GeoJSON as input.
# +
# Add the site polygons to the request parameters
request_params['options'] = {'avoid_polygons': geometry.mapping(MultiPolygon(sites_buffer_poly))}
route_detour = ors.directions(**request_params)
# Draw the detour route and add a layer control so overlays can be toggled.
folium.features.GeoJson(data=route_detour,
                        name='Route with construction sites',
                        style_function=style_function('#00FF00'),
                        overlay=True).add_to(map2)
map2.add_child(folium.map.LayerControl())
map2
# -
# > Note: This request might fail sometime in the future, as the JSON is loaded dynamically and changes a few times
# > a week.
# > Thus the amount of sites within the buffer can exceed the GET limit (which is between 15-20 site polygons approx).
| python/Avoid_ConstructionSites.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Example 02: CIFAR-10 Demo
import sys
sys.path.append('./../')
import matplotlib
# %matplotlib inline
import visualisation
# ## (i) Train an ANT on the CIFAR-10 image recognition dataset
# From the code directory, run the following command to train an ANT:
#
# ```bash
# python tree.py --dataset cifar10 \ # dataset
# --experiment demo --subexperiment ant_cifar10 \ # experiment names
# --batch-size 512 --epochs_patience 5 \ # training
# --epochs_node 100 --epochs_finetune 200 \
# --scheduler step_lr --augmentation_on \
# -t_ver 5 -t_k 3 -t_ngf 96 \ # transformer module config
# -r_ver 3 -r_ngf 48 -r_k 3 \ # router module config
# -s_ver 6 \ # solver module config
# --maxdepth 10 --batch_norm \ # other model config
# --visualise_split --num_workers 0 --seed 0 # miscellaneous
# ```
#
# It takes less than 3 hours on a single Titan X GPU.
# ## (ii) Plot classification accuracy
# The dotted lines correspond to the epoch number at which the refinement phase started.
# +
exp_dir = './../experiments/iot/demo6/'
# NOTE(review): the notebook text says CIFAR-10, but these paths point at
# an "iot" experiment ('ant_iot') -- confirm which experiment is intended.
models_list = ['ant_iot']
records_file_list = [exp_dir + model_name + '/checkpoints/records.json' for model_name in models_list]
model_files = [exp_dir + model_name + '/checkpoints/model.pth' for model_name in models_list]
# Training curves; dotted lines mark the start of the refinement phase.
visualisation.plot_performance(records_file_list, models_list, ymax = 3.0, figsize=(10,7), finetune_position=True)
visualisation.plot_accuracy(records_file_list, models_list, figsize=(10,7), ymin=0, ymax=98, finetune_position=True)
# -
# ## (iii) Compute model size
_ = visualisation.compute_number_of_params(model_files, models_list, is_gpu=False)
# ## (iv) Visualise the tree structure
fig_dir = exp_dir + 'ant_iot' + '/figures/'
visualisation.visualise_treestructures(fig_dir, figsize=(10,20))
| notebooks/example_cifar10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ***
# ***
# # 17. 파이썬 모듈
# ***
# ***
# ***
# ## 1 이름 공간
# ***
# - 이름 공간 또는 스코프 (Naming Space or Scope): 이름이 존재하는 장소. 파이썬은 실행 시간에 각 이름들을 적절한 이름 공간에 넣어 관리한다.
# - 이름 공간(스코프)의 종류
# - 지역(Local): 각 함수 내부
# - 전역(Global): 모듈 (파일) 내부
# - 내장(Built-in): 파이썬 언어 자체에서 정의한 영역
# - 변수가 정의되는 위치에 의해 변수의 스코프가 정해짐
# - 파이썬에서 변수의 정의
# - 변수가 l-value로 사용될 때
# - 변수가 r-value로 사용될 때 해당 변수의 값을 찾는 순서 규칙
# - L --> G --> B
# 
# ### 1-1 지역변수와 전역변수
# - 변수의 스코프는 해당 변수가 l-value로서 정의되는 위치에 따라 달라짐
# - 변수가 함수 내에서 정의되면 해당 함수의 지역 변수가 된다.
# +
# g, h는 전역 변수
g = 10
h = 5
def f(a): # a is a local variable
    h = a + 10 # h is local: it is assigned (used as an l-value) inside f
    b = h + a + g # b is local too; g is only read (r-value), so the global g is used
    return b
print(f(h)) # the argument h resolves in the caller's scope: the global h (5)
print(h) # the global h is unchanged by the call
# -
# - 함수 내부에서 전역 변수를 직접 사용하고자 할 때
# - <code>global</code> 키워드 활용
# +
h = 5
def f(a): # a is local
    global h # declare h as the global variable up front
    h = a + 10 # h stays global here despite being assigned, because of the declaration
    return h
print(f(10))
print(h) # the global h was modified inside the function
# -
# - [주의] 동일 함수 내에서 동일한 변수가 지역변수와 전역변수로 동시에 활용될 수 없음
# - 함수 내에서 정의되는 변수는 지역 변수로 간주
# - 지역 변수로 선언되기 이전에 해당 변수를 사용할 수 없음
# +
g = 10
def f():
    a = g # reading g here would use the global value ...
    g = 20 # ... but this assignment makes g local to f for the whole body
    return a
# Intentional demo: raises UnboundLocalError, since the local g is read
# on the line above before it is assigned.
print(f())
# +
g = 10
def f():
    global g # g is declared global for the whole function body
    a = g # a is local, g is the global (10)
    g = 20 # rebinds the global g
    return a
print(f())
print(g) # the global g is now 20
# -
# ### 1-2 특정 공간의 이름 목록 얻기
# - 이름(Name)
# - 변수 (객체) 이름
# - 함수 이름
# - 클래스 이름
# - 모듈 이름
# - <code>dir()</code>: 함수가 호출된 스코프에서 정의되어 있는 모든 Name들을 문자열 리스트로 반환한다.
# - <code>dir(object)</code>: object이 지니고 있는 모든 Name들을 문자열 리스트로 반환한다.
print(dir())
l = []
print(dir(l))
# ### 1-3 함수의 중첩 영역(Nested Scopes) 지원
# - Nested Scope: 함수 안에 정의되어 있는 함수 내부
# - 가장 안쪽의 스코프부터 바깥쪽의 스코프쪽으로 변수를 찾는다.
# - 각각의 중첩 함수마다 독립적인 Scope을 관리한다.
x = 2
def F():
    x = 1 # local to F
    def G():
        x = 20 # innermost scope: shadows both F's x and the global x
        print(x)
    G()
F()
x = 2
def F():
    x = 1 # local to F
    def G():
        x = 20 # local to G, shadows F's x
        print("G():", x)
    G()
    print("F():", x) # F's own x is untouched by G
F()
print("Global:", x) # the global x is untouched by both
x = 2
def F():
    x = 1
    def G():
        global x # binds the GLOBAL x, skipping F's enclosing scope entirely
        x = 20
        print("G():", x)
    G()
    print("F():", x) # F's local x is still 1
F()
print("Global:", x) # the global x was rebound to 20 by G
# ***
# ## 2 모듈의 정의
# ***
# - 모듈: 파이썬 프로그램 파일로서 파이썬 데이터와 함수등을 정의하고 있는 단위
# - 서로 연관된 작업을 하는 코드들을 묶어서 독립성을 유지하되 재사용 가능하게 만드는 단위
# - 모듈을 사용하는 측에서는 모듈에 정의된 함수나 변수 이름을 사용
# - 모듈의 종류
# - 표준 모듈
# - 파이썬 언어 패키지 안에 기본적으로 포함된 모듈
# - 대표적인 표준 모듈 예
# - <code>math, string</code>
# - 사용자 생성 모듈
# - 개발자가 직접 정의한 모듈
# - 써드 파티 모듈
# - 다른 업체나 개인이 만들어서 제공하는 모듈
# ### 2-1 모듈은 어디에 저장되는가
# - 모듈이 정의되고 저장되는 곳은 파일
# - 파일
# - 모듈 코드를 저장하는 물리적인 단위
# - 모듈
# - 논리적으로 하나의 단위로 조직된 코드의 모임
# - 파이썬 모듈이 정의되는 파일의 확장자: <code>.py</code>
# - 다른 곳에서 모듈을 사용하게 되면 해당 모듈의 <code>.py</code>는 바이트 코드로 컴파일 되어 <code>.pyc</code>로 존재한다.
# ### 2-2 사용자 모듈 만들기와 호출하기
# +
# File: mymath.py -- tiny example module for the import demonstrations below.

# Module-level constant: a coarse approximation of pi.
mypi = 3.14


def add(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total


def area(r):
    """Return the area of a circle of radius *r*, using ``mypi``."""
    return r * r * mypi
# -
# - 모듈 이름은 해당 모듈을 정의한 파일 이름에서 <code>.py</code>를 제외한 것
# - 모듈을 불러오는 키워드: <code>import</code>
# - 모듈에서 정의한 이름 사용하기
# +
import mymath
print(dir(mymath)) # inspect the names defined in mymath
print(mymath.mypi) # use the constant mypi defined inside mymath
print(mymath.area(5)) # call the area function defined inside mymath
# -
# ### 2-3 모듈을 왜 사용하는가?
# - 함수와 모듈
# - 함수: 파일 내에서 일부 코드를 묶는 것
# - 모듈: 파일 단위로 코드들을 묶는 것
# - 비슷하거나 관련된 일을 하는 함수나 상수값들을 모아서 하나의 파일에 저장하고 추후에 재사용하는 단위
# - 모듈 사용의 이점
# - 코드의 재사용
# - 프로그램 개발시에 전체 코드들을 여러 모듈 단위로 분리하여 설계함으로써 작업의 효율을 높일 수 있음
# - 별도의 이름 공간(스코프)를 제공함으로써 동일한 이름의 여러 함수나 변수들이 각 모듈마다 독립적으로 정의될 수 있다.
# - 별도 파일 내에 파이썬 코드를 저장할 때 (즉, 모듈을 코딩할 때) 한글 처리
# - 파일의 맨 위에 다음 코드를 넣어 준다.
# - \# -\*- coding:utf-8 -*-
# - 모듈은 하나의 독립된 이름 공간을 확보하면서 정의된다.
# ### 2-4 모듈이 지닌 이름들 알아보기
# - <code>dir(모듈)</code>: 모듈이 지니고 있는 모든 이름들을 리스트로 반환
import string
print(dir(string))
# ### 2-5 이름 공간을 제공하는 다른 예들
# - 독립된 이름 공간(스코프)을 제공하는 것들
# - 모듈
# - 함수
# - 클래스
# - 객체
# - <code>string</code> 모듈 이름 공간에 변수 <code>a</code>를 생성한다.
# - 표준 모듈에 사용자가 정의하는 이름을 생성하는 것은 비추천
# - 단지 모듈 자체가 독립적인 이름 공간을 제공한다는 것을 알려줌
import string
string.a = 1
print(string.a)
# - 클래스도 독립적인 이름 공간
# +
class C: # a class is also an independent namespace
    a = 2 # name defined in the class namespace
    pass # end of the class definition
c = C() # create an instance of the class
c.a = 1 # each instance is its own namespace, too: this shadows C.a on c
print(c.a)
print(c.__class__.a)
print(C.a)
# -
# - 함수도 독립적인 이름 공간
# - 다만 함수 내에서 선언된 지역(로컬) 변수는 함수 외부에서 접근할 수 없다.
x = 10 # name defined in the current module's (global) namespace
def f():
    a = 1
    b = 2 # a and b live in f's local namespace; a function is a namespace too
f.c = 1 # attributes can be attached to the function object itself
print(f.c)
print()
# Intentional demo: raises AttributeError -- local variables are NOT
# attributes of the function object.
print(f.a)
# ***
# ## 3 모듈 검색 경로
# ***
# ### 3-1 <code>PYTHONPATH</code> 환경 변수 설정하기
# - 파이썬이 모듈을 찾는 순서
# - 1) 이미 메모리에 로딩되어진 것
# - 2) 현재 디렉토리 (ipython에서 <code>pwd</code> 명령으로 확인 가능)
#
# - <code>! pwd</code>
#
#
# - 3) <code>PYTHONPATH</code> 환경 변수에 기술된 디렉토리 목록을 차례로 탐색
# - 4) 표준 라이브러리 디렉토리들
# - <code>sys</code> 모듈 임포트 후 <code>sys.path</code> 로 확인 가능
# - <code>sys.path.append()</code>, <code>sys.path.remove()</code>로 추가, 삭제 가능
# - <code>PYTHONPATH</code> 환경 변수 설정 방법
# - 윈도우즈
# - 제어판\시스템 및 보안\시스템\고급 시스템 설정\환경 변수
# - 새로 만들기: 변수 이름 - <code>PYTHONPATH</code>, 변수 값 - <code>C:\Users\yhhan\mypythonlib</code>
# - python 새로 시작하기 (cmd창 새로 시작한 후)
# - MAC이나 리눅스
# - <code>~/.bashrc</code>, <code>~/.bash_profile</code>, 혹은 <code>~/.profile</code> 파일에 다음 라인 추가
# - <code>export PYTHONPATH=/Users/yhhan/mypythonlib</code>
# - 터미널 창에서 다음 명령어 수행
# - <code>source ~/.bash_profile</code>
# - 이클립스에서 <code>PYTHONPATH</code> 설정
# - [참고] https://goo.gl/crPFi0
# 
# - 코드 내에서 모듈 검색 경로 확인하기
# Inspect the module search path from code.
import sys
# `print sys.path` was Python 2 statement syntax -- a SyntaxError under
# this notebook's Python 3 kernel; call print() as a function instead.
print(sys.path)
# ### 3-2 모듈의 검색 경로 동적으로 바꾸기
# +
import sys
# Append a user library directory to the module search path.
# NOTE(review): '~' is not expanded by the import machinery; wrap the
# path in os.path.expanduser() for this entry to actually be searched.
sys.path.append('~/mypythonlib')
print(sys.path)
# -
# Directories inserted at index 0 are searched first.
sys.path.insert(0, '~/mypythonlib2')
print(sys.path)
# <p style='text-align: right;'>참고 문헌: 파이썬(열혈강의)(개정판 VER.2), 이강성, FreeLec, 2005년 8월 29일</p>
| python3.6/.ipynb_checkpoints/python17-checkpoint.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// + [markdown] slideshow={"slide_type": "slide"}
// # Algebraic Data Types and Pattern Matching
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Java Version
//
// -
System.out.println(Runtime.version());
System.out.println(System.getProperty("java.home"));
// + [markdown] slideshow={"slide_type": "slide"}
// # Algebraic Data Types and Where to find them ?
// 
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Algebraic Data Types
// Composite type of product types and sum types
// - product types (A x B x C)
// - sum type (A | B | C)
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Algebraic Data Types in OCaml
// Examples of product types and sum types in OCaml
// ```ocaml
// (* record *)
// type car = { brand: string; color: string; };;
// type bus = { brand: string; height: float; };;
// ```
// ```ocaml
// (* variant *)
// type vehicle = Car of car | Bus of bus ;;
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Pattern Matching
// ```ocaml
// let color = function
// | Car { color=c } -> c
// | Bus _ -> "yellow"
// ;;
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Pattern Matching: when clause
// ```ocaml
// let okayForLowBridge = function
// | Car _ -> true
// | Bus { height=h } when h < 12.0 -> true
// | _ -> false
// ;;
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Equivalence in OOP
// - sum type (interface)
// ```java
// interface Vehicle { }
// ```
// - product types (class)
// ```java
// class Bus implements Vehicle {
// String name;
// double height;
// }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Differences FP vs OOP
// - OOP defines
// - open types,
// - closed functions (methods)
// - FP defines
// - closed types,
// - open functions
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## OOP == polymorphism
// Actual: OOP
//
// -
// | Behaviors | Function | Method |
// | -------------- | -------------------- | -------------- |
// | class POV | outside a class | inside a class |
// | Open interface | visitor / instanceof | polymorphism |
//
// > having open interfaces has a cost !
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## 
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## With Pattern matching
// Proposed: fusion OOP + FP
//
// -
// | Behaviors | Function | Method |
// | --------------- | ---------------- | -------------- |
// | Closed type??? | pattern matching | polymorphism |
// | Open interface | pattern matching | polymorphism |
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Pattern Matching in Java
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Plan
// - expression switch (Java 12 to 14)
// - record (Java 14+)
// - sealed interface (Java 15+)
// - instanceof enhancement (Java 14+)
// - future ?
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Expression Switch
// - Enhance switch to be an expression too
// - Fix C switch warts ?
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## What wrong with the C switch ?
// `break` is easy to forget (fallthrough) + weird scope
//
// -
// Deliberately broken example of classic C-style switch warts: the missing
// `break`s cause fallthrough, all case labels share one variable scope (so
// the second `var length` clashes with the first), and there is no default.
void color(String vehicle) {
    switch(vehicle) {
        case "car":
        case "sedan":
            var length = vehicle.length();
            System.out.println((length < 4)? "blue": "red");
            // oops
        case "bus":
            var length = 0; // oops
            System.out.println("yellow");
            break;
    } // oops no default
}
color("sedan");
// + [markdown] slideshow={"slide_type": "slide"}
// ## Arrow Syntax : Enhance existing switch
// - avoid fallthrough: use curly braces
// - allow comma separated values
//
// -
// Arrow-form switch: comma-separated labels, no fallthrough, and each
// braced arm has its own variable scope (both `length` declarations are
// legal now).
void color(String vehicle) {
    switch(vehicle) {
        case "car", "sedan" -> {
            var length = vehicle.length();
            System.out.println((length < 4)? "blue": "red");
        }
        case "bus" -> {
            var length = 0;
            System.out.println("yellow");
        }
    } // oops no default
}
color("sedan");
// + [markdown] slideshow={"slide_type": "slide"}
// ## Expression Switch
// switch can be used as an expression, `default` is mandatory !
//
// -
// Switch used as an expression: every possible input must produce a value,
// so a `default` arm is mandatory here.
String color(String vehicle) {
    return switch(vehicle) {
        case "car", "sedan" -> {
            var length = vehicle.length();
            // `yield` produces the value of a braced arm.
            yield (length < 4)? "blue": "red";
        }
        case "bus" -> "yellow";
        default -> {
            throw new AssertionError();
        }
    };
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Yield backward compatibility issue
// `yield` is enabled as a keyword only at the start of a statement
//
// -
void yield(int value) { }
void color(String vehicle) {
yield (42);
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Future
// ```java
// String color(Vehicle vehicle) {
// return switch(vehicle) {
// case Car(String brand, Color color) -> color;
// case Bus(String brand, double height) -> "yellow";
// }; //no default
// }
// ```
//
// -
// We need to be able to deconstruct a class
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Record
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Example of Record
// Declaration
//
// -
// A record declares its components once; the compiler generates the
// constructor, accessors, equals/hashCode and toString.
record Bus(String brand, double height) { }
// Usage
//
var bus = new Bus("imperial", 7);
System.out.println(bus);
// + [markdown] slideshow={"slide_type": "slide"}
// ## Constructors
//
// -
record Bus(String brand, double height) {
  // canonical constructor, generated automatically
  // public Bus(String brand, double height) {
  //   ...
  // }
  // compact constructor: no parameter list, runs before the record's
  // fields are assigned — the place for validation
  public Bus {
    Objects.requireNonNull(brand);
  }
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## equals, hashCode and toString
// are automatically generated
//
// -
// The generated equals/hashCode compare component values, so two records
// built from the same values are equal.
record Bus(String brand, double height) { }
var bus1 = new Bus("imperial", 7);
var bus2 = new Bus("imperial", 7);
System.out.println(bus1.equals(bus2));  // true
// + [markdown] slideshow={"slide_type": "slide"}
// ## Records are immutable
// Avoid mutation during the matching
// ```java
// String color(Vehicle vehicle) {
// return switch(vehicle) {
// case Bus(_, double height) when (bus.height = 3) < 11 -> {}
// case Bus(_, double height) when (bus.height == 3) -> {}
// default -> ...
// };
// }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Reflection support
//
// -
// Records are reflective: each component is exposed as a RecordComponent.
record Bus(String brand, double height) { }
var components = List.of(Bus.class.getRecordComponents());
System.out.println(components);
// + [markdown] slideshow={"slide_type": "slide"}
// ## Restrictions
// - shallow immutability
// - no inheritance
// - other constructors have to delegate to the primary constructor,
// initializer blocks are not supported
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Sealed interface
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Closed hierarchy
// Add a keyword `sealed` + a `permits` list
// ```java
// sealed interface Vehicle
// permits Car, Bus { }
// record Car(String brand, String color) implements Vehicle { }
// record Bus(String brand, double height) implements Vehicle { }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Add inference of `permits` clause ?
// The clause `permits` is inferred if everything in the same compilation unit
//
// -
// All implementations are nested in the same compilation unit, so the
// `permits` clause can be inferred by the compiler.
sealed interface Vehicle {
  // inferred permits Car, Bus
  record Car(String brand, String color) implements Vehicle { }
  record Bus(String brand, double height) implements Vehicle { }
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Exhaustiveness
// The compiler doesn't require the `default` clause anymore.
// ```java
// String color(Vehicle vehicle) {
// return switch(vehicle) {
// case Car car -> car.color;
// case Bus bus -> "yellow";
// }; //no default
// }
// ```
// but before switching on types, let's start by enhancing `instanceof`
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Enhanced `instanceof`
//
// -
// - Introduce de-construction
// - Fix the unnecessary cast
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## What's wrong with the old instanceof?
// The cast is unnecessary.
//
// -
record Bus(String brand, double height) {
  // Pre-pattern-matching equals: the instanceof test and the cast below
  // repeat the same type information.
  public boolean equals(Object o) {
    if (!(o instanceof Bus)) {
      return false;
    }
    var bus = (Bus) o; // <-- that cast
    return brand.equals(bus.brand)
        && Double.compare(height, bus.height) == 0;
  }
}
// > The VM routinely removes it, so it's not present
// > in the generated assembly code
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Type test pattern
// Declare a variable available if the `instanceof` is true
//
// -
record Bus(String brand, double height) {
  // Type test pattern: `o instanceof Bus bus` both tests and binds, so
  // `bus` is only in scope where the test succeeded — no explicit cast.
  public boolean equals(Object o) {
    return o instanceof Bus bus
        && brand.equals(bus.brand)
        && Double.compare(height, bus.height) == 0;
  }
}
var bus = new Bus("imperial", 7);
var bus2 = new Bus("imperial", 7);
System.out.println(bus.equals(bus2));  // true
// + [markdown] slideshow={"slide_type": "slide"}
// ## Type test vs Code block
//
// -
record Bus(String brand, double height) {
  // Same equals, written with the binding used inside the true branch.
  public boolean equals(Object o){
    if (o instanceof Bus bus) {
      return brand.equals(bus.brand)
          && Double.compare(height, bus.height) == 0;
    }
    return false;
  }
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Type test vs Code block
//
// -
record Bus(String brand, double height) {
  // Early-return form: the pattern variable `bus` is in scope after the
  // negated instanceof test, because the method has already returned
  // when the test failed.
  public boolean equals(Object o){
    if (!(o instanceof Bus bus)) {
      return false;
    }
    // fixed: was `author.bran`, a copy/paste typo — there is no `author`
    // in scope; the bound pattern variable is `bus`
    return brand.equals(bus.brand)
        && Double.compare(height, bus.height) == 0;
  }
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## More fun
//
// -
// Two independent pattern variables in one condition; both are in scope
// in the branch where both tests succeeded.
public double add(Object o1, Object o2) {
  if (o1 instanceof Integer i1 && o2 instanceof Integer i2) {
    return i1 + i2;
  }
  throw new ArithmeticException();
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Even funnier
//
// -
// The pattern variable `b` is usable in the loop condition itself: the
// loop runs while `o` is a Boolean whose value is true.
public void loop(Object o) {
  while((o instanceof Boolean b) && b) {
    System.out.println(o);
    o = false;  // ends the loop on the next condition test
  }
}
loop(true);
// + [markdown] slideshow={"slide_type": "slide"}
// # Future ?
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Switch on types
// ```java
// String brand(Vehicle vehicle) {
// return switch(vehicle) {
// case Car car -> car.brand;
// case Bus bus -> bus.brand;
// }; //no default
// }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Deconstruction of local declaration
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Extracting values of a record
//
// -
// Without destructuring: each component is read through its accessor.
record Car(String brand, String color) {}
var car = new Car("imperial", "red");
var brand = car.brand();
var color = car.color();
System.out.println(brand + " " + color);
// + [markdown] slideshow={"slide_type": "slide"}
// ## Use destructuring
// ```java
// record Car(String brand, String color) {}
// var car = new Car("imperial", "red");
// Car(String owner, String color) = car;
// System.out.println(owner + " " + color);
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## With inference
// Reusing `var` and `_`
// ```java
// record Car(String brand, String color) {}
// var car = new Car("imperial", "red");
// Car(_, var color) = car;
// System.out.println(color);
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Tuple ?
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Use inference
// Removing the name of the type which can be inferred too
// ```java
// record Car(String brand, String color) {}
// Car car = ("imperial", "red"); // inference
// (_, var color) = car; // inference
// System.out.println(color);
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Inference in for loop (tuple)
// The type Map.Entry is inferred
// ```java
// Map<String, Car> mapNameToCar = ...
// for((var name, var car) : mapNameToCar.entrySet()) {
// System.out.println(name + " " + car);
// }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Deconstruction in switch
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## A switch on types + destructuring
// Allow to de-construct the content of a record
// ```java
// String brand(Vehicle vehicle) {
// return switch(vehicle) {
// case Car(var brand, _) -> brand;
// case Bus(var brand, _) -> brand;
// }; //no default
// }
// ```
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Deconstructing instanceof
// ```java
// record Bus(String brand, double height) {
// public boolean equals(Object o) {
// return o instanceof Bus(String brand2, double height2)
// && brand.equals(brand2)
// && Double.compare(height, height2) == 0;
// }
//
// +
//}
//var bus = new Bus("imperial", 7);
//var bus2 = new Bus("imperial", 7);
//System.out.println(bus.equals(bus2));
//```
// + [markdown] slideshow={"slide_type": "slide"}
// ## Deconstructing + var
// ```java
// record Bus(String brand, double height) {
// public boolean equals(Object o) {
// return o instanceof Bus(var brand2, var height2)
// && brand.equals(brand2)
// && Double.compare(height, height2) == 0;
// }
//
// +
//}
//var bus = new Bus("imperial", 7);
//var bus2 = new Bus("imperial", 7);
//System.out.println(bus.equals(bus2));
//```
// + [markdown] slideshow={"slide_type": "slide"}
// # Conclusion: Full Pattern Matching
//
// + [markdown] slideshow={"slide_type": "slide"}
// ## Kind of patterns
// - __null pattern__ (`null`), match only `null`
// - __type test pattern__ (`Foo foo`) match the type (not `null`)
// - __var test pattern__ (`var foo`) infer the type
// - __any test pattern__ (`_`) don't introduce a variable
// - __or pattern__ (`pattern1, pattern2`) match either one side or the other
// - __extraction pattern__ (`(..., pattern, ...)`) match a component
// - __constant pattern__ (`123`) match the constant value
//
// -
// `var` or `_` are just inference, no special matching
//
| slideshow/algebraic-data-type-and-pattern-matching.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# # Gerador de Imagens
# ***
# ## 1. Importação dos Pacotes
# ***
import numpy as np # pacote de procedimentos numéricos
import skimage.morphology as sk # pacote de morfologia matemática
import matplotlib.pyplot as plt # pacote de visualização gráfica
import pandas as pd # pacote de gerenciamento de dataframes
from PIL import Image # pacote de operações com imagens
# ## 2. Funções para Gerar Dígitos em Braille
# ***
def element_generator(radius=4, state=True):
    '''
    Generate one dot (element) of a braille character.

    Args:
        radius (int) -- radius of the dot
        state (bool) -- True for a raised dot, False for an empty cell

    Return:
        element (array) -- square binary image of a single braille element
    '''
    # fixed: the original docstring opened with four quotes (''''), leaving
    # a stray quote as the first docstring character
    element = sk.disk(radius)
    if state:  # fixed: was `state == True`
        # Clear the four axis-extreme pixels to round off the disk outline.
        mid_row = element.shape[0] // 2
        mid_col = element.shape[1] // 2
        element[0, mid_col] = 0
        element[-1, mid_col] = 0
        element[mid_row, 0] = 0
        element[mid_row, -1] = 0
    else:
        # No dot: an all-zero image with the same footprint as a dot.
        element = np.zeros(element.shape, dtype='uint8')
    return element
def zero_padding(image, width=1):
    '''
    Surround an image array with a zero-valued border.

    Args:
        image (array) -- image array
        width (int) -- border thickness in pixels

    Return:
        image (array) -- image array with the zero border added
    '''
    return np.pad(image, pad_width=width, mode='constant')
def reshape_encoded_image(flatten):
    '''
    Build the 3x2 braille dot template from a flat encoding.

    Args:
        flatten (list) -- flat list with the six dot states of a character

    Return:
        encoded_image (array) -- 3x2 uint8 array with the dot states
    '''
    return np.array(flatten, dtype='uint8').reshape(3, 2)
def make_caractere(encoded_image, radius=4, pad_width=1):
    '''
    Render one braille character image from its 3x2 dot template.

    Args:
        encoded_image (array) -- 3x2 array with the dot states of the character
        radius (int) -- dot radius
        pad_width (int) -- border thickness in pixels around the character

    Return:
        caractere (array) -- image of the braille character
    '''
    # Build each of the three rows by placing its two dots side by side,
    # then stack the rows vertically into the full 3x2 cell.
    rows = [
        np.concatenate((element_generator(radius, encoded_image[row][0]),
                        element_generator(radius, encoded_image[row][1])), axis=1)
        for row in range(3)
    ]
    caractere = np.concatenate(rows)
    return zero_padding(caractere, width=pad_width)
def string_to_list(flatten_list):
    '''
    Convert braille encodings stored as strings (e.g. '[1,0,1,1,0,0]' as
    read from the CSV) into lists of ints, in place.

    Args:
        flatten_list (list) -- list of encoding strings

    Returns:
        flatten_list (list) -- the same list, with each entry replaced by
                               a list of six 0/1 ints
    '''
    for i, encoded in enumerate(flatten_list):
        # Strip the literal list punctuation once, instead of re-cleaning
        # the same string for every one of the six digits as before.
        digits = encoded.replace('[', '').replace(']', '').replace(',', '')
        # Like the original slicing [0:1] .. [5:6], only the first six
        # characters are used.
        flatten_list[i] = [int(ch) for ch in digits[:6]]
    return flatten_list
def dict_codification(dataframe_path = 'braille-pt-br.csv', usecols = ['Codificacao', 'Rotulo']):
    '''
    Build a dictionary mapping each label to its braille dot encoding.

    Args:
        dataframe_path (string) -- path of the CSV file with the encoding data
        usecols (list) -- CSV columns to read: encoding and label

    Return:
        dict_elements (dict) -- label -> list of six 0/1 dot states
    '''
    # NOTE(review): `usecols` is a mutable default argument; harmless as long
    # as it is only read, but worth confirming it is never mutated.
    dataframe = pd.read_csv(dataframe_path, delimiter = ',', usecols = usecols)
    # Decode the string-encoded dot lists and pair them with their labels.
    codificacao = string_to_list(list(dataframe['Codificacao']))
    rotulo = list(dataframe['Rotulo'])
    return dict(zip(rotulo, codificacao))
def caractere_generator(caractere_str, radius = 4, pad_width = 4):
    '''
    Render a single character as a braille cell image.

    Args:
        caractere_str (string) -- character to render in braille
        radius (int) -- radius of each dot
        pad_width (int) -- border thickness around each element

    Return:
        caractere (array) -- image of the character
    '''
    if caractere_str == ' ':
        # Space: a cell with all six dots absent.
        caractere = make_caractere(np.array([[0,0], [0,0], [0,0]]), radius, pad_width)
    else:
        # NOTE(review): dict_codification() re-reads the CSV on every call;
        # consider caching the dictionary when rendering long texts.
        encoded_image = reshape_encoded_image(dict_codification()[caractere_str])
        caractere = make_caractere(encoded_image, radius, pad_width)
    return caractere
# ## 3. Funções para Gerar as Imagens em Braille
# ***
# +
def concatenate_caractere(list_caractere, axis=1):
    '''
    Concatenate braille character images along rows or columns.

    Args:
        list_caractere (list) -- list of arrays with braille characters
        axis (int) -- concatenation axis (1 = side by side, 0 = stacked)

    Return:
        image_block (array) -- single array with all characters joined
    '''
    # A single np.concatenate call joins all images at once, replacing the
    # original loop that re-copied the growing block on every iteration
    # (quadratic in the number of characters).
    return np.concatenate(list_caractere, axis=axis)
# -
def string_to_line_braille(text_str):
    '''
    Render a line of text as a single braille image.

    Args:
        text_str (str) -- text string

    Return:
        line_image (array) -- braille image of the text, one cell per character
    '''
    rendered = [caractere_generator(symbol) for symbol in text_str]
    return concatenate_caractere(rendered, axis = 1)
def string_to_column_braille(list_texts):
    '''
    Render several lines of text as one stacked braille image.

    Args:
        list_texts (list) -- list of text lines

    Returns:
        image_braille (array) -- braille image with one row of cells per line
    '''
    rendered_lines = [string_to_line_braille(line) for line in list_texts]
    return concatenate_caractere(rendered_lines, axis = 0)
# ## 4. Teste da Classe
# ***
# +
# Smoke test: render three lines of text with the packaged image_generator
# class and save the result as a TIFF file.
from image_generator import image_generator
gen_figure = image_generator()
text = ['<NAME>',
        '<NAME> ',
        '<NAME> ']
plt.figure(figsize = (20, 10))
plt.imshow(gen_figure.string_to_column_braille(text), cmap = 'gray')
plt.axis('off')
# Scale the 0/1 image to 0/255 before converting to an 8-bit PIL image.
image = Image.fromarray((gen_figure.string_to_column_braille(text) * 255).astype(np.uint8))
image.save('image.tiff')
#plt.savefig('image.tiff')
# -
| image-generator/image_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Praktische Übung 5: Ensemble Learning - Lösung
# In diesem Notebook werden wir verschiedene Formen des "Ensemble Learning" einsetzen und einen einfachen Bagging-Algorithmus selbst implementieren.
# Vorab initialisieren wir die Zufallsgeneratoren um vergleichbare Ergebnisse zu erhalten:
# Seed both NumPy's and Python's RNGs so the notebook is reproducible.
import numpy as np
import random
np.random.seed(0)
random.seed(0)
import sklearn
print("Numpy version:", np.__version__)
print("Sklearn version:", sklearn.__version__)
# ### Daten laden
# Für diese Übung verwenden wir den [Wein-Datensatz](https://archive.ics.uci.edu/ml/datasets/wine), welcher ebenfalls ein bekannter Datensatz in der ML-Welt ist.
# Die offizielle Beschreibung lautet:
# ```
# These data are the results of a chemical analysis of wines grown in the same region in Italy but derived from three different cultivars. The analysis determined the quantities of 13 constituents found in each of the three types of wines.
# ```
# Anhand dieser Merkmale soll die Qualität (Spalte `quality`) des Weins vorhergesagt werden.
import pandas as pd
# Load the wine dataset; the `quality` column is the prediction target.
df = pd.read_csv("../data/wine.csv")
df.head()
# Bevor wir loslegen, schauen wir uns die Verteilung des Labels an:
# Inspect the label distribution before modelling.
df['quality'].hist()
from sklearn.model_selection import train_test_split
# Separate features from the label and hold out 20% as a test set.
X = df.drop('quality', axis=1)
y = df['quality']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# ### Aufgabe 1: Decision Tree, Random Forest, GBT
# Trainieren Sie die folgenden Modelle und ermitteln Sie die Accuarcy auf den Testdaten. Geben Sie dabei jeweils den Parameter `random_state=0` bei der Erstellung des Modells and und beschränken Sie die maximale Baumtiefe auf `max_depth=3`.
# - Einfacher Entscheidungsbaum (`DecisionTreeClassifier`)
# - Random Forest (`RandomForestClassifier`)
# - GBT (`GradientBoostingClassifier`)
#
# Hinweis: Für diese Modelle müssen wir die Daten nicht skalieren und kein One-hot-encoding durchführen.
from sklearn.tree import DecisionTreeClassifier
# Baseline: a single decision tree, depth-limited to 3.
clfTre = DecisionTreeClassifier(criterion="entropy", random_state=0, max_depth=3)
clfTre.fit(X_train,y_train)
from sklearn.metrics import accuracy_score
predictions = clfTre.predict(X_test)
accuracy_score(y_test, predictions)
from sklearn.ensemble import RandomForestClassifier
# Bagging ensemble of depth-3 trees.
clf = RandomForestClassifier(random_state=0, max_depth=3)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
accuracy_score(y_test, predictions)
from sklearn.ensemble import GradientBoostingClassifier
# Boosting ensemble of depth-3 trees.
clf = GradientBoostingClassifier(random_state=0, max_depth=3)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
accuracy_score(y_test, predictions)
# ### Aufgabe 2: GBT Tuning
# Der `GradientBoostingClassifier` und der `RandomForest` haben als Hyperparameter u.a. die Anzahl der Bäume die trainiert werden (`n_estimators`) und die maximale Baumtiefe (`max_depth`), siehe [hier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html).
#
# - Führen Sie für beide Modelle ein Cross-Validierung über diese Hyperparameter durch, betrachten Sie dabei folgende Werte: $n\_estimators \in [60, 80, 100, 120, 140]$ und $max\_depth \in [2, 3, 4, 5]$. Nehmen Sie das Notebook `6_TreeEnsembles` auf unserem GitHub als Vorlage. Hinweis: Sie können alle Hyperparameter auf einmal übergeben. Mehr Details finden Sie wenn Sie [hier](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) bis nach unten zum Code-Beispiel scrollen.
# - Welches sind die besten Parameter für `max_depth` und `n_estimators` und welches ist das bessere Modell?
# - Trainieren Sie das bessere Modelle mit den besten Parametern und machen Sie eine Vorhersage auf den Testdaten. Vergleichen Sie die Ergebnisse mit Aufgabe 1.
# Hyperparameter grid shared by both models.
parameter_candidates = [{'max_depth': [2, 3, 4, 5], 'n_estimators': [60,80,100,120,140]}]
# +
from sklearn.model_selection import GridSearchCV
# Exhaustive cross-validated search over the GBT hyperparameters.
gbt = GradientBoostingClassifier(random_state=0)
grid_clf = GridSearchCV(estimator=gbt, param_grid=parameter_candidates, n_jobs=-1)
grid_clf.fit(X_train, y_train)
# -
print('Best n_estimators:', grid_clf.best_estimator_.n_estimators)
print('Best max_depth:', grid_clf.best_estimator_.max_depth)
print('Best score:', grid_clf.best_score_)
# Same grid search, this time over the random forest.
rf = RandomForestClassifier(random_state=0)
grid_clf = GridSearchCV(estimator=rf, param_grid=parameter_candidates, n_jobs=-1)
grid_clf.fit(X_train, y_train)
print('Best n_estimators:', grid_clf.best_estimator_.n_estimators)
print('Best max_depth:', grid_clf.best_estimator_.max_depth)
print('Best score:', grid_clf.best_score_)
# Retrain the better model with the best parameters found above and
# evaluate on the held-out test set.
clf = GradientBoostingClassifier(random_state=0, max_depth=5, n_estimators=100)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
accuracy_score(y_test, predictions)
# Hinweis: Es hat sich gezeigt, dass mit unterschiedlichen Library-Versionen auch unterschiedliche Ergebnisse für `n_estimators` und `max_depth` gefunden werden.
# ### Aufgabe 3: Bagging-Modell
# Implementieren Sie ein Bagging-Modell von Hand (d.h. nicht die Sklearn-Library verwenden) und testen Sie es auf den Testdaten. Das Bagging-Modell soll folgende Eigenschaften haben:
# - Das Modell soll 10 Basismodelle haben, welche einfache `DecisionTreeClassifier` sind.
# - Jeder dieser DecisionTrees soll auf 70% der Trainingsdaten trainiert werden (Sampling mit Zurücklegen). Tipp: Nutzen Sie `X_train.sample(...)`.
# - Bei der Vorhersage soll die am häufigsten vorhergesagte Klasse als Gesamtvorhersage dienen.
# - Testen Sie das Modell auf den Testdaten.
# Hand-rolled bagging: 10 trees, each trained on a 70% bootstrap sample.
no_trees = 10
subsample_size = 0.7
trees = []
for i in range(0, no_trees):
    # Sample with replacement; vary random_state per iteration so each
    # tree sees a different bootstrap sample.
    X_bootstrap = X_train.sample(frac=subsample_size, replace=True, random_state=i)
    y_bootstrap = y_train[X_bootstrap.index]
    clfTre = DecisionTreeClassifier(criterion="entropy", random_state=0)
    clfTre.fit(X_bootstrap, y_bootstrap)
    trees.append(clfTre)
# Um die Ergebnisse reproduzierbar zu machen, wird bei der `sample`-Methode ein `random_state` übergeben. Dieser `random_state` wird in jedem Schleifendurchlauf auf andere Zahl gesetzt, weil ansonsten immer die gleichen Daten gesampelt werden.
# +
from statistics import mode

# Collect each tree's predictions for the whole test set in one call per
# tree, instead of one `predict` call per tree per data point as before.
# Transposing with zip(*...) gives, per data point, the votes of all
# trees; `mode` picks the majority class exactly as in the original
# per-point loop (same tie behavior, since the same function is used).
per_tree_predictions = [tree.predict(X_test) for tree in trees]
test_predictions = [mode(votes) for votes in zip(*per_tree_predictions)]
# -
from sklearn.metrics import accuracy_score
accuracy_score(y_test, test_predictions)
| solutions/P5_Exercise_TreeEnsembles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Convexity
# :label:`sec_convexity`
#
# Convexity plays a vital role in the design of optimization algorithms.
# This is largely due to the fact that it is much easier to analyze and test algorithms in such a context.
# In other words,
# if the algorithm performs poorly even in the convex setting,
# typically we should not hope to see great results otherwise.
# Furthermore, even though the optimization problems in deep learning are generally nonconvex, they often exhibit some properties of convex ones near local minima. This can lead to exciting new optimization variants such as :cite:`Izmailov.Podoprikhin.Garipov.ea.2018`.
#
# + origin_pos=2 tab=["pytorch"]
# %matplotlib inline
import numpy as np
import torch
from mpl_toolkits import mplot3d
from d2l import torch as d2l
# + [markdown] origin_pos=4
# ## Definitions
#
# Before convex analysis,
# we need to define *convex sets* and *convex functions*.
# They lead to mathematical tools that are commonly applied to machine learning.
#
#
# ### Convex Sets
#
# Sets are the basis of convexity. Simply put, a set $\mathcal{X}$ in a vector space is *convex* if for any $a, b \in \mathcal{X}$ the line segment connecting $a$ and $b$ is also in $\mathcal{X}$. In mathematical terms this means that for all $\lambda \in [0, 1]$ we have
#
# $$\lambda a + (1-\lambda) b \in \mathcal{X} \text{ whenever } a, b \in \mathcal{X}.$$
#
# This sounds a bit abstract. Consider :numref:`fig_pacman`. The first set is not convex since there exist line segments that are not contained in it.
# The other two sets suffer no such problem.
#
# 
# :label:`fig_pacman`
#
# Definitions on their own are not particularly useful unless you can do something with them.
# In this case we can look at intersections as shown in :numref:`fig_convex_intersect`.
# Assume that $\mathcal{X}$ and $\mathcal{Y}$ are convex sets. Then $\mathcal{X} \cap \mathcal{Y}$ is also convex. To see this, consider any $a, b \in \mathcal{X} \cap \mathcal{Y}$. Since $\mathcal{X}$ and $\mathcal{Y}$ are convex, the line segments connecting $a$ and $b$ are contained in both $\mathcal{X}$ and $\mathcal{Y}$. Given that, they also need to be contained in $\mathcal{X} \cap \mathcal{Y}$, thus proving our theorem.
#
# 
# :label:`fig_convex_intersect`
#
# We can strengthen this result with little effort: given convex sets $\mathcal{X}_i$, their intersection $\cap_{i} \mathcal{X}_i$ is convex.
# To see that the converse is not true, consider two disjoint sets $\mathcal{X} \cap \mathcal{Y} = \emptyset$. Now pick $a \in \mathcal{X}$ and $b \in \mathcal{Y}$. The line segment in :numref:`fig_nonconvex` connecting $a$ and $b$ needs to contain some part that is neither in $\mathcal{X}$ nor in $\mathcal{Y}$, since we assumed that $\mathcal{X} \cap \mathcal{Y} = \emptyset$. Hence the line segment is not in $\mathcal{X} \cup \mathcal{Y}$ either, thus proving that in general unions of convex sets need not be convex.
#
# 
# :label:`fig_nonconvex`
#
# Typically the problems in deep learning are defined on convex sets. For instance, $\mathbb{R}^d$,
# the set of $d$-dimensional vectors of real numbers,
# is a convex set (after all, the line between any two points in $\mathbb{R}^d$ remains in $\mathbb{R}^d$). In some cases we work with variables of bounded length, such as balls of radius $r$ as defined by $\{\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } \|\mathbf{x}\| \leq r\}$.
#
# ### Convex Functions
#
# Now that we have convex sets we can introduce *convex functions* $f$.
# Given a convex set $\mathcal{X}$, a function $f: \mathcal{X} \to \mathbb{R}$ is *convex* if for all $x, x' \in \mathcal{X}$ and for all $\lambda \in [0, 1]$ we have
#
# $$\lambda f(x) + (1-\lambda) f(x') \geq f(\lambda x + (1-\lambda) x').$$
#
# To illustrate this let us plot a few functions and check which ones satisfy the requirement.
# Below we define a few functions, both convex and nonconvex.
#
# + origin_pos=5 tab=["pytorch"]
# Plot three sample functions over [-2, 2); `segment` gives the two
# endpoints x = -1.5 and x = 1, so each subplot also draws the chord used
# in the convexity definition.
f = lambda x: 0.5 * x**2  # Convex
g = lambda x: torch.cos(np.pi * x)  # Nonconvex
h = lambda x: torch.exp(0.5 * x)  # Convex
x, segment = torch.arange(-2, 2, 0.01), torch.tensor([-1.5, 1])
d2l.use_svg_display()
_, axes = d2l.plt.subplots(1, 3, figsize=(9, 3))
for ax, func in zip(axes, [f, g, h]):
    d2l.plot([x, segment], [func(x), func(segment)], axes=ax)
# + [markdown] origin_pos=6
# As expected, the cosine function is *nonconvex*, whereas the parabola and the exponential function are. Note that the requirement that $\mathcal{X}$ is a convex set is necessary for the condition to make sense. Otherwise the outcome of $f(\lambda x + (1-\lambda) x')$ might not be well defined.
#
#
# ### Jensen's Inequality
#
# Given a convex function $f$,
# one of the most useful mathematical tools
# is *Jensen's inequality*.
# It amounts to a generalization of the definition of convexity:
#
# $$\sum_i \alpha_i f(x_i) \geq f\left(\sum_i \alpha_i x_i\right) \text{ and } E_X[f(X)] \geq f\left(E_X[X]\right),$$
# :eqlabel:`eq_jensens-inequality`
#
# where $\alpha_i$ are nonnegative real numbers such that $\sum_i \alpha_i = 1$ and $X$ is a random variable.
# In other words, the expectation of a convex function is no less than the convex function of an expectation, where the latter is usually a simpler expression.
# To prove the first inequality we repeatedly apply the definition of convexity to one term in the sum at a time.
#
#
# One of the common applications of Jensen's inequality is
# to bound a more complicated expression by a simpler one.
# For example,
# its application can be
# with regard to the log-likelihood of partially observed random variables. That is, we use
#
# $$E_{Y \sim P(Y)}[-\log P(X \mid Y)] \geq -\log P(X),$$
#
# since $\int P(Y) P(X \mid Y) dY = P(X)$.
# This can be used in variational methods. Here $Y$ is typically the unobserved random variable, $P(Y)$ is the best guess of how it might be distributed, and $P(X)$ is the distribution with $Y$ integrated out. For instance, in clustering $Y$ might be the cluster labels and $P(X \mid Y)$ is the generative model when applying cluster labels.
#
#
#
# ## Properties
#
# Convex functions have many useful properties. We describe a few commonly-used ones below.
#
#
# ### Local Minima Are Global Minima
#
# First and foremost, the local minima of convex functions are also the global minima.
# We can prove it by contradiction as follows.
#
# Consider a convex function $f$ defined on a convex set $\mathcal{X}$.
# Suppose that $x^{\ast} \in \mathcal{X}$ is a local minimum:
# there exists a small positive value $p$ so that for $x \in \mathcal{X}$ that satisfies $0 < |x - x^{\ast}| \leq p$ we have $f(x^{\ast}) < f(x)$.
#
# Assume that the local minimum $x^{\ast}$
# is not the global minimum of $f$:
# there exists $x' \in \mathcal{X}$ for which $f(x') < f(x^{\ast})$.
# There also exists
# $\lambda \in [0, 1)$ such as $\lambda = 1 - \frac{p}{|x^{\ast} - x'|}$
# so that
# $0 < |\lambda x^{\ast} + (1-\lambda) x' - x^{\ast}| \leq p$.
#
# However,
# according to the definition of convex functions, we have
#
# $$\begin{aligned}
# f(\lambda x^{\ast} + (1-\lambda) x') &\leq \lambda f(x^{\ast}) + (1-\lambda) f(x') \\
# &< \lambda f(x^{\ast}) + (1-\lambda) f(x^{\ast}) \\
# &= f(x^{\ast}),
# \end{aligned}$$
#
# which contradicts with our statement that $x^{\ast}$ is a local minimum.
# Therefore, there does not exist $x' \in \mathcal{X}$ for which $f(x') < f(x^{\ast})$. The local minimum $x^{\ast}$ is also the global minimum.
#
# For instance, the convex function $f(x) = (x-1)^2$ has a local minimum at $x=1$, which is also the global minimum.
#
# + origin_pos=7 tab=["pytorch"]
# f(x) = (x - 1)^2: its local minimum at x = 1 is also the global minimum.
f = lambda x: (x - 1) ** 2
d2l.set_figsize()
d2l.plot([x, segment], [f(x), f(segment)], 'x', 'f(x)')
# + [markdown] origin_pos=8
# The fact that the local minima for convex functions are also the global minima is very convenient.
# It means that if we minimize functions we cannot "get stuck".
# Note, though, that this does not mean that there cannot be more than one global minimum or that there might even exist one. For instance, the function $f(x) = \mathrm{max}(|x|-1, 0)$ attains its minimum value over the interval $[-1, 1]$. Conversely, the function $f(x) = \exp(x)$ does not attain a minimum value on $\mathbb{R}$: for $x \to -\infty$ it asymptotes to $0$, but there is no $x$ for which $f(x) = 0$.
#
# ### Below Sets of Convex Functions Are Convex
#
# We can conveniently
# define convex sets
# via *below sets* of convex functions.
# Concretely,
# given a convex function $f$ defined on a convex set $\mathcal{X}$,
# any below set
#
# $$\mathcal{S}_b := \{x | x \in \mathcal{X} \text{ and } f(x) \leq b\}$$
#
# is convex.
#
# Let us prove this quickly. Recall that for any $x, x' \in \mathcal{S}_b$ we need to show that $\lambda x + (1-\lambda) x' \in \mathcal{S}_b$ as long as $\lambda \in [0, 1]$.
# Since $f(x) \leq b$ and $f(x') \leq b$,
# by the definition of convexity we have
#
# $$f(\lambda x + (1-\lambda) x') \leq \lambda f(x) + (1-\lambda) f(x') \leq b.$$
#
#
# ### Convexity and Second Derivatives
#
# Whenever the second derivative of a function $f: \mathbb{R}^n \rightarrow \mathbb{R}$ exists it is very easy to check whether $f$ is convex.
# All we need to do is check whether the Hessian of $f$ is positive semidefinite: $\nabla^2f \succeq 0$, i.e.,
# denoting the Hessian matrix $\nabla^2f$ by $\mathbf{H}$,
# $\mathbf{x}^\top \mathbf{H} \mathbf{x} \geq 0$
# for all $\mathbf{x} \in \mathbb{R}^n$.
# For instance, the function $f(\mathbf{x}) = \frac{1}{2} \|\mathbf{x}\|^2$ is convex since $\nabla^2 f = \mathbf{1}$, i.e., its Hessian is an identity matrix.
#
#
# Formally, a twice-differentiable one-dimensional function $f: \mathbb{R} \rightarrow \mathbb{R}$ is convex
# if and only if its second derivative $f'' \geq 0$. For any twice-differentiable multi-dimensional function $f: \mathbb{R}^{n} \rightarrow \mathbb{R}$,
# it is convex if and only if its Hessian $\nabla^2f \succeq 0$.
#
# First, we need to prove the one-dimensional case.
# To see that
# convexity of $f$ implies
# $f'' \geq 0$ we use the fact that
#
# $$\frac{1}{2} f(x + \epsilon) + \frac{1}{2} f(x - \epsilon) \geq f\left(\frac{x + \epsilon}{2} + \frac{x - \epsilon}{2}\right) = f(x).$$
#
# Since the second derivative is given by the limit over finite differences it follows that
#
# $$f''(x) = \lim_{\epsilon \to 0} \frac{f(x+\epsilon) + f(x - \epsilon) - 2f(x)}{\epsilon^2} \geq 0.$$
#
# To see that
# $f'' \geq 0$ implies that $f$ is convex
# we use the fact that $f'' \geq 0$ implies that $f'$ is a monotonically nondecreasing function. Let $a < x < b$ be three points in $\mathbb{R}$,
# where $x = (1-\lambda)a + \lambda b$ and $\lambda \in (0, 1)$.
# According to the mean value theorem,
# there exist $\alpha \in [a, x]$ and $\beta \in [x, b]$
# such that
#
# $$f'(\alpha) = \frac{f(x) - f(a)}{x-a} \text{ and } f'(\beta) = \frac{f(b) - f(x)}{b-x}.$$
#
#
# By monotonicity $f'(\beta) \geq f'(\alpha)$, hence
#
# $$\frac{x-a}{b-a}f(b) + \frac{b-x}{b-a}f(a) \geq f(x).$$
#
# Since $x = (1-\lambda)a + \lambda b$,
# we have
#
# $$\lambda f(b) + (1-\lambda)f(a) \geq f((1-\lambda)a + \lambda b),$$
#
# thus proving convexity.
#
# Second, we need a lemma before
# proving the multi-dimensional case:
# $f: \mathbb{R}^n \rightarrow \mathbb{R}$
# is convex if and only if for all $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$
#
# $$g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y}) \text{ where } z \in [0,1]$$
#
# is convex.
#
# To prove that convexity of $f$ implies that $g$ is convex,
# we can show that for all $a, b, \lambda \in [0, 1]$ (thus
# $0 \leq \lambda a + (1-\lambda) b \leq 1$)
#
# $$\begin{aligned} &g(\lambda a + (1-\lambda) b)\\
# =&f\left(\left(\lambda a + (1-\lambda) b\right)\mathbf{x} + \left(1-\lambda a - (1-\lambda) b\right)\mathbf{y} \right)\\
# =&f\left(\lambda \left(a \mathbf{x} + (1-a) \mathbf{y}\right) + (1-\lambda) \left(b \mathbf{x} + (1-b) \mathbf{y}\right) \right)\\
# \leq& \lambda f\left(a \mathbf{x} + (1-a) \mathbf{y}\right) + (1-\lambda) f\left(b \mathbf{x} + (1-b) \mathbf{y}\right) \\
# =& \lambda g(a) + (1-\lambda) g(b).
# \end{aligned}$$
#
# To prove the converse,
# we can show that for
# all $\lambda \in [0, 1]$
#
# $$\begin{aligned} &f(\lambda \mathbf{x} + (1-\lambda) \mathbf{y})\\
# =&g(\lambda \cdot 1 + (1-\lambda) \cdot 0)\\
# \leq& \lambda g(1) + (1-\lambda) g(0) \\
# =& \lambda f(\mathbf{x}) + (1-\lambda) f(\mathbf{y}).
# \end{aligned}$$
#
#
# Finally,
# using the lemma above and the result of the one-dimensional case,
# the multi-dimensional case
# can be proven as follows.
# A multi-dimensional function $f: \mathbb{R}^n \rightarrow \mathbb{R}$ is convex
# if and only if for all $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ $g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y})$, where $z \in [0,1]$,
# is convex.
# According to the one-dimensional case,
# this holds if and only if
# $g'' = (\mathbf{x} - \mathbf{y})^\top \mathbf{H}(\mathbf{x} - \mathbf{y}) \geq 0$ ($\mathbf{H} \stackrel{\mathrm{def}}{=} \nabla^2f$)
# for all $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$,
# which is equivalent to $\mathbf{H} \succeq 0$
# per the definition of positive semidefinite matrices.
#
#
# ## Constraints
#
# One of the nice properties of convex optimization is that it allows us to handle constraints efficiently. That is, it allows us to solve *constrained optimization* problems of the form:
#
# $$\begin{aligned} \mathop{\mathrm{minimize~}}_{\mathbf{x}} & f(\mathbf{x}) \\
# \text{ subject to } & c_i(\mathbf{x}) \leq 0 \text{ for all } i \in \{1, \ldots, n\},
# \end{aligned}$$
#
# where $f$ is the objective and the functions $c_i$ are constraint functions. To see what this does consider the case where $c_1(\mathbf{x}) = \|\mathbf{x}\|_2 - 1$. In this case the parameters $\mathbf{x}$ are constrained to the unit ball. If a second constraint is $c_2(\mathbf{x}) = \mathbf{v}^\top \mathbf{x} + b$, then this corresponds to all $\mathbf{x}$ lying on a half-space. Satisfying both constraints simultaneously amounts to selecting a slice of a ball.
#
# ### Lagrangian
#
# In general, solving a constrained optimization problem is difficult. One way of addressing it stems from physics with a rather simple intuition. Imagine a ball inside a box. The ball will roll to the place that is lowest and the forces of gravity will be balanced out with the forces that the sides of the box can impose on the ball. In short, the gradient of the objective function (i.e., gravity) will be offset by the gradient of the constraint function (the ball needs to remain inside the box by virtue of the walls "pushing back").
# Note that some constraints may not be active:
# the walls that are not touched by the ball
# will not be able to exert any force on the ball.
#
#
# Skipping over the derivation of the *Lagrangian* $L$,
# the above reasoning
# can be expressed via the following saddle point optimization problem:
#
# $$L(\mathbf{x}, \alpha_1, \ldots, \alpha_n) = f(\mathbf{x}) + \sum_{i=1}^n \alpha_i c_i(\mathbf{x}) \text{ where } \alpha_i \geq 0.$$
#
# Here the variables $\alpha_i$ ($i=1,\ldots,n$) are the so-called *Lagrange multipliers* that ensure that constraints are properly enforced. They are chosen just large enough to ensure that $c_i(\mathbf{x}) \leq 0$ for all $i$. For instance, for any $\mathbf{x}$ where $c_i(\mathbf{x}) < 0$ naturally, we'd end up picking $\alpha_i = 0$. Moreover, this is a saddle point optimization problem where one wants to *maximize* $L$ with respect to all $\alpha_i$ and simultaneously *minimize* it with respect to $\mathbf{x}$. There is a rich body of literature explaining how to arrive at the function $L(\mathbf{x}, \alpha_1, \ldots, \alpha_n)$. For our purposes it is sufficient to know that the saddle point of $L$ is where the original constrained optimization problem is solved optimally.
#
# ### Penalties
#
# One way of satisfying constrained optimization problems at least *approximately* is to adapt the Lagrangian $L$.
# Rather than satisfying $c_i(\mathbf{x}) \leq 0$ we simply add $\alpha_i c_i(\mathbf{x})$ to the objective function $f(x)$. This ensures that the constraints will not be violated too badly.
#
# In fact, we have been using this trick all along. Consider weight decay in :numref:`sec_weight_decay`. In it we add $\frac{\lambda}{2} \|\mathbf{w}\|^2$ to the objective function to ensure that $\mathbf{w}$ does not grow too large. From the constrained optimization point of view we can see that this will ensure that $\|\mathbf{w}\|^2 - r^2 \leq 0$ for some radius $r$. Adjusting the value of $\lambda$ allows us to vary the size of $\mathbf{w}$.
#
# In general, adding penalties is a good way of ensuring approximate constraint satisfaction. In practice this turns out to be much more robust than exact satisfaction. Furthermore, for nonconvex problems many of the properties that make the exact approach so appealing in the convex case (e.g., optimality) no longer hold.
#
# ### Projections
#
# An alternative strategy for satisfying constraints is projections. Again, we encountered them before, e.g., when dealing with gradient clipping in :numref:`sec_rnn_scratch`. There we ensured that a gradient has length bounded by $\theta$ via
#
# $$\mathbf{g} \leftarrow \mathbf{g} \cdot \mathrm{min}(1, \theta/\|\mathbf{g}\|).$$
#
# This turns out to be a *projection* of $\mathbf{g}$ onto the ball of radius $\theta$. More generally, a projection on a convex set $\mathcal{X}$ is defined as
#
# $$\mathrm{Proj}_\mathcal{X}(\mathbf{x}) = \mathop{\mathrm{argmin}}_{\mathbf{x}' \in \mathcal{X}} \|\mathbf{x} - \mathbf{x}'\|,$$
#
# which is the closest point in $\mathcal{X}$ to $\mathbf{x}$.
#
# 
# :label:`fig_projections`
#
# The mathematical definition of projections may sound a bit abstract. :numref:`fig_projections` explains it somewhat more clearly. In it we have two convex sets, a circle and a diamond.
# Points inside both sets (yellow) remain unchanged during projections.
# Points outside both sets (black) are projected to
# the points inside the sets (red) that are closest to the original points (black).
# While for $L_2$ balls this leaves the direction unchanged, this need not be the case in general, as can be seen in the case of the diamond.
#
#
# One of the uses for convex projections is to compute sparse weight vectors. In this case we project weight vectors onto an $L_1$ ball,
# which is a generalized version of the diamond case in :numref:`fig_projections`.
#
#
# ## Summary
#
# In the context of deep learning the main purpose of convex functions is to motivate optimization algorithms and help us understand them in detail. In the following we will see how gradient descent and stochastic gradient descent can be derived accordingly.
#
#
# * Intersections of convex sets are convex. Unions are not.
# * The expectation of a convex function is no less than the convex function of an expectation (Jensen's inequality).
# * A twice-differentiable function is convex if and only if its Hessian (a matrix of second derivatives) is positive semidefinite.
# * Convex constraints can be added via the Lagrangian. In practice we may simply add them with a penalty to the objective function.
# * Projections map to points in the convex set closest to the original points.
#
# ## Exercises
#
# 1. Assume that we want to verify convexity of a set by drawing all lines between points within the set and checking whether the lines are contained.
# 1. Prove that it is sufficient to check only the points on the boundary.
# 1. Prove that it is sufficient to check only the vertices of the set.
# 1. Denote by $\mathcal{B}_p[r] \stackrel{\mathrm{def}}{=} \{\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } \|\mathbf{x}\|_p \leq r\}$ the ball of radius $r$ using the $p$-norm. Prove that $\mathcal{B}_p[r]$ is convex for all $p \geq 1$.
# 1. Given convex functions $f$ and $g$, show that $\mathrm{max}(f, g)$ is convex, too. Prove that $\mathrm{min}(f, g)$ is not convex.
# 1. Prove that the normalization of the softmax function is convex. More specifically prove the convexity of
# $f(x) = \log \sum_i \exp(x_i)$.
# 1. Prove that linear subspaces, i.e., $\mathcal{X} = \{\mathbf{x} | \mathbf{W} \mathbf{x} = \mathbf{b}\}$, are convex sets.
# 1. Prove that in the case of linear subspaces with $\mathbf{b} = \mathbf{0}$ the projection $\mathrm{Proj}_\mathcal{X}$ can be written as $\mathbf{M} \mathbf{x}$ for some matrix $\mathbf{M}$.
# 1. Show that for twice-differentiable convex functions $f$ we can write $f(x + \epsilon) = f(x) + \epsilon f'(x) + \frac{1}{2} \epsilon^2 f''(x + \xi)$ for some $\xi \in [0, \epsilon]$.
# 1. Given a vector $\mathbf{w} \in \mathbb{R}^d$ with $\|\mathbf{w}\|_1 > 1$ compute the projection on the $L_1$ unit ball.
# 1. As an intermediate step write out the penalized objective $\|\mathbf{w} - \mathbf{w}'\|^2 + \lambda \|\mathbf{w}'\|_1$ and compute the solution for a given $\lambda > 0$.
# 1. Can you find the "right" value of $\lambda$ without a lot of trial and error?
# 1. Given a convex set $\mathcal{X}$ and two vectors $\mathbf{x}$ and $\mathbf{y}$, prove that projections never increase distances, i.e., $\|\mathbf{x} - \mathbf{y}\| \geq \|\mathrm{Proj}_\mathcal{X}(\mathbf{x}) - \mathrm{Proj}_\mathcal{X}(\mathbf{y})\|$.
#
#
# [Discussions](https://discuss.d2l.ai/t/350)
#
| python/d2l-en/pytorch/chapter_optimization/convexity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FashionMNIST
# Load images from the [Fashion-MNIST data](https://github.com/zalandoresearch/fashion-mnist)
#
#
# The dataset comprised of 60,000 small square 28x28 pixel grayscale images of items of 10 types of clothing with 0-9 class labels.
# class labels:
# * 0: T-shirt/top
# * 1: Trouser
# * 2: Pullover
# * 3: Dress
# * 4: Coat
# * 5: Sandal
# * 6: Shirt
# * 7: Sneaker
# * 8: Bag
# * 9: Ankle boot
#
# ### Load the Fashion-MNIST data
# * Use ``torch.utils.data.dataset``
# * Data path: data
# * Apply transformations to the data (turning all images into Tensor's for training a NN
#
# ### Train and CNN to classify images
# * Load in both training and test datasets from the FashionMNIST class
#
#
#
# ## Import the Necessary Packages
# +
# basic torch libraries
import torch
import torchvision
# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms
# basic libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ### The output of ``torchvision`` are PILImage images of range [0, 1]
# * Transform them to Tensor for input into a CNN
# +
# Define a transform to read the data in as a Tensor
# (ToTensor converts each PIL image to a FloatTensor scaled to [0, 1])
data_transform = transforms.ToTensor()
# Choose the training and test datasets
# NOTE: download=False, so loading fails unless the FashionMNIST files
# already exist under this path — TODO confirm the data is present locally
path = './data'
# Training datasets
train_data = FashionMNIST(root=path,
                          train=True,
                          download=False,
                          transform=data_transform)
# Test datasets
test_data = FashionMNIST(root=path,
                         train=False,
                         download=False,
                         transform=data_transform)
# Print out some stats about the training data
print('Train data, number of images', len(train_data))
# Print out some stats about the test data
print('Test data, number of images', len(test_data))
# -
# ## Data iteration and batching
# ``torch.utils.data.DataLoader`` is an iterator that allows to batch and shuffle the data
#
# +
# shuffle the data and load in image/label data in batches of size 20
# NOTE: batch size changes gradient noise and therefore the loss curve
batch_size = 20
# load train
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
# load test
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# specify the image classes (index i is the human-readable name of label i)
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# -
# Using ``dataiter.next()`` for cell iterates over the training dataset of loaded a random batch image/label data.
#
# Plots the batch of images and labels in a ``2*batch_size/2`` grid.
#
# +
# obtain one batch of training images
dataiter = iter(train_loader)
# use the builtin next(); DataLoader iterators no longer expose a .next()
# method in current PyTorch, so `dataiter.next()` raises AttributeError
images, labels = dataiter.__next__() if False else next(dataiter)
images = images.numpy()  # convert to numpy for matplotlib
# plot the images in the batch with labels
fig = plt.figure(figsize=(25, 4))  # fig size
for idx in np.arange(batch_size):
    # subplot grid counts must be ints; batch_size/2 is a float in Python 3
    ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    ax.set_title(classes[labels[idx]])
# -
# ## View an image
# * Normalize
# * grayscale image
#
# ### Normalization
# Normalization ensures that, as we go through a feedforward and then backpropagation step in training our CNN, each image feature will fall within a similar range of values and not overly activate any particular layer in our network. During the feedforward step, a network takes in an input image and multiplies each input pixel by some convolutional filter weights (and adds biases!), then it applies some activation and pooling functions. Without normalization, it's much more likely that the calculated gradients in the backpropagation step will be quite large and cause our loss to increase instead of converge.
#
#
# +
# select an image by index (any index within the current batch works)
idx = 2
img = np.squeeze(images[idx])
# display the pixel values in the image
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
# threshold used to pick a text colour that stays readable against
# the underlying grey level of each pixel
thresh = img.max()/2.5
for x in range(width):
    for y in range(height):
        # annotate each pixel with its (rounded) value; plain 0 for zeros
        val = round(img[x][y],2) if img[x][y] !=0 else 0
        ax.annotate(str(val), xy=(y,x),
                    horizontalalignment='center',
                    verticalalignment='center',
                    color='white' if img[x][y]<thresh else 'black')
# -
# ### NN Architecture
# * Architecture for simple ConvNet [INPUT-CONV-RELU-POOL-FC]
# * [NN Layers](http://pytorch.org/docs/master/nn.html)
# * Flattening used for the output of conv/pooling layer to a linear layer. In Keras used ``Flatten()``. In Pytorch used an input x with ``x = x.view(x.size(0), -1)``
# * Keep tract output dimension for case ``output_dim = (W-F+2P)/S + 1``
# * Input volume size(W)
# * Receptive field size of the Conv Layer neurons(F)
# * The sride with which applied(S)
# * The amount of zero padding used(P)
#
# # Necessary Packages for NN Module
import torch.nn as nn
import torch.nn.functional as F
# +
# Define Layers of a model
# Will use [INPUT-CONV-RELU-POOL-CONV-RELU-POOL-FC]
class Net(nn.Module):
    """Small CNN classifier for 28x28 grayscale FashionMNIST images.

    Architecture: [INPUT - CONV - RELU - POOL - CONV - RELU - POOL - FC],
    finishing with a log-softmax over the 10 clothing classes.
    """

    def __init__(self):
        super(Net, self).__init__()
        # conv1: 1 input channel (grayscale) -> 10 feature maps, 3x3 kernel.
        # Spatial output: (28 - 3)/1 + 1 = 26, i.e. (10, 26, 26) per image.
        self.conv1 = nn.Conv2d(1, 10, 3)
        # 2x2 max-pool, stride 2; halves each spatial dimension.
        # (10, 26, 26) -> (10, 13, 13). Reused after both conv stages.
        self.pool = nn.MaxPool2d(2, 2)
        # conv2: 10 -> 20 feature maps, 3x3 kernel.
        # (13 - 3)/1 + 1 = 11 -> (20, 11, 11); pooling then gives (20, 5, 5)
        # because 11/2 rounds down to 5.
        self.conv2 = nn.Conv2d(10, 20, 3)
        # Fully-connected classifier: 20*5*5 flattened features -> 10 scores.
        self.fc1 = nn.Linear(20 * 5 * 5, 10)

    def forward(self, x):
        """Return log-probabilities of shape (batch, 10) for input x."""
        # Two conv -> relu -> pool stages.
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten every sample's feature maps into a single vector.
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        # Log-softmax converts the 10 scores into a log-probability
        # distribution over the classes (pairs with nn.NLLLoss).
        return F.log_softmax(out, dim=1)
# -
# Instantiate the network and print a summary of its layers
net = Net()
print(net)
# # Loss function and Optimizer
#
# * Loss function typically uses cross entropy loss ``criterion = nn.CrossEntropyLoss()``; Cross entropy loss combines softmax and NLL loss (``nn.NLLLoss()``).
# * NLL Loss being uesd for the output of Net is a distribution of class scores which this condtion fit to the model.
#
# * Some standard stochastic optimizers are stochastic gradient descent and Adam.
# +
# additional necessary package for optimizer
import torch.optim as optim
# Apply NLL Loss for distribution of class scores
# (the network ends in log_softmax, so NLLLoss here == cross-entropy loss)
criterion = nn.NLLLoss()
# Optimizer used SGD with small learning rate 0.001
optimizer = optim.SGD(net.parameters(), lr=0.001)
# -
# # Accuracy before training
# * Comparing the accuracy before and after training helps to see whether the network has learned something.
# +
# Calculate accuracy before training
correct = 0
total = 0
# Evaluation needs no gradients: torch.no_grad() stops autograd from
# building graphs for every forward pass (saves memory and time).
with torch.no_grad():
    # Iterate through test dataset
    for images, labels in test_loader:
        # forward pass to get outputs
        # the outputs are a series of class scores
        outputs = net(images)
        # get the predicted class from the maximum value in the output-list of class scores
        _, predicted = torch.max(outputs.data, 1)
        # count up total number of correct labels for which the predicted and true labels are equal
        total += labels.size(0)
        correct += (predicted == labels).sum()
# calculate the accuracy; .item() converts the correct Tensor into a scalar
accuracy = 100.0 * correct.item() / total
print('Accuracy before training: ', accuracy)
# -
# # Train the Network
# * n_epochs: The number of epochs how many times a netwrok will cycle through the entire training dataset
# * Loop over the training dataset in batches and record the loss every 1000 batches
# * Steps:
# * Zero's the gradients to prepare for a forward pass
# * Passes the input through the network(forward pass
# * Computes the loss
# * Propagates gradients back into the netorks' parameter(backward pass)
# * Updates the weight(parameter update
# * print calculated loss
def train(n_epochs):
    """Train ``net`` on ``train_loader`` for ``n_epochs`` epochs.

    Relies on the module-level ``net``, ``criterion`` and ``optimizer``.
    Returns a list holding the average loss of each 1000-batch window,
    which is also printed as training progresses.
    """
    avg_losses = []
    for epoch in range(n_epochs):
        window_loss = 0.0
        for batch_i, (inputs, labels) in enumerate(train_loader):
            # standard step: clear grads, forward, loss, backward, update
            optimizer.zero_grad()
            loss = criterion(net(inputs), labels)
            loss.backward()
            optimizer.step()
            # .item() converts the scalar loss tensor to a Python float
            window_loss += loss.item()
            # report once per 1000 batches, then reset the window
            if batch_i % 1000 == 999:
                mean_loss = window_loss / 1000
                avg_losses.append(mean_loss)
                print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch+1, batch_i+1, mean_loss))
                window_loss = 0.0
    print('Finished Training')
    return avg_losses
# +
# define the number of epochs to train for
# start with small epochs to see if the model works initially
# (30 epochs over all of FashionMNIST is slow on CPU)
n_epochs = 30
# call train and record the per-1000-batch average loss over time
training_loss = train(n_epochs)
# -
# # Visualize the Loss
# print recorded avg loss for each 1000 batches and for each epoch
# Visualize the Loss: one point per 1000-batch window recorded by train()
plt.plot(training_loss)
plt.xlabel('1000\'s of batches')
plt.ylabel('Loss')
plt.ylim(0, 2.5)
plt.show()
# As shown above plot shows the loss decreases over time.
#
# It takes a little while for the big initial loss decrease, and the loss flattens out over time.
# # Test the Trained Network
# * Test trained model on a previously unseen dataset
# * Use training images (good modle should reach greater than 85% accuracy on this test dataset)
# +
# Initialize tensor and lists to monitor test loss and accuracy
test_loss = torch.zeros(1)
class_correct = [0. for i in range(10)]
class_total = [0. for i in range(10)]
# set the module to evaluation mode
net.eval()
# evaluation needs no gradients; no_grad() skips autograd bookkeeping
with torch.no_grad():
    for batch_i, data in enumerate(test_loader):
        # get the input images and their corresponding labels
        inputs, labels = data
        # forward pass to get outputs
        outputs = net(inputs)
        # calculate the loss
        loss = criterion(outputs, labels)
        # update avg test loss (running mean over the batches seen so far)
        test_loss += ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss))
        # get the predicted class from the maximum value in the output list of class scores
        _, predicted = torch.max(outputs.data, 1)
        # compare prediction to true label
        # this creates a correct Tensor that holds the number of correctly classified images in a batch
        correct = np.squeeze(predicted.eq(labels.data.view_as(predicted)))
        # calculate test accuracy for each object class
        # iterate over the ACTUAL batch length, not batch_size: the final
        # batch can be smaller when the dataset size is not a multiple of
        # batch_size, and range(batch_size) would then index out of bounds
        for i in range(labels.size(0)):
            label = labels.data[i]
            # get the scalar value of correct items for a class, by calling 'correct[i].item()'
            class_correct[label] += correct[i].item()
            class_total[label] += 1
print('Test Loss: {:.6f}\n'.format(test_loss.numpy()[0]))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (classes[i],
            100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]),
            np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' %(100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct),
    np.sum(class_total)))
# -
# # Visualize sample test results
# Shows predicted class(true class)
# +
# obtain one batch of test images
dataiter = iter(test_loader)
# use the builtin next(); DataLoader iterators no longer expose a .next()
# method in current PyTorch, so `dataiter.next()` raises AttributeError
images, labels = next(dataiter)
# get predictions: index of the max log-probability for each sample
preds = np.squeeze(net(images).data.max(1, keepdim=True)[1].numpy())
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    # subplot grid counts must be ints; batch_size/2 is a float in Python 3
    ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]), color=('green' if preds[idx]==labels[idx] else 'red'))
# -
# # Weaknesses of current model
#
# Test Accuracy of Dress: 0% ( 0/1000)
#
# As a result, these classes have 0% accuracy: the model incorrectly classifies most items that have a similar overall shape. We can add regularization.
#
# Dropout layers to avoid overfitting certain classes at the cost of generalization.
#
#
# # Save the model
# +
# path and model name
model_dir = 'saved_models/'
model_name = 'fashion_net_simple.pt'
# torch.save does not create missing directories, so make sure the target
# directory exists first (exist_ok avoids an error on re-runs)
import os
os.makedirs(model_dir, exist_ok=True)
# after training, save your model parameters in the directory 'saved_models'
torch.save(net.state_dict(), model_dir + model_name)
# -
| 04_Fashion_MNIST_Training/.ipynb_checkpoints/FashionMNIST_Training-checkpoint.ipynb |
# -*- coding: utf-8 -*-
"""lightningsynth.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/torchsynth/torchsynth/blob/lightning-synth/examples/lightningsynth.ipynb
# lightningsynth
Profiling for our synth on GPUs
Make sure you are on GPU runtime
If this hasn't been merged to master yet, run:
```
!pip uninstall -y torchsynth
!pip install git+https://github.com/torchsynth/torchsynth.git@lightning-synth
```
"""
# !pip uninstall -y torchsynth
# !pip install git+https://github.com/torchsynth/torchsynth.git@lightning-synth
# !pip install torchvision
from typing import Any
import pytorch_lightning as pl
import torch
# import torchvision.models as models
import torch.autograd.profiler as profiler
import torch.tensor as tensor
from torch import Tensor as T
from tqdm.auto import tqdm
import torchsynth.module
from torchsynth.config import SynthConfig
from torchsynth.synth import Voice
# number of CUDA devices visible to torch (0 on a CPU-only runtime)
gpus = torch.cuda.device_count()
print("Usings %d gpus" % gpus)
# Note this is the batch size for our synth!
# i.e. this many synth sounds are generated at once
# Not the batch size of the datasets
BATCH_SIZE = 256
import multiprocessing
# CPU cores available; only used for cheap batch-number generation
ncores = multiprocessing.cpu_count()
print(f"Using ncores {ncores} for generating batch numbers (low CPU usage)")
class batch_idx_dataset(torch.utils.data.Dataset):
    """Trivial dataset whose items are their own integer indices.

    Item i is simply the batch number i; the length caps how many
    batches the DataLoader will produce.
    """

    def __init__(self, num_batches):
        # total number of batch indices to yield
        self.num_batches = num_batches

    def __getitem__(self, idx):
        # the "sample" is just the batch index itself
        return idx

    def __len__(self):
        return self.num_batches
# TODO Add this to torchsynth API
# see https://github.com/torchsynth/torchsynth/issues/154
class TorchSynthCallback(pl.Callback):
    """Lightning callback that renders one synth batch after each test step.

    The "dataset" only yields batch indices, so the actual audio generation
    happens here by calling the LightningModule (the Voice synth) directly.
    """
    def on_test_batch_end(
        self,
        trainer,
        pl_module: pl.LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        # batch is a 1-D tensor of batch indices (batch_size=1 in the loader)
        assert batch.ndim == 1
        # calling the module generates one synth batch for this index;
        # the rendered audio is discarded (profiling only)
        _ = pl_module(batch_idx)
        # I don't think the following is correct
        # _ = torch.stack([pl_module(i) for i in batch])
# 1 billion examples' worth of batch indices (1024^3 / BATCH_SIZE batches)
synth1B = batch_idx_dataset(1024 * 1024 * 1024 // BATCH_SIZE)
test_dataloader = torch.utils.data.DataLoader(synth1B, num_workers=0, batch_size=1)

# Shared synth configuration; its `reproducible` flag drives
# Trainer(deterministic=...) below.
# NOTE(review): the original code referenced an undefined name
# `synthconfig`, which raised a NameError — it is now created explicitly.
synthconfig = SynthConfig()
# TODO confirm Voice accepts a synthconfig kwarg in this torchsynth version
voice = Voice(synthconfig=synthconfig)

accelerator = None
if gpus == 0:
    use_gpus = None
    precision = 32
else:
    # specifies all available GPUs (if only one GPU is not occupied,
    # auto_select_gpus=True uses one gpu)
    use_gpus = -1
    # Golden cables
    # NOTE(review): `precision` was previously left undefined on the GPU
    # path (the assignment was commented out), causing a NameError when
    # constructing the Trainer. 16-bit matches the original TODO intent.
    precision = 16
    if gpus > 1:
        accelerator = "ddp"

# Use deterministic?
trainer = pl.Trainer(
    precision=precision,
    gpus=use_gpus,
    auto_select_gpus=True,
    accelerator=accelerator,
    deterministic=synthconfig.reproducible,
    max_epochs=0,
    callbacks=[TorchSynthCallback()],
)

trainer.test(voice, test_dataloaders=test_dataloader)
| examples/lightningsynth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TheJoys2019/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/Artin_Sinani_Code_Challenge_1_(03_20_19).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="KyQfChWPtNR-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="41f102cd-d2d6-4a76-eb5a-de3113eabbc3"
import pandas as pd
# Dataset 1:
## Load the Black Friday purchases CSV directly from GitHub
bf = pd.read_csv("https://raw.githubusercontent.com/pierretd/datasets-1/master/BlackFriday.csv")
## Show first 5 rows to inspect the columns
bf.head(5)
# + id="436A-K5DtVF5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="fa12d8cb-819a-4292-8340-506306c1dc90"
#1) Clean the data set and drop the Null/NaN values.
# Rename Product_Category_1-3 columns with an actual Product.
# count missing values per column before cleaning
bf.isnull().sum()
# + id="sib2GsZ-x20m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="baef975b-3d13-44fe-9dc4-58568e87000e"
bf['Product_Category_2'].mean()
# + id="r4MY8zPgyQD0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2893} outputId="870ff1f3-b4d6-4e36-9d92-9ec5e941ff80"
# fillna returns a NEW object unless assigned: the original call discarded
# the result, so bf silently kept its NaNs. Fill Product_Category_2 with its
# column mean (computed above) and keep the result.
bf['Product_Category_2'] = bf['Product_Category_2'].fillna(bf['Product_Category_2'].mean())
bf
# + id="l62u9Cvyt3ID" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="01f169fe-14da-41df-8132-bdd14e35786d"
# remaining missing values per column after cleaning
bf.isna().sum()
# + id="KnDK_ybfuMRS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9f72ecf4-b3ba-49a7-da6d-60e9fdb24a4c"
# (rows, columns) of the dataframe
bf.shape
# + id="FR1nZN5-uO-o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3229} outputId="a5203a6a-8a53-491e-faa1-f73e03acde70"
# describe is a METHOD: the original `bf.describe` (no parentheses) only
# displayed the bound-method object instead of the summary statistics
bf.describe()
# + id="9Mn2k4S1uZVk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="123a6575-c030-4c78-e80a-e4041ff508a0"
#2. How many unique user_ids does the data set contain?
# nunique() answers the question with a single count; the original
# unique() call printed the entire array of ids instead of how many
print(bf.User_ID.nunique())
# + id="DCz9IJhyurX_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="1776ccf0-79af-4789-995e-2dffd36d8a14"
#3) How many unique age brackets are in the dataset.
# Which Age bracket has the most entries? Which has the least?
# value_counts() lists each bracket with its entry count, sorted descending,
# so the first row is the most common bracket and the last the least
print(bf['Age'].value_counts())
# + id="x1X1hDX5wmxk" colab_type="code" colab={}
# + id="Suppi7mfwul5" colab_type="code" colab={}
| Artin_Sinani_Code_Challenge_1_(03_20_19).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="M9ps18GDtt5j"
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/annotation/english/model-downloader/Running_Pretrained_pipelines.ipynb)
#
# ## 0. Colab Setup
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 60575, "status": "ok", "timestamp": 1589250629601, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="gm0tZvJdtvgx" outputId="e8416fcf-8bed-4f34-ea7a-a0c182606908"
import os
# Install java
# ! apt-get update -qq
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# Spark runs on the JVM: point JAVA_HOME at the JDK installed above and
# prepend its bin directory so `java` resolves on the PATH
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! java -version
# Install pyspark
# ! pip install --ignore-installed pyspark==2.4.4
# Install Spark NLP
# ! pip install --ignore-installed spark-nlp
# + [markdown] colab_type="text" id="cyumVtb_tt5k"
# ## Runing Pretrained models
#
# In the following example, we walk-through different use cases of some of our Pretrained models and pipelines which could be used off the shelf.
#
# There is BasicPipeline which will return tokens, normalized tokens, lemmas and part of speech tags. The AdvancedPipeline will return same as the BasicPipeline plus Stems, Spell Checked tokens and NER entities using the CRF model. All the pipelines and pre trained models are downloaded from internet at run time hence would require internet access.
# + [markdown] colab_type="text" id="Emh6GE1Ctt5l"
# #### 1. Call necessary imports and create the spark session
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 60933, "status": "ok", "timestamp": 1589250629975, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="DYPs5MTqtt5m" outputId="9b0c0759-b387-4a0f-83bb-c114fbded319"
import os
import sys
print(sys.version)
# Spark NLP imports: pretrained-resource downloader, pipeline building blocks
# and annotators, plus the Spark session / ML pipeline APIs.
import sparknlp
from sparknlp.pretrained import ResourceDownloader
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import *
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 73758, "status": "ok", "timestamp": 1589250642818, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="nfoLeCq9tt5r" outputId="e5ef3ebc-0c02-448d-f535-097ee2a90311"
# Start (or reuse) a Spark session configured for Spark NLP.
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
# + [markdown] colab_type="text" id="rr6G_81ftt5v"
# #### 2. Create a dummy spark dataframe
# + colab={} colab_type="code" id="Ur8mKlQTtt5v"
# Two toy documents (id, text) used by every example below.
l = [
  (1,'To be or not to be'),
  (2,'This is it!')
]
data = spark.createDataFrame(l, ['docID','text'])
# + [markdown] colab_type="text" id="-TiWAq7-tt5z"
# #### 3. We use predefined BasicPipeline in order to annotate a dataframe with it
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" executionInfo={"elapsed": 106852, "status": "ok", "timestamp": 1589250675925, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="OtpSOtKStt50" outputId="9c43a736-fc4d-4ea4-a58f-958016f6528f"
# download predefined - pipelines
from sparknlp.pretrained import PretrainedPipeline
# The pipeline is fetched from the internet on first use (see the notes in
# the markdown cell above), then cached locally.
explain_document_ml = PretrainedPipeline("explain_document_ml")
# Annotate the dataframe's 'text' column and show the resulting annotations.
basic_data = explain_document_ml.annotate(data, 'text')
basic_data.show()
# + [markdown] colab_type="text" id="dT-FqWFOtt54"
# #### We can also annotate a single string
# + colab={"base_uri": "https://localhost:8080/", "height": 748} colab_type="code" executionInfo={"elapsed": 106843, "status": "ok", "timestamp": 1589250675926, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="TQ76lDOTtt55" outputId="46ccb9ad-94ea-4a5c-d857-3a451324cc07"
# annotate quickly from a plain string (returns a dict of annotation lists)
explain_document_ml.annotate("This world is made up of good and bad things")
# + [markdown] colab_type="text" id="6TG2d8N3tt5_"
# #### 4. Now we intend to use one of the fast pretrained models such as the Perceptron model, which is a POS model trained on the ANC American Corpus
# + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" executionInfo={"elapsed": 126761, "status": "ok", "timestamp": 1589250695851, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="zSGo6qZbtt6A" outputId="f6fc49a5-ea08-461b-9460-1499fadf427c"
# Turn raw text into Spark NLP documents.
# NOTE(review): no setOutputCol is given — presumably the output column
# defaults to "document" (the SentenceDetector below reads "document");
# confirm against the DocumentAssembler API.
document_assembler = DocumentAssembler() \
    .setInputCol("text")
# Split each document into sentences.
sentence_detector = SentenceDetector() \
    .setInputCols(["document"]) \
    .setOutputCol("sentence")
# Split each sentence into tokens.
tokenizer = Tokenizer() \
    .setInputCols(["sentence"]) \
    .setOutputCol("token")
# NOTE(review): only the output column is set here; input columns presumably
# fall back to the model's defaults — confirm.
wordEmbeddings = WordEmbeddingsModel.pretrained().setOutputCol("word_embeddings")
# download directly - models
# Pretrained POS tagger (Perceptron model).
pos = PerceptronModel.pretrained() \
    .setInputCols(["sentence", "token"]) \
    .setOutputCol("pos")
# Assemble the stages and run them over the dummy dataframe.
advancedPipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, pos, wordEmbeddings])
output = advancedPipeline.fit(data).transform(data)
output.show()
# + [markdown] colab_type="text" id="LPPaP1sxtt6G"
# #### 5. Now we proceed to download a fast CRF Named Entity Recognition model, which is trained with GloVe embeddings. Then, we retrieve the `advancedPipeline` and combine these models to use them appropriately, meeting their requirements.
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" executionInfo={"elapsed": 149593, "status": "ok", "timestamp": 1589250718691, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="MXo_zTNatt6H" outputId="54eba9db-ffa2-4f09-f042-7339d9b23c71"
# Download a pretrained CRF NER model and wire it to the columns produced by
# the advanced pipeline above.
ner = NerCrfModel.pretrained()
ner.setInputCols(["pos", "token", "document", "word_embeddings"]).setOutputCol("ner")
annotation_data = advancedPipeline.fit(data).transform(data)
# NOTE(review): annotation_data already contains the "pos" column (pos is a
# stage of advancedPipeline), so transforming with pos again looks redundant —
# confirm it does not fail on a duplicate output column.
pos_tagged = pos.transform(annotation_data)
ner_tagged = ner.transform(pos_tagged)
ner_tagged.show()
# + [markdown] colab_type="text" id="5nuR8cxytt6L"
# #### 6. Finally, let's try a pre-trained sentiment analysis pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" executionInfo={"elapsed": 160782, "status": "ok", "timestamp": 1589250729887, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}, "user_tz": -120} id="CnjUFYqctt6L" outputId="678f21ad-7db5-40b7-8708-31c644260928"
# Run a pretrained sentiment-analysis pipeline on a single string.
PretrainedPipeline("analyze_sentiment").annotate("This is a good movie!!!")
# + colab={} colab_type="code" id="H0sOfKV9tt6P"
| jupyter/annotation/english/model-downloader/Running_Pretrained_pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import scipy as sp
import scipy.sparse as sps
import scipy.io as io
import time
import json
from scipy.sparse.linalg import svds
from sklearn import preprocessing
from sklearn.preprocessing import normalize
# +
# function to save a csr sparse matrix
def save_sparse_csr(filename, array):
    """Persist a CSR sparse matrix to an .npz archive.

    Stores the three CSR component arrays plus the matrix shape so that
    load_sparse_csr can rebuild the matrix exactly.
    """
    components = {
        "data": array.data,
        "indices": array.indices,
        "indptr": array.indptr,
        "shape": array.shape,
    }
    np.savez(filename, **components)
# function to read written csr sparse matrix
def load_sparse_csr(filename):
    """Rebuild a CSR sparse matrix from an .npz archive written by
    save_sparse_csr."""
    archive = np.load(filename)
    matrix = sps.csr_matrix(
        (archive['data'], archive['indices'], archive['indptr']),
        shape=archive['shape'],
    )
    return matrix
# -
class RecommenderSystem(object):
    """Base class for the playlist-continuation recommenders.

    Reads the interaction file, the target playlists/tracks and the track
    metadata, maps the external playlist/track IDs onto dense matrix indexes,
    and provides the shared machinery (UIM/ICM construction, train/test
    split, recommendation loop, submission export) used by the concrete
    recommender subclasses.
    """

    def __init__(self, interactions_file = '../input/train_final.csv',
                 target_playlists = '../input/target_playlists.csv',
                 target_tracks = '../input/target_tracks.csv',
                 meta_track = '../input/tracks_final.csv'):
        # read interactions file (tab-separated playlist_id / track_id pairs)
        train_final = pd.read_csv(interactions_file, sep = '\t')
        # implicit feedback: every observed pair counts as interaction 1.0
        train_final['interaction'] = 1.0
        self.df_interactions = train_final.sort_values(['playlist_id', 'track_id'], ascending=[True, True])
        self.numInteractions = train_final.shape[0]
        print("Number of interactions (numInteractions): " + str(self.numInteractions))
        print("\n")
        # separate each column in list
        playlist_id = list(self.df_interactions['playlist_id'])
        track_id = list(self.df_interactions['track_id'])
        # playlist_id -> dense matrix index translation table
        playlist_id_unique = list(set(playlist_id))
        self.df_playlist_id_unique = pd.DataFrame(playlist_id_unique)
        self.df_playlist_id_unique.reset_index(level=0, inplace=True)
        self.df_playlist_id_unique.columns = ['index_playlist', 'playlist_id']
        # track_id -> dense matrix index translation table
        track_id_unique = list(set(track_id))
        self.df_track_id_unique = pd.DataFrame(track_id_unique)
        self.df_track_id_unique.reset_index(level=0, inplace=True)
        self.df_track_id_unique.columns = ['index_track', 'track_id']
        print("Track_id translated to indexes (df_track_id_unique): ")
        print(self.df_track_id_unique.head())
        print("\n")
        print("Playlist_id translated to indexes (df_playlist_id_unique): ")
        print(self.df_playlist_id_unique.head())
        print("\n")
        # read target playlists which should receive a recommendation
        self.df_target_playlists = pd.read_csv(target_playlists, sep = '\t')
        self.list_target_playlists = list(self.df_target_playlists['playlist_id'])
        self.df_target_playlists = self.df_target_playlists.merge(self.df_playlist_id_unique, how='inner', on='playlist_id')
        print("Size of df_target_playlists: " + str(self.df_target_playlists.shape))
        # read target tracks (the candidate pool for recommendations)
        self.df_target_tracks = pd.read_csv(target_tracks, sep = '\t')
        self.list_target_tracks = list(self.df_target_tracks['track_id'])
        self.df_target_tracks = self.df_target_tracks.merge(self.df_track_id_unique, how='inner', on='track_id')
        print("Size of df_target_tracks file: " + str(self.df_target_tracks.shape))
        print("Size of list_target_tracks file: " + str(len(self.df_target_tracks)))
        print("\n")
        self.numPlaylists = len(self.df_playlist_id_unique)
        self.numTracks = len(self.df_track_id_unique)
        print("Number of Playlists: " + str(self.numPlaylists))
        print("Number of Tracks: " + str(self.numTracks))
        print("\n")
        # attach the dense indexes to every interaction row
        self.df_interactions = self.df_interactions.merge(self.df_playlist_id_unique, how='inner', on='playlist_id')
        self.df_interactions = self.df_interactions.merge(self.df_track_id_unique, how='inner', on='track_id')
        self.df_interactions = self.df_interactions.sort_values(['playlist_id', 'track_id'], ascending=[True, True])
        print("Interactions-file with IDs translated to indexes (saved in df_interactions): ")
        print(self.df_interactions.head())
        print("\n")
        # coordinate/value arrays reused for all sparse-matrix builds
        self.list_index_playlist = np.array(self.df_interactions['index_playlist'])
        self.list_index_track = np.array(self.df_interactions['index_track'])
        self.list_interactions = np.array(self.df_interactions['interaction'])
        # read track metadata; tags arrive as JSON lists, albums as "[id]"
        self.df_tracks = pd.read_csv(meta_track, sep = '\t')
        self.df_tracks = self.df_tracks.merge(self.df_track_id_unique, how='inner', on='track_id')
        self.df_tracks['tags'] = self.df_tracks.tags.apply(json.loads)
        # strip the surrounding brackets and append "a" so album IDs cannot
        # collide with tag IDs in the ICM; missing albums map to "-10a"
        self.df_tracks['album'] = self.df_tracks.album.apply(lambda x: (str(x[1:-1]) + "a") if x != "[None]" and x != "[]" else "-10a")
        print('Meta information about tracks read (df_tracks): ')
        print(self.df_tracks.head())
        print(self.df_tracks.shape)

    def target_structure(self):
        """Build self.df_target: one row per target playlist holding the
        tracks it already contains and an empty 'recommend' list to fill."""
        # filter interaction dataframe, to retain only target playlists
        train = self.df_interactions.merge(self.df_target_playlists, how='inner', on='playlist_id')
        # aggregate to playlist level and coerce tracks in that playlist to list
        train_agg1 = train.groupby(by='playlist_id').track_id.apply(list).to_frame()
        train_agg1.reset_index(level=0, inplace=True)
        train_agg2 = train.groupby(by='playlist_id').index_track.apply(list).to_frame()
        train_agg2.reset_index(level=0, inplace=True)
        train_agg = train_agg1.merge(train_agg2, how='inner', on='playlist_id')
        self.df_target = train_agg.merge(self.df_playlist_id_unique, how='inner', on='playlist_id')
        # one independent empty list per row, filled later by recommend()
        self.df_target['recommend'] = np.empty((len(train_agg), 0)).tolist()
        print("Data structure for final prediction was created (df_target): ")
        print(self.df_target.head())
        print(self.df_target.shape)

    def sample_test(self, n=5):
        # sample n records from each target playlist
        # NOTE(review): unfinished method — row[''] raises KeyError when
        # executed; kept as-is because nothing in this notebook calls it.
        for index, row in self.df_target.iterrows():
            row['']

    def interaction_aggregation(self):
        """Debug helper: print per-playlist aggregates of the interactions."""
        agg1 = self.df_interactions.groupby(by='playlist_id').track_id.apply(list).to_frame()
        agg1.reset_index(level=0, inplace=True)
        agg2 = self.df_interactions.groupby(by='playlist_id').index_track.apply(list).to_frame()
        agg2.reset_index(level=0, inplace=True)
        agg3 = self.df_interactions.groupby(by='playlist_id').nunique()
        agg3.reset_index(level=0, inplace=True)
        agg = agg1.merge(agg2, how='inner', on='playlist_id')
        agg = agg.merge(agg3, how='inner', on='playlist_id')
        print(agg[:10])

    def create_uim(self, sparse_mode="coo", create_testset = False, split = 0.8):
        """Build the user-item matrix self.UIM (playlists x tracks).

        sparse_mode: 'coo' or 'csr'. With create_testset=True the
        interactions are additionally split into UIM_train / UIM_test.
        Raises NotImplementedError for unknown sparse_mode values.
        """
        if sparse_mode.lower() == "coo" or sparse_mode.lower() == "csr":
            self.UIM = sps.coo_matrix((self.list_interactions, (self.list_index_playlist, self.list_index_track)))
            if create_testset:
                self.split_traintest(train_test_split = split)
            if sparse_mode.lower() == "csr" and create_testset != True:
                self.UIM = self.UIM.tocsr()
            elif sparse_mode.lower() == "csr" and create_testset == True:
                self.UIM = self.UIM.tocsr()
                self.UIM_train = self.UIM_train.tocsr()
                self.UIM_test = self.UIM_test.tocsr()
        else:
            # BUGFIX: the original format string had no placeholder, so the
            # offending mode was silently dropped from the message
            raise NotImplementedError('Sparse mode {} not implemented'.format(sparse_mode))

    def split_traintest(self, train_test_split):
        """Randomly split the interactions into UIM_train / UIM_test
        (coo format) with P(train) = train_test_split per interaction."""
        train_mask = np.random.choice([True,False], self.numInteractions, p=[train_test_split, 1-train_test_split])
        test_mask = np.logical_not(train_mask)
        self.UIM_train = sps.coo_matrix((self.list_interactions[train_mask],
                                         (self.list_index_playlist[train_mask],
                                          self.list_index_track[train_mask])))
        # BUGFIX: the test matrix previously used the full interaction lists,
        # so it duplicated the whole UIM instead of holding the held-out part
        self.UIM_test = sps.coo_matrix((self.list_interactions[test_mask],
                                        (self.list_index_playlist[test_mask],
                                         self.list_index_track[test_mask])))
        print("UIM successfully created in csr format.")

    def create_icm(self, include_tags = True, include_album = True, include_artist = True):
        """Build the item-content matrix self.ICM (tracks x features) from
        tags, album and artist metadata, label-encoded into columns."""
        tags_list = []
        for index, row in self.df_tracks.iterrows():
            if len(row['tags']) != 0 and include_tags:
                for i in row['tags']:
                    tags_list.append([row['index_track'], i, 1.0])
            if row['album'] != "-10a" and include_album:
                tags_list.append([row['index_track'], row['album'], 1])
            if include_artist:
                # suffix "b" keeps artist IDs disjoint from tag/album labels
                tags_list.append([row['index_track'], str(row['artist_id']) + "b", 1.0])
        tags_list = pd.DataFrame(tags_list)
        tags_list.columns = ['index_track', 'tag', 'interaction']
        track_list = list(tags_list['index_track'])
        tag_list = list(tags_list['tag'])
        self.final_taglist = list(tags_list['tag'])
        interaction_list = list(tags_list['interaction'])
        # encode the heterogeneous feature labels as consecutive integers
        le = preprocessing.LabelEncoder()
        le.fit(tag_list)
        taglist_icm = le.transform(tag_list)
        self.ICM = sps.coo_matrix((interaction_list, (track_list, taglist_icm)))
        self.ICM = self.ICM.tocsr()
        print("ICM successfully created in csr format.")

    def recommend(self, at=5):
        """Fill df_target['recommend'] with the `at` best-scoring target
        tracks per playlist, using the estimated score matrix self.UIM_estm."""
        self.target_structure()
        start_time = time.time()
        for index, row in self.df_target.iterrows():
            if index % 1000 == 0:
                print("Current playlist: " + str(index))
            #get row from URM_estm
            estm = pd.DataFrame(self.UIM_estm[row['index_playlist'],:].T.toarray())
            estm.reset_index(level=0, inplace=True)
            estm.columns = ['index_track','pred']
            # filter tracks which are already in the playlist, so they can't be recommended
            # (BUGFIX: use ~ — unary minus on a boolean Series is rejected by
            # modern pandas/numpy)
            estm = estm[~estm["index_track"].isin(row['index_track'])]
            # translate track index back to track_id
            estm = estm.merge(self.df_track_id_unique, how='inner', on='index_track')
            # filter on target track set
            estm = estm[estm['track_id'].isin(self.list_target_tracks)]
            estm = estm.sort_values('pred',ascending=False)
            count = 1
            for index2, row2 in estm.iterrows():
                # insert 5 top recommendations into dataframe
                if count < (at + 1):
                    row['recommend'].append(int(row2['track_id']))
                    count += 1
                else:
                    break
        print("--- %s minutes ---" % ((time.time() - start_time)/60))

    def create_submission(self, filename):
        """Export the computed recommendations to ../submission/<filename>."""
        try:
            self.df_target
        except AttributeError:
            print("Target structure and recommendations were not yet calculated. \n Execute RecommenderSystem.recommend() first.")
            # BUGFIX: bail out instead of crashing on the next attribute access
            return
        # Convert list to string with spaces between track_ids
        self.df_target['recommend'] = self.df_target['recommend'].apply(lambda x: " ".join(map(str, x)))
        # rename columns for submission
        self.final = self.df_target[['playlist_id','recommend']]
        self.final.columns = ['playlist_id','track_ids']
        print("Head of the submission file:")
        print(self.final.head())
        # export file
        self.final.to_csv('../submission/' + filename, index=False)

    def apply_shrinkage(self, X, dist, shrink=0):
        """Dampen similarities in `dist` by co_counts / (co_counts + shrink),
        where co_counts are co-occurrence counts derived from X."""
        # create an "indicator" version of X (i.e. replace values in X with ones)
        X_ind = X.copy()
        X_ind.data = np.ones_like(X_ind.data)
        # compute the co-rated counts
        co_counts = X_ind * X_ind.T
        # remove the diagonal (BUGFIX: np.newaxis — the sp.newaxis alias was
        # removed from recent SciPy releases)
        co_counts = co_counts - sps.dia_matrix((co_counts.diagonal()[np.newaxis, :], [0]), shape=co_counts.shape)
        # compute the shrinkage factor as co_counts_ij / (co_counts_ij + shrinkage)
        # then multiply dist with it
        co_counts_shrink = co_counts.copy()
        co_counts_shrink.data += shrink
        co_counts.data /= co_counts_shrink.data
        dist.data *= co_counts.data
        return dist
# +
def check_matrix(X, format='csc', dtype=np.float32):
    """Return X converted to the requested sparse format and dtype.

    If X is already an instance of the requested format, or the format name
    is unknown, X is only cast to `dtype`.
    """
    conversions = {
        'csc': (sps.csc_matrix, 'tocsc'),
        'csr': (sps.csr_matrix, 'tocsr'),
        'coo': (sps.coo_matrix, 'tocoo'),
        'dok': (sps.dok_matrix, 'todok'),
        'bsr': (sps.bsr_matrix, 'tobsr'),
        'dia': (sps.dia_matrix, 'todia'),
        'lil': (sps.lil_matrix, 'tolil'),
    }
    if format in conversions:
        matrix_cls, converter_name = conversions[format]
        if not isinstance(X, matrix_cls):
            return getattr(X, converter_name)().astype(dtype)
    return X.astype(dtype)
class ISimilarity(object):
    """Abstract interface for the similarity metrics."""

    def __init__(self, shrinkage=10):
        # shrinkage weight applied by concrete metrics; 10 by default
        self.shrinkage = shrinkage

    def compute(self, X):
        """Compute the similarity matrix for X; concrete subclasses override."""
        pass
class Cosine(ISimilarity):
    """Cosine similarity over a sparse matrix (columns are l2-normalized,
    similarity is then taken via the dot product)."""

    def compute(self, X):
        """Return (dist, dist_fin).

        dist is the raw similarity matrix; dist_fin additionally has the
        diagonal removed and, when self.shrinkage > 0, the shrinkage factor
        applied.
        """
        # convert to csc matrix for faster column-wise operations
        X = check_matrix(X, 'csc', dtype=np.float32)
        # 1) normalize the columns in X
        # compute the column-wise norm
        # NOTE: this is slightly inefficient. We must copy X to compute the column norms.
        # A faster solution is to normalize the matrix inplace with a Cython function.
        Xsq = X.copy()
        Xsq.data **= 2
        norm = np.sqrt(Xsq.sum(axis=0))
        norm = np.asarray(norm).ravel()
        norm += 1e-6  # guard against division by zero for all-zero columns
        # compute the number of non-zeros in each column
        # NOTE: this works only if X is instance of sparse.csc_matrix
        col_nnz = np.diff(X.indptr)
        # then normalize the values in each column
        X.data /= np.repeat(norm, col_nnz)
        print("Normalized")
        # 2) compute the cosine similarity using the dot-product
        dist = X * X.T
        print("Computed")
        # zero out diagonal values (BUGFIX: np.newaxis — the sp.newaxis alias
        # was removed from recent SciPy releases)
        dist_fin = dist - sps.dia_matrix((dist.diagonal()[np.newaxis, :], [0]), shape=dist.shape)
        print("Removed diagonal")
        # and apply the shrinkage
        if self.shrinkage > 0:
            dist_fin = self.apply_shrinkage(X, dist_fin)
            print("Applied shrinkage")
        return dist, dist_fin

    def apply_shrinkage(self, X, dist):
        """Dampen dist by co_counts / (co_counts + self.shrinkage)."""
        # create an "indicator" version of X (i.e. replace values in X with ones)
        X_ind = X.copy()
        X_ind.data = np.ones_like(X_ind.data)
        # compute the co-rated counts
        co_counts = X_ind * X_ind.T
        # remove the diagonal (BUGFIX: np.newaxis, as above)
        co_counts = co_counts - sps.dia_matrix((co_counts.diagonal()[np.newaxis, :], [0]), shape=co_counts.shape)
        # compute the shrinkage factor as co_counts_ij / (co_counts_ij + shrinkage)
        # then multiply dist with it
        co_counts_shrink = co_counts.copy()
        co_counts_shrink.data += self.shrinkage
        co_counts.data /= co_counts_shrink.data
        dist.data *= co_counts.data
        return dist
# -
class BasicItemCBFRecommender(RecommenderSystem):
    '''Basic item CBF Recommender is a class to compute item similarity based on a item-content matrix
    and subsequent filtering based on that matrix. This class does not leverage any KNN approach since it was
    found that it produces inferior results for the challenge.'''

    def __str__(self):
        # NOTE(review): self.k is never assigned in fit(); formatting this
        # string before k exists raises AttributeError — confirm intent.
        return "ItemCBF(similarity={},k={},shrinkage={})".format(self.similarity_name, self.k, self.shrinkage)

    def fit(self, shrinkage=100, similarity='cosine', include_tags = True, include_album = True, include_artist = True):
        """Compute content-based item similarities from the ICM and score
        every playlist/track pair into self.UIM_estm."""
        self.shrinkage = shrinkage
        self.similarity_name = similarity
        # resolve the similarity metric lazily so that only the selected
        # class has to exist at call time
        if similarity == 'cosine':
            self.distance = Cosine(shrinkage=self.shrinkage)
        elif similarity == 'pearson':
            self.distance = Pearson(shrinkage=self.shrinkage)
        elif similarity == 'adj-cosine':
            self.distance = AdjustedCosine(shrinkage=self.shrinkage)
        else:
            raise NotImplementedError('Distance {} not implemented'.format(similarity))
        # build the user-item and item-content matrices
        self.create_uim(sparse_mode = 'csr')
        self.create_icm(include_tags, include_album, include_artist)
        # raw (self.S) and shrunk similarity matrices over the ICM
        self.S, item_weights = self.distance.compute(self.ICM)
        item_weights = check_matrix(item_weights, 'csr')  # nearly 10 times faster
        print("Converted to csr")
        # estimated scores: observed interactions weighted by item similarity
        self.UIM_estm = self.UIM.dot(item_weights)
        print('UIM_estm calculated')
class SVDRecommender(RecommenderSystem):
    """Recommender that scores tracks via a truncated SVD of the UIM."""

    def fit(self, k):
        """Factorize the interaction matrix with k latent factors and
        rebuild the dense score matrix self.UIM_estm from the factors."""
        self.k = k
        self.create_uim(sparse_mode = 'coo')
        # decompose the UIM into its k leading singular triplets
        svd_start = time.time()
        left, sigma, right_t = svds(self.UIM, k = self.k)
        print("Calculate SVD:")
        print("--- %s seconds ---" % (time.time() - svd_start))
        # fold the singular values into the left factor, then multiply the
        # factors back together to obtain the predicted scores
        scaled_left = left @ np.diag(sigma)
        rebuild_start = time.time()
        self.UIM_estm = scaled_left @ right_t
        print("UIM_estm calculated:")
        print("--- %s seconds ---" % (time.time() - rebuild_start))
class ItemCFRecommender(RecommenderSystem):
    """Item-based collaborative-filtering recommender."""

    def fit(self, shrinkage=0):
        """Score all playlist/track pairs via item-item cosine similarity.

        shrinkage > 0 additionally removes self-similarity and dampens
        similarities supported by few co-occurrences.
        """
        self.create_uim(sparse_mode = 'csr')
        # calculate full item similarity matrix from UIM - item-based
        # normalize UIM first
        cf_UIM_norm = normalize(self.UIM, norm='l2', axis=0)
        S_icf = cf_UIM_norm.T.dot(cf_UIM_norm)
        print("Similarity matrix computed.")
        # and apply the shrinkage
        if shrinkage > 0:
            # zero out diagonal values (BUGFIX: np.newaxis — the sp.newaxis
            # alias was removed from recent SciPy releases)
            S_icf = S_icf - sps.dia_matrix((S_icf.diagonal()[np.newaxis, :], [0]), shape=S_icf.shape)
            print("Removed diagonal")
            S_icf_shrink = self.apply_shrinkage(cf_UIM_norm, S_icf, shrink=shrinkage)
            print("Applied shrinkage")
            self.UIM_estm = self.UIM.dot(S_icf_shrink)
        else:
            self.UIM_estm = self.UIM.dot(S_icf)
        print("UIM_estm computed.")
class UserCFRecommender(RecommenderSystem):
    """User-based collaborative-filtering recommender."""

    def fit(self, shrinkage=0):
        """Score playlists via user-user similarity.

        shrinkage > 0 additionally removes self-similarity and dampens
        similarities supported by few co-occurrences.
        """
        self.create_uim(sparse_mode = 'csr')
        # calculate full user similarity matrix from UIM - user-based
        # (comment fixed: the original said "item similarity ... item-based")
        # normalize UIM first
        cf_UIM_norm = normalize(self.UIM, norm='l2', axis=0)
        S_ucf = cf_UIM_norm.dot(cf_UIM_norm.T)
        print("Similarity matrix computed.")
        # and apply the shrinkage
        if shrinkage > 0:
            # zero out diagonal values (BUGFIX: np.newaxis — the sp.newaxis
            # alias was removed from recent SciPy releases)
            S_ucf = S_ucf - sps.dia_matrix((S_ucf.diagonal()[np.newaxis, :], [0]), shape=S_ucf.shape)
            print("Removed diagonal")
            S_ucf_shrink = self.apply_shrinkage(cf_UIM_norm, S_ucf, shrink=shrinkage)
            print("Applied shrinkage")
            self.UIM_estm = self.UIM.T.dot(S_ucf_shrink)
            self.UIM_estm = self.UIM_estm.T
        else:
            self.UIM_estm = self.UIM.T.dot(S_ucf)
            self.UIM_estm = self.UIM_estm.T
        print("UIM_estm computed.")
# Fit the four base recommenders; each fills its own UIM_estm score matrix
# that the hybrid blend below combines.
mat = SVDRecommender()
mat.fit(k=200)
icf = ItemCFRecommender()
icf.fit(shrinkage=0)
ucf = UserCFRecommender()
ucf.fit(shrinkage=50)
cbf = BasicItemCBFRecommender()
cbf.fit(shrinkage=0)
def recommend3(recsys, approach1, approach2, approach3, w1, w2, w3, at=5):
    """Blend three estimated score matrices with weights w1..w3 and write
    the top-`at` target tracks into recsys.df_target['recommend'].

    recsys: a fitted RecommenderSystem (provides df_target via
    target_structure(), df_track_id_unique and list_target_tracks).
    approach1..3: sparse playlists-x-tracks score matrices (UIM_estm).
    """
    recsys.target_structure()
    start_time = time.time()
    for index, row in recsys.df_target.iterrows():
        if index % 1000 == 0:
            print("Current playlist: " + str(index))
        # weighted sum of the three score columns for this playlist
        estm = pd.DataFrame(approach1[row['index_playlist'],:].T.toarray() * w1 + approach2[row['index_playlist'],:].T.toarray() * w2 + approach3[row['index_playlist'],:].T.toarray() * w3)
        estm.reset_index(level=0, inplace=True)
        estm.columns = ['index_track','pred']
        # filter tracks which are already in the playlist, so they can't be recommended
        # (BUGFIX: use ~ — unary minus on a boolean Series is rejected by
        # modern pandas/numpy)
        estm = estm[~estm["index_track"].isin(row['index_track'])]
        # translate track index back to track_id
        estm = estm.merge(recsys.df_track_id_unique, how='inner', on='index_track')
        # filter on target track set
        estm = estm[estm['track_id'].isin(recsys.list_target_tracks)]
        estm = estm.sort_values('pred',ascending=False)
        count = 1
        for index2, row2 in estm.iterrows():
            # insert 5 top recommendations into dataframe
            if count < (at + 1):
                row['recommend'].append(int(row2['track_id']))
                count += 1
            else:
                break
    print("--- %s minutes ---" % ((time.time() - start_time)/60))
def recommend4(recsys, approach1, approach2, approach3, approach4, w1, w2, w3, w4, at=5):
    """Blend four estimated score matrices with weights w1..w4 and write
    the top-`at` target tracks into recsys.df_target['recommend'].

    Same contract as recommend3, with one additional score matrix/weight.
    """
    recsys.target_structure()
    start_time = time.time()
    for index, row in recsys.df_target.iterrows():
        if index % 1000 == 0:
            print("Current playlist: " + str(index))
        # weighted sum of the four score columns for this playlist
        estm = pd.DataFrame(approach1[row['index_playlist'],:].T.toarray() * w1 + approach2[row['index_playlist'],:].T.toarray() * w2 + approach3[row['index_playlist'],:].T.toarray() * w3 + approach4[row['index_playlist'],:].T.toarray() * w4)
        estm.reset_index(level=0, inplace=True)
        estm.columns = ['index_track','pred']
        # filter tracks which are already in the playlist, so they can't be recommended
        # (BUGFIX: use ~ — unary minus on a boolean Series is rejected by
        # modern pandas/numpy)
        estm = estm[~estm["index_track"].isin(row['index_track'])]
        # translate track index back to track_id
        estm = estm.merge(recsys.df_track_id_unique, how='inner', on='index_track')
        # filter on target track set
        estm = estm[estm['track_id'].isin(recsys.list_target_tracks)]
        estm = estm.sort_values('pred',ascending=False)
        count = 1
        for index2, row2 in estm.iterrows():
            # insert 5 top recommendations into dataframe
            if count < (at + 1):
                row['recommend'].append(int(row2['track_id']))
                count += 1
            else:
                break
    print("--- %s minutes ---" % ((time.time() - start_time)/60))
# Blend content-based, item-CF and user-CF scores with the given weights
# (per the output filename, weights were tuned on MAP) and export the
# submission CSV.
recommend3(ucf, cbf.UIM_estm, icf.UIM_estm, ucf.UIM_estm, 0.381848739, 0.329603841, 0.288547418)
ucf.create_submission('010_hybrid_cbf_icf_ucfS50_MAPbased.csv')
| notebooks/.ipynb_checkpoints/010_Hybrid_ALL-checkpoint.ipynb |