code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# You can easily export the best model found by AutoKeras as a Keras Model.
#
# The following example uses [ImageClassifier](/image_classifier) as an example.
# All the tasks and the [AutoModel](/auto_model/#automodel-class) has this
# [export_model](/auto_model/#export_model-method) function.
#
# + colab_type="code"
# !pip install autokeras
# + colab_type="code"
import tensorflow as tf

print(tf.__version__)
from tensorflow.keras.datasets import mnist

import autokeras as ak

# Load MNIST (28x28 grayscale digits) for a quick demo run.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Initialize the image classifier.
clf = ak.ImageClassifier(
    overwrite=True,
    max_trials=1)  # Try only 1 model.(Increase accordingly)
# Feed the image classifier with training data.
clf.fit(x_train, y_train, epochs=1)  # Change no of epochs to improve the model
# Export as a Keras Model.
model = clf.export_model()
print(type(model))  # <class 'tensorflow.python.keras.engine.training.Model'>

# Prefer the TensorFlow SavedModel format; some exported models can only be
# saved as HDF5, so fall back in that case.
try:
    model.save("model_autokeras", save_format="tf")
    model_path = "model_autokeras"
except Exception:
    # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
    model.save("model_autokeras.h5")
    model_path = "model_autokeras.h5"

from tensorflow.keras.models import load_model

# BUG FIX: reload whichever file was actually written; the original always
# loaded "model_autokeras" and broke when the h5 fallback had been used.
loaded_model = load_model(model_path, custom_objects=ak.CUSTOM_OBJECTS)
predicted_y = loaded_model.predict(tf.expand_dims(x_test, -1))
print(predicted_y)
| docs/ipynb/export.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/11_export_image.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
#
# Uncomment the following line to install [geemap](https://geemap.org) if needed.
# +
# # !pip install geemap
# -
import ee
import geemap
import os
# Tutorial video accompanying this notebook.
geemap.show_youtube('_6JOA-iiEGU')
Map = geemap.Map()
Map
# ## Download an ee.Image
# +
image = ee.Image('LANDSAT/LE7_TOA_5YEAR/1999_2003')
landsat_vis = {'bands': ['B4', 'B3', 'B2'], 'gamma': 1.4}
Map.addLayer(image, landsat_vis, "LE7_TOA_5YEAR/1999_2003", True, 0.7)
# +
# Draw any shapes on the map using the Drawing tools before executing this code block
feature = Map.draw_last_feature
if feature is None:
    # No shape drawn: fall back to a fixed rectangular region of interest.
    geom = ee.Geometry.Polygon(
        [
            [
                [-115.413031, 35.889467],
                [-115.413031, 36.543157],
                [-114.034328, 36.543157],
                [-114.034328, 35.889467],
                [-115.413031, 35.889467],
            ]
        ]
    )
    feature = ee.Feature(geom, {})
roi = feature.geometry()
# -
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = os.path.join(out_dir, 'landsat.tif')
# ### Exporting all bands as one single image
# unmask() fills masked pixels so the exported raster has no gaps.
image = image.clip(roi).unmask()
geemap.ee_export_image(
    image, filename=filename, scale=90, region=roi, file_per_band=False
)
# ### Exporting each band as one image
geemap.ee_export_image(
    image, filename=filename, scale=90, region=roi, file_per_band=True
)
# ### Export an image to Google Drive
geemap.ee_export_image_to_drive(
    image, description='landsat', folder='export', region=roi, scale=30
)
# ## Download an ee.ImageCollection
import ee
import geemap
import os
loc = ee.Geometry.Point(-99.2222, 46.7816)
# NAIP imagery covering the point, 2008-2019, restricted to images that
# carry the near-infrared "N" band.
collection = (
    ee.ImageCollection('USDA/NAIP/DOQQ')
    .filterBounds(loc)
    .filterDate('2008-01-01', '2020-01-01')
    .filter(ee.Filter.listContains("system:band_names", "N"))
)
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
print(collection.aggregate_array('system:index').getInfo())
geemap.ee_export_image_collection(collection, out_dir=out_dir)
geemap.ee_export_image_collection_to_drive(collection, folder='export', scale=10)
# ## Extract pixels as a Numpy array
# +
import ee
import geemap
import numpy as np
import matplotlib.pyplot as plt
img = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_038029_20180810').select(['B4', 'B5', 'B6'])
aoi = ee.Geometry.Polygon(
    [[[-110.8, 44.7], [-110.8, 44.6], [-110.6, 44.6], [-110.6, 44.7]]], None, False
)
# rgb_img: (rows, cols, bands) array of the selected bands over the AOI.
rgb_img = geemap.ee_to_numpy(img, region=aoi)
print(rgb_img.shape)
# -
# Scale the data to [0, 255] to show as an RGB image.
# Adapted from https://bit.ly/2XlmQY8. Credits to <NAME>
# NOTE(review): pixel values outside the assumed [100, 3600] range will wrap
# around in the uint8 cast — consider np.clip before casting.
rgb_img_test = (255 * ((rgb_img[:, :, 0:3] - 100) / 3500)).astype('uint8')
plt.imshow(rgb_img_test)
plt.show()
| examples/notebooks/11_export_image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ICTK)
# language: python
# name: ictk
# ---
# # Case Study on ANOVA
# **<NAME>**<br>
# *<NAME>*
# ***
# **Statement**<br>
# XYZ Company has offices in four different zones. The company wishes to investigate the following :<br>
# ● The mean sales generated by each zone.<br>
# ● Total sales generated by all the zones for each month.<br>
# ● Check whether all the zones generate the same amount of sales.<br>
# Help the company to carry out their study with the help of data provided.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the monthly, zone-wise sales figures.
data = pd.read_csv('./datasets/Sales_data_zone_wise.csv')
print('Sample shape = ',data.shape)
data.head()
# **Describe the data**
dis = data.describe().T.round(3)
dis
# Bar chart of mean sales per zone.
dis['mean'].plot(kind = 'bar')
dis['mean']
# The mean sales of the 4 zones are described above. It can be observed that **Zone-D** has the highest sales figures followed by **Zone-C** followed by **Zone-B** and at last position is **Zone-A**. The sales figures of Zone-B and Zone-C are comparable.
# ***
# ### Total sales generated by Zones in each month
data['Total Sales'] = data[['Zone - A', 'Zone - B', 'Zone - C', 'Zone - D']].sum(axis=1)
data[['Month','Total Sales']]
# ### Check whether all the zones generate the same amount of sales
# **H0 = All Zones generate same amount of sales<br>
# H1 = Sales of at least one Zone differs**
# One-way ANOVA across the four zone columns at the 5% significance level.
from scipy.stats import f_oneway
stat, p = f_oneway(data['Zone - A'],data['Zone - B'],data['Zone - C'],data['Zone - D'])
print('Stats = ',stat)
print('P-Value = ',p)
# NOTE(review): conventional phrasing is "fail to reject H0" rather than
# "accept" it.
if(p>0.05):
    print('Accept Null Hypothesis')
else:
    print('Reject Null Hypothesis')
# Therefore it can be concluded that at least one zone generates a different amount of sales.
# ***
| notebooks/cs-05-anova.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
import pysequitur
# ### Explore
# NOTE(review): this notebook is Python 2 (print statements); it will not
# run under Python 3 without conversion.
from subprocess import Popen, PIPE, STDOUT
# Run the external sequitur binary, feeding a sample string on stdin.
command = Popen(["sequitur/sequitur", "-p", "-r"], stdin=PIPE, stdout=PIPE)
print command.communicate("abcabcabddda")[0]
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&*+,-.:;<=>?@[]"
print len(charset)
# ## Test pysequitur
words = "abc abc abd abd abd abc abd xxx abd abc abc abc abd".split()
sm = pysequitur.SequiturModel("sequitur/sequitur")
sm.fit(words)
print "\nraw_rules: \n", sm.raw_rules
print "\nrule0: \n", sm.rule0
print "\nrules: \n", sm.rules
print "\nword occurences in rules: \n", sm.word2rule_occurrences
print sm.get_printable_rules()
sm.word2rule_occurrences
zip(words, sm.tag(words))
| MSfingerprinter/MSfingerprinter/pysaxmaster/.ipynb_checkpoints/Tutorial-sequitur-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Compute PSNR, SSIM and per-channel histogram distance between ground-truth
# images and generated (fake) images saved every 5 epochs.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage import metrics
range_start = 5
range_end = 190
fake_dirs = ['results/psnr/fake_B_{}'.format(i) for i in range(range_start, range_end, 5)]
images = ['chapter_10_5', 'chapter_13_14', 'chapter_106_6', 'chapter_131_0']
gts = [ np.asarray(Image.open('datasets/sample/trainB/'+image+'.jpg').resize((256, 256))) for image in images]
images = [[ np.asarray(Image.open(fake+'/'+image+'.png')) for fake in fake_dirs ] for image in images]
psnr = []
ssim = []
hist_r = []
hist_g = []
hist_b = []
for i, gt in enumerate(gts):
    actual_psnr = []
    actual_ssim = []
    actual_hist_r = []
    actual_hist_g = []
    actual_hist_b = []
    for j in range(len(images[i])):
        psnr_value = metrics.peak_signal_noise_ratio(gt, images[i][j])
        # NOTE(review): multichannel= is deprecated in newer scikit-image in
        # favour of channel_axis=-1 — confirm the installed version.
        ssim_value = metrics.structural_similarity(gt, images[i][j], multichannel=True)
        color = ('b','g','r')
        # NOTE(review): PIL arrays are RGB, so channel 0 here is actually the
        # red channel despite the 'b' label. Both images are indexed the same
        # way, so the comparison values are consistent — only the r/b labels
        # (and plot colours below) appear swapped. Verify before relying on
        # per-channel results.
        gt_hst = {}
        img_hst = {}
        for channel,col in enumerate(color):
            gt_hst[col] = cv2.calcHist([gt],[channel],None,[256],[0,256])
            img_hst[col] = cv2.calcHist([images[i][j]],[channel],None,[256],[0,256])
        # Bhattacharyya distance between the two 256-bin histograms.
        hist_value_b = cv2.compareHist(gt_hst['b'], img_hst['b'], cv2.HISTCMP_BHATTACHARYYA )
        hist_value_g = cv2.compareHist(gt_hst['g'], img_hst['g'], cv2.HISTCMP_BHATTACHARYYA )
        hist_value_r = cv2.compareHist(gt_hst['r'], img_hst['r'], cv2.HISTCMP_BHATTACHARYYA )
        actual_psnr.append(psnr_value)
        actual_ssim.append(ssim_value)
        actual_hist_r.append(hist_value_r)
        actual_hist_g.append(hist_value_g)
        actual_hist_b.append(hist_value_b)
    psnr.append(np.array(actual_psnr))
    ssim.append(np.array(actual_ssim))
    hist_r.append(np.array(actual_hist_r))
    hist_g.append(np.array(actual_hist_g))
    hist_b.append(np.array(actual_hist_b))
psnr = np.array(psnr)
ssim = np.array(ssim)
hist_r = np.array(hist_r)
hist_g = np.array(hist_g)
hist_b = np.array(hist_b)
# Average each metric across the test images (axis 0 = image index).
avg_psnr = np.sum(psnr, axis=0)/len(images)
avg_ssim = np.sum(ssim, axis=0)/len(images)
avg_hist_r = np.sum(hist_r, axis=0)/len(images)
avg_hist_g = np.sum(hist_g, axis=0)/len(images)
avg_hist_b = np.sum(hist_b, axis=0)/len(images)
def avged(data, window_width):
    """Smooth *data* with a simple moving average of size *window_width*.

    Returns an array of length len(data) - window_width + 1 whose k-th
    entry is the mean of data[k:k + window_width].
    """
    kernel = np.ones(window_width) / window_width
    return np.convolve(data, kernel, mode="valid")
# -
# Plot the windowed (window=3) PSNR average against training epoch.
y = avged(avg_psnr, 3)
plt.plot([5*i for i in range(1,len(y) +1) ], y)
plt.title('Average PSNR on test images')
plt.xlabel('Epoch')
plt.ylabel('PSNR')
plt.show()
# Same smoothing applied to the SSIM curve.
y = avged(avg_ssim, 3)
plt.plot([5*i for i in range(1,len(y) +1) ], y)
plt.title('Average SSIM on test images')
plt.xlabel('Epoch')
plt.ylabel('SSIM')
plt.show()
# +
# Per-channel histogram distance curves, coloured by channel label.
y = avged(avg_hist_r, 3)
plt.plot([5*i for i in range(1,len(y) +1) ], y, color='red')
y = avged(avg_hist_g, 3)
plt.plot([5*i for i in range(1,len(y) +1) ], y, color='green')
y = avged(avg_hist_b, 3)
plt.plot([5*i for i in range(1,len(y) +1) ], y, color='blue')
plt.title('Average Histogram difference on test images by channel')
plt.xlabel('Epoch')
plt.ylabel('Histogram Difference')
plt.show()
# -
| CUT/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
############## PLEASE RUN THIS CELL FIRST! ###################
# import everything and define a test runner function
from importlib import reload
from helper import run
import block, helper, network
# -
# ### Exercise 1
# Parse this message
# ```
# f9beb4d976657261636b000000000000000000005df6e0e2
# ```
#
# +
# Exercise 1
msg = bytes.fromhex('f9beb4d976657261636b000000000000000000005df6e0e2')
# first 4 are network magic
# next 12 are command (here 76657261636b00... decodes to b'verack')
# next 4 are payload length
# next 4 are checksum
# rest is payload
# print the command
# -
# ### Exercise 2
#
#
#
#
# #### Make [this test](/edit/week6/network.py) pass: `network.py:NetworkEnvelopeTest:test_parse`
# +
# Exercise 2
# Reload to pick up any edits to network.py, then run the single unit test.
reload(network)
run(network.NetworkEnvelopeTest('test_parse'))
# -
# ### Exercise 3
#
#
#
#
# #### Make [this test](/edit/week6/network.py) pass: `network.py:NetworkEnvelopeTest:test_serialize`
# +
# Exercise 3
reload(network)
run(network.NetworkEnvelopeTest('test_serialize'))
# -
# ### Exercise 4
#
#
#
#
# #### Make [this test](/edit/week6/network.py) pass: `network.py:GetHeadersMessageTest:test_serialize`
# +
# Exercise 4
reload(network)
run(network.GetHeadersMessageTest('test_serialize'))
# -
# ### Exercise 5
#
#
#
#
# #### Make [this test](/edit/week6/network.py) pass: `network.py:HeadersMessageTest:test_parse`
# +
# Exercise 5
reload(network)
run(network.HeadersMessageTest('test_parse'))
# -
# Handshake Example
# Connect to a testnet node, send our version message, and wait for the
# verack that completes the handshake.
from network import SimpleNode, VersionMessage, VerAckMessage
node = SimpleNode('tbtc.programmingblockchain.com', testnet=True)
version = VersionMessage()
node.send(version)
print(node.wait_for(VerAckMessage).command)
# ### Exercise 6
#
#
#
#
# #### Make [this test](/edit/week6/network.py) pass: `network.py:SimpleNodeTest:test_handshake`
# +
# Exercise 6
reload(network)
run(network.SimpleNodeTest('test_handshake'))
# -
# Block Header Download Example
# Download 20 batches of block headers from a mainnet node, validating the
# proof-of-work and chain continuity of each header along the way.
from network import GetHeadersMessage, HeadersMessage, SimpleNode
from block import GENESIS_BLOCK_HASH
node = SimpleNode('btc.programmingblockchain.com', testnet=False)
node.handshake()
last_block_hash = GENESIS_BLOCK_HASH
current_height = 1
for _ in range(20):
    getheaders = GetHeadersMessage(start_block=last_block_hash)
    node.send(getheaders)
    headers = node.wait_for(HeadersMessage)
    for header in headers.headers:
        if not header.check_pow():
            # BUG FIX: the original formatted an undefined name `count`,
            # which raised NameError instead of the intended RuntimeError.
            raise RuntimeError('bad proof of work at block {}'.format(current_height))
        if last_block_hash != GENESIS_BLOCK_HASH and header.prev_block != last_block_hash:
            # BUG FIX: same undefined `count` — use current_height.
            raise RuntimeError('discontinuous block at {}'.format(current_height))
        if current_height % 2016 == 0:
            # Print the header id at each difficulty-adjustment boundary.
            print(header.id())
        current_height += 1
        last_block_hash = header.hash()
# ### Exercise 7
# Download the first 40,000 blocks for testnet and validate them.
#
# +
# Exercise 7
# Scaffolding: mirror the mainnet header-download example, but on testnet.
from network import SimpleNode, GetHeadersMessage, HeadersMessage
from block import TESTNET_GENESIS_BLOCK_HASH
# connect to tbtc.programmingblockchain.com
# handshake
# set the last block hash to the TESTNET_GENESIS_BLOCK_HASH
# set the current height to 1
# loop until we get 40,000 blocks
# create a GetHeadersMessage starting from the last block we have
# send the getheaders message
# wait for the HeadersMessage in response
# loop through the headers from the headers message
# check the proof of work
# the prev_block of the current block should be the last block
# print the id every 2016 blocks (difficulty adjustment)
# increment the block count
# set the last block hash
# -
# Merkle Parent Example
# The merkle parent is hash256 of the two child hashes concatenated
# (hash256 is presumably double-SHA256 — see helper.py).
from helper import hash256
tx_hash0 = bytes.fromhex('c117ea8ec828342f4dfb0ad6bd140e03a50720ece40169ee38bdc15d9eb64cf5')
tx_hash1 = bytes.fromhex('c131474164b412e3406696da1ee20ab0fc9bf41c8f05fa8ceea7a08d672d7cc5')
parent = hash256(tx_hash0+tx_hash1)
print(parent.hex())
# ### Exercise 8
# Calculate the Merkle parent of these hashes:
# ```
# f391da6ecfeed1814efae39e7fcb3838ae0b02c02ae7d0a5848a66947c0727b0
# 3d238a92a94532b946c90e19c49351c763696cff3db400485b813aecb8a13181
# ```
#
# +
# Exercise 8
# Compute the merkle parent of the two hashes below by hand.
from helper import hash256
hex_hash1 = 'f391da6ecfeed1814efae39e7fcb3838ae0b02c02ae7d0a5848a66947c0727b0'
hex_hash2 = '3d238a92a94532b946c90e19c49351c763696cff3db400485b813aecb8a13181'
# bytes.fromhex to get the bin hashes
# hash256 the combination
# hex() to see the result
# -
# ### Exercise 9
#
#
#
#
# #### Make [this test](/edit/week6/helper.py) pass: `helper.py:HelperTest:test_merkle_parent`
# +
# Exercise 9
reload(helper)
run(helper.HelperTest('test_merkle_parent'))
# -
# Merkle Parent Level Example
# Build one level of a merkle tree: pair up the hashes (duplicating the
# last one when the count is odd) and hash each pair into its parent.
from helper import merkle_parent
hex_hashes = [
    'c117ea8ec828342f4dfb0ad6bd140e03a50720ece40169ee38bdc15d9eb64cf5',
    'c131474164b412e3406696da1ee20ab0fc9bf41c8f05fa8ceea7a08d672d7cc5',
    'f391da6ecfeed1814efae39e7fcb3838ae0b02c02ae7d0a5848a66947c0727b0',
    '3d238a92a94532b946c90e19c49351c763696cff3db400485b813aecb8a13181',
    '10092f2633be5f3ce349bf9ddbde36caa3dd10dfa0ec8106bce23acbff637dae',
]
hashes = [bytes.fromhex(x) for x in hex_hashes]
if len(hashes) % 2 == 1:
    hashes.append(hashes[-1])
parent_level = []
# BUG FIX: iterate by the length of the padded `hashes` list, not the
# original `hex_hashes` — the old code only produced the right indices by
# accident (range semantics happened to line up for odd counts).
for i in range(0, len(hashes), 2):
    parent = merkle_parent(hashes[i], hashes[i+1])
    print(parent.hex())
    parent_level.append(parent)
# ### Exercise 10
# Calculate the next Merkle Parent Level given these hashes
# ```
# 8b30c5ba100f6f2e5ad1e2a742e5020491240f8eb514fe97c713c31718ad7ecd
# 7f4e6f9e224e20fda0ae4c44114237f97cd35aca38d83081c9bfd41feb907800
# ade48f2bbb57318cc79f3a8678febaa827599c509dce5940602e54c7733332e7
# 68b3e2ab8182dfd646f13fdf01c335cf32476482d963f5cd94e934e6b3401069
# 43e7274e77fbe8e5a42a8fb58f7decdb04d521f319f332d88e6b06f8e6c09e27
# ```
#
# +
# Exercise 10
# Compute the next merkle parent level of the five hashes below.
from helper import merkle_parent
hex_hashes = [
    '8b30c5ba100f6f2e5ad1e2a742e5020491240f8eb514fe97c713c31718ad7ecd',
    '7f4e6f9e224e20fda0ae4c44114237f97cd35aca38d83081c9bfd41feb907800',
    'ade48f2bbb57318cc79f3a8678febaa827599c509dce5940602e54c7733332e7',
    '68b3e2ab8182dfd646f13fdf01c335cf32476482d963f5cd94e934e6b3401069',
    '43e7274e77fbe8e5a42a8fb58f7decdb04d521f319f332d88e6b06f8e6c09e27',
]
# bytes.fromhex to get all the hashes in binary
# if the number of hashes is odd, duplicate the last one
# initialize parent level
# skip by two: use range(0, len(hashes), 2)
# calculate merkle_parent of i and i+1 hashes
# print the hash's hex
# add parent to parent level
# -
# ### Exercise 11
#
#
#
#
# #### Make [this test](/edit/week6/helper.py) pass: `helper.py:HelperTest:test_merkle_parent_level`
# +
# Exercise 11
reload(helper)
run(helper.HelperTest('test_merkle_parent_level'))
# -
# Merkle Root Example
# Repeatedly collapse the level with merkle_parent_level until one hash
# (the root) remains.
# NOTE(review): the '<KEY>' entries are redacted placeholders from this
# dataset dump; bytes.fromhex() will raise on them as-is.
from helper import merkle_parent_level
hex_hashes = [
    'c117ea8ec828342f4dfb0ad6bd140e03a50720ece40169ee38bdc15d9eb64cf5',
    '<KEY>',
    'f391da6ecfeed1814efae39e7fcb3838ae0b02c02ae7d0a5848a66947c0727b0',
    '<KEY>',
    '<KEY>',
    '7d37b3d54fa6a64869084bfd2e831309118b9e833610e6228adacdbd1b4ba161',
    '<KEY>',
    '<KEY>',
    '<KEY>',
    '95513952a04bd8992721e9b7e2937f1c04ba31e0469fbe615a78197f68f52b7c',
    '2e6d722e5e4dbdf2447ddecc9f7dabb8e299bae921c99ad5b0184cd9eb8e5908',
    'b13a750047bc0bdceb2473e5fe488c2596d7a7124b4e716fdd29b046ef99bbf0',
]
current_level = [bytes.fromhex(x) for x in hex_hashes]
while len(current_level) > 1:
    current_level = merkle_parent_level(current_level)
print(current_level[0].hex())
# ### Exercise 12
# Calculate the Merkle Root given these hashes
# ```
# 42f6f52f17620653dcc909e58bb352e0bd4bd1381e2955d19c00959a22122b2e
# 94c3af34b9667bf787e1c6a0a009201589755d01d02fe2877cc69b929d2418d4
# 959428d7c48113cb9149d0566bde3d46e98cf028053c522b8fa8f735241aa953
# a9f27b99d5d108dede755710d4a1ffa2c74af70b4ca71726fa57d68454e609a2
# 62af110031e29de1efcad103b3ad4bec7bdcf6cb9c9f4afdd586981795516577
# 766900590ece194667e9da2984018057512887110bf54fe0aa800157aec796ba
# e8270fb475763bc8d855cfe45ed98060988c1bdcad2ffc8364f783c98999a208
# 921b8cfd3e14bf41f028f0a3aa88c813d5039a2b1bceb12208535b0b43a5d09e
# 15535864799652347cec66cba473f6d8291541238e58b2e03b046bc53cfe1321
# 1c8af7c502971e67096456eac9cd5407aacf62190fc54188995666a30faf99f0
# 3311f8acc57e8a3e9b68e2945fb4f53c07b0fa4668a7e5cda6255c21558c774d
# ```
#
# +
# Exercise 12
# Compute the merkle root of the hashes below.
# NOTE(review): '<KEY>' entries are redacted placeholders from this dataset
# dump; bytes.fromhex() will raise on them as-is.
from helper import merkle_parent_level
hex_hashes = [
    '42f6f52f17620653dcc909e58bb352e0bd4bd1381e2955d19c00959a22122b2e',
    '94c3af34b9667bf787e1c6a0a009201589755d01d02fe2877cc69b929d2418d4',
    '<KEY>',
    'a9f27b99d5d108dede755710d4a1ffa2c74af70b4ca71726fa57d68454e609a2',
    '<KEY>',
    '766900590ece194667e9da2984018057512887110bf54fe0aa800157aec796ba',
    '<KEY>',
    '<KEY>',
    '15535864799652347cec66cba473f6d8291541238e58b2e03b046bc53cfe1321',
    '1c8af7c502971e67096456eac9cd5407aacf62190fc54188995666a30faf99f0',
    '3311f8acc57e8a3e9b68e2945fb4f53c07b0fa4668a7e5cda6255c21558c774d',
]
# bytes.fromhex to get all the hashes in binary
# initialize current level to be the hashes
# loop until current_level has only 1 element
# make the current level the parent level
# print the root's hex
# -
# ### Exercise 13
#
#
#
#
# #### Make [this test](/edit/week6/helper.py) pass: `helper.py:HelperTest:test_merkle_root`
# +
# Exercise 13
reload(helper)
run(helper.HelperTest('test_merkle_root'))
# -
# Block Merkle Root Example
# Tx hashes are displayed little-endian, so each is reversed ([::-1])
# before hashing and the resulting root reversed back for display.
from helper import merkle_root
tx_hex_hashes = [
    '42f6f52f17620653dcc909e58bb352e0bd4bd1381e2955d19c00959a22122b2e',
    '94c3af34b9667bf787e1c6a0a009201589755d01d02fe2877cc69b929d2418d4',
    '959428d7c48113cb9149d0566bde3d46e98cf028053c522b8fa8f735241aa953',
    'a9f27b99d5d108dede755710d4a1ffa2c74af70b4ca71726fa57d68454e609a2',
    '62af110031e29de1efcad103b3ad4bec7bdcf6cb9c9f4afdd586981795516577',
    '766900590ece194667e9da2984018057512887110bf54fe0aa800157aec796ba',
    'e8270fb475763bc8d855cfe45ed98060988c1bdcad2ffc8364f783c98999a208',
]
current_level = [bytes.fromhex(x)[::-1] for x in tx_hex_hashes]
print(merkle_root(current_level)[::-1].hex())
# ### Exercise 14
# Validate the merkle root for this block on Testnet:
# Block Hash:
# ```
# 0000000000000451fa80fcdb243b84c35eaae215a85a8faa880559e8239e6f20
# ```
#
# Transaction Hashes:
# ```
# 42f6f52f17620653dcc909e58bb352e0bd4bd1381e2955d19c00959a22122b2e
# 94c3af34b9667bf787e1c6a0a009201589755d01d02fe2877cc69b929d2418d4
# 959428d7c48113cb9149d0566bde3d46e98cf028053c522b8fa8f735241aa953
# a9f27b99d5d108dede755710d4a1ffa2c74af70b4ca71726fa57d68454e609a2
# 62af110031e29de1efcad103b3ad4bec7bdcf6cb9c9f4afdd586981795516577
# 766900590ece194667e9da2984018057512887110bf54fe0aa800157aec796ba
# e8270fb475763bc8d855cfe45ed98060988c1bdcad2ffc8364f783c98999a208
# 921b8cfd3e14bf41f028f0a3aa88c813d5039a2b1bceb12208535b0b43a5d09e
# 15535864799652347cec66cba473f6d8291541238e58b2e03b046bc53cfe1321
# 1c8af7c502971e67096456eac9cd5407aacf62190fc54188995666a30faf99f0
# 3311f8acc57e8a3e9b68e2945fb4f53c07b0fa4668a7e5cda6255c21558c774d
# ```
#
# +
# Exercise 14
# Validate a testnet block's merkle root against its tx hashes.
# NOTE(review): '<KEY>' entries are redacted placeholders from this dataset
# dump; bytes.fromhex() will raise on them as-is.
from helper import merkle_root
want = '4297fb95a0168b959d1469410c7527da5d6243d99699e7d041b7f3916ba93301'
tx_hex_hashes = [
    '42f6f52f17620653dcc909e58bb352e0bd4bd1381e2955d19c00959a22122b2e',
    '<KEY>',
    '<KEY>',
    'a9f27b99d5d108dede755710d4a1ffa2c74af70b4ca71726fa57d68454e609a2',
    '<KEY>',
    '766900590ece194667e9da2984018057512887110bf54fe0aa800157aec796ba',
    '<KEY>',
    '<KEY>',
    '15535864799652347cec66cba473f6d8291541238e58b2e03b046bc53cfe1321',
    '1c8af7c502971e67096456eac9cd5407aacf62190fc54188995666a30faf99f0',
    '3311f8acc57e8a3e9b68e2945fb4f53c07b0fa4668a7e5cda6255c21558c774d',
]
# bytes.fromhex and reverse ([::-1]) to get all the hashes in binary
# get the merkle root
# see if the reversed root is the same as the wanted root
# -
# ### Exercise 15
#
#
#
#
# #### Make [this test](/edit/week6/block.py) pass: `block.py:BlockTest:test_validate_merkle_root`
# +
# Exercise 15
reload(block)
run(block.BlockTest('test_validate_merkle_root'))
# -
# ### Exercise 16
# Validate the merkle root for this block on Testnet via network protocol:
# Block Hash:
# ```
# 0000000000044b01a9440b34f582fe171c7b8642fedd0ebfccf8fdf6a1810900
# ```
#
# +
# Exercise 16
# Fetch the full block over the network and validate its tx-hash merkle root.
from network import SimpleNode, GetDataMessage, BLOCK_DATA_TYPE
from block import Block
block_hex = '0000000000044b01a9440b34f582fe171c7b8642fedd0ebfccf8fdf6a1810900'
block_hash = bytes.fromhex(block_hex)
# connect to tbtc.programmingblockchain.com on testnet
# handshake
# create a GetDataMessage
# add_data on the message (BLOCK_DATA_TYPE, block_hash)
# send the getdata message
# wait for the block message in response
# check the proof of work
# validate the tx_hashes
# print the merkle root hex
# -
# Merkle Tree Example
# Pre-allocate the shape of a merkle tree over `total` leaves: level `depth`
# holds ceil(total / 2**(max_depth - depth)) slots, all initialised to None.
from math import ceil, log
total = 16
max_depth = ceil(log(total, 2))
merkle_tree = [
    [None] * ceil(total / 2 ** (max_depth - depth))
    for depth in range(max_depth + 1)
]
for level in merkle_tree:
    print(level)
# Merkle Tree Populating and Navigating Example
# Fill the leaf level (nodes[4]) and build each parent level bottom-up.
# NOTE(review): merkle_parent_level is not imported in this cell — it relies
# on an earlier cell's `from helper import merkle_parent_level`. The '<KEY>'
# entries are redacted placeholders from this dataset dump.
from merkleblock import MerkleTree
hex_hashes = [
    "9745f7173ef14ee4155722d1cbf13304339fd00d900b759c6f9d58579b5765fb",
    "5573c8ede34936c29cdfdfe743f7f5fdfbd4f54ba0705259e62f39917065cb9b",
    "82a02ecbb6623b4274dfcab82b336dc017a27136e08521091e443e62582e8f05",
    "507ccae5ed9b340363a0e6d765af148be9cb1c8766ccc922f83e4ae681658308",
    "a7a4aec28e7162e1e9ef33dfa30f0bc0526e6cf4b11a576f6c5de58593898330",
    "bb6267664bd833fd9fc82582853ab144fece26b7a8a5bf328f8a059445b59add",
    "<KEY>",
    "457743861de496c429912558a106b810b0507975a49773228aa788df40730d41",
    "7688029288efc9e9a0011c960a6ed9e5466581abf3e3a6c26ee317461add619a",
    "<KEY>",
    "<KEY>",
    "b3a92b5b255019bdaf754875633c2de9fec2ab03e6b8ce669d07cb5b18804638",
    "b5c0b915312b9bdaedd2b86aa2d0f8feffc73a2d37668fd9010179261e25e263",
    "<KEY>",
    "c555bc5fc3bc096df0a0c9532f07640bfb76bfe4fc1ace214b8b228a1297a4c2",
    "f9dbfafc3af3400954975da24eb325e326960a25b87fffe23eef3e7ed2fb610e",
]
tree = MerkleTree(len(hex_hashes))
tree.nodes[4] = [bytes.fromhex(h) for h in hex_hashes]
tree.nodes[3] = merkle_parent_level(tree.nodes[4])
tree.nodes[2] = merkle_parent_level(tree.nodes[3])
tree.nodes[1] = merkle_parent_level(tree.nodes[2])
tree.nodes[0] = merkle_parent_level(tree.nodes[1])
print(tree)
# Merkle Tree Populating Example #2
# Populate the tree by depth-first traversal instead of level-by-level:
# descend until both children are known, then set the parent and move up.
# NOTE(review): merkle_parent comes from an earlier cell's helper import;
# '<KEY>' entries are redacted placeholders from this dataset dump.
from merkleblock import MerkleTree
hex_hashes = [
    "9745f7173ef14ee4155722d1cbf13304339fd00d900b759c6f9d58579b5765fb",
    "5573c8ede34936c29cdfdfe743f7f5fdfbd4f54ba0705259e62f39917065cb9b",
    "82a02ecbb6623b4274dfcab82b336dc017a27136e08521091e443e62582e8f05",
    "507ccae5ed9b340363a0e6d765af148be9cb1c8766ccc922f83e4ae681658308",
    "a7a4aec28e7162e1e9ef33dfa30f0bc0526e6cf4b11a576f6c5de58593898330",
    "<KEY>",
    "<KEY>",
    "457743861de496c429912558a106b810b0507975a49773228aa788df40730d41",
    "7688029288efc9e9a0011c960a6ed9e5466581abf3e3a6c26ee317461add619a",
    "<KEY>",
    "<KEY>",
    "b3a92b5b255019bdaf754875633c2de9fec2ab03e6b8ce669d07cb5b18804638",
    "b5c0b915312b9bdaedd2b86aa2d0f8feffc73a2d37668fd9010179261e25e263",
    "c9d52c5cb1e557b92c84c52e7c4bfbce859408bedffc8a5560fd6e35e10b8800",
    "c555bc5fc3bc096df0a0c9532f07640bfb76bfe4fc1ace214b8b228a1297a4c2",
    "f9dbfafc3af3400954975da24eb325e326960a25b87fffe23eef3e7ed2fb610e",
]
tree = MerkleTree(len(hex_hashes))
tree.nodes[4] = [bytes.fromhex(h) for h in hex_hashes]
while tree.root() is None:
    if tree.is_leaf():
        tree.up()
    else:
        left_hash = tree.get_left_node()
        right_hash = tree.get_right_node()
        if left_hash is None:
            tree.left()
        elif right_hash is None:
            tree.right()
        else:
            tree.set_current_node(merkle_parent(left_hash, right_hash))
            tree.up()
print(tree)
# Merkle Tree Populating Example #3
# Same traversal as above, but with an odd leaf count: when a node has no
# right child, its parent is merkle_parent(left, left).
# NOTE(review): merkle_parent comes from an earlier cell's helper import;
# '<KEY>' entries are redacted placeholders from this dataset dump.
from merkleblock import MerkleTree
hex_hashes = [
    "9745f7173ef14ee4155722d1cbf13304339fd00d900b759c6f9d58579b5765fb",
    "5573c8ede34936c29cdfdfe743f7f5fdfbd4f54ba0705259e62f39917065cb9b",
    "82a02ecbb6623b4274dfcab82b336dc017a27136e08521091e443e62582e8f05",
    "507ccae5ed9b340363a0e6d765af148be9cb1c8766ccc922f83e4ae681658308",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "457743861de496c429912558a106b810b0507975a49773228aa788df40730d41",
    "7688029288efc9e9a0011c960a6ed9e5466581abf3e3a6c26ee317461add619a",
    "<KEY>",
    "<KEY>",
    "b3a92b5b255019bdaf754875633c2de9fec2ab03e6b8ce669d07cb5b18804638",
    "b5c0b915312b9bdaedd2b86aa2d0f8feffc73a2d37668fd9010179261e25e263",
    "c9d52c5cb1e557b92c84c52e7c4bfbce859408bedffc8a5560fd6e35e10b8800",
    "c555bc5fc3bc096df0a0c9532f07640bfb76bfe4fc1ace214b8b228a1297a4c2",
]
tree = MerkleTree(len(hex_hashes))
tree.nodes[4] = [bytes.fromhex(h) for h in hex_hashes]
while tree.root() is None:
    if tree.is_leaf():
        tree.up()
    else:
        left_hash = tree.get_left_node()
        if left_hash is None:
            tree.left()
        elif tree.right_exists():
            right_hash = tree.get_right_node()
            if right_hash is None:
                tree.right()
            else:
                tree.set_current_node(merkle_parent(left_hash, right_hash))
                tree.up()
        else:
            # Odd node: pair the left child with itself.
            tree.set_current_node(merkle_parent(left_hash, left_hash))
            tree.up()
print(tree)
| week6/week6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Flight Price Prediction
# # Import Libraries
# +
try:
    import warnings
    import numpy as np
    import pandas as pd
    import seaborn as sns
    import datetime as dt
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import LabelEncoder
    from sklearn.ensemble import ExtraTreesRegressor
    from sklearn.model_selection import train_test_split
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.metrics import mean_absolute_error , mean_squared_error , r2_score
    warnings.filterwarnings('ignore')
    # %matplotlib inline
except ModuleNotFoundError as e:
    # BUG FIX: the original printed the error and fell through, so execution
    # continued and crashed later with a confusing NameError on `pd`.
    # Report which dependency is missing, then stop.
    print(e)
    raise
# Load the training data (requires Data_Train.xlsx next to the notebook).
flight_data = pd.read_excel('Data_Train.xlsx')
flight_data.head()
# -
# # Analyze your Data
# +
class EDA:
    """Lightweight exploratory-data-analysis helpers for the flight dataset.

    Every method takes the DataFrame explicitly, so instances hold no state.
    """

    def dimensions(self, dataset):
        # Print row/feature counts (no return value).
        print('total rows : ', dataset.shape[0], 'and', 'total numbers of features are : ', dataset.shape[1])

    def null_values(self, dataset):
        # Per-column count of missing values.
        return dataset.isnull().sum()

    def _category_counts(self, dataset, column):
        # Shared helper replacing four copy-pasted counting loops: occurrence
        # count per value, preserving first-seen order like the originals.
        counts = {}
        for value in dataset[column]:
            counts[value] = counts.get(value, 0) + 1
        return counts

    def total_source(self, dataset):
        # Occurrences of each departure city.
        return self._category_counts(dataset, 'Source')

    def total_stops(self, dataset):
        # Occurrences of each stop-count label.
        return self._category_counts(dataset, 'Total_Stops')

    def total_destinations(self, dataset):
        # Occurrences of each destination city.
        return self._category_counts(dataset, 'Destination')

    def total_airlines(self, dataset):
        # Occurrences of each airline.
        return self._category_counts(dataset, 'Airline')
# Run each EDA summary and print the results.
e = EDA()
e.dimensions(flight_data)
print()
null = e.null_values(flight_data)
print(null)
print()
source = e.total_source(flight_data)
print('total source are')
print(source)
print()
haults = e.total_stops(flight_data)
print('total stops are')
print(haults)
print()
dest = e.total_destinations(flight_data)
print('total destinations are')
print(dest)
print()
total_flights = e.total_airlines(flight_data)
print('total airlines are')
print(total_flights)
# -
# # Handle The Features
#
# Our dataset consists of categorical independent features; we need to
# convert them to numerical values to build our model. Most of the data is
# in datetime form, so we will extract the date, day, and month.
# +
class Handle_Data:
    """Cleaning / encoding helpers for the raw flight-price DataFrame."""

    def null_values(self, dataset):
        """Return *dataset* with rows containing any NaN removed.

        BUG FIX: the original did `dataset = dataset.dropna(inplace=True)`
        inside a per-column loop; `dropna(inplace=True)` returns None, so the
        method returned None (or crashed on the next column) whenever the
        data actually contained missing values.
        """
        if dataset.isnull().values.any():
            dataset = dataset.dropna()
        return dataset

    def create_dummies(self, dataset):
        # One-hot encode the categorical route columns (drop_first avoids
        # the dummy-variable trap).
        return pd.get_dummies(dataset[['Source', 'Total_Stops', 'Destination']], drop_first=True)

    def concat_data(self, dataset, new):
        # Column-wise concatenation of the original frame and new features.
        return pd.concat([dataset, new], axis=1)

class Time_Data(Handle_Data):
    """Datetime feature extraction on top of the basic cleaning helpers."""

    def arrival_hour(self, dataset):
        return pd.to_datetime(dataset['Arrival_Time']).dt.hour

    def arrival_minute(self, dataset):
        return pd.to_datetime(dataset['Arrival_Time']).dt.minute

    def dept_hour(self, dataset):
        return pd.to_datetime(dataset['Dep_Time']).dt.hour

    def dept_minute(self, dataset):
        return pd.to_datetime(dataset['Dep_Time']).dt.minute

    def journey_day(self, dataset):
        return pd.to_datetime(dataset['Date_of_Journey']).dt.day

    def journey_month(self, dataset):
        return pd.to_datetime(dataset['Date_of_Journey']).dt.month

    def airline_dummies(self, dataset):
        return pd.get_dummies(dataset['Airline'], drop_first=True)

    def concat_airlines(self, dataset, dummies=None):
        # Generalized: accept the dummy frame as a parameter. When omitted,
        # fall back to the module-level `airline_dummies` global that the
        # original implementation silently depended on (kept for backward
        # compatibility with existing callers).
        if dummies is None:
            dummies = airline_dummies
        return pd.concat([dataset, dummies], axis=1)

    def duration_data(self, dataset):
        """Split 'Duration' strings like '2h 50m' into numeric columns.

        Mutates *dataset* in place, adding 'duration hours' and
        'duration_minute'.
        """
        duration = list(dataset['Duration'])
        hours_data = []
        minute_data = []
        for i in range(len(duration)):
            # Normalise one-part values: '19h' -> '19h 0m', '5m' -> '0h 5m'.
            if len(duration[i].split()) != 2:
                if 'h' in duration[i]:
                    duration[i] = duration[i] + " 0m"
                else:
                    duration[i] = "0h " + duration[i]
        for i in range(len(duration)):
            hours_data.append(int(duration[i].split(sep='h')[0]))
            minute_data.append(int(duration[i].split(sep='m')[0].split()[-1]))
        dataset['duration hours'] = hours_data
        dataset['duration_minute'] = minute_data

    def remove_unwanted(self, dataset):
        # Drop the raw columns that the engineered features replace.
        dataset = dataset.drop(['Airline', 'Date_of_Journey', 'Source', 'Destination', 'Route', 'Dep_Time',
                                'Arrival_Time', 'Duration', 'Total_Stops', 'Additional_Info'], axis=1)
        return dataset
# Drive the full feature-engineering pipeline on the raw flight_data frame.
t = Time_Data()
# NOTE(review): null_values uses dropna(inplace=True) and assigns its result
# (None) back, so flight_data becomes None here — confirm and fix upstream.
flight_data = t.null_values(flight_data)
dummies = t.create_dummies(flight_data)
new_data = t.concat_data(flight_data,dummies)
# Derive numeric time features from the raw datetime strings.
new_data['Arrival Hour'] = t.arrival_hour(new_data)
new_data['Arrival minute'] = t.arrival_minute(new_data)
new_data['departure hour'] = t.dept_hour(new_data)
new_data['departure minute'] = t.dept_minute(new_data)
new_data['day of journey'] = t.journey_day(new_data)
new_data['month of journey'] = t.journey_month(new_data)
# Module-level airline_dummies is read inside concat_airlines.
airline_dummies = t.airline_dummies(new_data)
new_flight_data = t.concat_airlines(new_data)
# duration_data mutates new_flight_data in place (adds two columns).
t.duration_data(new_flight_data)
new_flight_data = t.remove_unwanted(new_flight_data)
# -
# # Seperating Dependent and Independent Features
x = new_flight_data.drop('Price',axis = 1)  # independent features
y = new_flight_data['Price']  # target variable
# # Feature Selection
selection_model = ExtraTreesRegressor()
class Feature_Selection:
    """Fit an importance-producing model and plot the top-ranked features."""

    def select_feature(self, model, x, y):
        """Fit *model* on (x, y) and record its feature importances."""
        model.fit(x, y)
        self.score = model.feature_importances_
        # Remember the training frame so display() no longer depends on a
        # module-level ``x`` variable (hidden global in the original).
        self._x = x

    def display(self):
        """Horizontal bar-plot of the 20 most important features."""
        plt.figure(figsize=(20, 10))
        scores = pd.Series(self.score, self._x.columns)
        scores.nlargest(20).plot(kind='barh')
# Rank features with the ExtraTrees importances and plot the top 20.
f = Feature_Selection()
f.select_feature(selection_model,x,y)
f.display()
# # Split the data from train and test
train_x , test_x , train_y , test_y = train_test_split(x,y , test_size = 0.3 , random_state = 42)
# # Model Prediction and Measuring the Accuracy
# +
model = RandomForestRegressor()
class Model_Prediction:
    """Train/evaluate a regressor on the module-level train/test split.

    NOTE(review): reads the module-level variables ``train_x``, ``train_y``,
    ``test_x``, ``test_y`` and (in the display methods) ``model``; it must
    run after the train/test split cell.
    """

    def _ensure_fitted(self, model):
        # Fit at most once so train and test metrics describe the same model.
        # The original refit the RandomForest in fit_on_test and in each
        # display method, which wasted work and — because the forest is
        # randomized — produced metrics from different models.
        if not getattr(self, '_fitted', False):
            model.fit(train_x, train_y)
            self._fitted = True

    def fit_on_train(self, model):
        """Fit (if needed) and return (MSE, R^2, MAE) on the training split."""
        self._ensure_fitted(model)
        self.predict_on_train = model.predict(train_x)
        total_train_error = mean_squared_error(train_y, self.predict_on_train)
        total_train_score = r2_score(train_y, self.predict_on_train)
        absolute_error = mean_absolute_error(train_y, self.predict_on_train)
        return total_train_error, total_train_score, absolute_error

    def fit_on_test(self, model):
        """Fit (if needed) and return the R^2 score on the held-out test split."""
        self._ensure_fitted(model)
        self.prediction_on_test = model.predict(test_x)
        total_test_score = r2_score(test_y, self.prediction_on_test)
        return total_test_score

    def display_train_result(self):
        """Scatter predicted-vs-actual and residual distribution (training)."""
        plt.figure(figsize=(20, 10))
        self.fit_on_train(model)
        plt.subplot(1, 2, 1)
        plt.scatter(train_y, self.predict_on_train)
        plt.subplot(1, 2, 2)
        sns.distplot(train_y - self.predict_on_train)
        plt.show()

    def display_test_result(self):
        """Residual distribution and predicted-vs-actual scatter (test)."""
        plt.figure(figsize=(20, 10))
        self.fit_on_test(model)
        plt.subplot(1, 2, 1)
        sns.distplot(test_y - self.prediction_on_test)
        plt.subplot(1, 2, 2)
        plt.scatter(test_y, self.prediction_on_test)
        plt.show()
# Fit the RandomForest and report train/test metrics, then plot diagnostics.
m = Model_Prediction()
training_error , training_score , abs_error = m.fit_on_train(model)
print('score of our model',training_score)
testing_score = m.fit_on_test(model)
print('test score of our model',testing_score)
print()
m.display_train_result()
m.display_test_result()
# -
| flight price prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Exercise 81: Compute the Hypotenuse(23 Lines)
#
# Write a function that takes the lengths of the two shorter sides of a right triangle as its parameters. Return the hypotenuse of the triangle, computed using Pythagorean theorem, as the function’s result. Include a main program that reads the lengths of the shorter sides of a right triangle from the user, uses your function to compute the length of the hypotenuse, and displays the result.
# Exercise 82: Taxi Fare (22 Lines)
#
# In a particular jurisdiction, taxi fares consist of a base fare of 4.00, plus 0.25 for every 140 meters traveled. Write a function that takes the distance traveled (in kilometers) as its only parameter and returns the total fare as its only result. Write a main program that demonstrates the function.
#
# Hint: Taxi fares change over time. Use constants to represent the base fare and
# the variable portion of the fare so that the program can be updated easily when
# the rates increase.
#
# Exercise 83: Shipping Calculator (23 Lines)
#
# An online retailer provides express shipping for many of its items at a rate of 10.95 for the first item, and 2.95 for each subsequent item. Write a function that takes the number of items in the order as its only parameter. Return the shipping charge for the order as the function’s result. Include a main program that reads the number of items purchased from the user and displays the shipping charge.
# Exercise 84: Median of Three Values (Solved—42 Lines)
#
# Write a function that takes three numbers as parameters, and returns the median value of those parameters as its result. Include a main program that reads three values from the user and displays their median.
#
# Hint: The median value is the middle of the three values when they are sorted into ascending order. It can be found using if statements, or with a little bit of
# mathematical creativity
# +
def median(a, b, c):
    """Return the median of three values.

    Fixed: the original used strict comparisons only, so ties such as
    median(1, 1, 2) fell through every branch and returned None; using
    <= makes the branches exhaustive.
    """
    if b <= a <= c or c <= a <= b:
        return a
    if a <= b <= c or c <= b <= a:
        return b
    return c
def altMedian(a, b, c):
    """Return the median of three values without explicit branching.

    The sum minus the smallest and largest values leaves the middle one.
    """
    lo = min(a, b, c)
    hi = max(a, b, c)
    return a + b + c - lo - hi
def main():
    """Read three numbers from the user and print their median both ways."""
    x = float(input("Please enter the first value:"))
    y = float(input("Please enter the second value:"))
    z = float(input("Please enter the third value:"))
    print("The median value is:", median(x, y, z))
    print("Using the alternative method, the median value is:", altMedian(x, y,z))
main()
# -
# Exercise 85: Convert an Integer to its Ordinal Number(47 Lines)
#
# Words like first, second and third are referred to as ordinal numbers. In this exercise, you will write a function that takes an integer as its only parameter and returns a string containing the appropriate English ordinal number as its only result. Your function must handle the integers between 1 and 12 (inclusive). It should return an empty string if a value outside of this range is provided as a parameter. Include a main program that demonstrates your function by displaying each integer from 1 to
# 12 and its ordinal number. Your main program should only run when your file has not been imported into another program.
# Exercise 86: The Twelve Days of Christmas (Solved—48 Lines)
#
# The Twelve Days of Christmas is a repetitive song that describes an increasingly long list of gifts sent to one’s true love on each of 12 days. A single gift is sent on the first day. A new gift is added to the collection on each additional day, and then the complete collection is sent. The first three verses of the song are shown below.
#
# The complete lyrics are available on the internet.
#
# On the first day of Christmas
#
# my true love sent to me:
#
# A partridge in a pear tree.
#
# On the second day of Christmas
#
# my true love sent to me:
#
# Two turtle doves,
#
# And a partridge in a pear tree.
#
# On the third day of Christmas
#
# my true love sent to me:
#
# Three French hens,
#
# Two turtle doves,
#
# And a partridge in a pear tree.
#
# Your task is to write a program that displays the complete lyrics for The Twelve Days of Christmas. Write a function that takes the verse number as its only parameter and displays the specified verse of the song. Then call that function 12 times with integers that increase from 1 to 12. Each item that is sent to the recipient in the song should only appear once in your program, with the possible exception of the partridge. It may appear twice if that helps you handle the difference between “A partridge in a pear tree” in the first verse and “And a partridge in a pear tree” in the subsequent verses. Import your solution to Exercise 85 to help you complete this exercise.
# Exercise 87: Center a String in the Terminal(Solved—31 Lines)
#
# Write a function that takes a string of characters as its first parameter, and the width of the terminal in characters as its second parameter. Your function should return a new string that consists of the original string and the correct number of leading spaces so that the original string will appear centered within the provided width when it is printed. Do not add any characters to the end of the string. Include a main program that demonstrates your function.
# Exercise 88: Is it a Valid Triangle?(33 Lines)
#
# If you have 3 straws, possibly of differing lengths, it may or may not be possible to lay them down so that they form a triangle when their ends are touching. For example, if all of the straws have a length of 6 inches. then one can easily construct an equilateral triangle using them. However, if one straw is 6 inches. long, while the other two are each only 2 inches. long, then a triangle cannot be formed. In general, if any one length is greater than or equal to the sum of the other two then the lengths
# cannot be used to form a triangle. Otherwise they can form a triangle. Write a function that determines whether or not three lengths can form a triangle. The function will take 3 parameters and return a Boolean result. In addition, write a program that reads 3 lengths from the user and demonstrates the behaviour of this function.
# Exercise 89: Capitalize It (Solved—48 Lines)
#
# Many people do not use capital letters correctly, especially when typing on small devices like smart phones. In this exercise, you will write a function that capitalizes the appropriate characters in a string. A lowercase “i” should be replaced with an uppercase “I” if it is both preceded and followed by a space. The first character in the string should also be capitalized, as well as the first non-space character after a “.”, “!” or “?”. For example, if the function is provided with the string “what time
# do i have to be there? what’s the address?” then it should return the string “What time do I have to be there? What’s the address?”. Include a main program that reads a string from the user, capitalizes it using your function, and displays the result.
# Exercise 90: Does a String Represent an Integer? (Solved—30 Lines)
#
# In this exercise you will write a function named isInteger that determines whether or not the characters in a string represent a valid integer. When determining if a string represents an integer you should ignore any leading or trailing white space. Once this white space is ignored, a string represents an integer if its length is at least 1 and it only contains digits, or if its first character is either + or - and the first character is followed by one or more characters, all of which are digits. Write a main program that reads a string from the user and reports whether or not it represents an integer. Ensure that the main program will not run if the file containing your solution is imported into another program.
#
# Hint: You may find the lstrip, rstrip and/or strip methods for strings helpful when completing this exercise. Documentation for these methods is available online.
# Exercise 91: Operator Precedence(30 Lines)
#
# Write a function named precedence that returns an integer representing the precedence of a mathematical operator. A string containing the operator will be passed to the function as its only parameter. Your function should return 1 for + and -, 2 for * and /, and 3 for ˆ. If the string passed to the function is not one of these operators then the function should return -1. Include a main program that reads an operator from the user and either displays the operator’s precedence or an error message indicating that the input was not an operator. Your main program should only run when the file containing your solution has not been imported into another program.
#
# In this exercise, along with others that appear later in the book, we will use ˆ to represent exponentiation. Using ˆ instead of Python’s choice of ** will make these exercises easier because an operator will always be a single character.
# Exercise 92: Is a Number Prime? (Solved—28 Lines)
#
# A prime number is an integer greater than 1 that is only divisible by one and itself. Write a function that determines whether or not its parameter is prime, returning True if it is, and False otherwise. Write a main program that reads an integer from the user and displays a message indicating whether or not it is prime. Ensure that the main program will not run if the file containing your solution is imported into another program.
# Exercise 93: Next Prime (27 Lines)
#
# In this exercise you will create a function named nextPrime that finds and returns the first prime number larger than some integer, n. The value of n will be passed to the function as its only parameter. Include a main program that reads an integer from the user and displays the first prime number larger than the entered value. Import and use your solution to Exercise 92 while completing this exercise
# Exercise 94: Random Password (Solved—33 Lines)
#
# Write a function that generates a random password. The password should have a random length of between 7 and 10 characters. Each character should be randomly selected from positions 33 to 126 in the ASCII table. Your function will not take any parameters. It will return the randomly generated password as its only result. Display the randomly generated password in your file’s main program. Your main
# program should only run when your solution has not been imported into another file.
#
# Hint: You will probably find the chr function helpful when completing thisexercise. Detailed information about this function is available online.
# Exercise 95: Random License Plate (45 Lines)
#
# In a particular jurisdiction, older license plates consist of three letters followed by three numbers. When all of the license plates following that pattern had been used, the format was changed to four numbers followed by three letters. Write a function that generates a random license plate. Your function should have approximately equal odds of generating a sequence of characters for an old license
# plate or a new license plate. Write a main program that calls your function and displays the randomly generated license plate.
# Exercise 96: Check a Password (Solved—40 Lines)
#
# In this exercise you will write a function that determines whether or not a password is good. We will define a good password to be a one that is at least 8 characters long and contains at least one uppercase letter, at least one lowercase letter, and at least one number. Your function should return true if the password passed to it as its only parameter is good. Otherwise it should return false. Include a main program that reads a password from the user and reports whether or not it is good. Ensure that your main program only runs when your solution has not been imported into
# another file.
# Exercise 97: Random Good Password (22 Lines)
#
# Using your solutions to Exercises 94 and 96, write a program that generates a random good password and displays it. Count and display the number of attempts that were needed before a good password was generated. Structure your solution so that it imports the functions you wrote previously and then calls them from a function named main in the file that you create for this exercise.
# Exercise 98: Hexadecimal and Decimal Digits (41 Lines)
#
# Write two functions, hex2int and int2hex, that convert between hexadecimal digits (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E and F) and base 10 integers. The hex2int function is responsible for converting a string containing a single hexadecimal digit to a base 10 integer, while the int2hex function is responsible for converting an integer between 0 and 15 to a single hexadecimal digit. Each function
# will take the value to convert as its only parameter and return the converted value as the function’s only result. Ensure that the hex2int function works correctly for both uppercase and lowercase letters. Your functions should end the program with a meaningful error message if an invalid parameter is provided.
# Exercise 99: Arbitrary Base Conversions (Solved—61 Lines)
# Write a program that allows the user to convert a number from one base to another. Your program should support bases between 2 and 16 for both the input number and the result number. If the user chooses a base outside of this range then an appropriate error message should be displayed and the program should exit. Divide your program into several functions, including a function that converts from an arbitrary base to base 10, a function that converts from base 10 to an arbitrary base, and a main program that reads the bases and input number from the user. You may find your solutions to Exercises 77, 78 and 98 helpful when completing this exercise.
# Exercise 100: Days in a Month (47 Lines)
#
# Write a function that determines how many days there are in a particular month. Your function will take two parameters: The month as an integer between 1 and 12, and the year as a four digit integer. Ensure that your function reports the correct number of days in February for leap years. Include a main program that reads a month and year from the user and displays the number of days in that month. You may find your solution to Exercise 57 helpful when solving this problem.
# Exercise 101: Reduce a Fraction to Lowest Terms (Solved—47 Lines)
#
# Write a function that takes two positive integers that represent the numerator and denominator of a fraction as its only two parameters. The body of the function should reduce the fraction to lowest terms and then return both the numerator and denominator of the reduced fraction as its result. For example, if the parameters passed to the function are 6 and 63 then the function should return 2 and 21. Include a main program that allows the user to enter a numerator and denominator. Then your program should display the reduced fraction.
#
# Hint: In Exercise 75 you wrote a program for computing the greatest common divisor of two positive integers. You may find that code useful when completing this exercise.
# Exercise 102: Reduce Measures (Solved—83 Lines)
#
# Many recipe books still use cups, tablespoons and teaspoons to describe the volumes of ingredients used when cooking or baking. While such recipes are easy enough to follow if you have the appropriate measuring cups and spoons, they can be difficult to double, triple or quadruple when cooking Christmas dinner for the entire extended family. For example, a recipe that calls for 4 tablespoons of an ingredient requires 16 tablespoons when quadrupled. However, 16 tablespoons would be better expressed
# (and easier to measure) as 1 cup. Write a function that expresses an imperial volume using the largest units possible. The function will take the number of units as its first parameter, and the unit of measure (cup, tablespoon or teaspoon) as its second parameter. Return a string representing the measure using the largest possible units as the function’s only result. For example, if the function is provided with parameters representing 59 teaspoons then it should return the string “1 cup, 3 tablespoons, 2 teaspoons”.
#
# Hint: One cup is equivalent to 16 tablesspoon. One tablespoon is equivalent to 3 teaspoons
# Exercise 103: Magic Dates (Solved—26 Lines)
#
# A magic date is a date where the day multiplied by the month is equal to the two digit year. For example, June 10, 1960 is a magic date because June is the sixth month, and 6 times 10 is 60, which is equal to the two digit year. Write a function that determines whether or not a date is a magic date. Use your function to create a main program that finds and displays all of the magic dates in the 20th century. You will probably find your solution to Exercise 100 helpful when completing this exercise.
| The Python Workbook Function Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Creating a waterfall chart using Bokeh
#
# This is the notebook associated with the article at [Pbpython.com](http://pbpython.com/bokeh-bullet-waterfall.html)
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.models.formatters import NumeralTickFormatter
import pandas as pd
output_notebook()
# +
# Create the initial dataframe
index = ['sales','returns','credit fees','rebates','late charges','shipping']
data = {'amount': [350000,-30000,-7500,-25000,95000,-7000]}
df = pd.DataFrame(data=data,index=index)
# Determine the total net value by adding the start and all additional transactions
net = df['amount'].sum()
# -
df
# +
# Create additional columns that we will use to build the waterfall
df['running_total'] = df['amount'].cumsum()
df['y_start'] = df['running_total'] - df['amount']
# Where do we want to place the label
df['label_pos'] = df['running_total']
# -
df
# We need to have a net column at the end with the totals and a full bar
df_net = pd.DataFrame.from_records([(net, net, 0, net)],
                     columns=['amount', 'running_total', 'y_start', 'label_pos'],
                     index=["net"])
# Fixed: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent.
df = pd.concat([df, df_net])
df
# +
# We want to color the positive values gray and the negative red
df['color'] = 'grey'
df.loc[df.amount < 0, 'color'] = 'red'
# The 10000 factor is used to make the text positioned correctly.
# You will need to modify if the values are significantly different
df.loc[df.amount < 0, 'label_pos'] = df.label_pos - 10000
df["bar_label"] = df["amount"].map('{:,.0f}'.format)
# -
df
# +
# Build the Bokeh figure
# Limit the tools to only these three
TOOLS = "box_zoom,reset,save"
# Build the source data off the df dataframe
source = ColumnDataSource(df)
# Create the figure and assign range values that look good for the data set
p = figure(tools=TOOLS, x_range=list(df.index), y_range=(0, net+40000), plot_width=800, title = "Sales Waterfall")
p.grid.grid_line_alpha=0.3
# Add the segments
p.segment(x0='index', y0='y_start', x1="index", y1='running_total', source=source, color="color", line_width=55)
# Format the y-axis as dollars
p.yaxis[0].formatter = NumeralTickFormatter(format="($ 0 a)")
p.xaxis.axis_label = "Transactions"
# Add the labels
labels = LabelSet(x='index', y='label_pos', text='bar_label', text_font_size="8pt", level='glyph',
x_offset=-20, y_offset=0, source=source)
p.add_layout(labels)
# -
show(p)
| notebooks/16_Bokeh-Waterfall.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from warehouse import LINEFEED
from HOFs import *
lines = [""" IF *PF-KEY EQ 'PF3' OR= 'PF15' OR= 'PF17'"""]
line = """ IF *PF-KEY EQ 'PF3' OR= 'PF15' OR= 'PF17'"""
wrd1 = 'OR='
lw = len(wrd1)
idx = line[lw:].index(wrd1)+2
wrd1 + ' ' + line[lw:idx]
ll = len(wrd1 + ' ' + line[lw:idx])
line[ll:]
wrd1 = word(line, 1)
wrd1
# NOTE(review): Python 2 scratch cell (xrange; `words` comes from the HOFs
# project import and LINEFEED from warehouse — neither visible here).
joinLines = []  # fixed: was never initialised before being appended to
for line in lines:
    wrd = words(line)
    for w in xrange(1, wrd[0]):
        if wrd[1][w] in LINEFEED:
            # Fixed a syntax error here: the original read
            # `line[len(wrd[1][w]):]line.index(wrd[1][w])])` (mismatched
            # brackets). The intended slice appears to run from the end of
            # the keyword to its next occurrence — TODO confirm intent.
            joinLines.append(wrd[1][w] + ' '
                             + line[len(wrd[1][w]):line.index(wrd[1][w])])
            line = line[line.index(wrd[1][w]):]
    joinLines.append(line)
joinLines
| Util/split_cmd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''plot'': conda)'
# language: python
# name: python37764bitplotconda2c267f18819b461ab1125c6b70f3e730
# ---
# !conda env list
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
# %matplotlib inline
# %config InlineBackend.figure_format="retina"
plt.style.use("fivethirtyeight")
mpl.rcParams['font.family'] = 'Avenir'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
sns.__version__
import os
os.chdir('/Users/pengl7/Downloads/WGS/compare-variants/NIST/')
# %ls -lth
# the long_cleared_QD has removed the reads with DP=0 compared to long_cleared.csv
# it also has correct calculation of QD (QUAL/DP)
# it has 339850 WITH 10 couts less than long_cleared.csv
df = pd.read_csv("long_cleared_QD.csv")
print(df.shape)
print()
df.head()
df.dtypes
# for the sake of plotting, keep the TYPE as float instead of categorical
#df["TYPE"] = df["TYPE"].astype("category")
df["UNMATCH"] = df["UNMATCH"].astype("category")
df.describe()
df.to_csv("long_cleared_QD.csv", index=None)
# +
# some choices of plot paramters
# using mulitple="stack"
# ax=sns.displot(df, x="QUAL", hue="UNMATCH", kind="kde", multiple="stack", log_scale=True, legend=False)
# missleading in the edge
# using fill=True
# ax=sns.displot(df, x="QUAL", hue="UNMATCH", kind="kde", fill=True, log_scale=True, legend=False)
# color is too light
# -
ax=sns.displot(df, x="QUAL", hue="UNMATCH", kind="kde", multiple="stack", log_scale=True, legend=False)
plt.legend(labels=['UnMatch', 'Match'])
ax.set(xlabel="QUAL (log)")
ax=sns.displot(df, x="QUAL", hue="UNMATCH", kind="kde", log_scale=True, legend=False)
plt.legend(labels=['UnMatch', 'Match'])
ax.set(xlabel="QUAL (log)")
ax=sns.displot(df, x="QUAL", hue="UNMATCH", kind="kde", fill=True, log_scale=True, legend=False)
plt.legend(labels=['UnMatch', 'Match'])
ax.set(xlabel="QUAL (log)")
sns.displot(df, x="AF", hue="UNMATCH", kind="kde", fill=True, legend=False)
plt.legend(loc='upper left', labels=['UnMatch', 'Match'])
sns.displot(df, x="AF", hue="UNMATCH", legend=False, kind="kde")
plt.legend(loc='upper left', labels=['UnMatch', 'Match'])
# +
# get the variants with AF < 0.05
filt_af = df["AF"]< 0.05
maf_df = df[filt_af]
print(maf_df.shape)
# +
# calculate the number of unmatch in these 897
maf_df["UNMATCH"].value_counts()
# -
sns.displot(maf_df, x="AF", hue="UNMATCH", kind="kde", legend=False)
plt.legend(labels=['UnMatch', 'Match'])
844/(844+53)
sns.displot(df, x="GQ", hue="UNMATCH", kind="kde", fill=True, legend=False)
plt.legend(loc='upper left', labels=['UnMatch', 'Match'])
ax=sns.displot(df, x="FS", hue="UNMATCH", kind="kde", fill=True, legend=False)
ax.set(xlim=(0,20))
plt.legend(labels=['UnMatch', 'Match'])
np.log(60)
ax=sns.displot(df, x="MQ", hue="UNMATCH", kind="kde", fill=True,legend=False)
ax.set(xlim=(55,65))
plt.legend(loc="upper left", labels=['UnMatch', 'Match'])
df["MQ"].max()
df["MQ"].min()
ax = sns.displot(df, x="DP", hue="UNMATCH", kind="kde", fill=True, legend=False)
ax.set(xlim=(0, 100))
plt.legend(labels=['UnMatch', 'Match'])
print(len(df[df["DP"]==0]))
print(len(df[df["DP"]<=10]))
ax = sns.displot(df, x="QD", hue="UNMATCH", kind="kde", fill=True, legend=False)
ax.set(xlim=(0, 100))
plt.legend(labels=['UnMatch', 'Match'])
ax=sns.displot(df, x="TYPE", hue="UNMATCH", multiple="stack", legend=False)
plt.legend(labels=['UnMatch', 'Match'])
ax.set_xticklabels(["", 'SNP', "", 'INDEL'])
df["TYPE"] = df["TYPE"].astype("int64")
ax=sns.displot(df, x="TYPE", hue="UNMATCH", multiple="stack", legend=False, discrete=True, shrink=0.3)
plt.legend(labels=['UnMatch', 'Match'])
ax.set_xticklabels(['', 'SNP', '', 'INDEL'])
# ## Check the quality of the whole vcf files
# +
# draw venn diagram
# -
import matplotlib_venn as venn
# LONG VS REF ON chr1
# Found 305039 sites common to both files.
# Found 34553 sites only in main file.
# Found 9939 sites only in second file.
# Found 290 non-matching overlapping sites.
# After filtering, kept 339882 out of a possible 339882 Sites
def cal_overlap(c, x_uniq, y_uniq):
    """Print and return the overlap percentage of each call set.

    c      -- number of sites shared by both files
    x_uniq -- sites unique to the first file
    y_uniq -- sites unique to the second file

    Returns (first_overlap_pct, second_overlap_pct) so callers can reuse
    the values (the original only printed them and returned None).
    """
    perX = 100 * c / (c + x_uniq)
    print("1st overlap: ", perX)
    perY = 100 * c / (c + y_uniq)
    print("2nd overlap: ", perY)
    return perX, perY
cal_overlap(305039,34553,9939)
# +
color1 = "darkorange"
color2 = "teal"
plt.title('comparison of long read NIST with its strandard reference (chr1)')
v1=venn.venn2(subsets = (34843, 9939, 305039), set_labels = ('Long read NIST', 'Standard NIST'),
set_colors=(color1, color2))
# v1.get_label_by_id('10').set_text('89.8%')
# v1.get_label_by_id('01').set_text('96.9%')
# -
# ## Check the whole call sets
# REF VS LONG
# Found 3901271 sites common to both files.
# Found 129076 sites only in main file.
# Found 593117 sites only in second file.
# Found 8854 non-matching overlapping sites.
# After filtering, kept 4039201 out of a possible 4039201 Sites
cal_overlap(3901271,593117,129076)
plt.title('comparison of long read NIST with its strandard reference')
v1=venn.venn2(subsets = (593117,129076,3901271), set_labels = ('Long read NIST', 'Standard NIST'),
set_colors=(color1, color2))
# v1.get_label_by_id('10').set_text('89.8%')
# v1.get_label_by_id('01').set_text('96.9%')
# REF vs SHORT
# Found 3850385 sites common to both files.
# Found 108615 sites only in main file.
# Found 832304 sites only in second file.
# Found 80201 non-matching overlapping sites.
# After filtering, kept 4039201 out of a possible 4039201 Sites
color3="blue"
cal_overlap(3850385,832304,108615)
plt.title('comparison of short read NIST with its strandard reference')
v2=venn.venn2(subsets = (832304,108615,3850385), set_labels = ('Short read NIST', 'Standard NIST'),
set_colors=(color3, color2))
# compare short and long
# LONG VS SHORT
# Found 4154024 sites common to both files.
# Found 235884 sites only in main file.
# Found 495532 sites only in second file.
# Found 113334 non-matching overlapping sites.
# After filtering, kept 4503242 out of a possible 4503242 Sites
# cal_overlap(common, left_uniq, right_uniq)
cal_overlap(4154024, 235884, 495532)
plt.title('comparison of long read NIST with short read NIST')
v4=venn.venn2(subsets = (235884, 495532,4154024), set_labels = ('Long read NIST', 'Short read NIST'),
set_colors=(color1, color3))
# +
# calcualte the three sets
# -
# # calculate precison, recall of the sequencing platform
# ## Calculate metrics:
#
# Ref vs long:
#
# REF VS LONG
# Found 3901271 sites common to both files.
# Found 129076 sites only in main file.
# Found 593117 sites only in second file.
# Found 8854 non-matching overlapping sites.
# After filtering, kept 4039201 out of a possible 4039201 Sites
#
# TP = the overlapping: i.e, common
# FN = REF_uniqe
# FP = Long_unique
# TN = 0
#
#
# recall = 100* TP/(TP+FN)
# precision = 100*TP/(TP+FP)
# accuracy = 100* (TP+FN)/(total)
#
#
# +
def cal_metrics(overlapping, ref_uniq, sample_uniq, TN=0):
    """Print (and return) recall, precision and accuracy of a call-set comparison.

    overlapping -- true positives: sites found in both reference and sample
    ref_uniq    -- false negatives: sites only in the reference
    sample_uniq -- false positives: sites only in the sample
    TN          -- true negatives (unknowable for variant calls; defaults to 0)
    """
    recall = 100 * overlapping / (overlapping + ref_uniq)
    precision = 100 * overlapping / (overlapping + sample_uniq)
    # Fixed: accuracy is (TP + TN) / total; the original used (TP + FN),
    # which counted the missed reference sites as correct calls.
    accuracy = 100 * (overlapping + TN) / (overlapping + sample_uniq + ref_uniq + TN)
    print(f'Recall is {recall:.2f}%')
    print(f'Precision is {precision:.2f}%')
    print(f'Accuracy is {accuracy:.2f}%')
    return recall, precision, accuracy
# -
cal_metrics(3901271, 129076, 593117)
# ## Do filtering on FS and QD
# +
# It is better fo remove FS > 60 for SNPs and FS > 200 for Indel
# Also remove those with Guality by Depth 2
# OR remove those with DP < 10
# or AF < 0.05
# or INDELS
# -
# the FS scores in our data is
# the value fo FS don't use ""
filt1= (df["FS"] > 60)&(df["TYPE"]==0)
filt2 = (df["FS"] > 200)&(df["TYPE"]==1)
filt3 = (df["QD"] < 2)
filt4 = (df["DP"] < 10)
filt5 = (df["AF"] < 0.05)
filt6 = (df["AF"] < 0.01)
filt7 = (df["MQ"] < 40)
print("SNPs with FS > 60: ", len(df[filt1]))
print("INDEL with FS > 200: ", len(df[filt2]))
print("variants with QD < 2: ", len(df[filt3]))
print("variants with DP < 10: ", len(df[filt4]))
print("variants with AF < 0.05: ", len(df[filt5]))
print("variants with AF < 0.01: ", len(df[filt6]))
print("variants with MQ < 40: ", len(df[filt7]))
df.shape
# Apply the hard filters sequentially, printing the shape after each step.
df_filt_FS = df[(~filt1) & (~filt2)]  # drop high-FS SNPs/indels
print(df_filt_FS.shape)
df_filt_FS_QD = df_filt_FS[df_filt_FS["QD"] > 2]
# Fixed: the original printed the undefined name df_filt_QD (NameError).
print(df_filt_FS_QD.shape)
df_filt_FS_QD_AF = df_filt_FS_QD[df_filt_FS_QD["AF"] >= 0.01]
print(df_filt_FS_QD_AF.shape)
df_filt_FS_QD_AF_MQ = df_filt_FS_QD_AF[df_filt_FS_QD_AF["MQ"] >= 40]
print(df_filt_FS_QD_AF_MQ.shape)
df_filt_FS_QD_AF_MQ_DP = df_filt_FS_QD_AF_MQ[df_filt_FS_QD_AF_MQ["DP"] >= 10]
print(df_filt_FS_QD_AF_MQ_DP.shape)
# Fixed: the original indexed the undefined name df_filt_FS_QD_AF_DP here.
# MQ >= 40 was already applied above, so this is just an alias kept so the
# downstream variable names (used in the evaluation loop below) still exist.
df_filt_FS_QD_AF_DP_MQ = df_filt_FS_QD_AF_MQ_DP
print(df_filt_FS_QD_AF_DP_MQ.shape)
# remove INDEL
df_filt_FS_QD_AF_DP_MQ_SNP = df_filt_FS_QD_AF_DP_MQ[df_filt_FS_QD_AF_DP_MQ["TYPE"]!=1]
print(df_filt_FS_QD_AF_DP_MQ_SNP.shape)
# ## Calculate the accuracy after each step of filtering
#
# print(f'pi = {x:.2f}')
def cal_cal_2(df, column="UNMATCH"):
    """Print and return the match accuracy (%) from the counts in *column*.

    NOTE(review): this assumes the most frequent value in *column* means
    "match" and the second most frequent means "unmatch" (value_counts is
    sorted by descending frequency). If unmatched calls ever outnumber
    matched ones, the reported accuracy is inverted -- verify against the
    actual column values.

    Returns:
        The accuracy as a percentage (float).
    """
    counts = df[column].value_counts()
    match = counts.iloc[0]
    unmatch = counts.iloc[1]
    accuracy = 100 * match / (match + unmatch)
    print(f'{accuracy:.2f}%')
    return accuracy
# Accuracy on the unfiltered call set.
cal_cal_2(df)
# Accuracy after each successive filtering step (expected to increase).
for item in [df, df_filt_FS, df_filt_FS_QD, df_filt_FS_QD_AF, df_filt_FS_QD_AF_MQ, df_filt_FS_QD_AF_MQ_DP, df_filt_FS_QD_AF_DP_MQ_SNP]:
    cal_cal_2(item)
# %ls -lth
# NOTE(review): reads a summary CSV produced outside this notebook --
# confirm the file exists in the working directory.
after_filt_df = pd.read_csv("sum of variants filtering.csv")
| .ipynb_checkpoints/NIST-breakdown-QC-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title:migration,new"
# # Vertex SDK: Train & deploy a TensorFlow model with custom container (aka pre-built containers)
#
# + [markdown] id="install_aip"
# ## Installation
#
# Install the latest (preview) version of Vertex SDK.
#
# + id="KEMtN2uGdx7-"
# ! pip3 install -U google-cloud-aiplatform --user
# + [markdown] id="install_storage"
# Install the Google *cloud-storage* library as well.
#
# + id="dHmqkk5jdx7-"
# ! pip3 install google-cloud-storage
# + [markdown] id="restart"
# ### Restart the Kernel
#
# Once you've installed the Vertex SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
#
# + id="067J3q5Wdx7_"
import os
# NOTE(review): the `and False` makes this branch unreachable, so the
# automatic kernel restart after package installation is disabled --
# remove `and False` to re-enable it.
if not os.getenv("AUTORUN") and False:
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="before_you_begin"
# ## Before you begin
#
# ### GPU run-time
#
# *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
#
# ### Set up your GCP project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a GCP project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
#
# 4. [Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebooks.
#
# 5. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
#
# + id="set_project_id"
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + id="autoset_project_id"
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    # NOTE(review): `shell_output` is produced by the `!gcloud` cell magic
    # below (commented out by the jupytext conversion) and only exists when
    # this runs as a notebook.
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend when possible, to choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You cannot use a Multi-Regional Storage bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see [Region support for Vertex AI services](https://cloud.google.com/vertex-ai/docs/general/locations)
#
# + id="mH8isSmUdx8B"
REGION = "us-central1" # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
#
# + id="rP3ppCOxdx8C"
from datetime import datetime
# Session timestamp appended to resource names to keep them unique.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="gcp_authenticate"
# ### Authenticate your GCP account
#
# **If you are using Google Cloud Notebooks**, your environment is already
# authenticated. Skip this step.
#
# *Note: If you are on an Vertex notebook and run the cell, the cell knows to skip executing the authentication steps.*
#
# + id="_0rTTHRRdx8C"
import os
import sys

# Authenticate the environment against Google Cloud.
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Vertex, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this tutorial in a notebook locally, replace the string
    # below with the path to your service account key and run this cell to
    # authenticate your Google Cloud account.
    else:
        # %env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
        # Log in to your account on Google Cloud
        # ! gcloud auth login
        # Fixed: the magics above are comments in this .py form, leaving the
        # `else:` block empty -- a SyntaxError. `pass` keeps it valid.
        pass
# + [markdown] id="bucket:batch_prediction"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets.
#
# + id="bucket"
BUCKET_NAME = "[your-bucket-name]"  # @param {type:"string"}
# + id="autoset_bucket"
# Fall back to an auto-generated, per-session bucket name when the
# placeholder was left unchanged.
if BUCKET_NAME in ("", None, "[your-bucket-name]"):
    BUCKET_NAME = f"{PROJECT_ID}aip-{TIMESTAMP}"
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
#
# + id="iFVSyRUndx8E"
# ! gsutil mb -l $REGION gs://$BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
#
# + id="WSUveW5Ydx8E"
# ! gsutil ls -al gs://$BUCKET_NAME
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
#
# + [markdown] id="import_aip"
# #### Import Vertex SDK
#
# Import the Vertex SDK into our Python environment.
#
# + id="_WA8wIGjdx8F"
import os
import sys
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# + [markdown] id="aip_constants"
# #### Vertex AI constants
#
# Setup up the following constants for Vertex AI:
#
# - `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services.
# - `API_PREDICT_ENDPOINT`: The Vertex AI API service endpoint for prediction.
# - `PARENT`: The Vertex AI location root path for dataset, model and endpoint resources.
#
# + id="rBI-1o0Rdx8F"
# Regional Vertex AI API service endpoint.
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = f"projects/{PROJECT_ID}/locations/{REGION}"
# + [markdown] id="clients"
# ## Clients
#
# The Vertex SDK works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the server (Vertex).
#
# You will use several clients in this tutorial, so set them all up upfront.
#
# - Dataset Service for managed datasets.
# - Model Service for managed models.
# - Pipeline Service for training.
# - Endpoint Service for deployment.
# - Job Service for batch jobs and custom training.
# - Prediction Service for serving. *Note*: Prediction has a different service endpoint.
#
# + id="7OswurIxdx8G"
# All service clients share the same regional API endpoint.
client_options = {"api_endpoint": API_ENDPOINT}


def create_model_client():
    """Client for managed model resources."""
    return aip.ModelServiceClient(client_options=client_options)


def create_endpoint_client():
    """Client for endpoint deployment resources."""
    return aip.EndpointServiceClient(client_options=client_options)


def create_prediction_client():
    """Client for online prediction serving."""
    return aip.PredictionServiceClient(client_options=client_options)


def create_job_client():
    """Client for batch-prediction and custom training jobs."""
    return aip.JobServiceClient(client_options=client_options)


# One client per service, keyed by a short role name.
clients = {
    "model": create_model_client(),
    "endpoint": create_endpoint_client(),
    "prediction": create_prediction_client(),
    "job": create_job_client(),
}

for client in clients.items():
    print(client)
# + [markdown] id="bBZ62eSJdx8I"
# ## Prepare a trainer script
# + [markdown] id="8eNZwmHIdx8I"
# ### Package assembly
#
# + id="vSF5zUqddx8J"
# ! rm -rf cifar
# ! mkdir cifar
# ! touch cifar/README.md
# Minimal setup.cfg: disable egg-info build/date tags.
setup_cfg = "[egg_info]\n\
tag_build =\n\
tag_date = 0"
# ! echo "$setup_cfg" > cifar/setup.cfg
# setup.py declaring the training package and its pip dependency.
setup_py = "import setuptools\n\
# Requires TensorFlow Datasets\n\
setuptools.setup(\n\
install_requires=[\n\
'tensorflow_datasets==1.3.0',\n\
],\n\
packages=setuptools.find_packages())"
# ! echo "$setup_py" > cifar/setup.py
# Package metadata written to PKG-INFO.
# NOTE(review): "<EMAIL>" looks like a redaction placeholder -- replace
# with a real contact address before publishing.
pkg_info = "Metadata-Version: 1.0\n\
Name: Custom Training CIFAR-10\n\
Version: 0.0.0\n\
Summary: Demonstration training script\n\
Home-page: www.google.com\n\
Author: Google\n\
Author-email: <EMAIL>\n\
License: Public\n\
Description: Demo\n\
Platform: Vertex AI"
# ! echo "$pkg_info" > cifar/PKG-INFO
# ! mkdir cifar/trainer
# ! touch cifar/trainer/__init__.py
# + [markdown] id="-zNmWOxvdx8J"
# ### Write the docker file contents
#
# + id="KohUv929dx8J"
# %%writefile cifar/Dockerfile
# Base image with TensorFlow 2.1 (CPU) preinstalled.
FROM gcr.io/deeplearning-platform-release/tf2-cpu.2-1
WORKDIR /root
# NOTE(review): this second WORKDIR overrides the previous one, so /root
# is never the working directory.
WORKDIR /
# Copies the trainer code to the docker image.
COPY trainer /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
# + [markdown] id="TALbjjJ6dx8K"
# ### Task.py contents
# + id="x1ZpANzodx8L"
# %%writefile cifar/trainer/task.py
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
# Silence tfds progress bars in the training-job logs.
tfds.disable_progress_bar()
# Command-line flags supplied by the Vertex AI training job spec.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default='/tmp/saved_model', type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.01, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=10, type=int,
                    help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=200, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
args = parser.parse_args()
# Log the runtime environment for debugging distributed setups.
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Choose a tf.distribute strategy from the --distribute flag:
# 'single' = one device (GPU when available), 'mirror' = all GPUs on one
# host, 'multi' = multi-worker mirrored across hosts.
if args.distribute == 'single':
    if tf.test.is_gpu_available():
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Dataset shuffle-buffer size and per-replica batch size.
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
    """Return the CIFAR-10 train split, scaled to [0, 1], cached, shuffled and repeated."""

    def scale(image, label):
        # Normalize uint8 pixels to float32 in [0, 1].
        return tf.cast(image, tf.float32) / 255.0, label

    datasets, info = tfds.load(name='cifar10',
                               with_info=True,
                               as_supervised=True)
    train = datasets['train']
    return train.map(scale).cache().shuffle(BUFFER_SIZE).repeat()
def build_and_compile_cnn_model():
    """Build a small two-conv-layer CNN for CIFAR-10 and compile it with SGD."""
    layers = tf.keras.layers
    model = tf.keras.Sequential([
        layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
        layers.MaxPooling2D(),
        layers.Conv2D(32, 3, activation='relu'),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(10, activation='softmax'),
    ])
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
        metrics=['accuracy'])
    return model
# Scale the batch size by the replica count so each replica processes
# BATCH_SIZE examples per step.
NUM_WORKERS = strategy.num_replicas_in_sync
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
# Build and compile inside the strategy scope so variables are distributed.
with strategy.scope():
    model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
# Export the trained model (a gs:// URI when run on Vertex AI).
model.save(args.model_dir)
# + [markdown] id="ZE6B22qpdx8L"
# ### Build the container locally
# + id="TYzH5IFkdx8L"
# Container Registry tag for the custom training image.
TRAIN_IMAGE = f"gcr.io/{PROJECT_ID}/cifar_migration:v1"
# ! docker build cifar -t $TRAIN_IMAGE
# + [markdown] id="OYJPD8uLdx8M"
# ### Register your custom container
# + id="fCxMAQKZdx8M"
# ! docker push $TRAIN_IMAGE
# + [markdown] id="text_create_and_deploy_model:migration"
# ## Train a model
# + [markdown] id="0oqIBOSnJjkW"
# ### [projects.locations.customJobs.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.trainingPipelines/create)
# + [markdown] id="WWsBQks0dx8N"
# #### Request
# + id="Pefe5OQOdx8O"
JOB_NAME = "custom_container_" + TIMESTAMP
# Single CPU replica (n1-standard-4) running the custom training container.
WORKER_POOL_SPEC = [
    {
        "replica_count": 1,
        "machine_spec": {"machine_type": "n1-standard-4", "accelerator_count": 0},
        "container_spec": {
            "image_uri": TRAIN_IMAGE,
            "args": [
                # Write the SavedModel under the session bucket.
                "--model-dir=" + "gs://" + BUCKET_NAME + "/" + JOB_NAME,
                "--epochs=" + str(20),
                "--steps=" + str(100),
            ],
        },
    }
]
CUSTOM_JOB = {
    "display_name": JOB_NAME,
    "job_spec": {"worker_pool_specs": WORKER_POOL_SPEC},
}
training_job = aip.CustomJob(**CUSTOM_JOB)
# Preview the request proto as JSON before submitting it.
print(
    MessageToJson(
        aip.CreateCustomJobRequest(parent=PARENT, custom_job=training_job).__dict__[
            "_pb"
        ]
    )
)
# + [markdown] id="datasets_import:migration,new,request"
# *Example output*:
# ```
# {
# "parent": "projects/migration-ucaip-training/locations/us-central1",
# "customJob": {
# "displayName": "custom_container_20210226022223",
# "jobSpec": {
# "workerPoolSpecs": [
# {
# "machineSpec": {
# "machineType": "n1-standard-4"
# },
# "replicaCount": "1",
# "containerSpec": {
# "imageUri": "gcr.io/migration-ucaip-training/cifar_migration:v1",
# "args": [
# "--model-dir=gs://migration-ucaip-trainingaip-20210226022223/custom_container_20210226022223",
# "--epochs=20",
# "--steps=100"
# ]
# }
# }
# ]
# }
# }
# }
# ```
#
# + [markdown] id="Mul--swidx8P"
# #### Call
# + id="7CulTGVSdx8P"
# Submit the custom training job.
request = clients["job"].create_custom_job(parent=PARENT, custom_job=training_job)
# + [markdown] id="gy3LcQ3ydx8P"
# #### Response
# + id="uRql9mxvdx8P"
print(MessageToJson(request.__dict__["_pb"]))
# + [markdown] id="sQ4EYFoqdx8P"
# *Example output*:
# ```
# {
# "name": "projects/116273516712/locations/us-central1/customJobs/957560278583607296",
# "displayName": "custom_container_20210226022223",
# "jobSpec": {
# "workerPoolSpecs": [
# {
# "machineSpec": {
# "machineType": "n1-standard-4"
# },
# "replicaCount": "1",
# "diskSpec": {
# "bootDiskType": "pd-ssd",
# "bootDiskSizeGb": 100
# },
# "containerSpec": {
# "imageUri": "gcr.io/migration-ucaip-training/cifar_migration:v1",
# "args": [
# "--model-dir=gs://migration-ucaip-trainingaip-20210226022223/custom_container_20210226022223",
# "--epochs=20",
# "--steps=100"
# ]
# }
# }
# ]
# },
# "state": "JOB_STATE_PENDING",
# "createTime": "2021-02-26T02:27:53.406955Z",
# "updateTime": "2021-02-26T02:27:53.406955Z"
# }
# ```
#
# + id="training_pipeline_id:migration,new,response"
# The full unique ID for the custom training job
custom_training_id = request.name
# The short numeric ID for the custom training job
custom_training_short_id = custom_training_id.split("/")[-1]
print(custom_training_id)
# + [markdown] id="L2VLjDD1dx8Q"
# ### [projects.locations.customJobs.get](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.trainingPipelines/get)
# + [markdown] id="N78Cr-wKdx8Q"
# #### Call
# + id="OiX3rQsrdx8Q"
# Fetch the job once to inspect its current state.
request = clients["job"].get_custom_job(name=custom_training_id)
# + [markdown] id="CFvGgOKrdx8R"
# #### Response
# + id="_CtVywL8dx8R"
print(MessageToJson(request.__dict__["_pb"]))
# + [markdown] id="XEdTFuXpdx8R"
# *Example output*:
# ```
# {
# "name": "projects/116273516712/locations/us-central1/customJobs/957560278583607296",
# "displayName": "custom_container_20210226022223",
# "jobSpec": {
# "workerPoolSpecs": [
# {
# "machineSpec": {
# "machineType": "n1-standard-4"
# },
# "replicaCount": "1",
# "diskSpec": {
# "bootDiskType": "pd-ssd",
# "bootDiskSizeGb": 100
# },
# "containerSpec": {
# "imageUri": "gcr.io/migration-ucaip-training/cifar_migration:v1",
# "args": [
# "--model-dir=gs://migration-ucaip-trainingaip-20210226022223/custom_container_20210226022223",
# "--epochs=20",
# "--steps=100"
# ]
# }
# }
# ]
# },
# "state": "JOB_STATE_PENDING",
# "createTime": "2021-02-26T02:27:53.406955Z",
# "updateTime": "2021-02-26T02:27:53.406955Z"
# }
# ```
#
# + id="trainingpipelines_get:migration,new,wait"
# Poll until the training job finishes, then recover the model artifact dir.
while True:
    response = clients["job"].get_custom_job(name=custom_training_id)
    # Fixed: CustomJob.state is a JobState, not a PipelineState; compare
    # against the matching enum (mirrors the batch-prediction loop below).
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        if response.state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        print("Training Time:", response.end_time - response.start_time)
        break
    time.sleep(60)

# model artifact output directory on Google Cloud Storage
# (the value of the container's --model-dir argument)
model_artifact_dir = (
    response.job_spec.worker_pool_specs[0].container_spec.args[0].split("=")[-1]
)
print("artifact location " + model_artifact_dir)
# + [markdown] id="bAWHUeCadx8S"
# ## Deploy the model
# + [markdown] id="sRjgCGMjdx8T"
# ### Load the saved model
# + id="nFThw0Nwdx8T"
import tensorflow as tf
# Reload the SavedModel exported by the training job from Cloud Storage.
model = tf.keras.models.load_model(model_artifact_dir)
# + [markdown] id="Ch-ZOEqVdx8T"
# ### Serving function for image data
# + id="bJoK5szQdx8U"
# Key of the serving model's input tensor; must match the model signature.
CONCRETE_INPUT = "numpy_inputs"


def _preprocess(bytes_input):
    """Decode one JPEG byte string into a (32, 32, 3) float32 image in [0, 1]."""
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype already rescales uint8 [0, 255] -> float32 [0, 1].
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    # Fixed: the previous version divided by 255 a second time here,
    # squashing inputs into [0, 1/255] -- a different range from the
    # [0, 1] scale the model was trained on (see trainer scale()).
    return tf.cast(resized, tf.float32)
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    # Decode every JPEG in the batch into a float image tensor.
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {
        CONCRETE_INPUT: decoded_images
    }  # User needs to make sure the key matches model's input


# Concrete function over the model call with a fixed (batch, 32, 32, 3) input.
m_call = tf.function(model.call).get_concrete_function(
    [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
    # Full serving path: raw JPEG bytes -> preprocessed batch -> class probs.
    images = preprocess_fn(bytes_inputs)
    prob = m_call(**images)
    return prob


# Re-export the model with the bytes-in serving signature attached.
tf.saved_model.save(
    model,
    model_artifact_dir,
    signatures={
        "serving_default": serving_fn,
    },
)
# + [markdown] id="eg_QFIr0dx8U"
# ### Get the serving function signature
# + id="H0F-bxlNdx8U"
# Reload the exported model and read the serving signature's input key,
# which is needed to build prediction request instances.
loaded = tf.saved_model.load(model_artifact_dir)
input_name = list(
    loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", input_name)
# + [markdown] id="endpoints_undeploymodel:migration,new,response"
# *Example output*:
# ```
# Serving function input: bytes_inputs
# ```
#
# + [markdown] id="COwVZtxhJjkW"
# ### [projects.locations.models.upload](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.models/upload)
# + [markdown] id="z7J-ijeydx8V"
# #### Request
# + id="aawZj13Idx8W"
# Serving container spec for the pre-built TF 2.1 CPU prediction image.
container_spec = {
    "image_uri": "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest",
    # Fixed typo: "exmple_env_name" -> "example_env_name" (matches the
    # documented example output below).
    "env": [{"name": "example_env_name", "value": "example_env_value"}],
    "ports": [{"container_port": 8080}],
}

# Model resource to upload; artifact_uri points at the SavedModel directory.
model = {
    "display_name": "custom_container_TF" + TIMESTAMP,
    "metadata_schema_uri": "",
    "artifact_uri": model_artifact_dir,
    "container_spec": container_spec,
}

# Preview the upload request proto as JSON.
print(MessageToJson(aip.UploadModelRequest(parent=PARENT, model=model).__dict__["_pb"]))
# + [markdown] id="M8HZOo3Xdx8W"
# *Example output*:
# ```
# {
# "parent": "projects/migration-ucaip-training/locations/us-central1",
# "model": {
# "displayName": "custom_container_TF20210226022223",
# "containerSpec": {
# "imageUri": "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest",
# "env": [
# {
# "name": "example_env_name",
# "value": "example_env_value"
# }
# ],
# "ports": [
# {
# "containerPort": 8080
# }
# ]
# },
# "artifactUri": "gs://migration-ucaip-trainingaip-20210226022223/custom_container_20210226022223"
# }
# }
# ```
#
# + [markdown] id="cLIJ-zE8dx8W"
# #### Call
# + id="R57zQ1Ctdx8W"
# Upload the model; this returns a long-running operation.
request = clients["model"].upload_model(parent=PARENT, model=model)
# + [markdown] id="QpkjnQY4dx8X"
# #### Response
# + id="RLpp9Wy1dx8X"
# Block until the upload finishes, then print the result proto.
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# + [markdown] id="GiVosZjqdx9F"
# *Example output*:
# ```
# {
# "model": "projects/116273516712/locations/us-central1/models/394223297069318144"
# }
# ```
#
# + id="fmYZPWbKdx9G"
model_id = result.model
# + [markdown] id="make_batch_predictions:migration"
# ## Make batch predictions
#
# + [markdown] id="make_batch_prediction_file:migration,new"
# ### Make a batch prediction file
#
# + id="get_test_items:automl,icn,csv"
import cv2
import numpy as np
from tensorflow.keras.datasets import cifar10

# Take two CIFAR-10 test images and stage them as JPEGs in the bucket.
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
test_image_1, test_label_1 = x_test[0], y_test[0]
test_image_2, test_label_2 = x_test[1], y_test[1]
# Undo the normalization before JPEG-encoding to disk.
cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8))
cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8))
# ! gsutil cp tmp1.jpg gs://$BUCKET_NAME/tmp1.jpg
# ! gsutil cp tmp2.jpg gs://$BUCKET_NAME/tmp2.jpg
# Cloud Storage URIs of the staged test images.
test_item_1 = "gs://" + BUCKET_NAME + "/" + "tmp1.jpg"
test_item_2 = "gs://" + BUCKET_NAME + "/" + "tmp2.jpg"
# + [markdown] id="make_batch_file:automl,image"
# ### Make the batch input file
#
# Let's now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
#
# - `content`: The Cloud Storage path to the image.
# - `mime_type`: The content type. In our example, it is an `jpeg` file.
#
# + id="HgMBCvNOdx9H"
import base64
import json

gcs_input_uri = "gs://" + BUCKET_NAME + "/" + "test.jsonl"

# Write one JSON instance per line: {"<input_name>": {"b64": <jpeg bytes>}}.
# Refactored: loop over the test items instead of duplicating the block,
# and avoid shadowing the builtin `bytes`.
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
    for test_item in (test_item_1, test_item_2):
        raw = tf.io.read_file(test_item)
        b64str = base64.b64encode(raw.numpy()).decode("utf-8")
        data = {input_name: {"b64": b64str}}
        f.write(json.dumps(data) + "\n")
# ! gsutil cat $gcs_input_uri
# + [markdown] id="P6GeKGjVdx9I"
# *Example output*:
# ```
# {"bytes_inputs": {"b64": "/9j/4AAQSkZJRgABAQAAAQ<KEY>LDxMXGx8jJytLT1NX<KEY>"}}
# {"bytes_inputs": {"b64": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCAAgACADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD9qIntrti9vhg3KkLwR69Kbc3FrYskd1LGjOjsqNjJCjLH8Mj8xXw3+yr+3v8ABbUZL2/8L/G/4ja2L0raac/xAvEbTmndtyLFKOd5AwcZwCSccV6X8Xv22/jD4K+L2n+BPA/7H+qeP4v7LSb/AISLQNYjW0ieTmWLfIoUBQiksxA6VxwxtN0VOWn4nTPC1Y1XBHpuqftI6BZ+MrDw/FZSw2dyzRyXl3p8g/eblCgbcjBG/k8dPevU1tCWIKj/AL5r5+8aftTfCqx+H9leeM/i1pXw51aWJvtWkWF1b6ldQnkqnmRqyg9c7fXGag/Zm/aY+HL69d6MPjvr/jVNWm32M19pcgSwREyVZygAJO7PbAFZ08TUjNqpt32/AdSiuVOK2PyC/Zs/4LOfs7/s+fAbQvgz4K/Ywu7rw94Bd4op9WsbfUZ1u5CGlupHBBLSMCd2MYAA4Fe0eGf+Dm/4deO9EuvDvhvSLjSWt7MpPaw+DfNiihYgNvRWK4/hyRjn3r8WvjN8MviF4C+LPiPTvhtZ6lDo8l86W6QswDID0IHUA5x7Ve/ZF1f9pX4C/Gq1+Ifw90PV7e6mgms71o7QP58EowyMrgqwJCnB9K3w+UQxleFF4hw52lzSb5Y3aXM7Juy3dtbHRRzrCu0qlKEl17/fc/W6f/gsjpGtX40z4Zadp1280IVYYPAdsv70nO8ZQnPPToK7z4a/tKftD/ETU7TQPEur6nbpdgMmnrFHak5PUwwquPq3Wvk34QwftUfE/GtfE3xmnhm0LAiy0SwhiupgezSxouzPfb+dfdv7DPwl0rQtcivhZx4Ub1eWQtJu6lmZslmPqfWnmXD+DyjESgsSq1usYyjF+a5tWvkh18+w+IXJQpJeZ//Z"}}
# ```
#
# + [markdown] id="batchpredictionjobs_create:migration,new"
# ### [projects.locations.batchPredictionJobs.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.batchPredictionJobs/create)
#
# + [markdown] id="request:migration"
# #### Request
#
# + id="batchpredictionjobs_create:migration,new,request,icn"
# Batch prediction over the staged JSONL file, writing JSONL results back to
# the bucket from a single n1-standard-2 machine.
batch_prediction_job = {
    "display_name": "custom_container_TF" + TIMESTAMP,
    "model": model_id,
    "input_config": {
        "instances_format": "jsonl",
        "gcs_source": {"uris": [gcs_input_uri]},
    },
    # Model-specific parameters packed into a protobuf Value.
    "model_parameters": ParseDict(
        {"confidenceThreshold": 0.5, "maxPredictions": 2}, Value()
    ),
    "output_config": {
        "predictions_format": "jsonl",
        "gcs_destination": {
            "output_uri_prefix": "gs://" + f"{BUCKET_NAME}/batch_output/"
        },
    },
    "dedicated_resources": {
        "machine_spec": {"machine_type": "n1-standard-2", "accelerator_type": 0},
        "starting_replica_count": 1,
        "max_replica_count": 1,
    },
}
# Preview the request proto as JSON before submitting it.
print(
    MessageToJson(
        aip.CreateBatchPredictionJobRequest(
            parent=PARENT, batch_prediction_job=batch_prediction_job
        ).__dict__["_pb"]
    )
)
# + [markdown] id="uTo1w7CQdx9J"
# *Example output*:
# ```
# {
# "parent": "projects/migration-ucaip-training/locations/us-central1",
# "batchPredictionJob": {
# "displayName": "custom_container_TF20210226022223",
# "model": "projects/116273516712/locations/us-central1/models/394223297069318144",
# "inputConfig": {
# "instancesFormat": "jsonl",
# "gcsSource": {
# "uris": [
# "gs://migration-ucaip-trainingaip-20210226022223/test.jsonl"
# ]
# }
# },
# "modelParameters": {
# "confidenceThreshold": 0.5,
# "maxPredictions": 2.0
# },
# "outputConfig": {
# "predictionsFormat": "jsonl",
# "gcsDestination": {
# "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226022223/batch_output/"
# }
# },
# "dedicatedResources": {
# "machineSpec": {
# "machineType": "n1-standard-2"
# },
# "startingReplicaCount": 1,
# "maxReplicaCount": 1
# }
# }
# }
# ```
#
# + [markdown] id="call:migration"
# #### Call
#
# + id="batchpredictionjobs_create:migration,new,call"
# Submit the batch prediction job.
request = clients["job"].create_batch_prediction_job(
    parent=PARENT, batch_prediction_job=batch_prediction_job
)
# + [markdown] id="response:migration"
# #### Response
#
# + id="print:migration,new,request"
print(MessageToJson(request.__dict__["_pb"]))
# + [markdown] id="batchpredictionjobs_create:migration,new,response,icn"
# *Example output*:
# ```
# {
# "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/2465140253845880832",
# "displayName": "custom_container_TF20210226022223",
# "model": "projects/116273516712/locations/us-central1/models/394223297069318144",
# "inputConfig": {
# "instancesFormat": "jsonl",
# "gcsSource": {
# "uris": [
# "gs://migration-ucaip-trainingaip-20210226022223/test.jsonl"
# ]
# }
# },
# "modelParameters": {
# "maxPredictions": 2.0,
# "confidenceThreshold": 0.5
# },
# "outputConfig": {
# "predictionsFormat": "jsonl",
# "gcsDestination": {
# "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226022223/batch_output/"
# }
# },
# "dedicatedResources": {
# "machineSpec": {
# "machineType": "n1-standard-2"
# },
# "startingReplicaCount": 1,
# "maxReplicaCount": 1
# },
# "manualBatchTuningParameters": {},
# "state": "JOB_STATE_PENDING",
# "createTime": "2021-02-26T09:39:46.357554Z",
# "updateTime": "2021-02-26T09:39:46.357554Z"
# }
# ```
#
# + id="batch_job_id:migration,new,response"
# The fully qualified ID for the batch job
batch_job_id = request.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
# + [markdown] id="batchpredictionjobs_get:migration,new"
# ### [projects.locations.batchPredictionJobs.get](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.batchPredictionJobs/get)
#
# + [markdown] id="hdp39iMPdx9M"
# #### Call
#
# + id="batchpredictionjobs_get:migration,new,call"
# Fetch the batch job once to inspect its current state.
request = clients["job"].get_batch_prediction_job(name=batch_job_id)
# + [markdown] id="FyPU67HYdx9N"
# #### Response
#
# + id="SIX1qajtdx9N"
print(MessageToJson(request.__dict__["_pb"]))
# + [markdown] id="batchpredictionjobs_get:migration,new,response,icn"
# *Example output*:
# ```
# {
# "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/2465140253845880832",
# "displayName": "custom_container_TF20210226022223",
# "model": "projects/116273516712/locations/us-central1/models/394223297069318144",
# "inputConfig": {
# "instancesFormat": "jsonl",
# "gcsSource": {
# "uris": [
# "gs://migration-ucaip-trainingaip-20210226022223/test.jsonl"
# ]
# }
# },
# "modelParameters": {
# "confidenceThreshold": 0.5,
# "maxPredictions": 2.0
# },
# "outputConfig": {
# "predictionsFormat": "jsonl",
# "gcsDestination": {
# "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226022223/batch_output/"
# }
# },
# "dedicatedResources": {
# "machineSpec": {
# "machineType": "n1-standard-2"
# },
# "startingReplicaCount": 1,
# "maxReplicaCount": 1
# },
# "manualBatchTuningParameters": {},
# "state": "JOB_STATE_PENDING",
# "createTime": "2021-02-26T09:39:46.357554Z",
# "updateTime": "2021-02-26T09:39:46.357554Z"
# }
# ```
#
# + id="batchpredictionjobs_get:migration,new,wait"
def get_latest_predictions(gcs_out_dir):
    """Return the newest 'prediction-*' subfolder under `gcs_out_dir`.

    Prediction subfolders embed a timestamp in their name, so the
    lexicographically greatest subfolder name is the most recent one.

    NOTE(review): `folders` is populated by the `!gsutil ls` cell magic
    below; in a plain .py export that line is a comment, so this function
    only works when executed inside Jupyter.
    """
    # folders = !gsutil ls $gcs_out_dir
    latest_name = ""
    latest_path = ""
    for folder in folders:
        subfolder = folder.split("/")[-2]
        if subfolder.startswith("prediction-"):
            # Compare subfolder names against subfolder names.  The
            # original compared the candidate name against a previously
            # stored full "gs://..." path; since 'p' > 'g', every
            # candidate compared greater and the function returned the
            # LAST listed folder rather than the maximum.
            if subfolder > latest_name:
                latest_name = subfolder
                latest_path = folder[:-1]
    return latest_path
# Poll the batch prediction job until it succeeds or fails, then list and
# dump the newest prediction output (via gsutil cell magics).
while True:
    response = clients["job"].get_batch_prediction_job(name=batch_job_id)
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("The job has not completed:", response.state)
        if response.state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        folder = get_latest_predictions(
            response.output_config.gcs_destination.output_uri_prefix
        )
        # ! gsutil ls $folder/prediction*
        # ! gsutil cat $folder/prediction*
        break
    time.sleep(60)
# + [markdown] id="batchpredictionjobs_get:migration,new,wait,icn"
# *Example output*:
# ```
# gs://migration-ucaip-trainingaip-20210226022223/batch_output/prediction-custom_container_TF20210226022223-2021_02_26T01_39_46_305Z/prediction.errors_stats-00000-of-00001
# gs://migration-ucaip-trainingaip-20210226022223/batch_output/prediction-custom_container_TF20210226022223-2021_02_26T01_39_46_305Z/prediction.results-00000-of-00001
# {"instance": {"bytes_inputs": {"b64": <KEY>"}}, "prediction": [0.0441863872, 0.0965465382, 0.131534964, 0.111121729, 0.133242682, 0.0896093622, 0.160808876, 0.116257414, 0.0309254956, 0.0857665]}
# {"instance": {"bytes_inputs": {"b64": <KEY>"}}, "prediction": [0.0441891, 0.0966139063, 0.131601468, 0.111363865, 0.133115292, 0.0897044092, 0.160883322, 0.115729697, 0.0310073923, 0.0857914686]}
# ```
#
# + [markdown] id="make_online_predictions:migration"
# ## Make online predictions
#
# + [markdown] id="endpoints_create:migration,new"
# ### [projects.locations.endpoints.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/create)
#
# + [markdown] id="3Md5r_ytdx9P"
# #### Request
#
# + id="endpoints_create:migration,new,request"
endpoint = {"display_name": "custom_container_TF" + TIMESTAMP}
print(
MessageToJson(
aip.CreateEndpointRequest(parent=PARENT, endpoint=endpoint).__dict__["_pb"]
)
)
# + [markdown] id="__Sqn83udx9P"
# *Example output*:
# ```
# {
# "parent": "projects/migration-ucaip-training/locations/us-central1",
# "endpoint": {
# "displayName": "custom_container_TF20210226022223"
# }
# }
# ```
#
# + [markdown] id="FIjM1WQVdx9P"
# #### Call
#
# + id="endpoints_create:migration,new,call"
# Create the endpoint (a long-running operation; returns an LRO future).
request = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
# + [markdown] id="6wQWT-6Zdx9P"
# #### Response
#
# + id="print:migration,new,response"
# Block on the LRO and dump the resulting Endpoint proto as JSON.
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# + [markdown] id="endpoints_create:migration,new,response"
# *Example output*:
# ```
# {
#   "name": "projects/116273516712/locations/us-central1/endpoints/2977125644296519680"
# }
# ```
#
# + id="endpoint_id:migration,new,response"
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
# + [markdown] id="endpoints_deploymodel:migration,new"
# ### [projects.locations.endpoints.deployModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel)
#
# + [markdown] id="UYdBoNpWdx9Q"
# #### Request
#
# + id="endpoints_deploymodel:migration,new,request"
# Deployment spec: serve the uploaded model on a single n1-standard-4
# (no accelerator), scaling from one replica.
deployed_model = {
    "model": model_id,
    "display_name": "custom_container_TF" + TIMESTAMP,
    "dedicated_resources": {
        "min_replica_count": 1,
        "machine_spec": {"machine_type": "n1-standard-4", "accelerator_count": 0},
    },
}
# Preview the request proto as JSON before actually calling deploy;
# traffic_split {"0": 100} routes all traffic to the new deployment.
print(
    MessageToJson(
        aip.DeployModelRequest(
            endpoint=endpoint_id,
            deployed_model=deployed_model,
            traffic_split={"0": 100},
        ).__dict__["_pb"]
    )
)
# + [markdown] id="wR4HXvHHdx9R"
# *Example output*:
# ```
# {
# "endpoint": "projects/116273516712/locations/us-central1/endpoints/2977125644296519680",
# "deployedModel": {
# "model": "projects/116273516712/locations/us-central1/models/394223297069318144",
# "displayName": "custom_container_TF20210226022223",
# "dedicatedResources": {
# "machineSpec": {
# "machineType": "n1-standard-4"
# },
# "minReplicaCount": 1
# }
# },
# "trafficSplit": {
# "0": 100
# }
# }
# ```
#
# + [markdown] id="HQmSPoszdx9R"
# #### Call
#
# + id="endpoints_deploymodel:migration,new,call"
# Deploy the model to the endpoint (long-running operation).
request = clients["endpoint"].deploy_model(
    endpoint=endpoint_id, deployed_model=deployed_model, traffic_split={"0": 100}
)
# + [markdown] id="z99BIGGsdx9R"
# #### Response
#
# + id="MPuA3Eoidx9R"
# Block on the LRO; the response carries the new deployed model's ID.
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# + [markdown] id="endpoints_deploymodel:migration,new,response"
# *Example output*:
# ```
# {
#   "deployedModel": {
#     "id": "1297564458264035328"
#   }
# }
# ```
#
# + id="deployed_model_id:migration,new,response"
# The unique ID for the deployed model
deployed_model_id = result.deployed_model.id
print(deployed_model_id)
# + [markdown] id="endpoints_predict:migration,new"
# ### [projects.locations.endpoints.predict](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/predict)
#
# + [markdown] id="03LnDUvjdx9S"
# ### Prepare file for online prediction
# + id="Rje-QRuYdx9T"
# Encode one test image as a base64 JPEG string, the payload format the
# serving container reads under "bytes_inputs".
import base64
import cv2
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
# Rescale to uint8 for JPEG encoding.  NOTE(review): the *255 assumes the
# pixels are floats in [0, 1] -- confirm against the earlier preprocessing.
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
bytes = tf.io.read_file("tmp.jpg")  # NOTE(review): shadows the builtin `bytes`
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
# + [markdown] id="r1Tk5DVkdx9T"
# #### Request
#
# + id="endpoints_predict:migration,new,request,icn"
# Build the PredictRequest for preview.  NOTE(review): `append`-ing
# `instances_list` (a list) adds it as a single nested element -- this
# matches the example output below, but the actual predict call later
# passes the flat `instances_list` instead; verify which shape the
# deployed model expects.
instances_list = [{"bytes_inputs": {"b64": b64str}}]
prediction_request = aip.PredictRequest(endpoint=endpoint_id)
prediction_request.instances.append(instances_list)
print(MessageToJson(prediction_request.__dict__["_pb"]))
# + [markdown] id="FGY9u-IZdx9U"
# *Example output*:
# ```
# {
# "endpoint": "projects/116273516712/locations/us-central1/endpoints/2977125644296519680",
# "instances": [
# [
# {
# "bytes_inputs": {
# "b64": <KEY>"
# }
# }
# ]
# ]
# }
# ```
#
# + [markdown] id="Xq2Faefidx9U"
# #### Call
#
# + id="endpoints_predict:migration,new,call"
# Online prediction against the deployed endpoint.
request = clients["prediction"].predict(endpoint=endpoint_id, instances=instances_list)
# + [markdown] id="QU9j-Yijdx9V"
# #### Response
#
# + id="dQew71RQdx9V"
# Per-class probabilities plus the deployed model ID that served the call.
print(MessageToJson(request.__dict__["_pb"]))
# + [markdown] id="endpoints_predict:migration,new,response,icn"
# *Example output*:
# ```
# {
# "predictions": [
# [
# 0.0441863947,
# 0.0965465382,
# 0.131534964,
# 0.111121736,
# 0.133242667,
# 0.0896093696,
# 0.160808861,
# 0.116257407,
# 0.0309255011,
# 0.0857665
# ]
# ],
# "deployedModelId": "1297564458264035328"
# }
# ```
#
# + [markdown] id="endpoints_undeploymodel:migration,new"
# ### [projects.locations.endpoints.undeployModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/undeployModel)
#
# + [markdown] id="IYlo41cBdx9W"
# #### Call
#
# + id="endpoints_undeploymodel:migration,new,call"
# Undeploy the model; the empty traffic_split removes its traffic share.
request = clients["endpoint"].undeploy_model(
    endpoint=endpoint_id, deployed_model_id=deployed_model_id, traffic_split={}
)
# + [markdown] id="8_e5NspVdx9X"
# #### Response
#
# + id="MdeQNL7idx9X"
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# + [markdown] id="iha0pqA2TBDu"
# *Example output*:
# ```
# {}
# ```
#
# + [markdown] id="cleanup:migration,new"
# # Cleaning up
#
# To clean up all GCP resources used in this project, you can [delete the GCP
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial.
#
# + id="G5pYm8a4dx9Y"
# Toggle which resources to clean up.
delete_model = True
delete_endpoint = True
delete_custom_job = True
delete_batchjob = True
delete_bucket = True

# Delete the model using the Vertex AI fully qualified identifier for the model
try:
    if delete_model:
        clients["model"].delete_model(name=model_id)
except Exception as e:
    print(e)

# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
    if delete_endpoint:
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)

# Delete the custom training using the Vertex AI fully qualified identifier for the custom training
try:
    if delete_custom_job:
        clients["job"].delete_custom_job(name=custom_training_id)
except Exception as e:
    print(e)

# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
    if delete_batchjob:
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)

if delete_bucket and "BUCKET_NAME" in globals():
    # The notebook used the `!gsutil rm -r` cell magic here; in a .py export
    # that magic becomes a comment and left this `if` with an empty body
    # (a SyntaxError).  Shell out to gsutil explicitly instead.
    import subprocess

    subprocess.run(["gsutil", "rm", "-r", "gs://%s" % BUCKET_NAME], check=False)
    # ! gsutil rm -r gs://$BUCKET_NAME
| notebooks/community/migration/UJ3 Custom Training Custom Container TF Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NeuroMet
# Instructions:
# * this notebook cannot be used for two different analysis at the same time. If you are unsure if someone is using it click on File / Make a copy and work with the copy.
# * to run a cell press Umschalt (Shift) + Enter
# * to correctly run the workflow _EVERY_ cell has to be ran
# * When you run a cell, there is at first an [*] on the left, then a number (maybe an updated number) when the cell ran..6
# * normally the only thing to adjust in the settings is the subject_list
# ### External Modules
import sys, os, glob
sys.path.append(os.path.abspath('../'))
from neuromet.NeuroMet2_dev_fs7 import NeuroMet
from neuromet import scanner_to_bids
from IPython.display import Image
# %matplotlib inline
# ### Settings
# +
## Paths
raw_data_dir = '.' # Directory with files with scanner structure
base_dir = '.'  # BIDS root / working directory for the workflows
temp_dir = '.'  # scratch space for intermediate files
# Parallelism: MultiProc worker count and OpenMP threads per node.
cores = 4
omp_nthreads = 2
# -
# Subject list as $ID$session. i.e. for sub-001/ses-01 '001T01'
subject_list = ['2004T2']
# ### Copy from raw data
# Convert the scanner-structured raw data into BIDS layout for the selected
# subjects (the reload picks up any local edits to the module).
import importlib
importlib.reload(scanner_to_bids)
s2b = scanner_to_bids.ScannerToBIDS(sublist = subject_list,
                                    raw_data_dir = raw_data_dir,
                                    temp_dir = temp_dir,
                                    bids_root=base_dir)
s2bwf = s2b.make_workflow()
# + jupyter={"outputs_hidden": true}
s2bwf.run()
# +
# SPM Segment and Mask generation
# -
# ### Tissue Segmentation Pipeline
# Build the tissue-segmentation workflow for the selected subjects.
neuromet_creator = NeuroMet(sublist = subject_list,
                            raw_data_dir = raw_data_dir,
                            temp_dir = temp_dir,
                            w_dir = base_dir,
                            omp_nthreads = omp_nthreads,
                            project_prefix = 'ds003427')
neuromet = neuromet_creator.make_neuromet1_workflow()
# Render the workflow graph, display it, then run on `cores` processes.
neuromet.write_graph(graph2use = 'colored', dotfilename = './imgs/neuromet')
Image(filename='./imgs/neuromet.png', width=250)
# %%time
neuromet.run('MultiProc', plugin_args={'n_procs': cores})
# test_data/ds003427/sub-03/ses-01/anat/sub-03_ses-01_desc-UNI_MP2RAGE.nii.gz# Combined image and Freesurfer analysis
print(subject_list)
# +
# Uncomment here for all subjects:
#subject_list = [ x.split('/')[-1][8:11] for x in uni ]
# only some subjects? Then edit here:
#subject_list = ['2004']
# -
import importlib
from pipeline import NeuroMet2_dev_fs7
importlib.reload(NeuroMet2_dev_fs7)
from pipeline.nodes import qdec
importlib.reload(qdec)
# NOTE(review): `w_dir` and `overwrite` are not defined anywhere above in
# this notebook (earlier cells use `base_dir`); define them before running
# this cell or it raises NameError.
neuromet_creator = NeuroMet2_dev_fs7.NeuroMet(subject_list,
                                              temp_dir,
                                              w_dir,
                                              omp_nthreads,
                                              raw_data_dir,
                                              overwrite)
# Build the FreeSurfer analysis workflow, render its graph, then run it.
neuromet_fs = neuromet_creator.make_neuromet_fs_workflow()
neuromet_fs.write_graph(graph2use = 'colored', dotfilename = './imgs/neuromet_fs')
Image(filename='./imgs/neuromet_fs.png')
# ### Run Freesurfer Analysis
# %%time
neuromet_fs.run('MultiProc', plugin_args={'n_procs': cores})
# +
# Calculate and Ajust Volumes
# -
from pipeline.nodes import adj_vol, qdec
import importlib

# Re-run the QDec node to (re)generate the FreeSurfer stats tables.
importlib.reload(qdec)
q = qdec.QDec()
q.inputs.basedir = '/media/drive_s/AG/AG-Floeel-Imaging/02-User/NEUROMET2/Structural_Analysis_fs7'
q.run().outputs.stats_directory

# Adjust volumes from the generated stats tables.
importlib.reload(adj_vol)
v = adj_vol.AdjustVolume()
v.inputs.stats_directory = '/media/drive_s/AG/AG-Floeel-Imaging/02-User/NEUROMET2/Structural_Analysis_fs7/stats_tables'
# FIXME(review): the original line ended in a bare `v.inputs.diag_csv =`,
# which is a SyntaxError.  Set the path to the diagnosis CSV before running.
v.inputs.diag_csv = ''  # TODO: path to the diagnosis CSV
| notebooks/NeuroMET.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from logicqubit.logic import *
from logicqubit.synthesis import *
from logicqubit.gates import *
from logicqubit.hilbert import *
# Symmetric 4x4 operator to expand in the Pauli basis.  get_a() presumably
# returns the expansion coefficients -- confirm against the logicqubit docs.
A = Matrix([[15, 9, 5, -3],[9, 15, 3, -5],[5, 3, 15, -9],[-3, -5, -9, 15]])*(1/4)
dep = PauliDecomposition(A)
dep.get_a()
| pauli gates decomposition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Create a Learner for inference
# + hide_input=true
from fastai import *
from fastai.gen_doc.nbdoc import *
# -
# In this tutorial, we'll see how the same API allows you to create an empty [`DataBunch`](/basic_data.html#DataBunch) for a [`Learner`](/basic_train.html#Learner) at inference time (once you have trained your model) and how to call the `predict` method to get the predictions on a single item.
# + hide_input=true
jekyll_note("""As usual, this page is generated from a notebook that you can find in the <code>docs_src</code> folder of the
<a href="https://github.com/fastai/fastai">fastai repo</a>. We use the saved models from <a href="/tutorial.data.html">this tutorial</a> to
have this notebook run quickly.""")
# -
# ## Vision
# To quickly get acces to all the vision functionality inside fastai, we use the usual import statements.
from fastai import *
from fastai.vision import *
# ### A classification problem
# Let's begin with our sample of the MNIST dataset.
mnist = untar_data(URLs.MNIST_TINY)
tfms = get_transforms(do_flip=False)
# It's set up with an imagenet structure so we use it to split our training and validation set, then labelling.
# Data block pipeline: split by train/valid folders, label from parent
# folder names, resize to 32px, batch, and apply ImageNet normalization.
data = (ImageItemList.from_folder(mnist)
        .split_by_folder()
        .label_from_folder()
        .transform(tfms, size=32)
        .databunch()
        .normalize(imagenet_stats))
# Now that our data has been properly set up, we can train a model. Once the time comes to deploy it for inference, we'll need to save the information this [`DataBunch`](/basic_data.html#DataBunch) contains (classes for instance), to do this, we call `data.export()`. This will create an `export.pkl` file that you'll need to copy with your model file if you want to deploy it on another device.
# + [markdown] hide_input=false
# To create the [`DataBunch`](/basic_data.html#DataBunch) for inference, you'll need to use the `load_empty` method. Note that you don't have to specify anything: it remembers the classes, the transforms you used or the normalization.
# -
empty_data = ImageDataBunch.load_empty(mnist)
# + [markdown] hide_input=false
# Then, we use it to create a [`Learner`](/basic_train.html#Learner) and load the model we trained before.
# -
learn = create_cnn(empty_data, models.resnet18).load('mini_train')
# You can now get the predictions on any image via `learn.predict`.
img = data.train_ds[0][0]
learn.predict(img)
# It returns a tuple of three things: the object predicted (with the class in this instance), the underlying data (here the corresponding index) and the raw probabilities. You can also do inference on a larger set of data by adding a *test set*. Simply use the data bock API, but add a test set to your [`LabelLists`](/data_block.html#LabelLists):
sd = LabelLists.load_empty(mnist).add_test_folder('test', label='3')
empty_data = sd.databunch()
# Now you can use [`Learner.get_preds`](/basic_train.html#Learner.get_preds) in the usual way.
learn = create_cnn(empty_data, models.resnet18).load('mini_train')
preds,y = learn.get_preds(ds_type=DatasetType.Test)
preds[:5]
# ### A multi-label problem
# Now let's try these on the planet dataset, which is a little bit different in the sense that each image can have multiple tags (and not just one label).
planet = untar_data(URLs.PLANET_TINY)
planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
# Here each images is labelled in a file named `labels.csv`. We have to add [`train`](/train.html#train) as a prefix to the filenames, `.jpg` as a suffix and indicate that the labels are separated by spaces.
data = (ImageItemList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')
.random_split_by_pct()
.label_from_df(sep=' ')
.transform(planet_tfms, size=128)
.databunch()
.normalize(imagenet_stats))
# Again, we call `data.export()` to export our data object properties.
data.export()
# We can then create the [`DataBunch`](/basic_data.html#DataBunch) for inference, by using the `load_empty` method as before.
empty_data = ImageDataBunch.load_empty(planet)
learn = create_cnn(empty_data, models.resnet18)
learn.load('mini_train');
# And we get the predictions on any image via `learn.predict`.
img = data.train_ds[0][0]
learn.predict(img)
# Here we can specify a particular threshold to consider the predictions to be correct or not. The default is `0.5`, but we can change it.
learn.predict(img, thresh=0.3)
# ### A regression example
# For the next example, we are going to use the [BIWI head pose](https://data.vision.ee.ethz.ch/cvl/gfanelli/head_pose/head_forest.html#db) dataset. On pictures of persons, we have to find the center of their face. For the fastai docs, we have built a small subsample of the dataset (200 images) and prepared a dictionary for the correspondance fielname to center.
biwi = untar_data(URLs.BIWI_SAMPLE)
# Load the filename -> face-center mapping.  Use a context manager so the
# file handle is closed (the original passed an unclosed open() straight
# into pickle.load, leaking the descriptor).
with open(biwi/'centers.pkl', 'rb') as f:
    fn2ctr = pickle.load(f)
# To grab our data, we use this dictionary to label our items. We also use the [`PointsItemList`](/vision.data.html#PointsItemList) class to have the targets be of type [`ImagePoints`](/vision.image.html#ImagePoints) (which will make sure the data augmentation is properly applied to them). When calling [`transform`](/tabular.transform.html#tabular.transform) we make sure to set `tfm_y=True`.
data = (PointsItemList.from_folder(biwi)
.random_split_by_pct(seed=42)
.label_from_func(lambda o:fn2ctr[o.name])
.transform(get_transforms(), tfm_y=True, size=(120,160))
.databunch()
.normalize(imagenet_stats))
# As before, the road to inference is pretty straightforward: export the data, then load an empty [`DataBunch`](/basic_data.html#DataBunch).
data.export()
empty_data = ImageDataBunch.load_empty(biwi)
learn = create_cnn(empty_data, models.resnet18, lin_ftrs=[100], ps=0.05)
learn.load('mini_train');
# And now we can a prediction on an image.
img = data.valid_ds[0][0]
learn.predict(img)
# To visualize the predictions, we can use the [`Image.show`](/vision.image.html#Image.show) method.
img.show(y=learn.predict(img)[0])
# ### A segmentation example
# Now we are going to look at the [camvid dataset](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/) (at least a small sample of it), where we have to predict the class of each pixel in an image. Each image in the 'images' subfolder as an equivalent in 'labels' that is its segmentations mask.
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
# We read the classes in 'codes.txt' and the function maps each image filename with its corresponding mask filename.
codes = np.loadtxt(camvid/'codes.txt', dtype=str)

# PEP 8 (E731): use a def instead of assigning a lambda to a name.
def get_y_fn(x):
    """Map an image path to its mask path: same stem with a '_P' suffix."""
    return path_lbl/f'{x.stem}_P{x.suffix}'
# The data block API allows us to uickly get everything in a [`DataBunch`](/basic_data.html#DataBunch) and then we can have a look with `show_batch`.
data = (SegmentationItemList.from_folder(path_img)
.random_split_by_pct()
.label_from_func(get_y_fn, classes=codes)
.transform(get_transforms(), tfm_y=True, size=128)
.databunch(bs=16, path=camvid)
.normalize(imagenet_stats))
# As before, we export the data then create an empty [`DataBunch`](/basic_data.html#DataBunch) that we pass to a [`Learner`](/basic_train.html#Learner).
data.export()
empty_data = ImageDataBunch.load_empty(camvid)
learn = unet_learner(empty_data, models.resnet18)
learn.load('mini_train');
# And now we can a prediction on an image.
img = data.train_ds[0][0]
learn.predict(img);
# To visualize the predictions, we can use the [`Image.show`](/vision.image.html#Image.show) method.
img.show(y=learn.predict(img)[0])
# ## Text
# Next application is text, so let's start by importing everything we'll need.
from fastai import *
from fastai.text import *
# ### Language modelling
# First let's look a how to get a language model ready for inference. Since we'll load the model trained in the [visualize data tutorial](/tutorial.data.html), we load the vocabulary used there.
imdb = untar_data(URLs.IMDB_SAMPLE)
vocab = Vocab(pickle.load(open(imdb/'tmp'/'itos.pkl', 'rb')))
data_lm = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)
.random_split_by_pct()
.label_for_lm()
.databunch())
# Like in vision, we just have to type `data_lm.export()` to save all the information inside the [`DataBunch`](/basic_data.html#DataBunch) we'll need. In this case, this includes all the vocabulary we created.
data_lm.export()
# Now let's define a language model learner from an empty data object.
empty_data = TextLMDataBunch.load_empty(imdb)
learn = language_model_learner(empty_data)
learn.unfreeze()
learn.load('mini_train_lm', with_opt=False);
# Then we can predict with the usual method, here we can specify how many words we want the model to predict.
learn.predict('This is a simple test of', n_words=20)
# ### Classification
# Now let's see a classification example. We have to use the same vocabulary as for the language model if we want to be able to use the encoder we saved.
data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)
.split_from_df(col='is_valid')
.label_from_df(cols='label')
.databunch(bs=42))
# Again we export the data.
data_clas.export()
# Now let's define a text classifier from an empty data object.
empty_data = TextClasDataBunch.load_empty(imdb)
learn = text_classifier_learner(empty_data)
learn.load('mini_train_clas', with_opt=False);
# Then we can predict with the usual method.
learn.predict('I really loved that movie!')
# ## Tabular
# Last application brings us to tabular data. First let's import everything we'll need.
from fastai import *
from fastai.tabular import *
# We'll use a sample of the [adult dataset](https://archive.ics.uci.edu/ml/datasets/adult) here. Once we read the csv file, we'll need to specify the dependant variable, the categorical variables, the continuous variables and the processors we want to use.
adult = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(adult/'adult.csv')
dep_var = '>=50k'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain']
procs = [FillMissing, Categorify, Normalize]
# Then we can use the data block API to grab everything together before using `data.show_batch()`
data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx=range(800,1000))
.label_from_df(cols=dep_var)
.databunch())
# We define a [`Learner`](/basic_train.html#Learner) object that we fit and then save the model.
learn = tabular_learner(data, layers=[200,100], metrics=accuracy)
learn.fit(1, 1e-2)
learn.save('mini_train')
# As in the other applications, we just have to type `data.export()` to save everything we'll need for inference (here the inner state of each processor).
data.export()
# Then we create an empty data object and a learner from it like before.
data = TabularDataBunch.load_empty(adult)
learn = tabular_learner(data, layers=[200,100])
learn.load('mini_train');
# And we can predict on a row of dataframe that has the right `cat_names` and `cont_names`.
learn.predict(df.iloc[0])
| docs_src/tutorial.inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install matplotlib
# !pip install colorama
# +
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import colorama
matplotlib.rc('font', size=16)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# -
torch.__version__
# Training augmentations: upscale, random affine jitter, then a random
# 299x299 crop (Inception-v3's input size), finally conversion to tensor.
resize_transformer = transforms.Resize(400)
affine_transformer = transforms.RandomAffine(degrees=10, scale=(0.8,1.2), shear=15)
random_resize_crop_transformer = transforms.RandomResizedCrop(299, scale=(0.5, 1))
tensor_transformer = transforms.ToTensor()
# +
# Train set gets the augmentation pipeline; validation uses a deterministic
# resize + center crop at the same 299px input size.
data_dir = os.path.join(os.getcwd(), "dataset302")
train_data = datasets.ImageFolder(os.path.join(data_dir, "train"),
                                  transform = transforms.Compose([
                                      resize_transformer,
                                      affine_transformer,
                                      random_resize_crop_transformer,
                                      tensor_transformer
                                  ]))
test_data = datasets.ImageFolder(os.path.join(data_dir, "val"),
                                 transform = transforms.Compose([
                                     transforms.Resize(350),
                                     transforms.CenterCrop(299),
                                     transforms.ToTensor()
                                 ]))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, num_workers=16, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=64, num_workers=16, shuffle=True)
class_names = train_data.classes
# -
num_classes = len(class_names)
num_classes
# +
preview_data = datasets.ImageFolder(os.path.join(data_dir, "train"))
img, label = next(iter(preview_data))
fig = plt.figure(figsize=(16,9))
plt.subplot(1, 4, 1)
plt.xlabel('Original')
plt.imshow(tensor_transformer(img).permute(1, 2, 0))
plt.subplot(1, 4, 2)
plt.xlabel('Resized (400x400)')
plt.imshow(tensor_transformer(resize_transformer(img)).permute(1, 2, 0))
plt.subplot(1, 4, 3)
plt.xlabel('Affine transform')
plt.imshow(tensor_transformer(affine_transformer(img)).permute(1, 2, 0))
plt.subplot(1, 4, 4)
plt.xlabel('Random resizing + croping')
plt.imshow(tensor_transformer(random_resize_crop_transformer(img)).permute(1, 2, 0))
# +
data, labels = next(iter(test_loader))
data, labels = data[:5], labels[:5]
fig = plt.figure(figsize=(16,9))
for i in range(0, 5):
fig.add_subplot(1, 5, i+1)
plt.imshow(data[i].permute(1, 2, 0))
plt.xlabel(class_names[labels[i]])
# -
print(dir(models))
# Load an ImageNet-pretrained Inception-v3 and replace both classifier
# heads (main and auxiliary) to match our number of classes.
model_ft = models.inception_v3(pretrained=True)
model_ft
model_ft.fc
model_ft.AuxLogits.fc
# Derive in_features from the existing layers instead of hard-coding
# 768/2048, so this keeps working if the architecture changes.
model_ft.AuxLogits.fc = nn.Linear(in_features=model_ft.AuxLogits.fc.in_features, out_features=num_classes)
model_ft.fc = nn.Linear(in_features=model_ft.fc.in_features, out_features=num_classes)
model_ft.fc
def train(model, train_loader, test_loader, device, num_epochs=1, lr=1e-5, use_scheduler=False, is_inception=False):
    """Fine-tune `model` on `train_loader`, reporting accuracy each epoch.

    Parameters
    ----------
    model : nn.Module, already moved to `device`.
    train_loader, test_loader : iterables yielding (images, labels) batches.
    device : torch.device to run on.
    num_epochs : passes over the training data.
    lr : Adam learning rate.
    use_scheduler : if True, decay the LR by 0.85 after every epoch.
    is_inception : if True, the model returns (main, aux) outputs in train
        mode and the auxiliary loss is added with weight 0.4.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()
    if use_scheduler:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, 0.85)

    for epoch in range(num_epochs):
        print("="*40, "Starting epoch %d" % (epoch + 1), "="*40)
        # accuracy() below leaves the model in eval mode, so training mode
        # must be re-enabled every epoch (the original called model.train()
        # only once, so epochs 2+ trained with dropout/batchnorm frozen).
        model.train()
        cum_loss = 0
        for batch_idx, (data, labels) in enumerate(train_loader):
            data, labels = data.to(device), labels.to(device)
            optimizer.zero_grad()
            if is_inception:
                # Inception-v3 returns (logits, aux_logits) in train mode.
                outputs, aux_outputs = model(data)
                loss1 = criterion(outputs, labels)
                loss2 = criterion(aux_outputs, labels)
                loss = loss1 + 0.4*loss2
            else:
                output = model(data)
                loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            cum_loss += loss.item()
            if batch_idx % 100 == 0:
                print("Batch %d/%d" % (batch_idx, len(train_loader)))
        if use_scheduler:
            # Step after the epoch's optimizer updates, per the PyTorch
            # >=1.1 API (the original stepped at the start of each epoch,
            # decaying the LR before any training had happened).
            scheduler.step()
        train_acc = accuracy(model, train_loader, device)
        test_acc = accuracy(model, test_loader, device)
        # Normalize by the size of the dataset actually iterated; the
        # original divided by the global `train_data`, which broke whenever
        # the function was called with a different loader.
        print(colorama.Fore.GREEN + "\nEpoch %d/%d, Loss=%.4f, Train-Acc=%d%%, Valid-Acc=%d%%"
              % (epoch+1, num_epochs, cum_loss/len(train_loader.dataset), 100*train_acc, 100*test_acc), colorama.Fore.RESET)
def accuracy(model, dataloader, device):
    """Fraction of samples in `dataloader` that `model` classifies correctly.

    Note: switches the model to eval mode and leaves it there.
    """
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():  # no autograd bookkeeping needed for evaluation
        for batch, targets in dataloader:
            batch, targets = batch.to(device), targets.to(device)
            preds = model(batch).argmax(dim=1)
            correct += (preds == targets).sum().item()
            total += preds.shape[0]
    return correct / total
# Freeze the pretrained backbone; keep BOTH classifier heads trainable.
# The original froze AuxLogits.fc too, even though it was just replaced and
# is trained via the 0.4-weighted auxiliary loss in train(..., is_inception=True).
for name, param in model_ft.named_parameters():
    if name not in ["fc.weight", "fc.bias", "AuxLogits.fc.weight", "AuxLogits.fc.bias"]:
        param.requires_grad = False
# +
# %%time
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_ft = model_ft.to(device)
train(model_ft, train_loader, test_loader, device, num_epochs=1, is_inception=True)
# -
data.shape
# %time
| Training302.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/merazlab/tensorflow/blob/master/basic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uw9DkyybldPY" colab_type="text"
# #MNIST CNN
# + id="o6w9Nx37uEkP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9ccaa3cb-8fdc-4a03-8859-fe6a93b71983"
# %tensorflow_version 1.x
# + id="6mhazaCOuHQ1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2d9479c6-34a0-49fa-9206-9f0562eda5fd"
import tensorflow
print(tensorflow.__version__)
# + id="qBJAFAL8lbFs" colab_type="code" colab={}
import tensorflow as tf
# + id="4MJUl2o_lnLJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="445bea92-89fc-42e4-f546-9038f6ba936d"
# Build a tiny TF1 graph computing (a*b) / (a+b).
a = tf.constant(10)
b = tf.constant(20)
t1 = tf.multiply(a, b)
t2 = tf.add(a, b)
res = tf.divide(t1, t2)
tf.print(res)
# + id="jj-pogwBl4D3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1b1a9eac-f834-49cd-f279-b063c3318be6"
# Evaluate the graph inside a TF1 session.
with tf.Session() as session:
    print(session.run(res))
# + [markdown] id="AGu8W3_AuX3-" colab_type="text"
# #Variables
# + [markdown] id="o8fbCfr2wGav" colab_type="text"
#
# + id="L9gj7bJLty3q" colab_type="code" colab={}
# A 2x3 float variable with a small random-normal initializer.
v = tf.get_variable("name", dtype=tf.float32, shape=[2, 3], initializer=tf.random_normal_initializer(stddev=0.1))
# + [markdown] id="fM1mQnzxwlW1" colab_type="text"
# assign new value in variabel
# + id="10h4bAt7wL1q" colab_type="code" colab={}
incremwnt_op = v.assign(v + 1) #method1: Variable.assign
# + id="EjcMwsZNw4sw" colab_type="code" colab={}
incremwnt_op = tf.assign(v, v + 1) #method2: tf.assign (equivalent to method1)
# + [markdown] id="SjPfsXnNxdNq" colab_type="text"
# all the variabel must be explicitly initialized before its first use in a seesion
#
# you can initialise all variabels in the graph at once as below
# + id="ihcQudccw8Qo" colab_type="code" colab={}
# The original cell was pseudo-code and not valid Python:
#   create graph() #include variabels
#   with tf.Session as session:      (empty body)
# Fixed: sketch the graph-building step as a comment, then initialize all
# variables inside a properly opened TF1 session.
# create_graph()  # build your graph (including variables) here
init = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init)
# + [markdown] id="HK6BTwgxzJO2" colab_type="text"
# #Placeholders and feeds
#
# use for inject data in the graph at the time of exexution
# + id="6jFOoEipx60Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="eeaa1de6-2dd0-4811-fd92-03aa4636c8be"
# A placeholder receives its value at session.run time via feed_dict.
a = tf.placeholder(tf.float32, [])
b = tf.constant(1.0)
c = a + b
with tf.Session() as session:
    print(session.run(c, feed_dict={a: 3.0}))
    print(session.run(c, feed_dict={a: 5.0}))
# + id="4iRJP7tf0dr0" colab_type="code" colab={}
| basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WAV separation
# <div class="alert alert-info">
#
# This tutorial is available as an IPython notebook at [malaya-speech/example/multispeaker-separation-wav](https://github.com/huseinzol05/malaya-speech/tree/master/example/multispeaker-separation-wav).
#
# </div>
# <div class="alert alert-info">
#
# This module is language independent, so it save to use on different languages.
#
# </div>
import malaya_speech
import numpy as np
from malaya_speech import Pipeline
import matplotlib.pyplot as plt
import IPython.display as ipd
# ### Multispeaker separation description
#
# 1. FastSep-WAV only able to separate 8k sample rate.
# 2. FastSep-WAV trained to separate 4 unique speakers.
# 3. Trained on VCTK, Nepali, Mandarin and Malay mixed speakers.
# ### List available FastSep-WAV
malaya_speech.multispeaker_separation.available_deep_wav()
# ### Load model
#
# ```python
# def deep_wav(model: str = 'fastsep-4', quantized: bool = False, **kwargs):
# """
# Load FastSep model, trained on raw 8k wav using SISNR PIT loss.
#
# Parameters
# ----------
# model : str, optional (default='fastsep-4')
# Model architecture supported. Allowed values:
#
# * ``'fastsep-4'`` - FastSep 4 layers trained on raw 8k wav.
# * ``'fastsep-2'`` - FastSep 2 layers trained on raw 8k wav.
# quantized : bool, optional (default=False)
# if True, will load 8-bit quantized model.
# Quantized model not necessary faster, totally depends on the machine.
#
# Returns
# -------
# result : malaya_speech.model.tf.Split class
# """
# ```
model = malaya_speech.multispeaker_separation.deep_wav('fastsep-4')
# ### Load quantized model
quantized_model = malaya_speech.multispeaker_separation.deep_wav('fastsep-4', quantized = True)
# ### Generate random mixed audio
# +
import random
import malaya_speech.augmentation.waveform as augmentation
sr = 8000  # FastSep-WAV models operate on 8 kHz audio
speakers_size = 4  # number of speakers to mix into one signal


def read_wav(f):
    # load an audio file resampled to the model's 8 kHz rate
    return malaya_speech.load(f, sr = sr)


def random_sampling(s, length):
    # crop a random window of `length` milliseconds out of signal `s`
    return augmentation.random_sampling(s, sr = sr, length = length)
def combine_speakers(files, n = 5, limit = 4):
    """Mix `n` randomly chosen speaker files into one overlapped signal.

    Returns (left, y): `left` is the mixture waveform and `y` holds the
    per-speaker target signals. Speakers beyond `limit` are summed into a
    single extra "combined" track. All outputs share one peak scaling.
    """
    w_samples = random.sample(files, n)
    w_samples = [read_wav(f)[0] for f in w_samples]
    # target crop length in ms: the shortest sample, capped by a random 3-7 s window
    w_lens = [len(w) / sr for w in w_samples]
    w_lens = int(min(min(w_lens) * 1000, random.randint(3000, 7000)))
    w_samples = [random_sampling(w, length = w_lens) for w in w_samples]
    y = [w_samples[0]]
    left = w_samples[0].copy()
    combined = None
    for i in range(1, n):
        right = w_samples[i].copy()
        # overlap 98-100% of the next speaker with the running mixture;
        # `minus` left-pads the new signal so only the tail overlaps
        overlap = random.uniform(0.98, 1.0)
        len_overlap = int(overlap * len(right))
        minus = len(left) - len_overlap
        if minus < 0:
            minus = 0
        padded_right = np.pad(right, (minus, 0))
        left = np.pad(left, (0, len(padded_right) - len(left)))
        left = left + padded_right
        if i >= (limit - 1):
            # past the speaker limit: accumulate the extras into one shared track
            if combined is None:
                combined = padded_right
            else:
                combined = np.pad(
                    combined, (0, len(padded_right) - len(combined))
                )
                combined += padded_right
        else:
            y.append(padded_right)
    if combined is not None:
        y.append(combined)
    # pad every target to the mixture's length, then normalise mixture and
    # targets by the common peak value, leaving 10% headroom
    maxs = [max(left)]
    for i in range(len(y)):
        if len(y[i]) != len(left):
            y[i] = np.pad(y[i], (0, len(left) - len(y[i])))
        maxs.append(max(y[i]))
    max_amp = max(maxs)
    mix_scaling = 1 / max_amp * 0.9
    left = left * mix_scaling
    for i in range(len(y)):
        y[i] = y[i] * mix_scaling
    return left, y
# +
from glob import glob
wavs = glob('speech/example-speaker/*.wav')
wavs.extend(glob('speech/vctk/*.flac'))
len(wavs)
# -
left, y = combine_speakers(wavs, speakers_size)
len(left) / sr, len(y)
ipd.Audio(left, rate = sr)
plt.plot(left, label = 'mixed')
plt.plot(y[0], label = 'y0')
plt.plot(y[1], label = 'y1')
plt.plot(y[2], label = 'y2')
plt.plot(y[3], label = 'y3')
plt.legend()
plt.show()
# ### Predict
#
# ```python
# def predict(self, input):
# """
# Split an audio into 4 different speakers.
#
# Parameters
# ----------
# input: np.array or malaya_speech.model.frame.Frame
#
# Returns
# -------
# result: np.array
# """
# ```
# +
# %%time
y = model.predict(left)
y.shape
# +
# %%time
quantized_y = quantized_model.predict(left)
quantized_y.shape
# -
# ### Results
ipd.Audio(y[0], rate = sr)
ipd.Audio(y[1], rate = sr)
ipd.Audio(y[2], rate = sr)
ipd.Audio(y[3], rate = sr)
# ### Quantized results
ipd.Audio(quantized_y[0], rate = sr)
ipd.Audio(quantized_y[1], rate = sr)
ipd.Audio(quantized_y[2], rate = sr)
ipd.Audio(quantized_y[3], rate = sr)
| example/multispeaker-separation-wav/multispeaker-separation-wav.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Рассмотрим задачу поиска мошенничества с кредитными картами
#
# 
#
# ### Dataset скачиваем отсюда - https://www.kaggle.com/dalpozz/creditcardfraud/data
# #### Пример решения задачи - https://www.kaggle.com/matheusfacure/semi-supervised-anomaly-detection-survey
# #### Другие примеры решения задачи - https://www.kaggle.com/dalpozz/creditcardfraud/kernels
# -------
# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.metrics import fbeta_score, precision_score, recall_score, confusion_matrix
import itertools
from sklearn.model_selection import train_test_split
plt.style.use('ggplot')
# -
# Helper functions
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as a heatmap with per-cell values.

    Parameters
    ----------
    cm : np.ndarray
        Square confusion matrix (rows = true labels, cols = predicted labels).
    classes : sequence of str
        Tick labels for both axes.
    normalize : bool
        If True, display row-normalized proportions instead of raw counts.
        (The parameter existed before but was silently ignored.)
    title : str
        Figure title.
    cmap : matplotlib colormap
        Colormap for the heatmap.
    """
    if normalize:
        # normalize each row so cells show the fraction of that true class
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)
    # choose text colour for contrast against the cell background
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f') if normalize else cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# +
# Load data
dataset = pd.read_csv('data/creditcard.csv')
dataset['Amount'] = np.log(dataset['Amount'] + 1)
dataset['Time'] = np.log(dataset['Time'] + 1)
normal = (dataset['Class'] == 0).sum()
fraud = (dataset['Class'] == 1).sum()
print('Normal transactions: {}'.format(normal))
print('Fraud transactions: {}'.format(fraud))
print('% of fraud = {:.4f}'.format(100 * fraud / (normal + fraud)) + ' its less than < 1%')
# +
normal = dataset[dataset['Class'] == 0]
train, valid = train_test_split(normal, test_size=.2, random_state=42)
valid, test = train_test_split(valid, test_size=.8, random_state=42)
fraud = dataset[dataset['Class'] == 1]
valid_fr, test_fr = train_test_split(fraud, test_size=.8, random_state=42)
train = train.reset_index(drop=True)
valid = valid.append(valid_fr).reset_index(drop=True)
test = test.append(test_fr).reset_index(drop=True)
print('Train shape: ', train.shape)
print('Proportion of anomaly in training set: %.4f\n' % train['Class'].mean())
print('Valid shape: ', valid.shape)
print('Proportion of anomaly in validation set: %.4f\n' % valid['Class'].mean())
print('Test shape:, ', test.shape)
print('Proportion of anomaly in test set: %.4f\n' % test['Class'].mean())
# -
# ------
# ## Соберем автоэнкодер в Tensorflow
import tensorflow as tf
tf.reset_default_graph()
tf.set_random_seed(2)
# +
batch_size = 10000
n_visible = train.drop('Class', axis=1).values.shape[1]
n_hidden1 = 27
n_hidden2 = 16
n_hidden3 = 2
learning_rate = 0.01
# -
# Placehoder node - Input for data
X_tf = tf.placeholder("float", [None, n_visible], name='X')
def autoencoder(X_tf):
    """Symmetric tanh autoencoder; returns (reconstruction Z, 2-D code Y)."""
    # encoder: n_visible -> 27 -> 16 -> 2
    Y = tf.layers.dense(inputs=X_tf, units=n_hidden1, activation=tf.nn.tanh)
    Y = tf.layers.dense(inputs=Y, units=n_hidden2, activation=tf.nn.tanh)
    Y = tf.layers.dense(inputs=Y, units=n_hidden3, activation=tf.nn.tanh)
    # decoder: 2 -> 16 -> 27 -> n_visible, mirroring the encoder.
    Z = tf.layers.dense(inputs=Y, units=n_hidden2, activation=tf.nn.tanh)
    # BUG FIX: this layer used units=n_hidden3 (2 units), squeezing the decoder
    # through a second bottleneck; a symmetric decoder widens to n_hidden1 here.
    Z = tf.layers.dense(inputs=Z, units=n_hidden1, activation=tf.nn.tanh)
    Z = tf.layers.dense(inputs=Z, units=n_visible, activation=tf.nn.tanh)
    return Z, Y
# +
Z, Y = autoencoder(X_tf)
cost = tf.reduce_mean(tf.pow(X_tf - Z, 2))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
scores = tf.abs(X_tf - Z)
# -
X_train = train.drop('Class', axis=1).values
X_val_norm = valid[valid['Class'] == 0].drop('Class', axis=1).values
X_val_anorm = valid[valid['Class'] == 1].drop('Class', axis=1).values
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(15001):
        # cycle through the training data in mini-batches of `batch_size`
        offset = (step * batch_size) % (X_train.shape[0] - batch_size)
        batch_data = X_train[offset:(offset + batch_size), :]
        sess.run(train_op, feed_dict={X_tf: batch_data,})
        if (step % 3000 == 0):
            # compare reconstruction error on normal vs anomalous validation data;
            # a well-trained autoencoder should reconstruct normals much better
            print('\nBatch loss at step %d: %f' % (step,sess.run(cost, feed_dict={X_tf: batch_data})))
            print('Val Norm loss at step %d: %f' % (step,sess.run(cost, feed_dict={X_tf: X_val_norm})))
            print('Val Anorm loss at step %d: %f' % (step,sess.run(cost, feed_dict={X_tf: X_val_anorm})))
    # per-feature reconstruction errors and 2-D encodings for valid/test sets
    y_scores_valid, enc_val = sess.run([scores, Y], feed_dict={X_tf: valid.drop('Class', axis=1).values})
    y_scores_test, enc_test = sess.run([scores, Y], feed_dict={X_tf: test.drop('Class', axis=1).values})
# +
tresholds = np.linspace(0, 6, 100)
scores = []
for treshold in tresholds:
y_hat = (y_scores_valid.mean(axis=1) > treshold).astype(int)
scores.append([recall_score(y_pred=y_hat, y_true=valid['Class'].values),
precision_score(y_pred=y_hat, y_true=valid['Class'].values),
fbeta_score(y_pred=y_hat, y_true=valid['Class'].values, beta=2)])
scores = np.array(scores)
# -
plt.figure(figsize=(20, 10))
plt.plot(tresholds, scores[:, 0], label='$Recall$')
plt.plot(tresholds, scores[:, 1], label='$Precision$')
plt.plot(tresholds, scores[:, 2], label='$F_2$')
plt.ylabel('Score')
plt.xlabel('Threshold')
plt.legend(loc='best')
plt.show()
plt.figure(figsize=(20, 10))
plt.scatter(enc_val[:, 0], enc_val[:, 1], c=valid["Class"].values, alpha=.5)
plt.show()
# +
final_tresh = tresholds[scores[:, 2].argmax()]
y_hat_test = (y_scores_test.mean(axis=1) > final_tresh).astype(int)
# print(y_hat_test.shape)
print('Final threshold: %f' % final_tresh)
print('Test Recall Score: %.3f' % recall_score(y_pred=y_hat_test, y_true=test['Class'].values))
print('Test Precision Score: %.3f' % precision_score(y_pred=y_hat_test, y_true=test['Class'].values))
print('Test F2 Score: %.3f' % fbeta_score(y_pred=y_hat_test, y_true=test['Class'].values, beta=2))
cnf_matrix = confusion_matrix(test['Class'].values, y_hat_test)
plot_confusion_matrix(cnf_matrix, classes=['Normal','Anormal'], title='Confusion matrix')
# -
# --------
| week_09/Autoencoder_fraud_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
t = (1,2,3)  # a plain tuple only supports positional access
t[0]
from collections import namedtuple
# namedtuple creates a lightweight tuple subclass with named fields
dog = namedtuple('Dog', 'age breed name')
dog1 = dog(age=2, breed='Lab', name='Sam')
dog1
dog1.age  # fields are readable by name...
dog1.breed
dog1.name
cat = namedtuple('Cat', 'fur claws name')
cat1 = cat(fur='Fuzzy', claws=False, name='Bella')
cat1
cat1.fur
cat1[2]  # ...and still by index, like an ordinary tuple
| Introduction to Python/Collections Module - namedtuple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1><center>Assessmet 5 on Advanced Data Analysis using Pandas</center></h1>
# ## **Project 2: Correlation Between the GDP Rate and Unemployment Rate (2019)**
# +
import warnings
warnings.simplefilter('ignore', FutureWarning)
import pandas as pd
# -
pip install pandas_datareader
# # Getting the Datasets
# We got the two datasets we will be considering in this project from the Worldbank website. The first dataset, available at http://data.worldbank.org/indicator/NY.GDP.MKTP.CD, lists the GDP of the world's countries in current US dollars, for various years. The use of a common currency allows us to compare GDP values across countries. The second dataset, available at https://data.worldbank.org/indicator/SL.UEM.TOTL.NE.ZS, lists the unemployment rate of the world's countries. The datasets were downloaded as Excel files in June 2021.
GDP_INDICATOR = 'NY.GDP.MKTP.CD'
#below is the first five rows of the first dataset, GDP Indicator.
gdpReset= pd.read_excel("API_NY.GDP.MKTP.CD.xls")
gdpReset.head()
#below is the last five rows of the first dataset, GDP Indicator.
gdpReset.tail()
UNEMPLOYMENT_INDICATORS = 'SL.UEM.TOTL.NE.ZS'
#below is the first five rows of the second dataset, Uemployment Rate Indicator.
UnemployReset= pd.read_excel('API_SL.UEM.TOTL.NE.ZS.xls')
UnemployReset.head()
#below is the last five rows of the second dataset, Unemployment Rate Indicator.
UnemployReset.tail()
# # Cleaning the data
# Inspecting the data with head() and tail() methods shows that for some countries the GDP and unemploymet rate values are missing. The data is, therefore, cleaned by removing the rows with unavailable values using the drop() method.
gdpCountries = gdpReset[0:].dropna()
gdpCountries
UnemployCountries = UnemployReset[0:].dropna()
UnemployCountries
# # Transforming the data
# The World Bank reports GDP in US dollars and cents. To make the data easier to read, the GDP is converted to millions of British pounds with the following auxiliary functions, using the average 2020 dollar-to-pound conversion rate provided by http://www.ukforex.co.uk/forex-tools/historical-rate-tools/yearly-average-rates..
# +
def roundToMillions(value):
    """Express a raw currency amount in whole millions (rounded)."""
    in_millions = value / 1000000
    return round(in_millions)

def usdToGBP(usd):
    """Convert US dollars to British pounds at the average 2020 rate."""
    rate = 1.284145  # average USD->GBP conversion rate for 2020
    return usd / rate
GDP = 'GDP (£m)'
# convert each country's GDP from US dollars to pounds, then to whole millions
gdpCountries[GDP] = gdpCountries[GDP_INDICATOR].apply(usdToGBP).apply(roundToMillions)
gdpCountries.head()
# -
# The unnecessary columns can be dropped.
COUNTRY = 'Country Name'
headings = [COUNTRY, GDP]
gdpClean = gdpCountries[headings]
gdpClean.head()
# + active=""
# The World Bank reports the unemployment rate with several decimal places. After rounding off, the original column is discarded.
# -
UNEMPLOYMENT = 'Unemploymet Rate'
UnemployCountries[UNEMPLOYMENT] = UnemployCountries[UNEMPLOYMENT_INDICATORS].apply(round)
headings = [COUNTRY, UNEMPLOYMENT]
UnempClean = UnemployCountries[headings]
UnempClean.head()
# # Combining the data
# The tables are combined through an inner join merge method on the common 'Country Name' column.
gdpVsUnemp = pd.merge(gdpClean, UnempClean, on=COUNTRY, how='inner')
gdpVsUnemp.head()
# # Calculating the correlation
# To measure if the unemployment rate and the GDP grow together or not, the Spearman rank correlation coefficient is used.
# +
from scipy.stats import spearmanr
gdpColumn = gdpVsUnemp[GDP]
UnemployColumn = gdpVsUnemp[UNEMPLOYMENT]
(correlation, pValue) = spearmanr(gdpColumn, UnemployColumn)
print('The correlation is', correlation)
if pValue < 0.05:
print('It is statistically significant.')
else:
print('It is not statistically significant.')
# -
# The value shows an indirect correlation, i.e. richer countries tend to have lower unemployment rate. A rise by one percentage point of unemployment will reduce real GDP growth by 0.26 percentage points with a delay of 7 lags. Studies have shown that the higher the GDP growth rate of a country, the higher the employment rate. Thus, resulting to a lower unemployment rate. Besides, a negative or inverse correlation, between two variables, indicates that one variable increases while the other decreases, and vice-versa.
# # Visualizing the Data
# Measures of correlation can be misleading, so it is best to view the overall picture with a scatterplot. The GDP axis uses a logarithmic scale to better display the vast range of GDP values, from a few million to several billion (million of million) pounds.
# %matplotlib inline
gdpVsUnemp.plot(x=GDP, y=UNEMPLOYMENT, kind='scatter', grid=True, logx=True, figsize=(10, 4))
# The plot shows there is no clear correlation: there are some poor countries with a low unemployment rate and very few averagely rich countries with a high employment rate. Hpwever, most extremely rich countries have a low unemployment rate. Besides, countries with around 10 thousand (10^4) to (10^6) million pounds GDP have almost the full range of values, from below 5 to over 10 percentage but there are still some countries with more than 10 thousand (10^5) million pounds GDP with a high unemployment rate.
# Comparing the 10 poorest countries and the 10 countries with the lowest unemployment rate shows that total GDP is a rather crude measure. The population size should be taken into consideration for a more precise definiton of what 'poor' and 'rich' means.
# the 10 countries with lowest GDP
gdpVsUnemp.sort_values(GDP).head(10)
# the 10 countries with the lowest unemployment rate
gdpVsUnemp.sort_values(UNEMPLOYMENT).head(10)
# # Conclusion
# The correlation between real GDP growth and unemployment is very important for policy makers in order to obtain a sustainable rise in living standards. If GDP growth rate is below its natural rate it is indicated to promote employment because this rise in total income will note generate inflationary pressures. In contrast, if the GDP growth is above its natural level, policy makers will decide not to intensively promote the creation of new jobs in order to obtain a sustainable growth rate which will not generate inflation. The correlation coefficient shows that the variables are negatively correlated as predicted by the theory. These values are particularly important for policy makers in order to obtain an optimal relation between unemployment and real GDP growth.
| Anagu Esther WT-21-074/Project2 on Economic Indicators' Correlation (GDP and Unemployment Rate).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
function greeting() {
  console.log('Hello World');
}

// Invoking the function
greeting(); // prints 'Hello World'

// We can add properties to functions like we do with objects
// (fixed: a single `/` is not a JS comment — the original line was a SyntaxError)
greeting.lang = 'English';

// Prints 'English'
console.log(greeting.lang);
# +
#python
def greeting():
    print("Hello World")

greeting()

# Functions are objects, so arbitrary attributes can be attached to them,
# mirroring the JS example above.
greeting.lang = "English"
print(greeting.lang)  # prints 'English' (was print("English"), which ignored the attribute)
# -
function formalGreeting() {
console.log("How are you?");
}function casualGreeting() {
console.log("What's up?");
}function greet(type, greetFormal, greetCasual) {
if(type === 'formal') {
greetFormal();
} else if(type === 'casual') {
greetCasual();
}
}// prints 'What's up?'
greet('casual', formalGreeting, casualGreeting);
# +
#python
def formalGreeting():
    print("How are you")


def casualGreeting():
    print("What`s up?")


def greet(type, greetFormal, greetCasual):
    # dispatch-table version of the if/elif chain from the JS example:
    # only 'formal' and 'casual' are handled; any other type does nothing
    handlers = {'formal': greetFormal, 'casual': greetCasual}
    handler = handlers.get(type)
    if handler is not None:
        handler()


greet('casual', formalGreeting, casualGreeting)
# -
const arr1 = [1, 2, 3];
const arr2 = [];for(let i = 0; i < arr1.length; i++) {
arr2.push(arr1[i] * 2);
}// prints [ 2, 4, 6 ]
console.log(arr2);
# +
#python
arr1 = [1, 2, 3]
[element * 2 for element in arr1]  # evaluates to [2, 4, 6], same as list(map(lambda x: x * 2, arr1))
# +
#Example 2, without higher function
const birthYear = [1975, 1997, 2002, 1995, 1985];
const ages = [];for(let i = 0; i < birthYear.length; i++) {
let age = 2018 - birthYear[i];
ages.push(age);
}// prints [ 43, 21, 16, 23, 33 ]
console.log(ages);
# +
#python
birthYear = [1975, 1997, 2002, 1995, 1985]
ages = map(lambda year: 2018 - year, birthYear)  # lazy map object, like the JS .map above
print(*ages)  # unpacking consumes the map and prints: 43 21 16 23 33
# -
birthYear = [1975, 1997, 2002, 1995, 1985]
ages = []
for year in birthYear:
    # append each computed age; the original `ages = 2018 - birthYear[x]`
    # rebound the name every pass, discarding the list and all but the last age
    ages.append(2018 - year)
print(ages)  # [43, 21, 16, 23, 33], matching the JS push-then-print example
# examples 1
const persons = [
{ name: 'Peter', age: 16 },
{ name: 'Mark', age: 18 },
{ name: 'John', age: 27 },
{ name: 'Jane', age: 14 },
{ name: 'Tony', age: 24},
];const fullAge = [];for(let i = 0; i < persons.length; i++) {
if(persons[i].age >= 18) {
fullAge.push(persons[i]);
}
}console.log(fullAge);
# +
#python
# Build the same lookup in one literal instead of key-by-key assignment;
# insertion order (and therefore the printed repr) is identical.
persons = {
    "Peter": {"name": "Peter", "age": 16},
    "Mark": {"name": "Mark", "age": 18},
    "John": {"name": "John", "age": 27},
    "Jane": {"name": "Jane", "age": 14},
    "Tony": {"name": "Tony", "age": 24},
}

print(persons)
# -
| novice/02-01/.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Individual Project Theme 2
# ## Hangman Game
# **How to use this notebook and complete the project?**
#
# 1. You need to answer all required questions by answering in the markdown cell.
# 2. Read the instruction and fill in the #todo sections.
# 3. Run all the cells to store the results of each codes.
# 4. Save this notebook by clicking on `File`, then click `Download as`, choose `HTML`.
# 5. Push this notebook to your GitHub reporitory by creating a new repository, named `AIP_IndividualProject`. Set this repository as `Private` until the day of your submission, which is on *29th June 2019, 1.30 PM*.
# **Instructions:**
#
# Let's build a game: Hangman! Have you ever heard about Hangman game?
# It's a game to guess a word letter-by-letter, but, if they make too many wrong guesses, they'll lose.
#
# Now, it's the time for you to make a hangman game where player can player with a computer.
#
# **Question 1**:
#
# Draw a flowchart to write the program of this game. You may draw a flowchart by using *powerpoint*, then save it as a picture in `.jpg` or `.png`. Then, use this command to the *Answer* column below.
# `<img src=_____.jpg>`
# **Answer**:
# <img src= "Flowchart_hangman.png" height="640" width="480">
# **Step 1**:
#
# Given a list of words, print a list of words at random.
# +
from random import choice
word = choice(["pretty", "old", "young", "nice", "kind", "bad", "weak", "strong", "lazy", "weird"])
print(word)
# -
# **Step 2**:
#
# #TODO
#
# Write a program to create the hangman game.
# +
# initialization
guesses = 3  # number of wrong guesses allowed
count = 0  # wrong guesses used so far
win = False
# letters the player has tried so far
guessed_letters = []
# the word with unguessed letters masked as '_'
tempword = ["_" for _ in word]
print(tempword)
print()

while count < guesses:
    guess = input("guess a letter: ")
    guessed_letters.append(guess)
    if guess in word:
        # reveal every occurrence of the guessed letter
        for i in range(len(word)):
            if word[i] == guess:
                tempword[i] = guess
        print("Correct!")
    else:
        # only wrong guesses cost a try; the original charged correct guesses
        # too (count += 1 ran unconditionally) while displaying "Guesses left"
        # as if they were free
        count += 1
        print("Wrong!")
    print('---------------------------------')
    print(tempword)
    print("Guessed letters: ", guessed_letters)
    # single consistent countdown (the original printed 3 - count on a correct
    # guess but 2 - count on a wrong one)
    print("Guesses left: ", guesses - count)
    print('---------------------------------')
    if '_' not in tempword:
        # check immediately after the update so a win on the final allowed
        # guess is still reported (the original only checked at the top of the
        # next iteration, which the loop condition could prevent)
        print('YOU WIN!')
        win = True
        break

print("The word is", word)
# -
# **BONUS**
#
# Add another player, which is Player 2, make Player 1 and Player 2 play in turns to guess the word.
| individual-projects-solutions/Hangman_Game_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit
# metadata:
# interpreter:
# hash: b035f594501fd99b0ba4fdbf39dd3ef4592c3539e4ec00f8ba05c88e0c5143ba
# name: python3
# ---
# # Sentiment analysis with Textblob-FR
# +
import sys
from textblob import Blobber
from textblob_fr import PatternTagger, PatternAnalyzer
# -
# ## Fonction
# +
tb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())
def get_sentiment(input_text):
    """Analyse *input_text* with textblob-fr and print a one-line summary
    of its polarity (positive/negative/neutral) and subjectivity."""
    polarity, subjectivity = tb(input_text).sentiment
    magnitude = f"{100*abs(polarity):.0f}"
    if polarity > 0:
        sent = f"{magnitude}% positive"
    elif polarity < 0:
        sent = f"{magnitude}% negative"
    else:
        sent = "neutral"
    # subjectivity of exactly 0 is reported as perfectly objective
    fact = f"{100*subjectivity:.0f}% subjective" if subjectivity > 0 else "perfectly objective"
    print(f"This text is {sent} and {fact}.")
# -
# ## Analyser le sentiment d'une phrase
# + tags=[]
get_sentiment("En résumé, au point de vue de l'hygiène et de la facilité des abords, l'emplacement désigné pour l'établissement du nouveau marché laisse tout à désirer.")
# + tags=[]
get_sentiment("Il y a eu de l'exagération dans les inconvéniens que l'on a supposés au marché de la Madeleine sous le rapport de l'hygiène.")
# -
get_sentiment("La paroisse du Béguinage laisse moins à désirer sous le rapport de l'hygiène : les rues y sont en général assez larges et la ventilation v est, à quelques exceptions près, assez bien établie.")
get_sentiment("Il y a eu de l'exagération dans les inconvéniens que l'on a supposés au marché de la Madeleine sous le rapport de l'hygiène.")
get_sentiment("Je ne nie pas que, dans la classe ouvrière, on rencontre parfois des gens qui ne respectent pas les objets que l'on met à leur disposition, mais cette constatation peut se faire dans les autres classes de la société également et je suis persuadé que lorsque les habitants de ces maisons ouvrières sauront que ces appareils sont placés pour leur donner plus d'hygiène, ils sauront en user avec discernement.")
Mesdames et Messieurs, i l y aurait lieu de déclarer l'urgence pour une demande de crédit destiné à permettre à la Ville de participer à l'exposition de technique sanitaire ét d'hygiène urbaine de Lyon.
| module3/s4_sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Tutorials/GlobalSurfaceWater/1_water_occurrence.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Tutorials/GlobalSurfaceWater/1_water_occurrence.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Tutorials/GlobalSurfaceWater/1_water_occurrence.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API and geemap
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
# The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
# +
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('Installing geemap ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# -
import ee
import geemap
# ## Create an interactive map
# The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
Map = geemap.Map(center=[40,-100], zoom=4)
Map
# ## Add Earth Engine Python script
# +
# Add Earth Engine dataset
###############################
# Asset List
###############################
gsw = ee.Image('JRC/GSW1_1/GlobalSurfaceWater')
occurrence = gsw.select('occurrence')
###############################
# Constants
###############################
# Occurrence is rendered on a 0-100% scale: red (rarely water) -> blue (permanent water).
VIS_OCCURRENCE = {
'min':0,
'max':100,
'palette': ['red', 'blue']
}
# Ramp used when drawing the binary water mask.
VIS_WATER_MASK = {
'palette': ['white', 'black']
}

###############################
# Calculations
###############################

# Create a water mask layer keeping only pixels with >90% occurrence; selfMask()
# masks out all other pixels (NOTE(review): masked pixels render as transparent,
# not opaque as the original comment claimed — confirm against the EE docs).
water_mask = occurrence.gt(90).selfMask()
###############################
# Initialize Map Location
###############################
# Uncomment one of the following statements to center the map.
# Map.setCenter(-90.162, 29.8597, 10) # New Orleans, USA
# Map.setCenter(-114.9774, 31.9254, 10) # Mouth of the Colorado River, Mexico
# Map.setCenter(-111.1871, 37.0963, 11) # Lake Powell, USA
# Map.setCenter(149.412, -35.0789, 11) # Lake George, Australia
# Map.setCenter(105.26, 11.2134, 9) # Mekong River Basin, SouthEast Asia
# Map.setCenter(90.6743, 22.7382, 10) # Meghna River, Bangladesh
# Map.setCenter(81.2714, 16.5079, 11) # Godavari River Basin Irrigation Project, India
# Map.setCenter(14.7035, 52.0985, 12) # River Oder, Germany & Poland
# Map.setCenter(-59.1696, -33.8111, 9) # Buenos Aires, Argentina
Map.setCenter(-74.4557, -8.4289, 11) # Ucayali River, Peru
###############################
# Map Layers
###############################
Map.addLayer(occurrence.updateMask(occurrence.divide(100)), VIS_OCCURRENCE, "Water Occurrence (1984-2018)")
Map.addLayer(water_mask, VIS_WATER_MASK, '90% occurrence water mask', False)
# -
# ## Display Earth Engine data layers
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| Tutorials/GlobalSurfaceWater/1_water_occurrence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# <h1><center>DOTA 2</center></h1>
# Пересчитаем число ног у героев игры Dota2! Сгруппируйте героев из датасэта по числу их ног (колонка legs), и заполните их число в задании ниже.
df = pd.read_csv('https://stepik.org/media/attachments/course/4852/dota_hero_stats.csv', index_col=0)
df.head()
df.groupby('legs').aggregate({'legs': 'count'})
df[df.legs == 6]
# Продолжим исследование героев Dota2. Сгруппируйте по колонкам attack_type и primary_attr и выберите самый распространённый набор характеристик.
df.groupby(['attack_type', 'primary_attr']).aggregate({'id': 'count'})
# <br /><br />
# <h1><center>Лупа и Пупа</center></h1>
# К нам поступили данные из бухгалтерии о заработках Лупы и Пупы за разные задачи! Посмотрите у кого из них больше средний заработок в различных категориях (колонка Type) и заполните таблицу, указывая исполнителя с большим заработком в каждой из категорий.
df_pupa = pd.read_csv('https://stepik.org/media/attachments/course/4852/accountancy.csv', index_col=0)
df_pupa.head(5)
df_pupa.groupby(['Type', 'Executor']).mean()
# <br /><br />
# <h1><center>Водоросли</center></h1>
# <NAME> изучает метаболом водорослей, и получил такую табличку. В ней он записал вид каждой водоросли, её род (группа, объединяющая близкие виды), группа (ещё одно объединение водорослей в крупные фракции) и концентрации анализируемых веществ.
#
# Помогите Ростиславу найти среднюю концентрацию каждого из веществ в каждом из родов (колонка genus)! Для этого проведите группировку датафрэйма, сохранённого в переменной concentrations, и примените метод, сохранив результат в переменной mean_concentrations.
concentrations = pd.read_csv('http://stepik.org/media/attachments/course/4852/algae.csv')
concentrations.head()
# Mean concentration of every analysed metabolite within each genus;
# selecting the columns then calling .mean() is equivalent to the
# per-column {'col': 'mean'} aggregation dict.
metabolites = ['sucrose', 'alanin', 'citrate', 'glucose', 'oleic_acid']
mean_concentrations = concentrations.groupby('genus')[metabolites].mean()
mean_concentrations
concentrations[concentrations.genus == 'Fucus'].describe()
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
concentrations.groupby('group').describe()
| notebooks/1.6_grouping_tasks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # WorldBank - Richest countries top10
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/WorldBank/WorldBank_Richest_countries_top10.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
# + [markdown] papermill={} tags=[]
# **Tags:** #worldbank #opendata
# + [markdown] papermill={} tags=["naas"]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)
# + [markdown] colab_type="text" id="GAqP42SS4yPD" papermill={} tags=[]
# **Goal**
#
# Top 10 richest regions & countries in GDP per capita (wealth created per habitant) and in GDP current (total wealth created by the country)
#
# **Data**
#
# GDP CURRENT & GDP PER CAPITA by countries, agregated by region
#
# **Sources**
#
# * World Bank national accounts data
# * OECD National Accounts data files
#
# **Notes**
#
# The top 10 for GDP current is including the G8, should the European Union be included in this ranking, it would come up 2nd biggest economy after the USA.
#
# In the top 10 for GDP per capita, the ranking include smaller countries, only the USA remains in this ranking from the GDP current ranking.
#
#
# **Pitch**
#
# https://drive.google.com/file/d/1wGo9aI6mXS_2AbmnNlLoSJ9bGxkXmDhq/view
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Import libraries
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="7hqFGyx_377x" outputId="36fb8727-3b40-43b7-e7c6-8b9e47262410" papermill={} tags=[]
import pandas as pd
from pandas_datareader import wb
import plotly.graph_objects as go
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] colab_type="text" id="yeUkoHbGTNVy" papermill={} tags=[]
# ### Get the association between the country and the region
# + colab={"base_uri": "https://localhost:8080/", "height": 402} colab_type="code" id="NrJlRC2cREYP" outputId="4f0e0224-fac2-4090-8bda-295b5db66b26" papermill={} tags=[]
# Show floats without decimals when displaying the frames below.
pd.options.display.float_format = '{: .0f}'.format
countries = wb.get_countries()
# Keep only the country name and its World Bank region.
countries = countries[['name', 'region']]
countries
# + [markdown] colab_type="text" id="RAUtg6PuTTGn" papermill={} tags=[]
# ### Get indicators
#
# + colab={"base_uri": "https://localhost:8080/", "height": 402} colab_type="code" id="TdXvSnfB4IMc" outputId="316286d1-03e6-4ede-87bf-01aafe661a7d" papermill={} tags=[]
# NY.GDP.PCAP.CD = GDP per capita (current US$); NY.GDP.MKTP.CD = GDP (current US$).
indicators = wb.download(indicator=['NY.GDP.PCAP.CD', 'NY.GDP.MKTP.CD'], country='all', start=2018, end=2018)
indicators = indicators.reset_index()
indicators = indicators[['country', 'NY.GDP.PCAP.CD', 'NY.GDP.MKTP.CD']]
indicators.columns = ['country', 'GDP_PER_CAPITA', 'CURRENT_GDP']
indicators
# + [markdown] colab_type="text" id="QC222rrLUlXV" papermill={} tags=[]
# ### Format a master table
#
# 1. Associate countries with regions
# 1. Clean up the data
# 1. Group rows by columns
# + colab={"base_uri": "https://localhost:8080/", "height": 229} colab_type="code" id="CFTaHs5JQlu_" outputId="1212e73b-f882-43e5-f288-e6d121cb89dd" papermill={} tags=[]
master_table = pd.merge(indicators, countries, left_on='country', right_on='name')
# Drop World Bank aggregate rows (e.g. "World", income groups) and rows without data.
master_table = master_table[master_table['region'] != 'Aggregates']
master_table = master_table[(master_table['GDP_PER_CAPITA'] > 0) | (master_table['CURRENT_GDP'] > 0)]
master_table = master_table.fillna(0)
# Long format: one row per (region, country, indicator).
master_table = pd.melt(master_table, id_vars=['region', 'country'], value_vars=['GDP_PER_CAPITA', 'CURRENT_GDP'], var_name='INDICATOR', value_name='VALUE')
master_table = master_table.set_index(['region', 'country', 'INDICATOR'])
master_table = master_table.sort_index()
master_table
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] colab_type="text" id="CBCd2nU9T0nn" papermill={} tags=[]
# ### Visualize data with a chart
# + colab={"base_uri": "https://localhost:8080/", "height": 229} colab_type="code" id="Pmuhz88cTyya" outputId="5d53946d-ac8a-4207-c363-c6eee05dc9e5" papermill={} tags=[]
table = master_table.reset_index()
# Four rankings: region-level means and country-level top 10, for both indicators.
gdp_per_capita_per_region = table[table['INDICATOR'] == 'GDP_PER_CAPITA'][['region', 'VALUE']].groupby('region').mean().sort_values('VALUE', ascending=False)
current_gdp_per_region = table[table['INDICATOR'] == 'CURRENT_GDP'][['region', 'VALUE']].groupby('region').mean().sort_values('VALUE', ascending=False)
gdp_per_capita_per_country = table[table['INDICATOR'] == 'GDP_PER_CAPITA'][['country', 'VALUE']].sort_values('VALUE', ascending=False).head(10)
current_gdp_per_country = table[table['INDICATOR'] == 'CURRENT_GDP'][['country', 'VALUE']].sort_values('VALUE', ascending=False).head(10)
# One bar trace per ranking; only the first is visible, the buttons below toggle the rest.
data = [
    go.Bar(x=gdp_per_capita_per_region.index, y=gdp_per_capita_per_region['VALUE'], text=gdp_per_capita_per_region['VALUE'], textposition='outside'),
    go.Bar(x=current_gdp_per_region.index, y=current_gdp_per_region['VALUE'], text=current_gdp_per_region['VALUE'], textposition='outside', visible=False),
    go.Bar(x=gdp_per_capita_per_country['country'], y=gdp_per_capita_per_country['VALUE'], text=gdp_per_capita_per_country['VALUE'], textposition='outside', visible=False),
    go.Bar(x=current_gdp_per_country['country'], y=current_gdp_per_country['VALUE'], text=current_gdp_per_country['VALUE'], textposition='outside', visible=False),
]
layout = go.Layout(
    title='Top 10 richest regions & countries',
    margin = dict(t = 60, b = 150),
    # Each button switches which of the four traces is visible.
    updatemenus=list([
        dict(showactive=True, type="buttons", active=0, buttons=[
            {'label': 'GDP / Capita per region', 'method': 'update', 'args': [{'visible': [True, False, False, False]}]},
            {'label': 'Current GDP per region', 'method': 'update', 'args': [{'visible': [False, True, False, False]}]},
            {'label': 'GDP / Capita per country', 'method': 'update', 'args': [{'visible': [False, False, True, False]}]},
            {'label': 'Current GDP per country', 'method': 'update', 'args': [{'visible': [False, False, False, True]}]}
        ])
    ]),
    annotations=[dict(
        text = 'Updated in 2018 from The World Bank',
        showarrow = False,
        xref = 'paper', x = 1,
        yref = 'paper', y = -0.4)]
)
go.Figure(data, layout)
| WorldBank/WorldBank_Richest_countries_top10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Visualizing Chipotle's Data
# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
# set this so the graphs open internally
# %matplotlib inline
# -
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
# Read the tab-separated Chipotle orders dataset straight from GitHub.
df=pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', sep='\t')
# ### Step 3. Assign it to a variable called chipo.
# NOTE(review): the notebook keeps the data in `df` rather than `chipo`.
df.head()
# ### Step 4. See the first 10 entries
df.head(10)
# ### Step 5. Create a histogram of the top 5 items bought
# Total quantity sold per item, then keep the 5 best sellers.
dfsel=df.groupby('item_name').quantity.sum().reset_index()
dfsel=dfsel.sort_values('quantity', ascending=False).head(5)
dfsel.columns
# +
dfsel.plot(x='item_name', y='quantity', kind='bar')
plt.xlabel('Items')
plt.ylabel('Quantity')
plt.title('Most ordered Chipotle\'s Items')
plt.show()
# -
# ### Step 6. Create a scatterplot with the number of items orderered per order price
# #### Hint: Price should be in the X-axis and Items ordered in the Y-axis
# +
# Strip the leading '$' so prices can be parsed as floats
# (astype(float) tolerates the leading space left by the replacement).
df['item_price']= df['item_price'].replace('\$', ' ', regex= True).astype(float)
# Per-order totals. NOTE(review): summing every column also concatenates the
# string columns on older pandas; newer pandas may need numeric_only=True.
dfsel=df.groupby('order_id').sum()
plt.scatter(x= dfsel['item_price'], y= dfsel['quantity'], c= 'yellow')
plt.xlabel('Order Price')
plt.ylabel('Items ordered')
plt.title('Number of items ordered per order price')
# -
# ### Step 7. BONUS: Create a question and a graph to answer your own question.
| 07_Visualization/[ ] Chipotle/[ ] Chipotle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Cell Type proportions as a function of distance
#
# **Date**: 2020-09-10<br>
# **Author**: <NAME><br>
# **Description**:<br>
#
# Similar to `vein-analysis-ipynb` but using cell type proportions mapped by `stereoscope` rather than normalized expression values, in order to asses how cell type proportions varies as a function of the distance to veins.
#
#
# ## Setup
#
# Load data the necessary packages and data for the analysis, also specify certain constants which will be used throughout the analysis.
# %load_ext autoreload
# %autoreload 2
# +
import pandas as pd
import numpy as np
import anndata as ad
import re
import datetime
import os
import os.path as osp
from os import listdir
import matplotlib.pyplot as plt
from functools import reduce
from hepaquery import utils as ut
import hepaquery.visual as viz
# -
# ## Set global parameters and paths
# +
SAVE_RESULTS = True
# Run tag built from the current timestamp with separators removed, e.g.
# "2020-09-10 12:34:56.789012" -> "20200910123456789012-analysis".
# The original pattern ":| |-|\\.|" ended with an empty alternative, which
# matched the empty string at every position (a no-op only because the
# replacement is empty); the character class below is equivalent and exact.
TAG = re.sub(r"[: .\-]", "", str(datetime.datetime.today()))
TAG = TAG + "-analysis"
# Repository root = parent of the current working directory.
REPO_DIR = osp.dirname(osp.abspath(os.getcwd()))
# Absolute, machine-specific path to the AnnData inputs -- TODO: parameterize.
DATA_DIR = osp.join("/Users/franziskahildebrandt/Desktop/PhDprojectFranziskaHildebrandt/ST-liver/uninfectedLiver-sequenced/ankarliver/data/h5ad-cca")
PROP_DIR = osp.join(REPO_DIR,"res/stereoscope-res/")
RESULTS_DIR = osp.join(REPO_DIR,"res",TAG)
if not osp.exists(RESULTS_DIR):
    os.mkdir(RESULTS_DIR)
# Pixel-to-micrometer conversion used when plotting distances.
SCALE_FACTOR = 2.8
# Map sample name -> path for every .h5ad file found in DATA_DIR.
PTHS = list(filter(lambda x: x.split(".")[-1] == "h5ad", os.listdir(DATA_DIR)))
PTHS = {p.split(".")[0]: osp.join(DATA_DIR, p) for p in PTHS}
# -
# ## Load data
# +
# Samples to leave out of the analysis; empty the list to keep everything.
exclude = ["CN65-C1","CN65-C2","CN16-D1", "CN16-E1"] # uncomment if you want to exclude
#exclude = []
data_set = {n:ad.read_h5ad(p) for n,p in PTHS.items()}
for ex in exclude:
    data_set.pop(ex)
data_set
# +
import glob
# stereoscope writes one W*.tsv proportion matrix per sample directory.
prop_files = glob.glob(osp.join(PROP_DIR,"*/W*tsv"))
read_file = lambda f: pd.read_csv(f, sep = '\t', header = 0,index_col=0)
# Sample directories use "_" where the AnnData names use "-"; normalize the keys.
props = {osp.basename(osp.dirname(p)).replace("_","-"):read_file(p) for p in prop_files}
# -
# ### Merge vein proximity information and proportions
# Attach the stereoscope proportions to each AnnData object, restricted to
# the spots present in both; samples without a proportion file are dropped.
for k in data_set.keys():
    if k in props.keys():
        inter = data_set[k].obs.index.intersection(props[k].index)
        _adata = data_set[k][inter,:]
        _prop = props[k].loc[inter,:]
        # Scale each cell type by its per-sample maximum proportion.
        _prop /= _prop.max(axis = 0)
        _adata.obsm["proportions"] = _prop
        data_set[k] = _adata
    else:
        data_set[k] = None
data_set = {k:v for k,v in data_set.items() if isinstance(v,ad.AnnData)}
# ## Analysis : Proportion by distance
#
# Model cell type proportion as a function of the distance to the nearest gene. We use loess regression to get an approximation of the curve that describe the trends in the data. Envelopes represent 1 standard error.
# +
# set vein type to plot
vein_type = "all"
# set to true if plots should share y-axis
share_y = False
# set within which distance from the vein
# gene expresssion should be assessed; could
# be same as RADIUS
dist_thrs = 142
# which genes to be assessed
cell_types = dict(central = "Pericentral (PC) hepatocytes(Liver)",portal = "Periportal (PP) hepatocyte(Liver)")
# distance values to use
# One color scheme per vein type so the two fitted curves are distinguishable.
colormap = dict(central = {"envelope":"red",
                           "fitted":"red",
                           "background":"red"},
                portal = {"envelope":"blue",
                          "fitted":"blue",
                          "background":"blue"},
                )
fig,ax = viz.get_figure(len(cell_types),
                        n_cols = 5,
                        side_size = 5,
                        )
fig.set_facecolor("white")
# iterate over each type
for k,(cell_type,name) in enumerate(cell_types.items()):
    for vt in ["central","portal"]:
        # Pool (distance, proportion) pairs across all samples.
        xs = np.array([])
        ys = np.array([])
        for data in data_set.values():
            if name not in data.obsm["proportions"].columns:
                print("can't find {}".format(name))
                continue
            min_dist = data.obsm["vein_distances"]["dist_type_" + vt].values
            xs = np.append(xs,min_dist)
            ys = np.append(ys,data.obsm["proportions"][name].values)
        # Loess-style fit of proportion vs. distance, truncated at dist_thrs.
        smoothed = ut.smooth_fit(xs,ys,dist_thrs=dist_thrs)
        viz.plot_expression_by_distance(ax[k],
                                        data = smoothed,
                                        curve_label = vt,
                                        color_scheme = colormap[vt],
                                        include_background = False,
                                        feature = name,
                                        distance_scale_factor = SCALE_FACTOR,
                                        feature_type = "Cell Type",
                                        )
    ax[k].set_ylabel("Proportion Value")
    ax[k].set_xlabel("Distance to vein [" + r"$\mu m$" + "]")
    ax[k].legend()
fig.tight_layout()
if SAVE_RESULTS:
    fig.savefig(osp.join(RESULTS_DIR,
                         "proportion-by-distance.svg"),
                facecolor = "white",
                dpi = 300)
plt.show()
# -
| scripts/proportion-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
# %matplotlib inline
from ipywidgets import interact, interactive, fixed
# +
#reading in an image
image = mpimg.imread('videotest.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image,cmap='gray') # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# +
import math
def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def convert_to_hsv(img):
    """Convert an RGB image to HSV color space (used for color thresholding)."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold): ## """Applies the Canny transform"""
    """Apply the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth the image with a kernel_size x kernel_size Gaussian kernel."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Mask *img* so only the polygon described by ``vertices`` is kept.

    Pixels outside the polygon are zeroed out. ``vertices`` must be a
    numpy array of integer points, as expected by cv2.fillPoly.
    """
    # Fill value: white in every channel for multi-channel images,
    # plain 255 for single-channel (e.g. grayscale / edge) images.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon onto an all-black mask of the same shape,
    # then AND it with the image so everything outside becomes black.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    Average and extrapolate Hough segments into two lane lines and draw
    them on ``img`` in place.

    Segments are split by the sign of their slope (left vs. right lane),
    each group's slopes and intercepts are averaged, and one line per
    group is extrapolated from the bottom of the image up to the highest
    detected point.

    NOTE: the ``color``/``thickness`` parameters are intentionally
    overridden below so that every caller gets the same lane styling
    (this preserves the original behavior).
    """
    # LINE DISPLAY PARAMETERS (deliberately override the arguments)
    color = [243, 105, 14]
    thickness = 12
    # LINE PARAMETERS
    SLOPE_THRESHOLD = 0.3   # reject near-horizontal segments (noise)
    Y_MIN_ADJUST = 15       # pull the drawn lines slightly below the horizon
    positive_slopes = []
    negative_slopes = []
    positive_intercepts = []
    negative_intercepts = []
    # y grows downward, so y_max is the bottom edge of the image.
    y_max = img.shape[0]
    y_min = img.shape[0]
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Bug fix: a perfectly vertical segment (x1 == x2) made the
            # slope division raise ZeroDivisionError; skip it instead.
            if x2 == x1:
                continue
            # calculate slope/intercept for the segment
            slope = (y2 - y1) / (x2 - x1)
            intercept = y2 - (slope * x2)
            # for negative slope (left lane)
            if slope < 0.0 and slope > -math.inf and abs(slope) > SLOPE_THRESHOLD:
                negative_slopes.append(slope)
                negative_intercepts.append(intercept)
            # for positive slope (right lane)
            elif slope > 0.0 and slope < math.inf and abs(slope) > SLOPE_THRESHOLD:
                positive_slopes.append(slope)
                positive_intercepts.append(intercept)
            y_min = min(y_min, y1, y2)
    y_min += Y_MIN_ADJUST
    # Extrapolate and draw one line per slope group. The means are now
    # computed inside the guards: the original called np.mean on possibly
    # empty lists, producing nan and a "mean of empty slice" warning.
    if len(positive_slopes) > 0:
        slope_mean = np.mean(positive_slopes)
        intercept_mean = np.mean(positive_intercepts)
        x_max = int((y_max - intercept_mean) / slope_mean)
        x_min = int((y_min - intercept_mean) / slope_mean)
        cv2.line(img, (x_min, y_min), (x_max, y_max), color, thickness)
    if len(negative_slopes) > 0:
        slope_mean = np.mean(negative_slopes)
        intercept_mean = np.mean(negative_intercepts)
        x_max = int((y_max - intercept_mean) / slope_mean)
        x_min = int((y_min - intercept_mean) / slope_mean)
        cv2.line(img, (x_min, y_min), (x_max, y_max), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    # Probabilistic Hough transform: array of (x1, y1, x2, y2) segments.
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    # Draw the averaged/extrapolated lane lines on a black 3-channel canvas.
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    Blend the line image ``img`` onto ``initial_img``:

        initial_img * α + img * β + γ

    ``img`` is the output of hough_lines() (lines on a black background);
    ``initial_img`` should be the image before any processing.
    NOTE: initial_img and img must be the same shape!
    """
    img = np.uint8(img)
    # Promote a single-channel line image to 3 channels (lines in the red
    # channel) so it can be blended with the color frame.
    # Bug fix: the original used "len(img.shape) is 2" -- an identity
    # comparison against an int literal, which is implementation-dependent
    # and a SyntaxWarning on modern Python. Use == for value equality.
    if len(img.shape) == 2:
        img = np.dstack((img, np.zeros_like(img), np.zeros_like(img)))
    return cv2.addWeighted(initial_img, α, img, β, γ)
def color_mask(hsv,low,high):
    # Takes in low and high values and returns mask
    # Binary mask: 255 where each HSV pixel lies inside [low, high], else 0.
    mask = cv2.inRange(hsv, low, high)
    return mask
# -
# Sanity check: grayscale conversion of the test image (result not kept).
grayscale(image)
import os
# List the frames extracted from the test video (see the cells at the bottom).
os.listdir("data/")
# # Build a Lane Finding Pipeline
# +
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
#GAUSSIAN BLUR PARAMETERS
KERNEL_SIZE = 3
#CANNY EDGE DETECTION PARAMETERS
LOW_THRESHOLD = 75
HIGH_THRESHOLD = 150
#REGION OF INTEREST PARAMETERS
# Fractions / pixel offsets shaping the trapezoidal mask built in
# get_region_of_interest_vertices below.
H_CONST = 0.08
RIGHT_LANE_SLOPE = 14/32
RIGHT_LANE_CONST = 400
TOP_SHIFT_H = 40
TOP_SHIFT_V = 40
#HOUGH LINES PARAMETERS
RHO = 3.5
THETA = np.pi/180
MIN_VOTES = 30
MIN_LINE_LEN = 5
MAX_LINE_GAP= 25
# NOTE(review): LOWER_EDGE is defined but never used in this notebook.
LOWER_EDGE = 800
def get_region_of_interest_vertices(img):
    """Build the trapezoidal region-of-interest polygon for a frame.

    The polygon spans the bottom of the image and narrows to a short
    edge just below the horizon; its corners are derived from the
    module-level tuning constants (H_CONST, RIGHT_LANE_SLOPE, ...).
    """
    height = img.shape[0]
    width = img.shape[1]
    top_y = height / 2 + TOP_SHIFT_V
    corners = [
        (width / 9 - H_CONST * width, height),                 # bottom left
        (width / 2 - TOP_SHIFT_H, top_y),                      # top left
        (width / 2 + TOP_SHIFT_H, top_y),                      # top right
        (width - (RIGHT_LANE_SLOPE * width - RIGHT_LANE_CONST)
         + (H_CONST * width), height),                         # bottom right
    ]
    return np.array([corners], dtype=np.int32)
def build_pipeline(img, mode = 'complete'):
    """Run the lane-detection pipeline on one RGB frame.

    mode='canny' returns the masked Canny edge image (useful for
    debugging); any other value returns the frame with the detected
    lane lines blended on top.
    """
    # convert the image to HSV for color thresholding
    hsv_img = convert_to_hsv(img)
    #smoothen the image
    smooth_img = gaussian_blur(hsv_img, KERNEL_SIZE)
    # Define color ranges and apply color mask
    # HSV thresholds for yellow and white lane paint (hand-tuned for these videos).
    yellow_hsv_low = np.array([ 0, 100, 100])
    yellow_hsv_high = np.array([ 50, 255, 255])
    white_hsv_low = np.array([ 20, 0, 180])
    white_hsv_high = np.array([ 255, 80, 255])
    mask_yellow = color_mask(smooth_img,yellow_hsv_low,yellow_hsv_high)
    mask_white = color_mask(smooth_img,white_hsv_low,white_hsv_high)
    # Keep pixels that are either yellow or white.
    mask_img = cv2.bitwise_or(mask_yellow,mask_white)
    #canny edge detection
    canny_img = canny(mask_img, LOW_THRESHOLD, HIGH_THRESHOLD)
    #vertices for extracting desired portion from the image
    vertices = get_region_of_interest_vertices(img)
    #poly_img = cv2.polylines(img, vertices, True, (0,255,255),3)
    #get portion corresponding to the region of interest from the image
    regions = region_of_interest(canny_img, vertices)
    if mode == 'canny':
        return regions
    else:
        #get hough lines for the lanes found in the img
        hough_img = hough_lines(regions, RHO, THETA, MIN_VOTES, MIN_LINE_LEN, MAX_LINE_GAP)
        #return original image masked by the hough lines
        return weighted_img(hough_img, img)
# -
# # Visualization
# +
def load_images(image_type):
    """
    Load test images of the given kind from disk.

    Parameters
    ----------
    image_type : str
        Either 'data' (frames extracted from the test video) or
        'challenge' (the challenge stills).

    Returns
    -------
    list of (filename, image) tuples, with images converted BGR -> RGB.

    Raises
    ------
    ValueError
        If ``image_type`` is unsupported. (The original printed a message
        and implicitly returned None, which made callers fail later with
        an unrelated TypeError.)
    """
    patterns = {'data': 'data/*.jpg', 'challenge': 'challenge_images/*.jpeg'}
    if image_type not in patterns:
        raise ValueError("image_type must be 'data' or 'challenge', got %r" % image_type)
    images = []
    for f in glob.glob(patterns[image_type]):
        img = cv2.imread(f)
        # cv2.imread returns BGR; convert for matplotlib display.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        images.append((f, img))
    return images
def visualize_transformation(images):
    """Interactive viewer: original frame, Canny stage, and final output.

    images: list of (filename, rgb_image) tuples as returned by load_images.
    Uses ipywidgets.interact to scrub through the list with a slider.
    """
    n = len(images)
    def view_image(i):
        # Render one frame at each of the three pipeline stages.
        file, img = images[i]
        canny_img = build_pipeline(img, 'canny')
        dst_img = build_pipeline(img)
        # Visualize transformation
        f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
        ax1.imshow(img)
        ax1.set_title(file, fontsize=30)
        ax2.imshow(canny_img, cmap = 'gray')
        ax2.set_title('Canny Image', fontsize=30)
        ax3.imshow(dst_img)
        ax3.set_title('Final Image', fontsize=30)
    interact(view_image, i=(0,n-1))
# +
# Load the extracted video frames and browse the pipeline stages interactively.
images = load_images('data')
visualize_transformation(images)
# -
# # Videos testing
# +
# Import everything needed to edit/save/watch video clips# Impor
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# -
def process_image(image):
    """Video-pipeline entry point: annotate one frame with lane lines.

    Returns a 3-channel color image with the detected lanes drawn on it,
    as required by moviepy's fl_image.
    """
    return build_pipeline(image)
# # Video 1
#
# +
# Playing video from file:
cap = cv2.VideoCapture('test.mp4')
# Make sure the output directory for extracted frames exists.
try:
    if not os.path.exists('data'):
        os.makedirs('data')
except OSError:
    print ('Error: Creating directory of data')
currentFrame = 0
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Stop once the video is exhausted (read() returns ret=False).
    if not ret:
        break
    # Saves image of the current frame in jpg file
    name = './data/frame' + str(currentFrame) + '.jpg'
    print ('Creating...' + name)
    cv2.imwrite(name, frame)
    # To stop duplicate images
    currentFrame += 1
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
# -
| P1_TamNguyen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# # 1. Load Data
df = pd.read_excel(io='../data/Online Retail.xlsx', sheet_name='Online Retail')
df.shape
df.head()
# Keep purchases only (negative quantities are returns/cancellations).
df = df.loc[df['Quantity'] > 0]
# # 2. Data Preparation
# #### - Handle NaNs in CustomerID field
df['CustomerID'].describe()
df['CustomerID'].isna().sum()
df.loc[df['CustomerID'].isna()].head()
df.shape
# Drop rows without a customer -- they cannot feed collaborative filtering.
df = df.dropna(subset=['CustomerID'])
df.shape
df.head()
# #### - Customer-Item Matrix
# Rows = customers, columns = items, values = total quantity purchased.
customer_item_matrix = df.pivot_table(
    index='CustomerID',
    columns='StockCode',
    values='Quantity',
    aggfunc='sum'
)
customer_item_matrix.loc[12481:].head()
customer_item_matrix.shape
df['StockCode'].nunique()
df['CustomerID'].nunique()
customer_item_matrix.loc[12348.0].sum()
# Binarize: 1 if the customer ever bought the item, else 0 (NaN -> 0).
customer_item_matrix = customer_item_matrix.applymap(lambda x: 1 if x > 0 else 0)
customer_item_matrix.loc[12481:].head()
# # 3. Collaborative Filtering
from sklearn.metrics.pairwise import cosine_similarity
# ## 3.1. User-based Collaborative Filtering
# #### - User-to-User Similarity Matrix
# Pairwise cosine similarity between customers' binary purchase vectors.
user_user_sim_matrix = pd.DataFrame(
    cosine_similarity(customer_item_matrix)
)
user_user_sim_matrix.head()
# +
# Re-label rows and columns with the actual CustomerIDs.
user_user_sim_matrix.columns = customer_item_matrix.index
user_user_sim_matrix['CustomerID'] = customer_item_matrix.index
user_user_sim_matrix = user_user_sim_matrix.set_index('CustomerID')
# -
user_user_sim_matrix.head()
# #### - Making Recommendations
# Customers most similar to customer 12350.0 (the 1.0 self-similarity ranks first).
user_user_sim_matrix.loc[12350.0].sort_values(ascending=False)
# Items purchased by customer A (12350.0). Boolean masking replaces the
# original Series.nonzero() call, which was deprecated in pandas 0.24 and
# removed in pandas 1.0; the selected items are identical since the matrix
# holds only 0/1 values.
basket_A = customer_item_matrix.loc[12350.0]
items_bought_by_A = set(basket_A[basket_A > 0].index)
items_bought_by_A
# Items purchased by customer B (17935.0), the most similar customer.
basket_B = customer_item_matrix.loc[17935.0]
items_bought_by_B = set(basket_B[basket_B > 0].index)
items_bought_by_B
# Recommend to B what A bought but B did not.
items_to_recommend_to_B = items_bought_by_A - items_bought_by_B
items_to_recommend_to_B
# Show the recommended items with their descriptions.
df.loc[
    df['StockCode'].isin(items_to_recommend_to_B),
    ['StockCode', 'Description']
].drop_duplicates().set_index('StockCode')
# ## 3.2. Item-based Collaborative Filtering
# #### - Item-to-Item Similarity Matrix
# Cosine similarity between items, using customers as the feature axis
# (transpose of the customer-item matrix).
item_item_sim_matrix = pd.DataFrame(cosine_similarity(customer_item_matrix.T))
# +
# Re-label rows and columns with the actual StockCodes.
item_item_sim_matrix.columns = customer_item_matrix.T.index
item_item_sim_matrix['StockCode'] = customer_item_matrix.T.index
item_item_sim_matrix = item_item_sim_matrix.set_index('StockCode')
# -
item_item_sim_matrix
# #### - Making Recommendations
# The 10 items most similar to item 23166 (the item itself ranks first).
top_10_similar_items = list(
    item_item_sim_matrix\
    .loc[23166]\
    .sort_values(ascending=False)\
    .iloc[:10]\
    .index
)
top_10_similar_items
# Show their descriptions, preserving the similarity ordering via .loc.
df.loc[
    df['StockCode'].isin(top_10_similar_items),
    ['StockCode', 'Description']
].drop_duplicates().set_index('StockCode').loc[top_10_similar_items]
| Chapter06/python/ProductRecommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <center> <h1>Perturbation Analysis </h1> </center>
#
#
#
#
#
# In this notebook, we investigate the ability of the **GraphWave** algorithm to detect structural similarities, as well as its robustness to small perturbations. We propose an analysis of the accuracy of the recovery of the different topological roles based on simulations on toy graphs, so that we benefit from an actual ground-truth to benchmark the different results. For convenience purposes, this notebook contains code for running the "perturbed" experiments provided in the associated paper. We have tested here "small pertubations" (we randomly add a few edges to the structure, so as to maintain the patterns (this is why we did not delete edges as well), but shattering the symmetry of the system that yielded structural equivalents. For simulations with a higher noise level, we refer the reader to Notebook Synthetic Experiments--structures.
#
# The setup of the experiment is the following:
#
# 1. We begin by creating a toy graph (that is, a regular structure with repetitions of identical patterns at different areas of the graph). Each type of structural role (that is, bottom corner in the house shape, middle of the center ring, etc.) is endowed with a particular label.
# 2. __Optional__: to simulate the effect of small perturbations, we artificially remove and add a few edges on the graph
# 3. We compute the structural representations given by our method
# 4. To assess the relevance of our results, we propose to evaluate our method using 3 different criteria:
# + We project these representations in 2D using PCA: the idea is to try to assess visually the proximity of the different featurization. In the different plots,
# + We also plug-in these representations as input into a clustering algorithm (default: kmeans), and assess the purity of the clusters that are recovered using k-means. This gives us an indicator of the relevance of our embeddings if the goal was to recover $K$ distinct classes of structural equivalents.
#
#
# First of all, to provide a little bit of intuition and to explain some of the follwoing results, we note that RoleX is specifically designed for clustering role similarities, whereas struc2vec and GraphWave aim to find similarities across a spectrum of roles (hence the distance between nodes is more meaningful in the later than in the case of RoleX), which does not guarantee to provide comparisons across classes. Hence, since we propose to assess the performance via clustering, RoleX should perform better --it was designed specifically for this purpose.
# ## I. Loading the modules and creating the graph
# +
# %matplotlib inline
#### Tests like paper
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pickle
import seaborn as sb
import sklearn as sk
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import sys
sys.path.append('../')
import graphwave as gw
from shapes.shapes import *
from shapes.build_graph import *
from distances.distances_signature import *
from characteristic_functions import *
# +
# 1- Start by defining our favorite regular structure
width_basis = 15
nbTrials = 20
################################### EXAMPLE TO BUILD A SIMPLE REGULAR STRUCTURE ##########
## REGULAR STRUCTURE: the most simple structure: basis + n small patterns of a single type
### 1. Choose the basis (cycle, torus or chain)
basis_type = "cycle"
### 2. Add the shapes
nb_shapes = 5 ## numbers of shapes to add
#shape = ["fan",6] ## shapes and their associated required parameters (nb of edges for the star, etc)
#shape = ["star",6]
list_shapes = [["house"]] * nb_shapes
### 3. Give a name to the graph
identifier = 'AA' ## just a name to distinguish between different trials
name_graph = 'houses' + identifier
sb.set_style('white')
### 4. Pass all these parameters to the Graph Structure
add_edges = 4 ## nb of edges to add anywhere in the structure
del_edges =0
# NOTE(review): add_edges/del_edges are defined but build_structure is called
# with add_random_edges=0, so no perturbation is applied in this cell.
G, communities, plugins, role_id = build_structure(width_basis, basis_type, list_shapes, start=0,
                                                   rdm_basis_plugins =False, add_random_edges=0,
                                                   plot=True, savefig=False)
# -
print 'nb of nodes in the graph: ', G.number_of_nodes()
print 'nb of edges in the graph: ', G.number_of_edges()
# ## Start the analysis!!
# In this first set of experiments, we look at the performance of the algorithms when 3 edges are randomly added to the structure. This provides a "small perturbation", since the Jaccard distance between the adjacency matrices of the two graphs is:
# $$ d_{HAmming}(A, \tilde{A})=\frac{||A -\tilde{A}||_2}{||A +\tilde{A}||_*}=\frac{6}{63*2}=0.048$$
#
# We iterate the experiment 20 times, and average over the performance score to finally be able to compare the algorithms.
#
# We have included the results of this experiments in the cache folder, which the reader can either download or run again.
#
# +
from graphwave import graphwave_alg
# Heat-wavelet characteristic-function embedding for every node.
chi,heat_print, taus = graphwave_alg(G, np.linspace(0,100,25), taus=range(19,21), verbose=True)
# Maps between tau index and tau value (Python 2 dict iteration below).
mapping_inv={i: taus[i] for i in range(len(taus))}
mapping={float(v): k for k,v in mapping_inv.iteritems()}
# +
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
colors = role_id
# One k-means cluster per ground-truth structural role.
nb_clust = len(np.unique(colors))
pca = PCA(n_components=5)
trans_data = pca.fit_transform(StandardScaler().fit_transform(chi))
km = sk.cluster.KMeans(n_clusters=nb_clust)
km.fit(trans_data)
labels_pred = km.labels_
######## Params for plotting
cmapx = plt.get_cmap('rainbow')
x = np.linspace(0,1,np.max(labels_pred) + 1)
col = [cmapx(xx) for xx in x ]
markers = {0:'*',1: '.', 2:',',3: 'o',4: 'v',5: '^',6: '<',7: '>',8: 3 ,\
           9:'d',10: '+',11:'x',12:'D',13: '|',14: '_',15:4,16:0,17:1,\
           18:2,19:6,20:7}
########
# Scatter in PCA space: marker shape = true role, color = predicted cluster.
for c in np.unique(colors):
    indc = [i for i, x in enumerate(colors) if x == c]
    #print indc
    plt.scatter(trans_data[indc, 0], trans_data[indc, 1],
                c=np.array(col)[list(np.array(labels_pred)[indc])],
                marker=markers[c%len(markers)], s=500)
labels = colors
for label,c, x, y in zip(labels,labels_pred, trans_data[:, 0], trans_data[:, 1]):
    plt.annotate(label,xy=(x, y), xytext=(0, 0), textcoords='offset points')
# -
# Clustering-quality metrics against the ground-truth roles.
ami=sk.metrics.adjusted_mutual_info_score(colors, labels_pred)
sil=sk.metrics.silhouette_score(trans_data,labels_pred, metric='euclidean')
ch=sk.metrics.calinski_harabaz_score(trans_data, labels_pred)
hom=sk.metrics.homogeneity_score(colors, labels_pred)
comp=sk.metrics.completeness_score(colors, labels_pred)
print 'Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n'
print str(hom)+'\t'+str(comp)+'\t'+str(ami)+'\t'+str(nb_clust)+'\t'+str(ch)+'\t'+str(sil)
# ## II. Varied shapes
# +
from sklearn.manifold import TSNE
################################### EXAMPLE TO BUILD A MORE COMPLICATED STRUCTURE ##########
######### Alternatively, to define a structure with different types of patterns, pass them as a list
######### In the following example, we have 3 fans (with param. 6), 3 stars on 4 nodes, and 3 house shapes
name_graph='regular'
from sklearn import preprocessing
# Wider basis cycle, with 10 random extra edges as perturbation this time.
width_basis=25
add_edges=10
# 5 fans, 5 stars and 5 houses attached to the basis.
list_shapes=[["fan",6]]*5+[["star",10]]*5+[["house"]]*5
G,colors_shape, plugins,colors=build_structure(width_basis,basis_type,list_shapes, start=0,add_random_edges=add_edges,plot=False,savefig=False)
nb_clust=len(np.unique(colors))
# taus='auto' lets graphwave choose the diffusion scales itself.
chi,heat_print, taus = graphwave_alg(G, np.linspace(0,100,25), taus='auto', verbose=True)
# +
pca=PCA(n_components=5)
trans_data=pca.fit_transform(StandardScaler().fit_transform(chi))
km=sk.cluster.KMeans(n_clusters=nb_clust)
km.fit(trans_data)
labels_pred=km.labels_
######## Params for plotting
cmapx=plt.get_cmap('rainbow')
x=np.linspace(0,1,np.max(labels_pred)+1)
col=[cmapx(xx) for xx in x ]
markers = {0:'*',1: '.', 2:',',3: 'o',4: 'v',5: '^',6: '<',7: '>',8: 3 ,9:'d',10: '+',11:'x',12:'D',13: '|',14: '_',15:4,16:0,17:1,18:2,19:6,20:7}
########
# Scatter in PCA space: marker shape = true role, color = predicted cluster.
for c in np.unique(colors):
    indc=[i for i,x in enumerate(colors) if x==c]
    #print indc
    plt.scatter(trans_data[indc,0], trans_data[indc,1],c=np.array(col)[list(np.array(labels_pred)[indc])] ,marker=markers[c%len(markers)],s=500)
labels = colors
for label,c, x, y in zip(labels,labels_pred, trans_data[:, 0], trans_data[:, 1]):
    plt.annotate(label,xy=(x, y), xytext=(0, 0), textcoords='offset points')
# Clustering-quality metrics against the ground-truth roles.
ami=sk.metrics.adjusted_mutual_info_score(colors, labels_pred)
sil=sk.metrics.silhouette_score(trans_data,labels_pred, metric='euclidean')
ch=sk.metrics.calinski_harabaz_score(trans_data, labels_pred)
hom=sk.metrics.homogeneity_score(colors, labels_pred)
comp=sk.metrics.completeness_score(colors, labels_pred)
print 'Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n'
print str(hom)+'\t'+str(comp)+'\t'+str(ami)+'\t'+str(nb_clust)+'\t'+str(ch)+'\t'+str(sil)
# -
| graphwave/tests/Synthetic Experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# +
# Show floats to 3 decimal places.  The %precision magic is IPython-version
# dependent, so the pandas display option is also set explicitly.
# %precision 3
# Use the fully-qualified option name: the bare 'precision' alias was
# deprecated and then removed in modern pandas; 'display.precision' works
# on every pandas version.
pd.set_option('display.precision', 3)
# -
#Load the CSV of exam scores (rows indexed by student number)
df = pd.read_csv('data/ch2_scores_em.csv', index_col='生徒番号')
df.head()
#Take the first 10 English scores as a NumPy array
scores = np.array(df['英語'])[:10]
scores
#Put the scores into a DataFrame labelled A..J (index name = "student")
scores_df = pd.DataFrame({'点数':scores}, index=pd.Index(['A','B','C','D','E','F','G','H','I','J'], name='生徒'))
scores_df
#Mean, computed by hand
sum(scores) / len(scores)
#Mean via NumPy
np.mean(scores)
#Mean of every column of the full DataFrame
df.mean()
#Mean via pandas (per column)
scores_df.mean()
#Sort ascending (fixed variable-name typo: sorted_socres -> sorted_scores)
sorted_scores = np.sort(scores)
sorted_scores
#Median computed by hand: for an even count average the two middle values,
#for an odd count take the single middle value.
n = len(sorted_scores)
if n % 2 == 0:
    m_1 = sorted_scores[n // 2 - 1]
    m = sorted_scores[n // 2]
    # median = (m_1 + m) / 2
    median = float((m_1 + m) / 2)
else:
    # median = sorted_scores[n // 2]
    median = float(sorted_scores[n // 2])
median
type(median)
#Median via NumPy
np.median(scores)
#Median via pandas
scores_df.median()
#Mode: the most frequent value(s)
pd.Series([1, 1, 1, 2, 2, 3]).mode()
#Mode: all values tie, so every value is returned
pd.Series([1, 2 ,3, 4, 5]).mode()
#Deviation of each score from the mean
mean = np.mean(scores)
deviation = scores - mean
deviation
#Mean of another data set
another_scores = [50,60,58,54,51,56,57,53,52,59]
another_mean = np.mean(another_scores)
another_mean
#Deviations (the plain list is coerced to an ndarray by the NumPy scalar subtraction)
another_deviation = another_scores - another_mean
another_deviation
#Mean of the deviations (0 by construction, up to rounding)
np.mean(deviation)
#Mean of the other data set's deviations
np.mean(another_deviation)
#Add the deviations as a column of a summary DataFrame
summary_df = scores_df.copy()
summary_df['偏差'] = deviation
summary_df
#Column means (the deviation column averages to 0)
summary_df.mean()
#Sample (population) variance, computed by hand
sum(deviation ** 2) / len(deviation)
#Sample variance as the mean of squared deviations
np.mean(deviation ** 2)
#Sample variance (np.var defaults to ddof=0)
np.var(scores)
#Sample variance via pandas (ddof=0 forces the population formula)
scores_df.var(ddof=0)
#Unbiased variance, computed by hand (divide by n-1)
sum(deviation ** 2) / (len(deviation) - 1)
#Unbiased variance (pandas .var() defaults to ddof=1)
scores_df.var()
#Unbiased variance via NumPy
np.var(scores,ddof=1)
#Squared deviations as a new column
summary_df['偏差二乗'] = np.square(deviation)
summary_df
#Mean of each column
summary_df.mean()
#Standard deviation = square root of the population variance
np.sqrt(np.var(scores, ddof=0))
#Standard deviation directly
np.std(scores, ddof=0)
#Maximum
np.max(scores)
#Minimum
np.min(scores)
#Range = max - min
np.max(scores) - np.min(scores)
#First quartile (25th percentile)
scores_Q1 = np.percentile(scores, 25)
scores_Q1
#Second quartile (50th percentile) = median
scores_Q2 = np.percentile(scores, 50)
scores_Q2
#Third quartile (75th percentile)
scores_Q3 = np.percentile(scores, 75)
scores_Q3
#Interquartile range
scores_IQR = scores_Q3 - scores_Q1
scores_IQR
#All summary statistics in one call
pd.Series(scores).describe()
#Standardized scores (z-scores): mean 0, standard deviation 1
z = (scores - np.mean(scores)) / np.std(scores, ddof=0)
z
#Mean of the z-scores: not exactly 0 only because of floating-point rounding
np.mean(z)
#Standard deviation of the z-scores: 1 (up to floating-point rounding)
np.std(z, ddof=0)
#Deviation value ("hensachi"): z-score rescaled to mean 50, sd 10
x = 50 + 10 * (scores - np.mean(scores)) / np.std(scores, ddof=0)
x
#Add the deviation values to the DataFrame
scores_df['偏差値'] = x
scores_df
#English scores for all 50 students
english_scores = np.array(df['英語'])
english_scores
#Summary statistics
pd.Series(english_scores).describe()
#Frequencies: histogram with 10 bins over [0, 100]
freq, _ = np.histogram(english_scores, bins=10, range=(0,100))
freq
#Frequency table indexed by class interval (e.g. "0~10")
freq_class = [f'{i}~{i+10}' for i in range(0, 100, 10)]
freq_dist_df = pd.DataFrame({'度数':freq},index=pd.Index(freq_class, name='階級'))
freq_dist_df
#Class midpoints
class_value = [(i+(i+10))//2 for i in range(0, 100, 10)]
class_value
#Relative frequencies
rel_freq = freq / freq.sum()
rel_freq
#Cumulative relative frequencies
cum_rel_freq = np.cumsum(rel_freq)
cum_rel_freq
#Add class-midpoint, relative-frequency and cumulative-relative-frequency columns
freq_dist_df['階級値'] = class_value
freq_dist_df['相対度数'] = rel_freq
freq_dist_df['累積相対度数'] = cum_rel_freq
freq_dist_df = freq_dist_df[['階級値','度数','相対度数','累積相対度数']]
freq_dist_df
#Modal class: midpoint of the class with the highest frequency
freq_dist_df.loc[freq_dist_df['度数'].idxmax(),'階級値']
# +
import matplotlib.pyplot as plt
#Render plots inline in the notebook
# %matplotlib inline
#Figure canvas
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
#Histogram: 10 bins, minimum 0, maximum 100
freq, _, _ = ax.hist(english_scores, bins=10, range=(0, 100))
#X-axis label (the Japanese label rendered as mojibake, so use English)
#ax.set_xlabel('点数')
ax.set_xlabel('Points')
#Y-axis label
ax.set_ylabel('Counts')
#X-axis ticks every 10 points
#numpy.linspace(start, stop, element, endpoint=True)
ax.set_xticks(np.linspace(0, 100, 11))
#Y-axis ticks every 1 count
#numpy.arange(stop)
#0 ≦ n < stop
ax.set_yticks(np.arange(0, freq.max()+1))
#Draw
plt.show()
# +
#Try 25 bins of width 4 instead
#Figure canvas
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
#Histogram: 25 bins, minimum 0, maximum 100
freq, _, _ = ax.hist(english_scores, bins=25, range=(0, 100))
#Axis labels
ax.set_xlabel('Points')
ax.set_ylabel('Counts')
#Axis ticks
ax.set_xticks(np.linspace(0, 100, 26))
ax.set_yticks(np.arange(0, freq.max()+1))
#Draw
plt.show()
# -
freq
# +
#Overlay the relative-frequency histogram with the cumulative relative-frequency curve
#Figure canvas with a twin y-axis for the cumulative curve
fig = plt.figure(figsize=(10, 6))
ax_hist = fig.add_subplot(111)
ax_freq = ax_hist.twinx()
#Weights that turn raw counts into relative frequencies
weights =np.ones_like(english_scores) / len(english_scores)
#Relative-frequency histogram (25 bins of width 4)
rel_freq, _, _ = ax_hist.hist(english_scores, bins=25, range=(0, 100), weights=weights)
#Cumulative relative frequencies
cum_rel_freq = np.cumsum(rel_freq)
#Class midpoints for the width-4 classes
class_value = [(i+(i+4))//2 for i in range(0, 100, 4)]
#Cumulative relative-frequency line
ax_freq.plot(class_value, cum_rel_freq, ls='--', marker='o', color='gray')
#Axis labels
ax_hist.set_xlabel('Points')
ax_hist.set_ylabel('Freq')
ax_freq.set_ylabel('SumFreq')
#X-axis ticks
ax_hist.set_xticks(np.linspace(0, 100, 26))
plt.show()
# -
weights
rel_freq
cum_rel_freq
#Box plot of the English scores
fig = plt.figure(figsize=(5, 6))
ax = fig.add_subplot(111)
ax.boxplot(english_scores, labels=['English'])
plt.show()
# +
from plot_util import plot_var_interact, plot_std_interact
#Interactive visualization of variance (first four scores)
plot_var_interact(scores[:4])
# -
#Interactive visualization of standard deviation
plot_std_interact(scores)
| chapter02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Datasets" data-toc-modified-id="Datasets-1"><span class="toc-item-num">1 </span>Datasets</a></div><div class="lev2 toc-item"><a href="#Burned-areas" data-toc-modified-id="Burned-areas-11"><span class="toc-item-num">1.1 </span>Burned areas</a></div><div class="lev2 toc-item"><a href="#Glad" data-toc-modified-id="Glad-12"><span class="toc-item-num">1.2 </span>Glad</a></div><div class="lev2 toc-item"><a href="#LWP,-Loss,-Gain" data-toc-modified-id="LWP,-Loss,-Gain-13"><span class="toc-item-num">1.3 </span>LWP, Loss, Gain</a></div><div class="lev2 toc-item"><a href="#Land-cover-change" data-toc-modified-id="Land-cover-change-14"><span class="toc-item-num">1.4 </span>Land cover change</a></div><div class="lev2 toc-item"><a href="#Human-population" data-toc-modified-id="Human-population-15"><span class="toc-item-num">1.5 </span>Human population</a></div><div class="lev2 toc-item"><a href="#Chirps,-precitation" data-toc-modified-id="Chirps,-precitation-16"><span class="toc-item-num">1.6 </span>Chirps, precitation</a></div><div class="lev2 toc-item"><a href="#Protected-areas" data-toc-modified-id="Protected-areas-17"><span class="toc-item-num">1.7 </span>Protected areas</a></div><div class="lev2 toc-item"><a href="#Terrestrial-ecoregions-of-the-world" data-toc-modified-id="Terrestrial-ecoregions-of-the-world-18"><span class="toc-item-num">1.8 </span>Terrestrial ecoregions of the world</a></div>
# -
# # Datasets
# Datasets stored in GEE:
#
# | data | geeId | dId |
# |-------|--------|------|
# | Burned areas (modis) |`MODIS/006/MCD64A1`| `441427cf-12d4-4dca-b011-c0e392c9ea08`|
# |LWP, Loss, Gain | `users/Aliciaarenzana/natGeo/lossGainWP` |`29fdd99f-78c8-49ab-99ee-8e7268327b93` |
# |Land cover change | `users/adampain/ESACCI-LC-L4-LCCS-Map-300m-P1Y-1992_2015-v207` | `5ca98950-81d2-4c2f-b864-28d922ce1186` |
# |Human population | `CIESIN/GPWv4/population-density`|`a05738ec-dd6e-45b8-91a2-30e195a338f4`|
# | Chirps, precipitation | `UCSB-CHG/CHIRPS/DAILY` |`774ef885-a9cb-4315-98c3-36a37f329b7e`|
#
# Datasets from RW-api:
#
# | data | dId |
# |-------|------|
# | Protected areas | `de452a4c-a55c-464d-9037-8c3e9fe48365`|
# | Terrestrial ecoregions of the world | `050f4146-566c-4a6d-9aaa-b49ab66a3090`|
#
#
#
# ## Burned areas
# layerId: `235076aa-64ab-422d-8f04-e027bad1b1c0`
#
#
# ```json
# {
# "application": ["ng"],
# "name": "Burned areas",
# "provider": "gee",
# "layerConfig": {
# "assetId": "MODIS/006/MCD64A1",
# "type": "gee",
# "position": "last",
# "isImageCollection": true,
# "body": {
# "sldValue": "<RasterSymbolizer><Opacity>1.0</Opacity><ChannelSelection><GrayChannel><SourceChannelName>1</SourceChannelName></GrayChannel></ChannelSelection><ColorMap type=\"ramp\" extended=\"false\" ><ColorMapEntry color=\"#4575b4\" quantity=\"0\" opacity=\"0\" /><ColorMapEntry color=\"#cf2317\" quantity=\"1\" opacity=\"1\" /><ColorMapEntry color=\"#cf2317\" quantity=\"366\" opacity=\"1\" /></ColorMap></RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Burned",
# "color": "#cf2317"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ## Glad
# layerId:``
#
# ```json
# {
# "name": "GLAD Tree Cover Loss",
# "application": [
# "ng"
# ],
#
# "provider": "leaflet",
# "userId": "57a0aa1071e394dd32ffe137",
# "default": true,
# "protected": false,
# "published": true,
# "env": "production",
# "layerConfig": {
# "type": "tileLayer",
# "service": "leaflet",
# "url": "https://api.resourcewatch.org/v1/true-color-tiles/glad/{z}/{x}/{y}",
# "body": {
# "format": "image/png",
# "maxZoom": 13,
# "errorTileUrl": "https://storage.googleapis.com/earthenginepartners-hansen/tiles/gfc_v1.4/tree_alpha/6/37/29.png",
# "attribution": "2016 wri/google/Hansen",
# "transparent": true
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Loss (For the past 24 months)",
# "color": "#dc1e8c"
# }
# ]
# }
# ```
# ## LWP, Loss, Gain
# ### LWP
# layerId: `58982ad1-da91-45f9-a21f-6a75a1f46041`
#
# ```json
# {
# "application": ["ng"],
# "name": "Last Wild Places",
# "provider": "gee",
# "layerConfig": {
# "assetId": "users/adampain/LWPs_binary_WGS84",
# "type": "gee",
# "body": {
# "sldValue": "<RasterSymbolizer> \
# <Opacity>1.0</Opacity> \
# <ChannelSelection> \
# <GrayChannel> \
# <SourceChannelName>1</SourceChannelName> \
# </GrayChannel> \
# </ChannelSelection> \
# <ColorMap type=\"ramp\" extended=\"false\" > \
# <ColorMapEntry color=\"#4575b4\" quantity=\"0\" opacity=\"0\" /> \
# <ColorMapEntry color=\"#07818d\" quantity=\"1\" opacity=\"1\" /> \
# </ColorMap> \
# </RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Last Wild Places",
# "color": "#07818d"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ### Loss & gain (1 per year?)
# layerId: `ca46341c-f691-4794-8695-f4387c2783bc`
#
# ```json
# {
# "application": ["ng"],
# "name": "Loss & Gain",
# "provider": "gee",
# "layerConfig": {
# "assetId": "users/Aliciaarenzana/natGeo/lossGainWP",
# "type": "gee",
# "position": "last",
# "isImageCollection": true,
# "body": {
# "sldValue": "\
# <RasterSymbolizer> \
# <Opacity>1.0</Opacity> \
# <ChannelSelection> \
# <GrayChannel> \
# <SourceChannelName>3</SourceChannelName> \
# </GrayChannel> \
# </ChannelSelection> \
# <ColorMap type=\"ramp\" extended=\"false\" > \
# <ColorMapEntry color=\"#ffe069\" quantity=\"-1\" opacity=\"1\" /> \
# <ColorMapEntry color=\"#4575b4\" quantity=\"0\" opacity=\"0\" /> \
# <ColorMapEntry color=\"#07818d\" quantity=\"1\" opacity=\"1\" /> \
# </ColorMap> \
# </RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Loss",
# "color": "#ffe069"
# },
# {
# "name": "Gain",
# "color": "#07818d"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ## Land cover change
# layerId: `68d39a12-ed18-4347-9c4b-f105aa4852b2`
#
#
# ```json
# {
# "application": ["ng"],
# "name": "Land cover Gain",
# "provider": "gee",
# "layerConfig": {
# "assetId": "users/adampain/ESACCI-LC-L4-LCCS-Map-300m-P1Y-1992_2015-v207",
# "type": "gee",
# "body": {
# "sldValue": "<RasterSymbolizer> <Opacity>1.0</Opacity> <ChannelSelection> <GrayChannel> <SourceChannelName>24</SourceChannelName> </GrayChannel> </ChannelSelection> <ColorMap type=\"intervals\" extended=\"false\" > <ColorMapEntry color=\"#4575b4\" quantity=\"0\" opacity=\"0\" /> <ColorMapEntry color=\"#A0FFD3\" quantity=\"50\" opacity=\"1\" /> <ColorMapEntry color=\"#00E5BF\" quantity=\"100\" opacity=\"1\" /> <ColorMapEntry color=\"#00B996\" quantity=\"130\" opacity=\"1\" /> <ColorMapEntry color=\"#005C4A\" quantity=\"140\" opacity=\"1\" /> <ColorMapEntry color=\"#008368\" quantity=\"160\" opacity=\"1\" /> <ColorMapEntry color=\"#005C4A\" quantity=\"181\" opacity=\"0\" /> </ColorMap> </RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Forest",
# "color": "#A0FFD3"
# },
# {
# "name": "Shrubland",
# "color": "#00E5BF"
# },
# {
# "name": "Grassland",
# "color": "#00B996"
# },
# {
# "name": "Wetland",
# "color": "#008368"
# },
# {
# "name": "Sparse vegetation",
# "color": "#005C4A"
#       }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# layerId: `56136020-d966-4e30-bc3b-9b8f1ef13d61`
#
#
# ```json
# {
# "application": ["ng"],
# "name": "Land cover Loss",
# "provider": "gee",
# "layerConfig": {
# "assetId": "users/adampain/ESACCI-LC-L4-LCCS-Map-300m-P1Y-1992_2015-v207",
# "type": "gee",
# "body": {
# "sldValue": "<RasterSymbolizer> <Opacity>1.0</Opacity> <ChannelSelection> <GrayChannel> <SourceChannelName>24</SourceChannelName> </GrayChannel> </ChannelSelection> <ColorMap type=\"intervals\" extended=\"false\" > <ColorMapEntry color=\"#4575b4\" quantity=\"0\" opacity=\"0\" /> <ColorMapEntry color=\"#C40610\" quantity=\"10\" opacity=\"1\" /> <ColorMapEntry color=\"#FB001B\" quantity=\"41\" opacity=\"0\" /> <ColorMapEntry color=\"#FB001B\" quantity=\"180\" opacity=\"0\" /> <ColorMapEntry color=\"#FB001B\" quantity=\"190\" opacity=\"1\" /> <ColorMapEntry color=\"#FF5D6E\" quantity=\"200\" opacity=\"1\" /> <ColorMapEntry color=\"#75aaff\" quantity=\"210\" opacity=\"0\" /> </ColorMap> </RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Agriculture",
# "color": "#C40610"
# },
# {
# "name": "Urban areas",
# "color": "#FB001B"
# },
# {
# "name": "Bare",
# "color": "#FF5D6E"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ## Human population
# layerId: `f2c4f0c3-afe5-43ca-ab97-e09abcc96d60`
#
#
# ```json
# {
# "application": ["ng"],
# "name": "Population Density",
# "provider": "gee",
# "layerConfig": {
# "assetId": "CIESIN/GPWv4/population-density/2020",
# "type": "gee",
# "body": {
# "sldValue": "<RasterSymbolizer> <Opacity>1.0</Opacity> <ChannelSelection> <GrayChannel> <SourceChannelName>1</SourceChannelName> </GrayChannel> </ChannelSelection> <ColorMap type=\"ramp\" extended=\"false\" > <ColorMapEntry color=\"#feebee\" quantity=\"0\" opacity=\"0\" /> <ColorMapEntry color=\"#fdd49e\" quantity=\"1\" opacity=\"1\" /> <ColorMapEntry color=\"#f768a1\" quantity=\"10\" opacity=\"1\" /> <ColorMapEntry color=\"#dd3497\" quantity=\"50\" opacity=\"1\" /> <ColorMapEntry color=\"#ae017e\" quantity=\"100\" opacity=\"1\" /> </ColorMap> </RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "choropleth",
# "items": [
# {
# "name": "1",
# "color": "#fdd49e"
# },
# {
# "name": "10",
# "color": "#f768a1"
# },
# {
# "name": "100",
# "color": "#dd3497"
# },
# {
# "name": "1000",
# "color": "#FF8C00"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ## Chirps, precipitation
# layerId: `76be44f4-7234-4fc0-921f-6a04a73ad0ba`
#
# ```json
# {
# "application": ["ng"],
# "name": "Precipitation in the last 24H (Chirps)",
# "provider": "gee",
# "layerConfig": {
# "assetId": "UCSB-CHG/CHIRPS/DAILY",
# "type": "gee",
# "position": "last",
# "isImageCollection": true,
# "body": {
# "sldValue": "<RasterSymbolizer><Opacity>1.0</Opacity><ChannelSelection><GrayChannel><SourceChannelName>1</SourceChannelName></GrayChannel></ChannelSelection><ColorMap type=\"ramp\" extended=\"false\" ><ColorMapEntry color=\"#fef0d9\" quantity=\"0\" opacity=\"0\" /><ColorMapEntry color=\"#fef0d9\" quantity=\"1\" opacity=\"1\" /><ColorMapEntry color=\"#a8ddb5\" quantity=\"10\" opacity=\"1\" /><ColorMapEntry color=\"#2b8cbe\" quantity=\"20\" opacity=\"1\" /><ColorMapEntry color=\"#08589e\" quantity=\"50\" opacity=\"1\" /></ColorMap></RasterSymbolizer>",
# "styleType": "sld"
# }
# },
# "legendConfig": {
# "type": "choropleth",
# "items": [
# {
# "name": "1",
# "color": "#fef0d9"
# },
# {
# "name": "10",
# "color": "#a8ddb5"
# },
# {
# "name": "20",
# "color": "#2b8cbe"
# },
# {
# "name": "50",
# "color": "#08589e"
# }
# ]
# },
# "published": true,
# "default": true
# }
# ```
# ## Protected areas
# layerId: `81558df8-3c86-4f18-934f-b4907d99bdb3`
#
# ```json
# {
# "name": "Marine and Terrestrial Protected Areas",
# "description": "Legally protected areas, according to various designations (e.g., national parks, state reserves, and wildlife reserves), which are managed to achieve conservation objectives. Updated monthly.",
# "application": ["ng"],
# "provider": "cartodb",
# "default": true,
# "published": true,
# "env": "production",
# "layerConfig": {
# "account": "wri-01",
# "body": {
# "maxzoom": 18,
# "minzoom": 3,
# "layers": [
# {
# "type": "cartodb",
# "options": {
# "sql": "SELECT * FROM wdpa_protected_areas",
# "cartocss": "#wdpa_protected_areas { polygon-opacity: 0.5; polygon-fill: #0079B0; polygon-gamma-method: power; line-color:#0079B0; line-opacity:1; line-width:1}",
# "cartocss_version": "2.3.0"
# }
# }
# ]
# }
# },
# "legendConfig": {
# "type": "basic",
# "items": [
# {
# "name": "Protected areas",
# "color": "#0079B0"
# }
# ]
# },
# "interactionConfig": {
# "output": [
# {
# "column": "name",
# "format": null,
# "prefix": "",
# "property": "Name",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "sub_loc",
# "format": null,
# "prefix": "",
# "property": "Sub-Location",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "status",
# "format": null,
# "prefix": "",
# "property": "Status",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "desig_type",
# "format": null,
# "prefix": "",
# "property": "Designation Type",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "own_type",
# "format": null,
# "prefix": "",
# "property": "Ownership Type",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "gov_type",
# "format": null,
# "prefix": "",
# "property": "Governance Type",
# "suffix": "",
# "type": "string"
# },
# {
# "column": "mang_auth",
# "format": null,
# "prefix": "",
# "property": "Management Authority",
# "suffix": "",
# "type": "string"
# }
# ]
# }
# }
# ```
# ## Terrestrial ecoregions of the world
# layerId: `9db2b973-25d4-401a-ac17-bf853dc9b8c7`
#
# ```json
# {
# "name": "Terrestrial Ecoregions",
# "description": "A classification of terrestrial land into 14 major ecoregions or habitat types from 2008.",
# "application": [
# "ng"
# ],
# "provider": "cartodb",
# "userId": "5980838ae24e6a1dae3dd446",
# "default": true,
# "protected": false,
# "published": true,
# "env": "production",
# "layerConfig": {
# "body": {
# "layers": [
# {
# "options": {
# "cartocss_version": "2.3.0",
# "cartocss": "#bio_021_terrestrial_ecoregions {polygon-opacity:1;} #bio_021_terrestrial_ecoregions{[wwf_mhtnam='Boreal Forests/Taiga']{polygon-fill: #016460;} [wwf_mhtnam='Deserts and Xeric Shrublands']{polygon-fill:#ffffcc;} [wwf_mhtnam='Flooded Grasslands and Savannas']{polygon-fill:#9ecae1;} [wwf_mhtnam='Inland Water']{polygon-fill:#084594;} [wwf_mhtnam='Mangroves']{polygon-fill:#cb181d;} [wwf_mhtnam='Mediterranean Forests, Woodlands and Scrub']{polygon-fill:#fcbba1} [wwf_mhtnam='Montane Grasslands and Shrublands']{polygon-fill:#808000;} [wwf_mhtnam='Rock and Ice']{polygon-fill:#d9d9d9;} [wwf_mhtnam='Temperate Broadleaf and Mixed Forests']{polygon-fill:#238b45;} [wwf_mhtnam='Temperate Conifer Forests']{polygon-fill:#66c2a5;} [wwf_mhtnam='Temperate Grasslands, Savannas and Shrublands']{polygon-fill:#F5DEB3;} [wwf_mhtnam='Tropical and Subtropical Coniferous Forests']{polygon-fill:#40E0D0;} [wwf_mhtnam='Tropical and Subtropical Dry Broadleaf Forests']{polygon-fill:#679267;} [wwf_mhtnam='Tropical and Subtropical Grasslands, Savannas and Shrublands']{polygon-fill:#addd8e;} [wwf_mhtnam='Tropical and Subtropical Moist Broadleaf Forests']{polygon-fill:#005a32;} [wwf_mhtnam='Tundra']{polygon-fill:#a6bddb;}}",
# "sql": "SELECT * FROM bio_021_terrestrial_ecoregions",
# "interactivity": [
# "cartodb_id",
# "wwf_mhtnam",
# "eco_name",
# "wwf_realm2"
# ]
# },
# "type": "mapnik"
# }
# ],
# "minzoom": 3,
# "maxzoom": 18
# },
# "account": "wri-rw"
# },
# "legendConfig": {
# "items": [
# {
# "color": "#016460",
# "name": "Boreal Forests/Taiga"
# },
# {
# "color": "#ffffcc",
# "name": "Deserts and Xeric Shrublands"
# },
# {
# "color": "#9ecae1",
# "name": "Flooded Grasslands and Savannas"
# },
# {
# "color": "#084594",
# "name": "Inland Water"
# },
# {
# "color": "#cb181d",
# "name": "Mangroves"
# },
# {
# "color": "#fcbba1",
# "name": "Mediterranean Forests, Woodlands and Scrub"
# },
# {
# "color": "#808000",
# "name": "Montane Grasslands and Shrublands"
# },
# {
# "color": "#d9d9d9",
# "name": "Rock and Ice"
# },
# {
# "color": "#238b45",
# "name": "Temperate Broadleaf and Mixed Forests"
# },
# {
# "color": "#66c2a5",
# "name": "Temperate Conifer Forests"
# },
# {
# "color": "#F5DEB3",
# "name": "Temperate Grasslands, Savannas and Shrublands"
# },
# {
# "color": "#40E0D0",
# "name": "Tropical and Subtropical Coniferous Forests"
# },
# {
# "color": "#679267",
# "name": "Tropical and Subtropical Dry Broadleaf Forests"
# },
# {
# "color": "#addd8e",
# "name": "Tropical and Subtropical Grasslands, Savannas and Shrublands"
# },
# {
# "color": "#005a32",
# "name": "Tropical and Subtropical Moist Broadleaf Forests"
# },
# {
# "color": "#a6bddb",
# "name": "Tundra"
# }
# ],
# "type": "basic"
# },
# "interactionConfig": {
# "type": "gridjson",
# "config": {},
# "output": [
# {
# "column": "wwf_realm2",
# "property": "Biogeographical Realm",
# "prefix": "",
# "suffix": "",
# "type": "string",
# "format": null
# },
# {
# "column": "wwf_mhtnam",
# "property": "Biome",
# "prefix": "",
# "suffix": "",
# "type": "string",
# "format": null
# },
# {
# "column": "eco_name",
# "property": "Ecoregion",
# "prefix": "",
# "suffix": "",
# "type": "string",
# "format": null
# }
# ]
# }
# }
# ```
# +
import numpy as np
import matplotlib.pyplot as plt

# Plot a tangent curve: angle in degrees on the y-axis, 50*tan(2*theta) on the x-axis.
t = np.arange(-90, 90, 1)
theta_rad = (2)*(2*np.pi*(t)/360)  # degrees -> radians, with the doubling factor
s = 50*np.tan(theta_rad)
line, = plt.plot(s, t, lw=2)
# -
| natGeo/Dataset-layer-management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kubeflow Pipelines e2e mnist example
#
# In this notebook you will create an e2e mnist Kubeflow Pipeline to perform:
# - Hyperparameter tuning using Katib
# - Distributed training with the best hyperparameters using TFJob
# - Serve the trained model using KFServing
#
# Reference documentation:
#
# - https://www.kubeflow.org/docs/components/training/tftraining/
# - https://www.kubeflow.org/docs/components/katib/
# - https://www.kubeflow.org/docs/components/kfserving/
#
# **Note**: This Pipeline runs in the multi-user mode. Follow [this guide](https://github.com/kubeflow/katib/tree/master/examples/v1beta1/kubeflow-pipelines#multi-user-pipelines-setup) to give your Notebook access to Kubeflow Pipelines.
# Install required packages (Kubeflow Pipelines and Katib SDK).
# !pip install kfp==1.8.12
# !pip install kubeflow-katib==0.13.0
# +
import kfp
import kfp.dsl as dsl
from kfp import components
from kubeflow.katib import ApiClient
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
# -
# ## Define the Pipelines tasks
#
# To run this Pipeline, you should define:
# 1. Katib hyperparameter tuning
# 2. TFJob training
# 3. KFServing inference
#
#
# ### Step 1. Katib hyperparameter tuning task
#
# Create the Kubeflow Pipelines task for the Katib hyperparameter tuning. This Experiment uses "random" algorithm and TFJob for the Trial's worker.
#
# The Katib Experiment is similar to this example: https://github.com/kubeflow/katib/blob/master/examples/v1beta1/kubeflow-training-operator/tfjob-mnist-with-summaries.yaml.
# You should define the Experiment name, namespace and number of training steps in the arguments.
def _mnist_trial_replica_spec(training_steps):
    """Build the TFJob replica spec shared by the Trial's Chief and Worker.

    The istio sidecar is disabled so the Trial container can terminate
    cleanly; the ${trialParameters.*} placeholders are substituted by Katib
    for every Trial.
    """
    return {
        "replicas": 1,
        "restartPolicy": "OnFailure",
        "template": {
            "metadata": {
                "annotations": {
                    "sidecar.istio.io/inject": "false"
                }
            },
            "spec": {
                "containers": [
                    {
                        "name": "tensorflow",
                        "image": "docker.io/liuhougangxa/tf-estimator-mnist",
                        "command": [
                            "python",
                            "/opt/model.py",
                            "--tf-train-steps=" + str(training_steps),
                            "--tf-learning-rate=${trialParameters.learningRate}",
                            "--tf-batch-size=${trialParameters.batchSize}"
                        ]
                    }
                ]
            }
        }
    }


def create_katib_experiment_task(experiment_name, experiment_namespace, training_steps):
    """Create the KFP task that launches a Katib hyperparameter-tuning Experiment.

    Runs a random search over learning rate and batch size; each Trial is a
    TFJob with 1 Chief + 1 Worker.  Returns the launcher op whose output is
    the Experiment result (best hyperparameters).
    """
    # Trial count specification.
    max_trial_count = 5
    max_failed_trial_count = 3
    parallel_trial_count = 2

    # Objective: minimize the reported "loss" metric; stop once it reaches 0.001.
    objective = V1beta1ObjectiveSpec(
        type="minimize",
        goal=0.001,
        objective_metric_name="loss"
    )

    # Random search algorithm.
    algorithm = V1beta1AlgorithmSpec(
        algorithm_name="random",
    )

    # Experiment search space.
    # In this example we tune learning rate and batch size.
    parameters = [
        V1beta1ParameterSpec(
            name="learning_rate",
            parameter_type="double",
            feasible_space=V1beta1FeasibleSpace(
                min="0.01",
                max="0.05"
            ),
        ),
        V1beta1ParameterSpec(
            name="batch_size",
            parameter_type="int",
            feasible_space=V1beta1FeasibleSpace(
                min="80",
                max="100"
            ),
        )
    ]

    # Experiment Trial template.  Chief and Worker used identical, duplicated
    # replica specs; the helper builds each one (two separate dicts, so
    # neither can be mutated through the other).
    # TODO (andreyvelich): Use community image for the mnist example.
    trial_spec = {
        "apiVersion": "kubeflow.org/v1",
        "kind": "TFJob",
        "spec": {
            "tfReplicaSpecs": {
                "Chief": _mnist_trial_replica_spec(training_steps),
                "Worker": _mnist_trial_replica_spec(training_steps)
            }
        }
    }

    # Configure parameters for the Trial template.
    trial_template = V1beta1TrialTemplate(
        primary_container_name="tensorflow",
        trial_parameters=[
            V1beta1TrialParameterSpec(
                name="learningRate",
                description="Learning rate for the training model",
                reference="learning_rate"
            ),
            V1beta1TrialParameterSpec(
                name="batchSize",
                description="Batch size for the model",
                reference="batch_size"
            ),
        ],
        trial_spec=trial_spec
    )

    # Create an Experiment from the above parameters.
    experiment_spec = V1beta1ExperimentSpec(
        max_trial_count=max_trial_count,
        max_failed_trial_count=max_failed_trial_count,
        parallel_trial_count=parallel_trial_count,
        objective=objective,
        algorithm=algorithm,
        parameters=parameters,
        trial_template=trial_template
    )

    # Create the KFP task for the Katib Experiment.
    # Experiment Spec should be serialized to a valid Kubernetes object.
    katib_experiment_launcher_op = components.load_component_from_url(
        "https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml")
    op = katib_experiment_launcher_op(
        experiment_name=experiment_name,
        experiment_namespace=experiment_namespace,
        experiment_spec=ApiClient().sanitize_for_serialization(experiment_spec),
        experiment_timeout_minutes=60,
        delete_finished_experiment=False)
    return op
# ### Step 2. TFJob training task
#
# Create the Kubeflow Pipelines task for the TFJob training. In this example TFJob runs the Chief and Worker with 1 replica.
#
# Learn more about TFJob replica specifications in the Kubeflow docs: https://www.kubeflow.org/docs/components/training/tftraining/#what-is-tfjob.
# This function converts Katib Experiment HP results to args.
def convert_katib_results(katib_results) -> str:
    """Convert a Katib Experiment result (JSON string) into CLI arguments.

    Reads the parameter assignments of the current optimal Trial and
    returns them as a single space-separated string of --tf-* flags.
    Imports stay inside the function: it is serialized into a standalone
    container op by func_to_container_op.
    """
    import json
    import pprint

    results = json.loads(katib_results)
    print("Katib results:")
    pprint.pprint(results)

    # Map each known Katib parameter name to its training-script flag.
    flag_by_param = {
        "learning_rate": "--tf-learning-rate=",
        "batch_size": "--tf-batch-size=",
    }
    best_hps = []
    for assignment in results["currentOptimalTrial"]["parameterAssignments"]:
        flag_prefix = flag_by_param.get(assignment["name"])
        if flag_prefix is not None:
            best_hps.append(flag_prefix + assignment["value"])

    print("Best Hyperparameters: {}".format(best_hps))
    return " ".join(best_hps)
# You should define the TFJob name, namespace, number of training steps, output of Katib and model volume tasks in the arguments.
def create_tfjob_task(tfjob_name, tfjob_namespace, training_steps, katib_op, model_volume_op):
    """Create the KFP task that launches the final TFJob training run.

    Converts the Katib task's output into CLI flags, then launches a TFJob
    (1 Chief + 1 Worker).  Only the Chief mounts the PVC created by
    ``model_volume_op`` so it can export the trained SavedModel to
    /mnt/export.
    """
    import json
    # Get parameters from the Katib Experiment.
    # Parameters are in the format "--tf-learning-rate=0.01 --tf-batch-size=100"
    convert_katib_results_op = components.func_to_container_op(convert_katib_results)
    best_hp_op = convert_katib_results_op(katib_op.output)
    # KFP placeholder string, resolved to the actual flags at pipeline run time.
    best_hps = str(best_hp_op.output)

    # Create the TFJob Chief and Worker specification with the best Hyperparameters.
    # TODO (andreyvelich): Use community image for the mnist example.
    tfjob_chief_spec = {
        "replicas": 1,
        "restartPolicy": "OnFailure",
        "template": {
            "metadata": {
                "annotations": {
                    # Disable the istio sidecar so the job pod can terminate.
                    "sidecar.istio.io/inject": "false"
                }
            },
            "spec": {
                "containers": [
                    {
                        "name": "tensorflow",
                        "image": "docker.io/liuhougangxa/tf-estimator-mnist",
                        "command": [
                            "sh",
                            "-c"
                        ],
                        "args": [
                            "python /opt/model.py --tf-export-dir=/mnt/export --tf-train-steps={} {}".format(training_steps, best_hps)
                        ],
                        # The Chief exports the model, so it mounts the shared PVC.
                        "volumeMounts": [
                            {
                                "mountPath": "/mnt/export",
                                "name": "model-volume"
                            }
                        ]
                    }
                ],
                "volumes": [
                    {
                        "name": "model-volume",
                        "persistentVolumeClaim": {
                            "claimName": str(model_volume_op.outputs["name"])
                        }
                    }
                ]
            }
        }
    }
    # Worker runs the same training command but does not mount the model volume.
    tfjob_worker_spec = {
        "replicas": 1,
        "restartPolicy": "OnFailure",
        "template": {
            "metadata": {
                "annotations": {
                    "sidecar.istio.io/inject": "false"
                }
            },
            "spec": {
                "containers": [
                    {
                        "name": "tensorflow",
                        "image": "docker.io/liuhougangxa/tf-estimator-mnist",
                        "command": [
                            "sh",
                            "-c",
                        ],
                        "args": [
                            "python /opt/model.py --tf-export-dir=/mnt/export --tf-train-steps={} {}".format(training_steps, best_hps)
                        ],
                    }
                ],
            }
        }
    }

    # Create the KFP task for the TFJob.
    tfjob_launcher_op = components.load_component_from_url(
        "https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/launcher/component.yaml")
    op = tfjob_launcher_op(
        name=tfjob_name,
        namespace=tfjob_namespace,
        chief_spec=json.dumps(tfjob_chief_spec),
        worker_spec=json.dumps(tfjob_worker_spec),
        tfjob_timeout_minutes=60,
        delete_finished_tfjob=False)
    return op
# ### Step 3. KFServing inference
#
# Create the Kubeflow Pipelines task for the KFServing inference.
# You should define the model name, namespace, output of the TFJob and model volume tasks in the arguments.
def create_kfserving_task(model_name, model_namespace, tfjob_op, model_volume_op):
    """Create the KFP task that deploys the trained model as a KFServing
    InferenceService.

    The service manifest is rendered as inline YAML whose storageUri points at
    the PVC the training job wrote to, and the launcher is ordered to run only
    after the TFJob task has finished.
    """
    # Serve the model straight from the shared PVC produced by the volume task.
    pvc_name = str(model_volume_op.outputs["name"])
    inference_service = '''
apiVersion: "serving.kubeflow.org/v1beta1"
kind: "InferenceService"
metadata:
  name: {}
  namespace: {}
  annotations:
    "sidecar.istio.io/inject": "false"
spec:
  predictor:
    tensorflow:
      storageUri: "pvc://{}/"
'''.format(model_name, model_namespace, pvc_name)

    launcher = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/kfserving/component.yaml')
    # Deploy only once training has produced the saved model.
    launcher(action="create", inferenceservice_yaml=inference_service).after(tfjob_op)
# ## Run the Kubeflow Pipeline
#
# You should create the Kubeflow Pipeline from the above tasks.
# +
# Pipeline-wide settings: run name, user namespace, and TF training steps.
name="mnist-e2e"
namespace="kubeflow-user-example-com"
training_steps="200"

@dsl.pipeline(
    name="End to End Pipeline",
    description="An end to end mnist example including hyperparameter tuning, train and inference"
)
def mnist_pipeline(name=name, namespace=namespace, training_steps=training_steps):
    """Wire the three stages together: Katib tuning -> TFJob training -> KFServing."""
    # Run the hyperparameter tuning with Katib.
    katib_op = create_katib_experiment_task(name, namespace, training_steps)
    # Create volume to train and serve the model (shared between TFJob and KFServing).
    model_volume_op = dsl.VolumeOp(
        name="model-volume",
        resource_name="model-volume",
        size="1Gi",
        modes=dsl.VOLUME_MODE_RWO
    )
    # Run the distributive training with TFJob, using Katib's best hyperparameters.
    tfjob_op = create_tfjob_task(name, namespace, training_steps, katib_op, model_volume_op)
    # Create the KFServing inference service from the trained model.
    create_kfserving_task(name, namespace, tfjob_op, model_volume_op)

# Run the Kubeflow Pipeline in the user's namespace.
kfp_client=kfp.Client()
run_id = kfp_client.create_run_from_pipeline_func(mnist_pipeline, namespace=namespace, arguments={}).run_id
print("Run ID: ", run_id)
# -
# The finished Pipeline should look as follows.
# ## Predict from the trained model
#
# Once Kubeflow Pipeline is finished, you are able to call the API endpoint with [mnist image](https://raw.githubusercontent.com/kubeflow/katib/master/examples/v1beta1/kubeflow-pipelines/images/9.bmp) to predict from the trained model.
#
# **Note**: If you are using Kubeflow + Dex setup and running this Notebook outside of your Kubernetes cluster, follow [this guide](https://github.com/kubeflow/kfserving/tree/master/docs/samples/istio-dex#authentication) to get a Session ID for the API requests.
# +
import numpy as np
from PIL import Image
import requests

# Query the deployed InferenceService only if the pipeline run succeeded.
kfp_run = kfp_client.get_run(run_id=run_id)
if kfp_run.run.status == "Succeeded":
    print("Run {} has been Succeeded\n".format(run_id))

    # Specify the image URL here.
    image_url = "https://raw.githubusercontent.com/kubeflow/katib/master/examples/v1beta1/kubeflow-pipelines/images/9.bmp"
    image = Image.open(requests.get(image_url, stream=True).raw)
    # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float`
    # produces the same float64 dtype. Grayscale, resize to 28x28, and shape
    # as a single (1, 28, 28, 1) sample.
    data = np.array(image.convert('L').resize((28, 28))).astype(float).reshape(-1, 28, 28, 1)
    data_formatted = np.array2string(data, separator=",", formatter={"float": lambda x: "%.1f" % x})
    json_request = '{{ "instances" : {} }}'.format(data_formatted)

    # Specify the prediction URL. If you are running this notebook outside of
    # the Kubernetes cluster, set the cluster IP instead.
    url = "http://{}-predictor-default.{}.svc.cluster.local/v1/models/{}:predict".format(name, namespace, name)
    response = requests.post(url, data=json_request)

    print("Prediction for the image")
    display(image)
    print(response.json())
# -
| examples/v1beta1/kubeflow-pipelines/kubeflow-e2e-mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/suminarwb/word2vec_thesis/blob/main/Word2Vec.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6AspNlpItD9q"
# # Word2Vec
# + id="Mo0SCfd6hyFv" colab={"base_uri": "https://localhost:8080/"} outputId="1fb2559d-b9e0-4a76-8435-6a5c6d44626a"
import os
import pandas as pd
from tqdm.auto import tqdm
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
# + [markdown] id="q8EfmCxsmcco"
# Prepare Corpus
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="D-KaqCq9mgIG" outputId="964d66b6-49a9-471a-ed39-9cf2a2f3cb4c"
# Load the tweet dataset; Windows-1252 decoding handles its non-UTF-8 bytes.
df = pd.read_csv("re_dataset.csv", encoding='Windows-1252')
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["8cc38e1b6a504a4babd93ec03e36c6f0", "1f96d866f3f84e8ebf8afd575832098a", "<KEY>", "21db68caf22c454d90ded88109fd9a1e", "32da77597dab44168ea1e8fa96973840", "63648ea499c94c0695481d07a8cb5c84", "<KEY>", "84e6f6462c2d480580c6ae4df5aa4a54"]} id="LjtgnpgOuU-t" outputId="05cd9607-8501-471c-ee66-95ebcf21139c"
# Lowercase and word-tokenize every tweet: gensim expects a list of token lists.
sentences = [word_tokenize(Tweet.lower()) for Tweet in tqdm(df.Tweet)]
sentences[:5]
# + [markdown] id="QGzw99dYySwb"
# Training
# + id="nv7Wvwj3yQze"
# Train 100-dimensional CBOW (sg=0) embeddings on the tokenized tweets.
# NOTE(review): `size` and `iter` are gensim 3.x parameter names; gensim 4
# renamed them to `vector_size` and `epochs` — confirm the installed version.
model = Word2Vec(sentences, size=100, window=3, min_count=3, sg=0, workers=4, iter=1000)
# + id="mEThS60Fzwxt"
# Persist the trained model so it can be reloaded later.
os.makedirs("model/w2v/", exist_ok=True)
model.save("model/w2v/contoh_model.w2v")
# + [markdown] id="EQ2geL_a1MvB"
# Load Model
# + id="ISa2fgPL1FhZ"
# Reload the saved model from disk.
model = Word2Vec.load("model/w2v/contoh_model.w2v")
# + [markdown] id="V-iNvrRp1Pxg"
# Continue training
# + colab={"base_uri": "https://localhost:8080/"} id="4EGtc7xZ1fiB" outputId="5e7c9df6-bf59-443e-eb01-edcefac17a44"
# Preview of the whitespace tokenization used for the extra sentences below.
"Kaum cebong kapir udah keliatan dongoknya dari awal tambah dongok lagi hahahah".lower().split()
# + id="6WDPbClm1SBh"
# Two hand-made, pre-tokenized example sentences for incremental training.
contoh_data = [
    ['deklarasi',
     'pilkada',
     '2018',
     'aman',
     'dan',
     'anti',
     'hoax',
     'warga',
     'dukuh',
     'sari',
     'jabon'],
    ['kaum',
     'cebong',
     'kapir',
     'udah',
     'keliatan',
     'dongoknya',
     'dari',
     'awal',
     'tambah',
     'dongok',
     'lagi',
     'hahahah']
]
# + colab={"base_uri": "https://localhost:8080/"} id="x9AFGQR12ZhA" outputId="dffa919f-baa6-4331-eaf1-488eee737264"
# Continue training the loaded model on the new sentences for one epoch.
model.train(contoh_data, total_examples=len(contoh_data), epochs=1)
# + id="2fsIf6qD24JA"
# Save the updated model back to the same path.
model.save("model/w2v/contoh_model.w2v")
# + colab={"base_uri": "https://localhost:8080/"} id="vZqUiTKR3nv6" outputId="5002ee72-d1b3-4e88-906a-5268f62a7f0d"
# Keyed vectors: the trained word-embedding lookup table.
w2v = model.wv
# Vocabulary in frequency order.
# NOTE(review): `index2word` is the gensim 3.x attribute name (gensim 4
# renamed it to `index_to_key`) — confirm the installed version.
w2v.index2word
# + colab={"base_uri": "https://localhost:8080/"} id="LPeh4vK14FrD" outputId="aaa8195d-e994-4b81-e545-3dee0d16c4f9"
# The raw embedding matrix (one row per vocabulary word).
w2v.vectors
# + colab={"base_uri": "https://localhost:8080/"} id="Eh-Q7FEr4PuE" outputId="6d494959-8f09-478a-b801-8d5a3f1c71e1"
# Dimensionality of each word vector.
w2v.vector_size
# + colab={"base_uri": "https://localhost:8080/"} id="gb1qzAEY4S2E" outputId="3e06e527-5509-482c-f358-7dfbdde9a559"
# Look up the embedding of a single word.
w2v["goblok"]
# + [markdown] id="6Mqcq7cz5Itt"
# Similar words
# + colab={"base_uri": "https://localhost:8080/"} id="qNkQlL4B5MAi" outputId="ac73b10e-c6d1-43a7-90f9-3188835671ae"
# Ten nearest neighbours of "jokowi" by cosine similarity.
w2v.similar_by_word("jokowi", topn=10)
# + [markdown] id="r_R5rvfk5VxR"
# higher order visualization
# + id="EeyeC7AH5Yaz"
# Project the high-dimensional word vectors down to 2-D with UMAP for plotting.
from umap import UMAP
import numpy as np
import pandas as pd
import plotly.express as px
# + id="H0WNiT6p6wWD"
X = UMAP().fit_transform(w2v.vectors)
# + id="UKWyUAM37Z01"
# One row per word: its 2-D coordinates plus the word itself for labeling.
df = pd.DataFrame(X, columns=["umap1", "umap2"])
df["text"] = w2v.index2word
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="C-Isyx6S8A-S" outputId="0245498c-4296-4170-cecb-5468a6d3b25e"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 817} id="TJucJkKO7jeR" outputId="af60a299-1b8b-45e3-d2ca-594ce279174c"
# Interactive scatter plot of the reduced embedding, labeled with each word.
fig = px.scatter(df, x="umap1", y="umap2", text="text")
fig.update_traces(textposition='top center')
fig.update_layout(
    height=800,
    title_text='Reduced word2vec visualization'
)
fig.show()
| Word2Vec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 04. The Scale-Free Property
# ## Contents
#
# * [04.01. Introduction](#0401-introduction)
# * [04.02. Power Laws and Scale-Free Networks](#0402-power-laws-and-scale-free-networks)
#
#
# +
# import
import numpy as np
import scipy
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import IPython
import networkx as nx
from IPython.display import Image
# %matplotlib inline

# Print the run timestamp and library versions for reproducibility.
import datetime
dt = datetime.datetime.now()
print(
    """{}-{}-{} {}:{}:{}
----------------------
numpy\t\t{}
scipy\t\t{}
pandas\t\t{}
matplotlib\t{}
----------------------
ipython\t\t{}
----------------------
networkx\t{}"""
    .format(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, np.__version__, scipy.__version__, pd.__version__, matplotlib.__version__, IPython.__version__, nx.__version__))

# Shared color palette for all plots below: hex colors keyed by short letter
# codes (b=blue, o=orange, g=green, r=red, v=violet, br=brown, p=pink,
# gr=gray, y=yellow-green, c=cyan). Note the keys are strings, not ints.
c = {'b': '#1f77b4',
     'o': '#ff7f0e',
     'g': '#2ca02c',
     'r': '#d62728',
     'v': '#9467bd',
     'br': '#8c564b',
     'p': '#e377c2',
     'gr': '#7f7f7f',
     'y': '#bcbd22',
     'c': '#17becf'}
# -
# ## 04.01. Introduction
# ## 04.02. Power Laws and Scale-Free Networks
# +
# Degree distribution of a G(N, p) random graph on log-log axes.
# BUG FIX: `ks` and `p_ks` were referenced by the scatter call below, but the
# lines computing them were commented out, so this cell raised a NameError.
N, p = 1000, .4
G = nx.gnp_random_graph(N, p, seed=123)
p_k_arr = nx.degree_histogram(G)        # count of nodes having each degree k
p_ks = [cnt / N for cnt in p_k_arr]     # normalize counts to probabilities
ks = range(len(p_k_arr))

fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(ks, p_ks, c=c['v'])
# The distribution is concentrated around the expected degree N*p = 400.
ax.set(xscale='log',
       yscale='log',
       xlabel=r'$k$',
       ylabel=r'$p_k$',
       xlim=(350, 450),
       yticks=( 10**(np.linspace(-2, -1, 5)) )
       )
ax.grid(True)
plt.tight_layout()
plt.show()
# +
def plot(N, p, seedN):
    """Draw a G(N, p) random graph (left) and its degree distribution (right).

    Parameters:
        N: number of nodes.
        p: edge probability.
        seedN: RNG seed so the layout/graph are reproducible.
    """
    G = nx.gnp_random_graph(N, p, seed=seedN)
    p_k_arr = nx.degree_histogram(G)        # count of nodes per degree k
    p_ks = [cnt / N for cnt in p_k_arr]     # normalize to probabilities
    ks = range(len(p_k_arr))
    fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
    # BUG FIX: `c` is keyed by letter codes ('v' = violet); the integer index
    # `c[4]` raised KeyError on this string-keyed dict.
    nx.draw_networkx(G, ax=ax[0], pos=nx.spring_layout(G), node_size=20, node_color=c['v'], with_labels=False, alpha=.3)
    ax[0].set(xticks=(), yticks=())
    ax[1].scatter(ks, p_ks, c=c['v'], alpha=.5)
    ax[1].set(xlabel=r'$k$',
              ylabel=r'$p_k$',
              xlim=(0, N-1),
              )
    ax[1].grid(True)
    plt.tight_layout()
    plt.show()
# Interactive exploration of G(N, p): sliders for N, p, and the RNG seed.
from ipywidgets import interactive
interactive_plot = interactive(plot, N=(1, 1000, 10), p=(0, 1, 0.1), seedN=(1, 999, 1))
output = interactive_plot.children[-1]
# Fix the output area height so the widget does not resize on every redraw.
output.layout.height = '350px'
interactive_plot
# +
from scipy.special import zeta

# Plot the Riemann zeta function, the normalization constant of the discrete
# power-law distribution p_k = k^(-gamma) / zeta(gamma).
xmax, ymax = 5, 10
x = np.linspace(0, xmax, 100)
zetas = zeta(x)
fig, ax = plt.subplots(figsize=(5,5))
# BUG FIX: `c` is keyed by letter codes ('v' = violet); the integer index
# `c[4]` raised KeyError on this string-keyed dict.
ax.plot(x, zetas, c=c['v'])
ax.text(xmax/2+1/2, ymax/2,
        r'$\zeta(x) = \sum_{k=0}^\infty{\frac{1}{(k+1)^x}}$',
        fontsize=15, ha='center', va='center',
        bbox=dict(fc='white', ec='white', alpha=0.75))
ax.set(title='Riemann-zeta function',
       xlabel=r'$x$',
       ylabel=r'$\zeta(x)$',
       xlim=(0, xmax),
       ylim=(0, ymax)
       )
ax.grid(True)
plt.tight_layout()
# plt.savefig('fig/fig_Riemann-zeta.png')
plt.show()
# +
# Discrete power law p_k ~ k^(-2.1) on linear (left) and log-log (right) axes;
# on log-log axes a power law appears as a straight line.
gamma = -2.1

def scale_free_pk(k, gamma):
    # 1/1200 is an arbitrary normalization so the curve fits the plot range.
    return k**(gamma) /1200

ks = np.linspace(0.1, 1, 100)
ps = scale_free_pk(ks, gamma)
fig, ax = plt.subplots(figsize=(10, 5), ncols=2, nrows=1)
# BUG FIX: `c` is keyed by letter codes ('v' = violet); the integer index
# `c[4]` raised KeyError on this string-keyed dict (both panels).
ax[0].plot(ks, ps, label=r'$p_k \sim k^{-2.1}$', c=c['v'])
ax[0].set(xlabel=r'$k$',
          ylabel=r'$p_k$'
          )
ax[0].grid(True)
ax[0].legend(loc='best')
ax[1].plot(ks, ps, label=r'$p_k \sim k^{-2.1}$', c=c['v'])
ax[1].set(xlabel=r'$k$',
          ylabel=r'$p_k$',
          xscale='log',
          yscale='log'
          )
ax[1].grid(True)
ax[1].legend(loc='best')
plt.tight_layout()
plt.show()
# +
# Compare the expected largest degree k_max versus network size N:
# ln(N) for a random network against N^(1/(gamma-1)) for a scale-free network.
def random_network(N):
    return np.log(N)

def scale_free(N, gamma):
    return N**(1/(gamma - 1))

N = 10 ** np.linspace(0, 6, 100)
gamma = 2.5
fig, ax = plt.subplots(figsize=(5,5))
# BUG FIX: `c` is keyed by letter codes ('g' = green, 'v' = violet); the
# integer indices `c[2]` and `c[4]` raised KeyError on this string-keyed dict.
ax.plot(N, random_network(N), label=('Random network'), c=c['g'], linestyle='--')
ax.plot(N, scale_free(N, gamma), label=('Scale-free'), c=c['v'])
ax.text(10**4, random_network(10**5.1), ('Random network: ' r'$k_{\max} \sim \ln{N}$'), ha='center', va='bottom', bbox=dict(fc='white', ec='none', alpha=.7))
ax.text(10**4, scale_free(10**5.3, gamma), ('Scale-free: ' r'$k_{\max} \sim N^{\frac{1}{\gamma - 1}}$'), ha='center', va='bottom', bbox=dict(fc='white', ec='none', alpha=.7))
ax.set(title='Scale-free vs. Random network',
       xscale='log',
       yscale='log',
       xlabel=r'$N$',
       ylabel=r'$k_{max}$',
       xlim=(1, 10**6),
       ylim=(0, 10**4)
       )
ax.grid(True)
# ax.legend('best')
plt.tight_layout()
# plt.savefig('fig/fig0405.png')
plt.show()
# -
| NetworkScience/04/code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Analyzing Netflix Data"
# > "DataCamp Project: Analyzing Netflix Data"
#
# - toc: true
# - branch: master
# - badges: true
# - comments: true
# - categories: [datacamp, projects, python]
# - hide: false
# + dc={"key": "4"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 1. Loading your friend's data into a dictionary
# <p><img src="https://assets.datacamp.com/production/project_1237/img/netflix.jpg" alt="Someone's feet on table facing a television"></p>
# <p>Netflix! What started in 1997 as a DVD rental service has since exploded into the largest entertainment/media company by <a href="https://www.marketwatch.com/story/netflix-shares-close-up-8-for-yet-another-record-high-2020-07-10">market capitalization</a>, boasting over 200 million subscribers as of <a href="https://www.cbsnews.com/news/netflix-tops-200-million-subscribers-but-faces-growing-challenge-from-disney-plus/">January 2021</a>.</p>
# <p>Given the large number of movies and series available on the platform, it is a perfect opportunity to flex our data manipulation skills and dive into the entertainment industry. Our friend has also been brushing up on their Python skills and has taken a first crack at a CSV file containing Netflix data. For their first order of business, they have been performing some analyses, and they believe that the average duration of movies has been declining. </p>
# <p>As evidence of this, they have provided us with the following information. For the years from 2011 to 2020, the average movie durations are 103, 101, 99, 100, 100, 95, 95, 96, 93, and 90, respectively.</p>
# <p>If we're going to be working with this data, we know a good place to start would be to probably start working with <code>pandas</code>. But first we'll need to create a DataFrame from scratch. Let's start by creating a Python object covered in <a href="https://learn.datacamp.com/courses/intermediate-python">Intermediate Python</a>: a dictionary!</p>
# + dc={"key": "4"} tags=["sample_code"]
# Our friend's aggregates: the average Netflix movie duration (in minutes)
# for each release year from 2011 through 2020.
years = list(range(2011, 2021))
durations = [103, 101, 99, 100, 100, 95, 95, 96, 93, 90]

# Pair the two lists up in a dictionary keyed by column name.
movie_dict = dict(years=years, durations=durations)

# Display the dictionary.
movie_dict
# + dc={"key": "11"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 2. Creating a DataFrame from a dictionary
# <p>Perfect! We now have our friend's data stored in a nice Python object. We can already perform several operations on a dictionary to manipulate its contents (such as updating or adding to it). But a more useful structure might be a <code>pandas</code> DataFrame, a tabular data structure containing labeled axes and rows. Luckily, DataFrames can be created very easily from the dictionary created in the previous step!</p>
# <p>To convert our dictionary <code>movie_dict</code> to a <code>pandas</code> DataFrame, we will first need to import the library under its usual alias. We'll also want to inspect our DataFrame to ensure it was created correctly. Let's perform these steps now.</p>
# + dc={"key": "11"} tags=["sample_code"]
# Import pandas under its usual alias.
import pandas as pd

# Build a tabular DataFrame from the dictionary; each key becomes a column.
durations_df = pd.DataFrame(movie_dict)

# Show the result to confirm it was created correctly.
print(durations_df)
# + dc={"key": "18"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 3. A visual inspection of our data
# <p>Alright, we now have a <code>pandas</code> DataFrame, the most common way to work with tabular data in Python. Now back to the task at hand. We want to follow up on our friend's assertion that movie lengths have been decreasing over time. A great place to start will be a visualization of the data.</p>
# <p>Given that the data is continuous, a line plot would be a good choice, with the dates represented along the x-axis and the average length in minutes along the y-axis. This will allow us to easily spot any trends in movie durations. There are many ways to visualize data in Python, but <code>matploblib.pyplot</code> is one of the most common packages to do so.</p>
# <p><em>Note: In order for us to correctly test your plot, you will need to initialize a <code>matplotlib.pyplot</code> Figure object, which we have already provided in the cell below. You can continue to create your plot as you have learned in Intermediate Python.</em></p>
# + dc={"key": "18"} tags=["sample_code"]
# Import matplotlib.pyplot under its usual alias and create a figure
import matplotlib.pyplot as plt
fig = plt.figure()

# Draw a line plot of the yearly averages: years on x, minutes on y
plt.plot(durations_df['years'], durations_df['durations'])

# Create a title
plt.title("Netflix Movie Durations 2011-2020")

# Show the plot
plt.show()
# + dc={"key": "25"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 4. Loading the rest of the data from a CSV
# <p>Well, it looks like there is something to the idea that movie lengths have decreased over the past ten years! But equipped only with our friend's aggregations, we're limited in the further explorations we can perform. There are a few questions about this trend that we are currently unable to answer, including:</p>
# <ol>
# <li>What does this trend look like over a longer period of time?</li>
# <li>Is this explainable by something like the genre of entertainment?</li>
# </ol>
# <p>Upon asking our friend for the original CSV they used to perform their analyses, they gladly oblige and send it. We now have access to the CSV file, available at the path <code>"datasets/netflix_data.csv"</code>. Let's create another DataFrame, this time with all of the data. Given the length of our friend's data, printing the whole DataFrame is probably not a good idea, so we will inspect it by printing only the first five rows.</p>
# + dc={"key": "25"} tags=["sample_code"]
# Read in the CSV as a DataFrame
netflix_df = pd.read_csv("datasets/netflix_data.csv")

# Print only the first five rows (the full table is too long to display)
netflix_df.head()
# + dc={"key": "32"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 5. Filtering for movies!
# <p>Okay, we have our data! Now we can dive in and start looking at movie lengths. </p>
# <p>Or can we? Looking at the first five rows of our new DataFrame, we notice a column <code>type</code>. Scanning the column, it's clear there are also TV shows in the dataset! Moreover, the <code>duration</code> column we planned to use seems to represent different values depending on whether the row is a movie or a show (perhaps the number of minutes versus the number of seasons)?</p>
# <p>Fortunately, a DataFrame allows us to filter data quickly, and we can select rows where <code>type</code> is <code>Movie</code>. While we're at it, we don't need information from all of the columns, so let's create a new DataFrame <code>netflix_movies</code> containing only <code>title</code>, <code>country</code>, <code>genre</code>, <code>release_year</code>, and <code>duration</code>.</p>
# <p>Let's put our data subsetting skills to work!</p>
# + dc={"key": "32"} tags=["sample_code"]
# Keep only the rows whose type is "Movie" (the dataset also contains TV shows,
# whose `duration` means number of seasons rather than minutes).
movie_mask = netflix_df["type"] == "Movie"
netflix_df_movies_only = netflix_df[movie_mask]

# Restrict to the columns needed for the duration analysis.
cols_of_interest = ["title", "country", "genre", "release_year", "duration"]
netflix_movies_col_subset = netflix_df_movies_only[cols_of_interest]

# Preview the first five rows of the subset.
netflix_movies_col_subset.head()
# + dc={"key": "39"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 6. Creating a scatter plot
# <p>Okay, now we're getting somewhere. We've read in the raw data, selected rows of movies, and have limited our DataFrame to our columns of interest. Let's try visualizing the data again to inspect the data over a longer range of time.</p>
# <p>This time, we are no longer working with aggregates but instead with individual movies. A line plot is no longer a good choice for our data, so let's try a scatter plot instead. We will again plot the year of release on the x-axis and the movie duration on the y-axis.</p>
# <p><em>Note: Although not taught in Intermediate Python, we have provided you the code <code>fig = plt.figure(figsize=(12,8))</code> to increase the size of the plot (to help you see the results), as well as to assist with testing. For more information on how to create or work with a <code>matplotlib</code> <code>figure</code>, refer to the <a href="https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.figure.html">documentation</a>.</em></p>
# + dc={"key": "39"} tags=["sample_code"]
# Create a figure and increase the figure size
fig = plt.figure(figsize=(12,8))

# Create a scatter plot of duration versus year.
# BUG FIX: the second argument was the truncated token `netfl'duration'`,
# which is a SyntaxError; it should index the subset DataFrame.
plt.scatter(netflix_movies_col_subset['release_year'],
            netflix_movies_col_subset['duration'])

# Create a title
plt.title("Movie Duration by Year of Release")

# Show the plot
plt.show()
# + dc={"key": "46"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 7. Digging deeper
# <p>This is already much more informative than the simple plot we created when our friend first gave us some data. We can also see that, while newer movies are overrepresented on the platform, many short movies have been released in the past two decades.</p>
# <p>Upon further inspection, something else is going on. Some of these films are under an hour long! Let's filter our DataFrame for movies with a <code>duration</code> under 60 minutes and look at the genres. This might give us some insight into what is dragging down the average.</p>
# + dc={"key": "46"} tags=["sample_code"]
# Filter for movies running under an hour.
under_an_hour = netflix_movies_col_subset["duration"] < 60
short_movies = netflix_movies_col_subset.loc[under_an_hour]

# Show the first 20 such movies.
short_movies.head(20)
# + dc={"key": "53"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 8. Marking non-feature films
# <p>Interesting! It looks as though many of the films that are under 60 minutes fall into genres such as "Children", "Stand-Up", and "Documentaries". This is a logical result, as these types of films are probably often shorter than 90-minute Hollywood blockbusters. </p>
# <p>We could eliminate these rows from our DataFrame and plot the values again. But another interesting way to explore the effect of these genres on our data would be to plot them, but mark them with a different color.</p>
# <p>In Python, there are many ways to do this, but one fun way might be to use a loop to generate a list of colors based on the contents of the <code>genre</code> column. Much as we did in Intermediate Python, we can then pass this list to our plotting function in a later step to color all non-typical genres in a different color!</p>
# <p><em>Note: Although we are using the basic colors of red, blue, green, and black, <code>matplotlib</code> has many named colors you can use when creating plots. For more information, you can refer to the documentation <a href="https://matplotlib.org/stable/gallery/color/named_colors.html">here</a>!</em></p>
# + dc={"key": "53"} tags=["sample_code"]
# Map the non-typical genres to distinct colors; every other genre is black.
genre_to_color = {"Children": "red", "Documentaries": "blue", "Stand-Up": "green"}

# One color per row of netflix_movies_col_subset, in row order, so the list
# lines up with the points of the scatter plot drawn later.
colors = [
    genre_to_color.get(row["genre"], "black")
    for _, row in netflix_movies_col_subset.iterrows()
]

# Inspect the first 10 values in the list
colors[:10]
# + dc={"key": "60"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 9. Plotting with color!
# <p>Lovely looping! We now have a <code>colors</code> list that we can pass to our scatter plot, which should allow us to visually inspect whether these genres might be responsible for the decline in the average duration of movies.</p>
# <p>This time, we'll also spruce up our plot with some additional axis labels and a new theme with <code>plt.style.use()</code>. The latter isn't taught in Intermediate Python, but can be a fun way to add some visual flair to a basic <code>matplotlib</code> plot. You can find more information on customizing the style of your plot <a href="https://matplotlib.org/stable/tutorials/introductory/customizing.html">here</a>!</p>
# + dc={"key": "60"} tags=["sample_code"]
# Set the figure style and initialize a new figure
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,8))

# Create a scatter plot of duration versus release_year, colored by genre group.
# NOTE(review): `c=colors` relies on the colors list being in the same row
# order as this DataFrame — it was built from the same frame above, so the
# ordering holds as long as neither is re-sorted in between.
netflix_movies_col_subset.plot(kind='scatter', x='release_year', y='duration', c=colors)

# Create a title and axis labels
plt.title("Movie duration by year of release")
plt.xlabel("Release year")
plt.ylabel("Duration (min)")

# Show the plot
plt.show()
# + dc={"key": "67"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 10. What next?
# <p>Well, as we suspected, non-typical genres such as children's movies and documentaries are all clustered around the bottom half of the plot. But we can't know for certain until we perform additional analyses. </p>
# <p>Congratulations, you've performed an exploratory analysis of some entertainment data, and there are lots of fun ways to develop your skills as a Pythonic data scientist. These include learning how to analyze data further with statistics, creating more advanced visualizations, and perhaps most importantly, learning more advanced ways of working with data in <code>pandas</code>. This latter skill is covered in our fantastic course <a href="www.datacamp.com/courses/data-manipulation-with-pandas">Data Manipulation with pandas</a>.</p>
# <p>We hope you enjoyed this application of the skills learned in Intermediate Python, and wish you all the best on the rest of your journey!</p>
# + dc={"key": "67"} tags=["sample_code"]
# Are we certain that movies are getting shorter?
# NOTE(review): the preceding analysis shows the trend is partly driven by
# non-feature genres, so "Yes" may overstate what was established — confirm
# the intended answer.
are_movies_getting_shorter = "Yes"
| _notebooks/2021-03-31-Analyzing-Netflix-Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# os module provides a portable way of using operating system dependent functionality.
# The os module provides a portable way of using operating system dependent functionality.
import os
# The sys module provides access to some variables used/maintained by the interpreter &
# to functions that interact strongly with it.
import sys
# The time module provides various time-related functions.
import time

# Reference: https://docs.python.org/3/library/os.html
# Ask the user which path to operate on.
path = input("Please input the path: ")

# listdir() returns a list containing the names of the entries in the directory given by the path parameter.
os.listdir(path)
def remove(path):
    """Delete *path*: an empty directory via os.rmdir, otherwise a file via
    os.remove. Failures are reported on stdout instead of raising."""
    if not os.path.isdir(path):
        print(path, "is not a directory.")
        try:
            if os.path.exists(path):
                print("The file exists.")
                os.remove(path)
                print("The file removed.")
        except OSError:
            print("Unable to remove the file: {0:s}".format(path))
        return
    print(path, "is a directory.")
    try:
        # rmdir only succeeds on an empty directory.
        os.rmdir(path)
        print("The empty folder removed.")
    except OSError:
        print("Unable to remove the folder: {0:s}".format(path))
remove(path)
# Command line arguments
for i in range(len(sys.argv)):
print("Command line argument #", i, ": ", sys.argv[i], sep="")
def cleanup(number_of_days, path):
    """
    Removes files from the passed in path that are older than or equal to the number_of_days
    """
    # Cutoff timestamp: files last modified at or before this moment are deleted.
    # The time() function returns the number of seconds passed since epoch (the point where time begins).
    time_in_secs = time.time() - (number_of_days * 24 * 60 * 60)  # time.time() is current time
    # topdown=False visits leaves first, so directories emptied by the file
    # deletions can be removed on the way back up.
    for root, dirs, files in os.walk(path, topdown=False):
        # walk() to walk through the directories
        for file in files:
            full_path = os.path.join(root, file)
            stat = os.stat(full_path)
            if stat.st_mtime <= time_in_secs:  # stat.st_mtime: time of last modification
                remove(full_path)
        # NOTE(review): removes any directory that ended up empty regardless
        # of its own age — including `path` itself on the final iteration;
        # confirm that is intended.
        if not os.listdir(root):
            remove(root)


if __name__ == "__main__":
    # Usage: script.py <days> <path>
    days, path = int(sys.argv[1]), sys.argv[2]
    cleanup(days, path)
| learn_os_module.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hurriyet Üzerinden Haber İçeriklerinin Scrape Edilmesi
#
# 1. Burada öncelikle, hürriyetin günlük olarak haberlerini listelediği bir sayfa üzerinden 20 adet haberin url'ini alıp, ardından bu haberlerin içerisindeki verileri kazıyacağız.
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup as bs
import requests
import datetime
def GetText(url):
    """Fetch one Hurriyet article and scrape its text.

    Hurriyet serves several different page layouts, so each known structure is
    tried in turn until one parses.

    Parameters:
        url: article path relative to http://www.hurriyet.com.tr/.
    Returns:
        (body_text, summarized_text, header, time) tuple of strings.
    """
    # First fetch the article page; Hurriyet has a few different layouts,
    # which are tried one by one.
    html = requests.get("http://www.hurriyet.com.tr/"+url).text
    soup = bs(html, "lxml")
    try:
        # Layout 1: the main text lives under "rhd-all-article-detail".
        body = soup.find("div", class_="rhd-all-article-detail").findAll('p')
    except AttributeError:
        try:
            # Layout 2: probe the individual elements one by one; if any is
            # missing this raises and we fall through to the next layout.
            body_text = soup.findAll("div", class_="news-box")[1].find('p').text
            summarized_text=soup.findAll("div", class_="news-detail-spot news-detail-spot-margin")[0].find('h2').text
            header = soup.find("h2", class_="news-detail-title selectionShareable local-news-title").text
            time = soup.find("div", class_="col-md-8 text-right").text[:10]
            return (body_text,summarized_text,header,time)
        except:
            # Layout 3: gallery-style pages with "description" blocks.
            header = soup.find("h1", class_="rhd-article-title").text
            time = soup.find("div", class_="rhd-time-box").text[:10]
            body = soup.findAll("h3", class_="description")
            body_text = ''
            for element in body:
                body_text += ''.join(element.findAll(text = True))
            summarized_text = soup.find("h2", class_="rhd-article-spot").text
            return (body_text,summarized_text,header,time)
    # Layout 1 succeeded: join the text of all paragraph elements.
    body_text = ''
    for element in body:
        body_text += ''.join(element.findAll(text = True))
    # Safeguard: fall back to the container's full text if no <p> text found.
    if len(body_text) == 0:
        body_text = soup.find("div", class_="rhd-all-article-detail").text
    summarized_text = soup.find("h2", class_="rhd-article-spot").text
    try:
        header = soup.find("h2", class_="news-detail-title selectionShareable local-news-title").text
        time = soup.find("div", class_="col-md-8 text-right").text[:10]
    except AttributeError:
        # Alternate header/time markup for this layout.
        header = soup.find("h1", class_="rhd-article-title").text
        time = soup.find("div", class_="rhd-time-box").text[:10]
        summarized_text = soup.find("h2", class_="rhd-article-spot").text
        return (body_text,summarized_text,header,time)
    return (body_text,summarized_text,header,time)
# +
# Collect the day's article links from Hurriyet's daily index page
# (up to 40 items are listed).
url = 'http://www.hurriyet.com.tr/index/?d=20191130'
html = requests.get(url).text
soup = bs(html, "lxml")

# Gather every link under a "news" div; these are the article URLs.
urlx = []
for links in soup.findAll("div", class_="news"):
    urlx.append(links.find('a').get('href'))

# Take 10 of the URLs.
url = urlx[10:20]

# Video-only URLs have no text body, so swap each one for a random article
# from the rest of the list.
# BUG FIX: iterate over a snapshot (url[:]); removing from the list being
# iterated skips the element after each removal, so some /video links could
# survive the pass.
# NOTE: the random replacement is not itself checked for being a video link.
for c in url[:]:
    if c[:6] == "/video":
        url.remove(c)
        url.append(urlx[np.random.randint(20,len(urlx))])

# Scrape the data for each URL.
data = []
for i in url:
    data.append(GetText(i))

# Assemble the scraped tuples into a pandas DataFrame.
data = pd.DataFrame(data)
data.columns = ["body_text","summarized_text","header","time"]
data.head()
# -
data
| examples/hurriyetScraper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the final Medicare cohort and keep patients with at least one
# qualifying cardiovascular/metabolic comorbidity at baseline (R0).
medicare = pd.read_csv("/netapp2/home/se197/RPDR/<NAME>/3_EHR_V2/CMS/Data/final_medicare.csv")
# NOTE: the original OR-chain listed Co_CAD_R0 twice; the duplicate is
# redundant under OR and has been removed (behavior unchanged).
_qualifying_cols = [
    'Co_CAD_R0', 'Co_Diabetes_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0',
    'Co_AFib_R0', 'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0',
    'Co_OthStroke_R0', 'Co_TIA_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PVD_R0',
]
# Equivalent to OR-ing the individual (col == 1) masks.
medicare = medicare[(medicare[_qualifying_cols] == 1).any(axis=1)]
medicare.shape
# +
# Hold out BWH as the external validation site; everything else trains.
train_set = medicare[medicare.Hospital != 'BWH'] # MGH; n = 204014
validation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither; n = 115726

import numpy as np

# Median split on EHR continuity (Cal_MPEC_R0), computed on the full cohort.
fifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'], 50)

_train_is_high = train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont
_train_is_low = train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont
train_set_high = train_set[_train_is_high]
train_set_low = train_set[_train_is_low]

_val_is_high = validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont
_val_is_low = validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont
validation_set_high = validation_set[_val_is_high]
validation_set_low = validation_set[_val_is_low]
# -
medicare.head()
# +
# Baseline (R0) predictor columns: comorbidity flags, screening/utilization
# indicators, medication (RX) flags, and four count-valued variables
# (Co_N_Drugs_R0, Co_N_Hosp_R0, Co_Total_HospLOS_R0, Co_N_MDVisit_R0).
predictor_variable = [
    'Co_CAD_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0', 'Co_AFib_R0',
    'Co_Hypertension_R0', 'Co_Hyperlipidemia_R0', 'Co_Atherosclerosis_R0',
    'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0', 'Co_OthStroke_R0',
    'Co_TIA_R0', 'Co_COPD_R0', 'Co_Asthma_R0', 'Co_Pneumonia_R0', 'Co_Alcoholabuse_R0',
    'Co_Drugabuse_R0', 'Co_Epilepsy_R0', 'Co_Cancer_R0', 'Co_MorbidObesity_R0',
    'Co_Dementia_R0', 'Co_Depression_R0', 'Co_Bipolar_R0', 'Co_Psychosis_R0',
    'Co_Personalitydisorder_R0', 'Co_Adjustmentdisorder_R0', 'Co_Anxiety_R0',
    'Co_Generalizedanxiety_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PUD_R0',
    'Co_UpperGIbleed_R0', 'Co_LowerGIbleed_R0', 'Co_Urogenitalbleed_R0',
    'Co_Othbleed_R0', 'Co_PVD_R0', 'Co_LiverDisease_R0', 'Co_MRI_R0',
    'Co_ESRD_R0', 'Co_Obesity_R0', 'Co_Sepsis_R0', 'Co_Osteoarthritis_R0',
    'Co_RA_R0', 'Co_NeuroPain_R0', 'Co_NeckPain_R0', 'Co_OthArthritis_R0',
    'Co_Osteoporosis_R0', 'Co_Fibromyalgia_R0', 'Co_Migraine_R0', 'Co_Headache_R0',
    'Co_OthPain_R0', 'Co_GeneralizedPain_R0', 'Co_PainDisorder_R0',
    'Co_Falls_R0', 'Co_CoagulationDisorder_R0', 'Co_WhiteBloodCell_R0', 'Co_Parkinson_R0',
    'Co_Anemia_R0', 'Co_UrinaryIncontinence_R0', 'Co_DecubitusUlcer_R0',
    'Co_Oxygen_R0', 'Co_Mammography_R0', 'Co_PapTest_R0', 'Co_PSATest_R0',
    'Co_Colonoscopy_R0', 'Co_FecalOccultTest_R0', 'Co_FluShot_R0', 'Co_PneumococcalVaccine_R0', 'Co_RenalDysfunction_R0', 'Co_Valvular_R0', 'Co_Hosp_Prior30Days_R0',
    'Co_RX_Antibiotic_R0', 'Co_RX_Corticosteroid_R0', 'Co_RX_Aspirin_R0', 'Co_RX_Dipyridamole_R0',
    'Co_RX_Clopidogrel_R0', 'Co_RX_Prasugrel_R0', 'Co_RX_Cilostazol_R0', 'Co_RX_Ticlopidine_R0',
    'Co_RX_Ticagrelor_R0', 'Co_RX_OthAntiplatelet_R0', 'Co_RX_NSAIDs_R0',
    'Co_RX_Opioid_R0', 'Co_RX_Antidepressant_R0', 'Co_RX_AAntipsychotic_R0', 'Co_RX_TAntipsychotic_R0',
    'Co_RX_Anticonvulsant_R0', 'Co_RX_PPI_R0', 'Co_RX_H2Receptor_R0', 'Co_RX_OthGastro_R0',
    'Co_RX_ACE_R0', 'Co_RX_ARB_R0', 'Co_RX_BBlocker_R0', 'Co_RX_CCB_R0', 'Co_RX_Thiazide_R0',
    'Co_RX_Loop_R0', 'Co_RX_Potassium_R0', 'Co_RX_Nitrates_R0', 'Co_RX_Aliskiren_R0',
    'Co_RX_OthAntihypertensive_R0', 'Co_RX_Antiarrhythmic_R0', 'Co_RX_OthAnticoagulant_R0',
    'Co_RX_Insulin_R0', 'Co_RX_Noninsulin_R0', 'Co_RX_Digoxin_R0', 'Co_RX_Statin_R0',
    'Co_RX_Lipid_R0', 'Co_RX_Lithium_R0', 'Co_RX_Benzo_R0', 'Co_RX_ZDrugs_R0',
    'Co_RX_OthAnxiolytic_R0', 'Co_RX_Dementia_R0', 'Co_RX_Hormone_R0',
    'Co_RX_Osteoporosis_R0', 'Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0',
    'Co_N_MDVisit_R0', 'Co_RX_AnyAspirin_R0', 'Co_RX_AspirinMono_R0', 'Co_RX_ClopidogrelMono_R0',
    'Co_RX_AspirinClopidogrel_R0', 'Co_RX_DM_R0', 'Co_RX_Antipsychotic_R0'
]
# Per-cohort design matrices restricted to the predictor columns.
co_train_gpop = train_set[predictor_variable]
co_train_high = train_set_high[predictor_variable]
co_train_low = train_set_low[predictor_variable]
co_validation_gpop = validation_set[predictor_variable]
co_validation_high = validation_set_high[predictor_variable]
co_validation_low = validation_set_low[predictor_variable]
# +
# Outcome: hemorrhage (Out_Hemorrhage_RC1), one series per cohort.
# NOTE: the variable names say "death" but the column is hemorrhage; the
# names are kept unchanged for compatibility with the rest of the notebook.
_OUTCOME_COL = 'Out_Hemorrhage_RC1'
out_train_death_gpop = train_set[_OUTCOME_COL]
out_train_death_high = train_set_high[_OUTCOME_COL]
out_train_death_low = train_set_low[_OUTCOME_COL]
out_validation_death_gpop = validation_set[_OUTCOME_COL]
out_validation_death_high = validation_set_high[_OUTCOME_COL]
out_validation_death_low = validation_set_low[_OUTCOME_COL]
# -
"""
def a(b):
count = 0
tval = 0
for val in b:
tval = tval + 1
if (val == 1):
count = count + 1
print(count, tval)
a(out_train_hemorrhage_gpop)
a(out_train_hemorrhage_high)
a(out_train_hemorrhage_low)
a(out_validation_hemorrhage_gpop)
a(out_validation_hemorrhage_high)
a(out_validation_hemorrhage_low)
"""
# # Template LR
def lr(X_train, y_train):
    """Grid-search a logistic regression over the regularization strength C.

    Parameters
    ----------
    X_train, y_train : training design matrix and binary outcome.

    Returns
    -------
    The fitted GridSearchCV object (5-fold CV over 20 log-spaced C values).
    """
    # Only the names actually used are imported; the original also imported
    # Lasso, PCA, SMOTE and StandardScaler, none of which were used.
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV

    model = LogisticRegression()
    param_grid = [
        {'C': np.logspace(-4, 4, 20)}
    ]
    clf = GridSearchCV(model, param_grid, cv=5, verbose=True, n_jobs=10)
    best_clf = clf.fit(X_train, y_train)
    return best_clf
# +
import pandas as pd
import numpy as np
import scipy.stats
# AUC comparison adapted from
# https://github.com/Netflix/vmaf/
def compute_midrank(x):
    """Compute 1-based midranks of a 1-D array (ties share the average rank).

    Args:
       x - a 1D numpy array
    Returns:
       array of midranks (same length as x)
    """
    J = np.argsort(x)
    Z = x[J]
    N = len(x)
    # np.float was removed in NumPy 1.24; the plain builtin is equivalent.
    T = np.zeros(N, dtype=float)
    i = 0
    while i < N:
        # Advance j past the run of values tied with Z[i].
        j = i
        while j < N and Z[j] == Z[i]:
            j += 1
        # Every member of the tie run gets the average (mid) rank.
        T[i:j] = 0.5 * (i + j - 1)
        i = j
    T2 = np.empty(N, dtype=float)
    # Note(kazeevn) +1 is due to Python using 0-based indexing
    # instead of 1-based in the AUC formula in the paper
    T2[J] = T + 1
    return T2
def fastDeLong(predictions_sorted_transposed, label_1_count):
    """
    The fast version of DeLong's method for computing the covariance of
    unadjusted AUC.
    Args:
       predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
          sorted such as the examples with label "1" are first
       label_1_count: number of positive examples (they occupy the first
          label_1_count columns)
    Returns:
       (AUC values, DeLong covariance)
    Reference:
     @article{sun2014fast,
       title={Fast Implementation of DeLong's Algorithm for
              Comparing the Areas Under Correlated Receiver Operating Characteristic Curves},
       author={<NAME> and <NAME>},
       journal={IEEE Signal Processing Letters},
       volume={21},
       number={11},
       pages={1389--1393},
       year={2014},
       publisher={IEEE}
     }
    """
    # Short variables are named as they are in the paper
    m = label_1_count
    n = predictions_sorted_transposed.shape[1] - m
    positive_examples = predictions_sorted_transposed[:, :m]
    negative_examples = predictions_sorted_transposed[:, m:]
    k = predictions_sorted_transposed.shape[0]
    # np.float was removed in NumPy 1.24; the plain builtin is equivalent.
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)
    for r in range(k):
        tx[r, :] = compute_midrank(positive_examples[r, :])
        ty[r, :] = compute_midrank(negative_examples[r, :])
        tz[r, :] = compute_midrank(predictions_sorted_transposed[r, :])
    # AUC via midranks of the positives within the pooled sample.
    aucs = tz[:, :m].sum(axis=1) / m / n - float(m + 1.0) / 2.0 / n
    # Structural components of the DeLong covariance estimator.
    v01 = (tz[:, :m] - tx[:, :]) / n
    v10 = 1.0 - (tz[:, m:] - ty[:, :]) / m
    sx = np.cov(v01)
    sy = np.cov(v10)
    delongcov = sx / m + sy / n
    return aucs, delongcov
def calc_pvalue(aucs, sigma):
    """Compute log10 of the p-value for the difference of two AUCs.

    Args:
       aucs: 1D array of two AUCs
       sigma: AUC DeLong covariance matrix
    Returns:
       log10(pvalue) of the two-sided z-test
    """
    contrast = np.array([[1, -1]])
    variance = np.dot(np.dot(contrast, sigma), contrast.T)
    z = np.abs(np.diff(aucs)) / np.sqrt(variance)
    # Two-sided: 2 * P(Z > z), computed in log space for numerical stability.
    return np.log10(2) + scipy.stats.norm.logsf(z, loc=0, scale=1) / np.log(10)
def compute_ground_truth_statistics(ground_truth):
    """Validate a 0/1 label vector; return (descending order, positive count).

    The order permutation puts all label-1 examples first, as required by
    fastDeLong.
    """
    assert np.array_equal(np.unique(ground_truth), [0, 1])
    positive_count = int(ground_truth.sum())
    descending_order = (-ground_truth).argsort()
    return descending_order, positive_count
def delong_roc_variance(ground_truth, predictions):
    """
    Computes ROC AUC variance for a single set of predictions
    Args:
       ground_truth: np.array of 0 and 1
       predictions: np.array of floats of the probability of being class 1
    """
    order, label_1_count = compute_ground_truth_statistics(ground_truth)
    # Reorder so positives come first, as fastDeLong expects; the leading
    # axis is the (single) classifier dimension.
    sorted_predictions = predictions[np.newaxis, order]
    aucs, delongcov = fastDeLong(sorted_predictions, label_1_count)
    assert len(aucs) == 1, "There is a bug in the code, please forward this to the developers"
    return aucs[0], delongcov
def delong_roc_test(ground_truth, predictions_one, predictions_two):
    """
    Computes log(p-value) for hypothesis that two ROC AUCs are different
    Args:
       ground_truth: np.array of 0 and 1
       predictions_one: predictions of the first model,
          np.array of floats of the probability of being class 1
       predictions_two: predictions of the second model,
          np.array of floats of the probability of being class 1
    """
    order, label_1_count = compute_ground_truth_statistics(ground_truth)
    stacked = np.vstack((predictions_one, predictions_two))
    aucs, delongcov = fastDeLong(stacked[:, order], label_1_count)
    return calc_pvalue(aucs, delongcov)
# +
def train_scores(X_train, y_train, clf=None):
    """Print accuracy, F1, macro-F2, ROC-AUC and log-loss on the training set.

    Parameters
    ----------
    X_train, y_train : training design matrix and binary outcome.
    clf : fitted classifier with predict/predict_proba; falls back to the
        module-level `best_clf` (the original relied on that global
        implicitly, which made the dependency invisible to callers).
    """
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import fbeta_score
    from sklearn.metrics import roc_auc_score
    from sklearn.metrics import log_loss
    if clf is None:
        clf = best_clf
    pred = clf.predict(X_train)
    actual = y_train
    print(accuracy_score(actual, pred))
    print(f1_score(actual, pred))
    print(fbeta_score(actual, pred, average='macro', beta=2))
    print(roc_auc_score(actual, clf.predict_proba(X_train)[:, 1]))
    print(log_loss(actual, clf.predict_proba(X_train)[:, 1]))
def test_scores(X_test, y_test, clf=None):
    """Print accuracy, F1, macro-F2, ROC-AUC and log-loss on a held-out set.

    Parameters
    ----------
    X_test, y_test : evaluation design matrix and binary outcome.
    clf : fitted classifier with predict/predict_proba; falls back to the
        module-level `best_clf` (the original relied on that global
        implicitly, which made the dependency invisible to callers).
    """
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import fbeta_score
    from sklearn.metrics import roc_auc_score
    from sklearn.metrics import log_loss
    if clf is None:
        clf = best_clf
    pred = clf.predict(X_test)
    actual = y_test
    print(accuracy_score(actual, pred))
    print(f1_score(actual, pred))
    print(fbeta_score(actual, pred, average='macro', beta=2))
    print(roc_auc_score(actual, clf.predict_proba(X_test)[:, 1]))
    print(log_loss(actual, clf.predict_proba(X_test)[:, 1]))
# -
def cross_val(X, y):
    """5-fold cross-validation of the `lr` grid search.

    Prints each fold's predicted probabilities, then the mean accuracy,
    macro-F1, macro-F2, ROC-AUC and log-loss across folds.

    Fixes over the original: the metric list no longer shadows the imported
    `log_loss` name, and the unused imports (cross_validate, log_loss,
    roc_auc_score) were dropped — sklearn.metrics is used fully qualified.
    """
    from sklearn.model_selection import KFold
    from sklearn.metrics import fbeta_score
    import sklearn
    import numpy as np

    cv = KFold(n_splits=5, random_state=1, shuffle=True)
    losses = []
    aucs = []
    accuracies = []
    f1_scores = []
    f2_scores = []
    for train_index, test_index in cv.split(X):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        model = lr(X_train, y_train)
        prob = model.predict_proba(X_test)[:, 1]  # vector of P(class 1)
        print(prob)
        pred = np.round(prob)  # 0/1 predictions at the 0.5 threshold
        losses.append(sklearn.metrics.log_loss(y_test, prob))
        aucs.append(sklearn.metrics.roc_auc_score(y_test, prob))
        accuracies.append(sklearn.metrics.accuracy_score(y_test, pred))
        f1_scores.append(sklearn.metrics.f1_score(y_test, pred, average='macro'))
        f2_scores.append(fbeta_score(y_test, pred, average='macro', beta=2))
    print(np.mean(accuracies))
    print(np.mean(f1_scores))
    print(np.mean(f2_scores))
    print(np.mean(aucs))
    print(np.mean(losses))
# +
from prince import FAMD

# Factor Analysis of Mixed Data: reduce the predictors to 15 components.
# The four count-valued columns stay numeric; every other 0/1 flag is
# recoded to 'yes'/'no' so FAMD treats it as categorical.
famd = FAMD(n_components=15, n_iter=3, random_state=101)

_count_cols = ('Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0', 'Co_N_MDVisit_R0')
# NOTE: DataFrame.iteritems() was removed in pandas 2.0; iterating the
# column index directly is equivalent (the column data was unused anyway).
for colName in co_train_gpop.columns:
    if colName not in _count_cols:
        co_train_gpop[colName].replace((1, 0), ('yes', 'no'), inplace=True)
        co_train_low[colName].replace((1, 0), ('yes', 'no'), inplace=True)
        co_train_high[colName].replace((1, 0), ('yes', 'no'), inplace=True)
        co_validation_gpop[colName].replace((1, 0), ('yes', 'no'), inplace=True)
        co_validation_high[colName].replace((1, 0), ('yes', 'no'), inplace=True)
        co_validation_low[colName].replace((1, 0), ('yes', 'no'), inplace=True)

# NOTE(review): each cohort is fitted and transformed with its own FAMD, so
# the component spaces are not directly comparable across cohorts — confirm
# this is intended.
famd.fit(co_train_gpop)
co_train_gpop_FAMD = famd.transform(co_train_gpop)

famd.fit(co_train_high)
co_train_high_FAMD = famd.transform(co_train_high)

famd.fit(co_train_low)
co_train_low_FAMD = famd.transform(co_train_low)

famd.fit(co_validation_gpop)
co_validation_gpop_FAMD = famd.transform(co_validation_gpop)

famd.fit(co_validation_high)
co_validation_high_FAMD = famd.transform(co_validation_high)

famd.fit(co_validation_low)
co_validation_low_FAMD = famd.transform(co_validation_low)
# -
# # General Population
# +
# Fit the grid-searched logistic regression on the FAMD components, report
# CV metrics on the training cohort, then score the held-out BWH cohort.
best_clf = lr(co_train_gpop_FAMD, out_train_death_gpop)
cross_val(co_train_gpop_FAMD, out_train_death_gpop)
print()
test_scores(co_validation_gpop_FAMD, out_validation_death_gpop)

# NOTE(review): the model was fitted on 15 FAMD components, but this pairing
# walks the full predictor list — coef_ slices beyond column 15 are empty, so
# the predictor-name/coefficient association below is misleading; confirm.
comb = []
for i in range(len(predictor_variable)):
    comb.append(predictor_variable[i] + str(best_clf.best_estimator_.coef_[:,i:i+1]))
comb
# -

# # High Continuity
# +
# Same pipeline restricted to the high-continuity (Cal_MPEC_R0 >= median) cohort.
best_clf = lr(co_train_high_FAMD, out_train_death_high)
cross_val(co_train_high_FAMD, out_train_death_high)
print()
test_scores(co_validation_high_FAMD, out_validation_death_high)

# NOTE(review): same predictor/coefficient pairing caveat as above.
comb = []
for i in range(len(predictor_variable)):
    comb.append(predictor_variable[i] + str(best_clf.best_estimator_.coef_[:,i:i+1]))
comb
# -

# # Low Continuity
# +
# Same pipeline restricted to the low-continuity (Cal_MPEC_R0 < median) cohort.
best_clf = lr(co_train_low_FAMD, out_train_death_low)
cross_val(co_train_low_FAMD, out_train_death_low)
print()
test_scores(co_validation_low_FAMD, out_validation_death_low)

# NOTE(review): same predictor/coefficient pairing caveat as above.
comb = []
for i in range(len(predictor_variable)):
    comb.append(predictor_variable[i] + str(best_clf.best_estimator_.coef_[:,i:i+1]))
comb
# -
| EHR_Claims/LR/Hemorrhage_FAMD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tsUsCSbBGZfG"
# # **Build Instance Graph (BIG) Algorithm**
# + [markdown] id="yFdXz_15G7j5"
# The code has been developed with the pm4py library, a set of functions used for process mining tasks
# + id="hrzTfqZbAOxm"
# !pip install -U pm4py
# + id="9x7Wtb7NDfrs"
from google.colab import drive
# Mount Google Drive at /content/drive so the dataset paths below resolve.
drive.mount('/content/drive')
# + [markdown] id="xs9hBrP2Hj84"
# ## Find Causal Relationship
# This function finds the causal relationships from the petri-net of the process model.
# The inputs are the petri-net, the initial marking, and the final marking.
# The output is a list of python pairs where the second element directly follows the first element in the petri-net, which translates to a causal relationship.
# + id="IBPIcxgoP8Zj"
from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
def findCausalRelationships(net, im, fm):
    """Return the causal activity pairs of a Petri net as a list.

    The pairs come from the 'sequence' relation of the net's footprint
    matrix: (a, b) means b can directly follow a in the model.
    """
    footprints = footprints_discovery.apply(net, im, fm)
    return list(footprints.get('sequence'))
# + [markdown] id="s3Bj0dBQGTkq"
# ##Extract Instance Graph
#
# This function extracts the instance graph of a trace. Every event of the trace is saved in a list V which represents the set of the nodes of the graph. An event is a pair of an ID (generated incrementally) and the activity label. The edges instead are saved as a pair of events in a list W. The algorithm is based on the definition 18 of the original paper.
# + id="hGLit-Iwe1xz"
def ExtractInstanceGraph(trace, cr):
    """Build the instance graph of a trace (Definition 18 of the BIG paper).

    Parameters
    ----------
    trace : iterable of events supporting event.get("concept:name").
    cr : collection of causal pairs (a, b), meaning b may directly follow a.

    Returns
    -------
    (V, W) : V is the node list [(id, activity), ...] with 1-based ids in
    trace order; W is the list of directed edges (node, node).
    """
    V = [(node_id, event.get("concept:name"))
         for node_id, event in enumerate(trace, start=1)]
    W = []
    for i in range(len(V)):
        for k in range(i + 1, len(V)):
            e1, e2 = V[i], V[k]
            if (e1[1], e2[1]) not in cr:
                continue
            # The edge e1 -> e2 is skipped only when BOTH sides are blocked
            # by an intermediate event: one causally reachable from e1 AND
            # one causally leading to e2.
            e1_blocked = any((e1[1], V[s][1]) in cr for s in range(i + 1, k))
            e2_blocked = any((V[s][1], e2[1]) in cr for s in range(i + 1, k))
            if not e1_blocked or not e2_blocked:
                W.append((e1, e2))
    return V, W
# + [markdown] id="Jw57g_F0Wtd5"
# ##Check Trace Conformance
#
# This function checks whether a given trace is conform to a petri-net through alignment based conformance checking. It takes in input the trace, the petri-net, the initial marking, and the final marking.
# It returns two lists: a list of sequences of deleted activities, and a list of sequences of inserted activities in the trace.
# Each element of the two lists is a list itself, this sublist contains pairs where the first element is the position of the deleted/inserted activity and the second is the activity label. In sequences of deleted activities the position value is always the same, while for inserted activities it's incremental.
# + id="bSR1aL0ITLLN"
from pm4py.algo.conformance.alignments.petri_net import algorithm as alignments
def checkTraceConformance(trace, net, initial_marking, final_marking):
    """Align a trace against the Petri net and collect its deviations.

    Returns (D, I):
      D - list of deleted-activity sequences (model moves the trace skipped);
      I - list of inserted-activity sequences (log moves the model lacks).
    Each sequence is a list of (position, activity) pairs; for a deletion run
    the position stays fixed at the index where the sequence is missing,
    while insertion positions advance with the trace.
    """
    aligned_traces = alignments.apply_trace(trace, net, initial_marking, final_marking)
    D = []
    I = []
    id = 0           # current position in the trace
    temp_d = []      # deletion run being accumulated
    temp_i = []      # insertion run being accumulated
    prev_d = False   # was the previous move a deletion?
    curr_d = False
    prev_i = False   # was the previous move an insertion?
    curr_i = False
    del_count = 1    # NOTE(review): never used
    for edge in aligned_traces['alignment']:
        id+=1
        # Silent/invisible model move: does not advance the trace position.
        if edge[1] is None:
            id-=1
            continue
        # Model move (log side is '>>'): activity deleted from the trace.
        if edge[0] == '>>':
            temp_d.append((id, edge[1]))
            curr_d = True
            id-=1  # a deletion does not consume a trace event
        # Log move (model side is '>>'): activity inserted in the trace.
        if edge[1] == '>>':
            temp_i.append((id, edge[0]))
            curr_i = True
        # An insertion run just ended: flush it as one sequence.
        if (prev_i and not curr_i):
            if len(temp_i) > 0:
                I.append(temp_i)
                temp_i = []
        prev_i = curr_i
        curr_i = False
        # A deletion run just ended: flush it as one sequence.
        if (prev_d and not curr_d):
            if len(temp_d) > 0:
                D.append(temp_d)
                temp_d = []
        prev_d = curr_d
        curr_d = False
    # Flush runs that extend to the end of the alignment.
    if len(temp_i) > 0:
        I.append(temp_i)
    if len(temp_d) > 0:
        D.append(temp_d)
    return D, I
# + [markdown] id="uPvSdX0CXCCh"
# ##View Instance Graph
#
# This function takes in input the list of nodes (events) and the list of edges and returns a GraphViz object. The other two inputs are view and the title of the graph. By default view is true so the function will show the graph in the output window.
#
# + id="dAEgPhJgKaDe"
from IPython import display
from graphviz import Digraph
def viewInstanceGraph(V, W, view=True, title="Instance Graph"):
    """Render an instance graph as a Graphviz Digraph.

    Each node (id, activity) is drawn as a circle labelled "id = activity";
    if `view` is True the graph is also displayed inline. Returns the
    Digraph object.
    """
    def _key(node):
        return str(node[0])

    def _label(node):
        return "{0} = {1}".format(node[0], node[1])

    dot = Digraph(comment=title, node_attr={'shape': 'circle'})
    for node in V:
        dot.node(_key(node), _label(node))
    for src, dst in W:
        dot.edge(_key(src), _key(dst))
    if view:
        display.display(dot)
    return dot
# + [markdown] id="k_KuMuuEXQW4"
# ##Irregular Graph Repairing
#
# This is the central function of the BIG Algorithm. Given an irregular graph and the lists of deleted (D) and inserted (I) activities, the algorithm returns a repaired graph. This function repairs first all the deleted activities, and then all the inserted activities, calling the DeletionRepair function and the InsertionRepair function respectively.
# + id="tDGHnve-YSri"
def irregularGraphRepairing(V, W, D, I, cr, view=False):
    """Repair an irregular instance graph given deletions D and insertions I.

    Applies DeletionRepair for every deleted sequence, then InsertionRepair
    for every inserted sequence. Returns (repaired edge set, final Graphviz
    object). `view` controls inline display of the intermediate graphs.
    """
    Wi = W
    # Labels of every deleted activity (consulted by DeletionRepair).
    all_deleted_labels = []
    for d_element in D:
        for element in d_element:
            if element[1] not in all_deleted_labels:
                all_deleted_labels.append(element[1])
    for d_element in D:
        Wi = DeletionRepair(Wi, V, d_element, cr, all_deleted_labels)
        if view:
            print("Deletion repaired Instance Graph")
            viewInstanceGraph(V, Wi, view)
    # Every inserted (id, activity) pair (consulted by InsertionRepair).
    all_inserted = []
    for i_element in I:
        for i in i_element:
            if i not in all_inserted:
                all_inserted.append(i)
    for i_elements in I:
        Wi = InsertionRepair(Wi, V, i_elements, cr, all_inserted)
        if view:
            print("Insertion repaired Instance Graph")
            viewInstanceGraph(V, Wi, view)
    # BUG FIX: the original assigned `graph` only inside the `if view:`
    # branches, so calling with the default view=False raised
    # UnboundLocalError at the return below. Build the final graph
    # unconditionally (without displaying it again).
    graph = viewInstanceGraph(V, Wi, False)
    return Wi, graph
# + [markdown] id="NjCbyBe5Xb4p"
# ###Is Reachable
#
# This is a boolean function used in both DelationRepair and InsertionRepair functions. It takes as input the instance graph and two events (source and destination) and checks if a path from the source and the destination events exists in the graph.
# + id="FA8tWyk4yTWb"
def isReachable(V, W, s, d):
    """Breadth-first search: True iff node d is reachable from node s via W.

    Nodes are (id, activity) pairs with 1-based ids indexing into V; a node
    is considered reachable from itself.
    """
    seen = [False] * len(V)
    frontier = [s]
    seen[s[0] - 1] = True
    while frontier:
        current = frontier.pop(0)
        if current == d:
            return True
        # Enqueue every unvisited successor of the current node.
        for src, dst in W:
            if src == current and not seen[dst[0] - 1]:
                frontier.append(dst)
                seen[dst[0] - 1] = True
    return False
# + [markdown] id="dN1Ko5b3Xwer"
# ###Deletion Repair
#
# The idea underlying this function consists in connecting activities that occurred before and after the deleted activities, and in removing those edges which should not have been created according to the petri-net. For more information, see sub-chapter 5.1 of the reference paper.
# + id="rOR1BoFUa0Uk"
def DeletionRepair(Wi, V, d_elements, cr, all_deleted):
    """Repair edges around one deleted-activity sequence (BIG paper, 5.1).

    Wi : current edge list; V : node list [(id, activity), ...];
    d_elements : one deletion run [(position, activity), ...] where the
    position is the 1-based index at which the run is missing;
    cr : causal pairs; all_deleted : labels of every deleted activity.
    Returns the updated edge list.
    """
    v_len = len(V)
    Wr1 = []  # edges into position i that should not have been created
    Wr2 = []  # edges spanning position i that should not have been created
    i = d_elements[0][0]
    if i <= v_len:
        for edge in Wi:
            # Edge ends exactly where the deletion happened: drop it if some
            # predecessor could causally lead into the deleted run.
            if edge[1][0] == i and edge[0][0] < i and (d_elements[-1][1],V[i-1][1]) in cr:
                for h in range(edge[0][0], i):
                    if (V[h-1][1],d_elements[0][1]) in cr:
                        Wr1.append(edge)
                        break
            # Edge jumps over position i while the deleted run causally leads
            # to its target.
            if edge[0][0] < i and edge[1][0] > i and (d_elements[-1][1],edge[1][1]) in cr:
                if edge[0][1] in all_deleted:
                    Wr2.append(edge)
                elif (edge[0][1],d_elements[0][1]) in cr:
                    for l in range(i+1, edge[1][0]):
                        if (V[l-1],edge[1]) in Wi:
                            Wr2.append(edge)
                            break
    Wi = list(set(Wi) - set(Wr1 + Wr2))
    # Reconnect events before the deletion to events at/after it whenever the
    # deleted run causally bridges them and no path already exists.
    for k in range(i - 1, 0, -1):
        for j in range(i, v_len+1):
            if (V[k-1][1],d_elements[0][1]) in cr:
                if (d_elements[-1][1], V[j-1][1]) in cr:
                    if not isReachable(V, Wi, V[k-1], V[j-1]):
                        # flag1: V[k-1] has no outgoing edge into (k, j).
                        flag1 = True
                        for l in range(k + 1, j):
                            if (V[k-1],V[l-1]) in Wi:
                                flag1 = False
                                break
                        # flag2: V[j-1] has no incoming edge from (k, i).
                        flag2 = True
                        for m in range(k + 1, i):
                            if (V[m-1],V[j-1]) in Wi:
                                flag2 = False
                                break
                        if flag1 or flag2:
                            Wi.append((V[k-1],V[j-1]))
    return Wi
# + [markdown] id="SUdEK_a5YCMY"
# ###Insertion Repair
#
# This function is aimed at restructuring an irregular graph when a sequence of inserted activities is detected. For more information, see sub-chapter 5.2 of the original paper.
# + id="xLaIGSWly3cQ"
def InsertionRepair(W, V, i_elements, cr, all_inserted):
    """Repair edges around one inserted-activity sequence (BIG paper, 5.2).

    W : current edge list; V : node list [(id, activity), ...];
    i_elements : one insertion run [(position, activity), ...] occupying
    positions i..j of the trace; cr : causal pairs; all_inserted : every
    inserted (position, activity) pair across all runs.
    Returns the updated edge list.
    """
    v_len = len(V)
    # Wr* collect edges to remove, Wa* edges that were added.
    Wr1=[]  # edges entering the inserted span from before it
    Wr2=[]  # edges leaving the inserted span to after it
    Wr3=[]  # edges fully inside the inserted span
    Wr4=[]  # shortcut edges bypassing the span (recomputed below)
    Wr5=[]  # stale outgoing edges of the predecessor of the span
    Wa1=[]  # added edges: end of span -> later events
    Wa2=[]  # added edges: earlier events -> start of span
    Wa3=[]  # added chain edges inside the span
    i= i_elements[0][0]
    j=i+len(i_elements)-1
    Wi=W.copy()
    # Remove every edge touching the inserted span.
    for edge in Wi:
        if edge[0][0]<i and edge[1][0]>=i and edge[1][0]<=j:
            Wr1.append(edge)
        if edge[0][0]>=i and edge[0][0]<=j and edge[1][0]>j:
            Wr2.append(edge)
        if edge[0][0]>=i and edge[0][0]<=j and edge[1][0]>=i and edge[1][0]<=j:
            Wr3.append(edge)
    Wi= list(set(Wi) - set(Wr1 + Wr2 + Wr3))
    # Connect the end of the span to later, non-inserted events that the
    # span's predecessor could causally reach (unless already reachable).
    for k in range(j+1, v_len+1):
        if V[k-1] not in all_inserted:
            if (V[i-2][1],V[k-1][1]) in cr or (V[i-2],V[k-1]) in Wi:
                if not isReachable(V, Wi, V[j-1], V[k-1]):
                    Wi.append((V[j-1],V[k-1]))
                    Wa1.append((V[j-1],V[k-1]))
    # If the span's predecessor cannot causally reach the event after the
    # span (or the span ends the trace), attach the span directly after its
    # predecessor; otherwise look backwards for a suitable anchor.
    # if i < v_len and (V[i-2][1],V[i][1]) not in cr:
    if i == v_len or (V[i-2][1],V[i][1]) not in cr:
        Wi.append((V[i-2],V[i-1]))
        Wa2.append((V[i-2],V[i-1]))
    else:
        for k in range(i-1,0,-1):
            if V[k-1] not in all_inserted:
                if j < v_len and ((V[k-1][1],V[j][1]) in cr or (V[k-1],V[j]) in Wi):
                    if not isReachable(V, Wi, V[k-1],V[i-1]):
                        Wi.append((V[k-1],V[i-1]))
                        Wa2.append((V[k-1],V[i-1]))
    # Chain the inserted events among themselves.
    for k in range(i, j):
        Wa3.append((V[k-1],V[k]))
    if len(Wa3)>0:
        Wi=Wi+Wa3
    # Drop shortcut edges that would bypass the (now chained) span.
    for edge in Wa2:
        for edge2 in Wa1:
            if edge[1][0]>=i and edge[1][0]<=j:
                if edge2[0][0]>=i and edge2[0][0]<=j:
                    Wr4.append((edge[0],edge2[1]))
    Wi= list(set(Wi) - set(Wr4))
    # When the span was attached directly after its predecessor, remove that
    # predecessor's stale edges into events beyond the span start.
    # if i < v_len and (V[i-2][1],V[i][1]) not in cr
    if i == v_len or (V[i-2][1],V[i][1]) not in cr:
        for edge in Wi:
            if edge[1][0]>i and edge[0][0]==i-1:
                Wr5.append(edge)
        Wi = list(set(Wi) - set(Wr5))
    return Wi
# + [markdown] id="B-A_1jJEYLFg"
# ##Save GFile
#
# The SaveGFile function saves the repaired graph generated from the BIG algorithm in the .g format with extra information relatively the execution time of the algorithm and the eventual repaired events (deleted or inserted).
# The saveGfinal function instead saves all the repaired graph generated in a single .g format file.
# + id="MfNJ8VkUUVq5"
def saveGFile(V, W, path, D, I, time, sort_labels):
    """Write one instance graph to `path` in .g format.

    Header comments record the execution time and the deleted/inserted
    activities; then one "v id label" line per node, a blank separator, and
    one "e src dst srcLabel__dstLabel" line per edge. When `sort_labels` is
    true the edge list is sorted in place first.
    """
    with open(path, 'w') as out:
        out.write("# Execution Time: {0:.3f} s\n".format(time))
        out.write("# Deleted Activities: {0}\n".format(D))
        out.write("# Inserted Activities: {0}\n".format(I))
        for node_id, label in V:
            out.write("v {0} {1}\n".format(node_id, label))
        out.write("\n")
        if sort_labels:
            W.sort()
        for src, dst in W:
            out.write("e {0} {1} {2}__{3}\n".format(src[0], dst[0], src[1], dst[1]))
# + id="ak82yrcJMnaW"
def saveGfinal(V, W, path, sort_labels):
    """Append one instance graph to the cumulative .g file at `path`.

    Each graph is introduced by an "XP" line, followed by its "v" node lines
    and "e" edge lines (same format as saveGFile) and a trailing blank line.
    When `sort_labels` is true the edge list is sorted in place first.
    The redundant explicit f.close() inside the with-block was removed:
    the context manager already closes the file.
    """
    with open(path, 'a') as f:
        f.write("XP \n")
        for n in V:
            f.write("v {0} {1}\n".format(n[0], n[1]))
        if sort_labels:
            W.sort()
        for e in W:
            f.write("e {0} {1} {2}__{3}\n".format(e[0][0], e[1][0], e[0][1], e[1][1]))
        f.write("\n")
# + [markdown] id="sE4hKAsyYWUo"
# ##Main function
#
# This block puts together the previous functions to form the complete BIG algorithm.
# It takes in input, the path of the petri-net, the path of the xes log, the starting trace (not the trace id but the position in the log), the ending trace, view (to enable to visualize the instance graphs in the output window), and sort_labels(to sort the labels of the edges in the .g files). It will also save the graphviz and the .g files for each correct/repaired graph and one for all the instance graph.
#
# + id="nBd2cLSYNq2m"
from pm4py.streaming.importer.xes import importer as xes_importer
import time
from pm4py.objects.petri_net.importer import importer as pnml_importer
import graphviz
from pm4py.visualization.petri_net import visualizer as pn_visualizer
log_file = "/content/drive/MyDrive/DWH/BIG Datasets/BPI2017Denied/BPI2017Denied.xes"
net_file = "/content/drive/MyDrive/DWH/BIG Datasets/BPI2017Denied/BPI2017Denied_petriNet.pnml"
log_file2 = "/content/drive/MyDrive/DWH/BIG Datasets/testBank2000NoRandomNoise/testBank2000NoRandomNoise.xes"
net_file2 = "/content/drive/MyDrive/DWH/BIG Datasets/testBank2000NoRandomNoise/testBank2000NoRandomNoise_petriNet.pnml"
log_file3 = "/content/drive/MyDrive/DWH/BIG Datasets/Hospital_dcc/Hospital_dcc.xes"
net_file3 = "/content/drive/MyDrive/DWH/BIG Datasets/Hospital_dcc/Hospital_dcc_petriNet.pnml"
log_file4 = "/content/drive/MyDrive/DWH/BIG Datasets/bpi2012decompositionExpr/bpi2012decompositionExpr.xes"
net_file4 = "/content/drive/MyDrive/DWH/BIG Datasets/bpi2012decompositionExpr/bpi2012decompositionExpr_petriNet.pnml"
log_file5 = "/content/drive/MyDrive/DWH/BIG Datasets/testBank2000SCCUpdatedCopia/testBank2000SCCUpdatedCopia.xes"
net_file5 = "/content/drive/MyDrive/DWH/BIG Datasets/testBank2000SCCUpdatedCopia/testBank2000SCCUpdatedCopia_petriNet.pnml"
log_file6 = "/content/drive/MyDrive/DWH/BIG Datasets/toyex.xes"
net_file6 = "/content/drive/MyDrive/DWH/BIG Datasets/toyex_petriNet.pnml"
def BIG(net_path, log_path, tr_start=0, tr_end=None, view=False, sort_labels=False):
    """Run the Build Instance Graph algorithm over an event log.

    For every trace in the XES log at `log_path` (optionally restricted to
    positions tr_start..tr_end), extract its instance graph against the
    Petri net at `net_path`, repair it when alignment finds deletions or
    insertions, and save the per-trace .gv/.g files plus one cumulative .g
    file and a summary .txt. `view` enables inline rendering; `sort_labels`
    sorts edge lines in the .g output.
    """
    # Derive the dataset name from the log filename (used for output paths).
    splits = log_path.split('/')
    name = splits[-1].split(".")[0]
    streaming_ev_object = xes_importer.apply(log_path, variant=xes_importer.Variants.XES_TRACE_STREAM)
    net, initial_marking, final_marking = pnml_importer.apply(net_path)
    gviz = pn_visualizer.apply(net, initial_marking, final_marking)
    display.display(gviz)
    gviz.render(filename="petri")
    start_time_total = time.time()
    cr = findCausalRelationships(net, initial_marking, final_marking)
    if view:
        print(cr)
    count = 0
    repairs = 0
    for trace in streaming_ev_object:
        count += 1
        if count < tr_start:
            continue
        elif tr_end is not None and count > tr_end:
            break
        num = trace.attributes.get('concept:name')
        trace_start_time = time.time()
        V, W = ExtractInstanceGraph(trace,cr)
        if view:
            print("\n\n------------------------------------\nUnrepaired Instance Graph")
            print(V)
        graph = viewInstanceGraph(V, W, view)
        D, I = checkTraceConformance(trace,net,initial_marking, final_marking)
        print("Count {0}, Len {1}, Trace name: {2}".format(count, len(V), num))
        if view:
            print(D)
            print(I)
        # Repair only non-conforming traces.
        # NOTE(review): irregularGraphRepairing as written assigns its
        # returned graph only under `if view:`, so with view=False this call
        # raises UnboundLocalError whenever a repair occurs — see that
        # function.
        if len(D)+len(I)>0:
            repairs += 1
            W, graph = irregularGraphRepairing(V,W,D,I,cr, view)
        graph.save("{0}_instance_graphs/gviz_{1}.gv".format(name, num))
        saveGFile(V, W, "{0}_instance_graphs/IG_{1}.g".format(name, num), D, I, time.time()-trace_start_time, sort_labels)
        saveGfinal(V, W, "{0}_instance_graphs.g".format(name), sort_labels)
    # Summary of the whole run.
    elapsed = time.time() - start_time_total
    with open("{0}_instance_graphs/{1}.txt".format(name, name), 'w') as f:
        f.write("Execution Time: {0:.3f} s\n".format(elapsed))
        f.write("Number of traces: {0}\n".format(count))
        f.write("Number of repairs: {0}\n".format(repairs))
BIG(net_file5, log_file5)
# + id="2f4c1i9N0OoT"
# !zip -r /content/BPI2017Denied_instance_graphs.zip /content/BPI2017Denied_instance_graphs
# + id="FxLsN0d4Y74R"
# !zip -r /content/testBank2000NoRandomNoise.zip /content/testBank2000NoRandomNoise_instance_graphs
# + id="wp2Q-y-qgt8Y"
# !zip -r /content/bpi2012decompositionExpr.zip /content/bpi2012decompositionExpr_instance_graphs
# + id="4shXiqefi8ou"
# !zip -r /content/testBank2000SCCUpdated.zip /content/testBank2000SCCUpdatedCopia_instance_graphs
| Algoritmo BIG Final 14-11-2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Configurations for Classification Tasks
import os,nbloader,warnings,sys,time
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from random import shuffle
from define_choiceNet_cls import choiceNet_cls_class
from define_cnn_cls import cnn_cls_class
from multiprocessing import Process,Manager
from util import grid_maker,gpusession,load_mnist_with_noise,get_properIdx
if __name__ == "__main__":
print ("TensorFlow version is [%s]."%(tf.__version__))
# ### Main Configurations
methodList = ['CN']
useMixupList = [False]
outlierRatioList = [0.90]
errTypeList = ['rs'] # Random permutation (rp), random shuffle (rs), and biased
tau_invList = [1e+0,1e+1,1e+2,1e+3,1e-10,1e-8,1e-6,1e-4,1e-2,1e-1]
def get_mnist_config(_processID=0,_maxProcessID=8,_maxGPU=8,_DO_SHUFFLE=False):
    """Return (paramsList, GPU_ID) for this process's share of the grid.

    The full configuration grid is partitioned across _maxProcessID worker
    processes; the GPU is assigned round-robin over _maxGPU devices, and the
    parameter list can optionally be shuffled.
    """
    grid = grid_maker(methodList, useMixupList, outlierRatioList, errTypeList, tau_invList)
    # Indices of the grid entries this process is responsible for.
    task_ids = get_properIdx(_processID, _maxProcessID, _nTask=grid.nIter)
    params_for_process = [grid.paramList[idx] for idx in task_ids]
    gpu_id = _processID % _maxGPU
    if _DO_SHUFFLE:
        shuffle(params_for_process)
    return params_for_process, gpu_id
# ### Common Configurations
def get_mnist_common_config():
    """Return the MNIST hyper-parameters shared by every experiment.

    Returns a 12-tuple:
    (kmix, rho_ref_train, pi1_bias, logSigmaZval,
     logsumexp_coef, kl_reg_coef, l2_reg_coef,
     USE_INPUT_BN, USE_RESNET, USE_GAP, USE_KENDALL_LOSS, maxEpoch)
    """
    kmix = 10               # number of mixture components
    rho_ref_train = 0.95
    pi1_bias = 0.0
    logSigmaZval = -2
    logsumexp_coef = 1e-2
    kl_reg_coef = 1e-4
    l2_reg_coef = 1e-5
    USE_INPUT_BN = False
    USE_RESNET = True
    USE_GAP = False
    USE_KENDALL_LOSS = False
    maxEpoch = 40
    return (kmix, rho_ref_train, pi1_bias, logSigmaZval,
            logsumexp_coef, kl_reg_coef, l2_reg_coef,
            USE_INPUT_BN, USE_RESNET, USE_GAP, USE_KENDALL_LOSS, maxEpoch)
# ### MNIST Train Wrapper
def train_wrapper_mnist(_paramsList,_GPU_ID,_DO_PRINT=True):
    """Train one model per configuration tuple on the given GPU.

    Each entry of _paramsList is (method, useMixup, outlierRatio, errType,
    tau_inv).  NOTE(review): _DO_PRINT is accepted but never used here.
    """
    # Train on different configurations
    for pIdx,params in enumerate(_paramsList): # For all current configurations
        # Parse current configuration
        method,useMixup,outlierRatio,errType,tau_inv \
        = params[0],params[1],params[2],params[3],params[4]
        # Get common parameters
        xdim,ydim,hdims,filterSizes,max_pools,feat_dim = [28,28,1],10,[64,64],[3,3],[2,2],256
        actv,bn,VERBOSE = tf.nn.relu,slim.batch_norm,False
        kmix,rho_ref_train,pi1_bias,logSigmaZval \
        ,logsumexp_coef,kl_reg_coef,l2_reg_coef \
        ,USE_INPUT_BN,USE_RESNET,USE_GAP,USE_KENDALL_LOSS,maxEpoch \
        = get_mnist_common_config()
        # Load MNIST with label noise (the original comment said CIFAR-10,
        # but load_mnist_with_noise is what is called)
        trainimg,trainlabel,testimg,testlabel,valimg,vallabel \
        = load_mnist_with_noise(_errType=errType,_outlierRatio=outlierRatio,_seed=0)
        # ===== // =====
        if method == 'CN':
            # ChoiceNet: reset the default graph so consecutive runs in the
            # same process do not accumulate ops; fix the seed for repeatability
            tf.reset_default_graph(); tf.set_random_seed(0)
            # Use %d formatting for tau_inv >= 1 so run names stay readable
            if tau_inv < 0.99999:
                _name = ('mnist_%s_err%.0f_%s_tau_inv%.0e_choiceNet'%(errType,outlierRatio*100,('mixup' if useMixup else 'basic'),tau_inv ))
            else:
                _name = ('mnist_%s_err%.0f_%s_tau_inv%d_choiceNet'%(errType,outlierRatio*100,('mixup' if useMixup else 'basic'),tau_inv ))
            CN = choiceNet_cls_class(_name=_name
                ,_xdim=xdim,_ydim=ydim,_hdims=hdims,_filterSizes=filterSizes
                ,_max_pools=max_pools,_feat_dim=feat_dim,_kmix=kmix,_actv=actv,_bn=slim.batch_norm
                ,_rho_ref_train=rho_ref_train,_tau_inv=tau_inv,_pi1_bias=pi1_bias,_logSigmaZval=logSigmaZval
                ,_logsumexp_coef=logsumexp_coef,_kl_reg_coef=kl_reg_coef,_l2_reg_coef=l2_reg_coef
                ,_USE_INPUT_BN=USE_INPUT_BN,_USE_RESNET=USE_RESNET,_USE_GAP=USE_GAP,_USE_KENDALL_LOSS=USE_KENDALL_LOSS
                ,_USE_MIXUP=useMixup,_GPU_ID=_GPU_ID
                ,_VERBOSE=VERBOSE)
            sess = gpusession(); sess.run(tf.global_variables_initializer())
            CN.train(_sess=sess,_trainimg=trainimg,_trainlabel=trainlabel
                ,_testimg=testimg,_testlabel=testlabel,_valimg=valimg,_vallabel=vallabel
                ,_maxEpoch=maxEpoch,_batchSize=256,_lr=1e-5,_kp=0.95
                ,_LR_SCHEDULE=True,_PRINT_EVERY=100,_SAVE_BEST=True)
            sess.close()
        elif method == 'CNN':
            # Plain CNN baseline with the same backbone settings
            tf.reset_default_graph(); tf.set_random_seed(0)
            CNN = cnn_cls_class(_name=('mnist_%s_err%.0f_%s_tau_inv%.0e_cnn'%(errType,outlierRatio*100
                ,('mixup' if useMixup else 'basic'),tau_inv))
                ,_xdim=xdim,_ydim=ydim,_hdims=hdims,_filterSizes=filterSizes
                ,_max_pools=max_pools,_feat_dim=feat_dim,_actv=actv,_bn=slim.batch_norm
                ,_l2_reg_coef=1e-5
                ,_USE_INPUT_BN=USE_INPUT_BN,_USE_RESNET=USE_RESNET,_USE_GAP=USE_GAP
                ,_USE_MIXUP=useMixup,_GPU_ID=_GPU_ID
                ,_VERBOSE=VERBOSE)
            sess = gpusession(); sess.run(tf.global_variables_initializer())
            CNN.train(_sess=sess,_trainimg=trainimg,_trainlabel=trainlabel
                ,_testimg=testimg,_testlabel=testlabel,_valimg=valimg,_vallabel=vallabel
                ,_maxEpoch=maxEpoch,_batchSize=256,_lr=1e-5,_kp=0.95
                ,_LR_SCHEDULE=True,_PRINT_EVERY=100,_SAVE_BEST=True)
            sess.close()
        else:
            print ('Unknown method: [%s]'%(method))
# ### Worker Class
class worker_class(Process):
    """Child process that trains the MNIST configurations assigned to one
    process index (see get_mnist_config / train_wrapper_mnist)."""
    def __init__(self,_idx=0,_maxProcessID=8,_maxGPU=8,_name='worker',_FLAG='',_period=1,_maxTick=5,_VERBOSE=True):
        # NOTE(review): _period and _maxTick are accepted but never used.
        Process.__init__(self)
        # Initialize class
        self.idx = _idx
        self.maxProcessID = _maxProcessID
        self.maxGPU = _maxGPU
        self.name = _name
        self.FLAG = _FLAG # Running flag
        self.VERBOSE = _VERBOSE
        # Initialize Process
        # NOTE(review): this assigns a *string* to self.setName, which shadows
        # the Process.setName method of the old threading-style API -- it does
        # not rename the process. Presumably meant as a display name; confirm.
        self.setName = 'T_'+self.name
        self.args = (self.FLAG,)
        self.daemon = True # Make sure that each child is killed when the parent is dead. (?!)
        # Print
        if self.VERBOSE:
            print ("[%s] Instantiated."%(self.name))
    def run(self):
        # Entry point executed in the child process when start() is called.
        print ("Starting [%s]"%(self.name))
        time.sleep(1e-1)
        # Get configurations
        paramsList,GPU_ID = get_mnist_config(self.idx,self.maxProcessID,self.maxGPU)
        print ("processID:[%d/%d] GPU_ID:[%d] #Config:[%d]"
               %(self.idx,self.maxProcessID,GPU_ID,len(paramsList)))
        # Run
        train_wrapper_mnist(_paramsList=paramsList,_GPU_ID=GPU_ID,_DO_PRINT=False)
        print ("[%s] Done."%(self.name))
if __name__ == "__main__":
print ("worker_class defined.")
| code/nips_cls_config_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # sklearn-porter
#
# Repository: https://github.com/nok/sklearn-porter
#
# ## ExtraTreesClassifier
#
# Documentation: [sklearn.ensemble.ExtraTreesClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)
# ### Loading data:
# +
from sklearn.datasets import load_iris
iris_data = load_iris()
X, y = iris_data.data, iris_data.target
print X.shape, y.shape
# -
# ### Train classifier:
# +
from sklearn.ensemble import ExtraTreesClassifier
clf = ExtraTreesClassifier(n_estimators=15, random_state=0)
clf.fit(X, y)
# -
# ### Transpile classifier:
# +
# %%time
from sklearn_porter import Porter
output = Porter(clf, language='c').export()
print output
# -
# ### Run classification in C:
# Save the transpiled estimator:
with open('forest.c', 'w') as f:
f.write(output)
# Compiling:
# + language="bash"
#
# gcc forest.c -std=c99 -lm -o forest
# -
# Prediction:
# + language="bash"
#
# ./forest 1 2 3 4
| examples/estimator/classifier/ExtraTreesClassifier/c/basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Predicting Survival on the Titanic
#
# ### History
# Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.
#
# ### Assignment:
#
# Build a Machine Learning Pipeline, to engineer the features in the data set and predict who is more likely to Survive the catastrophe.
#
# Follow the Jupyter notebook below, and complete the missing bits of code, to achieve each one of the pipeline steps.
# + tags=[]
import re
# to handle datasets
import pandas as pd
import numpy as np
# for visualization
import matplotlib.pyplot as plt
# to divide train and test set
from sklearn.model_selection import train_test_split
# feature scaling
from sklearn.preprocessing import StandardScaler
# to build the models
from sklearn.linear_model import LogisticRegression
# to evaluate the models
from sklearn.metrics import accuracy_score, roc_auc_score
# to persist the model and the scaler
import joblib
# to visualise al the columns in the dataframe
pd.pandas.set_option('display.max_columns', None)
# + tags=[]
#later, cell requires random seed to be 0
#so everything that needs a seed will run with this seed
RANDOM_SEED = 0
#What fraction of data set should be held out for
#testing
TEST_SIZE = 0.2
# -
# ## Prepare the data set
# + tags=[]
# load the data - it is available open source and online
data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')
# display data
data.head()
# + tags=[]
# replace interrogation marks by NaN values
data = data.replace('?', np.nan)
# -
data.head()
# + tags=[]
# retain only the first cabin if more than
# 1 are available per passenger
def get_first_cabin(row):
    """Return the first cabin code from a whitespace-separated cabin string.

    Non-string values (e.g. float NaN) and empty strings yield np.nan.
    """
    try:
        return row.split()[0]
    # AttributeError: non-string input (NaN floats have no .split());
    # IndexError: empty/whitespace-only string. The original bare `except:`
    # would also have swallowed KeyboardInterrupt and genuine bugs.
    except (AttributeError, IndexError):
        return np.nan
data['cabin'] = data['cabin'].apply(get_first_cabin)
# + tags=[]
# extracts the title (Mr, Ms, etc) from the name variable
def get_title(passenger):
    """Extract the honorific title (Mrs/Mr/Miss/Master) from a raw name
    string, or 'Other' if none of them appears."""
    # Order matters: 'Mrs' must be tested before 'Mr' because the
    # substring 'Mr' also occurs inside 'Mrs'.
    for title in ('Mrs', 'Mr', 'Miss', 'Master'):
        if re.search(title, passenger):
            return title
    return 'Other'
data['title'] = data['name'].apply(get_title)
# -
data.head()
# + tags=[]
# cast numerical variables as floats
data['fare'] = data['fare'].astype('float')
data['age'] = data['age'].astype('float')
# + tags=[]
# drop unnecessary variables
data.drop(labels=['name','ticket', 'boat', 'body','home.dest'], axis=1, inplace=True)
# display data
data.head()
# -
# How was it determined that `['name','ticket', 'boat', 'body','home.dest']` are unnecessary variables?
# They don't influence the value of the target, which is `survived`
# + tags=[]
# save the data set
data.to_csv('titanic.csv', index=False)
# -
# ## Data Exploration
#
# ### Find numerical and categorical variables
# + tags=[]
target = 'survived'
# + tags=[]
vars_num = [var for var in data.columns if data[var].dtype != 'O' and var != target]
vars_cat = [var for var in data.columns if var not in vars_num and var != target]
print('Number of numerical variables: {}'.format(len(vars_num)))
print('Number of categorical variables: {}'.format(len(vars_cat)))
# + tags=[]
#double checking
print("vars_num = ",vars_num)
print("vars_cat = ", vars_cat)
# -
# ### Find missing values in variables
# + tags=[]
# first in numerical variables
na_vars_num = [var for var in vars_num if data[var].isnull().sum() > 0]
na_vars_cat = [var for var in vars_cat if data[var].isnull().sum() > 0]
print("na_vars_num = ", na_vars_num)
print("na_vars_cat = ", na_vars_cat)
# + tags=[]
# now in categorical variables
na_vars_cat = [var for var in vars_cat if data[var].isnull().sum() > 0]
print("na_vars_cat = ", na_vars_cat)
# -
# ### Determine cardinality of categorical variables
# + tags=[]
data[vars_cat].nunique().sort_values(ascending=False).plot.bar(figsize=(24,10))
plt.ylabel("Cardinality of Categorical Variable")
plt.xlabel("Categorical Variable")
plt.show();
# -
# ### Determine the distribution of numerical variables
# + tags=[]
data[vars_num].hist(bins=50,density=True,figsize=(24,10));
# + tags=[]
def extract_letter_from_cabin(cabin_id):
    """Return the deck letter of a cabin code like 'C85' -> 'C'.

    Strings that do not match letter-then-digits, and non-string values
    (e.g. NaN), are returned unchanged.
    """
    # isinstance() instead of `type(x) == str` (handles subclasses and is
    # the idiomatic type check); the two redundant else-branches collapse
    # into a single fall-through return.
    if isinstance(cabin_id, str):
        match = re.search(r"^([a-zA-Z])\d+", cabin_id)
        if match:
            return match.group(1)
    return cabin_id
# + tags=[]
data["cabin"] = data["cabin"].apply(extract_letter_from_cabin)
# + tags=[]
#data.head()
# -
# ### Replace Missing data in categorical variables with the string **Missing**
# + tags=[]
for na_var in na_vars_cat:
data[na_var] = np.where(data[na_var].isnull(),"MISSING",data[na_var])
# -
# Only variable I see as being anywhere near normally distributed in `age`
# ### Remove rare labels in categorical variables
#
# - remove labels present in less than 5 % of the passengers
# + tags=[]
#find rare lables in set of categorical variables
def print_rare_labels(thedf, the_var, rare_threshold=0.01):
    """Return the share of rows held by each rare label of column *the_var*.

    A label is "rare" when its fraction of all rows (including rows where
    *the_var* is null) is strictly below *rare_threshold*.
    """
    frame = thedf.copy()
    # Fraction of the whole frame occupied by each label
    label_shares = frame.groupby(the_var)[the_var].count() / len(frame)
    rare_mask = label_shares < rare_threshold
    return label_shares[rare_mask]
for cat_var in vars_cat:
print(print_rare_labels(data,cat_var),"\n")
# -
# I dropped rows with rare labels. The solution marked them as "Rare"
# + tags=[]
data.shape
# + tags=[]
#remove from training a test set to prevent overfitting
data = data[ (data["cabin"] != "G") & (data["cabin"] != "T") & (data["embarked"] != "MISSING")]
# + tags=[]
data.shape
# -
# ## Separate data into train and test
#
# Use the code below for reproducibility. Don't change it.
# + tags=[]
X_train, X_test, y_train, y_test = train_test_split(
data.drop('survived', axis=1), # predictors
data['survived'], # target
test_size=TEST_SIZE, # percentage of obs in test set
random_state=RANDOM_SEED) # seed to ensure reproducibility
X_train.shape, X_test.shape
# -
# ## Feature Engineering
#
# ### Extract only the letter (and drop the number) from the variable Cabin
# + tags=[]
X_train.head()
# -
# ### Fill in Missing data in numerical variables:
#
# - Add a binary missing indicator
# - Fill NA in original variable with the median
# + tags=[]
#not sure why we should grow the data frame by adding new columns which are binary flags indicating whether or not a corresponding column in the same row isnull()
#create variable names for the binary missing indicator
na_vars_num_names = [var + '_na' for var in na_vars_num]
for na_var in na_vars_num:
#at each row where na_var is null, replace it with the average of all values in column na_var
median_for_na_var = X_train[na_var].median()
X_train[na_var] = np.where(X_train[na_var].isnull(), median_for_na_var, X_train[na_var])
X_test[na_var] = np.where(X_test[na_var].isnull(), median_for_na_var, X_test[na_var])
# -
X_train.isnull().sum()
X_test.isnull().sum()
# + tags=[]
X_train.head()
# + tags=[]
X_train.shape
# + tags=[]
X_test.shape
# + tags=[]
X_train.shape
# + tags=[]
X_test.shape
# -
# ### Perform one hot encoding of categorical variables into k-1 binary variables
#
# - k-1, means that if the variable contains 9 different categories, we create 8 different binary variables
# - Remember to drop the original categorical variable (the one with the strings) after the encoding
# We can't separately one-hot encode the categorical variables in the training and the test set
# as the set of values for each categorical variable may not exist in both sets.
#
# So to get around that, I combine the two sets before performing the one-hot encoding
#
# There is no leakage of information between the two sets as I don't change which categorical variables exist in either set, I just make the encoding
# *uniform* between the two sets.
# + tags=[]
X = pd.concat([X_train, X_test])
for cat_var in vars_cat:
one_df = pd.get_dummies(X[cat_var], prefix=cat_var, drop_first=True)
X = pd.concat([X, one_df], axis=1)
X.drop([cat_var], axis=1, inplace=True)
X_train = X[ : X_train.shape[0] ]
X_test = X[ X_train.shape[0] : ]
# + tags=[]
X_train.shape
# + tags=[]
X_test.shape
# + tags=[]
X_train
# + tags=[]
X_test
# -
# Create list of new one-hot encoded variables
# ### Scale the variables
#
# - Use the standard scaler from Scikit-learn
# I did not normalize the OHE variables at first. Is it OK to do this? `TODO`: Research the effect of normalizing OHE variables.
# When I did not normalize the OHE variables I got this result:
# ```
# Test Set Accuracy = 0.6053639846743295
# Test Set ROC AUC Score = 0.8308427241548261
# Training Set Accuracy = 0.6269230769230769
# Training Set ROC AUC Score = 0.8127672319011345
# ```
#
# When I _did_ normalize the OHE variables I got this result:
#
# ```
# Test Set Accuracy = 0.6781609195402298
# Test Set ROC AUC Score = 0.8438878000979911
# Training Set Accuracy = 0.7201923076923077
# Training Set ROC AUC Score = 0.8413928175946468
# ```
#
# Normalizing the OHE variables resulted in better results.
# + tags=[]
normalizer = StandardScaler()
#normalizer.fit(X_train[vars_num])
#X_train_normalized = pd.DataFrame(normalizer.transform(X_train[vars_num]),columns=pd.Series(vars_num))
normalizer.fit(X_train)
X_train_normalized = pd.DataFrame(normalizer.transform(X_train),columns=X_train.columns)
# + tags=[]
X_train_normalized.shape
# + tags=[]
X_train_normalized.head()
# + tags=[]
#X_train.shape
# + tags=[]
#X_train.head()
# + tags=[]
#X_train.drop(vars_num, axis=1, inplace=True)
# + tags=[]
#X_train.reset_index(inplace=True)
# + tags=[]
#X_train.drop(["index"],axis=1, inplace=True)
# + tags=[]
#X_train.head()
# + tags=[]
#X_train.shape
# + tags=[]
#X_train2 = pd.concat([X_train_normalized, X_train], axis=1)
# + tags=[]
#X_train2.shape
# + tags=[]
#X_train = X_train2
# + tags=[]
#X_train.shape
# + tags=[]
#X_train.head()
# + tags=[]
X_test_normalized = pd.DataFrame(normalizer.transform(X_test),columns=X_test.columns)
#X_test_normalized = pd.DataFrame(normalizer.transform(X_test[vars_num]),columns=pd.Series(vars_num))
#X_test.drop(vars_num, axis=1, inplace=True)
#X_test.reset_index(inplace=True)
#X_test.drop(["index"],axis=1, inplace=True)
#X_test2 = pd.concat([X_test_normalized, X_test], axis=1)
#X_test = X_test2
# -
X_test_normalized
# + tags=[]
#X_test.head()
# + tags=[]
#X_test.shape
# -
# ## Train the Logistic Regression model
#
# - Set the regularization parameter to 0.0005
# - Set the seed to 0
# + tags=[]
y_train.shape
# + tags=[]
X_train_normalized.shape
# + tags=[]
lr_model = LogisticRegression(random_state=RANDOM_SEED, C=0.0005)
lr_model.fit(X_train_normalized, y_train)
# -
# ## Make predictions and evaluate model performance
#
# Determine:
# - roc-auc
# - accuracy
#
# **Important, remember that to determine the accuracy, you need the outcome 0, 1, referring to survived or not. But to determine the roc-auc you need the probability of survival.**
# + tags=[]
y_pred = lr_model.predict(X_test_normalized)
y_train_pred = lr_model.predict(X_train_normalized)
# + tags=[]
#y_pred
# + tags=[]
y_pred_proba = lr_model.predict_proba(X_test_normalized)
y_train_proba = lr_model.predict_proba(X_train_normalized)
# + tags=[]
#y_pred_proba[:,1]
# + tags=[]
#y_pred_proba
# + tags=[]
#y_test
# + tags=[]
print("Test Set Accuracy = ", accuracy_score(y_test, y_pred))
print("Test Set ROC AUC Score = ", roc_auc_score(y_test, y_pred_proba[:,1]))
print("Training Set Accuracy = ", accuracy_score(y_train, y_train_pred))
print("Training Set ROC AUC Score = ", roc_auc_score(y_train, y_train_proba[:,1]))
# -
# Going to write out the scaler and logistic regression model to read into the next notebook
# + tags=[]
joblib.dump(lr_model,"lr_model.joblib")
joblib.dump(normalizer, "normalizer.joblib")
# -
# That's it! Well done
#
# **Keep this code safe, as we will use this notebook later on, to build production code, in our next assignment!!**
| section-04-research-and-development/titanic-assignment/01-predicting-survival-titanic-assignement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib
import seaborn as sns
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import expectexception
df = pd.read_csv("./sam/data/combinedData.csv")
df.head()
diag = df[["ClaimID",
"Depression",
"Alzheimer",
"RheumatoidArthritis",
"Osteoporasis",
"Diabetes",
"KidneyDisease",
"IschemicHeart",
"Stroke",
"ObstrPulmonary",
"HeartFailure",
"Cancer",
]]
diag.head()
diag = diag.fillna(0).set_index('ClaimID')
diag.head()
frequent_itemsets = apriori(diag, min_support=0.3, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
rules.head()
rules[(rules['lift'] >= 1)&(rules['confidence'] >= 0.8)].sort_values("lift",ascending=False)
lift=rules['lift'].values
support=rules['support'].values
confidence=rules['confidence'].values
# +
import random
import matplotlib.pyplot as plt
# Scatter plot of association-rule support vs. confidence.
# (The original loop reassigned each element to itself -- a no-op -- and
# has been removed.)
plt.scatter(support, confidence, alpha=0.5)
plt.xlabel('support')
plt.ylabel('confidence')
plt.show()
# +
# Scatter plot of association-rule lift vs. confidence.
# Bug fix: the x-axis label said 'lift' but the plotted x data was
# 'support'; plot the lift values so the data matches the label.
# (The self-assignment no-op loop has also been removed.)
plt.scatter(lift, confidence, alpha=0.5)
plt.xlabel('lift')
plt.ylabel('confidence')
plt.show()
# -
| Market Basket Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using ipyWidgets
#
# In this notebook, we will use ipyWidgets to make dynamic selections of the data being visualized.
# +
import matplotlib.pyplot as plt
import plotnine as p9
import pandas as pd
import numpy as np
from copy import copy
from ipywidgets import widgets
from IPython.display import display
from plotnine.data import mtcars
# -
# First of all, [install ipywidgets](https://ipywidgets.readthedocs.io/en/stable/user_install.html):
#
# ```bash
# pip install ipywidgets
#
# # for jupyter notebooks:
# jupyter nbextension enable --py widgetsnbextension
#
# # for jupyter lab (requires npm):
# jupyter labextension install @jupyter-widgets/jupyterlab-manager
# ```
# Let's have a look at the plot with all the data. We are comparing cars with horse-power on the X axis and miles-per-gallon on the Y axis. The points are colored by car weight.
# +
# This has to be used the first time you make a plot. This magic allows the notebook to update plots.
# %matplotlib notebook
p = p9.ggplot(mtcars, p9.aes(x="hp", y="mpg", color="wt")) + \
p9.geom_point() + p9.theme_linedraw()
p
# -
# Now we will get relevant values for the creation of plots with sub-sets of data.
# Initially, select cars based on number of cylinders
# Prepare the list we will use to select sub-sets of data based on number of cylinders.
cylList = np.unique( mtcars['cyl'] )
# +
# The first selection is a drop-down menu for number of cylinders
cylSelect = widgets.Dropdown(
options=list(cylList),
value=cylList[1],
description='Cylinders:',
disabled=False,
)
# For the widgets to update the same plot, instead of creating one new image every time
# a selection changes. We keep track of the matplotlib image and axis, so we create only one
# figure and set of axis, for the first plot, and then just re-use the figure and axis
# with plotnine's "_draw_using_figure" function.
fig = None
axs = None
# This is the main function that is called to update the plot every time we chage a selection.
def plotUpdate(*args):
# Use global variables for matplotlib's figure and axis.
global fig, axs
# Get current values of the selection widget
cylValue = cylSelect.value
# Create a temporary dataset that is constrained by the user's selections.
tmpDat = mtcars.loc[(mtcars['cyl'] == cylValue),:]
# Create plotnine's plot
# Using the maximum and minimum values we gatehred before, we can keep the plot axis from
# changing with the cyinder selection
p = p9.ggplot(tmpDat, p9.aes(x="hp", y="mpg", color="wt")) + \
p9.geom_point() + p9.theme_linedraw()
if fig is None:
# If this is the first time a plot is made in the notebook, we let plotnine create a new
# matplotlib figure and axis.
fig, plot = p.draw(return_ggplot=True)
axs = plot.axs
else:
#p = copy(p)
# This helps keeping old selected data from being visualized after a new selection is made.
# We delete all previously reated artists from the matplotlib axis.
for artist in plt.gca().lines +\
plt.gca().collections +\
plt.gca().artists + plt.gca().patches + plt.gca().texts:
artist.remove()
# If a plot is being updated, we re-use the figure an axis created before.
p._draw_using_figure(fig, axs)
cylSelect.observe(plotUpdate, 'value')
# Display the widgets
display(cylSelect)
# Plots the first image, with initial values.
plotUpdate()
# Matplotlib function to make the image fit within the plot dimensions.
plt.tight_layout()
# Trick to get the first rendered image to follow the previous "tight_layout" command.
# without this, only after the first update would the figure be fit inside its dimensions.
cylSelect.value = cylList[0]
# -
# Having axis ranges change between selections does not help probing the data.
# +
# We now get the maximum ranges of relevant variables to keep axis constant between images.
# Get range of weight
minWt = min(mtcars['wt'])
maxWt = max(mtcars['wt'])
# We get all unique values of weigh, sort them, and transform the numpy.array into a python list.
wtOptions = list( np.sort(np.unique(mtcars.loc[mtcars['cyl']==cylList[0],'wt'])) )
minHP = min(mtcars['hp'])
maxHP = max(mtcars['hp'])
minMPG = min(mtcars['mpg'])
maxMPG = max(mtcars['mpg'])
# +
# The first selection is a drop-down menu for number of cylinders
cylSelect = widgets.Dropdown(
options=list(cylList),
value=cylList[1],
description='Cylinders:',
disabled=False,
)
# For the widgets to update the same plot, instead of creating one new image every time
# a selection changes. We keep track of the matplotlib image and axis, so we create only one
# figure and set of axis, for the first plot, and then just re-use the figure and axis
# with plotnine's "_draw_using_figure" function.
fig = None
axs = None
# This is the main function that is called to update the plot every time we chage a selection.
def plotUpdate(*args):
# Use global variables for matplotlib's figure and axis.
global fig, axs
# Get current values of the selection widget
cylValue = cylSelect.value
# Create a temporary dataset that is constrained by the user's selections.
tmpDat = mtcars.loc[(mtcars['cyl'] == cylValue),:]
# Create plotnine's plot
# Using the maximum and minimum values we gatehred before, we can keep the plot axis from
# changing with the cyinder selection
p = p9.ggplot(tmpDat, p9.aes(x="hp", y="mpg", color="wt")) + \
p9.geom_point() + p9.theme_linedraw() + \
p9.xlim([minHP, maxHP]) + p9.ylim([minMPG, maxMPG]) + \
p9.scale_color_continuous(limits=(minWt, maxWt))
if fig is None:
fig, plot = p.draw(return_ggplot=True)
axs = plot.axs
else:
#p = copy(p)
for artist in plt.gca().lines +\
plt.gca().collections +\
plt.gca().artists + plt.gca().patches + plt.gca().texts:
artist.remove()
p._draw_using_figure(fig, axs)
cylSelect.observe(plotUpdate, 'value')
# Display the widgets
display(cylSelect)
# Plots the first image, with inintial values.
plotUpdate()
# Matplotlib function to make the image fit within the plot dimensions.
plt.tight_layout()
# Trick to get the first rendered image to follow the previous "tight_layout" command.
# without this, only after the first update would the figure be fit inside its dimensions.
cylSelect.value = cylList[0]
# -
# Now we can make our selection more complicated by restricting the car data being visualized.
# Using a range slider we can restrict data based on car weight.
# +
# The first selection is a drop-down menu for number of cylinders
cylSelect = widgets.Dropdown(
options=list(cylList),
value=cylList[1],
description='Cylinders:',
disabled=False,
)
# The second selection is a range of weights
wtSelect = widgets.SelectionRangeSlider(
options=wtOptions,
index=(0,len(wtOptions)-1),
description='Weight',
disabled=False
)
widgetsCtl = widgets.HBox([cylSelect, wtSelect])
# The range of weights needs to always be dependent on the cylinder selection.
def updateRange(*args):
'''Updates the selection range from the slider depending on the cylinder selection.'''
cylValue = cylSelect.value
wtOptions = list( np.sort(np.unique(mtcars.loc[mtcars['cyl']==cylValue,'wt'])) )
wtSelect.options = wtOptions
wtSelect.index = (0,len(wtOptions)-1)
cylSelect.observe(updateRange,'value')
# For the widgets to update the same plot, instead of creating one new image every time
# a selection changes. We keep track of the matplotlib image and axis, so we create only one
# figure and set of axis, for the first plot, and then just re-use the figure and axis
# with plotnine's "_draw_using_figure" function.
fig = None
axs = None
# This is the main function that is called to update the plot every time we chage a selection.
def plotUpdate(*args):
# Use global variables for matplotlib's figure and axis.
global fig, axs
# Get current values of the selection widgets
cylValue = cylSelect.value
wrRange = wtSelect.value
# Create a temporary dataset that is constrained by the user's selections.
tmpDat = mtcars.loc[(mtcars['cyl'] == cylValue) & \
(mtcars['wt'] >= wrRange[0]) & \
(mtcars['wt'] <= wrRange[1]),:]
# Create plotnine's plot
p = p9.ggplot(tmpDat, p9.aes(x="hp", y="mpg", color="wt")) + \
p9.geom_point() + p9.theme_linedraw() + \
p9.xlim([minHP, maxHP]) + p9.ylim([minMPG, maxMPG]) + \
p9.scale_color_continuous(limits=(minWt, maxWt))
if fig is None:
fig, plot = p.draw(return_ggplot=True)
axs = plot.axs
else:
for artist in plt.gca().lines +\
plt.gca().collections +\
plt.gca().artists + plt.gca().patches + plt.gca().texts:
artist.remove()
p._draw_using_figure(fig, axs)
cylSelect.observe(plotUpdate, 'value')
wtSelect.observe(plotUpdate, 'value')
# Display the widgets
display(widgetsCtl)
# Plots the first image, with inintial values.
plotUpdate()
# Matplotlib function to make the image fit within the plot dimensions.
plt.tight_layout()
# Trick to get the first rendered image to follow the previous "tight_layout" command.
# without this, only after the first update would the figure be fit inside its dimensions.
cylSelect.value = cylList[0]
# -
# Finally, we can change some plot properties to make the final figure more understandable.
# +
# The first selection is a drop-down menu for number of cylinders
cylSelect = widgets.Dropdown(
options=list(cylList),
value=cylList[1],
description='Cylinders:',
disabled=False,
)
# The second selection is a range of weights
wtSelect = widgets.SelectionRangeSlider(
options=wtOptions,
index=(0,len(wtOptions)-1),
description='Weight',
disabled=False
)
widgetsCtl = widgets.HBox([cylSelect, wtSelect])
# The range of weights needs to always be dependent on the cylinder selection.
def updateRange(*args):
'''Updates the selection range from the slider depending on the cylinder selection.'''
cylValue = cylSelect.value
wtOptions = list( np.sort(np.unique(mtcars.loc[mtcars['cyl']==cylValue,'wt'])) )
wtSelect.options = wtOptions
wtSelect.index = (0,len(wtOptions)-1)
cylSelect.observe(updateRange,'value')
fig = None
axs = None
# This is the main function that is called to update the plot every time we chage a selection.
def plotUpdate(*args):
# Use global variables for matplotlib's figure and axis.
global fig, axs
# Get current values of the selection widgets
cylValue = cylSelect.value
wrRange = wtSelect.value
# Create a temporary dataset that is constrained by the user's selections of
# number of cylinders and weight.
tmpDat = mtcars.loc[(mtcars['cyl'] == cylValue) & \
(mtcars['wt'] >= wrRange[0]) & \
(mtcars['wt'] <= wrRange[1]),:]
# Create plotnine's plot showing all data ins smaller grey points, and
# the selected data with coloured points.
p = p9.ggplot(tmpDat, p9.aes(x="hp", y="mpg", color="wt") ) + \
p9.geom_point(mtcars, p9.aes(x="hp", y="mpg"), color="grey") + \
p9.geom_point(size=3) + p9.theme_linedraw() + \
p9.xlim([minHP, maxHP]) + p9.ylim([minMPG, maxMPG]) + \
p9.scale_color_continuous(name="spring",limits=(np.floor(minWt), np.ceil(maxWt))) +\
p9.labs(x = "Horse-Power", y="Miles Per Gallon", color="Weight" )
if fig is None:
fig, plot = p.draw(return_ggplot=True)
axs = plot.axs
else:
for artist in plt.gca().lines +\
plt.gca().collections +\
plt.gca().artists + plt.gca().patches + plt.gca().texts:
artist.remove()
p._draw_using_figure(fig, axs)
cylSelect.observe(plotUpdate, 'value')
wtSelect.observe(plotUpdate, 'value')
# Display the widgets
display(widgetsCtl)
# Plots the first image, with inintial values.
plotUpdate()
# Matplotlib function to make the image fit within the plot dimensions.
plt.tight_layout()
# Trick to get the first rendered image to follow the previous "tight_layout" command.
# without this, only after the first update would the figure be fit inside its dimensions.
cylSelect.value = cylList[0]
| plotnine_examples/tutorials/miscellaneous-using-notebook-widgets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sum
def sum(num1, num2):
    """Return the sum of the two arguments.

    NOTE(review): this shadows the built-in ``sum``; the name is kept
    because later cells call it directly.
    """
    total = num1 + num2
    return total
sum(1, 2)  # calls the tutorial's sum() above, not the builtin

# ## Simple List
# +
ls =[4,5,6]
print(ls[0])
#pop - remove (and return) the element at index 2
ls.pop(2)
#loop over the remaining elements
for l in ls:
    print(l)
ls.append(7)
for l in ls:
    print("After {}".format(l))
# +
#nested list - lists can contain lists to arbitrary depth
nestedlist =[1,2,[100,101,['target']]]
nestedlist[2]
nestedlist[2][2][0]
# -
# ## Dictionary
#dict - integer keys mapped to string values
dic ={1:"One",2:"two",3:"three",4:"four"}
dic[1]
#List in a dictionary - values can be any type, including lists
dic_1={1:"First",2:"Two",3:['threw','three']}
print(dic_1)
print("show list within dictionary :{}".format(dic_1[3][0]))
# ## Sets
#Sets - unordered collections of unique elements
# NOTE(review): the name `set` shadows the built-in `set` type below.
set ={1,4,5,6}
print(set)
set.add(1)  # adding a duplicate is a no-op: sets keep unique elements
print(set)
# +
# Strings in a set
str_set={"one","two","three"}
print("Set values {}".format(str_set))
# -
# add string to a set ("one" already present, so the set is unchanged)
str_set.add("one")
print("After set values {}".format(str_set))
str_set.add("four")
print("After set values {}".format(str_set))
# ## TUPLES
empty_tuple =()
print(empty_tuple)
# NOTE(review): the name `tuple` shadows the built-in `tuple` type.
tuple =(1,4,5)
# Tuples are immutable: this item assignment raises TypeError
# (intentional demonstration that ends the notebook with an error).
tuple[0]=2
| python-basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="SXxqwSh0C0Ou"
# # Using Embeddings for Document Classification
# + [markdown] colab_type="text" id="bPAKy-ytQmgM"
# ## Colab configurations
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 4441, "status": "ok", "timestamp": 1586004182079, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="NE9SwK_FC2Dl" outputId="a17813f1-3ba6-477e-ed52-271b497095f4"
import os

# When running inside Google Colab, mount Google Drive and switch into the
# notebook's working directory so relative paths resolve.
colab = False # Change to True if using Colab
if colab:
    from google.colab import drive
    drive.mount('/content/drive')
    gdrivedir = '/content/drive/My Drive/Colab Notebooks/deepnlpa3/'
    os.chdir(gdrivedir)
# !pwd
# + [markdown] colab_type="text" id="hMncl9roXEwq"
# ### Download dataset (Skip if not required)
# + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" executionInfo={"elapsed": 10446, "status": "ok", "timestamp": 1586004188102, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="bTaOBsYcUxAZ" outputId="ea5ffa25-1248-40f9-9fe0-2538903c6e4a"
# !pip install requests
import requests
def progress_bar(some_iter):
    """Wrap *some_iter* in a tqdm progress bar when tqdm is available.

    Falls back to returning the iterable unchanged if tqdm is not
    installed, so callers can always iterate the result.
    """
    try:
        from tqdm import tqdm
    except ModuleNotFoundError:
        return some_iter
    return tqdm(some_iter)
def download_file_from_google_drive(id, destination):
    """Download a shared file from Google Drive to a local path.

    Args:
        id (str): the Google Drive file id from the share link.
        destination (str): local file path to write the downloaded bytes to.

    Performs Drive's "large file" confirmation handshake: for big files the
    first response carries a download-warning cookie whose value must be
    echoed back as a ``confirm`` query parameter to get the real content.
    """
    print("Trying to fetch {}".format(destination))

    def get_confirm_token(response):
        # Drive signals "too big for virus scan" via a cookie whose name
        # starts with 'download_warning'; its value is the confirm token.
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None

    def save_response_content(response, destination):
        # Stream to disk in fixed-size chunks so large files never need to
        # fit in memory; progress_bar wraps the chunk iterator for feedback.
        CHUNK_SIZE = 32768
        with open(destination, "wb") as f:
            for chunk in progress_bar(response.iter_content(CHUNK_SIZE)):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)

    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params = { 'id' : id }, stream = True)
    token = get_confirm_token(response)

    if token:
        # Re-request with the confirmation token to receive the payload.
        params = { 'id' : id, 'confirm' : token }
        response = session.get(URL, params = params, stream = True)

    save_response_content(response, destination)
# Resolve the dataset path (Drive-prefixed on Colab, repo-relative locally)
# and download the CSV once if it is not already present.
if not colab:
    gdrivedir = ''
datadir = 'data/ag_news/'
# !mkdir 'data/'
# !mkdir 'data/ag_news/'
filename = gdrivedir + datadir + 'news_with_splits.csv'
if not os.path.exists(filename):
    download_file_from_google_drive('1Z4fOgvrNhcn6pYlOxrEuxrPNxT-bLh7T', filename)
# !pwd
# + [markdown] colab_type="text" id="zirxAlI0QqBU"
# ## Requirements
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 13696, "status": "ok", "timestamp": 1586004191367, "user": {"displayName": "Banausic", "photoUrl": "<KEY>", "userId": "07824873713850476084"}, "user_tz": -480} id="B0Ph_BZLC_ip" outputId="f01edd45-5332-40b2-eb55-86c49a5f0396"
# !pip install sentencepiece
# !pip install pandas
# !pip install tqdm
# + [markdown] colab_type="text" id="_jP01aDGC0Ov"
# ## Imports
# + colab={} colab_type="code" id="6vHmQqo3C0Ow"
import os
from argparse import Namespace
from collections import Counter, defaultdict # defaultdict NEW
import json
import re
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm.notebook import tqdm #from tqdm import tqdm_notebook
### NEW ####
import sentencepiece as spm
import tempfile
import sys
import copy
import random
# + [markdown] colab_type="text" id="0ZZ79vcyC0O1"
# ## Data Vectorization classes
# + [markdown] colab_type="text" id="wqbtzx1LC0O2"
# ### The Vocabulary
# + colab={} colab_type="code" id="EwszpKHMC0O3"
class Vocabulary(object):
    """Class to process text and extract vocabulary for mapping"""

    def __init__(self, token_to_idx=None):
        """
        Args:
            token_to_idx (dict): a pre-existing map of tokens to indices
        """
        self._token_to_idx = {} if token_to_idx is None else token_to_idx
        self._idx_to_token = {i: tok for tok, i in self._token_to_idx.items()}

    def to_serializable(self):
        """Return a dictionary representation that can be serialized."""
        return {'token_to_idx': self._token_to_idx}

    @classmethod
    def from_serializable(cls, contents):
        """Instantiate the Vocabulary from a serialized dictionary."""
        return cls(**contents)

    def add_token(self, token):
        """Add *token* to the vocabulary (idempotent).

        Args:
            token (str): the item to add into the Vocabulary
        Returns:
            index (int): the integer corresponding to the token
        """
        try:
            return self._token_to_idx[token]
        except KeyError:
            index = len(self._token_to_idx)
            self._token_to_idx[token] = index
            self._idx_to_token[index] = token
            return index

    def add_many(self, tokens):
        """Add every token in *tokens*; return their indices in order.

        Args:
            tokens (list): a list of string tokens
        Returns:
            indices (list): indices corresponding to the tokens
        """
        return list(map(self.add_token, tokens))

    def lookup_token(self, token):
        """Return the index for *token* (KeyError if absent).

        Args:
            token (str): the token to look up
        Returns:
            index (int): the index corresponding to the token
        """
        return self._token_to_idx[token]

    def lookup_index(self, index):
        """Return the token stored at *index*.

        Args:
            index (int): the index to look up
        Returns:
            token (str): the token corresponding to the index
        Raises:
            KeyError: if the index is not in the Vocabulary
        """
        if index in self._idx_to_token:
            return self._idx_to_token[index]
        raise KeyError("the index (%d) is not in the Vocabulary" % index)

    def __str__(self):
        return f"<Vocabulary(size={len(self)})>"

    def __len__(self):
        return len(self._token_to_idx)
# + colab={} colab_type="code" id="icULWyCqC0O6"
class WordSequenceVocabulary(Vocabulary):
    """Vocabulary with the special tokens used by word-level sequence models:
    MASK (padding), UNK, and begin/end-of-sequence markers."""

    def __init__(self, token_to_idx=None, unk_token="<UNK>",
                 mask_token="<MASK>", begin_seq_token="<BEGIN>",
                 end_seq_token="<END>"):
        super(WordSequenceVocabulary, self).__init__(token_to_idx)

        self._mask_token = mask_token
        self._unk_token = unk_token
        self._begin_seq_token = begin_seq_token
        self._end_seq_token = end_seq_token

        # Registration order fixes the indices in a fresh vocabulary:
        # mask=0, unk=1, begin=2, end=3.
        self.mask_index = self.add_token(self._mask_token)
        self.unk_index = self.add_token(self._unk_token)
        self.begin_seq_index = self.add_token(self._begin_seq_token)
        self.end_seq_index = self.add_token(self._end_seq_token)

    def to_serializable(self):
        """Serialize the token maps plus the special-token spellings."""
        contents = super(WordSequenceVocabulary, self).to_serializable()
        contents.update({'unk_token': self._unk_token,
                         'mask_token': self._mask_token,
                         'begin_seq_token': self._begin_seq_token,
                         'end_seq_token': self._end_seq_token})
        return contents

    def lookup_token(self, token):
        """Return the index for *token*, falling back to the UNK index for
        out-of-vocabulary tokens.

        Notes:
            `unk_index` needs to be >=0 (having been added into the
            Vocabulary) for the UNK fallback to apply.
        """
        if self.unk_index < 0:
            return self._token_to_idx[token]
        return self._token_to_idx.get(token, self.unk_index)
# + colab={} colab_type="code" id="IaAAOmU-C0O-"
class CharacterSequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, pad_token="<PAD>"):
super(CharacterSequenceVocabulary, self).__init__(token_to_idx)
self._pad_token = pad_token
self.pad_index = self.add_token(self._pad_token)
def to_serializable(self):
contents = super(CharacterSequenceVocabulary, self).to_serializable()
contents.update({'pad_token': self._pad_token})
return contents
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
return self._token_to_idx[token]
# + colab={} colab_type="code" id="tR-Qqsm4BAp7"
class SubwordSequenceVocabulary(Vocabulary):
    """Vocabulary over BPE subword units.

    In addition to the token<->index maps, it stores the learned BPE merge
    operations (``bpe_codes``) and per-subword corpus frequencies
    (``token_freq``) so a segmenter can be rebuilt after deserialization.
    """

    def __init__(self, token_to_idx=None,
                 pad_token="<PAD>", unk_token="<UNK>",
                 begin_seq_token="<BEGIN>",
                 end_seq_token="<END>"):
        super(SubwordSequenceVocabulary, self).__init__(token_to_idx)
        self.token_freq = {}   # subword -> corpus frequency
        self.bpe_codes = []    # ordered list of learned merge pairs
        self._pad_token = pad_token
        self._unk_token = unk_token
        self._begin_seq_token = begin_seq_token
        self._end_seq_token = end_seq_token

        # Registration order fixes the indices in a fresh vocabulary:
        # pad=0, unk=1, begin=2, end=3.
        self.pad_index = self.add_token(self._pad_token)
        self.unk_index = self.add_token(self._unk_token)
        self.begin_seq_index = self.add_token(self._begin_seq_token)
        self.end_seq_index = self.add_token(self._end_seq_token)

    def to_serializable(self):
        """Serialize token maps plus BPE codes, frequencies and specials."""
        contents = super(SubwordSequenceVocabulary, self).to_serializable()
        contents.update({'token_freq': self.token_freq})
        contents.update({'bpe_codes': self.bpe_codes})
        contents.update({'pad_token': self._pad_token,
                         'unk_token': self._unk_token,
                         'begin_seq_token': self._begin_seq_token,
                         'end_seq_token': self._end_seq_token})
        return contents

    @classmethod
    def from_serializable(cls, contents):
        """Rebuild from ``to_serializable()`` output.

        Bug fix: the inherited ``cls(**contents)`` crashed with TypeError
        because ``token_freq`` and ``bpe_codes`` are not ``__init__``
        parameters; they are now popped out and restored after construction.
        """
        contents = dict(contents)
        token_freq = contents.pop('token_freq', {})
        bpe_codes = contents.pop('bpe_codes', [])
        vocab = cls(**contents)
        vocab.token_freq = token_freq
        # JSON round-trips pairs as lists; BPE code pairs must be tuples.
        vocab.bpe_codes = [tuple(code) for code in bpe_codes]
        return vocab

    def add_bpe_codes_list(self, bpe_code_list):
        """Append learned BPE merge pairs (order is significant)."""
        self.bpe_codes.extend(bpe_code_list)

    def add_bpe_token(self, token, frequency):
        """Add a subword *token* with its corpus *frequency*.

        Args:
            token (str): the subword to add into the Vocabulary
            frequency (int): its corpus frequency, recorded in token_freq
        Returns:
            index (int): the integer corresponding to the token
        """
        if token in self._token_to_idx:
            index = self._token_to_idx[token]
        else:
            index = len(self._token_to_idx)
            self._token_to_idx[token] = index
            self._idx_to_token[index] = token
        self.token_freq[token] = frequency
        return index

    def lookup_token(self, token):
        """Return the index for *token*, or the UNK index if it is absent.

        Bug fix: previously this raised KeyError for out-of-vocabulary
        subwords even though the class defines an UNK token and the
        docstring promised the fallback; now it uses ``self.unk_index``.
        """
        return self._token_to_idx.get(token, self.unk_index)
# + colab={} colab_type="code" id="sgVS8SoADIGS"
class SentenceSequenceVocabulary(Vocabulary):
    """Vocabulary backed by a SentencePiece model for sentence-level encoding.

    The registration order matches the SentencePiece training flags used in
    NewsVectorizer.from_dataframe: pad=0, bos=1, eos=2, unk=3.
    """

    def __init__(self, token_to_idx=None, pad_token="<pad>", bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>"):
        super(SentenceSequenceVocabulary, self).__init__(token_to_idx)
        self._pad_token = pad_token
        self._bos_token = bos_token
        self._eos_token = eos_token
        self._unk_token = unk_token

        self.pad_index = self.add_token(self._pad_token) # 0
        self.bos_index = self.add_token(self._bos_token) # 1
        self.eos_index = self.add_token(self._eos_token) # 2
        self.unk_index = self.add_token(self._unk_token) # 3
        self.sp_segmenter = None  # set by load_model_file()

    def to_serializable(self):
        """Serialize the token maps plus the special-token spellings."""
        contents = super(SentenceSequenceVocabulary, self).to_serializable()
        contents.update({'pad_token': self._pad_token , 'bos_token': self._bos_token,
                         'eos_token' : self._eos_token, 'unk_token': self._unk_token })
        return contents

    def load_vocab_file(self, vocab_file):
        """Load a SentencePiece .vocab file (token<TAB>score per line)."""
        with open(vocab_file, encoding='utf-8') as f:
            vo = [doc.strip().split("\t") for doc in f]
        for i, w in enumerate(vo): # w[0]: token name, w[1]: token score
            self.add_token(w[0]) # add_token will skip duplicates

    def load_model_file(self, model_file):
        """Load a trained SentencePiece .model and enable auto bos/eos."""
        self.sp_segmenter = spm.SentencePieceProcessor()
        self.sp_segmenter.load(model_file)
        self.sp_segmenter.SetEncodeExtraOptions('bos:eos') # auto append bos eos tokens

    def lookup_token(self, token):
        """Return the index for *token*, or the UNK index if it is absent.

        Bug fix: previously this raised KeyError for out-of-vocabulary
        tokens even though the class defines an UNK token and the docstring
        promised the fallback; now it uses ``self.unk_index``.
        """
        return self._token_to_idx.get(token, self.unk_index)
# + [markdown] colab_type="text" id="TNSjU3bOEJ2d"
# ### BPE Trainer and Segmenter
# + colab={} colab_type="code" id="InjM2OFAEIrW"
def get_vocabulary(fobj, is_dict=False):
    """Count word frequencies over an iterable of text lines.

    Args:
        fobj: iterable of lines.  When ``is_dict`` is True each line must be
            "<word> <count>"; otherwise lines are split on single spaces and
            every non-empty token counts as one occurrence.
        is_dict (bool): whether *fobj* is a pre-counted vocabulary file.
    Returns:
        collections.Counter mapping word -> frequency.
    """
    vocab = Counter()
    for line_no, line in enumerate(fobj):
        if is_dict:
            try:
                word, count = line.strip('\r\n ').split(' ')
            except:
                print('Failed reading vocabulary file at line {0}: {1}'.format(line_no, line))
                sys.exit(1)
            vocab[word] += int(count)
        else:
            # Counter.update with an iterable counts each element once.
            vocab.update(w for w in line.strip('\r\n ').split(' ') if w)
    return vocab
def update_pair_statistics(pair, changed, stats, indices):
    """Minimally update the indices and frequency of symbol pairs

    if we merge a pair of symbols, only pairs that overlap with occurrences
    of this pair are affected, and need to be updated.

    Args:
        pair: the (first, second) symbol pair that was just merged.
        changed: list of (word_index, new_word, old_word, freq) entries from
            replace_pair() describing every word that contained the pair.
        stats: pair -> total frequency map, updated in place.
        indices: pair -> {word index -> occurrence count}, updated in place.
    """
    # The merged pair itself no longer exists as a pair.
    stats[pair] = 0
    indices[pair] = defaultdict(int)
    first, second = pair
    new_pair = first+second
    for j, word, old_word, freq in changed:

        # find all instances of pair, and update frequency/indices around it
        i = 0
        while True:
            # find first symbol
            try:
                i = old_word.index(first, i)
            except ValueError:
                break
            # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])
            if i < len(old_word)-1 and old_word[i+1] == second:
                # assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B"
                if i:
                    prev = old_word[i-1:i+1]
                    stats[prev] -= freq
                    indices[prev][j] -= 1
                if i < len(old_word)-2:
                    # assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B".
                    # however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block
                    if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second:
                        nex = old_word[i+1:i+3]
                        stats[nex] -= freq
                        indices[nex][j] -= 1
                i += 2
            else:
                i += 1

        # Second pass over the *new* word: add the pairs created around the
        # freshly merged symbol.
        i = 0
        while True:
            try:
                # find new pair
                i = word.index(new_pair, i)
            except ValueError:
                break
            # assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC"
            if i:
                prev = word[i-1:i+1]
                stats[prev] += freq
                indices[prev][j] += 1
            # assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B"
            # however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block
            if i < len(word)-1 and word[i+1] != new_pair:
                nex = word[i:i+2]
                stats[nex] += freq
                indices[nex][j] += 1
            i += 1
def get_pair_statistics(vocab):
    """Count frequency of all adjacent symbol pairs and build an index.

    Args:
        vocab: list of (symbol_tuple, frequency) entries.
    Returns:
        (stats, indices): ``stats`` maps pair -> total frequency;
        ``indices`` maps pair -> {word position in vocab -> occurrence count}.
    """
    # data structure of pair frequencies
    stats = defaultdict(int)

    # index from pairs to the words they occur in
    indices = defaultdict(lambda: defaultdict(int))

    for word_idx, (word, freq) in enumerate(vocab):
        # zip(word, word[1:]) walks every adjacent symbol pair in order
        for left, right in zip(word, word[1:]):
            stats[left, right] += freq
            indices[left, right][word_idx] += 1

    return stats, indices
def replace_pair(pair, vocab, indices):
    """Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'

    Mutates *vocab* in place and returns the list of changed entries as
    (word_index, new_word, old_word, freq) tuples for
    update_pair_statistics().
    """
    first, second = pair
    merged = ''.join(pair)
    merged = merged.replace('\\', '\\\\')
    # Match "first second" only when flanked by whitespace/word boundaries.
    pattern = re.compile(r'(?<!\S)' + re.escape(first + ' ' + second) + r'(?!\S)')

    if sys.version_info < (3, 0):
        pair_occurrences = indices[pair].iteritems()
    else:
        pair_occurrences = indices[pair].items()

    changes = []
    for word_idx, count in pair_occurrences:
        if count < 1:
            continue
        old_word, freq = vocab[word_idx]
        new_word = tuple(pattern.sub(merged, ' '.join(old_word)).split(' '))
        vocab[word_idx] = (new_word, freq)
        changes.append((word_idx, new_word, old_word, freq))

    return changes
def prune_stats(stats, big_stats, threshold):
    """Drop low-frequency pairs from *stats* so max() stays cheap.

    The frequency of a symbol pair never increases, so pruning is generally
    safe (until the most frequent pair becomes less frequent than a pair we
    previously pruned).  ``big_stats`` keeps the full statistics so pruned
    items can be restored later: negative frequencies are deltas and are
    folded into it, while non-negative frequencies overwrite the stored
    value.
    """
    for pair, freq in list(stats.items()):
        if freq >= threshold:
            continue
        del stats[pair]
        if freq < 0:
            big_stats[pair] += freq
        else:
            big_stats[pair] = freq
def learn_bpe(data, num_symbols, min_frequency=2, verbose=False, is_dict=False, total_symbols=False):
    """Learn num_symbols BPE operations from vocabulary, and write to list of tuples.

    Args:
        data: iterable of lines; "<word> <count>" lines when ``is_dict``.
        num_symbols (int): number of merge operations to learn.
        min_frequency (int): stop once the best pair is rarer than this.
        verbose (bool): log each merge to stderr.
        is_dict (bool): whether *data* is a pre-counted vocabulary.
        total_symbols (bool): subtract the character inventory from the
            merge budget so the final symbol count is roughly num_symbols.
    Returns:
        list of (first, second) merge pairs in the order they were learned.
    """
    vocab = get_vocabulary(data, is_dict) # count words in text
    # mark word ends: the final symbol of each word gets a '</w>' suffix
    vocab = dict([(tuple(x[:-1])+(x[-1]+'</w>',) ,y) for (x,y) in vocab.items()])
    sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)

    stats, indices = get_pair_statistics(sorted_vocab)
    big_stats = copy.deepcopy(stats)

    if total_symbols:
        uniq_char_internal = set()
        uniq_char_final = set()
        for word in vocab:
            for char in word[:-1]:
                uniq_char_internal.add(char)
            uniq_char_final.add(word[-1])
        sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal)))
        sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final)))
        sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final)))
        num_symbols -= len(uniq_char_internal) + len(uniq_char_final)
        sys.stderr.write('Number of symbols left: {0}\n'.format(num_symbols))

    bpe_codes = []
    # threshold is inspired by Zipfian assumption, but should only affect speed
    threshold = max(stats.values()) / 10
    for i in range(num_symbols):
        if stats:
            most_frequent = max(stats, key=lambda x: (stats[x], x))

        # we probably missed the best pair because of pruning; go back to full statistics
        if not stats or (i and stats[most_frequent] < threshold):
            prune_stats(stats, big_stats, threshold)
            stats = copy.deepcopy(big_stats)
            most_frequent = max(stats, key=lambda x: (stats[x], x))
            # threshold is inspired by Zipfian assumption, but should only affect speed
            threshold = stats[most_frequent] * i/(i+10000.0)
            prune_stats(stats, big_stats, threshold)

        if stats[most_frequent] < min_frequency:
            sys.stderr.write('no pair has frequency >= {0}. Stopping\n'.format(min_frequency))
            break

        if verbose:
            sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent]))
        bpe_codes.append((most_frequent[0], most_frequent[1]))
        changes = replace_pair(most_frequent, sorted_vocab, indices)
        update_pair_statistics(most_frequent, changes, stats, indices)
        # the merged pair is consumed; zero it so it is never chosen again
        stats[most_frequent] = 0
        # periodically re-prune to keep the working stats dict small
        if not i % 100:
            prune_stats(stats, big_stats, threshold)

    if verbose:
        print(bpe_codes)
    return bpe_codes
class BPE(object):
    """Segmenter that applies a learned, ordered list of BPE merge operations.

    NOTE(review): the ``merges`` parameter is accepted but never used to
    truncate the code list -- confirm whether partial-merge support was
    intended.
    """

    def __init__(self, codes, merges=-1, separator='@@', vocab=None, glossaries=None):
        """
        Args:
            codes: ordered list of (first, second) pairs from learn_bpe().
            merges (int): nominally the number of merges to apply (-1 = all).
            separator (str): marker appended to non-final subword units.
            vocab: optional subword vocabulary; when given, merges that
                produce out-of-vocabulary units are reversed.
            glossaries: optional list of regex patterns that are never split.
        """
        self.version = (0, 2) #Hardcode

        self.bpe_codes = codes

        # some hacking to deal with duplicates (only consider first instance)
        self.bpe_codes = dict([(code,i) for (i,code) in reversed(list(enumerate(self.bpe_codes)))])
        self.bpe_codes_reverse = dict([(pair[0] + pair[1], pair) for pair,i in self.bpe_codes.items()])

        self.separator = separator

        self.vocab = vocab

        self.glossaries = glossaries if glossaries else []
        self.glossaries_regex = re.compile('^({})$'.format('|'.join(glossaries))) if glossaries else None

        # word -> segmentation memo; bypassed when dropout > 0
        self.cache = {}

    def process_line(self, line, dropout=0):
        """segment line, dealing with leading and trailing whitespace"""
        out = ""

        leading_whitespace = len(line)-len(line.lstrip('\r\n '))
        if leading_whitespace:
            out += line[:leading_whitespace]

        out += self.segment(line, dropout)

        trailing_whitespace = len(line)-len(line.rstrip('\r\n '))
        if trailing_whitespace and trailing_whitespace != len(line):
            out += line[-trailing_whitespace:]

        return out

    def segment(self, sentence, dropout=0):
        """segment single sentence (whitespace-tokenized string) with BPE encoding"""
        segments = self.segment_tokens(sentence.strip('\r\n ').split(' '), dropout)
        return ' '.join(segments)

    def segment_tokens(self, tokens, dropout=0):
        """segment a sequence of tokens with BPE encoding"""
        output = []
        for word in tokens:
            # eliminate double spaces
            if not word:
                continue
            new_word = [out for segment in self._isolate_glossaries(word)
                        for out in encode(segment,
                                          self.bpe_codes,
                                          self.bpe_codes_reverse,
                                          self.vocab,
                                          self.separator,
                                          self.version,
                                          self.cache,
                                          self.glossaries_regex,
                                          dropout)]

            # append the continuation separator to every unit except the last
            for item in new_word[:-1]:
                output.append(item + self.separator)
            output.append(new_word[-1])

        return output

    def _isolate_glossaries(self, word):
        # Split the word so glossary substrings become standalone segments
        # that encode() will leave untouched.
        word_segments = [word]
        for gloss in self.glossaries:
            word_segments = [out_segments for segment in word_segments
                             for out_segments in isolate_glossary(segment, gloss)]
        return word_segments
def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries_regex=None, dropout=0):
    """Encode word based on list of BPE merge operations, which are applied consecutively

    Args:
        orig (str): the word to segment.
        bpe_codes (dict): (first, second) pair -> merge priority (lower
            priority value = applied earlier).
        bpe_codes_reverse (dict): merged symbol -> (first, second), used when
            reversing merges for out-of-vocabulary subwords.
        vocab: optional subword vocabulary; if given, OOV merges are undone
            via check_vocab_and_split().
        separator (str): subword continuation marker.
        version (tuple): BPE file-format version, (0, 1) or (0, 2).
        cache (dict): memo of previous segmentations (skipped under dropout).
        glossaries_regex: compiled pattern of words that must not be split.
        dropout (float): probability of skipping an eligible merge
            (BPE-dropout).
    Returns:
        tuple of subword strings for *orig*.
    """
    if not dropout and orig in cache:
        return cache[orig]

    if glossaries_regex and glossaries_regex.match(orig):
        cache[orig] = (orig,)
        return (orig,)

    if len(orig) == 1:
        return orig

    if version == (0, 1):
        word = list(orig) + ['</w>']
    elif version == (0, 2): # more consistent handling of word-final segments
        word = list(orig[:-1]) + [orig[-1] + '</w>']
    else:
        raise NotImplementedError

    while len(word) > 1:

        # get list of symbol pairs; optionally apply dropout
        pairs = [(bpe_codes[pair],i,pair) for (i,pair) in enumerate(zip(word, word[1:])) if (not dropout or random.random() > dropout) and pair in bpe_codes]

        if not pairs:
            break

        #get first merge operation in list of BPE codes
        bigram = min(pairs)[2]

        # find start position of all pairs that we want to merge
        positions = [i for (rank,i,pair) in pairs if pair == bigram]

        i = 0
        new_word = []
        bigram = ''.join(bigram)
        for j in positions:
            # merges are invalid if they start before current position. This can happen if there are overlapping pairs: (x x x -> xx x)
            if j < i:
                continue
            new_word.extend(word[i:j]) # all symbols before merged pair
            new_word.append(bigram) # merged pair
            i = j+2 # continue after merged pair
        new_word.extend(word[i:]) # add all symbols until end of word
        word = new_word

    # don't print end-of-word symbols
    if word[-1] == '</w>':
        word = word[:-1]
    elif word[-1].endswith('</w>'):
        word[-1] = word[-1][:-4]

    word = tuple(word)
    if vocab:
        # undo merges whose result is not in the vocabulary
        word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator)

    cache[orig] = word
    return word
def recursive_split(segment, bpe_codes, vocab, separator, final=False):
    """Recursively split segment into smaller units (by reversing BPE merges)
    until all units are either in-vocabulary, or cannot be split futher.

    Yields the resulting subword units in order.  ``final`` marks a
    word-final segment, whose merge key carries the '</w>' suffix.
    """
    try:
        if final:
            left, right = bpe_codes[segment + '</w>']
            right = right[:-4]
        else:
            left, right = bpe_codes[segment]
    except:
        # cannot split this segment any further: emit it as-is
        yield segment
        return

    if left + separator in vocab:
        yield left
    else:
        yield from recursive_split(left, bpe_codes, vocab, separator, False)

    right_in_vocab = right in vocab if final else right + separator in vocab
    if right_in_vocab:
        yield right
    else:
        yield from recursive_split(right, bpe_codes, vocab, separator, final)
def check_vocab_and_split(orig, bpe_codes, vocab, separator):
    """Check for each segment in word if it is in-vocabulary,
    and segment OOV segments into smaller units by reversing the BPE merge operations

    Args:
        orig: tuple of subword segments (last one is word-final).
        bpe_codes: merged symbol -> (first, second) reverse-merge map.
        vocab: set/dict of known subwords (non-final ones carry *separator*).
        separator (str): subword continuation marker.
    Returns:
        list of subword strings, all in-vocabulary where possible.
    """
    out = []

    # Non-final segments are looked up with the continuation separator.
    for segment in orig[:-1]:
        if segment + separator in vocab:
            out.append(segment)
        else:
            out.extend(recursive_split(segment, bpe_codes, vocab, separator, False))

    # The final segment is looked up bare (no separator).
    last = orig[-1]
    if last in vocab:
        out.append(last)
    else:
        out.extend(recursive_split(last, bpe_codes, vocab, separator, True))

    return out
def read_vocabulary(vocab_file, threshold):
    """read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.

    Args:
        vocab_file: iterable of "<word> <count>" lines.
        threshold (int or None): minimum frequency to keep a word; None
            keeps every word.
    Returns:
        set of words whose frequency meets the threshold.
    """
    vocabulary = set()

    for line in vocab_file:
        word, freq = line.strip('\r\n ').split(' ')
        freq = int(freq)
        # Fix: identity comparison `is None` instead of `== None` (PEP 8);
        # behavior is otherwise unchanged.
        if threshold is None or freq >= threshold:
            vocabulary.add(word)

    return vocabulary
def isolate_glossary(word, glossary):
    """
    Isolate a glossary present inside a word.

    Returns a list of subwords. In which all 'glossary' glossaries are isolated

    For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:
        ['1934', 'USA', 'B', 'USA']
    """
    # regex equivalent of (if word == glossary or glossary not in word)
    if re.match('^'+glossary+'$', word) or not re.search(glossary, word):
        return [word]

    # Capturing split keeps the glossary matches as their own segments.
    pieces = re.split(r'({})'.format(glossary), word)
    ending = pieces.pop()
    pieces = [p for p in pieces if p]  # drop empty strings from the split
    if ending == '':
        return pieces
    return pieces + [ending.strip('\r\n ')]
### CALL THIS FUNCTION TO TRAIN AND CREATE VOCAB ###
### CALL THIS FUNCTION TO TRAIN AND CREATE VOCAB ###
def learn_joint_bpe_and_vocab(data, symbols, min_frequency, total_symbols, dropout=0, separator="@@", verbose=False):
    """Learn BPE merges on *data* and build a SubwordSequenceVocabulary.

    Args:
        data: iterable of text lines.  It is consumed twice (word counting,
            then segmentation), so one-shot iterators are materialized first.
        symbols (int): number of BPE merge operations to learn.
        min_frequency (int): stop merging below this pair frequency.
        total_symbols (bool): budget *symbols* against the character
            inventory as well (forwarded to learn_bpe).
        dropout (float): BPE-dropout probability during segmentation.
        separator (str): subword continuation marker.
        verbose (bool): forwarded to learn_bpe.
    Returns:
        SubwordSequenceVocabulary holding the merge codes and the subword
        frequencies observed on the segmented corpus.
    """
    # Fix: *data* is iterated twice below; materialize it so a generator
    # input is not silently empty on the second pass.
    data = list(data)

    # get combined vocabulary of all input texts
    full_vocab = get_vocabulary(data)
    vocab_list = ['{0} {1}'.format(key, freq) for (key, freq) in full_vocab.items()]

    # learn BPE on combined vocabulary
    bpe_codes = learn_bpe(vocab_list, symbols, min_frequency, verbose, is_dict=True, total_symbols=total_symbols)
    bpe = BPE(bpe_codes, separator=separator)

    # apply BPE to the training corpus and count the resulting subwords
    segments = [bpe.segment(line, dropout=dropout).strip() for line in data]
    vocab = get_vocabulary(segments)

    subword_vocab = SubwordSequenceVocabulary()
    subword_vocab.add_bpe_codes_list(bpe_codes)
    # register subwords in descending frequency order
    for key, freq in sorted(vocab.items(), key=lambda x: x[1], reverse=True):
        subword_vocab.add_bpe_token(key, freq)
    return subword_vocab
# + [markdown] colab_type="text" id="zyTm3ARbC0PB"
# ### The Vectorizer
# + colab={} colab_type="code" id="3ONBp_6MC0PC"
class NewsVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, title_vocab, category_vocab, mode): # , title_vocab
self.mode = mode
self.title_vocab = title_vocab
self.category_vocab = category_vocab
if "bpe" in mode:
self.bpe = BPE(self.title_vocab.bpe_codes, vocab = self.title_vocab.token_freq)
def vectorize(self, title, max_seq_length, max_word_length, max_sent_length):
"""
Args:
word (str): a word
vector_length (int): an argument for forcing the length of index vector
Returns:
the vetorized title (numpy.array)
"""
if self.mode == "word":
indices = [self.title_vocab.begin_seq_index]
indices.extend(self.title_vocab.lookup_token(token)
for token in title.split(" "))
indices.append(self.title_vocab.end_seq_index)
vector_length = max_seq_length
if vector_length < 0:
vector_length = len(indices)
out_vectors = np.zeros(vector_length, dtype=np.int64)
out_vectors[:len(indices)] = indices
out_vectors[len(indices):] = self.title_vocab.mask_index
elif self.mode == "char":
words = title.split(" ")
if len(words) > max_seq_length:
words = words[:max_seq_length]
out_vectors = []
for word in words:
word_indices = [self.title_vocab.lookup_token(token) for token in word]
if len(word_indices) > max_word_length:
word_indices = word_indices[:max_word_length]
out_vector = np.zeros(max_word_length, dtype=np.int64)
out_vector[:len(word_indices)] = word_indices
if len(word_indices) < max_word_length:
out_vector[len(word_indices):] = self.title_vocab.pad_index
out_vectors.append(out_vector)
if len(words) < max_seq_length:
null_word_emb = np.array([self.title_vocab.pad_index] * max_word_length, dtype=np.int64)
for _ in range(max_seq_length - len(words)):
out_vectors.append(null_word_emb)
out_vectors = np.array(out_vectors, dtype=np.int64)
elif self.mode == "bpe-char":
words = title.strip().split() # segment title into words
out_vectors = []
for titleword in words:
encoded = self.bpe.process_line(titleword.strip())
subwords = encoded.strip().split()
word_indices = [self.title_vocab.lookup_token(token) for token in subwords]
if len(word_indices) > max_word_length:
word_indices = word_indices[:max_word_length]
out_vector = np.zeros(max_word_length, dtype=np.int64)
out_vector[:len(word_indices)] = word_indices
if len(word_indices) < max_word_length:
out_vector[len(word_indices):] = self.title_vocab.pad_index
out_vectors.append(out_vector) # append each subword as a rep of each word
if len(words) < max_seq_length:
null_word_emb = np.array([self.title_vocab.pad_index] * max_word_length, dtype=np.int64)
for _ in range(max_seq_length - len(words)):
out_vectors.append(null_word_emb)
out_vectors = np.array(out_vectors, dtype=np.int64)
elif self.mode == "bpe-word":
indices = [self.title_vocab.begin_seq_index]
encoded = self.bpe.process_line(title.strip())
indices.extend(self.title_vocab.lookup_token(token)
for token in encoded.strip().split())
indices.append(self.title_vocab.end_seq_index)
vector_length = max_sent_length
out_vectors = np.zeros(vector_length, dtype=np.int64)
out_vectors[:len(indices)] = indices
out_vectors[len(indices):] = self.title_vocab.pad_index
elif self.mode == "sent":
# words = self.title_vocab.sp_segmenter.encode_as_pieces(title) # for debugging
# spm already configured to auto add bos and eos to ids
# encodes entire sentence into tokens
indices = self.title_vocab.sp_segmenter.encode_as_ids(title)
vector_length = max_sent_length
out_vectors = np.zeros(vector_length, dtype=np.int64)
out_vectors[:len(indices)] = indices
out_vectors[len(indices):] = self.title_vocab.pad_index
return out_vectors
@classmethod
def from_dataframe(cls, news_df, mode, vocab_size, cutoff=25, delete_files=False):
"""Instantiate the vectorizer from the dataset dataframe
Args:
news_df (pandas.DataFrame): the target dataset
cutoff (int): frequency threshold for including in Vocabulary
Returns:
an instance of the NewsVectorizer
"""
category_vocab = Vocabulary()
for category in sorted(set(news_df.category)):
category_vocab.add_token(category)
if mode == "word":
word_counts = Counter()
for title in news_df.title:
for token in title.split(" "):
if token not in string.punctuation:
word_counts[token] += 1
title_vocab = WordSequenceVocabulary()
for word, word_count in word_counts.items():
if word_count >= cutoff:
title_vocab.add_token(word)
elif mode == "char":
title_vocab = CharacterSequenceVocabulary()
for title in news_df.title:
for token in title.split(" "):
title_vocab.add_many(list(token))
elif "bpe" in mode:
title_vocab = SubwordSequenceVocabulary()
total_symbols = False
separator = "@@"
min_frequency = 0
title_vocab = learn_joint_bpe_and_vocab(news_df.title, vocab_size,
min_frequency, total_symbols,
dropout=0, separator=separator, verbose=False)
for title in news_df.title:
for token in title.split(" "):
title_vocab.add_many(list(token))
# title_vocab.compress_dict(vocab_size)
elif mode == "sent":
title_vocab = SentenceSequenceVocabulary()
news_df_title = news_df.title
# create a temporary text file from dataframe for spm input
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
with open(tmp.name, 'w') as tmpout:
for key, value in news_df_title.iteritems():
tmpout.write(value + '\n')
with open(tmp.name, 'r') as tmpin:
if not delete_files:
prefix = "spm/" # create a dir to store
handle_dirs(prefix)
handle_dirs(prefix)
prefix = prefix + str(vocab_size) + "_train_news_spm"
model_name = prefix + ".model"
vocab_name = prefix + ".vocab"
character_coverage = 1.0 # to reduce character set
model_type ="bpe" # choose from unigram (default), bpe, char, or word
templates= "--input={} --pad_id={} --bos_id={} --eos_id={} --unk_id={} \
--model_prefix={} --vocab_size={} \
--character_coverage={} --model_type={}"
cmd = templates.format(tmp.name,
title_vocab.pad_index,
title_vocab.bos_index,
title_vocab.eos_index,
title_vocab.unk_index,
prefix, vocab_size,
character_coverage, model_type)
spm.SentencePieceTrainer.Train(cmd) # run the trainer on trainset
os.remove(tmp.name) # delete temp txt file after training
title_vocab.load_model_file(model_name) # load the model into spm
title_vocab.load_vocab_file(vocab_name) # load the vocab file for saving
if delete_files:
os.remove(model_name)
os.remove(vocab_name)
return cls(title_vocab, category_vocab, mode) # title_char_vocab,
@classmethod
def from_serializable(cls, contents, mode):
    """Rebuild a vectorizer from a serialized contents dict.

    Args:
        contents (dict): output of to_serializable (keys 'title_vocab'
            and 'category_vocab')
        mode (str): tokenization mode ('word', 'char', a '*bpe*' variant,
            or 'sent'); selects which vocabulary class to deserialize
    Returns:
        an instance of the vectorizer
    """
    if mode == "word":
        title_vocab = \
            WordSequenceVocabulary.from_serializable(contents['title_vocab'])
    elif mode == "char":
        title_vocab = \
            CharacterSequenceVocabulary.from_serializable(contents['title_vocab'])
    elif "bpe" in mode:
        title_vocab = \
            SubwordSequenceVocabulary.from_serializable(contents['title_vocab'])
    elif mode == "sent":
        title_vocab = \
            SentenceSequenceVocabulary.from_serializable(contents['title_vocab'])
    category_vocab = \
        Vocabulary.from_serializable(contents['category_vocab'])
    # Bug fix: the constructor also takes the tokenization mode (see the
    # cls(title_vocab, category_vocab, mode) call in from_dataframe);
    # previously mode was silently dropped here.
    return cls(title_vocab, category_vocab, mode)
def to_serializable(self):
    """Return a JSON-serializable dict of both vocabularies."""
    serialized = {}
    serialized['title_vocab'] = self.title_vocab.to_serializable()
    serialized['category_vocab'] = self.category_vocab.to_serializable()
    return serialized
# + [markdown] colab_type="text" id="tlDY-mjpC0PE"
# ### The Dataset
# + colab={} colab_type="code" id="2hvfxY2pC0PF"
class NewsDataset(Dataset):
    """PyTorch Dataset over the news dataframe with train/val/test splits."""

    def __init__(self, news_df, vectorizer):
        """
        Args:
            news_df (pandas.DataFrame): the dataset
            vectorizer (NewsVectorizer): vectorizer instantiated from dataset
        """
        self.news_df = news_df
        self._vectorizer = vectorizer

        # +1 if only using begin_seq, +2 if using both begin and end seq tokens
        measure_len = lambda context: len(context.split(" "))
        self._max_seq_length = max(map(measure_len, news_df.title)) + 2

        # Longest title (characters) and longest token (characters), used to
        # size the fixed-width tensors produced by the vectorizer.
        self._max_word_length = 0
        self._max_sent_length = 0
        for title in news_df.title:
            if len(title) > self._max_sent_length:
                self._max_sent_length = len(title)
            for token in title.split(" "):
                if len(token) > self._max_word_length:
                    self._max_word_length = len(token)

        self.train_df = self.news_df[self.news_df.split == 'train']
        self.train_size = len(self.train_df)

        self.val_df = self.news_df[self.news_df.split == 'val']
        self.validation_size = len(self.val_df)

        self.test_df = self.news_df[self.news_df.split == 'test']
        self.test_size = len(self.test_df)

        self._lookup_dict = {'train': (self.train_df, self.train_size),
                             'val': (self.val_df, self.validation_size),
                             'test': (self.test_df, self.test_size)}

        self.set_split('train')

        # Class weights: inverse frequency, sorted by category-vocab index so
        # that weight i lines up with class index i in the loss.
        class_counts = news_df.category.value_counts().to_dict()

        def sort_key(item):
            return self._vectorizer.category_vocab.lookup_token(item[0])

        sorted_counts = sorted(class_counts.items(), key=sort_key)
        frequencies = [count for _, count in sorted_counts]
        self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)

    @classmethod
    def load_dataset_and_make_vectorizer(cls, news_csv, mode, vocab_size):
        """Load dataset and make a new vectorizer from scratch

        Args:
            news_csv (str): location of the dataset
            mode (str): tokenization mode for the vectorizer
            vocab_size (int): vocabulary size for subword/sentencepiece modes
        Returns:
            an instance of NewsDataset
        """
        news_df = pd.read_csv(news_csv)
        train_news_df = news_df[news_df.split == 'train']
        # Fit the vectorizer on the training split only.
        return cls(news_df, NewsVectorizer.from_dataframe(train_news_df, mode=mode,
                                                          vocab_size=vocab_size))

    @classmethod
    def load_dataset_and_load_vectorizer(cls, news_csv, vectorizer_filepath):
        """Load dataset and the corresponding vectorizer.
        Used in the case in the vectorizer has been cached for re-use

        Args:
            news_csv (str): location of the dataset
            vectorizer_filepath (str): location of the saved vectorizer
        Returns:
            an instance of NewsDataset
        """
        news_df = pd.read_csv(news_csv)
        vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
        # Bug fix: previously passed the csv *path* (news_csv) instead of the
        # loaded dataframe, which crashed inside __init__.
        return cls(news_df, vectorizer)

    @staticmethod
    def load_vectorizer_only(vectorizer_filepath):
        """a static method for loading the vectorizer from file

        Args:
            vectorizer_filepath (str): the location of the serialized vectorizer
        Returns:
            an instance of NewsVectorizer
        """
        with open(vectorizer_filepath) as fp:
            # Bug fix: was NameVectorizer (undefined in this notebook); the
            # vectorizer class used everywhere else is NewsVectorizer.
            # NOTE(review): NewsVectorizer.from_serializable also expects a
            # mode argument — confirm how cached vectorizers record the mode.
            return NewsVectorizer.from_serializable(json.load(fp))

    def save_vectorizer(self, vectorizer_filepath):
        """saves the vectorizer to disk using json

        Args:
            vectorizer_filepath (str): the location to save the vectorizer
        """
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self._vectorizer.to_serializable(), fp)

    def get_vectorizer(self):
        """ returns the vectorizer """
        return self._vectorizer

    def set_split(self, split="train"):
        """ selects the splits in the dataset using a column in the dataframe """
        self._target_split = split
        self._target_df, self._target_size = self._lookup_dict[split]

    def __len__(self):
        return self._target_size

    def __getitem__(self, index):
        """the primary entry point method for PyTorch datasets

        Args:
            index (int): the index to the data point
        Returns:
            a dictionary holding the data point's features (x_data) and label (y_target)
        """
        row = self._target_df.iloc[index]

        title_vector = \
            self._vectorizer.vectorize(row.title, self._max_seq_length,
                                       self._max_word_length, self._max_sent_length)

        category_index = \
            self._vectorizer.category_vocab.lookup_token(row.category)

        return {'x_data': title_vector,
                'y_target': category_index}

    def get_num_batches(self, batch_size):
        """Given a batch size, return the number of batches in the dataset

        Args:
            batch_size (int)
        Returns:
            number of batches in the dataset
        """
        return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
                     drop_last=True, device="cpu"):
    """
    Generator wrapping the PyTorch DataLoader; moves every tensor in each
    batch dict onto the right device before yielding it.
    """
    loader = DataLoader(dataset=dataset, batch_size=batch_size,
                        shuffle=shuffle, drop_last=drop_last)
    for batch in loader:
        yield {name: tensor.to(device) for name, tensor in batch.items()}
# + [markdown] colab_type="text" id="NqyWjAcPC0PI"
# ## The Model: NewsClassifier
# + colab={} colab_type="code" id="LIpn92p9C0PI"
class NewsClassifier(nn.Module):
    def __init__(self, model_mode, char_embedding_size, word_embedding_size,
                 char_num_embeddings, word_num_channels,
                 char_kernel_size, hidden_dim, num_classes, dropout_p,
                 char_pretrained_embeddings=None, padding_idx=0):
        """
        Args:
            model_mode (str): one of word / char / bpe-char / bpe-word / sent;
                chooses whether tokens are embedded directly or composed from
                character embeddings via a character CNN
            char_embedding_size (int): size of the character embedding vectors
            word_embedding_size (int): size of the token embedding vectors
            char_num_embeddings (int): number of embedding vectors
            word_num_channels (int): number of convolutional kernels per layer
            char_kernel_size (int): width of the character-level kernels
            hidden_dim (int): the size of the hidden dimension
            num_classes (int): the number of classes in classification
            dropout_p (float): a dropout parameter
            char_pretrained_embeddings (numpy.array): previously trained
                embeddings; default is None (randomly initialized)
            padding_idx (int): an index representing a null position
        """
        super(NewsClassifier, self).__init__()
        print(("model_mode={}, char_embedding_size={}, word_embedding_size={}, char_num_embeddings={}, word_num_channels={}, " \
               + "char_kernel_size={}, hidden_dim={}, num_classes={}" \
               + "").format(model_mode, char_embedding_size, word_embedding_size, char_num_embeddings, word_num_channels,
                            char_kernel_size, hidden_dim, num_classes))
        self.model_mode = model_mode
        if "word" in self.model_mode or self.model_mode == "sent":
            # Token-level modes embed tokens directly; the layer keeps the
            # name char_emb so forward() is uniform across modes.
            if char_pretrained_embeddings is None:  # token_emb
                self.char_emb = nn.Embedding(embedding_dim=word_embedding_size,
                                             num_embeddings=char_num_embeddings,
                                             padding_idx=padding_idx)
            else:
                char_pretrained_embeddings = torch.from_numpy(char_pretrained_embeddings).float()
                self.char_emb = nn.Embedding(embedding_dim=word_embedding_size,
                                             num_embeddings=char_num_embeddings,
                                             padding_idx=padding_idx,
                                             _weight=char_pretrained_embeddings)
        elif "char" in self.model_mode:
            if char_pretrained_embeddings is None:  # char_emb
                self.char_emb = nn.Embedding(embedding_dim=char_embedding_size,
                                             num_embeddings=char_num_embeddings,
                                             padding_idx=padding_idx)
            else:
                char_pretrained_embeddings = torch.from_numpy(char_pretrained_embeddings).float()
                self.char_emb = nn.Embedding(embedding_dim=char_embedding_size,
                                             num_embeddings=char_num_embeddings,
                                             padding_idx=padding_idx,
                                             _weight=char_pretrained_embeddings)
            # Composes one word embedding from the characters of each token.
            self.char_convnet = nn.Sequential(
                nn.Conv1d(in_channels=char_embedding_size, out_channels=word_embedding_size, kernel_size=char_kernel_size),
                nn.ReLU()
            )
        self.word_convnet = nn.Sequential(
            nn.Conv1d(in_channels=word_embedding_size,
                      out_channels=word_num_channels, kernel_size=3),
            nn.ELU(),
            nn.Conv1d(in_channels=word_num_channels, out_channels=word_num_channels,
                      kernel_size=3, stride=2),
            nn.ELU(),
            nn.Conv1d(in_channels=word_num_channels, out_channels=word_num_channels,
                      kernel_size=3, stride=2),
            nn.ELU(),
            nn.Conv1d(in_channels=word_num_channels, out_channels=word_num_channels,
                      kernel_size=3),
            nn.ELU()
        )

        self._dropout_p = dropout_p
        self.fc1 = nn.Linear(word_num_channels, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x_in, apply_softmax=False):
        """The forward pass of the classifier

        Args:
            x_in (torch.Tensor): an input data tensor.
                x_in.shape should be (batch, dataset._max_seq_length)
            apply_softmax (bool): a flag for the softmax activation
                should be false if used with the Cross Entropy losses
        Returns:
            the resulting tensor. tensor.shape should be (batch, num_classes)
        """
        # embed and permute so features are channels
        # x_in: (batch_size, max_seq_size, max_word_size)
        # x_emb: (batch_size, max_seq_size, max_word_size, char_embedding_size)
        x_emb = self.char_emb(x_in)
        if "char" in self.model_mode:  # char or bpe-char
            batch_size = x_emb.size(dim=0)
            max_seq_size = x_emb.size(dim=1)
            max_word_size = x_emb.size(dim=2)
            char_embedding_size = x_emb.size(dim=3)
            # x_reshaped: (batch_size * max_seq_size, char_embedding_size, max_word_size)
            x_reshaped = x_emb.view(batch_size * max_seq_size, max_word_size, char_embedding_size).permute(0, 2, 1)
            # x_conv: (batch_size * max_seq_size, word_embedding_size, max_word_size - char_kernel_size + 1)
            x_conv = self.char_convnet(x_reshaped)
            # x_conv_out: (batch_size * max_seq_size, word_embedding_size)
            word_embedding_size = x_conv.size(dim=1)
            remaining_size = x_conv.size(dim=2)
            x_conv_out = F.max_pool1d(x_conv, remaining_size).squeeze(dim=2)
            x_embedding = x_conv_out.view(batch_size, max_seq_size, word_embedding_size)
        elif "word" in self.model_mode or self.model_mode == "sent":
            x_embedding = x_emb
        features = self.word_convnet(x_embedding.permute(0, 2, 1))

        # average and remove the extra dimension
        remaining_size = features.size(dim=2)
        features = F.avg_pool1d(features, remaining_size).squeeze(dim=2)
        # Bug fix: F.dropout defaults to training=True, which kept dropout
        # active during evaluation; gate it on self.training instead.
        features = F.dropout(features, p=self._dropout_p,
                             training=self.training)

        # mlp classifier
        intermediate_vector = F.relu(F.dropout(self.fc1(features),
                                               p=self._dropout_p,
                                               training=self.training))
        prediction_vector = self.fc2(intermediate_vector)

        if apply_softmax:
            prediction_vector = F.softmax(prediction_vector, dim=1)

        return prediction_vector
# + [markdown] colab_type="text" id="jRbYNNxoC0PK"
# ## Training Routine
# + [markdown] colab_type="text" id="mRWg3BxEC0PL"
# ### Helper functions
# + colab={} colab_type="code" id="cB77O3A1C0PL"
def make_train_state(args):
    """Create a fresh dict tracking training progress, metrics history,
    early-stopping state, and the checkpoint filename."""
    state = {
        'stop_early': False,
        'early_stopping_step': 0,
        'early_stopping_best_val': 1e8,
        'learning_rate': args.learning_rate,
        'epoch_index': 0,
        'test_loss': -1,
        'test_acc': -1,
        'model_filename': args.model_state_file,
    }
    # Per-epoch metric histories start empty.
    for metric in ('train_loss', 'train_acc', 'val_loss', 'val_acc'):
        state[metric] = []
    return state
def update_train_state(args, model, train_state):
    """Handle the training state updates.

    Components:
     - Early Stopping: Prevent overfitting.
     - Model Checkpoint: Model is saved if the model is better

    :param args: main arguments (uses args.early_stopping_criteria)
    :param model: model to train
    :param train_state: a dictionary representing the training state values
    :returns:
        a new train_state
    """

    # Save one model at least
    if train_state['epoch_index'] == 0:
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False

    # Save model if performance improved
    elif train_state['epoch_index'] >= 1:
        loss_t = train_state['val_loss'][-1]

        # If loss worsened relative to the best value seen so far
        if loss_t >= train_state['early_stopping_best_val']:
            # Update step
            train_state['early_stopping_step'] += 1
        # Loss decreased
        else:
            # Save the best model and remember the new best loss.
            # Bug fix: 'early_stopping_best_val' was never updated before,
            # so it stayed at 1e8 and early stopping could never trigger.
            torch.save(model.state_dict(), train_state['model_filename'])
            train_state['early_stopping_best_val'] = loss_t

            # Reset early stopping step
            train_state['early_stopping_step'] = 0

        # Stop early ?
        train_state['stop_early'] = \
            train_state['early_stopping_step'] >= args.early_stopping_criteria

    return train_state
def compute_accuracy(y_pred, y_target):
    """Percentage of rows whose argmax prediction matches the target label."""
    predicted_indices = y_pred.argmax(dim=1)
    n_correct = (predicted_indices == y_target).sum().item()
    return n_correct / len(predicted_indices) * 100
# + [markdown] colab_type="text" id="fQfw6SqPC0PN"
# #### general utilities
# + colab={} colab_type="code" id="fIMa66CbC0PN"
def set_seed_everywhere(seed, cuda):
    """Seed every RNG used in this notebook for reproducibility.

    Args:
        seed (int): the seed value
        cuda (bool): also seed all CUDA devices when True
    """
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
def handle_dirs(dirpath):
    """Create dirpath (including parents) if it does not already exist."""
    os.makedirs(dirpath, exist_ok=True)
def load_glove_from_file(glove_filepath):
    """
    Load the GloVe embeddings

    Args:
        glove_filepath (str): path to the glove embeddings file
    Returns:
        word_to_index (dict), embeddings (numpy.ndarray)
    """
    word_to_index = {}
    vectors = []
    with open(glove_filepath, "r", encoding='utf8') as fp:
        for index, line in enumerate(fp):
            fields = line.split(" ")  # each line: word num1 num2 ...
            word_to_index[fields[0]] = index
            vectors.append(np.array([float(value) for value in fields[1:]]))
    return word_to_index, np.stack(vectors)
def make_embedding_matrix(glove_filepath, words):
    """
    Create embedding matrix for a specific set of words.

    Args:
        glove_filepath (str): file path to the glove embeddings
        words (list): list of words in the dataset
    Returns:
        numpy.ndarray of shape (len(words), embedding_size)
    """
    word_to_idx, glove_embeddings = load_glove_from_file(glove_filepath)
    embedding_size = glove_embeddings.shape[1]
    final_embeddings = np.zeros((len(words), embedding_size))

    for row, word in enumerate(words):
        if word in word_to_idx:
            # Known word: copy its pre-trained GloVe vector.
            final_embeddings[row, :] = glove_embeddings[word_to_idx[word]]
        else:
            # Unknown word: draw a fresh Xavier-uniform vector.
            random_vector = torch.ones(1, embedding_size)
            torch.nn.init.xavier_uniform_(random_vector)
            final_embeddings[row, :] = random_vector
    return final_embeddings
# + [markdown] colab_type="text" id="Ug51YzMLC0PR"
# ### Settings and some prep work
# + colab={} colab_type="code" id="q3g36XC1C0PS"
from argparse import Namespace
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" executionInfo={"elapsed": 16543, "status": "ok", "timestamp": 1586004194322, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="tD-fOfG7C0PU" outputId="0eb663bc-f540-42c9-d846-f0e6d931ecc7"
# Hyperparameters and runtime options for the whole experiment.
args = Namespace(
    # Data and Path hyper parameters
    news_csv="data/ag_news/news_with_splits.csv",
    vectorizer_file="vectorizer.json",
    model_state_file="model.pth",
    save_dir="model_storage/ch5/document_classification",
    model_mode="sent",  # choose from word, char, bpe-char, bpe-word, sent
    # Model hyper parameters
    glove_filepath='data/glove/glove.6B.100d.txt',
    use_glove=False,
    word_embedding_size=100,
    char_embedding_size=50,
    char_kernel_size=5,
    hidden_dim=100,
    word_num_channels=100,
    # Training hyper parameter
    seed=1337,
    learning_rate=0.001,
    weight_decay=1e-5,  # Newly added to regularize variance
    dropout_p=0.2,  # 0.1
    batch_size=128,
    num_epochs=100,
    early_stopping_criteria=5,
    vocab_size=10000,  # 1000, 3000, 10000; only used by bpe/sent modes
    # Runtime option
    cuda=True,
    catch_keyboard_interrupt=True,
    reload_from_files=False,
    expand_filepaths_to_save_dir=True
)

if args.expand_filepaths_to_save_dir:
    # Prefix artifact names with the tokenization mode (and the vocab size for
    # bpe/sent modes) so runs with different settings do not overwrite each other.
    args.vectorizer_file = args.model_mode + "_" + args.vectorizer_file
    args.model_state_file = args.model_mode + "_" + args.model_state_file
    if "bpe" in args.model_mode or args.model_mode == "sent":
        args.vectorizer_file = str(args.vocab_size) + "_" + args.vectorizer_file
        args.model_state_file = str(args.vocab_size) + "_" + args.model_state_file
    args.vectorizer_file = os.path.join(args.save_dir,
                                        args.vectorizer_file)
    args.model_state_file = os.path.join(args.save_dir,
                                         args.model_state_file)
    print("Expanded filepaths: ")
    print("\t{}".format(args.vectorizer_file))
    print("\t{}".format(args.model_state_file))

# Check CUDA
if not torch.cuda.is_available():
    args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))

# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)

# handle dirs
handle_dirs(args.save_dir)
# + [markdown] colab_type="text" id="yzA1NHAaC0PX"
# ### Initializations
# + colab={} colab_type="code" id="HKUf6plpC0PX"
# Keep randomly-initialized embeddings (re-asserts the Namespace default above).
args.use_glove = False
# + colab={"base_uri": "https://localhost:8080/", "height": 437} colab_type="code" executionInfo={"elapsed": 15875, "status": "ok", "timestamp": 1586004215368, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="XB1QULtbC0PZ" outputId="4591035e-62b4-4556-baf6-71fe33c6474e"
if args.reload_from_files:
    # training from a checkpoint: reuse the cached vectorizer
    dataset = NewsDataset.load_dataset_and_load_vectorizer(args.news_csv,
                                                           args.vectorizer_file)
else:
    # create dataset and vectorizer from scratch, then cache the vectorizer
    dataset = NewsDataset.load_dataset_and_make_vectorizer(args.news_csv,
                                                           mode=args.model_mode,
                                                           vocab_size=args.vocab_size)
    dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()

print("Title vocabulary size:", len(vectorizer.title_vocab))
print("Max sentence length:", dataset._max_sent_length)
print("Max sequence length:", dataset._max_seq_length)
print("Max word length:", dataset._max_word_length)

# Use GloVe or randomly initialized embeddings
if args.use_glove:
    words = vectorizer.title_vocab._token_to_idx.keys()
    embeddings = make_embedding_matrix(glove_filepath=args.glove_filepath,
                                       words=words)
    print("Using pre-trained embeddings")
else:
    print("Not using pre-trained embeddings")
    embeddings = None

# NOTE(review): GloVe vectors are word-level; confirm use_glove stays False
# for the char/bpe/sent modes, where vocabulary entries are not words.
classifier = NewsClassifier(model_mode=args.model_mode,
                            char_embedding_size=args.char_embedding_size,
                            word_embedding_size=args.word_embedding_size,
                            char_num_embeddings=len(vectorizer.title_vocab),
                            word_num_channels=args.word_num_channels,
                            char_kernel_size=args.char_kernel_size,
                            hidden_dim=args.hidden_dim,
                            num_classes=len(vectorizer.category_vocab),
                            dropout_p=args.dropout_p,
                            char_pretrained_embeddings=embeddings,
                            padding_idx=0)
# print(classifier)
# + [markdown] colab_type="text" id="xjxGxz8NC0Pb"
# ### Training loop
# + colab={"base_uri": "https://localhost:8080/", "height": 113, "referenced_widgets": ["b9b262f1d38e4474a4dc34187f252ddb", "d9fdaab286bf46ea9a29a0953e764cf9", "9d664f31a1874432b7e98a8e30a3dd85", "09d3e392c9564e149d6ebe54201951cf", "485369e8103a4dd0a6c047c8f1f517d0", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "df87c06505104ce595d39474dc2a2b97", "85601091aeb3405ab63ba45b00a6defb", "52d23709e0124978a104d3ac5db443cd", "3838672add9e4f8e9aa0f7544eb31f62", "2990caa532ce4e3eb33edac5eeb86159", "bda1baac71274e46bb3879e31e4b0a82", "3ecad7d44eab45e5a1a2db4e740c17f0", "65419a3ebaad40698d5391ee780cbfcb", "e862a0cafb334453bd8ad74183e417ce", "64ae2237214f47a9aed34659ca9018a0"]} colab_type="code" executionInfo={"elapsed": 4344580, "status": "ok", "timestamp": 1586008550171, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="zEdWKhqUC0Pb" outputId="ace92d5d-e86a-4624-c373-eebfdbbfda55"
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)

# Class-weighted cross entropy to compensate for label imbalance.
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
# Halve the learning rate when the validation loss plateaus.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                 mode='min', factor=0.5,
                                                 patience=1, verbose=True)  # turned on verbose

train_state = make_train_state(args)

epoch_bar = tqdm(desc='training routine',
                 total=args.num_epochs,
                 position=0)

dataset.set_split('train')
train_bar = tqdm(desc='split=train',
                 total=dataset.get_num_batches(args.batch_size),
                 position=1,
                 leave=True)
dataset.set_split('val')
val_bar = tqdm(desc='split=val',
               total=dataset.get_num_batches(args.batch_size),
               position=1,
               leave=True)

try:
    for epoch_index in range(args.num_epochs):
        train_state['epoch_index'] = epoch_index

        # Iterate over training dataset

        # setup: batch generator, set loss and acc to 0, set train mode on
        dataset.set_split('train')
        batch_generator = generate_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
        running_loss = 0.0
        running_acc = 0.0
        classifier.train()

        for batch_index, batch_dict in enumerate(batch_generator):
            # the training routine is these 5 steps:

            # --------------------------------------
            # step 1. zero the gradients
            optimizer.zero_grad()

            # step 2. compute the output
            y_pred = classifier(batch_dict['x_data'])

            # step 3. compute the loss
            loss = loss_func(y_pred, batch_dict['y_target'])
            loss_t = loss.item()
            # incremental running mean; avoids storing per-batch losses
            running_loss += (loss_t - running_loss) / (batch_index + 1)

            # step 4. use loss to produce gradients
            loss.backward()

            # step 5. use optimizer to take gradient step
            optimizer.step()
            # -----------------------------------------
            # compute the accuracy
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
            running_acc += (acc_t - running_acc) / (batch_index + 1)

            # update bar
            train_bar.set_postfix(loss=running_loss, acc=running_acc,
                                  epoch=epoch_index)
            train_bar.update()

        train_state['train_loss'].append(running_loss)
        train_state['train_acc'].append(running_acc)

        # Iterate over val dataset

        # setup: batch generator, set loss and acc to 0; set eval mode on
        dataset.set_split('val')
        batch_generator = generate_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
        running_loss = 0.
        running_acc = 0.
        classifier.eval()

        for batch_index, batch_dict in enumerate(batch_generator):
            # compute the output
            y_pred = classifier(batch_dict['x_data'])

            # step 3. compute the loss
            loss = loss_func(y_pred, batch_dict['y_target'])
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (batch_index + 1)

            # compute the accuracy
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
            running_acc += (acc_t - running_acc) / (batch_index + 1)
            val_bar.set_postfix(loss=running_loss, acc=running_acc,
                                epoch=epoch_index)
            val_bar.update()

        train_state['val_loss'].append(running_loss)
        train_state['val_acc'].append(running_acc)

        # checkpoint / early-stopping bookkeeping, then the LR schedule
        train_state = update_train_state(args=args, model=classifier,
                                         train_state=train_state)

        scheduler.step(train_state['val_loss'][-1])

        if train_state['stop_early']:
            break

        train_bar.n = 0
        val_bar.n = 0
        epoch_bar.update()

except KeyboardInterrupt:
    print("Exiting loop")
# + colab={} colab_type="code" id="zPB3qYEmC0Pd"
# compute the loss & accuracy on the test set using the best available model

# Reload the best checkpoint saved by update_train_state.
classifier.load_state_dict(torch.load(train_state['model_filename']))

classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)

dataset.set_split('test')
batch_generator = generate_batches(dataset,
                                   batch_size=args.batch_size,
                                   device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()

for batch_index, batch_dict in enumerate(batch_generator):
    # compute the output
    y_pred = classifier(batch_dict['x_data'])

    # compute the loss
    loss = loss_func(y_pred, batch_dict['y_target'])
    loss_t = loss.item()
    running_loss += (loss_t - running_loss) / (batch_index + 1)

    # compute the accuracy
    acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
    running_acc += (acc_t - running_acc) / (batch_index + 1)

train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 4247982, "status": "ok", "timestamp": 1586008555713, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="UAkr6GA6C0Pg" outputId="5bfb49a5-8c91-4f43-830f-34beadfca1ca"
# Report the final held-out metrics.
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
# + [markdown] colab_type="text" id="gHlT5Gr5C0Ph"
# ### Inference
# + colab={} colab_type="code" id="KUO6RBjkC0Pi"
# Preprocess the reviews
def preprocess_text(text):
    """Lowercase text, pad .,!? with spaces, and collapse other symbols to spaces."""
    lowered = ' '.join(token.lower() for token in text.split(" "))
    spaced = re.sub(r"([.,!?])", r" \1 ", lowered)
    return re.sub(r"[^a-zA-Z.,!?]+", r" ", spaced)
# + colab={} colab_type="code" id="cJY1_yCNC0Pj"
def predict_category(title, classifier, vectorizer, max_seq_length, max_word_length, max_sent_length):
    """Predict a News category for a new title

    Args:
        title (str): a raw title string
        classifier (NewsClassifier): an instance of the trained classifier
        vectorizer (NewsVectorizer): the corresponding vectorizer
        max_seq_length (int): the max sequence length
        max_word_length (int): the max word length
        max_sent_length (int): the max sentence length

    Note: CNNs are sensitive to the input data tensor size.
        These ensure the input keeps the same size as the training data.
    """
    cleaned = preprocess_text(title)
    vector = vectorizer.vectorize(cleaned, max_seq_length,
                                  max_word_length, max_sent_length)
    batch = torch.tensor(vector).unsqueeze(0)  # add a batch dimension
    probabilities = classifier(batch, apply_softmax=True)
    best_prob, best_index = probabilities.max(dim=1)
    category = vectorizer.category_vocab.lookup_index(best_index.item())
    return {'category': category,
            'probability': best_prob.item()}
# + colab={} colab_type="code" id="HHCFNxAJC0Pl"
def get_samples():
    """Gather up to five validation titles per category (reads the module-level
    `dataset`)."""
    validation = dataset.val_df
    return {category: validation.title[validation.category == category].tolist()[:5]
            for category in validation.category.unique()}


val_samples = get_samples()
# + colab={"base_uri": "https://localhost:8080/", "height": 969} colab_type="code" executionInfo={"elapsed": 1358, "status": "ok", "timestamp": 1586010107609, "user": {"displayName": "Banausic", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjYVsjWa_XDZoiSoyArYulTOek6XUfsvUZixt6_=s64", "userId": "07824873713850476084"}, "user_tz": -480} id="7gEhCWe6C0Pn" outputId="97c6ec98-6f49-49c4-e21c-1d673e47cb0a"
# title = input("Enter a news title to classify: ")

# Run inference on CPU regardless of where training happened.
classifier = classifier.to("cpu")

for truth, sample_group in val_samples.items():
    print(f"True Category: {truth}")
    print("="*30)
    for sample in sample_group:
        # NOTE(review): the +1 on each max length looks like slack so padded
        # inference tensors are at least as large as during training — confirm
        # against NewsVectorizer.vectorize.
        prediction = predict_category(sample, classifier,
                                      vectorizer, dataset._max_seq_length + 1,
                                      dataset._max_word_length + 1,
                                      dataset._max_sent_length + 1)
        print("Prediction: {} (p={:0.2f})".format(prediction['category'],
                                                  prediction['probability']))
        print("\t + Sample: {}".format(sample))
        print("-"*30 + "\n")
# + colab={} colab_type="code" id="oXHZhY74C0Po"
| assignment3/assignment3_subword.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dynamic & Static
#
# Neural network frameworks today fall into two camps: static-graph frameworks and dynamic-graph frameworks. The biggest difference between PyTorch and frameworks such as TensorFlow and Caffe is how they represent the computation graph. TensorFlow uses a static graph: we define the computation graph once and then reuse it repeatedly, whereas PyTorch rebuilds a new computation graph on every run. Through this lesson we will understand the advantages and disadvantages of static versus dynamic graphs.
#
# From the user's point of view the two kinds of computation graphs feel very different, and each has its own advantages. Dynamic graphs are more convenient for debugging — users can debug in whatever way they like — and they are very intuitive. A static graph, on the other hand, is defined once up front; later runs reuse the graph without rebuilding it, so a static graph can run faster than a dynamic one.
# 
# ### Tensorflow: Static Graph
#
# In TensorFlow, we define the computational graph once and then execute the same graph over and over again, possibly feeding different input data to the graph.
# Here we use TensorFlow to fit a simple two-layer net:
# +
# Code in file autograd/tf_two_layer_net.py
import tensorflow as tf
import numpy as np
# First we set up the computational graph:
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
# TF1-style static graph: everything below only BUILDS the graph; nothing
# executes until sess.run() is called at the bottom.
# NOTE(review): this cell uses the TensorFlow 1.x API (tf.placeholder,
# tf.Session); under TF 2.x it needs tf.compat.v1 — confirm the installed
# TensorFlow version.
N, D_in, H, D_out = 64, 1000, 100, 10

# +
# Create placeholders for the input and target data; these will be filled
# with real data when we execute the graph.
x = tf.placeholder(tf.float32, shape=(None, D_in))
y = tf.placeholder(tf.float32, shape=(None, D_out))

# Create Variables for the weights and initialize them with random data.
# A TensorFlow Variable persists its value across executions of the graph.
w1 = tf.Variable(tf.random_normal((D_in, H)))
w2 = tf.Variable(tf.random_normal((H, D_out)))
# -

# Forward pass: Compute the predicted y using operations on TensorFlow Tensors.
# Note that this code does not actually perform any numeric operations; it
# merely sets up the computational graph that we will later execute.
h = tf.matmul(x, w1)
h_relu = tf.maximum(h, tf.zeros(1))
y_pred = tf.matmul(h_relu, w2)

# +
# Compute loss using operations on TensorFlow Tensors
loss = tf.reduce_sum((y - y_pred) ** 2.0)

# Compute gradient of the loss with respect to w1 and w2.
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])

# Update the weights using gradient descent. To actually update the weights
# we need to evaluate new_w1 and new_w2 when executing the graph. Note that
# in TensorFlow the act of updating the value of the weights is part of
# the computational graph; in PyTorch this happens outside the computational
# graph.
learning_rate = 1e-6
new_w1 = w1.assign(w1 - learning_rate * grad_w1)
new_w2 = w2.assign(w2 - learning_rate * grad_w2)
# -

# Now we have built our computational graph, so we enter a TensorFlow session to
# actually execute the graph.
with tf.Session() as sess:
    # Run the graph once to initialize the Variables w1 and w2.
    sess.run(tf.global_variables_initializer())

    # Create numpy arrays holding the actual data for the inputs x and targets y
    x_value = np.random.randn(N, D_in)
    y_value = np.random.randn(N, D_out)
    for _ in range(500):
        # Execute the graph many times. Each time it executes we want to bind
        # x_value to x and y_value to y, specified with the feed_dict argument.
        # Each time we execute the graph we want to compute the values for loss,
        # new_w1, and new_w2; the values of these Tensors are returned as numpy
        # arrays.
        loss_value, _, _ = sess.run([loss, new_w1, new_w2],
                                    feed_dict={x: x_value, y: y_value})
        print(loss_value)
# ### Pytorch: Dynamic Graph
#
# Here we use PyTorch Tensors and autograd to implement our two-layer network.
#
# When using autograd, the forward pass of your network will define a computational graph; nodes in the graph will be Tensors, and edges will be functions that produce output Tensors from input Tensors. Backpropagating through this graph then allows you to easily compute gradients.
# +
import torch
# All tensors in this example live on the CPU.
device = torch.device('cpu')
# device = torch.device('cuda') # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# +
# Create random Tensors to hold input and outputs
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)
# Create random Tensors for weights; setting requires_grad=True means that we
# want to compute gradients for these Tensors during the backward pass.
w1 = torch.randn(D_in, H, device=device, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, requires_grad=True)
# -
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y using operations on Tensors. Since w1 and
    # w2 have requires_grad=True, operations involving these Tensors will cause
    # PyTorch to build a computational graph, allowing automatic computation of
    # gradients. Since we are no longer implementing the backward pass by hand we
    # don't need to keep references to intermediate values.
    # Note: .clamp(min=0) is an elementwise ReLU on the hidden activations.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    # Compute and print loss. Loss is a Tensor of shape (), and loss.item()
    # is a Python number giving its value.
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())
    # Use autograd to compute the backward pass. This call will compute the
    # gradient of loss with respect to all Tensors with requires_grad=True.
    # After this call w1.grad and w2.grad will be Tensors holding the gradient
    # of the loss with respect to w1 and w2 respectively.
    loss.backward()
    # Update weights using gradient descent. For this step we just want to mutate
    # the values of w1 and w2 in-place; we don't want to build up a computational
    # graph for the update steps, so we use the torch.no_grad() context manager
    # to prevent PyTorch from building a computational graph for the updates
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        # Manually zero the gradients after running the backward pass
        # (backward() *accumulates* into .grad, so without this the next
        # iteration would add to stale gradients).
        w1.grad.zero_()
        w2.grad.zero_()
| Chapter_1_Pytorch_Basic/Dynamic_Graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from collections import defaultdict
fn = '../data2021/input03.txt'
lnth = 12
# +
def most_common(lst):
    """Return the value that occurs most often in *lst*.

    Ties are broken arbitrarily (set iteration order), exactly as in the
    original implementation.
    """
    distinct_values = set(lst)
    return max(distinct_values, key=lst.count)
def least_common(lst):
    """Return the value that occurs least often in *lst*.

    Ties are broken arbitrarily (set iteration order), exactly as in the
    original implementation.
    """
    distinct_values = set(lst)
    return min(distinct_values, key=lst.count)
# +
# part 1
# d maps bit position -> list of that position's bits across every report line.
d = {x:[] for x in range(lnth)}
with open(fn, "r") as file:
    for line in file:
        lst = list(line.replace("\n",""))
        for i in range(lnth):
            d[i].append(lst[i])
# +
# gamma takes the most common bit of each column, epsilon the least common;
# the puzzle answer is the product of their decimal values (AoC 2021 day 3).
gamma = []
epsilon = []
for i in range(lnth):
    gamma.append(most_common(d[i]))
    epsilon.append(least_common(d[i]))
print(int("".join(gamma), 2) * int("".join(epsilon), 2))
# +
# part 2
# The original cell was broken: it indexed `d[0][x]` instead of column `i`
# of the report rows, multiplied characters by False, left `dgood` unused,
# and ended in a bare `if` (a SyntaxError). Rewritten as the standard
# bit-criteria filter for AoC 2021 day 3 part 2.
def _filter_by_bit_criteria(rows, keep_most_common):
    """Return the single report row left after filtering column by column.

    At each bit position, keep only the rows whose bit matches the most
    common value in that position (ties keep '1') when *keep_most_common*
    is True — the oxygen-generator rating — or the least common value
    (ties keep '0') when False — the CO2-scrubber rating.

    Parameters:
        rows: list of equal-length '0'/'1' strings
        keep_most_common: True for oxygen generator, False for CO2 scrubber
    Returns:
        str: the last remaining row
    """
    candidates = list(rows)
    for i in range(len(candidates[0])):
        if len(candidates) == 1:
            break
        ones = sum(1 for row in candidates if row[i] == "1")
        zeros = len(candidates) - ones
        if keep_most_common:
            keep = "1" if ones >= zeros else "0"
        else:
            keep = "0" if zeros <= ones else "1"
        candidates = [row for row in candidates if row[i] == keep]
    return candidates[0]

# Read the diagnostic report as a list of bit strings.
d2 = []
with open(fn, "r") as file:
    for line in file:
        d2.append(line.replace("\n", ""))

oxygen = int(_filter_by_bit_criteria(d2, keep_most_common=True), 2)
co2 = int(_filter_by_bit_criteria(d2, keep_most_common=False), 2)
# Puzzle answer: life-support rating = oxygen rating * CO2 rating.
print(oxygen * co2)
# -
| 2021/Day03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VARIATIONAL AUTOENCODER
# Right now the setup is executing the alternative way, with the log likelihood mathematically hardcoded in the loss function, to revert to CrossEntropy follow the comments starting with !!!
# +
from torch.utils.data import DataLoader
import torch.optim as optim
from torchvision import datasets
import torchvision.transforms as transforms
import vae
from vae_utils import *
# -
# set learning parameters
epochs = 100
batch_size = 128
lr = 0.001
# +
# prepare the data, divided in train and test (no validation)
train_data = datasets.MNIST(
    root='../input/data',
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
        # NOTE(review): Normalize((0,), (1,)) subtracts 0 and divides by 1,
        # i.e. it is an identity transform — pixels stay in the [0, 1] range
        # produced by ToTensor. Confirm whether real MNIST normalization
        # (mean 0.1307, std 0.3081) was intended.
        transforms.Normalize(
            (0,), (1,))
    ]) # data is otherwise an image in wrong format and not normalized
)
test_data = datasets.MNIST(
    root='../input/data',
    train=False,
    download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            (0,), (1,))
    ])
)
# prepare dataloaders for both data, useful in pytorch
# Only the training loader shuffles; evaluation order is kept fixed.
train_loader = DataLoader(
    train_data,
    batch_size=batch_size,
    shuffle=True
)
test_loader = DataLoader(
    test_data,
    batch_size=batch_size,
    shuffle=False
)
# -
# Execute asked tasks with latent_dim = 2 and latent_dim = 32.
# The two runs below used to be two copy-pasted training loops; the shared
# logic is factored into _run_experiment so the only remaining differences
# are the model configuration and which fit/test pipeline is used.
def _run_experiment(model, fit_fn, test_fn, num_samples=15):
    """Train *model* for `epochs` epochs and record per-epoch losses.

    Parameters:
        model: the vae.VAE instance to train
        fit_fn: training-epoch function (`fit` for the standard BCE loss,
            `fit_alternative` for the hardcoded log-likelihood loss)
        test_fn: matching evaluation function (`test` or `test_alternative`)
        num_samples: number of digits sampled from the prior each epoch

    Side effects: saves reconstruction and generated-digit images every
    epoch and a final train/test loss plot.
    """
    optimizer = optim.Adam(model.parameters(), lr=lr)
    train_loss = []
    test_loss = []
    for epoch in range(epochs):
        print(f"Epoch {epoch+1} of {epochs}")
        train_epoch_loss = fit_fn(model, train_loader, optimizer, train_data)
        train_loss.append(train_epoch_loss)
        print(f"Train Loss: {train_epoch_loss[0]:.4f}, {train_epoch_loss[1]:.4f}, {train_epoch_loss[2]:.4f}")
        plot_reconstructed_digits(model, epoch=epoch, save=True)
        test_epoch_loss = test_fn(model, test_loader, test_data=test_data, epoch=epoch, save=True, labelled=True)
        test_loss.append(test_epoch_loss)
        print(f"Test Loss: {test_epoch_loss[2]:.4f}")
        # Sample fresh digits from the prior to track generation quality.
        generated_digits = model.generate_many(num_samples=num_samples)
        save_image(generated_digits.view(num_samples, 1, 28, 28), f"../outputs/generated/generated{epoch}.png", nrow=num_samples)
    plot_loss(train_loss, test_loss, epochs)

# latent_dim = 2 run with the standard (BCE) fit/test pipeline.
# !!! swap in fit_alternative/test_alternative here for the hardcoded loss.
_run_experiment(vae.VAE(latent_dim=2), fit, test)
# latent_dim = 32 run with the hardcoded log-likelihood pipeline
# (use_BCE_loss=False). !!! swap in fit/test here for the standard loss.
_run_experiment(vae.VAE(latent_dim=32, use_BCE_loss=False), fit_alternative, test_alternative)
| EX4/src/task3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from mplsoccer.pitch import Pitch
from mplsoccer.statsbomb import read_event, EVENT_SLUG
import os
# load first game that Messi played as a false-9 and the match before
# (the kwargs switch off the extra dataframes read_event can return, so
# only the event dataframe is parsed)
kwargs = {'related_event_df': False,'shot_freeze_frame_df': False, 'tactics_lineup_df': False}
df_false9 = read_event(os.path.join(EVENT_SLUG,'69249.json'), **kwargs)['event']
df_before_false9 = read_event(os.path.join(EVENT_SLUG,'69251.json'), **kwargs)['event']
# filter messi's actions (starting positions)
# NOTE(review): 5503 is assumed to be Messi's StatsBomb player_id — confirm.
df_false9 = df_false9.loc[df_false9.player_id == 5503,['x', 'y']]
df_before_false9 = df_before_false9.loc[df_before_false9.player_id == 5503,['x', 'y']]
# plotting
# Draw a 1x2 grid of pitches: ax[0] = the match before, ax[1] = the false-9 debut.
pitch = Pitch(pitch_type = 'statsbomb', figsize = (16, 9), layout = (1, 2), pitch_color = 'grass', stripe = True)
fig, ax = pitch.draw()
ax[0].set_title('Messi in the game directly before \n playing in the false 9 role', fontsize = 25, pad = 20)
# Kernel-density contours of Messi's event locations on each pitch.
pitch.kdeplot(df_before_false9.x, df_before_false9.y, ax = ax[0], cmap = 'plasma', linewidths = 3)
# Score labels: the 6-2 annotation is drawn on ax[1] (false-9 debut),
# the 2-2 annotation on ax[0] (the game before).
pitch.annotate('6-2 thrashing \nof Real Madrid', (25,10), color = 'white',
               fontsize = 25, ha = 'center', va = 'center', ax = ax[1])
ax[1].set_title('The first Game Messi \nplayed in the false 9 role', fontsize = 25, pad = 20)
pitch.kdeplot(df_false9.x, df_false9.y, ax = ax[1], cmap = 'plasma', linewidths = 3)
pitch.annotate('2-2 draw \nagainst Valencia', (25,10), color = 'white',
               fontsize = 25, ha = 'center', va = 'center', ax = ax[0])
# Arrow annotations (on ax[0]) explaining the contour colour scale.
pitch.annotate('more events', (90,68), (30,68), ax=ax[0], color='white', ha = 'center', va = 'center',
               fontsize = 20, arrowprops=dict(facecolor='white', edgecolor = 'None'))
pitch.annotate('fewer events', (80,17), (80,5), ax=ax[0], color='white', ha = 'center', va = 'center',
               fontsize = 20, arrowprops=dict(facecolor='white', edgecolor = 'None'))
fig.savefig(os.path.join('figures', 'README_kdeplot_example.png'), bbox_inches = 'tight')
| docs/05-Plotting-kdeplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# name: ir
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/mbururyan/Carrefour-KE-Data-Analysis/blob/main/Carrefour_Data_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="HQHvuYYQbhJo"
# # Carrefour Kenya Data Analysis
# + [markdown] id="zhxRDtJibpC7"
# This is a notebook where unsupervised learning processes such as Dimensionality Reduction, Feature Selection, association analysis and anomaly detection will be implemented so as to help boost the sales of the company.
# + [markdown] id="3S39xG3zZnhS"
# # Load Required Libraries
# + colab={"base_uri": "https://localhost:8080/"} id="aR7NaUmCZsnd" outputId="041bb38b-ac5c-4e8d-bd35-20ba5548b71a"
library(tidyverse)
# + id="zmTsKgOfao3S"
# View available libraries
library()
# + colab={"base_uri": "https://localhost:8080/"} id="C_5p5V0ybZZc" outputId="0198d4f5-be04-473e-e208-0555fd9c420b"
# Load Tsne package
install.packages("Rtsne")
# + colab={"base_uri": "https://localhost:8080/"} id="BD5nOUwClon0" outputId="612b5a06-a255-4612-b21a-733fb8dcaa96"
# Caret package
install.packages('caret')
# + colab={"base_uri": "https://localhost:8080/"} id="GdrbBXJxyIyE" outputId="bd1cff61-f324-4a27-b1f8-a50783a0f5b6"
#Corrplot package
install.packages('corrplot')
# + colab={"base_uri": "https://localhost:8080/"} id="A4S6ibqZ9HMs" outputId="b3b98bf5-e778-45c3-e8dd-4f431e80fb9a"
# Installing the wrapper packages
install.packages('clustvarsel')
# + colab={"base_uri": "https://localhost:8080/"} id="LoldhaydcFEv" outputId="a39d5fab-a307-4efa-e6c3-f942ac2a73cd"
install.packages('mclust')
# + colab={"base_uri": "https://localhost:8080/"} id="spwoVG1-EDTv" outputId="08e2e084-dcb4-4965-ea71-1bb1109bb904"
# Arules package for association analysis
install.packages('arules')
# + colab={"base_uri": "https://localhost:8080/"} id="-QJf6eXATask" outputId="e59b2497-726f-4993-c83b-2734cb6fbab2"
# Anomalize package for anomaly detection
install.packages('anomalize')
# + colab={"base_uri": "https://localhost:8080/"} id="dEv5cPkEemHo" outputId="8b76d481-607c-4e5e-a2be-4175d4f32c69"
install.packages("tibbletime")
# + [markdown] id="LzARUh90cMJn"
# # Loading the data
# + colab={"base_uri": "https://localhost:8080/", "height": 410} id="Qy9k1pQoaWXn" outputId="12f0cf90-eefb-469e-f1c0-9d4baf7bddba"
# Load the first data set for dimension Reduction and feature selection
df1 <- read.csv('/content/Supermarket_Dataset_1 - Sales Data.csv')
head(df1)
# + colab={"base_uri": "https://localhost:8080/"} id="PnOk38s-cP7F" outputId="1d27796f-60b0-4f07-c356-177306583d15"
# Check for no of rows vs columns
print(nrow(df1))
print(ncol(df1))
# + [markdown] id="hnDJOuVfdUue"
# The dataset has 1000 rows of data with 16 features
# + [markdown] id="yjT5LWP8eTFS"
# * We will go right into Dimensionality Reduction, due to time constraints.
# + [markdown] id="RdpzuIh4edsM"
# # Dimensionality Reduction
# + [markdown] id="dWQ3p0kWhYGw"
# ### Will use t-SNE to reduce the dimensions of the data to a low dimensional space.
#
# This is so as PCA cannot retain non-linear variance. Our data consistes of different datatypes and scales, most of which are non linear, and will hopefully be handled by t-SNE.
# + id="GP4QdfDD9J1y"
# Load the t-SNE library
library(Rtsne)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="exbgbrODencV" outputId="d21f15e3-cc26-4189-cd2e-dc0494d78390"
# Load the data
df_tsne <- df1
# Separate the label for plotting
label <- df_tsne$Branch
unique(label)
# + colab={"base_uri": "https://localhost:8080/"} id="V_5qeOaGiuJ5" outputId="b6671d6a-c64a-48ef-8e51-53b68e78087b"
# Change the label to factor datatype and assign colors
label <- as.factor(label)
print(class(label))
# Assign colors
colors <- rainbow(length(unique(label)))
names(colors) <- unique(label)
print(colors)
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="kx-aide7jSLS" outputId="aa25b513-7af0-4a33-884c-9f1f8ad1167b"
# Specify features to be used and proceed in encoding
df_tsne2 <- df_tsne[, c(2:8, 11, 12, 14, 15, 16)]
head(df_tsne2)
# + colab={"base_uri": "https://localhost:8080/"} id="qEaLHKz5k7B-" outputId="2ba9611f-6638-40e6-d215-7b86cc215ed1"
# Will proceed to hot encode the categorical columns then apply tsne
# Will try to use caret
library(caret)
# + colab={"base_uri": "https://localhost:8080/", "height": 306} id="F91ncurDmWrL" outputId="d4d49123-2482-4b08-f5d0-c516a79bc2fa"
# We will use the DummyVars function
dummys <- dummyVars('~.', data=df_tsne2)
df_encoded <- data.frame(predict(dummys, newdata = df_tsne2))
# Load first 5 rows
head(df_encoded)
# + colab={"base_uri": "https://localhost:8080/"} id="kLAERTlQnXdP" outputId="1f7b4ee9-a591-4aba-dd05-1619c441731b"
# Ok the data is encoded so lets reduce the features that were actually increased after hot encoding using dummyVars
# Will use the Rtsne library
tsne <- Rtsne(df_encoded, dims=2, perplexity = 30, verbose = TRUE, max_iter = 500)
# + [markdown] id="AoMfQI_KoJuz"
# The model is built with an execution time of 0.96 seconds, which is much faster than when done in Python
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="BV9pJGJ0oQmQ" outputId="7f9354a8-a331-408d-8b8c-f8046a66044c"
# Vizualize the dimensions
plot(tsne$Y, t='n', main='Low Dimension Data after t-SNE')
text(tsne$Y)
# + [markdown] id="mIh1alWVpirN"
# Attempted to reduce the features and tSNE is successful. The 21 features have been reduced to two.
#
# However, clustering of the data is an issue as there is no outlined label. Attempted to use ranch of the supermarkets and here are the results.
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="i7NjjWoUpMOj" outputId="ef78f4e6-247c-4d69-d4b6-3005396c68d7"
# Attempting to cluster the dimensions according to the Branch of the supermarket
plot(tsne$Y, t='n', main='Low Dimension Data after t-SNE')
text(tsne$Y, labels = label, col = colors)
# + [markdown] id="j-CCkwR8rAW7"
# # Feature Selection
# + [markdown] id="az85vZP4rLb8"
# When performing Machine Learning or Data Analysis in general, some features carry more weight than others, and feature selection techniques come in handy to help us pick out those features for more accurate models and results.
# + colab={"base_uri": "https://localhost:8080/", "height": 306} id="s4tCDg8wrC6_" outputId="0a496e54-4d64-42fb-fd51-d8d0e0ec3d60"
# View our data
# Will use the encoded data created in the t-SNE section
df_fs <- df_encoded
head(df_fs)
# + [markdown] id="bNL3DF-5xQht"
# ## Will use filter method and wrapper method then compare the results
# + [markdown] id="5wGLhUx5xWMC"
# ## 1. Filter Method
# + colab={"base_uri": "https://localhost:8080/"} id="gTl7Db_iyARL" outputId="397d447d-72d2-458c-a5d1-b8c0af14353c"
# Load corrplot library
library(corrplot)
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="BtcZLn4R0MoK" outputId="0227f543-e84e-4e5c-8058-75aab43e8775"
## The data will be encoded via label encoding this time to avoid adding unneccessary features with hot encoding
# Branch
# Change the column to factor then as numeric
df_fs2 <- df_tsne2
df_fs2$Branch <- factor(df_fs2$Branch)
df_fs2$Branch <- as.numeric(df_fs2$Branch)
# Same for gender, customer type, product line and payment
# Customer type
df_fs2$Customer.type <- factor(df_fs2$Customer.type)
df_fs2$Customer.type <- as.numeric(df_fs2$Customer.type)
# Gender
df_fs2$Gender <- factor(df_fs2$Gender)
df_fs2$Gender <- as.numeric(df_fs2$Gender)
# Product line
df_fs2$Product.line <- factor(df_fs2$Product.line)
df_fs2$Product.line <- as.numeric(df_fs2$Product.line)
#Payment
df_fs2$Payment <- factor(df_fs2$Payment)
df_fs2$Payment <- as.numeric(df_fs2$Payment)
head(df_fs2)
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="chZBqK2X1SiX" outputId="64b0fe02-c228-440c-f140-58a51e9bc9fe"
# Find factors with a high correlation. Cutoff set to 0.7
corrmatrix <- cor(df_fs2)
highcorr <- findCorrelation(corrmatrix, cutoff = 0.7)
highcorr
names(df_fs2[, highcorr])
# + [markdown] id="sFn7pxnp4J6F"
# The factors with high correlation include cogs, total and tax
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="8g6ji_al51xO" outputId="ee173fe8-ca19-4d4c-e2a2-0cfca75c301c"
# Dropping the highly correlated factors from the data
df_fs_clean <- df_fs2[-highcorr]
head(df_fs_clean)
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="sONrzlr962Iy" outputId="052433a7-a9a0-4ed2-fd07-7cbb0fa05397"
# Original correlation
corrplot(cor(df_fs2), order = 'hclust')
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="PvbzSJFM4HbS" outputId="4d0247d9-af54-42b5-f668-7d802603214a"
# Vizualize the new correlations
corrplot(cor(df_fs_clean), order = 'hclust')
# + [markdown] id="bJBJEy8e8Fft"
# The factors have been reduced to 8 from 12, which shows a great improvement in the correlation matrix.
# + [markdown] id="0AUS7FqO8fiX"
# ## 2. Wrapper Method
# + colab={"base_uri": "https://localhost:8080/"} id="Rxye1pr-8i0r" outputId="64613661-7db1-4d37-a5a1-2188b42fe6b1"
# Will use the mclust module to have the function tell us what factors to drop.
# It uses a Greedy Search type of search algorithm.
# Load the necessary libraries
library(clustvarsel)
library(mclust)
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="AL1P7DDvBgvT" outputId="ccdfbeec-bc5b-4f51-e633-f833becb75f5"
# Lets build a greedy search algo
greedy <- clustvarsel(df_fs2, G = 1 : 5)
greedy
# + [markdown] id="513oiRDuC8LB"
# The algorithm has suggested that only two features , (Product Line and Branch) will be used in the model and the others to be rejected.
#
# Lets build the cluster model
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="FeaZQIt7B30u" outputId="9d63ae04-6106-468f-94aa-6af9fca756c6"
# Build the mclust model
subset <- df_fs2[, greedy$subset]
model <- Mclust(subset, G = 1 : 5)
# summary of the built model
summary(model)
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="GCV1-ypRCQaP" outputId="4d4b2916-2aaa-4961-f65e-36dd50fb454a"
# The model plotted into clusters
plot(model, c('classification'))
# + [markdown] id="C7SzEQo2CxVS"
# The clusters have been built, using the two features selected by the greedy algorithm, therefore our reduction is a success!
# + [markdown] id="z8C4BqVtDeYi"
# # Association Analysis
# + [markdown] id="Omqyw2iQDizt"
# - Here is where things get interesting. Carrefour provided a separate dataset that had all transactions done by the customers.
#
# - The data will be used so as to find the most popular products and analyze the customers behaviour when picking certain products.
#
# - Vamos!
# + colab={"base_uri": "https://localhost:8080/"} id="7EkjPtnpD5kb" outputId="8a6518e6-247c-4456-bdd7-1b98a009e965"
# Load the data
# Will use arules library
library(arules)
# + colab={"base_uri": "https://localhost:8080/", "height": 104} id="ckbntI2XESZm" outputId="95cfa3e1-1fbb-446e-8f8a-3e9d570945c8"
# Load the data
# Will use read.transactions
trs <- read.transactions('/content/customer purchases.csv', sep = ',')
trs
# + [markdown] id="CmL-w5AnJM_0"
# We have 7500 transactions and 119 items
# + colab={"base_uri": "https://localhost:8080/"} id="NoqtaA8vJEQj" outputId="2d70d22b-40c5-4481-fd53-b8f19b0c2e8a"
# Lets see 5 transactions
inspect(trs[1:5])
# + colab={"base_uri": "https://localhost:8080/", "height": 451} id="qBHbvdVLJYyL" outputId="509bc0b1-8f6c-4468-90e2-27286c0c88d9"
# summary of the transactions
summary(trs)
# + [markdown] id="D_qg6CNrJd5h"
# We can see most popular item is mineral water, followed by eggs and spaghetti
# + colab={"base_uri": "https://localhost:8080/", "height": 568} id="iSzqzrYeJsmF" outputId="51395c9e-7a94-499b-9429-39b3257874d4"
# Viewing the items
items <- as.data.frame(itemLabels(trs))
colnames(items) <- "Items In the Supermarket"
#Preview 15
head(items, 15)
# + [markdown] id="HnEHcrA3L2L4"
# ### Most popular items (support)
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="dYKvq2H6L6r2" outputId="37fde662-1fa9-4b65-c4bb-145e0dbf33bb"
# Most 10 most popular items
itemFrequencyPlot(trs, col='blue', topN = 10)
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="Y83IGszrM7wu" outputId="a8dc2874-83cc-42fc-d0fa-c638a467c550"
# Items with 10% importance
itemFrequencyPlot(trs, support =0.1)
# + [markdown] id="44azuWpuNrJa"
# These are the most important items for carrefour customers.
# + [markdown] id="WW8P5raJN3yy"
# ### developing apriori Rules for comparison purposes
# + colab={"base_uri": "https://localhost:8080/"} id="dmyXaO7MNvD3" outputId="30095812-fcdf-4d3c-d45d-94316b3fdfb9"
rules <- apriori(trs, parameter = list(
support = 0.001,
confidence = 0.8
))
# + [markdown] id="fHl4Lzd3OQ_y"
# We have 74 rules
# + colab={"base_uri": "https://localhost:8080/"} id="pgs8xJU-OStA" outputId="29a0ae0b-1cf4-4ebd-e589-c4748dd4d09b"
# For comparison, lets build another ruled module and compare
rules2 <- apriori(trs, parameter = list(
support = 0.002,
confidence = 0.7
))
# + [markdown] id="Rt3VdQYDOlw8"
# Here there are 11 rules, therefore not efficient as the parameter's confidence is lowered and the support increased.
#
# We will use the first one
# + colab={"base_uri": "https://localhost:8080/", "height": 538} id="Se1IShzjOve8" outputId="8733b350-f596-4164-a196-757e82ddd463"
# Lets view our rules summary
summary(rules)
# + colab={"base_uri": "https://localhost:8080/"} id="UKO-fGztO5A4" outputId="36fd1fb8-fbf7-429a-9d2b-495a99aa504f"
# Lets view first 5 rules
inspect(rules[1:5])
# + [markdown] id="ymLoGR9KPPEd"
# In the first rule, if someone buys smoothie and spinach, then there is an 89% chance of the customer buying mineral water
# + [markdown] id="erhcght-Pd-r"
# ### Sorting the rules by confidence
# + colab={"base_uri": "https://localhost:8080/"} id="P6pYRiJoPbRZ" outputId="70e73799-1662-4bf1-ca06-c5b4f847f728"
rules <- sort(rules, by = 'confidence', decreasing = TRUE)
# Lets view rules lower than 100% confidence
inspect(rules[5:9])
# + [markdown] id="dEppmo2DP3MM"
# There is a 95% chance of a customer buying escalope after buying pasta
# + [markdown] id="2AEQJcovQAwc"
# ### Lets imagine Carrefour wants to boost their sales on milk as they have a deal with Brookside.
# + colab={"base_uri": "https://localhost:8080/"} id="LW_5A4k_P-IC" outputId="947c65a0-afbf-4c4e-cda5-51188327c1c2"
# Well see the most common items bought before a customer picks milk
milk <- subset(rules, subset = rhs %pin% 'milk')
# sort by confidence
milk <- sort(milk, by = 'confidence', decreasing = TRUE)
# Inspect first 5 commodities
inspect(milk[1 : 5])
# + [markdown] id="jdjwBFvzR_qw"
# We can see, customers buying cake, meatballs and water have the highest chances of picking milk, with a confidence of 100 %
#
# Milk should be placed around the said items.
#
# Other items to be considered includes black tea , hot dogs and ground beef
# + colab={"base_uri": "https://localhost:8080/"} id="pfBDV-bfRgXU" outputId="b6711289-66bc-41f6-b0e7-6f392a7e47df"
# Well see the most common items bought after a customer picks milk
milk2 <- subset(rules, subset = lhs %pin% 'milk')
# sort by confidence
milk2 <- sort(milk2, by = 'confidence', decreasing = TRUE)
# Inspect first 10 commodities
inspect(milk2[1 : 10])
# + [markdown] id="adZObWn7QxH-"
# Mineral water, spaghetti and shrimp should be placed around milk, to boost the sales of both commodities
# + [markdown] id="RqPz0lADRNo7"
# This can be done on a variety of commodities so as to boost the sales of the items and bring substancial profits to carrefour Kenya.
#
# Very interesting.
# + [markdown] id="S6jUNLn_S6oK"
# # Anomaly Detection
# + [markdown] id="qdqa6GsLS83z"
# As a Data Scientist, I will proceed to use some techniques so as to identify any inconsistencies within the company's day to day data, also known as anomalies
# + colab={"base_uri": "https://localhost:8080/"} id="WVbh1secSxy9" outputId="4e568e78-7502-4c5d-8ef2-5023ee2bad6d"
# Load anomalize library and tibbletime
library(anomalize)
library(tibbletime)
# + colab={"base_uri": "https://localhost:8080/"} id="owPhMF6ZiDWZ" outputId="4bd6ee74-4f2b-41e7-ec6a-0bddb051c146"
# Load the daily sales time series and coerce it into a time-aware tibble
# so that the anomalize pipeline below can decompose it.
sales3 <- read.csv('/content/sales time series.csv') %>%
# Parse the Date column (stored as m/d/Y strings) into Date objects
mutate(Date = as.Date(Date, format = '%m/%d/%Y')) %>%
# Convert the data frame to a tibbletime object indexed on Date
as_tbl_time(Date) %>%
# Regularise the series to one observation per day
as_period('daily')
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="VCQ8U3Naj99m" outputId="ad54de26-c965-43ac-e681-f9c13ab7dd9b"
# output first 6 records
head(sales3)
# + colab={"base_uri": "https://localhost:8080/"} id="g3XXRoTcaqFQ" outputId="7392f5eb-c142-422c-fd99-af21d21188d5"
# Flag anomalous days: decompose the series, test the remainder, and
# rebuild the anomaly bounds on the original scale.
sales3_out <- sales3 %>%
# Split Sales into trend/seasonal/remainder components
# (merge = TRUE keeps the original columns alongside the decomposition)
time_decompose(Sales, merge = TRUE) %>%
# Mark remainder points that fall outside the expected band as anomalies
anomalize(remainder) %>%
# Recompose upper/lower bounds so anomalies can be plotted vs. observed values
time_recompose()
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="GBvdreFognLx" outputId="602a3a68-2ef8-419c-b5da-1c5607767835"
# Plotting the outliers
sales3_out %>%
plot_anomalies()
# + [markdown] id="69VHHOb1lgcq"
# Woohoo! No anomalies detected in the company's data. Safe to say no fraud is going on
| Carrefour_Data_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The CRISP-DM method will be applied here to keep track of the analysis process
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import seaborn as sns
import geopandas
# # 1.Business Understanding
#
# I downloaded Berlin Airbnb dataset via http://insideairbnb.com/get-the-data.html.
#
# For this project, I have been interested in using Berlin Airbnb dataset.
#
# I will focus on these questions below:
#
# 1)Which place is the average most expensive accommodation in Berlin?
#
# 2)What is the most expensive price on a room type basis?
#
# 3)What is the distribution of prices on the basis of latitude and longitude?
#
# 4)What is the most correlated three variable with price?
#
# 5)Please you can create a model about make a prediction of room price.
#
# # Berlin City Map
#
# # Resource: http://ontheworldmap.com/germany/city/berlin/berlin-districts-map.html
#
# # 2.Data Understanding
# Berlin city Maps is located on below side.
#Downloaded https://gist.github.com/pfloh/ae03cdabca0c822d5283 geojson and convert name to Berlin_map
countries_gdf = geopandas.read_file("Berlin_map.geojson")
countries_gdf.plot()
# load dataset
listings_df = pd.read_csv('listings.csv')
listings_df.head()
# number of data
listings_df.shape[0]
# data format for each column
listings_df.info()
# What kind of information is provided?
listings_df.columns
#describe
listings_df.describe()
# # 3. Prepare Data
# There are some necessary steps to apply before continuing to explore the dataset:
#
# Drop unused columns
#
# Convert string values to number
#
# Handle missing values, drop them if necessary
# +
# Drop unused columns
columns_to_drop = ['host_name','last_review']
listings_df.drop(columns_to_drop, axis=1, inplace=True)
# -
# Convert string to number
def listings_df_room_type(listings_df):
    """
    Convert room_type from words to integer for calculating the mean.

    Parameters:
        listings_df: a dataframe with a 'room_type' column
    Returns:
        dataframe: the same dataframe with an added measurable
        'room_type_num' column (NaN where room_type is missing/unknown)
    """
    room_type_map = {
        'Entire home/apt': 1,
        'Hotel room': 2,
        'Private room': 3,
        'Shared room': 4,
    }
    # Series.map leaves unmapped values (including NaN) as NaN. This is what
    # the original `x == np.nan` guard intended but never achieved: NaN never
    # compares equal to itself, so that branch was dead code, and unknown
    # strings would have raised KeyError.
    listings_df['room_type_num'] = listings_df['room_type'].map(room_type_map)
    return listings_df
listings_df_room_type(listings_df)
listings_df.head()
# Convert string to number
def listings_df_neighbourhood_group(listings_df):
    """
    Convert neighbourhood_group from words to integer for calculating the mean.

    Parameters:
        listings_df: a dataframe with a 'neighbourhood_group' column
    Returns:
        dataframe: the same dataframe with an added measurable
        'neighbourhood_group_num' column (NaN where the group is missing/unknown)
    """
    neighbourhood_group_map = {
        'Friedrichshain-Kreuzberg': 1,
        'Mitte': 2,
        'Pankow': 3,
        'Neukölln': 4,
        'Charlottenburg-Wilm.': 5,
        # NOTE(review): '<NAME>' looks like an anonymization artifact in the
        # source — restore the real district name (one of the remaining
        # Berlin boroughs) before relying on this mapping.
        '<NAME>': 6,
        'Lichtenberg': 7,
        'Treptow - Köpenick': 8,
        'Steglitz - Zehlendorf': 9,
        'Reinickendorf': 10,
        'Spandau': 11,
        'Marzahn - Hellersdorf': 12,
    }
    # Series.map leaves unmapped values (including NaN) as NaN; the original
    # `x == np.nan` guard never fired (NaN is not equal to itself) and unknown
    # strings raised KeyError.
    listings_df['neighbourhood_group_num'] = listings_df['neighbourhood_group'].map(neighbourhood_group_map)
    return listings_df
listings_df_neighbourhood_group(listings_df)
listings_df.head()
# missing values
listings_df.isnull().sum()
# +
# drop columns
missing_columns_to_drop = ['name','reviews_per_month']
listings_df.drop(missing_columns_to_drop, axis=1, inplace=True)
# -
# missing values check
listings_df.isnull().sum()
# # 4. Answer Questions base on dataset
# ### Question 1
# Which place is the average most expensive accommodation in Berlin?
listings_df.neighbourhood_group.value_counts()
# +
red_square = dict(markerfacecolor='salmon', markeredgecolor='salmon', marker='.')
listings_df.boxplot(column='price', by='neighbourhood_group',
flierprops=red_square, vert=False, figsize=(10,8))
plt.xlabel('\nMedian Price', fontsize=12)
plt.ylabel('District\n', fontsize=12)
plt.title('\nBoxplot: Prices by Neighbourhood\n', fontsize=14, fontweight='bold')
# get rid of automatic boxplot title
plt.suptitle('');
# -
def display_bar_chart(df, column, title):
    '''
    Displays a bar chart with a title
    Parameters:
        df: a dataframe
        column: the column which we want to show
        title: the title of the chart
    Returns:
        None
    '''
    # Share of rows per value, restricted to the 20 most frequent values.
    counts = df[column].value_counts()
    top_share = counts[:20] / df.shape[0]
    top_share.plot(kind="bar")
    plt.title(title)
#Provide a pandas series of the counts for each Price
display_bar_chart(listings_df, "price", "What are the most twenty demanded room price in Berlin Airbnb ?")
#Provide a pandas series of the counts for each neighbourhood_group
display_bar_chart(listings_df, "neighbourhood_group", "Where are the most demanded rooms place in Berlin Airbnb ?")
#Compare selected neighbourhood_group based on average room price
Compare=listings_df.groupby(['neighbourhood_group'])['price'].mean()
Compare.plot(kind="bar")
Compare
# Charlottenburg-Wilm. is on average the most expensive accommodation area in Berlin
# ### Question 2
# What is the most expensive price on a room type basis?
listings_df.room_type.value_counts()
#Compare selected room type based on average price
compare_room_type=listings_df.groupby(['room_type'])['price'].max()
compare_room_type.plot(kind='bar')
compare_room_type
# Entire home/apt is most expensive price on a room type basis. This price is 9.000€.
# ### Question 3
# What is the distribution of prices on the basis of latitude and longitude?
plot=listings_df.plot(kind="scatter", x="longitude", y="latitude", alpha=0.6, figsize=(8,6),
c="price", cmap="plasma", colorbar=True ,sharex=False);
plot
# Distribution of prices is on the basis of latitude and longitude on above side.
# ### Question 4
# What is the most correlated three variable with price?
ax = plt.subplots( figsize=(10,10) )
sns.heatmap(listings_df.corr(),annot=True,linewidths=1)
plt.show()
# Availability_365 is that the most correlated with price. Correlation is 0.12.
# ### Question 5
# Please you can create a model about make a prediction of room price.
from sklearn import datasets, linear_model
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
listings_df.columns
X=listings_df.drop(['id', 'host_id', 'neighbourhood_group', 'neighbourhood','room_type','price'],axis=1)
y=listings_df['price']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
print('X_train Shape:', X_train.shape)
print('y_train Shape:', y_train.shape)
print('X_test Shape:', X_test.shape)
print('y_test Shape:', y_test.shape)
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
estimators = [2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80,100]
# Instantiate model with 2-100 decision trees
for i in estimators:
rf = RandomForestRegressor(n_estimators = i, random_state = 42)
# Train the model on training data
rf.fit(X_train, y_train);
print('estimators:',i)
# Use the forest's predict method on the test data
predictions = rf.predict(X_test)
# Calculate the absolute errors
errors = abs(predictions - y_test)
# Print out the mean absolute error (mae)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
# We can choose estimators:100 and create model again.
# Instantiate model with 100 decision trees
rf = RandomForestRegressor(n_estimators = 100, random_state = 42)
# Train the model on training data
rf.fit(X_train, y_train);
# Use the forest's predict method on the test data
predictions = rf.predict(X_test)
# Calculate the absolute errors
errors = abs(predictions - y_test)
# Print out the mean absolute error (mae)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
# # 5. Conclusion
# We can create a model via Random Forest Regressor for prediction price
#
# We can say that room price is higher than other place at city of centre.
#
# We can observe that customers demand to Entire home/apt and Private room in Airbnb.
#
# The most popular places for accommodation in Berlin on Airbnb are Friedrichshain-Kreuzberg and Mitte.
#
# If you stay in Mitte, you can visit this link https://www.airbnb.com.tr/a/Mitte--Germany
#
# If you stay in Friedrichshain-Kreuzberg, you can visit this link https://www.airbnb.com.tr/s/Friedrichshain~Kreuzberg--Berlin--Germany
# # 6. References
#
# Airbnb Berlin, Germany http://insideairbnb.com/get-the-data.html.
#
# Date Compiled:19 September, 2019
#
# Berlin Map http://ontheworldmap.com/germany/city/berlin/berlin-districts-map.html
#
# Berlin Map Geojson https://gist.github.com/pfloh/ae03cdabca0c822d5283
| Berlin_Airbnb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
In this notebook, one example of simon's algorithm will be modeled using both IBM Qiskit and pgmpy.
The results are compared at the end.
"""
# +
# Comment out these lines
import sys
sys.path.insert(0, 'C:\\Users\\masch\\QuantumComputing\\QCompMAS\\pgmpy')
# Imports (some imports may not be necessary)
import numpy as np
from qiskit import IBMQ, Aer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, assemble, transpile
from qiskit.visualization import plot_histogram
import qiskit.quantum_info as qi
# set the length of the n-bit input string.
n = 2
m = 2
# Construct the oracle
oracle = QuantumCircuit(n+m)
oracle.barrier()
oracle.cx(0,2)
oracle.cx(0,3)
oracle.cx(1,2)
oracle.cx(1,3)
oracle.barrier()
oracle.draw()
# -
# The next 3 cells show the construction of the entire circuit
simonAlg = QuantumCircuit(n+m,n+m)
simonAlg.barrier()
for qubit in range(n):
simonAlg.h(qubit)
simonAlg += oracle
simonAlg.draw()
for qubit in range(n):
simonAlg.h(qubit)
simonAlg.barrier()
simonAlg.draw()
# +
# Here, we obtain the state vector for the system after the cx gate
simon = qi.Statevector.from_instruction(simonAlg)
simonVec = simon.__array__()
# The format of the state vector is [|0000>, |1000>, |0100>, |1100>, ...]
print(simonVec)
# +
# Imports
from pgmpy.models import BayesianNetwork
from pgmpy.factors.discrete.CPD import TabularCPD
import numpy as np
from pgmpy.inference import VariableElimination
# Bayesian Network for Simon's Algorithm
simon = BayesianNetwork([('q0m0', 'q0m1'), ('q0m1', 'q0m2'), ('q1m0', 'q1m1'), ('q1m1', 'q1m2'), ('q2m0', 'q2m1'), ('q2m1', 'q2m2'), ('q3m0', 'q3m1'), ('q3m1', 'q3m2'), ('q0m1', 'q2m2'), ('q0m1', 'q3m2'), ('q1m1', 'q2m2'), ('q1m1', 'q3m2'), ('q0m2', 'q0m3'), ('q1m2', 'q1m3')])
# Conditional Amplitude Tables
cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[1], [0]])
cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[1], [0]])
cpd_q2m0 = TabularCPD(variable = 'q2m0', variable_card = 2, values = [[1], [0]])
cpd_q3m0 = TabularCPD(variable = 'q3m0', variable_card = 2, values = [[1], [0]])
cpd_q0m1 = TabularCPD(variable='q0m1', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]], evidence = ['q0m0'], evidence_card = [2])
cpd_q1m1 = TabularCPD(variable='q1m1', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]], evidence = ['q1m0'], evidence_card = [2])
cpd_q2m1 = TabularCPD(variable='q2m1', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q2m0'], evidence_card = [2])
cpd_q3m1 = TabularCPD(variable='q3m1', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q3m0'], evidence_card = [2])
cpd_q0m2 = TabularCPD(variable='q0m2', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q0m1'], evidence_card = [2])
cpd_q1m2 = TabularCPD(variable='q1m2', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q1m1'], evidence_card = [2])
cpd_q2m2 = TabularCPD(variable='q2m2', variable_card = 2, values = [[1,0,0,1,0,1,1,0],[0,1,1,0,1,0,0,1]], evidence = ['q0m1', 'q1m1', 'q2m1'], evidence_card = [2,2,2])
cpd_q3m2 = TabularCPD(variable='q3m2', variable_card = 2, values = [[1,0,0,1,0,1,1,0],[0,1,1,0,1,0,0,1]], evidence = ['q0m1', 'q1m1', 'q3m1'], evidence_card = [2,2,2])
cpd_q0m3 = TabularCPD(variable='q0m3', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]], evidence = ['q0m2'], evidence_card = [2])
cpd_q1m3 = TabularCPD(variable='q1m3', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]], evidence = ['q1m2'], evidence_card = [2])
# Add the conditional amplitude tables (CPD objects) to the Bayesian network
simon.add_cpds(cpd_q0m0, cpd_q1m0, cpd_q2m0, cpd_q3m0, cpd_q0m1, cpd_q1m1, cpd_q2m1, cpd_q3m1, cpd_q0m2, cpd_q1m2, cpd_q2m2, cpd_q3m2, cpd_q0m3, cpd_q1m3)
simonInfer = VariableElimination(simon)
# Print Results of Variable Elimination (pay attention to the ordering of variables)
q = simonInfer.query(['q0m3', 'q1m3', 'q2m2', 'q3m2'])
print(q)
# +
# Obtain the ordering of the variables in the display above, as well as their values
qVars = q.variables
qValues = q.values
print(qVars)
print(qValues)
# +
def bitListBack(n):
    """Return every n-bit string in counting order, with each string's bits reversed.

    Qiskit labels basis states with the qubit-0 bit last, so reversing puts
    the labels in the same order as the state-vector entries.
    """
    width = "0" + str(n) + "b"
    return [format(value, width)[::-1] for value in range(2 ** n)]

def QiskitDict(stateVec, n):
    """Map Qiskit-style (bit-reversed) basis labels to rounded state-vector amplitudes."""
    labels = bitListBack(n)
    return {label: np.round(stateVec[idx], 4) for idx, label in enumerate(labels)}
print("simon's algorithm")
print(QiskitDict(simonVec,4))
# +
# Obtain the ordering of the variables in the display above, as well as their values
valArr = q.variables
valuesArr = q.values
def create_var_order(orderArr):
    """Pull the qubit-index character out of each 4-character variable name.

    Names such as 'q0m3' contribute their second character ('0'); any name
    that is not exactly 4 characters long repeats the previously seen index
    (0 before any 4-character name has appeared).
    """
    indices = []
    last_index = 0
    for name in orderArr:
        last_index = name[1] if len(name) == 4 else last_index
        indices.append(last_index)
    return indices
def bitList(n):
    """Return all n-bit binary strings, zero-padded, in ascending counting order."""
    width = "0" + str(n) + "b"
    return [format(value, width) for value in range(2 ** n)]
def columnize(listOfBits):
    """Transpose a list of equal-length bit strings into per-position columns.

    Column i holds character i of every input string, in input order.
    """
    width = len(listOfBits[0])
    return [[bits[pos] for bits in listOfBits] for pos in range(width)]
def reform():
    """Rebuild basis-state bit strings with columns permuted to qubit order.

    Reads the module-level `valArr` (the variable ordering produced by the
    pgmpy query) and returns one bit string per basis state, with bit
    position j apparently corresponding to qubit j — TODO confirm against
    the variable-naming convention ('q<idx>m<step>').
    """
    varOrderArr = create_var_order(valArr)
    listOfBits = bitList(len(varOrderArr))
    columns = columnize(listOfBits)
    # Place each variable's bit column at the slot given by its qubit index.
    rearrangedColumns = [None]*len(columns)
    for index, order in enumerate(varOrderArr):
        rearrangedColumns[int(order)] = columns[int(index)]
    numOfCols = len(rearrangedColumns)
    bitStr = ""
    finalBitArr = []
    # Read the permuted columns back row-wise into whole bit strings.
    for bitIndex in range(len(rearrangedColumns[0])):
        for num in range(numOfCols):
            bitStr+=str(rearrangedColumns[num][bitIndex])
        finalBitArr.append(bitStr)
        bitStr = ""
    return finalBitArr
def createHashTable():
    """Pair the reordered bit-string labels with pgmpy's rounded factor values.

    Reads the module-level `valuesArr` (the values array from the variable
    elimination query); its flattened entries are assumed to line up with
    the label order produced by reform() — TODO confirm.
    """
    resHash = {}
    bitOrder=reform()
    valuesFlat = valuesArr.flatten()
    for index, key in enumerate(bitOrder):
        resHash[key] = np.round(valuesFlat[index], 4)
    return resHash
PgmpyHash = createHashTable()
print(PgmpyHash == QiskitDict(simonVec,4))
print(PgmpyHash)
print(QiskitDict(simonVec,4))
# -
| Notebooks/SimonAlg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **평균 손실 함수**
#
# $$ E = -\frac{1}{N}\sum_{n}\sum_{k} t_{nk} \log y_{nk} $$
#
# N으로 나눔으로써 '평균 손실 함수'를 구하는 것이다. 이렇게 평균을 구해 사용하면 훈련 데이터 개수와 상관없이 언제든 동일한 지표를 얻을 수 있다.
#
# 신경망 학습에서도 훈련데이터의 일부를 추려 전체의 근사치로 이용할 수 있다. 신경망 학습에서도 훈련데이터로부터 일부만 골라 학습을 수행한다.
# 이 일부를 **미니 배치**라고 한다. 가령 60000자의 훈련 데이터 중에서 100장을 무작위로 뽑아 그 100장만을 사용하여 학습하는 것이다.
# 이러한 학습 방법을 **미니배치 학습** 이라고 한다.
# +
import sys,os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
(x_train, t_train),(x_test, t_test)= load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape) #훈련데이터는 700, 입력데이터 784
print(t_train.shape) #정답레이블은 10줄
# +
#무작위로 10장만 빼내려면 어떻게 하면 될까
train_size=x_train.shape[0]
batch_size=10
batch_mask=np.random.choice(train_size, batch_size)
x_batch=x_train[batch_mask]
t_batch=t_train[batch_mask]
# -
np.random.choice(60000,10)
# +
#배치용 교차 엔트로피
def cross_entropy_error(y, t):
    """Batch cross-entropy error for one-hot encoded labels.

    y: network output (probabilities), shape (batch, classes) or (classes,)
    t: one-hot ground-truth labels, same shape as y
    Returns the mean cross-entropy over the batch.

    Fix: ndarrays expose `ndim`, not `dim`; the original `y.dim` raised
    AttributeError for every input.
    """
    if y.ndim == 1:  # promote a single sample to a batch of one
        t = t.reshape(1, t.size)  # t: ground-truth label
        y = y.reshape(1, y.size)  # y: network output
    batch_size = y.shape[0]
    # 1e-7 guards against log(0) when a predicted probability is exactly zero.
    return -np.sum(t * np.log(y + 1e-7)) / batch_size
# +
#정답 레이블이 원핫 인코딩이 아니라 '2','7'등의 숫자 레이블로 주어졌을때 교차 엔트로피
def cross_entropy_error(y, t):
    """Batch cross-entropy error when t holds integer class labels (e.g. 2, 7).

    y: network output (probabilities), shape (batch, classes) or (classes,)
    t: integer class labels, one per sample
    Returns the mean cross-entropy over the batch.

    Fixes vs the original: `y.dim` -> `y.ndim` (ndarray attribute name),
    undefined `t_size`/`y_size` -> `t.size`/`y.size`, and the typo
    `np.arrange` -> `np.arange`.
    """
    if y.ndim == 1:  # promote a single sample to a batch of one
        t = t.reshape(1, t.size)  # t: ground-truth label
        y = y.reshape(1, y.size)  # y: network output
    batch_size = y.shape[0]
    # Fancy indexing picks, for each sample, the probability of its true class;
    # np.arange(batch_size) yields the row indices 0 .. batch_size-1.
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
# -
# t가 0일 때는 교차 엔트로피도 0이기 때문에 그 계산은 무시해도 좋다
# **왜 정확도가 아닌 손실함수를 사용해야 할까?**
#
# 정확도를 지표로 하면 매개변수의 미분이 대부분의 장소에서 0이 되기 때문이다. 정확도는 불연속적인 띄엄띄엄한 값으로 바뀌어버리고 손실함수의 값은 연속적으로 변화한다. 이는 계단 함수를 활성화 함수로 사용하지 않는 이유이기도 하다.
# +
#미분 계산 예제
#나쁜 구현의 예
def numerical_diff(f, x):
    # Deliberately bad example (kept broken on purpose for the text below):
    # h = 10e-50 (i.e. 1e-49) is so small that f(x+h) == f(x) in float64,
    # so the forward difference always evaluates to 0.0.
    h=10e-50
    return (f(x+h)-f(x))/h
# -
# 1. 반올림 오차를 발생시킨다. 반올림 오차는 소숫점 8자리 이하가 생략되어 최종 결과에 오차가 생긴다.
#
# 2. 애당초 오차가 있기 때문에 진정한 미분이 아니다.
def numerical_diff(f, x):
    """Central-difference approximation of the derivative f'(x).

    f: a function of one variable
    x: the point at which to approximate the derivative

    Fix: the original signature was (x, y) but the body used an undefined
    name `f` and never used `y`; callers pass (function, point), so the
    parameters are restored to the textbook form (f, x).
    """
    h = 1e-4  # small, but large enough to avoid float64 rounding loss
    return (f(x + h) - f(x - h)) / (2 * h)
| ch04/04_05_20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#initialization
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg' # Makes the images look nice
import numpy as np
import math
from os import path
# importing Qiskit
from qiskit import IBMQ, Aer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
# import basic plot tools
from qiskit.visualization import plot_histogram
# +
def phase_kickback(circ, t, phase):
    '''
    First part of PEA: prepare the counting register and kick the phase back.

    Each of the t counting qubits is put into superposition with a Hadamard,
    then receives 2**(t-1-i) controlled-phase rotations of angle 2*pi*phase
    targeting the last qubit (index -1, the eigenstate register), so qubit i
    accumulates the phase scaled by 2**(t-1-i).
    '''
    for i in range(t):
        circ.h(i)
        # Repeated controlled-U applications; the power halves as i grows.
        for j in range(2**(t-1-i)):
            circ.cu1(2*np.pi*phase, i, -1)
    circ.barrier()
def inv_qft(circ, t):
    '''
    Second part of PEA: apply the inverse QFT to the first register of t qubits.

    Mirrors the textbook QFT run backwards: first undo the qubit-order swaps,
    then, for each qubit, apply the negative-angle controlled rotations
    followed by a Hadamard.
    '''
    # t // 2 replaces the original math.floor(int(t/2)): exact integer
    # arithmetic instead of a redundant float divide + double truncation.
    for i in range(t // 2):
        circ.swap(i, t - 1 - i)
    for i in range(t):
        for j in range(i):
            # Inverse controlled-phase rotation of angle -2*pi / 2**(i+1-j).
            circ.cu1(-2 * np.pi / 2**(i + 1 - j), t - j - 1, t - 1 - i)
        circ.h(t - i - 1)
    circ.barrier()
# UTILITY FUNCTIONS
def bf_to_dec(s):
    '''
    Interpret the bit string s as a binary fraction 0.s and return its
    decimal value (e.g. "101" -> 0.5 + 0.125 = 0.625).
    '''
    return sum((int(bit) * 2.0 ** -(pos + 1) for pos, bit in enumerate(s)), 0.0)
return n
def p(theta, phase, n_qubits=None):
    '''
    Theoretical PEA outcome probability when the phase has no exact binary
    fraction representation.

    - theta: candidate phase estimate (k / 2**n_qubits for outcome k)
    - phase: the true phase being estimated
    - n_qubits: size of the counting register; defaults to the notebook's
      global `t` so existing two-argument calls keep working, but can now
      be passed explicitly instead of relying on the hidden global.
    '''
    if n_qubits is None:
        n_qubits = t  # backward-compatible fallback to the global register size
    return np.sin(2**n_qubits*np.pi*(theta-phase))**2/(2**(2*n_qubits)*np.sin(np.pi*(theta-phase))**2)
# +
# PEA implementation
phase = 1./3
t = 4 # number of qubits for storing the phase estimation
circ = QuantumCircuit(t+1, t)
circ.x(-1) # Prepare the eigenstate
phase_kickback(circ, t, phase)
inv_qft(circ, t)
# measure the first register
# reverse measuring order to get correct formatting
for i in range(t):
circ.measure(i, t-1-i)
#circ.draw(output='mpl')
# +
# RUN THIS BLOCK FOR SIMULATING
backend = Aer.get_backend('qasm_simulator')
shots = 2048
results = execute(circ, backend=backend, shots=shots).result()
answer = results.get_counts()
# %config InlineBackend.figure_format = 'svg'
plot_histogram(answer, title=r'$\phi = {}$'.format(phase), color='C0')
# -
# RUN THIS BLOCK FOR RUNNING THE CIRCUIT ON A REAL QUANTUM COMPUTER
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend_qc = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= t+1 and
not x.configuration().simulator and x.status().operational==True))
print("least busy backend: ", backend_qc)
# +
provider = IBMQ.load_account()
backend_qc = provider.backends.ibmq_essex
job_exp = execute(circ, backend=backend_qc, shots=shots)
job_monitor(job_exp)
result = job_exp.result()
answer = result.get_counts()
plot_histogram(answer, title=r'$\phi = {}$'.format(phase), color='C0').savefig('PEA-essex-bf-t3')
# +
#plt.style.use('seaborn')
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height.

    Heights are rounded to two decimals; labels are drawn 3 points above
    the bar via the current pyplot figure (relies on the module-level plt).
    """
    for rect in rects:
        height = rect.get_height()
        plt.annotate('{}'.format(round(height,2)),
                     xy=(rect.get_x() + rect.get_width() / 2, height),
                     xytext=(0, 3),  # 3 points vertical offset
                     textcoords="offset points",
                     ha='center', va='bottom', fontsize=8)
answer_dec = {bf_to_dec(key):value/shots for key, value in zip(answer.keys(), answer.values())}
t_answer_dec = {key:p(key, phase) for key in answer_dec.keys()}
plt.rc('axes', axisbelow=True)
plt.grid(axis='y', linestyle='--')
rect = plt.bar(answer_dec.keys(), answer_dec.values(), width=0.05, label='Experimental')
autolabel(rect)
plt.bar(t_answer_dec.keys(), t_answer_dec.values(), alpha=0.5, width=0.025, color='red', label='Theoretical')
plt.xticks(list(answer_dec.keys()), list(answer.keys()), rotation=75)
plt.legend()
plt.ylabel('Probabilities')
plt.savefig('PEA-essex-nbf-t4')
# -
# MAJORITY RULE: take the most probable measured value as the phase estimate.
print('MAJORITY RULE')
print()
est_phase = max(answer_dec, key=answer_dec.get)
print('Estimated phase: {}'.format(est_phase))
print('Probability: {}'.format(max(answer_dec.values())))
print('Phase: {}'.format(round(phase, 3)))
acc = np.abs(est_phase - phase)
# Fix: the original passed an undefined name `acc_n` as an extra format
# argument, which raised a NameError before anything was printed.
print('Accuracy: {}'.format(round(acc, 3)))
print('2^-(t+1) = {}'.format(2**(-(t+1))))
# +
# AVERAGE RULE: Use circular statistics to get an estimation
def mu(phase):
    # Circular-statistics mean direction of the PEA outcome distribution,
    # expressed in turns. Uses arctan, so the result is only correct up to
    # the quadrant; mu2 below is the quadrant-correct variant.
    # Relies on the notebook-level counting-register size t.
    A = 2**t -1
    return np.arctan((A*np.sin(2*np.pi*phase)-np.sin(A*2*np.pi*phase))/(A*np.cos(2*np.pi*phase) + np.cos(A*2*np.pi*phase)))/(2*np.pi)

def mu2(phase):
    # Quadrant-correct version of mu, using arctan2 on the (sin, cos) pair.
    A = 2**t -1
    return np.arctan2((A*np.sin(2*np.pi*phase)-np.sin(A*2*np.pi*phase)), (A*np.cos(2*np.pi*phase)+ np.cos(A*2*np.pi*phase)))/(2*np.pi)

def rho(phase):
    # Mean resultant length of the distribution (circular concentration);
    # values near 1 mean the outcomes cluster tightly around the mean direction.
    A = 2**t - 1
    return np.sqrt(4**(-t)*(4**t - 2**(t+1)+2+2*A*np.cos(2**(t+1)*np.pi*phase)))
# First trigonometric moment about the mean direction
theta_1 = np.sum([value*np.exp(2*np.pi*1j*key) for key, value in zip(answer_dec.keys(), answer_dec.values())])
est_phase = np.angle(theta_1)/(2*np.pi)
print('Estimated phase: {}'.format(est_phase))
print('Phase: {}'.format(round(phase, 3)))
print('Accuracy: {}'.format(round(np.abs(est_phase - phase), 3)))
print('2^-(t+2) = {}'.format(2**(-(t+2))))
mrl = np.absolute(theta_1)
sigma = np.sqrt(-2*np.log(mrl))/(2*np.pi)
print(sigma)
# -
# # Simulation with noise
# +
# Load our saved IBMQ accounts and get the least busy backend device with less than or equal to n qubits
IBMQ.load_account()
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
provider = IBMQ.get_provider(hub='ibm-q')
backend_qc = provider.get_backend('ibmq_ourense')
#backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= t+1 and not x.configuration().simulator and x.status().operational==True))
print("backend: ", backend_qc)
# Run with 2048 shots
shots = 2048
# +
# Build noise model from backend properties
noise_model = NoiseModel.from_backend(backend_qc)
# Get coupling map from backend
coupling_map = backend_qc.configuration().coupling_map
# Get basis gates from noise model
basis_gates = noise_model.basis_gates
# Perform a noise simulation
result = execute(circ, Aer.get_backend('qasm_simulator'),
coupling_map=coupling_map,
basis_gates=basis_gates,
noise_model=noise_model).result()
answer = result.get_counts(0)
plot_histogram(answer)
# +
answer_dec = {bf_to_dec(key):value/shots for key, value in zip(answer.keys(), answer.values())}
t_answer_dec = {key:p(key, phase) for key in answer_dec.keys()}
plt.bar(answer_dec.keys(), answer_dec.values(), width=0.08, label='Experimental')
plt.bar(t_answer_dec.keys(), t_answer_dec.values(), alpha=0.5, width=0.04, color='red', label='Theoretical')
plt.xticks(list(answer_dec.keys()), list(answer.keys()),rotation=75)
plt.legend()
plt.ylabel('Probabilities')
# -
# MAJORITY RULE: Take the most probable value as the estimation
print('MAJORITY RULE')
print()
est_phase = max(answer_dec, key=answer_dec.get)
print('Estimated phase: {}'.format(est_phase))
print('Phase: {}'.format(round(phase, 3)))
print('Accuracy: {}'.format(round(np.abs(est_phase - phase), 3)))
print('2^-(t+1) = {}'.format(2**(-(t+1))))
# +
# First trigonometric moment about the mean direction
theta_1 = np.sum([value*np.exp(2*np.pi*1j*key) for key, value in zip(answer_dec.keys(), answer_dec.values())])
est_phase = np.abs(np.angle(theta_1)/(2*np.pi))
print('Estimated phase: {}'.format(est_phase))
print('Phase: {}'.format(round(phase, 3)))
print('Accuracy: {}'.format(round(np.abs(est_phase - phase), 3)))
print('2^-(t+2) = {}'.format(2**(-(t+2))))
mrl = np.absolute(theta_1)
sigma = np.sqrt(-2*np.log(mrl))/(2*np.pi)
print(sigma)
# -
# # Running in QC
# +
def save(answer, filename='test'):
    '''
    Persist a results dictionary from a circuit run to "<filename>.npy".

    The dictionary is stored as a two-row array: row 0 holds the keys and
    row 1 the matching values.
    '''
    rows = [list(answer.keys()), list(answer.values())]
    np.save(filename, rows)

def load(filename='test'):
    '''Rebuild the dictionary written by save(); values come back as floats.'''
    stored = np.load(filename + '.npy')
    return {k: float(v) for k, v in zip(stored[0], stored[1])}
filename = '{}-p{}-t{}'.format(backend, round(phase, 3), t)
if path.isfile(filename + '.npy'):
answer = load(filename)
else:
job = execute(circ, backend=backend_qc, shots=2048, optimization_level=3)
job_monitor(job)
# get the results from the computation
results = job.result()
answer = results.get_counts(circ)
save(answer, filename)
# %config InlineBackend.figure_format = 'svg'
plot_histogram(answer)
# +
answer_dec = {bf_to_dec(key):value/shots for key, value in zip(answer.keys(), answer.values())}
t_answer_dec = {key:p(key, phase) for key in answer_dec.keys()}
plt.bar(answer_dec.keys(), answer_dec.values(), width=0.08, label='Experimental')
plt.bar(t_answer_dec.keys(), t_answer_dec.values(), alpha=0.5, width=0.04, color='red', label='Theoretical')
plt.xticks(list(answer_dec.keys()), list(answer.keys()),rotation=75)
plt.legend()
plt.ylabel('Probabilities')
| PEA/PEA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import imageio
#im = imageio.imread('QRCoin.png')
im = plt.imread('QRCoin.png')
print(im.shape)
plt.imshow(im)
# -
from PIL import Image
# Create an Image object from an Image
colorImage = Image.open("QRCoin.png")
# Rotate it by 45 degrees
rotated = colorImage.rotate(45)
coin_area = (80, 80, 280, 250)
cropped = colorImage.crop(coin_area)
cropped.show()
rotated = cropped.rotate(2)
rotated.show()
| Python/QR/.ipynb_checkpoints/ImageQRProcessing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Matrix Factorization for Recommender Systems - Part 1
# **Table of contents of this tutorial series on matrix factorization for recommender systems:**
#
# - [Part 1 - Traditional Matrix Factorization methods for Recommender Systems](/examples/matrix-factorization-for-recommender-systems-part-1)
# - [Part 2 - Factorization Machines and Field-aware Factorization Machines](/examples/matrix-factorization-for-recommender-systems-part-2)
# - [Part 3 - Large scale learning and better predictive power with multiple pass learning](/examples/matrix-factorization-for-recommender-systems-part-3)
# ## Introduction
# A [recommender system](https://en.wikipedia.org/wiki/Recommender_system) is a software tool designed to generate and suggest items or entities to the users. Popular large scale examples include:
#
# - Amazon (suggesting products)
# - Facebook (suggesting posts in users' news feeds)
# - Spotify (suggesting music)
#
# Social recommendation from graph (mostly used by social networks) are not covered in `river`. We focus on the general case, item recommendation. This problem can be represented with the user-item matrix:
#
# $$
# \normalsize
# \begin{matrix}
# & \begin{matrix} _1 & _\cdots & _\cdots & _\cdots & _I \end{matrix} \\
# \begin{matrix} _1 \\ _\vdots \\ _\vdots \\ _\vdots \\ _U \end{matrix} &
# \begin{bmatrix}
# {\color{Red} ?} & 2 & \cdots & {\color{Red} ?} & {\color{Red} ?} \\
# {\color{Red} ?} & {\color{Red} ?} & \cdots & {\color{Red} ?} & 4.5 \\
# \vdots & \ddots & \ddots & \ddots & \vdots \\
# 3 & {\color{Red} ?} & \cdots & {\color{Red} ?} & {\color{Red} ?} \\
# {\color{Red} ?} & {\color{Red} ?} & \cdots & 5 & {\color{Red} ?}
# \end{bmatrix}
# \end{matrix}
# $$
#
# Where $U$ and $I$ are the number of user and item of the system, respectively. A matrix entry represents a user's preference for an item, it can be a rating, a like or dislike, etc. Because of the huge number of users and items compared to the number of observed entries, those matrices are very sparsed (usually less than 1% filled).
#
# [Matrix Factorization (MF)](https://en.wikipedia.org/wiki/Matrix_factorization_(recommender_systems)) is a class of [collaborative filtering](https://en.wikipedia.org/wiki/Collaborative_filtering) algorithms derived from [Singular Value Decomposition (SVD)](https://en.wikipedia.org/wiki/Singular_value_decomposition). MF strength lies in its capacity to able to model high cardinality categorical variables interactions. This subfield boomed during the famous [Netflix Prize](https://en.wikipedia.org/wiki/Netflix_Prize) contest in 2006, when numerous novel variants has been invented and became popular thanks to their attractive accuracy and scalability.
#
# MF approach seeks to fill the user-item matrix considering the problem as a [matrix completion](https://en.wikipedia.org/wiki/Matrix_completion) one. MF core idea assume a latent model learning its own representation of the users and the items in a lower latent dimensional space by factorizing the observed parts of the matrix.
#
# A factorized user or item is represented as a vector $\mathbf{v}_u$ or $\mathbf{v}_i$ composed of $k$ latent factors, with $k << U, I$. Those learnt latent variables represent, for an item the various aspects describing it, and for a user its interests in terms of those aspects. The model then assume a user's choice or fondness is composed of a sum of preferences about the various aspects of the concerned item. This sum being the dot product between the latent vectors of a given user-item pair:
#
# $$
# \normalsize
# \langle \mathbf{v}_u, \mathbf{v}_i \rangle = \sum_{f=1}^{k} \mathbf{v}_{u, f} \cdot \mathbf{v}_{i, f}
# $$
#
# MF models weights are learnt in an online fashion, often with stochastic gradient descent as it provides relatively fast running time and good accuracy. There is a great and widely popular library named [surprise](http://surpriselib.com/) that implements MF models (and others) but in contrast with `river` doesn't follow a pure online philosophy (all the data have to be loaded in memory and the API doesn't allow you to update your model with new data).
#
# **Notes:**
#
# - In recent years, proposed deep learning techniques for recommendation tasks claim state of the art results. However, [recent work](https://arxiv.org/abs/1907.06902) (August 2019) showed that those promises can't be taken for granted and traditional MF methods are still relevant today.
# - For more information about how the business value of recommender systems is measured and why they are one of the main success stories of machine learning, see the following [literature survey](https://arxiv.org/abs/1908.08328) (December 2019).
# ## Let's start
# In this tutorial, we are going to explore MF algorithms available in `river` and test them on a movie recommendation problem with the MovieLens 100K dataset. This latter is a collection of movie ratings (from 1 to 5) that includes various information about both the items and the users. We can access it from the [river.datasets](/api/overview/#datasets) module:
# +
import json
from river import datasets
for x, y in datasets.MovieLens100K():
print(f'x = {json.dumps(x, indent=4)}\ny = {y}')
break
# -
# Let's define a routine to evaluate our different models on MovieLens 100K. Mean Absolute Error and Root Mean Squared Error will be our metrics printed alongside model's computation time and memory usage:
# +
from river import metrics
from river.evaluate import progressive_val_score
def evaluate(model):
    """Progressively validate *model* on MovieLens 100K, printing MAE and RMSE
    (plus time and memory usage) every 25,000 samples."""
    dataset = datasets.MovieLens100K()
    tracker = metrics.MAE() + metrics.RMSE()
    progressive_val_score(dataset, model, tracker, print_every=25_000, show_time=True, show_memory=True)
# -
# ## Naive prediction
# It's good practice in machine learning to start with a naive baseline and then iterate from simple things to complex ones observing progress incrementally. Let's start by predicing the target running mean as a first shot:
# +
from river import stats
mean = stats.Mean()
metric = metrics.MAE() + metrics.RMSE()
for i, x_y in enumerate(datasets.MovieLens100K(), start=1):
_, y = x_y
metric.update(y, mean.get())
mean.update(y)
if not i % 25_000:
print(f'[{i:,d}] {metric}')
# -
# ## Baseline model
# Now we can do machine learning and explore available models in [river.reco](https://online-ml.github.io/api/overview/#reco) module starting with the baseline model. It extends our naive prediction by adding to the global running mean two bias terms characterizing the user and the item discrepancy from the general tendency. The model equation is defined as:
#
# $$
# \normalsize
# \hat{y}(x) = \bar{y} + bu_{u} + bi_{i}
# $$
#
# This baseline model can be viewed as a linear regression where the intercept is replaced by the target running mean with the users and the items one hot encoded.
#
# All machine learning models in `river` expect dicts as input with feature names as keys and feature values as values. Specifically, models from `river.reco` expect a `'user'` and an `'item'` entries without any type constraint on their values (i.e. can be strings or numbers), e.g.:
#
# ```python
# x = {
# 'user': 'Guido',
# 'item': "Monty Python's Flying Circus"
# }
# ```
#
# Other entries, if exist, are simply ignored. This is quite useful as we don't need to spend time and storage doing one hot encoding.
# +
from river import meta
from river import optim
from river import reco
baseline_params = {
'optimizer': optim.SGD(0.025),
'l2': 0.,
'initializer': optim.initializers.Zeros()
}
model = meta.PredClipper(
regressor=reco.Baseline(**baseline_params),
y_min=1,
y_max=5
)
evaluate(model)
# -
# We won two tenth of MAE compared to our naive prediction (0.7546 vs 0.9421) meaning that significant information has been learnt by the model.
# ## Funk Matrix Factorization (FunkMF)
# It's the pure form of matrix factorization consisting of only learning the users and items latent representations as discussed in introduction. Simon Funk popularized its [stochastic gradient descent optimization](https://sifter.org/simon/journal/20061211.html) in 2006 during the Netflix Prize. The model equation is defined as:
#
# $$
# \normalsize
# \hat{y}(x) = \langle \mathbf{v}_u, \mathbf{v}_i \rangle
# $$
#
# **Note:** FunkMF is sometimes referred as [Probabilistic Matrix Factorization](https://papers.nips.cc/paper/3208-probabilistic-matrix-factorization.pdf) which is an extended probabilistic version.
# +
funk_mf_params = {
'n_factors': 10,
'optimizer': optim.SGD(0.05),
'l2': 0.1,
'initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73)
}
model = meta.PredClipper(
regressor=reco.FunkMF(**funk_mf_params),
y_min=1,
y_max=5
)
evaluate(model)
# -
# Results are equivalent to our naive prediction (0.9448 vs 0.9421). By only focusing on the users preferences and the items characteristics, the model is limited in his ability to capture different views of the problem. Despite its poor performance alone, this algorithm is quite useful combined in other models or when we need to build dense representations for other tasks.
# ## Biased Matrix Factorization (BiasedMF)
# It's the combination of the Baseline model and FunkMF. The model equation is defined as:
#
# $$
# \normalsize
# \hat{y}(x) = \bar{y} + bu_{u} + bi_{i} + \langle \mathbf{v}_u, \mathbf{v}_i \rangle
# $$
#
# **Note:** *Biased Matrix Factorization* name is used by some people but some others refer to it by *SVD* or *Funk SVD*. It's the case of <NAME> and <NAME> in [Recommender Systems Handbook](https://www.cse.iitk.ac.in/users/nsrivast/HCC/Recommender_systems_handbook.pdf) (Chapter 5 *Advances in Collaborative Filtering*) and of `surprise` library. Nevertheless, *SVD* could be confused with the original *Singular Value Decomposition* from which it's derived from, and *Funk SVD* could also be misleading because of the biased part of the model equation which doesn't come from <NAME>'s work. For those reasons, we chose to side with *Biased Matrix Factorization* which fits more naturally to it.
# +
biased_mf_params = {
'n_factors': 10,
'bias_optimizer': optim.SGD(0.025),
'latent_optimizer': optim.SGD(0.05),
'weight_initializer': optim.initializers.Zeros(),
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),
'l2_bias': 0.,
'l2_latent': 0.
}
model = meta.PredClipper(
regressor=reco.BiasedMF(**biased_mf_params),
y_min=1,
y_max=5
)
evaluate(model)
# -
# Results improved (0.7485 vs 0.7546) demonstrating that users and items latent representations bring additional information.
# To conclude this first tutorial about factorization models, let's review the important parameters to tune when dealing with this family of methods:
#
# - `n_factors`: the number of latent factors. The more you set, the more items aspects and users preferences you are going to learn. Too many will cause overfitting, `l2` regularization could help.
# - `*_optimizer`: the optimizers. Classic stochastic gradient descent performs well, finding the good learning rate will make the difference.
# - `initializer`: the latent weights initialization. Latent vectors have to be initialized with non-constant values. We generally sample them from a zero-mean normal distribution with small standard deviation.
| docs/examples/matrix-factorization-for-recommender-systems-part-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Sample sentence to tokenize.
S_text = "Does this thing really work? Lets see."

import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords

# Fetch the required corpora *before* using them: 'punkt' backs
# word_tokenize/sent_tokenize (the original cell tokenized before downloading
# anything, which fails on a fresh environment), 'stopwords' backs
# stopwords.words().
nltk.download('punkt')
nltk.download('stopwords')

wrd = word_tokenize(S_text)  # word-level tokens
sent_tokenize(S_text)        # sentence-level tokens (echoed by the notebook)

stp_wrds = stopwords.words('english')
stp_wrds

# Membership tests against a set are O(1) per token (the list is O(n)).
# NOTE: new_wrd stays a set, as in the original — duplicates and token
# order are intentionally dropped.
_stp_set = set(stp_wrds)
new_wrd = {i for i in wrd if i not in _stp_set}
new_wrd
| 22.NLP/.ipynb_checkpoints/Stemming&MovieReviewDataset-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.1
# language: julia
# name: julia-1.0
# ---
# # The Delta Method and the NW Covariance Matrix
#
# ## Load Packages
# +
using Dates, DelimitedFiles, Statistics, LinearAlgebra
include("jlFiles/printmat.jl")
# -
# # The Delta Method Applied to the Sharpe Ratio
x = readdlm("Data/FFmFactorsPs.csv",',',skipstart=1)  #load csv, skipping the header row
x = x[:,2] #x is an excess return in % (on the US equity market)
T = size(x,1)                       #sample size
# ## Point Estimates of Mean and 2nd Moment
# Estimate the mean and second moment as usual
#
# $
# \hat{\mu} =\frac{1}{T}\sum_{t=1}^{T}x_{t}
# $
#
# $
# \hat{\mu}_{2} =\frac{1}{T}\sum_{t=1}^{T}x_{t}^{2}
# $
#
# Use GMM to estimate the covariance matrix ($V$) of $(\hat{\mu},\hat{\mu}_{2})$
μ = mean(x) #estimates of the mean and 2nd moment
μ₂ = mean(x.^2)                     #2nd (uncentered) moment, E(x²)
printlnPs("\n","μ and μ₂",μ,μ₂)     #print the point estimates
# The next cell constructs the variance-covariance matrix (called $V$) of the point estimates (collected into a vector $\hat{\beta}$). Recall that we typically have that
#
# $
# \sqrt{T}(\hat{\beta}-\beta_{0})\overset{d}{\rightarrow}N(0,V_{k\times k})
# $
#
# $V$ is constructed by using the usual GMM properties (discussed in detail later on). For now, you could just run the next cell without checking the details.
# +
g = [(x .- μ) (x.^2 .- μ₂)] #moment conditions, Tx2
T = size(g,1)                       #number of observations
gbar = vec(mean(g,dims=1))          #sample averages of the moment conditions (≈0 by construction)
println("Sample moment conditions, gbar ")
printmat(gbar)
S = cov(g) #Var[sqrt(T)*gbar] = var(g) if iid
#S = NWFn(g,1) #to use Newey-West instead
D = -Matrix(1.0I,2,2) #-I(2), Jacobian of the moment conditions wrt (μ,μ₂)
V = inv(D*inv(S)*D') #Var[sqrt(T)*(mu,μ₂)]
println("Cov(params)")
printmat(V/T)                       #V/T = covariance matrix of the estimates themselves
# -
# ## The Sharpe Ratio and Its Derivatives
# The Sharpe ratio and its derivatives (with respect to the parameters of the
# Sharpe ratio) are
#
# $
# \frac{\text{E}(x)}{\sigma(x)} =\frac{\mu}{(\mu_{2}-\mu^{2})^{1/2}},\: \text{where}\: \beta=(\mu,\mu_{2})
# $
#
# Let $f(\beta)$ denote the Sharpe ratio where $\beta$ is a vector of parameters
# consisting of the mean and the second moment ($\mu,\mu_{2}$). The derivates are then
#
# $
# \frac{\partial f(\beta)}{\partial\beta^{\prime}} = \left[
# \begin{array}[c]{cc}
# \frac{\mu_{2}}{(\mu_{2}-\mu^{2})^{3/2}} & \frac{-\mu}{2(\mu_{2}-\mu^{2})^{3/2}}
# \end{array}
# \right]
# $
#
# For more on numerical derivatives, see the "integration" chapter of my *Julia Tutorial*.
"""
    SRFn(par)

Compute the Sharpe ratio implied by a mean and an (uncentered) 2nd moment,
together with the 1x2 Jacobian of the ratio wrt those parameters.

# Input
- `par`: 2-element vector [μ,μ₂], where μ=E(x) and μ₂=E(x²)

# Output
- `SR`: scalar, the Sharpe ratio μ/σ
- `df`: 1x2 matrix, derivatives of SR wrt (μ,μ₂)
"""
function SRFn(par)
    (m1,m2) = (par[1],par[2])               #μ = E(x), μ₂ = E(x²)
    v  = m2 - m1^2                          #variance, σ² = μ₂ - μ²
    SR = m1/sqrt(v)                         #Sharpe ratio μ/σ
    df = [m2/v^1.5  -m1/(2*v^1.5)]          #Jacobian ∂SR/∂(μ,μ₂), 1x2
    return SR, df
end
# +
(SR,df) = SRFn([μ,μ₂])              #Sharpe ratio and its Jacobian at the point estimates
printlnPs("Sharpe ratio from parameters and direct: ",SR,mean(x)/std(x))  #sanity check vs direct computation
println("\nDerivatives ")
printmat(df)
# -
# ## Applying the Delta Method to the Sharpe Ratio
#
# Recall that if
#
# $
# \sqrt{T}(\hat{\beta}-\beta_{0})\overset{d}{\rightarrow}N(0,V_{k\times k}) ,
# $
#
# then the distribution of the function $f(\hat{\beta})$ is
#
# $
# \sqrt{T}[f(\hat{\beta})-f(\beta_{0})] \overset{d}{\rightarrow}N(0,\Lambda_{q\times q})
# $
#
# with
#
# $
# \Lambda = \frac{\partial f(\beta_{0})}{\partial\beta^{\prime}}
# V
# \frac{\partial f(\beta_{0}) ^{\prime}}{\partial\beta}, \:
# \text{where } \:
# \frac{\partial f(\beta)}{\partial\beta^{\prime}} =
# \left[
# \begin{array}[c]{ccc}
# \frac{\partial f_{1}(\beta)}{\partial\beta_{1}} & \cdots &\frac{\partial f_{1}(\beta)}{\partial\beta_{k}} \\
# \vdots & \ddots & \vdots \\
# \frac{\partial f_{q}(\beta) }{\partial\beta_{1}} & \cdots & \frac{\partial f_{q}(\beta)}{\partial\beta_{k}}
# \end{array}
# \right] _{q\times k}
# $
# +
Std_SR = sqrt((df*V*df'/T)[1]) #[1] to convert from 1x1 matrix to scalar; delta-method variance df*V*df'/T
println("\nSR and its Std ")
printmat([SR Std_SR])
println("SR and 90% conf band: ")
printmat([SR (SR-1.65*Std_SR) (SR+1.65*Std_SR)]) #±1.65 std gives a two-sided 90% band
# -
# # Newey-West
#
# Let $g_t$ be a vector of data series (or moment conditions).
#
# To calculate the Newey-West covariance matrix, we first need the
# autocovariance matrices $\Omega_{s}=\text{Cov}(g_{t},g_{t-s}) $,
# which is estimated as
# $
# \sum_{t=s+1}^{T} (g_{t}-\bar{g})(g_{t-s}-\bar{g})^{\prime}/T.
# $
#
# Then we form a linear
# combination (with tent-shaped weights) of those autocovariance matrices (from
# lag $-m$ to $m$), or equivalently
#
# $
# \text{Cov}(\sqrt{T}\bar{g}) =
# \Omega_{0} + \sum_{s=1}^{m}( 1-\frac{s}{m+1})
# (\Omega_{s}+\Omega_{s}^{\prime}).
# $
#
# The cells below illustrate this by estimating the std of a sample average in different ways.
# +
"""
    NWFn(g0,m=0)

Newey-West estimate of Cov(sqrt(T)*sample average of g0), formed as a
tent-shaped (Bartlett) weighted sum of the first `m` autocovariance matrices.

# Usage
S = NWFn(g0,m)

# Input
- `g0::Array`: Txq array of q moment conditions (data series)
- `m::Int`: number of lags to include in the weighting

# Output
- `S::Array`: qxq covariance matrix
"""
function NWFn(g0,m=0)
    T = size(g0,1)                     #number of observations
    nLags = min(m,T-1)                 #cannot use more than T-1 lags
    g = g0 .- mean(g0,dims=1)          #demean so the sample mean is zero
    S = g'g/T                          #lag-0 autocovariance, Ω₀
    for s in 1:nLags
        Ω = g[s+1:T,:]'g[1:T-s,:]/T    #Ωₛ = Cov(g_t,g_{t-s})
        w = 1 - s/(nLags+1)            #Bartlett weight, 1 - s/(m+1)
        S = S + w*(Ω + Ω')
    end
    return S
end
# +
T = 300 #simulate two AR(1) process with different persistence
g = [randn(1,2)*10;zeros(T-1,2)]     #random first observation; the rest is filled in below
for t = 2:T
    g[t,1] = 0.90*g[t-1,1] + randn()*10   #highly persistent AR(1)
    g[t,2] = 0.00*g[t-1,2] + randn()*10   #zero persistence (iid)
end
println(" Series 1 Series 2")
printlnPs("Std(of average) according to an iid assumption:",sqrt.(var(g,dims=1)/T))
printlnPs("Std(of average) according to NW: ",sqrt.(diag(NWFn(g,15)/T)))
# -
| DeltaMethodAndNW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # viresclient API
# > Author: <NAME>
# >
# > Abstract: Describe the main classes and methods defined in the viresclient package
# ## Introduction
#
# The `viresclient` Python package allows you to connect to the VirES server to download [Swarm](https://earth.esa.int/web/guest/missions/esa-operational-eo-missions/swarm) data and data calculated using magnetic models.
#
# Documentation:
#
# - https://viresclient.readthedocs.io/
# <a id="top"/>
#
# ## Contents
# - [Access token configuration](#access_token_configuration)
# - [Send requests to the server - SwarmRequest](#SwarmRequest)
# - [Get the available collections](#SwarmRequest.available_collections)
# - [Get the available measurements](#SwarmRequest.available_measurements)
# - [Get the available auxiliaries](#SwarmRequest.available_auxiliaries)
# - [Get the available magnetic models](#SwarmRequest.available_models)
#     - [Get information about one or more models](#SwarmRequest.get_model_info)
# - [Get the orbit number](#SwarmRequest.get_orbit_number)
# - [Get times for orbits](#SwarmRequest.get_times_for_orbits)
# - [Set collections](#SwarmRequest.set_collection)
# - [Set products](#SwarmRequest.set_products)
# - [Set/clear filters](#SwarmRequest.set_range_filter)
# - [Send request to the server](#SwarmRequest.get_between)
# - [Handle downloaded data - ReturnedData](#ReturnedData)
# - [Get the list of source data](#ReturnedData.sources)
# - [ReturnedData contents](#ReturnedData.contents)
# - [Get type of downloaded data files](#ReturnedData.filetype)
# - [Get list of magnetic models used during calculations](#ReturnedData.magnetic_models)
# - [Get list of filters applied to the request](#ReturnedData.range_filters)
# - [Convert ReturnedData to Pandas DataFrame](#ReturnedData.as_dataframe)
# - [Convert ReturnedData to xarray Dataset](#ReturnedData.as_xarray)
# - [Save downloaded data to a file](#ReturnedData.to_file)
# - [Save downloaded data to multiple files](#ReturnedData.to_files)
# - [Handle downloaded temporary file - ReturnedDataFile](#ReturnedDataFile)
# - [Get NamedTemporaryFile associated to a ReturnedDataFile](#ReturnedDataFile._file)
# - [Get type of the downloaded data file](#ReturnedDataFile.filetype)
# - [Convert ReturnedDataFile to Pandas DataFrame](#ReturnedDataFile.as_dataframe)
# - [Convert ReturnedDataFile to xarray Dataset](#ReturnedDataFile.as_xarray)
# - [Save ReturnedDataFile object to a file](#ReturnedDataFile.to_file)
# - [Handle viresclient configuration - ClientConfig](#ClientConfig)
# - [Get path of the configuration file](#ClientConfig.path)
# - [Get or set the default URL](#ClientConfig.default_url)
# - [Set site configuration](#ClientConfig.set_site_config)
# - [Get site configuration](#ClientConfig.get_site_config)
# - [Save configuration](#ClientConfig.save)
# - [Upload data to the server - DataUpload](#DataUpload)
# - [Upload a file to the server](#DataUpload.post)
# - [Get the identifier(s) of the uploaded file(s)](#DataUpload.ids)
# - [Get info about the uploaded file](#DataUpload.get)
# - [Set constant parameters to the uploaded file](#DataUpload.set_constant_parameters)
# - [Get constant parameters](#DataUpload.get_constant_parameters)
# - [Delete a specific uploaded file](#DataUpload.delete)
# - [Delete the uploaded files](#DataUpload.clear)
# [TOP](#top)
#
# <a id="access_token_configuration"/>
#
# ## Access token configuration
#
# Before using the client, you need to set the access token. This can be done using the `set_token()` function:
#
# ```python
# set_token(url='https://vires.services/ows', token=None, set_default=False)
# ```
# **Parameters**:
#
# - **url** (*str*, optional): server URL (default value: `https://vires.services/ows`).
# - **token** (*str*, optional): token string obtained from the VirES access token management page (https://vires.services/accounts/tokens/). If this parameter is not set, the user is prompted to enter its value interactively.
# - **set_default** (*bool*, optional): if `True`, the server identified by *url* is configured as default server (default `False`).
# Since the *url* parameter is set to the correct value by default, you don't need to specify it. You can also omit the *token* parameter because its value will be requested interactively. Moreover, by setting `set_default=True` you can configure this URL as the default in the configuration file and avoid specifying it when sending requests to the server.
# First, the function must be imported from the `viresclient` package:
from viresclient import set_token
# Now it can be executed:
# Store the access token for the default server URL in ~/.viresclient.ini and
# mark that URL as the default; the token value itself is prompted for interactively.
set_token(set_default=True)
# [TOP](#top)
#
# <a id="SwarmRequest"/>
#
# ## Send requests to the server - SwarmRequest
#
# The `SwarmRequest` object allows to create a request to the server and to download data according to the input parameters.
#
# ```python
# class SwarmRequest(url=None, username=None, password=None, token=None, config=None, logging_level='NO_LOGGING')
# ```
# **Parameters**:
#
# - **url** (*str*, optional): if not provided, the request will be sent to the default URL set in the `~/.viresclient.ini` configuration file.
# - **username** (*str*, optional): username. The usage of username and password is deprecated and will be removed in future releases.
# - **password** (*str*, optional): password. The usage of username and password is deprecated and will be removed in future releases.
# - **token** (*str*, optional): token string obtained from the VirES access token management page (https://vires.services/accounts/tokens/). If this parameter is not specified and it is not set in the configuration file, the user is prompted to enter its value interactively.
# - **config** (*str* or *ClientConfig*, optional): viresclient configuration. By default, it is read from `~/.viresclient.ini`.
# - **logging_level** (*str*, optional): set the logging level. Allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `NO_LOGGING` (default).
# Let's import the class from the `viresclient` package:
from viresclient import SwarmRequest
# After configuring the access token using the `set_token()` function (see [Access token configuration](#access_token_configuration)) , the `SwarmRequest` object can be created as follows:
# No URL given: the default server URL from ~/.viresclient.ini is used.
request = SwarmRequest()
# The access token for the `https://vires.services/ows` default URL configured in the `~/.viresclient.ini` file is automatically retrieved:
#
# ```ini
# [https://vires.services/ows]
# token = <token>
#
# [default]
# url = https://vires.services/ows
# ```
# If the default server URL is not configured (i.e. the "default" section is not present), you must specify the server URL:
# Explicit server URL (required when no default URL is configured).
request = SwarmRequest('https://vires.services/ows')
# `SwarmRequest` object has the following methods:
#
# - `SwarmRequest.available_collections()`
# - `SwarmRequest.available_measurements()`
# - `SwarmRequest.available_auxiliaries()`
# - `SwarmRequest.available_models()`
# - `SwarmRequest.get_model_info()`
# - `SwarmRequest.get_orbit_number()`
# - `SwarmRequest.get_times_for_orbits()`
# - `SwarmRequest.set_collection()`
# - `SwarmRequest.set_products()`
# - `SwarmRequest.set_range_filter()`
# - `SwarmRequest.clear_range_filter()`
# - `SwarmRequest.get_between()`
#
#
# [TOP](#top)
#
# <a id="SwarmRequest.available_collections"/>
#
# ### Get the available collections
#
# Swarm data are organized in *collections*. Each collection is related to a Swarm file type (e.g. collection *SW_OPER_MAGA_LR_1B* is related to file type *MAGA_LR_1B*). The list of the available collections are provided invoking the `SwarmRequest.available_collections()` method.
#
# ```python
# SwarmRequest.available_collections(details=True)
# ```
# **Parameters**:
#
# - **details** (*bool*, optional): if `True` (default), the method prints the list of all the available collections and the related details. If `False`, it returns the available collections as a *list*.
# **Example**: print the list of the available collections and their details:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Print the available collections together with their details
request.available_collections()
# -
# **Example**: get the list of available collections without the details:
# details=False: return just the collection names as a list (nothing printed)
request.available_collections(details=False)
# [TOP](#top)
#
# <a id="SwarmRequest.available_measurements"/>
#
# ### Get the available measurements
#
# It is possible to get the available measurements using the `SwarmRequest.available_measurements()` method:
#
# ```python
# SwarmRequest.available_measurements(collection=None)
# ```
# **Parameters**:
#
# - **collection** (*str*, optional). If indicated, returns the available measurements for this collection as a *list*. It can be set to: `MAG`, `EFI`, `IBI`, `TEC`, `FAC`, `EEF`, `IPD` or one of the collections returned by the *available_collections* method. If not indicated, it returns the available measurements for all the collections as a *dict*.
# **Example**: get the list of measurements for the `MAG` collections:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# List the measurements available in the 'MAG' collections
request.available_measurements('MAG')
# -
# Setting `collection=SW_OPER_MAGA_LR_1B` we obtain the same result:
# The full collection name gives the same result as the 'MAG' shorthand
request.available_measurements('SW_OPER_MAGA_LR_1B')
# **Example**: get all the available measurements:
# No argument: all measurements, returned as a dict keyed by collection
request.available_measurements()
# [TOP](#top)
#
# <a id="SwarmRequest.available_auxiliaries"/>
#
# ### Get the available auxiliaries
#
# It is possible to get the available auxiliaries using the `SwarmRequest.available_auxiliaries()` method:
#
# ```python
# SwarmRequest.available_auxiliaries()
# ```
# This method does not accept input parameters and provides the available auxiliaries as a list.
# **Example**: get the list of the available auxiliaries:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# List the auxiliary parameters that can be requested alongside measurements
request.available_auxiliaries()
# -
# [TOP](#top)
#
# <a id="SwarmRequest.available_models"/>
#
# ### Get the available magnetic models
#
# The available magnetic models can be obtained using the `SwarmRequest.available_models()` method:
#
# ```python
# SwarmRequest.available_models(param=None, details=True, nice_output=True)
# ```
# **Parameters**:
#
# - **param** (*str*, optional): it can be set to one of `F`, `C`, `D`, `MCO`, `MLI`, `MMA`, `MIO` to filter all the available magnetic models.
# - **details** (*bool*, optional): if it is set to `True` (default), it gets the models and their details. If `False`, it returns only the models as a *list*.
# - **nice_output** (*bool*, optional): if it is set to `True` (default), it prints the models and their details. If `False`, it returns the models as a dictionary (if `details=True`) or a s a list (if `details=False`).
#
# **Example**: get the list of all the available models and their details:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Print all available magnetic models and their details
request.available_models()
# -
# **Example**: print the list of the available `F` (Fast-Track) models and their details:
# Only the Fast-Track (F) models, printed with details
request.available_models('F')
# **Example**: get the list of models of type `F` as a dictionary:
request.available_models(param='F', nice_output=False)
# **Example**: get the list of all the available models without their details:
request.available_models(details=False)
# [TOP](#top)
#
# <a id="SwarmRequest.get_model_info"/>
#
# ### Get information about one or more models.
#
# It is possible to get information about one or more specific models using the `SwarmRequest.get_model_info()` method:
#
# ```python
# SwarmRequest.get_model_info(models=None, custom_model=None, original_response=False)
# ```
# **Parameters**:
#
# - **models** (*list[str]*, optional): models as a list of strings. If the list is not provided, returns information about all the available magnetic models.
# - **custom_model** (*str*, optional): name of the file containing the spherical harmonics coefficients (custom model).
# - **original_response** (*bool*, optional): if set to `False` (default), returns the result as a dictionary. If set to `True`, returns the result as a list of dictionaries.
#
# **Example**: get info about all the available magnetic models:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# No arguments: info about every available magnetic model
request.get_model_info()
# -
# **Example**: get info about CHAOS-Core and CHAOS-Static:
# Restrict the query to specific models
request.get_model_info(models=['CHAOS-Core', 'CHAOS-Static'])
# **Example**: set `original_response=True` to get the result as a list of dictionaries:
# original_response=True: result as a list of dictionaries instead of a dict
request.get_model_info(models=['CHAOS-Core', 'CHAOS-Static'], original_response=True)
# **Example**: get info on a custom model by providing the name of the file containing its coefficients:
# +
# Download shc file with WGET (command line tool)
# !wget "http://www.spacecenter.dk/files/magnetic-models/LCS-1/LCS-1.shc"
# Upload a .shc file and update the file name
# custom_model is the local path of the .shc spherical-harmonics coefficient file
request.get_model_info(custom_model='LCS-1.shc')
# -
# Delete downloaded file(s)
# !rm LCS-1.shc*
# [TOP](#top)
#
# <a id="SwarmRequest.get_orbit_number"/>
#
# ### Get the orbit number
#
# The `SwarmRequest.get_orbit_number()` allows to get the orbit number of a given spacecraft providing date and time:
#
# ```python
# SwarmRequest.get_orbit_number(spacecraft, input_time)
# ```
# **Parameters**:
#
# - **spacecraft** (*str*): spacecraft identifier: `A`, `B` or `C`.
# - **input_time** (*datetime.datetime* or *str*): date and time.
# **Example**: get orbit numbers corresponding to date 2020-01-01 00:00:00 for the three spacecrafts:
# +
# Create the request object
request = SwarmRequest()
# Orbit number of each spacecraft (A, B, C) at the given instant
for sc in ('A', 'B', 'C'):
    orbit = request.get_orbit_number(sc, '2020-01-01T00:00:00')
    print(f's/c {sc}: {orbit}')
# -
# [TOP](#top)
#
# <a id="SwarmRequest.get_times_for_orbits"/>
#
# ### Get times for orbits
#
# Get the time interval corresponding to a pair of orbit numbers using the `SwarmRequest.get_times_for_orbits()` method:
#
# ```python
# SwarmRequest.get_times_for_orbits(spacecraft, start_orbit, end_orbit)
# ```
# **Parameters**:
#
# - **spacecraft** (*str*): spacecraft identifier: `A`, `B` or `C`.
# - **start_orbit** (*int*): start orbit number
# - **end_orbit** (*int*): end orbit number
#
# **Example**: get time intervals corresponding to *start_orbit* 1000 and *end_orbit* 2000 for the three spacecrafts:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Set start/end orbits
start_orbit = 1000
end_orbit = 2000
# Create the request object
request = SwarmRequest()
# Print the [start, end] time interval covered by orbits 1000-2000 for each spacecraft
for sc in ('A', 'B', 'C'):
    start_date, end_date = request.get_times_for_orbits(sc, start_orbit, end_orbit)
    print(f's/c {sc}: {start_date} - {end_date}')
# -
# [TOP](#top)
#
# <a id="SwarmRequest.set_collection"/>
#
# ### Set collections
#
# Before sending the request to the server, you need to set the Swarm collection including the measurement(s) of interest. This can be done using the `SwarmRequest.set_collection()` method:
#
# ```python
# SwarmRequest.set_collection(*args)
# ```
#
# **Parameters**:
#
# - ***args** (*str*): one or more collections (see [Get available collections](#SwarmRequest.available_collections)) as a string.
# **Example**: to get data from SW_OPER_MAGA_LR_1B and SW_OPER_EFIA_LP_1B collections:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Request data from both the MAG (A) and the EFI (A) collections
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
# -
# [TOP](#top)
#
# <a id="SwarmRequest.set_products"/>
#
# ### Set products
#
# After setting the collection, you must set the combination of measurements and/or auxiliaries and/or magnetic model(s) data to retrieve. This can be done using the `SwarmRequest.set_products()` method:
#
# ```python
# SwarmRequest.set_products(measurements=None, models=None, custom_model=None, auxiliaries=None, residuals=False, sampling_step=None)
# ```
# **Parameters**:
#
# - **measurements** (*list[str]*, optional): list of measurements to be downloaded. To get the list of the available measurements see [Available measurements](#SwarmRequest.available_measurements) (e.g.: `['F', 'B_NEC', 'Ne']`.).
# - **models** (*list[str]*, optional): list of magnetic models. To get the list of the available models see [Available models](#SwarmRequest.available_models) (e.g.: `['CHAOS-Core', 'CHAOS-Static']`). In addition to the list, this parameters accepts also expression for the definition of magnetic models (e.g.: `'CHAOS = "CHAOS-Core" + "CHAOS-Static"'`).
# - **custom_model** (*str*, optional): path to the the file containing the spherical harmonics coefficients of the custom model.
# - **auxiliaries** (*list[str]*, optional): list of auxiliaries to be downloaded. To get the list of the available auxiliaries see [Available auxiliaries](#SwarmRequest.available_auxiliaries). Please note that the following parameters are always retrieved (i.e. they don't need to be specified): `Spacecraft`, `Timestamp`, `Latitude`, `Longitude`, `Radius`.
# - **residuals** (*bool*, optional): if it is set to `True`, returns the residuals between measurements (specified with *measurements*) and models (specified with *models*). If it is set to `False` (default), returns measurements and models.
# - **sampling_step** (*str*, optional): set the sampling step as an [ISO 8601 time interval](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals). If not provided, data is returned with the original sampling.
#
# **Example**: get measurements: `F` and `B_NEC` from `SW_OPER_MAGA_LR_1B` and `Ne` from `SW_OPER_EFIA_LP_1B`, model `CHAOS = "CHAOS-Core" + "Chaos-Static"` and auxiliary `OrbitNumber` with a sampling step of 10 seconds:
# +
# Import SwarmRequest object (this step can be skipped if SwarmRequest has already been imported)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Set collections
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
# Measurements + a composed CHAOS model + the orbit number, resampled to 10 s
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,          # return measurements and model values (not their difference)
    sampling_step='PT10S'     # ISO 8601 duration: one sample every 10 seconds
)
# -
# [TOP](#top)
#
# <a id="SwarmRequest.set_range_filter"/>
#
# ### Set/clear filters
#
# Filter(s) can be applied to the requested measurements using the `SwarmRequest.set_range_filter()` method:
#
# ```python
# SwarmRequest.set_range_filter(parameter=None, minimum=None, maximum=None)
# ```
# This method allows you to set filter(s) in the form: $minimum \le parameter \le maximum$.
#
# **Parameters**:
#
# - **parameter** (*str*, optional): parameter to be used as a filter (e.g. `Latitude`)
# - **minimum** (*float*, optional): allowed minimum value
# - **maximum** (*float*, optional): allowed maximum value
#
# It is possible to apply multiple filters with consecutive calls to this method.
# **Example:** to set filter: -20 <= `Longitude` <= 50 and 30 <= `Latitude` <= 70:
# +
# Import SwarmClient (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Set collection
request.set_collection('SW_OPER_MAGA_LR_1B')
# Set product
request.set_products(measurements=['F', 'B_NEC'])
# Keep only records with -20 <= Longitude <= 50 and 30 <= Latitude <= 70
request.set_range_filter('Longitude', -20.0, 50.0)
request.set_range_filter('Latitude', 30.0, 70.0)
# -
# Filters can be removed using the `SwarmRequest.clear_range_filter()` method:
#
# ```python
# SwarmRequest.clear_range_filter()
# ```
# Remove all previously set range filters from the request
request.clear_range_filter()
# [TOP](#top)
#
# <a id="SwarmRequest.get_between"/>
#
# ### Send request to the server
#
# After setting collection(s), measurements, auxiliaries and models, we are ready to send the request to the server using the `SwarmRequest.get_between()` method:
#
# ```python
# SwarmRequest.get_between(start_time=None, end_time=None, filetype='cdf', asynchronous=True, show_progress=True, nrecords_limit=None, tmpdir=None)
# ```
# **Parameters**:
#
# - **start_time** (*datetime.datetime* or *str*, optional): lower bound of temporal interval. If provided as string, it must be compliant to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601).
# - **end_time** (*datetime.datetime* or *str*, optional): upper bound of temporal interval. If provided as string, it must be compliant to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601).
# - **filetype** (*str*, optional): file format. Allowed values: `csv` and `cdf` (default).
# - **asynchronous** (*bool*, optional): if `True` (default), set the asynchronous processing.
# - **show_progress** (*bool*, optional): if `True` (default), enable the progress bar while processing and downloading data.
# - **nrecords_limit** (*int*): overrides the limit of 3456000 records.
#
# **Example**: download data according to the following input parameters and get the list of source data files:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "Chaos-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmClient (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Create the request object
request = SwarmRequest()
# Set collections
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
# Set products
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
# Download one hour of data (CDF format by default)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Data is returned as a `ReturnedData` object:
# The download result is wrapped in a viresclient ReturnedData object
type(data)
# [TOP](#top)
#
# <a id="ReturnedData"/>
#
# ## Handle downloaded data - ReturnedData
#
# Once the server receives the user's request, it creates the product, then the product is automatically downloaded and returned to the client as a `ReturnedData` object. Thus you don't need to create it yourself. It is now possible to convert this data to a `pandas.DataFrame` object or to a `xarray.Dataset` object or to save it to one or more files.
#
# `ReturnedData` object has the following attributes:
#
# - `ReturnedData.sources`
# - `ReturnedData.contents`
# - `ReturnedData.file_types`
# - `ReturnedData.magnetic_models`
# - `ReturnedData.range_filters`
#
# and the following methods:
#
# - `ReturnedData.as_dataframe()`
# - `ReturnedData.as_xarray()`
# - `ReturnedData.to_file()`
# - `ReturnedData.to_files()`
#
# [TOP](#top)
#
# <a id="ReturnedData.sources"/>
#
# ### Get the list of source data
#
# This attribute contains the list of source data files from which the values have been extracted.
# **Example**: download data according to the following input parameters and get the list of source data files:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "Chaos-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmClient (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# List of source product files the returned values were extracted from
data.sources
# This is the list of the files from which the measurement and auxiliaries values and the magnetic models values have been retrieved.
# [TOP](#top)
#
# <a id="ReturnedData.contents"/>
#
# ### ReturnedData contents
#
# Downloaded data is saved to one or more temporary files represented as `ReturnedDataFile` objects. The `ReturnedData.contents` attribute contains the list of these objects.
# **Example**: download data according to the following input parameters and get the `ReturnedData` contents:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "Chaos-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmClient (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# List of ReturnedDataFile objects holding the downloaded temporary files
data.contents
# [TOP](#top)
#
# <a id="ReturnedData.filetype"/>
#
# ### Get type of downloaded data files
#
# This attribute contains the type of downloaded files (i.e. `cdf` or `csv`).
# **Example**: download data according to the following input parameters and get the `ReturnedData` file type:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "Chaos-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmClient (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# Format of the downloaded files ('cdf' or 'csv')
data.filetype
# -
# [TOP](#top)
#
# <a id="ReturnedData.magnetic_models"/>
#
# ### Get list of magnetic models used during calculations
#
# This attribute contains the list of the magnetic models used during calculations.
# **Example**: download data according to the following input parameters and get the list of magnetic models used during calculations:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Get magnetic models evaluated for this request
data.magnetic_models
# [TOP](#top)
#
# <a id="ReturnedData.range_filters"/>
#
# ### Get list of filters applied to the request
#
# This attribute contains the list of applied filters.
# **Example**: download data according to the following input parameters and get the list of the applied filters:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-02T00:00:00]
# - file format: CDF
# - filters:
# - Longitude: $[-20.0, 50.0]$
# - Latitude: $[30.0, 70.0]$
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
# Keep only samples inside the given geographic ranges (degrees)
request.set_range_filter('Longitude', -20.0, 50.0)
request.set_range_filter('Latitude', 30.0, 70.0)
data = request.get_between('2019-10-01T00:00:00', '2019-10-02T00:00:00')
# -
# Get list of applied filters
data.range_filters
# [TOP](#top)
#
# <a id="ReturnedData.as_dataframe"/>
#
# ### Convert ReturnedData to Pandas DataFrame
#
# Data downloaded from the server can be converted to a `pandas.DataFrame` object. This is a general 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed columns, allowing the user to directly access data. For more information:
#
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://pandas.pydata.org/pandas-docs/stable/reference/frame.html
#
# This conversion can be obtained with the `ReturnedData.as_dataframe()` method:
#
# ```python
# ReturnedData.as_dataframe(expand=False)
# ```
# **Parameters**:
#
# - **expand** (*bool*, optional): If set to `False` (default), the vector parameters are represented as arrays (i.e. all the vector components in the same column). If this parameter is set to `True`, the vector parameters are expanded (i.e. each component in a separate column).
# **Example**: download data according to the following input parameters and convert the `ReturnedData` object to a pandas `DataFrame`:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Convert ReturnedData to pandas DataFrame (vectors kept as arrays in one column)
df = data.as_dataframe()
# You can visualize the first 5 records using the `DataFrame.head()` method:
df.head()
# Setting `expand=True`:
df = data.as_dataframe(expand=True)
# each vector component is in a separate column:
df.head()
# [TOP](#top)
#
# <a id="ReturnedData.as_xarray"/>
#
# ### Convert ReturnedData to xarray Dataset
#
# Data downloaded from the server can be converted to a `xarray.Dataset` object. This is a multi-dimensional array allowing the user to directly access data. For more information:
#
# - http://xarray.pydata.org/en/stable/
# - http://xarray.pydata.org/en/stable/data-structures.html#dataset
#
# This conversion can be obtained with the `ReturnedData.as_xarray()`.
# **Example**: download data according to the following input parameters and convert the `ReturnedData` object to an xarray `Dataset`:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Convert ReturnedData to an xarray Dataset
ds = data.as_xarray()
# You can see how the xarray `Dataset` is represented:
ds
# [TOP](#top)
#
# <a id="ReturnedData.to_file"/>
#
# ### Save downloaded data to a file
#
# Data downloaded from the server can be saved to a file using the `ReturnedData.to_file()` method:
#
# ```python
# ReturnedData.to_file(path, overwrite=False)
# ```
#
# **Parameters**:
#
# - **path** (*str*): output file path.
# - **overwrite** (*bool*, optional): if `True` allows to overwrite the file if already present at *path*.
#
# **Example**: download data according to the following input parameters and save the `ReturnedData` object to a file:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Save ReturnedData to a file (overwrite=True replaces an existing out.cdf)
data.to_file('out.cdf', overwrite=True)
# **Note**: this method can be used only if the amount of downloaded data is small (i.e. if the request is not split between multiple requests).
# **Example**: download data according to the following input parameters and save the `ReturnedData` object to a file:
# - measurements: U_orbit, Ne, Te, Vs
# - magnetic models: none
# - auxiliaries: none
# - sampling step: default
# - time interval: [2019-10-01T00:00:00, 2019-11-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download one month of data: large enough to be split into multiple requests
request = SwarmRequest()
request.set_collection('SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['U_orbit', 'Ne', 'Te', 'Vs'],
)
data = request.get_between('2019-10-01T00:00:00', '2019-11-01T00:00:00')
# -
# The request is split between multiple requests. Try to execute the cell below:
# +
# data.to_file('huge.cdf', True)
## will return:
## NotImplementedError: Data is split into multiple files. Use .to_files instead
# -
# Remove saved files (if any)
# !rm *.cdf
# [TOP](#top)
#
# <a id="ReturnedData.to_files"/>
#
# ### Save downloaded data to multiple files
#
# Data downloaded from the server can be saved to one or more files using the `ReturnedData.to_files()` method:
#
# ```python
# ReturnedData.to_files(paths, overwrite=False)
# ```
# **Parameters**:
#
# - **paths** (*list[str]*): output files path as a list of strings.
# - **overwrite** (*bool*, optional): if `True` allows to overwrite the file if already present at *path*.
#
# **Example**: download data according to the following input parameters and save the `ReturnedData` object to a file using the `ReturnedData.to_files()` method:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Save ReturnedData to a file (one path per downloaded file)
data.to_files(['out.cdf'], overwrite=True)
# This method is very useful in case the request has been split between multiple requests
# **Example**: download data according to the following input parameters and save the `ReturnedData` object to files:
# - measurements: U_orbit, Ne, Te, Vs
# - magnetic models: none
# - auxiliaries: none
# - sampling step: default
# - time interval: [2019-10-01T00:00:00, 2019-11-01T00:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download one month of data: large enough to be split into multiple files
request = SwarmRequest()
request.set_collection('SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['U_orbit', 'Ne', 'Te', 'Vs'],
)
data = request.get_between('2019-10-01T00:00:00', '2019-11-01T00:00:00')
# -
# Save ReturnedData to files (one path per downloaded file)
data.to_files(['first.cdf', 'second.cdf'], overwrite=True)
# **Note**: the number of files to be specified must be equal to the number of files indicated by the `ReturnedData.contents` attribute. In the above case:
data.contents
# Remove saved files (if any)
# !rm *.cdf
# [TOP](#top)
#
# <a id="ReturnedDataFile"/>
#
# ## Handle downloaded temporary data file - ReturnedDataFile
#
# This object holds the file downloaded from the server. Even if data has not been saved to a file with the `ReturnedData.to_files()`, it is stored in a temporary file and automatically deleted when not needed anymore. As indicated in the "[ReturnedData contents](#ReturnedData.contents)" section, you can get the list of the returned data files using the `ReturnedData.contents` attribute. Thus, you don't need to create this object by yourself.
#
# **Note**: the description of this object has been included for completeness only. You won't need to use this object and its methods directly. To handle the downloaded data is preferable to use the [ReturnedData](#ReturnedData) object.
# `ReturnedDataFile` object has the following attributes:
#
# - `ReturnedDataFile._file`
# - `ReturnedDataFile.filetype`
#
# and the following methods:
#
# - `ReturnedDataFile.as_dataframe()`
# - `ReturnedDataFile.as_xarray()`
# - `ReturnedDataFile.to_file()`
# [TOP](#top)
#
# <a id="ReturnedDataFile._file"/>
#
# ### Get the NamedTemporaryFile associated to a ReturnedDataFile
#
# The `NamedTemporaryFile` corresponding to the `ReturnedDataFile` is contained in the `ReturnedDataFile._file` attribute.
# **Example**: download data according to the following input parameters and get the NamedTemporaryFile objects associated to ReturnedDataFiles:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Get ReturnedData contents
data.contents
# `ReturnedData` contains only one `ReturnedDataFile`. Let's get the associated `NamedTemporaryFile`:
data.contents[0]._file
# `tempfile.NamedTemporaryFile` is part of the Python standard library. For more information see: https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile
# [TOP](#top)
#
# <a id="ReturnedDataFile.filetype"/>
#
# ### Get type of the downloaded data file
#
# This attribute contains the type of downloaded files (i.e. `cdf`, `csv` or `nc`)
# **Example**: download data according to the following input parameters and get the file type of the `ReturnedDataFile`:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Get ReturnedData contents
data.contents
# Get file type of the first (and only) ReturnedDataFile
data.contents[0].filetype
# [TOP](#top)
#
# <a id="ReturnedDataFile.as_dataframe"/>
#
# ### Convert ReturnedDataFile to Pandas Dataframe
#
# As for the `ReturnedData` object (see [Convert ReturnedData to Pandas DataFrame](#ReturnedData.as_dataframe)), the `ReturnedDataFile` object can be converted to a Pandas DataFrame using the `ReturnedDataFile.as_dataframe()` method:
#
# ```python
# ReturnedDataFile.as_dataframe(expand=False)
# ```
# **Parameters**:
#
# - **expand** (*bool*, optional): If set to `False` (default), the vector parameters are represented as arrays (i.e. all the vector components in the same column). If this parameter is set to `True`, the vector parameters are expanded (i.e. each component in a separate column).
# [TOP](#top)
#
# <a id="ReturnedDataFile.as_xarray"/>
#
# ### Convert ReturnedDataFile to xarray Dataset
#
# As for the `ReturnedData` object (see [Convert ReturnedData to xarray Dataset](#ReturnedData.as_xarray)), the `ReturnedDataFile` object can be converted to an xarray Dataset using the `ReturnedDataFile.as_xarray()` method:
#
# ```python
# ReturnedDataFile.as_xarray()
# ```
# [TOP](#top)
#
# <a id="ReturnedDataFile.to_file"/>
#
# ### Save ReturnedDataFile object to a file
#
# Data stored in the `ReturnedDataFile` object can be saved to a file with the `ReturnedDataFile.to_file()` method:
#
# ```python
# ReturnedDataFile.to_file(path, overwrite=False)
# ```
# **Parameters**:
#
# - **path** (*str*): output file path.
# - **overwrite** (*bool*, optional): if `True` allows to overwrite the file if already present at *path*.
#
# **Example**: download data according to the following input parameters and save the `ReturnedDataFile` to a file:
# - measurements: F, B_NEC, Ne
# - magnetic models: CHAOS = "CHAOS-Core" + "CHAOS-Static"
# - auxiliaries: OrbitNumber
# - sampling step: 10 seconds
# - time interval: [2019-10-01T00:00:00, 2019-10-01T01:00:00]
# - file format: CDF
# - filters: none
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B', 'SW_OPER_EFIA_LP_1B')
request.set_products(
    measurements=['F', 'B_NEC', 'Ne'],
    models=['CHAOS = "CHAOS-Core" + "CHAOS-Static"'],
    auxiliaries=['OrbitNumber'],
    residuals=False,
    sampling_step='PT10S'
)
data = request.get_between('2019-10-01T00:00:00', '2019-10-01T01:00:00')
# -
# Save the first (and only) ReturnedDataFile to a file
data.contents[0].to_file('out.cdf', overwrite=True)
# Remove saved files (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="ClientConfig"/>
#
# ## Handle viresclient configuration - ClientConfig
#
# You can access the `viresclient` configuration using the `ClientConfig` class:
#
# ```python
# class viresclient.ClientConfig(path=None)
# ```
# **Parameters**:
# - **path** (*str*, optional): path of the configuration file. If not specified, the default configuration file is assumed: `~/.viresclient.ini`.
# **Example**: create the `ClientConfig` object associated to the default configuration file:
# +
# Import the ClientConfig
from viresclient import ClientConfig
# Create the ClientConfig object (no path given: the default ~/.viresclient.ini is used)
default = ClientConfig()
# -
# `ClientConfig` object has the following attributes:
#
# - `ClientConfig.path`
# - `ClientConfig.default_url`
#
# and the following methods:
#
# - `ClientConfig.set_site_config()`
# - `ClientConfig.get_site_config()`
# - `ClientConfig.save()`
# [TOP](#top)
#
# <a id="ClientConfig.path"/>
#
# ### Get path of the configuration file
#
# The `ClientConfig.path` read-only attribute contains the path of the configuration file.
# **Example**: create the ClientConfig object associated to the default configuration file and check its path:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create the ClientConfig object
default = ClientConfig()
# Show the path of the configuration file backing this object
default.path
# -
# [TOP](#top)
#
# <a id="ClientConfig.default_url"/>
#
# ### Get or set the default URL
#
# The `ClientConfig.default_url` attribute contains server's default URL (i.e. the one used when `SwarmClient` class is invoked without URL).
# **Example**: create the ClientConfig object associated to the default configuration file and check the default URL:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create the ClientConfig object
default = ClientConfig()
# Show the default server URL stored in the configuration
default.default_url
# -
# If the default URL is not set, the attribute returns `None`.
# **Example**: create a new configuration:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create new configuration (backed by a new file, so no default URL yet)
newcfg = ClientConfig('newfile.ini')
# Print default URL
print(newcfg.default_url)
# -
# Set default URL to: `https://vires.services/ows`:
newcfg.default_url = 'https://vires.services/ows'
# Get the updated result:
# Print default URL
print(newcfg.default_url)
# [TOP](#top)
#
# <a id="ClientConfig.set_site_config"/>
#
# ### Set site configuration
#
# It is possible to set the configuration for a server identified by an URL with the `Client.Config.set_site_config()` method:
#
# ```python
# ClientConfig.set_site_config(url, **options)
# ```
# **Parameters**:
#
# - **url** (*str*): server URL
# - ****options** (*str*): configuration options in the form: *key*=*value* (e.g.: token='...')
# **Example**: create a new configuration, set default URL to: `https://vires.services/ows` and set the access token for this URL:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create new configuration
newcfg = ClientConfig('newfile.ini')
# Set default URL
newcfg.default_url = 'https://vires.services/ows'
# Set the access token for the default URL ('<PASSWORD>' is a placeholder: use your own token)
newcfg.set_site_config(newcfg.default_url, token='<PASSWORD>')
# -
# [TOP](#top)
#
# <a id="ClientConfig.get_site_config"/>
#
# ### Get site configuration
#
# It is possible to get the configuration for a server identified by an URL with the `ClientConfig.get_site_config()` method:
#
# ```python
# ClientConfig.get_site_config(url)
# ```
# **Parameters**:
#
# - **url** (*str*): server URL
# **Example**: create a new configuration, set default URL to: `https://vires.services/ows`, set the access token for this URL and get the configuration for `https://vires.services/ows`:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create new configuration
newcfg = ClientConfig('newfile.ini')
# Set default URL
newcfg.default_url = 'https://vires.services/ows'
# Set the access token ('<PASSWORD>' is a placeholder: use your own token)
newcfg.set_site_config(newcfg.default_url, token='<PASSWORD>')
# Get the configuration stored for this URL
newcfg.get_site_config('https://vires.services/ows')
# -
# [TOP](#top)
#
# <a id="ClientConfig.save"/>
#
# ### Save configuration
#
# The configuration stored in the `ClientConfig` object can be saved using the `ClientConfig.save()` method:
#
# ```python
# ClientConfig.save()
# ```
# This method saves the configuration to the path specified during the `ClientConfig` creation. You can check this value via the `ClientConfig.path` attribute.
# **Example**: create a new configuration, set default URL to: `https://vires.services/ows`, set the access token for this URL and save the configuration to file:
# +
# Import the ClientConfig (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig
# Create new configuration
newcfg = ClientConfig('newfile.ini')
# Set default URL
newcfg.default_url = 'https://vires.services/ows'
# Set the access token ('<PASSWORD>' is a placeholder: use your own token)
newcfg.set_site_config(newcfg.default_url, token='<PASSWORD>')
# Save the configuration (written to the path given at creation: newfile.ini)
newcfg.save()
# -
# Let's have a look at the new configuration file:
# !cat newfile.ini
# delete newfile.ini
# !rm newfile.ini
# [TOP](#top)
#
# <a id="DataUpload"/>
#
# ## Upload data to the server - DataUpload
#
# You can upload your data to the server to view it in the VirES web interface. File format can be CDF or CSV and must be compliant to: https://github.com/ESA-VirES/VirES-Server/blob/master/vires/custom_data_format_description.md.
#
# Data can be uploaded using the `DataUpload` object:
#
# ```python
# class DataUpload(url, token, **kwargs)
# ```
# **Parameters**:
#
# - **url** (*str*): server URL
# - **token** (*str*): access token
# - ****kwargs** (*str*): additional parameters (currently not used)
# `DataUpload` object has the following attributes:
#
# - `DataUpload.ids`
#
# and the following methods:
#
# - `DataUpload.post()`
# - `DataUpload.get()`
# - `DataUpload.set_constant_parameters()`
# - `DataUpload.get_constant_parameters()`
# **Example**: create a `DataUpload` object for data upload to the default server. You can retrieve the default URL and the access token from the configuration, using the `ClientConfig` object:
# +
# Import DataUpload object
from viresclient import DataUpload
# Import ClientConfig object (this step can be avoided if ClientConfig has been already imported)
from viresclient import ClientConfig
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
# (the token must have been previously stored with ClientConfig.set_site_config)
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# -
# [TOP](#top)
#
# <a id="DataUpload.post"/>
#
# ### Upload a file to the server
#
# You can upload a file to the server using the `DataUpload.post()` method:
#
# ```python
# DataUpload.post(file, filename=None)
# ```
# **Parameters**:
#
# - **file** (*str*): file to be uploaded
#
# The method returns the info about the uploaded file as a dictionary.
# **Example**: upload a product to the server:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download 1 hour of MAGA_LR_1B data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B')
request.set_products(measurements=['F', 'B_NEC'])
data = request.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
data.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# Upload the file to the server to be visualized in the web client
info = du.post('out.cdf')
# -
# Check info about the uploaded file (dictionary returned by post()):
info
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.ids"/>
#
# ### Get the identifier(s) of the uploaded file(s)
#
# You can obtain the identifiers of the uploaded files via the `DataUpload.ids` attribute as a list. Please note that currently the server accepts only one file at a time, thus the returned list will have length 1.
# **Example**: upload a product to the server and get its identifier:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download 1 hour of MAGA_LR_1B data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B')
request.set_products(measurements=['F', 'B_NEC'])
data = request.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
data.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# Upload the file to the server to be visualized in the web client
info = du.post('out.cdf')
# -
# Get id of the uploaded file (list; currently at most one upload at a time):
du.ids
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.get"/>
#
# ### Get info about the uploaded file
#
# You can get the info of the uploaded file using the `DataUpload.get()` method:
#
# ```python
# DataUpload.get(identifier=None)
# ```
#
# **Parameters**:
#
# - **identifier** (*str*, optional): identifier of the uploaded file obtained via `DataUpload.ids` attribute (see [Get the identifier(s) of the uploaded file(s)](#DataUpload.ids)) or from the info returned by the `DataUpload.post()` method (see [Upload a file to the server](#DataUpload.post)). If not provided, returns the info of all the uploaded files as a list.
# **Example**: upload a product to the server and get product's info with `DataUpload.get()`:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download 1 hour of MAGA_LR_1B data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B')
request.set_products(measurements=['F', 'B_NEC'])
data = request.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
data.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# Upload the file to the server to be visualized in the web client
info = du.post('out.cdf')
# -
# Get info about the uploaded file (by identifier):
du.get(du.ids[0])
# If the identifier is not provided, you will get info about all the files as a list:
du.get()
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.set_constant_parameters"/>
#
# ### Set constant parameters to the uploaded file
#
# It is possible to set constant parameters to the uploaded file using the `DataUpload.set_constant_parameters()` method:
#
# ```python
# DataUpload.set_constant_parameters(identifier, parameters, replace=False)
# ```
# **Parameters**:
#
# - **identifier** (*str*): file identifier.
# - **parameters** (*dict*): constant parameters provided as a dictionary
# - **replace** (*bool*, optional): if set to `True`, all the parameters will be replaced by the new parameters, otherwise the new parameters will update the existing ones (default behaviour).
#
# **Example**: upload a product to the server and set constant parameters to the uploaded file:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download 1 hour of MAGA_LR_1B data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B')
request.set_products(measurements=['F', 'B_NEC'])
data = request.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
data.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# Upload the file to the server to be visualized in the web client
info = du.post('out.cdf')
# -
# Assign constant parameters: $param1 = 12345$ and $param2 = 34567$:
du.set_constant_parameters(du.ids[0], {'param1': 12345, 'param2': 34567})
# If you want to set `param1` to a new value you can update the existing set of parameters:
du.set_constant_parameters(du.ids[0], {'param1': 1})
# or replace the entire set of parameters (replace=True drops parameters not listed):
du.set_constant_parameters(du.ids[0], {'param1': 1}, replace=True)
# Note that `param2` has been removed.
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.get_constant_parameters"/>
#
# ### Get constant parameters applied to the uploaded file
#
# It is possible to get the list of constant parameters applied to the uploaded file using the `DataUpload.get_constant_parameters()` method:
#
# ```python
# DataUpload.get_constant_parameters(identifier)
# ```
# **Parameters**:
#
# - **identifier** (*str*): file identifier.
#
# **Example**: upload a product to the server and get constant parameters applied to the uploaded file:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Download 1 hour of MAGA_LR_1B data
request = SwarmRequest()
request.set_collection('SW_OPER_MAGA_LR_1B')
request.set_products(measurements=['F', 'B_NEC'])
data = request.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
data.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# Create ClientConfig object associated to the default configuration file: ~/.viresclient.ini
default = ClientConfig()
# Get default URL and access token from the configuration
url = default.default_url
token = default.get_site_config(url)['token']
# Create the DataUpload object:
du = DataUpload(url, token)
# Upload the file to the server to be visualized in the web client
info = du.post('out.cdf')
# -
# Assign constant parameters: $param1 = 12345$ and $param2 = 34567$:
du.set_constant_parameters(du.ids[0], {'param1': 12345, 'param2': 34567})
# Get the list of constant parameters currently applied to the upload:
du.get_constant_parameters(du.ids[0])
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.delete"/>
#
# ### Delete a specific uploaded file
#
# You can delete a specific uploaded file using the `DataUpload.delete()` method:
#
# ```python
# DataUpload.delete(identifier)
# ```
# **Parameters**:
#
# - **identifier** (*str*): identifier of the uploaded file obtained via `DataUpload.ids` attribute (see [Get the identifier(s) of the uploaded file(s)](#DataUpload.ids)) or from the info returned by the `DataUpload.post()` method (see [Upload a file to the server](#DataUpload.post)). If not provided, returns the info of all the uploaded files as a list.
#
# **Example**: upload a product to the server and delete it with `DataUpload.delete()`:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Fetch one hour of MAGA_LR_1B measurements and save them to a local CDF file
req = SwarmRequest()
req.set_collection('SW_OPER_MAGA_LR_1B')
req.set_products(measurements=['F', 'B_NEC'])
downloaded = req.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
downloaded.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# ClientConfig bound to the default configuration file: ~/.viresclient.ini
cfg = ClientConfig()
# Look up the default server URL and its access token from the configuration
server_url = cfg.default_url
access_token = cfg.get_site_config(server_url)['token']
# Build the DataUpload helper
uploads = DataUpload(server_url, access_token)
# Push the file to the server so it can be visualized in the web client
upload_info = uploads.post('out.cdf')
# -
# Delete the uploaded product:
# +
uploads.delete(uploads.ids[0])
uploads.ids
# -
# Delete test file (if any)
# !rm out.cdf
# [TOP](#top)
#
# <a id="DataUpload.clear"/>
#
# ### Delete the uploaded files
#
# You can delete *all* the uploaded files using the `DataUpload.clear()` method:
#
# ```python
# DataUpload.clear()
# ```
# **Example**: upload a product to the server and delete it with `DataUpload.clear()`:
# +
# Import SwarmRequest (this step can be omitted if already executed in the previous examples)
from viresclient import SwarmRequest
# Fetch one hour of MAGA_LR_1B measurements and save them to a local CDF file
req = SwarmRequest()
req.set_collection('SW_OPER_MAGA_LR_1B')
req.set_products(measurements=['F', 'B_NEC'])
downloaded = req.get_between('2020-01-01T00:00:00', '2020-01-01T01:00:00')
downloaded.to_file('out.cdf', overwrite=True)
# +
# Import ClientConfig and DataUpload (this step can be omitted if already executed in the previous examples)
from viresclient import ClientConfig, DataUpload
# ClientConfig bound to the default configuration file: ~/.viresclient.ini
cfg = ClientConfig()
# Look up the default server URL and its access token from the configuration
server_url = cfg.default_url
access_token = cfg.get_site_config(server_url)['token']
# Build the DataUpload helper
uploads = DataUpload(server_url, access_token)
# Push the file to the server so it can be visualized in the web client
upload_info = uploads.post('out.cdf')
# -
# Delete all uploaded product(s) at once:
# +
uploads.clear()
uploads.ids
| notebooks/02c__viresclient-API.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#-*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
# Use the SimSun font so Chinese labels render correctly when plotting on Windows.
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
# Load delimited txt/csv files.
def loadtxtAndcsv_data(fileName, split, dataType):
    """Read a delimited text/CSV file into a numpy array.

    Args:
        fileName: path of the file to read.
        split: column delimiter (e.g. ",").
        dataType: numpy dtype for the resulting array.
    """
    return np.loadtxt(fileName, delimiter=split, dtype=dataType)
#data = loadtxtAndcsv_data("data.txt",",",np.float64)
# Load the sample dataset: every column but the last is a feature, the last is the target.
data = np.loadtxt("data.txt", delimiter=",", dtype=np.float64)
X = data[:, 0:-1]  # columns 0 .. n-2: features
y = data[:, -1]    # last column: target value
m = len(y)         # number of samples
col = data.shape[1]
print(X)
# Scratch version of the normalization (superseded by featureNormaliza below):
X_norm = np.array(X)
mu = np.zeros((1, X.shape[1]))     # placeholder, overwritten immediately below
sigma = np.zeros((1, X.shape[1]))  # placeholder, never filled in this cell
mu = np.mean(X_norm, 0)
import pandas as pd
from pandas import Series, DataFrame
def featureNormaliza(X):
    """Standardize every feature column to zero mean and unit variance.

    Args:
        X: array-like of shape (m, n) with the raw feature values.

    Returns:
        (X_norm, mu, sigma): a normalized float copy of X, the per-column
        means, and the per-column standard deviations.
    """
    # Copy to float64 explicitly: the original in-place column loop silently
    # truncated the normalized values when X had an integer dtype.
    X_norm = np.array(X, dtype=np.float64)
    mu = np.mean(X_norm, 0)     # per-column mean (axis 0 = down the rows)
    sigma = np.std(X_norm, 0)   # per-column standard deviation
    # Vectorized (x - mu) / sigma over all columns at once (broadcasting).
    X_norm = (X_norm - mu) / sigma
    return X_norm, mu, sigma
X, mu, sigma = featureNormaliza(X)  # standardize the features
print(X)
# 2-D scatter plot helper.
def plot_X1_X2(X):
    """Scatter-plot the first feature column against the second."""
    x1, x2 = X[:, 0], X[:, 1]
    plt.scatter(x1, x2)
    plt.show()
plot_X1_X2(X)  # visual check of the normalization
# Prepend a column of ones for the intercept (bias) term.
X = np.hstack((np.ones((m, 1)), X))
print(X)
theta = np.zeros((col, 1))  # initial parameters, one per column incl. intercept
# +
def linearRegression(alpha=0.01, num_iters=400):
    """Fit a linear model to 'data.txt' with batch gradient descent.

    Args:
        alpha: learning rate.
        num_iters: number of gradient-descent iterations.

    Returns:
        (mu, sigma, theta): feature means, feature standard deviations,
        and the learned parameter column vector.
    """
    print(u"加载数据...\n")
    raw = loadtxtAndcsv_data("data.txt", ",", np.float64)  # read the dataset
    features = raw[:, 0:-1]  # all columns but the last
    target = raw[:, -1]      # last column is the target
    n_samples = len(target)
    n_cols = raw.shape[1]
    features, mu, sigma = featureNormaliza(features)  # standardize
    plot_X1_X2(features)  # visual check of the normalization
    # Prepend the column of ones for the intercept term.
    features = np.hstack((np.ones((n_samples, 1)), features))
    print(u"\n执行梯度下降算法....\n")
    theta = np.zeros((n_cols, 1))
    target = target.reshape(-1, 1)  # row vector -> column vector
    theta, J_history = gradientDescent(features, target, theta, alpha, num_iters)
    plotJ(J_history, num_iters)
    return mu, sigma, theta  # means, standard deviations, learned theta
# Load delimited txt/csv files.
def loadtxtAndcsv_data(fileName, split, dataType):
    """Read a delimited text/CSV file into a numpy array."""
    return np.loadtxt(fileName, delimiter=split, dtype=dataType)
# Load numpy binary (.npy) files.
def loadnpy_data(fileName):
    """Read an array stored in numpy's binary .npy format."""
    return np.load(fileName)
# Feature normalization.
def featureNormaliza(X):
    """Standardize every feature column to zero mean and unit variance.

    Args:
        X: array-like of shape (m, n) with the raw feature values.

    Returns:
        (X_norm, mu, sigma): a normalized float copy of X, the per-column
        means, and the per-column standard deviations.
    """
    # Copy to float64 explicitly: the original in-place column loop silently
    # truncated the normalized values when X had an integer dtype.
    X_norm = np.array(X, dtype=np.float64)
    mu = np.mean(X_norm, 0)     # per-column mean
    sigma = np.std(X_norm, 0)   # per-column standard deviation
    # Vectorized (x - mu) / sigma over all columns at once (broadcasting).
    X_norm = (X_norm - mu) / sigma
    return X_norm, mu, sigma
# 2-D scatter plot helper.
def plot_X1_X2(X):
    """Scatter-plot the first feature column against the second."""
    first_col, second_col = X[:, 0], X[:, 1]
    plt.scatter(first_col, second_col)
    plt.show()
# Batch gradient descent.
def gradientDescent(X, y, theta, alpha, num_iters):
    """Run `num_iters` steps of batch gradient descent.

    Args:
        X: design matrix of shape (m, n), bias column included.
        y: target column vector of shape (m, 1).
        theta: initial parameters of shape (n, 1).
        alpha: learning rate.
        num_iters: number of iterations.

    Returns:
        (theta, J_history): final parameters and the cost recorded at
        every iteration.

    NOTE(review): `temp` is an np.matrix, so after the first iteration
    `theta` becomes an np.matrix column; `computerCost` relies on that
    coercion when it multiplies with `*`. Confirm before changing types.
    """
    m = len(y)
    n = len(theta)
    temp = np.matrix(np.zeros((n, num_iters)))  # theta of every iteration, as matrix columns
    J_history = np.zeros((num_iters, 1))  # cost recorded at every iteration
    for i in range(num_iters):  # one gradient step per iteration
        h = np.dot(X, theta)  # predictions X @ theta
        temp[:, i] = theta - ((alpha/m) * (np.dot(np.transpose(X), h - y)))  # gradient step
        theta = temp[:, i]
        J_history[i] = computerCost(X, y, theta)  # record current cost
        print('.', end=' ')  # progress indicator
    return theta, J_history
# Cost function.
def computerCost(X, y, theta):
    """Return the mean-squared-error cost J = ||X@theta - y||^2 / (2m).

    Args:
        X: design matrix of shape (m, n).
        y: targets, shape (m,) or (m, 1).
        theta: parameters, shape (n,) or (n, 1) (ndarray or np.matrix).

    Returns:
        The scalar cost value as a float.

    Fix: the original used `X*theta`, which is only a matrix product when
    `theta` happens to be an np.matrix; for plain ndarrays it broadcasts
    elementwise (or raises). np.dot works for both.
    """
    m = len(y)
    # Flatten both sides so matrix/ndarray and (m,1)/(m,) inputs all work.
    h = np.asarray(np.dot(X, theta)).reshape(-1)
    r = h - np.asarray(y).reshape(-1)  # residuals
    return float(np.dot(r, r) / (2 * m))
# Plot how the cost evolves over the iterations.
def plotJ(J_history, num_iters):
    """Plot the cost value recorded at every gradient-descent iteration."""
    iterations = np.arange(1, num_iters + 1)
    plt.plot(iterations, J_history)
    # The labels are Chinese, so the SimSun font must be passed explicitly.
    plt.xlabel(u"迭代次数", fontproperties=font)
    plt.ylabel(u"代价值", fontproperties=font)
    plt.title(u"代价随迭代次数的变化", fontproperties=font)
    plt.show()
# Smoke test for linearRegression.
def testLinearRegression():
    """Run the end-to-end pipeline with default hyperparameters."""
    mu, sigma, theta = linearRegression(0.01, 400)
    # print(u"\n计算的theta值为:\n", theta)
    # print(u"\n预测结果为:%f" % predict(mu, sigma, theta))
# Score a sample with the learned model.
def predict(mu, sigma, theta, sample=None):
    """Predict the target value for one feature sample.

    Args:
        mu: per-feature means used during training normalization.
        sigma: per-feature standard deviations used during training.
        theta: learned parameter vector (intercept term first).
        sample: feature values to score. Defaults to the original
            hard-coded example [1650, 3] (kept for backward compatibility);
            passing a sample generalizes the function to any input.

    Returns:
        np.dot([1, normalized features], theta).
    """
    if sample is None:
        sample = np.array([1650, 3])
    # Apply the same normalization that was used at training time.
    # (Local renamed from `predict`, which shadowed the function itself.)
    norm_sample = (np.asarray(sample) - mu) / sigma
    # Prepend the bias/intercept feature.
    final_sample = np.hstack((np.ones(1), norm_sample))
    return np.dot(final_sample, theta)
if __name__ == "__main__":
    # Run the end-to-end demo only when executed as a script.
    testLinearRegression()
# -
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from matplotlib.font_manager import FontProperties
# Use the SimSun font so Chinese labels render correctly on Windows.
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
# Load the classification dataset: features in all but the last column.
data = loadtxtAndcsv_data("data.txt", ",", np.float64)
X = data[:, 0:-1]
y = data[:, -1]
data = loadtxtAndcsv_data("data.txt", ",", np.float64)
print(data)
type(data)
X = data[:, 0:-1]
y = data[:, -1]
print(y)
type(y)
# NOTE(review): `plot_data` is not defined anywhere in this file, so this
# line raises NameError as written — presumably the scatter code below
# (or plot_X1_X2) was intended.
plot_data(X, y)
pos = np.where(y == 1)  # indices of the y==1 samples
neg = np.where(y == 0)  # indices of the y==0 samples
# Scatter plot of the two classes.
plt.figure(figsize=(15, 12))
plt.plot(X[pos, 0], X[pos, 1], 'ro')  # red o
plt.plot(X[neg, 0], X[neg, 1], 'bo')  # blue o
plt.title(u"两个类别散点图", fontproperties=font)
plt.show()
print(X[0])
print(X)
print(X[0][0])
# Small numpy indexing experiments: 1-D, 2-D and 3-D arrays.
import numpy as np
a = np.array([1, 2, 3])
print(a)
import numpy as np
a = np.array([[1, 2], [3, 4]])
print(a)
print(a[0])
import numpy as np
a = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
print(a)
print(a[0])
# Create a structured dtype with a single int8 field named 'age'.
import numpy as np
dt = np.dtype([('age', np.int8)])
print(dt)
| LinearRegression/Yunxi_steps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import copy
import functools
import traceback
import imlib as im
import numpy as np
import pylib as py
import scipy
import tensorflow as tf
import tflib as tl
import tfprob
import tqdm
import data
import module
# ==============================================================================
# =                                   param                                    =
# ==============================================================================
# Command-line arguments, declared through pylib's py.arg helper.
# data
py.arg('--img_dir', default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--load_size', type=int, default=256)
py.arg('--crop_size', type=int, default=256)
py.arg('--n_channels', type=int, choices=[1, 3], default=3)
# optimization
py.arg('--n_epochs', type=int, default=160)
py.arg('--epoch_start_decay', type=int, default=160)
py.arg('--batch_size', type=int, default=64)
py.arg('--learning_rate', type=float, default=1e-4)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--moving_average_decay', type=float, default=0.999)
py.arg('--n_d', type=int, default=1)  # # d updates per g update
# adversarial loss / gradient penalty configuration
py.arg('--adversarial_loss_mode', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'], default='hinge_v1')
py.arg('--gradient_penalty_mode', choices=['none', '1-gp', '0-gp', 'lp'], default='0-gp')
py.arg('--gradient_penalty_sample_mode', choices=['line', 'real', 'fake', 'real+fake', 'dragan', 'dragan_fake'], default='real')
# loss weights
py.arg('--d_loss_weight_x_gan', type=float, default=1)
py.arg('--d_loss_weight_x_gp', type=float, default=10)
py.arg('--d_lazy_reg_period', type=int, default=3)
py.arg('--g_loss_weight_x_gan', type=float, default=1)
py.arg('--g_loss_weight_orth_loss', type=float, default=1)  # if 0, use Gram–Schmidt orthogonalization (slower)
py.arg('--d_attribute_loss_weight', type=float, default=1.0)
py.arg('--g_attribute_loss_weight', type=float, default=10.0)
py.arg('--g_reconstruction_loss_weight', type=float, default=100.0)
py.arg('--weight_decay', type=float, default=0)
# latent structure: one z code per resolution level, plus a global eps code
py.arg('--z_dims', type=int, nargs='+', default=[6] * 6)
py.arg('--eps_dim', type=int, default=512)
# sampling / logging cadence
py.arg('--n_samples', type=int, default=100)
py.arg('--n_traversal', type=int, default=5)
py.arg('--n_left_axis_point', type=int, default=10)
# NOTE(review): type=int with a float default — the default stays 1.5, but a
# value passed on the command line would be truncated to an int; verify.
py.arg('--truncation_threshold', type=int, default=1.5)
py.arg('--sample_period', type=int, default=1000)
py.arg('--traversal_period', type=int, default=2500)
py.arg('--checkpoint_save_period', type=int, default=10000)
py.arg('--experiment_name', default='default')
#args = py.args()
# +
# Parse a fixed argument list for this notebook run (instead of the real CLI).
args = py.args(["--experiment_name","Eigen128_0526_unet_recon100","--z_dims","7","7","7","7","7","--load_size","128","--crop_size","128"])
#args = py.args(["--experiment_name","Eigen128_0602_unet_recon100","--z_dims","7","7","7","7","7","--load_size","128","--crop_size","128"])
#args = py.args(["--experiment_name","Eigen256_0524_unet_l","--load_size","256","--crop_size","256","--batch_size","32"])
# +
import functools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
from tqdm.auto import tqdm, trange
from pdb import set_trace
class DD(tl.Module):
    """Discriminator.

    Convolutional downsampling stack -> flatten -> fully connected layer,
    topped by two linear heads:
      - logit_gan: scalar real/fake logit used by the adversarial loss,
      - logit_att: n_atts-dimensional output used to regress latent codes.
    """

    def call(self,
             x,
             n_atts,
             dim_10=4,
             fc_dim=1024,
             n_downsamplings=6,
             weight_norm='none',
             feature_norm='none',
             act=tf.nn.leaky_relu,
             training=True):
        MAX_DIM = 512
        # Channel width for a feature map of spatial size `size`: grows as the
        # resolution shrinks, scaled by dim_10 and capped at MAX_DIM.
        nd = lambda size: min(int(2**(10 - np.log2(size)) * dim_10), MAX_DIM)
        w_norm = tl.get_weight_norm(weight_norm, training)
        # conv/fc share the same initializer, weight normalizer and L2 regularizer.
        conv = functools.partial(tl.conv2d, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        fc = functools.partial(tl.fc, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        f_norm = tl.get_feature_norm(feature_norm, training, updates_collections=None)
        conv_norm_act = functools.partial(conv, normalizer_fn=f_norm, activation_fn=act)

        h = x
        h = act(conv(h, nd(h.shape[1].value), 7, 1))  # stem: 7x7 stride-1 conv
        for i in range(n_downsamplings):
            # h = conv_norm_act(h, nd(h.shape[1].value // 2), 4, 2)
            # Per level: one stride-1 conv, then one stride-2 (halving) conv.
            h = conv_norm_act(h, nd(h.shape[1].value), 3, 1)
            h = conv_norm_act(h, nd(h.shape[1].value // 2), 3, 2)
        h = conv_norm_act(h, nd(h.shape[1].value), 3, 1)
        h = slim.flatten(h)
        h = act(fc(h, min(fc_dim, MAX_DIM)))
        logit_gan = fc(h, 1)       # real/fake head
        logit_att = fc(h, n_atts)  # latent-code regression head
        return logit_gan, logit_att
class UNetGenc(tl.Module):
    """U-Net encoder.

    Downsamples the input image and returns the list of ALL intermediate
    activations (two per level: after the stride-1 conv and after the
    stride-2 conv). The decoder consumes this list as skip connections.
    """

    def call(self,
             x,
             dim_10=4,
             n_channels=3,
             n_downsamplings=6,
             weight_norm='none',
             feature_norm='none',
             act=tf.nn.leaky_relu,
             training=True):
        MAX_DIM = 512
        # Channel width for spatial size `size`, capped at MAX_DIM.
        nd = lambda size: min(int(2**(10 - np.log2(size)) * dim_10), MAX_DIM)
        w_norm = tl.get_weight_norm(weight_norm, training)
        conv = functools.partial(tl.conv2d, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        fc = functools.partial(tl.fc, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        f_norm = tl.get_feature_norm(feature_norm, training, updates_collections=None)
        conv_norm_act = functools.partial(conv, normalizer_fn=f_norm, activation_fn=act)

        hiddenLayer = []  # collected skip features, ordered shallow -> deep
        h = x
        h = act(conv(h, nd(h.shape[1].value), 7, 1))  # stem: 7x7 stride-1 conv
        for i in range(n_downsamplings):
            # h = conv_norm_act(h, nd(h.shape[1].value // 2), 4, 2)
            h = conv_norm_act(h, nd(h.shape[1].value), 3, 1)
            hiddenLayer.append(h)
            h = conv_norm_act(h, nd(h.shape[1].value // 2), 3, 2)
            hiddenLayer.append(h)
        return hiddenLayer
class UNetGdec(tl.Module):
    """EigenGAN-style U-Net decoder.

    Every resolution level i owns a learned basis U_i (regularized toward
    orthogonality), an importance-scale vector L_i and an offset mu_i. The
    level's latent code z_i is injected as h_ = U (L * z) + mu and merged
    into the upsampling path. `eps` is the list of encoder activations used
    as skip connections for the first `shortcut_layers` levels; its last
    (deepest) element seeds the decoder.
    """

    def call(self,
             zs,
             eps,
             dim_10=4,
             n_channels=3,
             weight_norm='none',
             feature_norm='none',
             act=tf.nn.leaky_relu,
             use_gram_schmidt=True,
             training=True,
             shortcut_layers=1):
        MAX_DIM = 512
        # Channel width for spatial size `size`, capped at MAX_DIM.
        nd = lambda size: min(int(2**(10 - np.log2(size)) * dim_10), MAX_DIM)
        w_norm = tl.get_weight_norm(weight_norm, training)
        transposed_w_norm = tl.get_weight_norm(weight_norm, training, transposed=True)
        fc = functools.partial(tl.fc, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        conv = functools.partial(tl.conv2d, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        dconv = functools.partial(tl.dconv2d, weights_initializer=tl.get_initializer(act), weights_normalizer_fn=transposed_w_norm, weights_regularizer=slim.l2_regularizer(1.0))
        f_norm = tl.get_feature_norm(feature_norm, training, updates_collections=None)
        f_norm = (lambda x: x) if f_norm is None else f_norm

        def orthogonal_regularizer(U):
            # Penalize ||U^T U - I||^2; U^T U is also stored in the 'orth'
            # collection so it can be visualized in the summaries.
            with tf.name_scope('orthogonal_regularizer'):
                U = tf.reshape(U, [-1, U.shape[-1]])
                orth = tf.matmul(tf.transpose(U), U)
                tf.add_to_collections(['orth'], orth)
                return 0.5 * tf.reduce_sum((orth - tf.eye(U.shape[-1].value)) ** 2)

        h = eps[-1]  # deepest encoder feature seeds the decoder
        for i, z in enumerate(zs):
            height = width = 4 * 2 ** i  # level i operates at 4*2^i resolution
            U = tf.get_variable('U_%d' % i,
                                shape=[height, width, nd(height), z.shape[-1]],
                                initializer=tf.initializers.orthogonal(),
                                regularizer=orthogonal_regularizer,
                                trainable=True)
            if use_gram_schmidt:
                # Enforce orthogonality exactly (slower) instead of by penalty.
                U = tf.transpose(tf.reshape(U, [-1, U.shape[-1]]))
                U = tl.gram_schmidt(U)
                U = tf.reshape(tf.transpose(U), [height, width, nd(height), z.shape[-1]])
            # Per-dimension importance scales, initialized decreasing (3k, ...).
            L = tf.get_variable('L_%d' % i,
                                shape=[z.shape[-1]],
                                initializer=tf.initializers.constant([3 * i for i in range(z.shape[-1], 0, -1)]),
                                trainable=True)
            mu = tf.get_variable('mu_%d' % i,
                                 shape=[height, width, nd(height)],
                                 initializer=tf.initializers.zeros(),
                                 trainable=True)
            # Eigen-style code injection: h_ = U (L * z) + mu.
            h_ = tf.reduce_sum(U[None, ...] * (L[None, :] * z)[:, None, None, None, :], axis=-1) + mu[None, ...]
            h_1 = dconv(h_, nd(height), 1, 1)
            if shortcut_layers > i:
                # Double the channels so shapes match after the skip concat below.
                h_2 = dconv(h_, nd(height * 2)*2, 3, 2)
            else:
                h_2 = dconv(h_, nd(height * 2), 3, 2)
            #deconv1
            h = act(f_norm(h + h_1))
            #if shortcut_layers > i:
            #    h = tl.tile_concat([h, eps[-1 - 2*i]])
            h = dconv(h, nd(height * 2), 3, 2)  # upsample x2
            if shortcut_layers > i:
                # U-Net skip: concat the matching encoder activation.
                h = tl.tile_concat([h, eps[-2 - 2*i]])
            #deconv2
            h = act(f_norm(h + h_2))
            h = dconv(h, nd(height * 2), 3, 1)
        # Final 7x7 conv to n_channels, squashed to [-1, 1].
        x = tf.tanh(conv(act(h), n_channels, 7, 1))
        return x
# +
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
def make_dataset(img_paths,
                 batch_size,
                 load_size=286,
                 crop_size=256,
                 n_channels=3,
                 training=True,
                 drop_remainder=True,
                 shuffle=True,
                 repeat=1):
    """Build a tf.data pipeline over image files.

    Args:
        img_paths: list of image file paths.
        batch_size: batch size.
        load_size: images are first resized to (load_size, load_size).
        crop_size: size of the center crop applied after resizing.
        n_channels: 1 converts images to grayscale, 3 keeps RGB.
        training: if True, additionally apply a random horizontal flip.
        drop_remainder: drop the last incomplete batch.
        shuffle: shuffle both the file order and the dataset.
        repeat: number of epochs to repeat (None repeats forever).

    Returns:
        (dataset, len_dataset): the tf.data.Dataset and the number of
        batches per epoch.
    """
    if shuffle:
        img_paths = np.random.permutation(img_paths)

    # Single preprocessing function for both modes; the training path only
    # differs by the random flip (this merges the two previously duplicated
    # branches without changing behavior).
    def _map_fn(img):
        if n_channels == 1:
            img = tf.image.rgb_to_grayscale(img)
        img = tf.image.resize(img, [load_size, load_size])
        if training:
            img = tf.image.random_flip_left_right(img)
        img = tl.center_crop(img, size=crop_size)
        # img = tf.image.random_crop(img, [crop_size, crop_size, n_channels])
        # Map pixel values from [0, 255] to [-1, 1].
        img = tf.clip_by_value(img, 0, 255) / 127.5 - 1
        return img

    dataset = tl.disk_image_batch_dataset(img_paths,
                                          batch_size,
                                          drop_remainder=drop_remainder,
                                          map_fn=_map_fn,
                                          shuffle=shuffle,
                                          repeat=repeat)
    if drop_remainder:
        len_dataset = len(img_paths) // batch_size
    else:
        len_dataset = int(np.ceil(len(img_paths) / batch_size))
    return dataset, len_dataset
# +
# check
# Each generator level doubles the resolution starting from 4x4, so the number
# of latent levels must satisfy crop_size == 4 * 2**len(z_dims).
assert np.log2(args.crop_size / 4) == len(args.z_dims)
# output_dir
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)
# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)
sess = tl.session()
# ==============================================================================
# =                                    data                                    =
# ==============================================================================
img_paths = sorted(py.glob(args.img_dir, '*'))
# 95% / 5% train/validation split by file order.
img_paths_train = img_paths[:int(len(img_paths)*0.95)]
img_paths_test = img_paths[int(len(img_paths)*0.95):]
train_dataset, len_train_dataset = make_dataset(img_paths_train, args.batch_size, load_size=args.load_size, crop_size=args.crop_size, n_channels=args.n_channels, repeat=None)
train_iter = train_dataset.make_one_shot_iterator()
# Validation batch is sized for the larger of sampling/traversal needs.
val_dataset, len_val_dataset = make_dataset(img_paths_test, max(args.n_traversal, args.n_samples), load_size=args.load_size, crop_size=args.crop_size, n_channels=args.n_channels, shuffle=False, repeat=None, training=False)
val_iter = val_dataset.make_one_shot_iterator()
# ==============================================================================
# =                                   model                                    =
# ==============================================================================
#D = functools.partial(module.D(scope='D'), n_downsamplings=len(args.z_dims))
D = functools.partial(DD(scope='D'), n_atts=sum(args.z_dims), n_downsamplings=len(args.z_dims))
#G = functools.partial(module.G(scope='G'), n_channels=args.n_channels, use_gram_schmidt=args.g_loss_weight_orth_loss == 0)
# NOTE(review): the variable scopes look swapped — the encoder gets
# scope='Gdec' and the decoder scope='Genc'. Renaming would invalidate
# existing checkpoints, so it is only flagged here.
Genc = functools.partial(UNetGenc(scope='Gdec'), n_channels=args.n_channels, n_downsamplings=len(args.z_dims))
Gdec = functools.partial(UNetGdec(scope='Genc'), n_channels=args.n_channels, use_gram_schmidt=args.g_loss_weight_orth_loss == 0)
# Separate inference-mode copy of the decoder, filled from the EMA weights.
G_test = functools.partial(UNetGdec(scope='G_test'), n_channels=args.n_channels, use_gram_schmidt=args.g_loss_weight_orth_loss == 0, training=False)
# exponential moving average of the decoder weights
G_ema = tf.train.ExponentialMovingAverage(decay=args.moving_average_decay, name='G_ema')
# loss function
d_loss_fn, g_loss_fn = tfprob.get_adversarial_losses_fn(args.adversarial_loss_mode)
# ==============================================================================
# = graph =
# =============================================================================
def D_train_graph():
    """Build the discriminator training graph; returns a run(lr=...) closure.

    Loss terms: adversarial (real + fake), lazily-evaluated gradient
    penalty, L2 weight decay, and attribute regression (D must recover the
    latent codes of fake images). Gradients are averaged over all GPUs.
    The returned closure performs `args.n_d` discriminator updates per call.
    """
    # ======================================
    # =                graph               =
    # ======================================
    # placeholders & inputs
    lr = tf.placeholder(dtype=tf.float32, shape=[])
    x_r = train_iter.get_next()
    # One latent code per generator level, plus a global eps code.
    zs = [tf.random.normal([args.batch_size, z_dim]) for z_dim in args.z_dims]
    eps = tf.random.normal([args.batch_size, args.eps_dim])
    # counter
    step_cnt, _ = tl.counter()
    # optimizer
    optimizer = tf.train.AdamOptimizer(lr, beta1=args.beta_1)

    def graph_per_gpu(x_r, zs, eps):
        # generate
        # NOTE(review): the incoming random `eps` is discarded here and
        # replaced by the U-Net encoder features of the real batch.
        eps = Genc(x_r)
        x_f = Gdec(zs, eps)
        # discriminate
        x_r_logit, _ = D(x_r)
        x_f_logit, x_f_logit_att = D(x_f)
        # loss
        x_r_loss, x_f_loss = d_loss_fn(x_r_logit, x_f_logit)
        # Lazy regularization: compute the (expensive) gradient penalty only
        # every d_lazy_reg_period steps, scaled up to compensate.
        x_gp = tf.cond(tf.equal(step_cnt % args.d_lazy_reg_period, 0),
                       lambda: tfprob.gradient_penalty(D, x_r, x_f, args.gradient_penalty_mode, args.gradient_penalty_sample_mode) * args.d_lazy_reg_period,
                       lambda: tf.constant(0.0))
        if args.d_loss_weight_x_gp == 0:
            x_gp = tf.constant(0.0)
        reg_loss = tf.reduce_sum(D.func.reg_losses)
        # Attribute head must regress the flattened latent codes of fakes.
        zs_flatten = tf.concat(zs, axis=1)
        xb__loss_att = tf.losses.mean_squared_error(zs_flatten, x_f_logit_att)
        loss = (
            (x_r_loss + x_f_loss) * args.d_loss_weight_x_gan +
            x_gp * args.d_loss_weight_x_gp +
            reg_loss * args.weight_decay +
            xb__loss_att * args.d_attribute_loss_weight
        )
        # optim
        grads = optimizer.compute_gradients(loss, var_list=D.func.trainable_variables)
        return grads, x_r_loss, x_f_loss, x_gp, reg_loss

    # Split the inputs across all visible GPUs and average the gradients.
    split_grads, split_x_r_loss, split_x_f_loss, split_x_gp, split_reg_loss = zip(*tl.parellel_run(tl.gpus(), graph_per_gpu, tl.split_nest((x_r, zs, eps), len(tl.gpus()))))
    # split_grads, split_x_r_loss, split_x_f_loss, split_x_gp, split_reg_loss = zip(*tl.parellel_run(['cpu:0'], graph_per_gpu, tl.split_nest((x_r, zs, eps), 1)))
    grads = tl.average_gradients(split_grads)
    x_r_loss, x_f_loss, x_gp, reg_loss = [tf.reduce_mean(t) for t in [split_x_r_loss, split_x_f_loss, split_x_gp, split_reg_loss]]
    step = optimizer.apply_gradients(grads, global_step=step_cnt)
    # summary
    summary = tl.create_summary_statistic_v2(
        {'x_gan_loss': x_r_loss + x_f_loss,
         'x_gp': x_gp,
         'reg_loss': reg_loss,
         'lr': lr},
        './output/%s/summaries/D' % args.experiment_name,
        step=step_cnt,
        n_steps_per_record=10,
        name='D'
    )

    # ======================================
    # =            run function            =
    # ======================================
    def run(**pl_ipts):
        # `args.n_d` discriminator updates per call.
        for _ in range(args.n_d):
            sess.run([step, summary], feed_dict={lr: pl_ipts['lr']})

    return run
def G_train_graph():
    """Build the generator training graph; returns a run(lr=...) closure.

    Loss terms: adversarial loss on generated images, orthogonality
    regularization of the eigen-basis variables U, L2 weight decay, latent
    reconstruction via D's attribute head, and L1 pixel reconstruction of
    the real batch generated from D's inferred codes. One optimizer step is
    applied per call, followed by an EMA update of the decoder weights.
    """
    # ======================================
    # =                graph               =
    # ======================================
    # placeholders & inputs
    lr = tf.placeholder(dtype=tf.float32, shape=[])
    zs = [tf.random.normal([args.batch_size, z_dim]) for z_dim in args.z_dims]
    eps = tf.random.normal([args.batch_size, args.eps_dim])
    x_r = train_iter.get_next()
    # counter
    step_cnt, _ = tl.counter()
    # optimizer
    optimizer = tf.train.AdamOptimizer(lr, beta1=args.beta_1)

    def graph_per_gpu(zs, eps):
        # generate
        # NOTE(review): x_r is captured from the enclosing scope, i.e. NOT
        # split per GPU like zs/eps — every GPU sees the same real batch.
        # Codes D infers for the real batch drive the reconstruction path.
        _, zs_a = D(x_r)
        zs_a = tf.split(zs_a, len(args.z_dims), axis=1)
        # The incoming random `eps` is discarded and replaced by the U-Net
        # encoder features of the real batch.
        eps = Genc(x_r)
        x_f = Gdec(zs, eps)    # fakes from random codes
        x_a = Gdec(zs_a, eps)  # reconstruction of the real batch
        # discriminate
        x_f_logit, xb__logit_att = D(x_f)
        # loss
        x_f_loss = g_loss_fn(x_f_logit)
        orth_loss = tf.reduce_sum(tl.tensors_filter(Gdec.func.reg_losses, 'orthogonal_regularizer'))
        reg_loss_Gdec = tf.reduce_sum(tl.tensors_filter(Gdec.func.reg_losses, 'l2_regularizer'))
        reg_loss_Genc = tf.reduce_sum(tl.tensors_filter(Genc.func.reg_losses, 'l2_regularizer'))
        reg_loss = reg_loss_Gdec + reg_loss_Genc
        zs_flatten = tf.concat(zs, axis=1)
        # (fixed a duplicated chained assignment: `xb__loss_att= xb__loss_att=...`)
        xb__loss_att = tf.losses.mean_squared_error(zs_flatten, xb__logit_att)
        xa__loss_rec = tf.losses.absolute_difference(x_r, x_a)  # L1 reconstruction
        loss = (
            x_f_loss * args.g_loss_weight_x_gan +
            orth_loss * args.g_loss_weight_orth_loss +
            reg_loss * args.weight_decay +
            xb__loss_att * args.g_attribute_loss_weight +
            xa__loss_rec * args.g_reconstruction_loss_weight
        )
        # optim
        #grads = optimizer.compute_gradients(loss, var_list=G.func.trainable_variables)
        grads = optimizer.compute_gradients(loss, var_list=Genc.func.trainable_variables+Gdec.func.trainable_variables)
        return grads, x_f_loss, orth_loss, reg_loss

    # Split the latent inputs across all visible GPUs, average the gradients.
    split_grads, split_x_f_loss, split_orth_loss, split_reg_loss = zip(*tl.parellel_run(tl.gpus(), graph_per_gpu, tl.split_nest((zs, eps), len(tl.gpus()))))
    # split_grads, split_x_f_loss, split_orth_loss, split_reg_loss = zip(*tl.parellel_run(['cpu:0'], graph_per_gpu, tl.split_nest((zs, eps), 1)))
    grads = tl.average_gradients(split_grads)
    x_f_loss, orth_loss, reg_loss = [tf.reduce_mean(t) for t in [split_x_f_loss, split_orth_loss, split_reg_loss]]
    step = optimizer.apply_gradients(grads, global_step=step_cnt)
    # moving average: update the EMA shadow variables after each step
    with tf.control_dependencies([step]):
        step = G_ema.apply(Gdec.func.trainable_variables)
    # summary
    summary_dict = {'x_f_loss': x_f_loss,
                    'orth_loss': orth_loss,
                    'reg_loss': reg_loss}
    # Also log the per-level importance scales L.
    summary_dict.update({'L_%d' % i: t for i, t in enumerate(tl.tensors_filter(Genc.func.trainable_variables+Gdec.func.trainable_variables, 'L'))})
    summary_loss = tl.create_summary_statistic_v2(
        summary_dict,
        './output/%s/summaries/G' % args.experiment_name,
        step=step_cnt,
        n_steps_per_record=10,
        name='G_loss'
    )
    # Visualize the U^T U Gram matrices collected by the orthogonal regularizer.
    summary_image = tl.create_summary_image_v2(
        {'orth_U_%d' % i: t[None, :, :, None] for i, t in enumerate(tf.get_collection('orth', Gdec.func.scope + '/'))},
        './output/%s/summaries/G' % args.experiment_name,
        step=step_cnt,
        n_steps_per_record=10,
        name='G_image'
    )

    # ======================================
    # =              model size            =
    # ======================================
    n_params, n_bytes = tl.count_parameters(Genc.func.trainable_variables+Gdec.func.trainable_variables)
    print('Model Size: n_parameters = %d = %.2fMB' % (n_params, n_bytes / 1024 / 1024))

    # ======================================
    # =            run function            =
    # ======================================
    def run(**pl_ipts):
        sess.run([step, summary_loss, summary_image], feed_dict={lr: pl_ipts['lr']})

    return run
def sample_graph():
    """Build the sampling graph; returns run(epoch, iter).

    Generates images from random truncated-normal codes combined with the
    U-Net features of a validation batch, merges them into one square grid
    and writes it under samples_training/sample.
    """
    # ======================================
    # =                graph               =
    # ======================================
    # placeholders & inputs
    zs = [tl.truncated_normal([args.n_samples, z_dim], minval=-args.truncation_threshold, maxval=args.truncation_threshold) for z_dim in args.z_dims]
    # NOTE(review): `eps` is built but never used — the Genc features of the
    # fed validation images take its place below.
    eps = tl.truncated_normal([args.n_samples, args.eps_dim], minval=-args.truncation_threshold, maxval=args.truncation_threshold)
    xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
    # generate
    x_r = val_iter.get_next()
    x_f = G_test(zs, Genc(xa, training=False), training=False)
    # ======================================
    # =            run function            =
    # ======================================
    save_dir = './output/%s/samples_training/sample' % (args.experiment_name)
    py.mkdir(save_dir)

    def run(epoch, iter):
        xa_ipt = sess.run(x_r)
        x_f_opt = sess.run(x_f, feed_dict={xa: xa_ipt[:args.n_samples]})
        # Merge the batch into one square grid image and save it.
        sample = im.immerge(x_f_opt, n_rows=int(args.n_samples ** 0.5))
        im.imwrite(sample, '%s/Epoch-%d_Iter-%d.jpg' % (save_dir, epoch, iter))

    return run
def traversal_graph():
    """Build the latent-traversal graph; returns run(epoch, iter).

    For every level l and every dimension i of that level's code (visited
    in decreasing order of |L| importance), the code value is swept over a
    linspace while all other codes stay at the values D inferred for the
    real validation images. One image strip [real, reconstruction,
    traversals...] is written per (l, i) pair.
    """
    # ======================================
    # =                graph               =
    # ======================================
    # placeholders & inputs
    zs = [tf.placeholder(dtype=tf.float32, shape=[args.n_traversal, z_dim]) for z_dim in args.z_dims]
    eps = tf.placeholder(dtype=tf.float32, shape=[args.n_traversal, args.eps_dim])
    x = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
    # generate
    x_r = val_iter.get_next()
    # Codes D infers for the real images (traversal base values).
    _, x_r_zs = D(x, training=False)
    x_r_zs = tf.split(x_r_zs, len(args.z_dims), axis=1)
    x_f = G_test(zs, Genc(x, training=False), training=False)
    # ======================================
    # =            run function            =
    # ======================================
    save_dir = './output/%s/samples_training/traversal' % (args.experiment_name)
    py.mkdir(save_dir)

    def run(epoch, iter):
        x_r_input = sess.run(x_r)
        x_r_input = x_r_input[:args.n_traversal]
        x_r_zs_input = sess.run(x_r_zs, feed_dict={x: x_r_input})
        x_r_zs_input = np.array(x_r_zs_input)
        feed_dict = {z: z_ipt for z, z_ipt in zip(zs, x_r_zs_input)}
        feed_dict.update({x: x_r_input})
        x_f_recon = sess.run(x_f, feed_dict=feed_dict)  # reconstructions
        zs_ipt_fixed = x_r_zs_input
        #zs_ipt_fixed = [scipy.stats.truncnorm.rvs(-args.truncation_threshold, args.truncation_threshold, size=[args.n_traversal, z_dim]) for z_dim in args.z_dims]
        #eps_ipt = scipy.stats.truncnorm.rvs(-args.truncation_threshold, args.truncation_threshold, size=[args.n_traversal, args.eps_dim])
        # set the first sample as the "mode"
        #for l in range(len(args.z_dims)):
        #    zs_ipt_fixed[l][0, ...] = 0.0
        #eps_ipt[0, ...] = 0.0
        # Current importance scales L of every level.
        L_opt = sess.run(tl.tensors_filter(G_test.func.variables, 'L'))
        for l in range(len(args.z_dims)):
            # Traverse dimensions in decreasing order of |L| (importance).
            for j, i in enumerate(np.argsort(np.abs(L_opt[l]))[::-1]):
                x_f_opts = [x_r_input, x_f_recon]
                vals = np.linspace(-4.5, 4.5, args.n_left_axis_point * 2 + 1)
                for v in vals:
                    zs_ipt = copy.deepcopy(zs_ipt_fixed)
                    zs_ipt[l][:, i] = v  # sweep only dimension i of level l
                    feed_dict = {z: z_ipt for z, z_ipt in zip(zs, zs_ipt)}
                    feed_dict.update({x: x_r_input})
                    x_f_opt = sess.run(x_f, feed_dict=feed_dict)
                    x_f_opts.append(x_f_opt)
                sample = im.immerge(np.concatenate(x_f_opts, axis=2), n_rows=args.n_traversal)
                im.imwrite(sample, '%s/Epoch-%d_Iter-%d_Traversal-%d-%d-%.3f-%d.jpg' % (save_dir, epoch, iter, l, j, np.abs(L_opt[l][i]), i))

    return run
def clone_graph():
    """Build ops that refresh G_test: trainable variables are copied from
    the EMA shadow variables ('G_ema'), non-trainable ones directly from
    the training decoder. Returns a run() closure executing both copies."""
    # ======================================
    # =                graph               =
    # ======================================
    clone_tr = G_test.func.clone_from_vars(tl.tensors_filter(tl.global_variables(), 'G_ema'), var_type='trainable')
    clone_non = G_test.func.clone_from_module(Gdec.func, var_type='nontrainable')
    # ======================================
    # =            run function            =
    # ======================================
    def run(**pl_ipts):
        sess.run([clone_tr, clone_non])

    return run
# Instantiate all sub-graphs once, before variable initialization.
d_train_step = D_train_graph()
g_train_step = G_train_graph()
sample = sample_graph()
traversal = traversal_graph()
clone = clone_graph()
# ==============================================================================
# =                                   train                                    =
# ==============================================================================
# init: create/restore checkpoints and the step/update counters
checkpoint, step_cnt, update_cnt = tl.init(py.join(output_dir, 'checkpoints'), checkpoint_max_to_keep=1, session=sess)
# learning rate schedule: constant, then linear decay after epoch_start_decay
lr_fn = tl.LinearDecayLR(args.learning_rate, args.n_epochs, args.epoch_start_decay)
# +
# train
try:
    for ep in trange(args.n_epochs, desc='Epoch Loop'):
        # learning rate for this epoch
        lr_ipt = lr_fn(ep)
        # Each outer update consumes (n_d + 1) batches: n_d for D, 1 for G.
        for it in trange(len_train_dataset // (args.n_d + 1), desc='Inner Epoch Loop'):
            # Skip iterations already completed when resuming from a checkpoint.
            if it + ep * (len_train_dataset // (args.n_d + 1)) < sess.run(step_cnt):
                continue
            step = sess.run(update_cnt)
            # train D
            d_train_step(lr=lr_ipt)
            # train G
            g_train_step(lr=lr_ipt)
            # save
            if step % args.checkpoint_save_period == 0:
                checkpoint.save(step, session=sess)
            # sample
            if step % args.sample_period == 0:
                clone()
                sample(ep, it)
            if step % args.traversal_period == 0:
                clone()
                traversal(ep, it)
except Exception:
    traceback.print_exc()
finally:
    # Always emit final samples and a checkpoint, even after a failure.
    # NOTE(review): if the failure happens before the first loop iteration,
    # `ep`, `it` and `step` are undefined here and this block itself raises
    # NameError.
    clone()
    sample(ep, it)
    traversal(ep, it)
    checkpoint.save(step, session=sess)
    sess.close()
# +
#display sample
from IPython.display import display
from PIL import Image
from imlib import dtype
def display_sample():
    """Build a small inference graph and return a function that displays one
    real validation image, its reconstruction, and a random-z generation.

    NOTE(review): uses the module-level `sess`; the training cell closes that
    session in its `finally` block, so this only works on a live session —
    confirm the intended execution order of the notebook cells.
    """
    # ======================================
    # =                graph               =
    # ======================================
    # placeholders & inputs
    # Truncated samples keep each latent within [-threshold, threshold].
    zs = [tl.truncated_normal([args.n_samples, z_dim], minval=-args.truncation_threshold, maxval=args.truncation_threshold) for z_dim in args.z_dims]
    # NOTE(review): `eps` is built but never passed to G_test below —
    # possibly dead code; confirm.
    eps = tl.truncated_normal([args.n_samples, args.eps_dim], minval=-args.truncation_threshold, maxval=args.truncation_threshold)
    xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
    # generate
    x_r = val_iter.get_next()
    # D's second output is the inferred latent code of a real image.
    _,x_r_zs=D(xa, training=False)
    # Split the flat code into one chunk per latent layer.
    x_r_zs=tf.split(x_r_zs, len(args.z_dims), axis=1)
    # Random-z generation vs. reconstruction from D's inferred codes.
    x_f_rand = G_test(zs,Genc(xa, training=False), training=False)
    x_f_recon = G_test(x_r_zs,Genc(xa, training=False), training=False)
    # ======================================
    # =            run function            =
    # ======================================
    def run():
        # Fetch one validation batch; keep only the first args.n_samples images.
        xa_ipt = sess.run(x_r)[:args.n_samples]
        x_f_opt_rand = sess.run(x_f_rand, feed_dict={xa: xa_ipt})
        x_f_opt_recon = sess.run(x_f_recon, feed_dict={xa: xa_ipt})
        # Show: real image, then its reconstruction, then a random generation.
        img=Image.fromarray(dtype.im2uint(xa_ipt[0]))
        display(img)
        img=Image.fromarray(dtype.im2uint(x_f_opt_recon[0]))
        display(img)
        img=Image.fromarray(dtype.im2uint(x_f_opt_rand[0]))
        display(img)
    return run
display_sample_func=display_sample()
display_sample_func()
# -
| bicycleEigenGAN.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .fs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (F#)
// language: F#
// name: .net-fsharp
// ---
// # The F# Notebook Programming Model
//
// F# Notebooks are written using the F# notebook programming model, a variation on the F# scripting programming model used for `.fsx` scripts and familiar to many F# users.
//
// This article documents the ways that this programming model differs from normal F# scripting.
//
// ### Standard open namespaces and referenced packages
//
// The following assemblies or packages are referenced by default:
//
// * `.NETStandard.Library`
// * `FSharp.Core`
// * `Microsoft.AspNetCore.Html.Abstractions`
// * `Microsoft.DotNet.Interactive`
// * `Microsoft.DotNet.Interactive.Formatting`
// * `Microsoft.DotNet.Interactive.FSharp`
//
// The following namespaces are opened by default:
//
// * `FSharp.Core`
// * `FSharp.Control`
// * `FSharp.Collections`
// * `System`
// * `System.IO`
// * `System.Text`
// * `Microsoft.DotNet.Interactive.FSharp.FSharpKernelHelpers`
//
// Note that the final four are extra namespaces available by default in the F# notebook programming model that are not available by default in the F# scripting model.
//
// ### Referencing packages
//
// See [Referencing packages](Importing-packages.ipynb) for how package references differ.
//
// ### Displaying outputs
//
// See [Displaying outputs](Displaying-output.ipynb).
//
// ### Formatting outputs
//
// See [Formatting outputs](Formatting-outputs.ipynb).
//
// ### The `fsi` object and formatting outputs
//
// Currently the `fsi` object is not available by default and neither `fsi.AddPrinter` nor `fsi.AddHtmlPrinter` are supported. See [Formatting outputs](Formatting-outputs.ipynb)
// for how to register plain text and HTML formatters.
//
//
//
//
| samples/notebooks/fsharp/Docs/Programming-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DescTC
#
# https://github.com/marianealves/DescTC
#
#
# ### Installation
# pip install DescTC==0.1.1
#
#
#
# ## To save you time! <(ºvº)>**
#
# The DescTC python package provides the distribution and valuable information about each variable of your dataset helping you to decide which data cleansing method should be used without having to type lots of commands one at a time.
#
#
#
# Methods provided:
#
# **DescTC.table( )**
#
# Offers you the following information of each quantitative/qualitative variable:
#
# - Type
# - Quantity of zero numbers
# - Quantity of NaN's
# - % of NaN's
# - Quantity of uniques values
# - Quantity of outliers
# - Min value / Lowest category
# - Mean
# - Median
# - Mode
# - Max value / Highest category
#
#
# **DescTC.chart( )**
#
# Condense large amounts of information of each variable into easy-to-understand formats
# that clearly and effectively communicate important points:
#
# - Plot the distribution of each variable
# - Box plot of each quantitative variables
# - Plot the correlation between quantitative variables
#
#
# **DescTC.printfullTable( )**
#
# - Useful to see the entire outcome independently on which environment you are executing the package.
#
#
#
#
# *Please be aware that your data must be converted to a pandas DataFrame with column names.*
#
#
# *Use the help( ) function to display the documentation of the specified module.*
#
#
#
# See below the package outcome using a pandas DataFrames example.
# #### Installing required packages
# !pip install pandas
# !pip install numpy
# !pip install matplotlib
# !pip install seaborn
# #### Installing DescTC package
# !pip install DescTC==0.1.1
# #### Importing methods
from DescTC import *
# #### Importing data
# +
import pandas as pd

# DescTC expects a pandas DataFrame with named columns.
df = pd.read_csv("census.csv")
# -
# #### Creating new instance of DescTC( )
# Wrap the DataFrame; all summaries are produced from this instance.
test = DescTC(df)
# #### Printing head/tail of the DataFrame
test.df
# #### Accessing method: DescTC.table( )
# Per-column summary: dtype, zeros, NaNs, uniques, outliers, min/mean/median/mode/max.
test.table()
# #### Other alternative for the table method:
# - The printfullTable method is useful to see the entire outcome independently on which environment you are executing the package.
#
# test.printfullTable( )
#
# #### Accessing method: DescTC.chart( )
# Distribution plots, box plots and a correlation heatmap for the columns.
test.chart()
#
#
# ##### Note:
# The object data type can actually contain multiple different types. For instance, the column
# could include integers, floats, and strings which collectively are labeled as an object.
# Therefore, you may not get the box plot plotted from an object dtype variable.
#
| DescTC_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

# Sanity check: print the interpreter info and every core library version.
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
# -
# A Dense layer is created lazily; its weights only exist once it sees input.
layer = tf.keras.layers.Dense(100)
# Re-create with an explicit input shape, then build it by calling on zeros.
layer = tf.keras.layers.Dense(100, input_shape=[None, 5])
layer(tf.zeros([10, 5]))
# layer.variables
# For a Dense layer every variable (kernel + bias) is trainable.
layer.trainable_variables
# +
from sklearn.datasets import fetch_california_housing

# Load the California housing regression dataset (8 numeric features).
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)
# +
from sklearn.model_selection import train_test_split

# First split off a test set, then carve a validation set from the remainder.
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state = 11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
# +
from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training set only; reuse its statistics elsewhere
# to avoid leaking validation/test information into preprocessing.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
# -
# Custom activation wrapped as a Lambda layer: softplus(x) = log(1 + exp(x)).
customized_softplus = keras.layers.Lambda(lambda x : tf.nn.softplus(x))
print(customized_softplus([-10., -5., 0., 5., 10.]))
# +
def customized_mse(y_true, y_pred):
    """Mean-squared-error loss: the average of (y_pred - y_true) squared."""
    squared_error = tf.square(y_pred - y_true)
    return tf.reduce_mean(squared_error)
class CustomizedDenseLayer(keras.layers.Layer):
    """A from-scratch fully-connected layer: activation(x @ kernel + bias).

    Equivalent to keras.layers.Dense(units, activation=activation); written
    manually to demonstrate the custom-layer API (build/call/add_weight).
    """

    def __init__(self, units, activation=None, **kwargs):
        # Call the base initializer FIRST: keras.layers.Layer.__init__ sets up
        # attribute/weight tracking, and assigning sub-layers (the Activation
        # below) before it can break that tracking in tf.keras.
        super(CustomizedDenseLayer, self).__init__(**kwargs)
        self.units = units
        self.activation = keras.layers.Activation(activation)

    def build(self, input_shape):
        # Create kernel and bias once the input feature size is known.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.units),
                                      initializer='uniform',
                                      trainable=True)
        self.bias = self.add_weight(name='bias',
                                    shape=(self.units,),
                                    initializer='zeros',
                                    trainable=True)
        super(CustomizedDenseLayer, self).build(input_shape)

    def call(self, x):
        # Affine transform followed by the (possibly identity) activation.
        return self.activation(x @ self.kernel + self.bias)
# Two hidden custom Dense layers, then the custom softplus output activation.
model = keras.models.Sequential([
    CustomizedDenseLayer(30, activation='relu',
                         input_shape=x_train.shape[1:]),
    CustomizedDenseLayer(1),
    customized_softplus
])
model.summary()
# Train with the custom loss; also track the built-in MSE for comparison.
model.compile(loss=customized_mse, optimizer='sgd', metrics=['mean_squared_error'])
# Stop early once validation loss improves by less than 1e-2 for 5 epochs.
callbacks = [keras.callbacks.EarlyStopping(
    patience=5, min_delta=1e-2)]
history = model.fit(x_train_scaled, y_train,
                    validation_data = (x_valid_scaled, y_valid),
                    epochs = 5,
                    callbacks = callbacks)
# -
def plot_learning_curves(history):
    """Plot every metric recorded in a Keras History object on one figure."""
    metrics = pd.DataFrame(history.history)
    metrics.plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 4)
    plt.show()


plot_learning_curves(history)
# Final held-out evaluation: returns [customized_mse, mean_squared_error].
model.evaluate(x_test_scaled, y_test)
| tf_keras_regression-with_basic_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CHEN 5595 Homework 8
# This HW focuses on linear basis function models and singular value decomposition. It consists only of coding exercises. I provide several definitions and hints throughout that will help you. If you are confused about anything, do not panic, send us a message on Piazza! We are here to help you learn.
#
# Please answer the numerical exercises using an ipython notebook **in [Google Colab](https://colab.research.google.com/)** (provide the link with your handwritten homework). **Please answer each coding problem in a different cell**.
#
# You can also view this ipython notebook in your browser through [binder](https://mybinder.org/v2/gh/smcantab/chen5595-fall2020/d18d22db7f1e11efec07c09883526be2c2dbe7b3?filepath=homework%2Fhw8-questions.ipynb) or through nbviewer [link](https://nbviewer.jupyter.org/github/smcantab/chen5595-fall2020/blob/master/homework/hw8-questions.ipynb).
# ## Linear Basis Function Models
# Consider the following Python functions that generates synthetic data of the form
#
# $$t = f(x) + \epsilon$$
#
# where
#
# $$f(x) = \sin(2 \pi x) + \sin(4 \pi x)$$
#
# and
#
# $$\epsilon \sim \mathcal{N}(\mu, \sigma)$$
# +
import numpy as np
def func(x):
    """Noise-free target f(x) = sin(2*pi*x) + sin(4*pi*x), scaled so max|f| = 1."""
    raw = np.sin(2 * np.pi * x) + np.sin(4 * np.pi * x)
    peak = np.sqrt((207 + 33 ** 1.5) / 128)  # analytic maximum of |f| on [0, 1]
    return raw / peak
def create_toy_data(func, sample_size, std):
    """Return (x, t): evenly spaced inputs on [0, 1] and noisy targets
    t = func(x) + Gaussian noise with standard deviation `std`."""
    x = np.linspace(0, 1, sample_size)
    noise = np.random.normal(scale=std, size=x.shape)
    return x, func(x) + noise
# -
# Now consider the linear basis function model
#
# $$\begin{aligned}
# y(x, \mathbf{w}) &= w_0 + \sum_{j=1}^{M-1}w_j\phi_j(\mathbf{x}) \\
# &= \mathbf{w}^\top \mathbf{\phi}(\mathbf{x})
# \end{aligned}$$
#
# where we have defined the dummy basis function $\phi_0(\mathbf{x})=1$, $\mathbf{w} = (w_0, \dots, w_{M-1})^\top$ are the parameters, $\mathbf{x} = (x_0, \dots, x_{D-1})^\top$ is the D-dimensional input and $\mathbf{\phi}(\mathbf{x})=(\phi_0(\mathbf{x}), \dots, \phi_{M-1}(\mathbf{x}))^\top$ is the M-dimensional _feature vector_.
#
#
# Now consider a dataset of 1D inputs $\mathbf{X} = \{ x_1, x_2, \dots, x_N\}$ with corresponding target vectors $\mathbf{t} = \{ t_1, t_2, \dots, t_N\}$. The sum-of-squares error function for the linear regression problem is
#
# $$\begin{aligned}
# E(\mathbf{w}) &= \dfrac{1}{2} \sum_{n=1}^N \{t_n - y(x, \mathbf{w})\}^2 \\ &= \dfrac{1}{2} \sum_{n=1}^N \{ t_n- \mathbf{w}^\top \mathbf{\phi}(x_n)\}^2
# \end{aligned}$$
#
# Solving for $\mathbf{w}$ we obtain the Maximum Likelihood estimate of the parameters
#
# $$\mathbf{w}_{ML} = \Phi^\dagger \mathbf{t}$$
#
# where $\Phi^\dagger = (\Phi^\top\Phi)^{-1}\Phi^\top$ is the Moore-Penrose pseudo-inverse of the _design matrix_ $\Phi$, defined as:
#
# $$\Phi = \begin{pmatrix}\phi_0(x_1) & \phi_1(x_1) & \dots & \phi_M(x_1) \\ \phi_0(x_2) & \phi_1(x_2) & \dots & \phi_M(x_2) \\ \vdots & \vdots & \ddots & \vdots \\ \phi_0(x_N) & \phi_1(x_N) & \dots & \phi_M(x_N) \end{pmatrix}$$
#
# The Maximum Likelihood estimate of the variance can be found by maximizing the log-likelihood with respect to the noise and yields
#
# $$\sigma_{ML}^2 = \frac{1}{N}\sum_{n=1}^N \{ t_n - \mathbf{w}_{ML}^\top \mathbf{\phi}(x_n)\}^2$$
# ### Problem 1
#
# Write two Python classes that can generate the feature vectors $\mathbf{\phi}(x)$ for Gaussian basis functions
#
# $$\phi_j(x) = \exp\left\{ -\frac{(x-\mu_j)^2}{2s^2}\right\}$$
#
# and sigmoidal basis functions
#
# $$\phi_j(x) = \sigma \left( \frac{x-\mu_j}{s}\right)$$
#
# where $\sigma(a) = 1/(1+ \exp(-a))$ is the logistic sigmoid function.
#
# Each class should take the form (e.g. for the Gaussian features)
#
# ```python
# class GaussianFeature(object):
# def __init__(self, mean, var):
# self.mean = mean
# self.var = var
#
# def _gauss(self, x, mean):
# #returns gaussian basis function evaluated at point x
#
# def get_feature_vec(self, x):
# basis = []
# for mean in self.mean:
# basis.append(self._gauss(x, mean))
# return np.array(basis)
# ```
#
# Then write a function to build the design matrix $\Phi$ for a dataset $\mathbf{X} = \{ x_1, x_2, \dots, x_N\}$, of the following form
#
# ```python
# def build_design_matrix(xdata, feature):
# # write the missing code here to generate design matrix W
# return W
# ```
#
# where `xdata` is the array of inputs $\mathbf{X} = \{ x_1, x_2, \dots, x_N\}$ and `feature` is an instance of `GaussianFeature` of `SigmoidFeature`.
#
# Finally write a class to perform linear regression of the form
#
# ```python
# class LinearRegression(object):
#
# def fit(self, W, t):
# # W is the design matrix for the training data and t is the corresponding target variables
# # compute the weights self.w and the variance self.var
#
# def predict(self, W):
# # W is the design matrix for the test data
# # returns the prediction y(x, self.w) and the variance y_std = self.var
# ```
#
# Note that you can compute the pseudoinverse using the function [`numpy.linalg.pinv`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.pinv.html).
#
# Armed with these functions we generate a training set and test set as follows
np.random.seed(42) # fix the random seed
# 30 noisy training points (noise std 0.33) drawn from the target function.
x_train, y_train = create_toy_data(func, 30, 0.33)
# Dense noise-free grid used as the ground-truth curve for plotting/testing.
x_test = np.linspace(0, 1, 100)
y_test = func(x_test)
# Also define the means of the basis of the feature vectors
means = np.linspace(0, 1, 8)
# **Q1.** Using a plotting library of your choosing (e.g. matplotlib) make a graph showing
#
# - the function $f(x)$ i.e. `(x_test, y_test)`, shown as a solid line
# - the training data `(x_train, y_train)`, shown as circles
# - the line of best fit `(x_test, y_fit)`, shown as a solid line, where `y_fit` has been obtained by linear regression using the Gaussian basis functions with means given by `means` and variance $s^2=1$
# - plot the confidence intervals around the line of best fit `y` using the function [`plt.fill_between`](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.fill_between.html) to shade the region between `y-y_std` and `y+ystd` using `color='orange'`.
#
# Add a legend to the plot.
#
# **Q2.** Then do the same (in a separate graph) for the sigmoidal basis function with with means given by `means` and $s=1$.
#
# **Q3.** Finally show the effect of varying `s` for the two choices of basis functions by changing `s` between $10^{-3}$ and $10^4$. Explain what you observe.
# ## Singular Value Decomposition
# The computation of the pseudoinverse required for the solution of the linear basis function models, discussed above, can be achieved through the use of _Singular Value Decomposition_ (SVD), which is a unique matric factorization/decomposition that exists for every complex valued matrix. If $\mathbf{X} \in \mathbb{C}^{N \times M}$, then
#
# $$\mathbf{X} = \mathbf{U} \mathbf{\Sigma} \mathbf{V}^*$$
#
# where $\mathbf{U} \in \mathbb{C}^{N \times N}$ and $\mathbf{V} \in \mathbb{C}^{M \times M}$ are _unitary matrices_ with orthonormal columns, and $\mathbf{\Sigma} \in \mathbb{R}^{N \times M}$ is a diagonal matrix with real, nonnegative entries. The $^*$ indicates the complex conjugate transpose, which for $\mathbf{U}$ and $\mathbf{V}$ real amounts to the transpose. Then if we write the generic linear system
#
# $$\mathbf{X} \mathbf{w} = \mathbf{t}$$
#
# we can solve for $\mathbf{w}$ in terms of the SVD as
#
# $$\begin{aligned}
# \mathbf{w} &= \mathbf{V} \mathbf{\Sigma}^{-1} \mathbf{U}^* \mathbf{t}\\
# &= \mathbf{X}^\dagger \mathbf{t}
# \end{aligned}$$
#
# where $\mathbf{X}^\dagger = \mathbf{V} \mathbf{\Sigma}^{-1} \mathbf{U}^*$ is the Moore-Penrose (left) pseudoinverse.
#
# The most defining property of the SVD is that it provides an _optimal_ low-rank approximation to a matrix $\mathbf{X}$. In fact, a rank-$r$ approximation is obtained by keeping the $r$ leading singular values and discarding the rest (i.e. by keeping the first $r$ leading columns of $\mathbf{U}$, $r$ leading diagonal elements of $\mathbf{\Sigma}$ and $r$ leading rows of $\mathbf{V}^*$). This result can be phrased more rigorously in terms of the Eckart-Young theorem (though it was first discovered by Schmidt), stating that (from Brunton and Kutz's DDSE)
#
# **Eckart-Young theorem**: _The optimal rank-r approximation to $\mathbf{X}$, in a least-squares sense, is given by the rank-r SVD truncation $\tilde{\mathbf{X}}$:_
#
# $$ \mathop{\mathrm{argmin}}_{\tilde{\mathbf{X}}, \text{ s.t. } \mathrm{rank}(\mathbf{X})=r} ||\mathbf{X} - \tilde{\mathbf{X}}||_F = \tilde{\mathbf{U}} \tilde{\mathbf{\Sigma}} \tilde{\mathbf{V}}^*$$
#
# _where $\tilde{\mathbf{U}}$ and $\tilde{\mathbf{V}}$ denote the first $r$ leading columns of $\mathbf{U}$ and $\mathbf{V}$; $\tilde{\mathbf{\Sigma}}$ contains the leading $r \times r$ sub-block $\mathbf{\Sigma}$, and $||\cdot||_F$ is the Frobenius norm._
#
# Because $\mathbf{\Sigma}$ is diagonal, the rank-$r$ approximation can also be expressed in terms of the dyadic sum
#
# $$\tilde{\mathbf{X}} = \sum_{i=1}^r \sigma_i \mathbf{u}_i \mathbf{v}_i = \sigma_1 \mathbf{u}_1 \mathbf{v}_1^* + \dots + \sigma_r \mathbf{u}_r \mathbf{v}_r^*$$
#
# where $\sigma_i$ denotes the i-th element of the diagonal of $\mathbf{\Sigma}$ and $\mathbf{u}_i$ and $\mathbf{v}_i$ are the columns of $\mathbf{U}$ and $\mathbf{V}$.
#
# ### Problem 2
#
# Use the function [`np.linalg.svd`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html) to compute the SVD of a real random matrix of size $5 \times 5$ (use [`np.random.rand`](https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html) to generate the random matrix) and verify that $\mathbf{X}^\dagger \mathbf{X} = \mathbf{I}$. Then run the code below to load the matrix in figure [`Samantha_Cristoforetti_official_portrait_in_an_EMU_spacesuit.txt`](https://github.com/smcantab/chen5595-fall2020/blob/master/homework/Samantha_Cristoforetti_official_portrait_in_an_EMU_spacesuit.txt) and plot it.
import numpy as np
import matplotlib.pyplot as plt
# Load the grayscale portrait as a matrix of pixel intensities and show it.
X = np.loadtxt('Samantha_Cristoforetti_official_portrait_in_an_EMU_spacesuit.txt')
plt.imshow(X, cmap='gist_gray')
plt.axis('off')
# This is a portrait of Samantha Cristoforetti (from [Wikipedia](https://en.wikipedia.org/wiki/Samantha_Cristoforetti)) _"an Italian European Space Agency astronaut, former Italian Air Force pilot and engineer. She holds the record for the longest uninterrupted spaceflight by a European astronaut (199 days, 16 hours), and until June 2017 held the record for the longest single space flight by a woman until this was broken by Peggy Whitson and later by Christina Koch. She is also the first Italian woman in space."_
#
# This figure has $1000 \times 800$ pixels. A good way of illustrating the idea of matrix approximation is image compression. The grayscale image $\mathbf{X}$ above can be thought of as a matrix of size $1000 \times 800$ of integers between 0-255. We compress the image by producing the approximate matrix $\tilde{\mathbf{X}}$ for various choices of the truncation value $r$. Use the function below to produce the rank-$r$ approximation of $\mathbf{X}$
def rankr_approximation(X, r):
    """Return the optimal rank-r approximation of X (Eckart-Young),
    rounded and cast to uint8 pixel values."""
    u, s, vh = np.linalg.svd(X, full_matrices=False)
    # Scale the first r left singular vectors by their singular values,
    # then project back: equivalent to u[:, :r] @ diag(s[:r]) @ vh[:r, :].
    truncated = (u[:, :r] * s[:r]) @ vh[:r, :]
    return np.round(truncated).astype('uint8')
# **Q1.** Define the amount of storage that would be required for a rank-r approximation vs. the original matrix, assuming that each pixel is an 8 bits integer.
#
# **Q2.** Plot the approximate matrix $\tilde{\mathbf{X}}$ for various choice of $r \in [1, 800]$ and comment on what you observe and the relative savings in data storage. For each $r$ also plot $|\mathbf{X} - \tilde{\mathbf{X}}|$ and comment on what you observe.
| homework/hw8-questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is Abstraction in OOP
# <ul><li>Abstraction is the concept of object-oriented programming that “shows” only essential attributes and “hides” unnecessary information.</li><li>The main purpose of abstraction is hiding the unnecessary details from the users. </li><li> Abstraction is selecting data from a larger pool to show only relevant details of the object to the user. </li><li> It helps in reducing programming complexity and efforts. </li><li>It is one of the most important concepts of OOPs.</li></ul>
# # Abstraction in Python
# <ul><li>Abstraction in python is defined as hiding the implementation of logic from the client and using the particular application. </li><li>It hides the irrelevant data specified in the project, reducing complexity and giving value to the efficiency.</li><li> Abstraction is made in Python using <b>Abstract classes</b> and their methods in the code.</li></ul>
# ## What is an Abstract Class?
# <ul><li>Abstract Class is a type of class in OOPs, that declare one or more abstract methods. </li><li>These classes can have abstract methods as well as concrete methods. </li><li>A normal class cannot have abstract methods.</li><li>An abstract class is a class that contains at least one abstract method.</li></ul>
# ## What are Abstract Methods?
# <ul><li>Abstract Method is a method that has just the method definition but does not contain implementation.</li><li>A method without a body is known as an Abstract Method.</li><li>It must be declared in an abstract class.</li><li>The abstract method will never be final because the abstract class must implement all the abstract methods.</li></ul>
# ## When to use Abstract Methods & Abstract Class?
# <ul><li>Abstract methods are mostly declared where two or more subclasses are also doing the same thing in different ways through different implementations.</li><li>It also extends the same Abstract class and offers different implementations of the abstract methods.</li><li>Abstract classes help to describe generic types of behaviors and object-oriented programming class hierarchy. </li><li>It also describes subclasses to offer implementation details of the abstract class.</li></ul>
# ## Difference between Abstraction and Encapsulation
# <table style="background-color:#ffe6e6">
# <tr><th><b>Abstraction</b></th><th><b>Encapsulation</b></th></tr>
# <tr><td>Abstraction in Object Oriented Programming solves the issues at the design level.</td><td>Encapsulation solves it implementation level.</td></tr>
# <tr><td>Abstraction in Programming is about hiding unwanted details while showing most essential information.</td><td>Encapsulation means binding the code and data into a single unit.</td></tr>
# <tr><td>Data Abstraction in Java allows focussing on what the information object must contain</td><td>Encapsulation means hiding the internal details or mechanics of how an object does something for security reasons.</td></tr>
# </table>
# ## Advantages of Abstraction
# <ol><li>The main benefit of using an Abstraction in Programming is that it allows you to group several related classes as siblings.</li><li>
# Abstraction in Object Oriented Programming helps to reduce the complexity of the design and implementation process of software.</li></ol>
# ## How Abstract Base classes work :
# <ul><li>By default, Python does not provide abstract classes. Python comes with a module that provides the base for defining Abstract Base classes(ABC) and that module name is ABC. </li><li>ABC works by decorating methods of the base class as abstract and then registering concrete classes as implementations of the abstract base. </li><li>A method becomes abstract when decorated with the keyword @abstractmethod.</li></ul>
# #### Syntax
#
# Abstract class Syntax is declared as:
# +
from abc import ABC

# declaration: an abstract base class is any class that inherits from ABC
class classname(ABC):
    pass
# -
# Abstract method Syntax is declared as
def abstractmethod_name():
    # No body: an abstract method only declares the interface.
    return
# ### Few things to be noted in Python:
#
# <ul><li>In python, an abstract class can hold both an abstract method and a normal method.</li><li>
# The second point is an abstract class is not initiated (no objects are created).</li><li>
# The derived class implementation methods are defined in abstract base classes.</li></ul>
# +
from abc import ABC
# NOTE: the module name `abc` and the class name `ABC` are case-sensitive;
# swapping them (e.g. `from ABC import abc`) raises a ModuleNotFoundError.
# -
# ### Code I:
# +
from abc import ABC, abstractmethod

# Abstract Class: cannot be instantiated until `product` is overridden.
class product(ABC):

    # Normal (concrete) Method: inherited as-is by every subclass.
    def item_list(self, rate):
        print("amount submitted : ",rate)

    # Abstract Method: each concrete subclass must implement this.
    @abstractmethod
    def product(self,rate):
        return
# -
# ### Code II:
# A program to generate the volume of geometric shapes
# +
from abc import ABC


class geometric(ABC):
    """Abstract base for shapes that report a volume."""

    def volume(self):
        # abstract method: each shape supplies its own formula
        pass


class Rect(geometric):
    length = 4
    width = 6
    height = 6

    def volume(self):
        """Volume of a rectangular box: length * width * height."""
        return self.height * self.width * self.length


class Sphere(geometric):
    radius = 8

    def volume(self):
        """Sphere volume with 4/3 and pi approximated as 1.3 and 3.14."""
        return 1.3 * 3.14 * self.radius ** 3


class Cube(geometric):
    Edge = 5

    def volume(self):
        """Volume of a cube: edge cubed."""
        return self.Edge ** 3


class Triangle_3D:
    length = 5
    width = 4

    def volume(self):
        """Half of length * width (the area formula for a triangle)."""
        return 0.5 * (self.length * self.width)


# Instantiate one of each shape and report its volume.
box = Rect()
ball = Sphere()
dice = Cube()
tri = Triangle_3D()
print("Volume of a rectangle:", box.volume())
print("Volume of a circle:", ball.volume())
print("Volume of a square:", dice.volume())
print("Volume of a triangle:", tri.volume())
# -
# ### Code III
# A program to generate different invoices
# +
from abc import ABC, abstractmethod


class Bill(ABC):
    """Abstract bill: a concrete final_bill plus an abstract Invoice method."""

    def final_bill(self, pay):
        # Concrete method shared by every payment type.
        print('Purchase of the product: ', pay)

    @abstractmethod
    def Invoice(self, pay):
        pass


class Paycheque(Bill):
    def Invoice(self, pay):
        print('paycheque of: ', pay)


class CardPayment(Bill):
    def Invoice(self, pay):
        print('pay through card of: ', pay)


aa = Paycheque()
aa.Invoice(6500)
aa.final_bill(6500)
# BUG FIX: isinstance was previously called on the bound method (aa.Invoice),
# which is never an instance of Paycheque/CardPayment, so it always printed
# False. Test the object itself instead.
print(isinstance(aa, Paycheque))

aa = CardPayment()
aa.Invoice(2600)
aa.final_bill(2600)
print(isinstance(aa, CardPayment))
# -
# ### Code IV:
# Python program showing abstract base class work
# +
from abc import ABC, abstractmethod


class Animal(ABC):
    """Abstract animal: every concrete animal must say how it moves."""

    @abstractmethod
    def move(self):
        pass


class Human(Animal):
    def move(self):
        print("I can walk and run")


class Snake(Animal):
    def move(self):
        print("I can crawl")


class Dog(Animal):
    def move(self):
        print("I can bark")


class Lion(Animal):
    def move(self):
        print("I can roar")


# Object Instantiation: each concrete subclass reports its own movement.
for creature in (Human(), Snake(), Dog(), Lion()):
    creature.move()
# -
# ### Concrete Methods in Abstract Base Classes :
# <ul><li>Concrete (normal) classes contain only concrete (normal) methods whereas abstract classes may contain both concrete methods and abstract methods.</li><li> The concrete class provides an implementation of abstract methods, the abstract base class can also provide an implementation by invoking the methods via super().</li></ul>
# ### Code V:
# Python program invoking a method using super()
# +
from abc import ABC, abstractmethod


class R(ABC):
    """Base class providing a concrete rk() implementation."""

    def rk(self):
        print("Abstract Base Class")


class K(R):
    """Subclass that extends rk() by delegating to the base via super()."""

    def rk(self):
        # Run the base-class behaviour first, then add our own line.
        super().rk()
        print("subclass")


# Object instantiation
obj = K()
obj.rk()
# -
# ### Code VI:
# +
from abc import ABC, abstractmethod

class Bank(ABC):
    # Concrete method: shared fee-submission behaviour.
    def branch(self, Naira):
        print("Fees submitted : ",Naira)

    # NOTE(review): declared without `self`, so the overrides below only work
    # when called on the class itself (e.g. private.Bank(5000)); calling on an
    # instance would pass the instance as `Naira` — confirm this is the
    # intended teaching point.
    @abstractmethod
    def Bank(Naira):
        pass

class private(Bank):
    def Bank(Naira):
        print("Total Naira Value here: ",Naira)

class public(Bank):
    def Bank(Naira):
        print("Total Naira Value here:",Naira)

# Called on the classes directly, so 5000/2000 bind to `Naira`.
private.Bank(5000)
public.Bank(2000)

# Instantiation works because `public` overrides the abstract `Bank`.
a = public()
#a.branch(3500)
# -
# ## Class Project I
# Develop a python OOP program that creates an abstract base class called coup_de_ecriva. The base class will have one abstract method called <b>Fan_Page</b> and four subclasses namely; <b>FC_Cirok, Madiba_FC, Blue_Jay_FC and TSG_Walker</b>. The program will receive as input the name of the club the user supports and instantiate an object that will invoke the <b>Fan_Page</b> method in the subclass that prints Welcome to <b>"club name"</b>.
#
# <p><b>Hint:</b></p>
# The subclasses will use <b>Single Inheritance</b> to inherit the abstract base class.
#
# ## Class Project II
# The Service Unit of PAU has contacted you to develop a program to manage some of the External Food Vendors. With your knowledge in python OOP develop a program to manage the PAU External Food Vendors. The program receives as input the vendor of interest and display the menu of the interested vendor. The External vendors are Faith hostel, Cooperative Hostel, and Student Center. Find below the menus:
#
# <table><tr><td>
# <table style="background-color:#47b5ff">
# <tr><th colspan='2'>Cooperative Cafeteria</th></tr>
# <tr><th>Main Meal</th><th>Price (N)</th></tr>
# <tr><td>Jollof Rice and Stew</td><td>200</td></tr>
# <tr><td>White Rice and Stew</td><td>200</td></tr>
# <tr><td>Fried Rice</td><td>200</td></tr>
# <tr><td>Salad</td><td>100</td></tr>
# <tr><td>Platain</td><td>100</td></tr>
# </table>
# </td><td>
# <table style="background-color:pink">
# <tr><th colspan='2'>Faith Hostel Cafeteria</th></tr>
# <tr><th>Main Meal</th><th>Price (N)</th></tr>
# <tr><td>Fried Rice</td><td>400</td></tr>
# <tr><td>White Rice and Stew</td><td>400</td></tr>
# <tr><td>Jollof Rice</td><td>400</td></tr>
# <tr><td>Beans</td><td>200</td></tr>
# <tr><td>Chicken</td><td>1000</td></tr>
# </table>
# </td><td>
# <table style="background-color:#fcf96c">
# <tr><th colspan='2'>Student Centre Cafeteria</th></tr>
# <tr><th>Main Meal</th><th>Price (N)</th></tr>
# <tr><td>Chicken Fried Rice</td><td>800</td></tr>
# <tr><td>Pomo Sauce</td><td>300</td></tr>
# <tr><td>Spaghetti Jollof</td><td>500</td></tr>
# <tr><td>Amala/Ewedu</td><td>500</td></tr>
# <tr><td>Semo with Eforiro Soup</td><td>500</td></tr>
# </table>
# </td></tr>
# <table>
#
# <p><b>Hints:</b></p>
# <ul><li>The abstract base class is called <b>External_Vendors()</b>.</li><li>
# The abstract method is called <b>menu()</b>.</li><li>
# The subclasses (the different vendors) will inherit the abstract base class.</li><li>
# Each subclass will have a normal method called <b>menu()</b>.</li></ul>
#
#
| Week_12/.ipynb_checkpoints/Week 12 - Abstraction Practice-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MNIST Data Set - Basic Approach
# ### Get the MNIST Data
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST; one_hot=True encodes each label as a
# 10-dimensional indicator vector.
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
# ** Alternative sources of the data just in case: **
#
# * http://yann.lecun.com/exdb/mnist/
# * https://github.com/mrgloom/MNIST-dataset-in-different-formats
type(mnist)
mnist.train.images
# Dataset sizes: 55k train / 10k test / 5k validation in this loader's split.
mnist.train.num_examples
mnist.test.num_examples
mnist.validation.num_examples
# ### Visualizing the Data
import matplotlib.pyplot as plt
# %matplotlib inline
# Each image is stored flat as 784 values; reshape to 28x28 to display it.
mnist.train.images[1].shape
plt.imshow(mnist.train.images[1].reshape(28,28))
plt.imshow(mnist.train.images[1].reshape(28,28),cmap='gist_gray')
# Pixel values are already normalized into [0, 1].
mnist.train.images[1].max()
# The same image drawn as a single 784x1 column (aspect squashed below).
plt.imshow(mnist.train.images[1].reshape(784,1))
plt.imshow(mnist.train.images[1].reshape(784,1),cmap='gist_gray',aspect=0.02)
# ## Create the Model
# NOTE(review): this uses the TensorFlow 1.x graph-mode API
# (placeholders + Session); it will not run unmodified under TF 2.x.
# Input: flattened 28x28 MNIST images, 784 float pixels per example.
x = tf.placeholder(tf.float32,shape=[None,784])
# 10 because 0-9 possible numbers
# Single linear layer: weights and bias initialised to zero.
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
# Create the Graph
# Raw logits of the linear model; softmax is applied inside the loss below.
y = tf.matmul(x,W) + b
# Loss and Optimizer
# One-hot encoded ground-truth labels.
y_true = tf.placeholder(tf.float32,[None,10])
# +
# Cross Entropy
# -
# softmax_cross_entropy_with_logits applies softmax to `y` internally,
# so `y` must remain unnormalised logits.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)
train = optimizer.minimize(cross_entropy)
# ### Create Session
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Train the model for 1000 steps on the training set
    # Using built in batch feeder from mnist for convenience
    for step in range(1000):
        batch_x , batch_y = mnist.train.next_batch(100)
        sess.run(train,feed_dict={x:batch_x,y_true:batch_y})
    # Test the Train Model
    # Accuracy = fraction of test examples whose argmax prediction
    # matches the argmax of the one-hot label.
    matches = tf.equal(tf.argmax(y,1),tf.argmax(y_true,1))
    acc = tf.reduce_mean(tf.cast(matches,tf.float32))
    print(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels}))
# While this may seem pretty good, we can actually do much better, the best models can get above 99% accuracy.
#
# How do they do this? By using other models, such as convolutional neural networks!
| Week_04/00-MNIST-Data-Basic-Approach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # SSD
#
# This is to go through each important step of SSD.
# Firstly, load the model. You only need to do this one time.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
import numpy as np
import os
os.chdir('..')
caffe_root = './'
import sys
sys.path.insert(0, caffe_root + 'python')
import time
import caffe
from caffe.proto import caffe_pb2
caffe.set_device(0)
caffe.set_mode_gpu()
# caffe.set_mode_cpu()
# We create a solver that fine-tunes from a previously trained network.
solver = caffe.SGDSolver(caffe_root + 'models/VGGNet/VOC0712/SSD_300x300/solver.prototxt')
solver.net.copy_from(caffe_root + 'models/VGGNet/VGG_ILSVRC_16_layers_fc_reduced.caffemodel')
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': solver.net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104,117,123])) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
net = solver.net
# +
from google.protobuf import text_format
from caffe.proto import caffe_pb2
# load PASCAL VOC labels
labelmap_file = 'data/VOC0712/labelmap_voc.prototxt'
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
    """Translate numeric label id(s) into their display names.

    Accepts a single label or a list of labels; always returns a list.
    Raises AssertionError if any label is absent from the labelmap.
    """
    if type(labels) is not list:
        labels = [labels]
    labelnames = []
    for label in labels:
        found = False
        # Scan the labelmap entries in order; stop at the first match.
        for item in labelmap.item:
            if label == item.label:
                found = True
                labelnames.append(item.display_name)
                break
        assert found == True
    return labelnames
# -
# Forward one step.
solver.step(1)
# Now let's see the annotated datum after one forward-backward step.
# +
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
img_blob = net.blobs['data'].data
num_imgs = img_blob.shape[0]
img_width = img_blob.shape[2]
img_height = img_blob.shape[3]
label_blob = net.blobs['label'].data[0,0,:,:]
num_labels = label_blob.shape[0]
for i in xrange(num_imgs):
img = transformer.deprocess('data', img_blob[i])
plt.subplot(1, num_imgs, i + 1)
plt.imshow(img)
currentAxis = plt.gca()
for j in xrange(num_labels):
gt_bbox = label_blob[j, :]
if gt_bbox[0] == i:
xmin = gt_bbox[3] * img_width
ymin = gt_bbox[4] * img_height
xmax = gt_bbox[5] * img_width
ymax = gt_bbox[6] * img_height
gt_label = int(gt_bbox[1])
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
color = colors[gt_label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
label = get_labelname(labelmap, gt_bbox[1])[0]
currentAxis.text(xmin, ymin, label, bbox={'facecolor':color, 'alpha':0.5})
# -
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0):
    """Tile an (n, height, width[, channels]) array into a roughly square
    grid and display it with plt.imshow.

    Fix over the original: normalisation now happens on a copy, so the
    caller's array (e.g. the network's weight blobs) is no longer
    modified in place.
    """
    # Normalise to [0, 1] without mutating the input.
    data = data - data.min()
    data = data / data.max()
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data)
# Let's visualize the conv1_1's filters to make sure we have loaded a good pretrained model. Otherwise, it should plot random noise squares.
# the parameters are a list of [weights, biases]
filters = net.params['conv1_1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))
# And let's visualize conv5_3 layer responses. You should see nicely pattern.
feat = net.blobs['conv5_3'].data[0, :]
vis_square(feat, padval=1)
# Make sure that the PermuteLayer is doing the right thing.
# +
fc7_mbox_loc = net.blobs['fc7_mbox_loc'].data
print fc7_mbox_loc[0,:,1,2]
fc7_mbox_loc_perm = net.blobs['fc7_mbox_loc_perm'].data
print fc7_mbox_loc_perm[0,1,2,:]
# -
# Make sure the PriorBoxLayer generates the correct priors.
# +
img_blob = net.blobs['data'].data
num_imgs = img_blob.shape[0]
img_width = img_blob.shape[2]
img_height = img_blob.shape[3]
priorbox = net.blobs['mbox_priorbox'].data[0,0,:]
num_priors = priorbox.shape[0]
colors='rgbcmy'
for i in xrange(num_imgs):
img = transformer.deprocess('data', img_blob[i])
plt.subplot(1, num_imgs, i + 1)
plt.imshow(img)
currentAxis = plt.gca()
for j in xrange(240,243):
prior_bbox = priorbox[j*4:(j+1)*4]
xmin = prior_bbox[0] * img_width
ymin = prior_bbox[1] * img_height
xmax = prior_bbox[2] * img_width
ymax = prior_bbox[3] * img_height
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=colors[j%4], linewidth=2))
| examples/ssd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Count
#
# +
def count(start, end, step):
    """Generate values from ``start`` up to (but excluding) ``end``,
    advancing by ``step`` each time.  Unlike ``range``, the step may be
    a float.
    """
    current = start
    while current < end:
        yield current
        current += step
counter = list(count(5, 10, 0.5))
# -
for num in counter :
print(num, end=" ")
# # Reverse String
def rev_str(my_str):
    """Yield the characters of ``my_str`` from last to first.

    Fix: the original decremented the loop variable after ``yield``
    (``char -= 1``), which had no effect because ``for`` rebinds it on
    every iteration; that dead statement is removed and the reversal is
    delegated to the builtin ``reversed``.
    """
    yield from reversed(my_str)
list(rev_str("Hello World"))
# # fib series
def fibgen(n):
    """Yield the first ``n`` Fibonacci numbers, starting from 0."""
    a, b = 0, 1
    for _ in range(n):
        yield a
        a, b = b, a + b
list(fibgen(10))
def reve(n):
    """Return the non-negative integer ``n`` with its decimal digits
    reversed (e.g. 120 -> 21).  Returns 0 for n <= 0, as before.
    """
    reversed_num = 0
    while n > 0:
        n, digit = divmod(n, 10)
        reversed_num = reversed_num * 10 + digit
    return reversed_num
def rev_nums(args):
    """Lazily yield each integer in ``args`` with its digits reversed."""
    for value in args:
        yield reve(value)
list(rev_nums([101, 201, 301, 401]))
iter = rev_nums([101, 201, 301, 401])
print(next(iter))
print(next(iter))
print(next(iter))
print(next(iter))
| 21. Generators/02. generator examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Cleaning FERC 1 Fuel data
# This notebook is meant to help get you understand the data cleaning process for FERC Form 1 fuel data for heat content and price per MMBTU data.
import sys
import os
sys.path.append(os.path.abspath(os.path.join('..','..')))
from pudl import pudl, ferc1, eia923, settings, constants
from pudl import models, models_ferc1, models_eia923
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
pudl_engine = pudl.connect_db()
# ## Define a function to clean data frames: What kind of errors are we fixing? Why is this kind of function a reasonable thing to do?
# Looking at some of the distributions of the data below, it becomes clear immediately that there are the data was originally entered in a variety of ways.
#
# Data has been entered in the different units, for example coal heat content values are entered in mmbtu per pound for some records while for most records it is entered in mmbtu per ton. Similarly we see data entered in different orders of magnitude, like gas heat content values entered in mmbtu per cubic foot rather than in mmbtu per thousand cubic foot.
#
# Since these data entry differences are separated by two or more orders of magnitude, we can often clearly delineate between populations. We can identify the primary distribution of values by looking at alternative sources, like the EIA, for the same data set and looking at physical properties of the fuels. For example, the average heat content of a ton of U.S. coal is roughly 20 mmbtu. We can identify the end points of the primary distrubtion in other data sources and apply that range of values to the FERC data to identify the primary distribtuion in the FERC data.
#
# Since data outside of the primary distribution is in most cases easily distinguishable and off by a an order of magnitude (e.g. 2000 for heat content per pound rather than per ton) we can with reasonable confidence use a small set of multipliers to bring outlying populations of data into the primary distribtuion, correcting for errors in how the data was entered.
#
# First let's pull in the applicable data from the PUDL FERC tables:
fuel_df = pd.read_sql('''SELECT * from fuel_ferc1''',pudl_engine)
coal = fuel_df[fuel_df.fuel=='coal']
gas = fuel_df[fuel_df.fuel=='gas']
oil = fuel_df[fuel_df.fuel=='oil']
# Then, we'll define a function that takes as arguments:
#
# - a data series to be cleaned
# - minimum and maximum values bounding the reasonable range of values for the series
# - multipliers (a list) to the minimum and maximum values are divided to define additional ranges of data, which are brought into the reasonable range by multiplying the values in the additional ranges by the multipliers (e.g. if a set of values is 1000 times too large to fit in the reasonable range, its multiplier will be .001).
#
# The function will return a cleaned series.
def fixit(tofix, min, max, mults):
    """Rescale out-of-range populations of ``tofix`` into [min, max].

    Keeps only strictly positive values, then for each multiplier brings
    the population lying in (min/mult, max/mult) into the target range by
    multiplying it by ``mult``.  Anything still outside [min, max]
    afterwards becomes NaN.  Returns a new Series; the input is untouched.
    """
    # NOTE: ``min``/``max`` shadow the builtins, but renaming them would
    # change the keyword-argument interface, so they are kept.
    fixed = tofix[tofix > 0].copy()
    for mult in mults:
        # Population entered at the wrong order of magnitude for this mult.
        in_band = (fixed > min / mult) & (fixed < max / mult)
        fixed = fixed.where(~in_band, fixed * mult)
    # Values that could not be brought into the accepted range -> NaN.
    in_range = (fixed >= min) & (fixed <= max)
    return fixed.where(in_range)
# We'll next define a function to show us graphs of what the data looks like before and after it is cleaned, and to show us how much data the cleaning process was not able to clean. This function will take 5 arguments:
#
# - a series of data to be cleaned
# - a series of data which has been cleaned by the cleaning function
# - the title (string) to apply to the before and after graphs
# - the x label (string) to apply to the before and after graphs
# - the y label (string) to apply to the before and after graphs
#
# The function will returned the before and after graphs and the percentage of how many values were not cleaned and were instead set to NaN.
#
# Our showfix function first displays unaltered populations on a log-log scale for two reasons. These populations are separated by orders of magnitude and it would be difficult to show them on the same graph otherwise. These populations are also often vastly different sizes: when one population has thousands of occurrences and the other tens, one is not able to see both without a logarithmic scale.
#
# The showfix function then displays the data once multipliers have been applied to the populations. This population necessarily lies in the same order of magnitude so a linear x axis scale makes sense for view. It's more intuitive for most to view the frequency of occurences on a linear scale as well so a linear y axis is used as well.
def showfix(tofix,fixed,title,xlabel,ylabel):
    """Plot the raw series (log-log) above the cleaned series (linear).

    Parameters
    ----------
    tofix : pandas.Series
        Original, uncleaned values (may contain out-of-range populations).
    fixed : pandas.Series
        Output of ``fixit`` for the same data; NaN marks values the
        cleaning could not rescue.
    title, xlabel, ylabel : str
        Labels applied to the subplots.

    Also annotates the figure with the percentage of values set to NaN.
    """
    # Bounds for the log-scale histogram; zero/negative values cannot be
    # shown on a log axis, so the minimum comes from positive values only.
    min_1 = tofix[tofix > 0].min()
    max_1 = tofix.max()
    # Share of values the cleaning step set to NaN, in percent.
    fraction_lost = ( fixed.isnull().sum() / len(fixed) ) * 100
    show_fraction = 'The percentage of values set to NaN is {} %'\
        .format(round(fraction_lost,2))
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 14,
            }
    fig_1 = plt.figure(figsize=(16,12))
    # Top subplot: raw data on log-log axes so populations separated by
    # orders of magnitude remain visible.
    plt.subplot(2,1,1)
    plt.title(title)
    plt.xlabel(xlabel, fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.grid()
    ax = fig_1.axes[0]
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.xlim(min_1,max_1)
    # Log-spaced bins give each decade comparable resolution.
    plt.hist(tofix, bins=np.logspace(np.log10(min_1), np.log10(max_1), 100))
    # Bottom subplot: cleaned data on linear axes (one population now).
    plt.subplot(2,1,2)
    plt.xlabel(xlabel, fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.grid()
    plt.hist(fixed.dropna(),bins=100)
    plt.text(0, .05, show_fraction, transform=fig_1.transFigure,fontdict=font)
    plt.show()
# We'll define a simple graphing function to help us make the graph and a flexible graphing function to help us make slightly more complex graphs later on.
def simplegraph(series,xlabel,ylabel,title,range1,range2):
    """Draw a 100-bin histogram of ``series`` restricted to
    [range1, range2] and display it.
    """
    plt.figure(figsize=(10,7))
    plt.title(title)
    plt.xlabel(xlabel, fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.hist(series, bins=100, range=(range1, range2))
    plt.show()
def flexiblegraph(series,xlabel,ylabel,title,yscale,range1,range2,bins):
    """Histogram with configurable y-axis scale, bin count and x range."""
    plt.figure(figsize=(10,7))
    plt.title(title)
    plt.xlabel(xlabel, fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.yscale(yscale)
    plt.hist(series, bins=bins, range=(range1, range2))
    plt.show()
# ## FERC Form 1 Cost per mmbtu
# Let's use these two functions to clean the cost per mmbtu of Form 1 coal and gas data, respectively.
#
# One would expect to find the main distribution of coal cost per mmbtu around $2 per mmbtu (per EIA monthly data https://www.eia.gov/electricity/monthly/epm_table_grapher.cfm?t=epmt_4_10_a and annual data https://www.eia.gov/electricity/annual/html/epa_07_04.html). Zooming in on this population, one sees that most of the values lie between .5 and 6 dollars per mmbtu.
simplegraph(coal.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Coal cost per mmbtu', .5,10)
# We can compare these results to EIA 923 data. We'll start by pulling the fuel receipts and costs data and converting the cost from cents per mmbtu to dollars per mmbtu. When weighted by the amount of fuel delivered we see that most values lie between .5 and 7 dollars. We'll use these values to define the bounds of the main population of FERC data.
fuel_rc = pd.read_sql('''SELECT fuel_quantity,energy_source, fuel_group, average_heat_content, fuel_cost, plant_id, report_date \
FROM fuel_receipts_costs_eia923''', con = pudl_engine)
fuel_rc['fuel_cost'] = fuel_rc['fuel_cost'] * .01
fuel_rc
fuel_rc.fuel_group.value_counts()
eia_rc_coal = fuel_rc[fuel_rc.fuel_group == 'Coal']
eia_rc_gas = fuel_rc[fuel_rc.fuel_group == 'Natural Gas']
eia_rc_oil = fuel_rc[fuel_rc.fuel_group == 'Petroleum']
plt.figure(figsize=(14,10))
plt.hist(eia_rc_coal.fuel_cost,range=(0,10),weights=eia_rc_coal.fuel_quantity,bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Coal, weighted by quantity delivered')
plt.ylabel('tons')
plt.xlabel('dollars per mmbtu')
# The population begins around .5.
plt.figure(figsize=(14,10))
plt.hist(eia_rc_coal.fuel_cost,range=(0,1),weights=eia_rc_coal.fuel_quantity,bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Coal, weighted by quantity delivered')
plt.ylabel('tons')
plt.xlabel('dollars per mmbtu')
# The population ends around 7.5.
plt.figure(figsize=(14,10))
plt.hist(eia_rc_coal.fuel_cost,range=(6,8),weights=eia_rc_coal.fuel_quantity,bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Coal, weighted by quantity delivered')
plt.ylabel('tons')
plt.xlabel('dollars per mmbtu')
# Returning our attention to the FERC data, there are two outlying populations:
# - one population around .002, which values are all associated with one utility, respondent_id 130.
# - one population around 200, which is off by a factor of 100, as the utility reported cost in cents per mmbtu, rather than dollars per mmbtu.
#
# Let's take a look at the population around .001. All of these values appear to be attributable to `respondent_id` 130.
simplegraph(coal.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Coal cost per mmbtu', .00001,.01)
coal[(coal.fuel_cost_per_mmbtu > 0) & (coal.fuel_cost_per_mmbtu < .004)]
# Let's take a look at the population around 200. The shape mirrors the primary distribution. These values were likely the result of a utility entering cost per mmbtu in cents rather than dollars.
simplegraph(coal.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Coal cost per mmbtu', 100,600)
# There is also a cluster of values around 2000 but it is unclear what is causing these values to be off by 1000. The shape does not resemble the primary distrubtion as much as the previous population.
simplegraph(coal.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Coal cost per mmbtu', 2000,6000)
# When we apply the two cleaning functions, we see a distribution with two peaks, which appear to correspond with the price premium subbituminous higher heat content coal versus lower heat content lignite coal.
# +
to_fix_cost_per_mmbtu_coal = coal.fuel_cost_per_mmbtu
fixed_cost_per_mmbtu_coal = fixit(coal.fuel_cost_per_mmbtu,.5,7.5,[.01,.001])
showfix(coal.fuel_cost_per_mmbtu,fixed_cost_per_mmbtu_coal,'FERC Coal: Cost per mmbtu',\
'dollars per mmbtu','number of occurences')
# -
# ### Gas cost per mmbtu
# As we turn our attention to natural gas, one expects to find the main population between roughly 2.5 and 15 dollars per mmbtu (per EIA Henry Hub data https://www.eia.gov/dnav/ng/hist/rngwhhdd.htm). One does see residential prices as high as nearly 40 dollars per mcf (in Hawaii! https://www.eia.gov/dnav/ng/ng_pri_sum_a_epg0_prs_dmcf_m.htm). Looking at the data near 5, one finds the main population begins after 2.5 and with a tail extending out to 40 dollars per mmbtu.
#
# For natural gas pricing, EIA 923 instructions instruct respondents to "include the following pipeline charges: fuel losses, transportation reservation charges, balancing costs, and distribution system costs outside of the plant. Because these types of fees can skew the cost of the fuel per MMBtu, provide an explanation." Therefore, we're likely to see some values above the 15 dollars per mmbtu Henry Hub maximum, although we shouldn't see consistently high pricing like we see in the residential data.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu', 0,150)
# When we look at EIA fuel cost data weighted by quantity delivered we see a clear drop off after 15 dollars per mcf and a tail of values going out to roughly 35.
plt.figure(figsize=(14,10))
plt.hist(eia_rc_gas.fuel_cost,range=(0,40),weights=eia_rc_gas.fuel_quantity,bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Natural Gas, weighted by quantity delivered')
plt.ylabel('mmbtu')
plt.xlabel('dollars per mmbtu')
# This tail is more apparent when fuel costs are not weighted by quantity delivered.
plt.figure(figsize=(14,10))
plt.hist(eia_rc_gas.fuel_cost,range=(0,40),bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Natural Gas')
plt.ylabel('mmbtu')
plt.xlabel('dollars per mcf')
# The population appears to begin around 1 - it's difficult to say exactly where to draw the line.
plt.figure(figsize=(14,10))
plt.hist(eia_rc_gas.fuel_cost,range=(0,5),weights=eia_rc_gas.fuel_quantity,bins=100)
plt.title('EIA fuel receipts and costs: fuel cost by fuel group: Natural Gas, weighted by quantity delivered')
plt.ylabel('mmbtu')
plt.xlabel('dollars per mcf')
# Zooming in.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu', .1,5)
# The distribution begins to drop off considerable after 12 dollars per mmbtu.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu', 10,20)
# The tail continues and drops off even further after roughly 50 dollars per mmbtu.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu', 15,150)
# When weighted by quantity burned, we see a much tighter distribution and values don't range past 35 dollars per mmbtu.
plt.figure(figsize=(14,10))
plt.hist(gas.fuel_cost_per_mmbtu,range=(10,35),weights=gas.fuel_qty_burned,bins=100)
plt.title('FERC Gas cost, weighted by quantity burned')
plt.ylabel('mcf')
plt.xlabel('dollars per mcf')
# The second population stretches from just after 250 to 4000 - off by a factor of 100. These values were likely the result of a utility entering cost per mmbtu in cents rather than dollars.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu', 250,3500)
# The third population is between .002 and .0175 - off by a factor of 1000. These values are likely the result of the utility entering the cost in mmbtu per cf rather than mmbtu per mcf.
simplegraph(gas.fuel_cost_per_mmbtu,'dollars per mmbtu','number of occurences', 'FERC Gas cost per mmbtu',.0001,.03)
# With the exception of two records, all of these values are also attributable to utility `respondent_id` 130.
gas[(gas.fuel_cost_per_mmbtu > .0001) & (gas.fuel_cost_per_mmbtu < .02)]
# Applying the two cleaning functions, we see a distribution peaking around 5 dollars per mmbtu with a tail out to 35 dollars per mmbtu.
# +
to_fix_cost_per_mmbtu_gas = gas.fuel_cost_per_mmbtu
fixed_cost_per_mmbtu_gas = fixit(gas.fuel_cost_per_mmbtu,1,35,[.01,1000])
showfix(to_fix_cost_per_mmbtu_gas,fixed_cost_per_mmbtu_gas,'FERC Gas: Cost per mmbtu',\
'dollars per mmbtu','number of occurences')
# -
# ### Oil cost per mmbtu
# The price of a barrel of oil has fluctuated between 30 and 145 dollars per barrel over the last decade (https://www.eia.gov/dnav/pet/hist/LeafHandler.ashx?n=PET&s=RWTC&f=D). With roughly 5.8 mmbtu per barrel, we'd expect to find the primary distribution of oil cost per mmbtu around 5 to 25 dollars per mmbtu.
#
# Let's first drop values at or below zero.
oil_cost_per_mmbtu = oil[oil.fuel_cost_per_mmbtu > 0]
simplegraph(oil_cost_per_mmbtu.fuel_cost_per_mmbtu,'dollars per mmbtu',\
'number of occurences', 'FERC Oil cost per mmbtu', 0,40)
# Looking at the EIA cost per mmbtu data, one sees a distribution roughly between 5 and 40. When looking at the same values weighted by quantity delivered, the range is narrower, from 5 to roughly 33.
simplegraph(eia_rc_oil.fuel_cost,'dollars per mmbtu',\
'number of occurences', 'EIA Oil cost per mmbtu', 0,50)
plt.figure(figsize=(14,10))
plt.hist(eia_rc_oil.fuel_cost,range=(2.5,40),weights=eia_rc_oil.fuel_quantity,bins=100)
plt.title('EIA Oil fuel cost per mmbtu weighted by quantity delivered')
plt.xlabel('dollars per mmbtu')
plt.ylabel('number of occurences')
# Therefore, we'll use 5 and 33 as the boundaries for the primary distribution when cleaning the FERC oil cost per mmbtu data.
# +
to_fix_cost_per_mmbtu_oil = oil.fuel_cost_per_mmbtu
fixed_cost_per_mmbtu_oil = fixit(oil.fuel_cost_per_mmbtu,5,33,[.01])
showfix(to_fix_cost_per_mmbtu_oil,fixed_cost_per_mmbtu_oil,'FERC Oil: Cost per mmbtu',\
'dollars per mmbtu','number of occurences')
# -
# Once again, it looks like there is an outlying population where utilities entered data in cents per mmbtu rather than dollars per mmbtu, so we'll use a multiplier of .01 to correct this data and bring it into the primary distribution.
simplegraph(oil.fuel_cost_per_mmbtu,'dollars per mmbtu',\
'number of occurences', 'FERC Oil cost per mmbtu', 500,4000)
# There's a small population of values near .02 where three utilities - including utility 130! - have reported their oil cost per mmbtu but the cost values are 1000 times too small.
simplegraph(oil.fuel_cost_per_mmbtu,'dollars per mmbtu',\
'number of occurences', 'FERC Oil cost per mmbtu', .005,.04)
oil[(oil.fuel_cost_per_mmbtu > .01) & (oil.fuel_cost_per_mmbtu < .1)]
| results/notebooks/ferc1/ferc1_fuel_cleaning/ferc1_fuel_cleaning_cost_per_mmbtu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LAB 07.02 - Neural networks
# !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()
from local.lib.rlxmoocapi import submit, session
session.LoginSequence(endpoint=init.endpoint, course_id=init.course_id, lab_id="L07.02", varname="student");
# +
import numpy as np
from IPython.display import Image
try:
# %tensorflow_version 2.x
print ("Using TF2 in Google Colab")
except:
pass
import tensorflow as tf
# -
# ## Task 1. Compute softmax output
#
# You have a classification problem with $n_c$ classes. You have a dataset of $m$ elements and you are given:
#
#
# - $\hat{\mathbf{y}} \in \mathbb{R}^{m\times n_c}$, the **logits** of the output layer of a neural network
#
# compute $\hat{\bar{\mathbf{y}}}$ the softmax output corresponding the given **logits**, as explained in the notes.
#
# **NOTE**: You will not be able to use tensorflow in your solution. The grader will penalize you if you do.
#
# **CHALLENGE**: Use a single line of Python code
def softmax(y_hat):
    """Row-wise softmax of a (m, nc) logits array.

    Fix: the original was a placeholder returning ``...`` (Ellipsis).
    The row max is subtracted before exponentiating for numerical
    stability; this does not change the result.
    """
    e = np.exp(y_hat - np.max(y_hat, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)
# check your code against Tensorflow. Your answer must be exactly the same
# +
m = np.random.randint(10)+10
nc = np.random.randint(5)+5
y_hat = np.round(np.random.normal(size=(m,nc)), 2)
y_hat
# -
my_output = softmax(y_hat)
my_output
tf_output = tf.nn.softmax(y_hat).numpy()
tf_output
# Compare our softmax against TensorFlow's implementation; must be True.
# (Fix: the original compared my_output with itself, which is always True.)
np.allclose(my_output, tf_output)
# **submit your code**
student.submit_task(globals(), task_id="task_01");
# ## Task 2: Compute cross entropy
#
# you are also given
#
# - $\mathbf{y}$, a vector of $m$ elements, each elemet being an integer between $0$ and $n_c-1$, corresponding to the labels of your dataset.
#
# Complete the following function to compute cross entropy as explained in the notes. Observe that you are given:
#
# - $\mathbf{y}$ with the original class labels. You will have to convert it to a one hot encoding.
# - $\hat{\bar{\mathbf{y}}}$ the softmax activations such as from the output on your previous function
#
# **NOTE**: You will not be able to use tensorflow in your solution. The grader will penalize you if you do.
#
# **SUGGESTION**: Use `np.log(expression+1e-10)` for any `expression` better than `np.log(expression)` since this will avoid numeric errors if `expression` evaluates to 0. Recall that `1e-10` is $10^{-10}$
#
# **CHALLENGE**: Use a single line of Python code
def cross_entropy(y, y_hatb):
    """Per-sample cross entropy: -log of the probability assigned to the
    true class.

    ``y`` holds integer labels in [0, nc); ``y_hatb`` is the (m, nc)
    softmax output.  The 1e-10 term guards against log(0), as suggested
    in the exercise.  Fix: the original was a placeholder returning
    ``...`` (Ellipsis).
    """
    return -np.log(y_hatb[np.arange(len(y)), y] + 1e-10)
# check your answer against **Tensorflow** corresponding implementation
# +
m = np.random.randint(10)+10
nc = np.random.randint(5)+5
y = np.random.randint(nc, size=m)
y_hatb = np.random.random(size=(m, nc))
y_hatb /= np.sum(y_hatb, axis=1).reshape(-1,1)
print (y)
print (y_hatb)
print (y_hatb.sum(axis=1))
# -
cross_entropy(y, y_hatb)
tf.keras.losses.sparse_categorical_crossentropy(y, y_hatb).numpy()
# **submit your code**
student.submit_task(globals(), task_id="task_02");
# ## Task 3: Compute multilayer perceptron network output
#
# You have
#
# - a dataset of $m$ elements and $n$ columns.
# - a classification problem with $n_c$ classes.
#
# And we build a multilayer perceptron with
# - two hidden layers with $h_1$ and $h_2$ neurons respectively and **tanh** activation.
# - one output layer with $n_c$ neurons with **softmax** activation.
#
# so that we have the following architecture
# +
Image("local/imgs/labmlp.png", width=500)
# -
# This way, we have the following set of weights:
#
# - $W_0 \in \mathbb{R}^{n\times h_1}$
# - $b_0 \in \mathbb{R}^{h_1}$
#
#
# - $W_1 \in \mathbb{R}^{h1\times h_2}$
# - $b_1 \in \mathbb{R}^{h_2}$
#
#
# - $W_2 \in \mathbb{R}^{h2\times n_c}$
# - $b_2 \in \mathbb{R}^{n_c}$
#
# Complete the function below so that it produces the following output:
#
# $$\text{softmax}(\text{tanh}(\text{tanh}(\mathbf{X}\cdot\mathbf{W}_0+b_0)\cdot\mathbf{W}_1+b_1)\cdot\mathbf{W}_2+b_2)$$
#
#
# **NOTE**: You will not be able to use tensorflow in your solution. The grader will penalize you if you do.
#
# **CHALLENGE**: Use a single line of Python code (not counting your previous `softmax` function).
def nn_output(X, W0, b0, W1, b1, W2, b2):
    """Forward pass of the two-hidden-layer MLP:
    tanh -> tanh -> softmax, with the output layer using W2/b2.

    Fix: the original stub contained a literal syntax error
    (``def softmax ....``) and returned nothing.
    """
    def softmax(z):
        # Numerically stable row-wise softmax.
        e = np.exp(z - np.max(z, axis=-1, keepdims=True))
        return e / np.sum(e, axis=-1, keepdims=True)
    return softmax(np.tanh(np.tanh(X @ W0 + b0) @ W1 + b1) @ W2 + b2)
# check your code with random input and weights against the implementation in **Tensorflow**
# +
m = np.random.randint(10)+10
n = np.random.randint(3)+3
nc = np.random.randint(5)+5
m = 10
n = 3
nc = 4
h1 = np.random.randint(5)+5
h2 = np.random.randint(5)+5
print (m,n,nc)
X = np.random.normal(size=(m,n))
W0 = np.random.normal(size=(n,h1))
b0 = np.random.normal(size=h1)
W1 = np.random.normal(size=(h1,h2))
b1 = np.random.normal(size=h2)
W2 = np.random.normal(size=(h2,nc))
b2 = np.random.normal(size=nc)
# -
# your function output
nn_output(X, W0, b0, W1, b1, W2, b2)
model = tf.keras.Sequential([
tf.keras.layers.Dense(h1, activation='tanh', dtype=tf.float64),
tf.keras.layers.Dense(h2, activation='tanh', dtype=tf.float64),
tf.keras.layers.Dense(nc, activation='softmax', dtype=tf.float64)
])
model.build(input_shape=[None, n])
model.set_weights([W0, b0, W1, b1, W2, b2])
# tensorflow output (must be exactly as yours)
model(X).numpy()
# **submit your code**
student.submit_task(globals(), task_id="task_03");
| content/LAB 07.02 - NEURAL NETWORKS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import os
import json
import pickle
root = "."
input_path = os.path.join(root, 'processed', 'dictionary_full_with_summaries.pkl')
# Use a context manager so the pickle file handle is closed promptly
# (the original opened the file and never closed it).
with open(input_path, 'rb') as file:
    dictionary = pickle.load(file)
df_dict = pd.DataFrame(dictionary)
parent_dir = "Error_Analysis"
sub_dir = "results_fulltext"
f = open(os.path.join(parent_dir,sub_dir,"fulltext_mst.json"))
js = json.load(f)
js
df_failure = pd.DataFrame(js['failure'])
df_success = pd.DataFrame(js['success'])
df_failure.head()
df_success.head()
columns = ['mention_name', 'mention_gold_cui', 'predicted_cui']
# look at 272 and 235 as good examples
df_dict['description'][df_dict['cui'] == '272'].iloc[0]
df_dict['title'][df_dict['cui'] == '272'].iloc[0]
df_failure[df_failure['mention_gold_cui'] == '272'].shape
df_failure[df_failure['mention_gold_cui'] == '272'][columns]
df_success[df_success['mention_gold_cui'] == '272'].shape
# Compute the boolean filter once instead of three times per iteration
# (the original re-filtered the whole DataFrame on every access).
successes_272 = df_success[df_success['mention_gold_cui'] == '272']
for i in range(successes_272.shape[0]):
    print(successes_272['mention_id'].iloc[i])
    print(successes_272['mention_name'].iloc[i])
    print()
| blink/analysis/Error_Analysis_fulltext_mst.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### 1. Revision control systems are
# ##### Ans: Useful even with a sole developer working on a project
# #### 2. Git was originally a creation of:
# ##### Ans: Linus Torvalds
# #### 3. Which statement is true?
# ##### Ans: With Git, conflicting contributions must be resolved by a human being
# #### 4. Which of the following commands will provide documentation on how to create a new branch with git (Select all answers that apply)?
# ##### Ans:
# - git branch --help
# - man git-branch
# - git help branch
# #### 5. Which of the following are Revision Control Systems? Select all answers that apply.
# ##### Ans:
# - Git
# - Subversion
# - CVS
| Coursera/Using Git for Distributed Development/Week-1/Quiz/Introduction-to-Git.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Flask + Flask-RESTful application skeleton for the recommendation service.
from flask import Flask
from flask_restful import Api, Resource, reqparse
import random
import pandas as pd
import os
app = Flask(__name__)
api = Api(app)
# +
# Build a pool of barcodes per brand+category; one is returned for each client_id.
path_to_repo = '/Users/kuznetsovnikita'
import_path = path_to_repo + '/recommendations/data/raw/'
# Drop the first column of the export; keep only rows whose id looks like a
# real barcode (more than 10 characters).
vygruz = pd.read_excel(import_path+'goods.xlsx').iloc[:,1:]
vygruz = vygruz.loc[vygruz.id.str.len() > 10]
# -
# Map (brand, category-group) -> list of integer barcodes.
id_pool = vygruz.groupby(['brand','Группа категорий']).agg(
    {'id':lambda x: list(x.astype(int))}).to_dict()['id']
# +
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '../src/data')
path_to_repo = '/Users/kuznetsovnikita'
# MongoDB connection string is kept outside the repo in a plain text file.
with open(path_to_repo+'/recommendations/src/data/mongodb_pass.txt', 'r') as file:
    path2 = file.read()
# -
from pymongo import MongoClient
import certifi
# +
client = MongoClient(path2, tlsCAFile=certifi.where())
current_db = client['spin_services']
orders = current_db['cs_cart_orders'].find()
# -
# Fetch the products of one order by its numeric _id.
# BUG FIX: the collection name previously started with a CYRILLIC 'с'
# ('сs_cart_orders'), silently querying an empty, nonexistent collection and
# making the [0] index raise; use the ASCII name created above.
# NOTE(review): `id` shadows the builtin -- kept for notebook compatibility.
id = '887'
import json
json.dumps(list(current_db['cs_cart_orders'].find({'_id':int(id)},{'products':1}))[0])
# Scratch cells exploring random selection from id_pool.
# NOTE(review): this first line uses `i` before it is assigned below --
# depends on out-of-order notebook execution.
len(id_pool[list(id_pool.keys())[i]])
# NOTE: randint's upper bound is INCLUSIVE, so these two calls can return an
# out-of-range index (IndexError); corrected with the -1 versions below.
i = random.randint(0, len(id_pool))
j = random.randint(0, len(id_pool[list(id_pool.keys())[i]]))
# random.randint(0, len(id_pool))
# random.randint(0, len(list(id_pool.keys())[i]))
i = random.randint(0, len(id_pool)-1)
j = random.randint(0, len(id_pool[list(id_pool.keys())[i]])-1)
rand_id = id_pool.get(list(id_pool.keys())[i])[j]
# id_pool[list(id_pool.keys())[i]], len(id_pool[list(id_pool.keys())[i]])
id_pool.get(list(id_pool.keys())[i])[j]
class Id(Resource):
    """REST resource that returns five randomly sampled product barcodes."""

    def get(self, client_id = 0):
        """Pick a random brand/category group, then a random barcode from it,
        five times. Returns (list_of_ids, 200)."""
        picks = []
        for _ in range(5):
            group_keys = list(id_pool.keys())
            group_idx = random.randint(0, len(id_pool) - 1)
            barcodes = id_pool[group_keys[group_idx]]
            item_idx = random.randint(0, len(barcodes) - 1)
            picks.append(barcodes[item_idx])
        return picks, 200
# Smoke-test the resource directly, then register it with the API.
id_ = Id()
id_.get()
# TODO(review): add_resource is called with no URL routes, so the resource is
# never exposed over HTTP -- supply a route, e.g. '/ids/<int:client_id>'.
api.add_resource(Id, )
| notebooks/flask_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# Pew American Trends Panel Wave 35 survey data.
df = pd.read_csv('ATP_W35.csv', low_memory=False)
#drop the survey experiment... i guess i dont have to, i just don't preprocess it
#df.drop(columns = df[104:166])
# -
df.columns.get_loc("V1_W35") #104
df.columns.get_loc("V4Q4_Refused_W35") #166
# target variable:
# SM10B_W35 (collapse into a binary variable); or, just B
#
#code target variable
import numpy as np
df["SM10B_W35"].value_counts(dropna = False)
# Recode: 2 -> 0 (binary yes/no) and NaN -> 98 (the "not asked" sentinel).
df.SM10B_W35.replace((2, np.NaN), (0, 98), inplace=True)
df["SM10B_W35"].value_counts(dropna = False)
y = df['SM10B_W35']
# +
from sklearn.preprocessing import OrdinalEncoder
# df1.educ.unique()
# edu_cats=['No HS','High school graduate', 'Some college','2-year', '4-year', 'Post-grad']
# enc_edu = OrdinalEncoder(categories = [edu_cats])
# enc_edu.fit(df[['educ']])
df.columns.get_loc("ECIMPA_W35")
# NOTE(review): the next line is broken -- DataFrame has no `.df` attribute and
# no `.unique()`; it raises AttributeError. Probably meant something like
# df.iloc[:, :5] per-column unique values -- TODO confirm intent.
df.df[:,:5].unique()
# -
df["YT4_W35"].value_counts(dropna = False)
# ordinal encoder:
#
#
#
#
# TC2A_W35
# TC4_W35 (but need to switch 2 and 3)
# TC5_W35 (but need to switch 2 and 3)
# TC6A_W35(but need to switch 2 and 3)
# TC6B_W35(but need to switch 2 and 3)
#
# SNSFRA_W35
# SNSFRB_W35
#
# SM1A_W35
# SM1B_W35
# SM1C_W35
# SM1D_W35
# SM1E_W35
# SM1F_W35
#
# SM2A_W35
# SM2B_W35
# SM2C_W35
# SM2D_W35
#
# SM4A_W35
# SM4B_W35
# SM4C_W35
# SM4D_W35
#
# SM5A_W35
# SM5B_W35
# SM5C_W35
# SM5D_W35
#
#
# SM6A (switch 2 and 3)
# SM6B (switch 2 and 3)
#
# SM7
#
# SM9A_W35
# SM9B_W35
# SM9C_W35
# SM9D_W35
# SM9E_W35
#
# SM12A_W35
# SM12B_W35
# SM12C_W35
#
# SM13A_W35
# SM13B_W35
# SM13C_W35
#
# SM14A_W35
# SM14B_W35
# SM14C_W35
# SM14D_W35
#
# FB1_W35
# FB2_W35
# FB3A_W35
#
# FB4A_W35
# FB4B_W35
#
#
#
#
#
#
#
# +
# Swap response codes 2 and 3 so these items are ordinal:
# TC4_W35, TC5_W35, TC6A_W35, TC6B_W35, SM6A_W35, SM6B_W35
# A dict-based replace performs the swap in one atomic pass. The previous
# version routed through a temporary code 5 (3->5, 2->3, 5->2) in 18
# copy-pasted statements; any genuine 5 already in the data would have been
# silently rewritten to 2.
for _col in ('TC4_W35', 'TC5_W35', 'TC6A_W35', 'TC6B_W35', 'SM6A_W35', 'SM6B_W35'):
    df[_col] = df[_col].replace({2: 3, 3: 2})
# +
# Sub-frame of all ordinal-coded survey items (post 2<->3 swap above).
ord_features = df[['TC2A_W35', 'TC4_W35', 'TC5_W35', 'TC6A_W35', 'TC6B_W35', 'SNSFRA_W35', 'SNSFRB_W35', 'SM1A_W35', 'SM1B_W35', 'SM1C_W35', 'SM1D_W35', 'SM1E_W35', 'SM1F_W35', 'SM2A_W35', 'SM2B_W35', 'SM2C_W35', 'SM2D_W35', 'SM4A_W35', 'SM4B_W35', 'SM4C_W35', 'SM4D_W35', 'SM5A_W35', 'SM5B_W35', 'SM5C_W35', 'SM5D_W35', 'SM6A_W35', 'SM6B_W35', 'SM7_W35', 'SM9A_W35', 'SM9B_W35', 'SM9C_W35', 'SM9D_W35', 'SM9E_W35', 'SM12A_W35', 'SM12B_W35', 'SM12C_W35', 'SM13A_W35', 'SM13B_W35', 'SM13C_W35', 'SM14A_W35', 'SM14B_W35', 'SM14C_W35', 'SM14D_W35', 'FB1_W35', 'FB2_W35', 'FB3A_W35', 'FB4A_W35', 'FB4B_W35']]
#df_ord = pd.DataFrame(data=df,columns = ord_features)
# +
#demographic variables
demo_feat = df[['Device_Type_W35', 'LANGUAGE_W35', 'FORM_W35', 'F_CREGION_FINAL', 'F_AGECAT_FINAL', 'F_SEX_FINAL', 'F_EDUCCAT_FINAL', 'F_EDUCCAT2_FINAL', 'F_HISP_RECRUITMENT', 'F_RACECMB_RECRUITMENT', 'F_RACETHN_RECRUITMENT', 'F_CITIZEN_RECODE_FINAL', 'F_MARITAL_FINAL', 'F_RELIG_FINAL', 'F_BORN_FINAL', 'F_ATTEND_FINAL', 'F_PARTY_FINAL', 'F_PARTYLN_FINAL', 'F_PARTYSUM_FINAL', 'F_INCOME_FINAL', 'F_INCOME_RECODE_FINAL', 'F_REG_FINAL', 'F_IDEO_FINAL', 'F_INTUSER_FINAL', 'F_VOLSUM_FINAL', 'WEIGHT_W35']]
# NOTE(review): `columns=demo_feat` passes a DataFrame; iterating it yields its
# column NAMES, so this works, but passing the list of names would be clearer.
df_demo = pd.DataFrame(data=df, columns = demo_feat)
# -
# one hot encoder:
#
# ALG1_W35
# TC2B_W35
# TC2C_W35
# TC3A_W35
# TC3B_W35
# TC3C_W35
# SM3_W35
# SM8A_W35
# SM8B_W35
# SM8C_W35
#
# SM10A_W35
# SM10C_W35
# SM10D_W35
# SM10E_W35
#
# SM11_W35
#
# FB3B_W35
# FB3C1_W35
# FB3C2_W35
# FB3C3_W35
# FB3C4_W35
# FB3C5_W35
# FB3C6_W35
#
# FB5A_W35
# FB5B_W35
# FB5C_W35
# FB5D_W35
#
#
# Fine:
# SNSA_W35
# SNSB_W35
# SNSE_W35
# SNSH_W35
# SNSJ_W35
# SNSL_W35
# SNSUSER_W35
# FB3C1_W35
# FB3C2_W35
# FB3C3_W35
# FB3C4_W35
# FB3C5_W35
# FB3C6_W35
#
#
#
#
# +
#categorical columns where I can replace NaN with 98 (99 is missing acrosss the board; 98 is not asked)
cat_columns = df[['ALG1_W35', 'TC2B_W35', 'TC2C_W35', 'TC3A_W35', 'TC3B_W35', 'TC3C_W35', 'SM3_W35', 'SM8A_W35', 'SM8B_W35', 'SM8C_W35','SM10A_W35','SM10C_W35', 'SM10D_W35', 'SM10E_W35', 'SM11_W35','FB3B_W35', 'FB3C1_W35', 'FB3C2_W35', 'FB3C3_W35', 'FB3C4_W35', 'FB3C5_W35', 'FB3C6_W35', 'FB5A_W35', 'FB5B_W35', 'FB5C_W35', 'FB5D_W35']]
from sklearn.impute import SimpleImputer
import numpy as np
# Fill NaN with the "not asked" sentinel 98 in every categorical column.
# Iterating the cat_columns DataFrame yields its column names.
simp_constant = SimpleImputer(fill_value=98,
                              missing_values=np.nan, strategy='constant')
for column in cat_columns:
    df[column] = simp_constant.fit_transform(df[[column]])
# +
# Additional categorical columns (kept as-is; no imputation needed).
add_cat_col = df[['SNSA_W35', 'SNSB_W35', 'SNSE_W35', 'SNSH_W35', 'SNSJ_W35', 'SNSL_W35', 'SNSUSER_W35', 'FB3C1_W35', 'FB3C2_W35', 'FB3C3_W35', 'FB3C4_W35', 'FB3C5_W35', 'FB3C6_W35']]
#df_cat = pd.DataFrame(data=df,columns = add_cat_col)
# +
from sklearn.preprocessing import OneHotEncoder
# One-hot encode the categorical survey items (same set as cat_columns, but
# re-selected from df AFTER imputation so the 98 sentinel gets its own dummy).
# BUG FIX: 'SM8C_W35' was listed twice and therefore encoded twice, producing
# duplicated dummy columns; the duplicate is removed.
enc = OneHotEncoder(sparse=False,handle_unknown='ignore')
df_oh1 = enc.fit_transform(df[['ALG1_W35', 'TC2B_W35', 'TC2C_W35', 'TC3A_W35', 'TC3B_W35', 'TC3C_W35', 'SM3_W35', 'SM8A_W35', 'SM8B_W35', 'SM8C_W35','SM10A_W35','SM10C_W35', 'SM10D_W35', 'SM10E_W35', 'SM11_W35','FB3B_W35', 'FB3C1_W35', 'FB3C2_W35', 'FB3C3_W35', 'FB3C4_W35', 'FB3C5_W35', 'FB3C6_W35', 'FB5A_W35', 'FB5B_W35', 'FB5C_W35', 'FB5D_W35']])
df_oh1 = pd.DataFrame(data=df_oh1)
# BUG FIX: print(df_oh1.head) printed the bound method object; call it.
print(df_oh1.head())
# -
# #YouTube... what to do about them?
# YT1A_W35
# YT1B_W35
# YT1C_W35
# YT1D_W35
#
# YT2_W35
#
# YT3A_W35
# YT3B_W35
# YT3C_W35
#
# YT4_W35
#
#
# YT5_W35
# +
#drop survey experiment - many NaN's
df.drop(columns=['ECIMPA_W35', 'ECIMPB_W35', 'ECIMPC_W35', 'ECIMPD_W35', 'ECIMPE_W35', 'ECIMPF_W35', 'ECIMPG_W35', 'ECIMPH_W35'],inplace=True)
#drop vignette experiment - many NaN's
# BUG FIX: this drop previously lacked inplace=True, so its result was
# discarded and the vignette columns were never removed. The trailing
# duplicate 'V3Q4_OTHER/DK/Refused' labels were also deduplicated.
# TODO(review): those duplicates were likely meant to be the V4Q4_* variants
# (V4Q4_NEG4/NEG5/OTHER/DK/Refused) -- confirm against the codebook before
# adding them.
df.drop(columns=['V1_W35', 'V1Q1_W35', 'V1Q2_W35', 'V1Q3_W35',
                 'V1Q4_POS1_W35', 'V1Q4_POS2_W35', 'V1Q4_POS3_W35', 'V1Q4_POS4_W35', 'V1Q4_POS5_W35',
                 'V1Q4_NEG1_W35', 'V1Q4_NEG2_W35', 'V1Q4_NEG3_W35', 'V1Q4_NEG4_W35', 'V1Q4_NEG5_W35',
                 'V1Q4_OTHER_W35', 'V1Q4_DK_W35', 'V1Q4_Refused_W35',
                 'V2Q1_W35', 'V2Q2_W35', 'V2Q3_W35',
                 'V2Q4_POS1_W35', 'V2Q4_POS2_W35', 'V2Q4_POS3_W35', 'V2Q4_POS4_W35', 'V2Q4_POS5_W35',
                 'V2Q4_NEG1_W35', 'V2Q4_NEG2_W35', 'V2Q4_NEG3_W35', 'V2Q4_NEG4_W35', 'V2Q4_NEG5_W35',
                 'V2Q4_DK_W35', 'V2Q4_OTHER_W35', 'V2Q4_Refused_W35', 'V2_W35',
                 'V3Q1_W35', 'V3Q2_W35', 'V3Q3_W35',
                 'V3Q4_POS1_W35', 'V3Q4_POS2_W35', 'V3Q4_POS3_W35', 'V3Q4_POS4_W35',
                 'V3Q4_NEG1_W35', 'V3Q4_NEG2_W35', 'V3Q4_NEG3_W35', 'V3Q4_NEG4_W35', 'V3Q4_NEG5_W35',
                 'V3Q4_OTHER_W35', 'V3Q4_DK_W35', 'V3Q4_Refused_W35',
                 'V4Q1_W35', 'V4Q2_W35', 'V4Q3_W35',
                 'V4Q4_POS1_W35', 'V4Q4_POS2_W35', 'V4Q4_POS3_W35', 'V4Q4_POS4_W35', 'V4Q4_POS5_W35',
                 'V4Q4_NEG1_W35', 'V4Q4_NEG2_W35', 'V4Q4_NEG3_W35'], inplace=True)
# +
# NOTE(review): df1 is an ALIAS of df (same object), not a copy -- every
# in-place change below affects both names.
df1 = df
# NOTE(review): the result of this drop is discarded (no inplace / no
# assignment), so cat_columns are NOT actually removed here.
df1.drop(columns=cat_columns)
#print(df1.head)
frames = [df1, df_oh1]
# Append the one-hot dummies alongside the original frame.
df2 = pd.concat(frames, axis = 1, sort=True)
print(df1.head)
# +
# df1.loc[:, 99] = np.nan
# df1.loc[:, 98] = np.nan
#df1.where((df1 < 98) | (df1 > 99), inplace=True)
# Treat the survey's "missing" sentinel 99 as NaN everywhere.
df.replace((99), (np.NaN), inplace=True)
#df.replace((98), (np.NaN), inplace=True)
# -
df1["SM10B_W35"].value_counts(dropna=False)
# +
#import matplotlib
from matplotlib import pylab as plt
# Full correlation matrix heatmap (all columns).
corr_matrix = df1.corr()
#print(corr_matrix)
plt.figure(figsize=(10,10))
plt.matshow(df.corr(),vmin=-1,vmax=1,cmap='seismic',fignum=0)
plt.colorbar(label='corr. coeff.')
plt.xticks(np.arange(df.corr().shape[0]),list(df.corr().columns),rotation=90)
plt.yticks(np.arange(df.corr().shape[0]),list(df.corr().columns))
plt.tight_layout()
#plt.savefig('figures/corr_coeff.png',dpi=300)
plt.show()
# +
# Top-10 columns by absolute correlation with the target SM10B_W35.
corrmat = df1.corr()
all_cols = np.abs(corrmat).sort_values('SM10B_W35',ascending=False)['SM10B_W35'].index
cols = all_cols[:10]
cm = corrmat.loc[cols,cols]
plt.figure(figsize=(10,10))
plt.matshow(cm,vmin=-1,vmax=1,cmap='seismic',fignum=0)
plt.colorbar(label='corr. coeff.')
plt.xticks(np.arange(cm.shape[0]),list(cols),rotation=90)
plt.yticks(np.arange(cm.shape[0]),list(cols))
plt.tight_layout()
#plt.savefig('corr_coeff_nan.png',dpi=300)
plt.show()
# -
# SM12B_W35 (and C) are backwards coded
#
#
# Also treat the "not asked" sentinel 98 as NaN from here on.
df1.replace((98), (np.NaN), inplace=True)
df1["FB3C6_W35"].value_counts(dropna=False)
# +
# Same heatmap for the 20 columns most (signed) correlated with the target.
corrmat = df1.corr()
all_cols = corrmat.sort_values('SM10B_W35',ascending=False)['SM10B_W35'].index
cols = all_cols[:20] # least correlated features
cm = corrmat.loc[cols,cols]
plt.figure(figsize=(10,10))
plt.matshow(cm,vmin=-1,vmax=1,cmap='seismic',fignum=0)
plt.colorbar(label='corr. coeff.')
plt.xticks(np.arange(cm.shape[0]),list(cols),rotation=90)
plt.yticks(np.arange(cm.shape[0]),list(cols))
plt.tight_layout()
#plt.savefig('corr_coeff_nan.png',dpi=300)
plt.show()
# +
import pandas as pd
# Class balance of the binary target.
pd.value_counts(df1['SM10B_W35'],normalize=True).plot.bar()
plt.ylabel('count')
plt.show()
# df.plot.bar('SM10B_W35', 'FB1_W35', cmap = 'RdBu')
# #plt.savefig('figures/scatter.png',dpi=300)
# plt.show()
df1[['SM10B_W35','FB1_W35']].boxplot(by='SM10B_W35')
plt.ylabel('Posting')
#plt.savefig('figures/boxplot.png',dpi=300)
plt.show()
# -
df.plot.scatter('TC2A_W35','FB1_W35', c='SM10B_W35', cmap = 'RdBu') # the color and size of each point can also be defined
#plt.savefig('figures/scatter.png',dpi=300)
plt.show()
# +
# import numpy as np
# df.FB1_W35.replace((99), (np.NaN), inplace=True)
df1["TC2A_W35"].value_counts(dropna=False)
# -
#df1.SM10B_W35.replace(98, np.NaN, inplace=True)
df1["SM10B_W35"].value_counts(dropna=False)
# +
#F_REG_FINAL
# Target rate by self-reported voter registration (counts and row-normalized).
count_matrix = df1.groupby(['F_REG_FINAL', 'SM10B_W35']).size().unstack()
print(count_matrix)
count_matrix_norm = count_matrix.div(count_matrix.sum(axis=1),axis=0)
print(count_matrix_norm)
count_matrix_norm.plot(kind='bar', stacked=True)
plt.ylabel('Fraction of people who posted about politics')
plt.xlabel('Self-reported voter registration')
plt.legend(loc=4)
#plt.savefig('figures/stacked_bar.png',dpi=300)
plt.show()
count_matrix.plot(kind='bar', stacked=True)
plt.ylabel('Fraction of people who posted about politics')
plt.xlabel('Self-reported voter registration')
plt.legend(loc=4)
#plt.savefig('figures/stacked_bar.png',dpi=300)
plt.show()
# +
# Target rate by trust in technology companies (TC2A_W35).
count_matrix = df1.groupby(['TC2A_W35', 'SM10B_W35']).size().unstack()
print(count_matrix)
count_matrix_norm = count_matrix.div(count_matrix.sum(axis=1),axis=0)
print(count_matrix_norm)
count_matrix_norm.plot(kind='bar', stacked=True)
plt.ylabel('Fraction of people who posted about politics')
plt.xlabel('Trust in technology companies')
plt.legend(loc=4)
plt.savefig('tech_trust_norm.png',dpi=300)
plt.show()
count_matrix.plot(kind='bar', stacked=True)
plt.ylabel('Fraction of people who posted about politics')
plt.xlabel('Trust in technology companies')
plt.legend(loc=4)
plt.savefig('tech_trust_count.png',dpi=300)
plt.show()
# -
df1[['SM10B_W35', 'TC2A_W35']].boxplot(by='TC2A_W35')
plt.ylabel('SM10B_W35')
#plt.savefig('figures/boxplot.png',dpi=300)
plt.show()
# +
# violin plot
# dataset = [df1[df1['SM10B_W35']==0]['F_AGECAT_FINAL'].values,
#            df1[df1['SM10B_W35']==1]['F_AGECAT_FINAL'].values]
# plt.violinplot(dataset = dataset)
# plt.xticks([1,2],['0','1'])
# plt.ylabel('label')
# plt.ylabel('age')
# plt.show()
df[['F_AGECAT_FINAL','SM10B_W35']].boxplot(by='SM10B_W35')
plt.ylabel('F_AGECAT_FINAL')
#plt.savefig('figures/boxplot.png',dpi=300)
plt.show()
# +
# NOTE(review): 'HouseStyle'/'SalePrice' do not exist in this survey frame --
# this cell looks copied from a house-prices exercise and will raise KeyError.
categories = df1['HouseStyle'].unique()
bin_range = (df['SalePrice'].min(),df['SalePrice'].max())
for c in categories:
    plt.hist(df[df['HouseStyle']==c]['SalePrice'],alpha=0.5,label=c,range=bin_range,bins=50)
plt.legend()
plt.ylabel('count')
plt.xlabel('SalePrice')
plt.tight_layout()
plt.savefig('figures/histo.png',dpi=300)
plt.show()
| src/Pew_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Tensor Field Networks
#
# Implementation of missing point experiment
# %load_ext autoreload
# %autoreload 2
from __future__ import division
import random
import numpy as np
import tensorflow as tf
import tensorfieldnetworks.layers as layers
import tensorfieldnetworks.utils as utils
from tensorfieldnetworks.utils import EPSILON, FLOAT_TYPE
tf.reset_default_graph()
training_set_size = 1000
from ase.db import connect
# Load QM9 molecules from an ASE database, split by molecule size:
# train-like (5-18 atoms) and three held-out size buckets (19, 23, 24-29).
with connect('qm9.db') as conn:
    qm9_coords = []
    qm9_atoms = []
    qm9_test_coords = []
    qm9_test_atoms = []
    qm9_test_23_coords = []
    qm9_test_23_atoms = []
    qm9_test_29_coords = []
    qm9_test_29_atoms = []
    for atoms in conn.select('4<natoms<=18', limit=training_set_size):
        qm9_coords.append(atoms.positions)
        qm9_atoms.append(atoms.numbers)
    for atoms in conn.select('natoms=19', limit=training_set_size):
        qm9_test_coords.append(atoms.positions)
        qm9_test_atoms.append(atoms.numbers)
    for atoms in conn.select('natoms=23', limit=training_set_size):
        qm9_test_23_coords.append(atoms.positions)
        qm9_test_23_atoms.append(atoms.numbers)
    for atoms in conn.select('24<natoms<=29', limit=training_set_size):
        qm9_test_29_coords.append(atoms.positions)
        qm9_test_29_atoms.append(atoms.numbers)
# Fixed ordering of the atomic numbers present in the training split.
atom_order = list(set(np.concatenate(qm9_atoms)))
num_atom_types = len(atom_order)
def atom_type_to_one_hot(atom_numbers, atom_order):
    """Encode each atomic number as a one-hot list over atom_order.

    Returns a list (same length as atom_numbers) of 0/1 lists whose hot
    index is the position of that atomic number within atom_order.
    Raises KeyError for numbers not present in atom_order.
    """
    n_types = len(atom_order)
    index_of = {atom: pos for pos, atom in enumerate(atom_order)}
    encoded = []
    for number in atom_numbers:
        row = [0] * n_types
        row[index_of[number]] = 1
        encoded.append(row)
    return encoded
# One-hot encode atom types for every split.
qm9_one_hot = list(map(lambda x: atom_type_to_one_hot(x, atom_order), qm9_atoms))
qm9_test_one_hot = list(map(lambda x: atom_type_to_one_hot(x, atom_order), qm9_test_atoms))
qm9_test_23_one_hot = list(map(lambda x: atom_type_to_one_hot(x, atom_order), qm9_test_23_atoms))
qm9_test_29_one_hot = list(map(lambda x: atom_type_to_one_hot(x, atom_order), qm9_test_29_atoms))
# radial basis functions
# Gaussian RBF centers evenly spaced on [rbf_low, rbf_high] Angstroms.
rbf_low = 0.
rbf_high = 2.5
rbf_count = 4
rbf_spacing = (rbf_high - rbf_low) / rbf_count
centers = tf.cast(tf.lin_space(rbf_low, rbf_high, rbf_count), FLOAT_TYPE)
# +
# TF1-style static graph: embedding -> 3 tensor-field layers -> per-atom
# probability, coordinate vote, and atom-type prediction.
# [N, 3]
r = tf.placeholder(FLOAT_TYPE, shape=(None, 3))
# [N, num_types]
one_hot = tf.placeholder(FLOAT_TYPE, shape=(None, num_atom_types))
# [N, N, 3]
rij = utils.difference_matrix(r)
# [N, N, 3]
unit_vectors = rij / tf.expand_dims(tf.norm(rij, axis=-1) + EPSILON, axis=-1)
dij = utils.distance_matrix(r)
# rbf : [N, N, rbf_count]
gamma = 1. / rbf_spacing
rbf = tf.exp(-gamma * tf.square(tf.expand_dims(dij, axis=-1) - centers))
layer_dims = [15, 15, 15, 1]
# EMBEDDING
# [N, layer1_dim, 1]
with tf.variable_scope(None, 'embed', values=[one_hot]):
    embed = layers.self_interaction_layer_with_biases(tf.reshape(one_hot, [-1, num_atom_types, 1]), layer_dims[0])
    input_tensor_list = {0: [embed]}
# LAYERS 1-3
num_layers = len(layer_dims) - 1
for layer in range(num_layers):
    layer_dim = layer_dims[layer + 1]
    with tf.variable_scope(None, 'layer' + str(layer), values=[input_tensor_list]):
        input_tensor_list = layers.convolution(input_tensor_list, rbf, unit_vectors)
        input_tensor_list = layers.concatenation(input_tensor_list)
        # Branch off an atom-type head at the last layer.
        if layer == num_layers - 1:
            with tf.variable_scope(None, 'atom_types', values=[input_tensor_list[0]]):
                atom_type_list = layers.self_interaction({0: input_tensor_list[0]}, num_atom_types)
        input_tensor_list = layers.self_interaction(input_tensor_list, layer_dim)
        if layer < num_layers - 1:
            with tf.variable_scope(None, 'nonlinearity', values=[input_tensor_list]):
                input_tensor_list = layers.nonlinearity(input_tensor_list, nonlin=utils.ssp)
probabilty_scalars = input_tensor_list[0][0]
missing_coordinates = input_tensor_list[1][0]
atom_type_scalars = atom_type_list[0][0]
# [N]
p = tf.nn.softmax(tf.squeeze(probabilty_scalars))
# [N, 3], when layer3_dim == 1
output = tf.squeeze(missing_coordinates)
# votes : [N, 3]
votes = r + output
# guess : [3]
# Probability-weighted average of the per-atom votes.
guess_coord = tf.tensordot(p, votes, [[0], [0]])
# guess_coord = tf.einsum('a,ai->i', p, votes)
guess_atom = tf.tensordot(p, tf.squeeze(atom_type_scalars), [[0], [0]])
# guess_atom = tf.einsum('a,ai->i', p, tf.squeeze(atom_type_scalars))
# missing_point [3]
missing_point = tf.placeholder(FLOAT_TYPE, shape=(3))
missing_atom_type = tf.placeholder(FLOAT_TYPE, shape=(num_atom_types))
# loss : []
# l2_loss is 0.5 * sum of squares; coordinate and type terms are summed.
loss = tf.nn.l2_loss(missing_point - guess_coord)
loss += tf.nn.l2_loss(missing_atom_type - guess_atom)
# -
sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# Restore pre-trained weights instead of initializing.
saver = tf.train.Saver()
saver.restore(sess, "miniteacup/experiments/paper_tmp/qm9_model_50.ckpt")
def _predict_missing_points(coords_list, one_hot_list, min_atoms=0):
    """Leave-one-out evaluation of the restored missing-point model.

    For each molecule, removes every atom in turn, feeds the remaining atoms
    through the graph, and records one result row per removal:
    [new_shape, removed_point, removed_types, loss, guess_point, guess_type,
     votes, probs].
    Molecules with fewer than min_atoms atoms are skipped (the graph fails on
    very small shapes -- previously only applied to the training split).
    This replaces four copy-pasted loops that differed only in their inputs.
    """
    results = []
    for shape, types in zip(coords_list, one_hot_list):
        if len(shape) < min_atoms:
            # Shape stuff fails with shape length of 2 -- skipping for now
            continue
        for remove_index in range(len(shape)):
            new_shape = np.delete(shape, remove_index, 0)
            new_types = np.delete(types, remove_index, 0)
            removed_point = shape[remove_index]
            removed_types = types[remove_index]
            loss_value, guess_point, guess_type, votes_points, probs = sess.run(
                [loss, guess_coord, guess_atom, votes, p],
                feed_dict={r: new_shape,
                           missing_point: removed_point,
                           missing_atom_type: removed_types,
                           one_hot: new_types})
            results.append([new_shape, removed_point, removed_types, loss_value,
                            guess_point, guess_type, votes_points, probs])
    return results

guesses = _predict_missing_points(qm9_coords, qm9_one_hot, min_atoms=3)
test_guesses = _predict_missing_points(qm9_test_coords, qm9_test_one_hot)
test_23_guesses = _predict_missing_points(qm9_test_23_coords, qm9_test_23_one_hot)
test_29_guesses = _predict_missing_points(qm9_test_29_coords, qm9_test_29_one_hot)
# Sort each split's rows by descending loss (row index 3 is the loss).
sort_guesses = list(sorted(guesses, key=lambda x: -x[3]))
sort_test_guesses = sorted(test_guesses, key=lambda x: -x[3])
sort_test_23_guesses = sorted(test_23_guesses, key=lambda x: -x[3])
sort_test_29_guesses = sorted(test_29_guesses, key=lambda x: -x[3])
# number of predictions
print(len(sort_guesses))
print(len(sort_test_guesses))
print(len(sort_test_23_guesses))
print(len(sort_test_29_guesses))
# This should be the same as what's output during training for validation
# RMS of the loss; the factor 2 undoes l2_loss's 0.5 scaling.
print("train", np.sqrt(2 * np.sum(np.array(sort_guesses)[:,3]) / len(sort_guesses)))
print("test19", np.sqrt(2 * np.sum(np.array(sort_test_guesses)[:,3]) / len(sort_test_guesses)))
print("test23", np.sqrt(2 * np.sum(np.array(sort_test_23_guesses)[:,3]) / len(sort_test_23_guesses)))
print("test29", np.sqrt(2 * np.sum(np.array(sort_test_29_guesses)[:,3]) / len(sort_test_29_guesses)))
# Euclidean distance between true (col 1) and predicted (col 4) coordinates
# for the train split.
# BUG FIX: this was computed from the UNSORTED `guesses` list while
# `test_18_atom_type` (and the other splits' distances) use the sorted lists;
# zipping the two mis-paired rows in every joint distance/atom-type metric.
# Using sort_guesses keeps all train-split arrays in the same row order.
sort_test_18_dist = np.linalg.norm(
    np.vstack(np.array(sort_guesses)[:,1].tolist()) -\
    np.vstack(np.array(sort_guesses)[:,4].tolist()), axis=-1)
# Coordinate error (true vs predicted position) for the held-out splits.
sort_test_19_dist = np.linalg.norm(
    np.vstack(np.array(sort_test_guesses)[:,1].tolist()) -\
    np.vstack(np.array(sort_test_guesses)[:,4].tolist()), axis=-1)
sort_test_23_dist = np.linalg.norm(
    np.vstack(np.array(sort_test_23_guesses)[:,1].tolist()) -\
    np.vstack(np.array(sort_test_23_guesses)[:,4].tolist()), axis=-1)
sort_test_29_dist = np.linalg.norm(
    np.vstack(np.array(sort_test_29_guesses)[:,1].tolist()) -\
    np.vstack(np.array(sort_test_29_guesses)[:,4].tolist()), axis=-1)
# True or False for correct atom type
# Compare argmax of the true one-hot (col 2) with argmax of the prediction (col 5).
test_18_atom_type = np.equal(np.argmax(np.vstack(np.array(sort_guesses)[:,2].tolist()), axis=-1),
                             np.argmax(np.vstack(np.array(sort_guesses)[:,5].tolist()), axis=-1))
test_19_atom_type = np.equal(np.argmax(np.vstack(np.array(sort_test_guesses)[:,2].tolist()), axis=-1),
                             np.argmax(np.vstack(np.array(sort_test_guesses)[:,5].tolist()), axis=-1))
test_23_atom_type = np.equal(np.argmax(np.vstack(np.array(sort_test_23_guesses)[:,2].tolist()), axis=-1),
                             np.argmax(np.vstack(np.array(sort_test_23_guesses)[:,5].tolist()), axis=-1))
test_29_atom_type = np.equal(np.argmax(np.vstack(np.array(sort_test_29_guesses)[:,2].tolist()), axis=-1),
                             np.argmax(np.vstack(np.array(sort_test_29_guesses)[:,5].tolist()), axis=-1))
# Map each row's true one-hot back to its atomic number.
onehot_to_number = lambda x: atom_order[x]
atoms_18 = list(map(lambda x: onehot_to_number(x),
                    list(map(lambda x: np.argmax(x), np.array(sort_guesses)[:,2].tolist()))))
atoms_19 = list(map(lambda x: onehot_to_number(x),
                    list(map(lambda x: np.argmax(x), np.array(sort_test_guesses)[:,2].tolist()))))
atoms_23 = list(map(lambda x: onehot_to_number(x),
                    list(map(lambda x: np.argmax(x), np.array(sort_test_23_guesses)[:,2].tolist()))))
atoms_29 = list(map(lambda x: onehot_to_number(x),
                    list(map(lambda x: np.argmax(x), np.array(sort_test_29_guesses)[:,2].tolist()))))
# Accuracy by atom
# A prediction counts as correct when the coordinate error is under acc_dist
# (0.5 A) AND the atom type matches; reported per element per split.
acc_dist = 0.5
for atom_int, atom_name in zip([1, 6, 7, 8, 9], ['H', 'C', 'N', 'O', 'F']):
    print(atom_name)
    len_18 = len(list(filter(lambda x: x == atom_int, atoms_18)))
    len_19 = len(list(filter(lambda x: x == atom_int, atoms_19)))
    len_23 = len(list(filter(lambda x: x == atom_int, atoms_23)))
    len_29 = len(list(filter(lambda x: x == atom_int, atoms_29)))
    if len_18 > 0:
        print("5-18", "%.1f" % (len(list(
            filter(lambda x: x[0] < acc_dist and x[1] and x[2] == atom_int,
                   zip(sort_test_18_dist, test_18_atom_type, atoms_18)))) /\
            len_18 * 100),
            len_18)
    else:
        print(None)
    if len_19 > 0:
        print("19", "%.1f" % (len(list(
            filter(lambda x: x[0] < acc_dist and x[1] and x[2] == atom_int,
                   zip(sort_test_19_dist, test_19_atom_type, atoms_19)))) /\
            len_19 * 100),
            len_19)
    else:
        print(None)
    if len_23 > 0:
        print("23", "%.1f" % (len(list(
            filter(lambda x: x[0] < acc_dist and x[1] and x[2] == atom_int,
                   zip(sort_test_23_dist, test_23_atom_type, atoms_23)))) /\
            len_23 * 100),
            len_23)
    else:
        print(None)
    if len_29 > 0:
        print("24-29", "%.1f" % (len(list(
            filter(lambda x: x[0] < acc_dist and x[1] and x[2] == atom_int,
                   zip(sort_test_29_dist, test_29_atom_type, atoms_29)))) /\
            len_29 * 100),
            len_29)
    else:
        print(None)
# Accuracy over all predictions
print(len(list(
    filter(lambda x: x[0] < 0.5 and x[1],
           zip(sort_test_18_dist, test_18_atom_type)))) / len(list(sort_test_18_dist)))
print(len(list(
    filter(lambda x: x[0] < 0.5 and x[1],
           zip(sort_test_19_dist, test_19_atom_type)))) / len(list(sort_test_19_dist)))
print(len(list(
    filter(lambda x: x[0] < 0.5 and x[1],
           zip(sort_test_23_dist, test_23_atom_type)))) / len(list(sort_test_23_dist)))
print(len(list(
    filter(lambda x: x[0] < 0.5 and x[1],
           zip(sort_test_29_dist, test_29_atom_type)))) / len(list(sort_test_29_dist)))
# MAE by atom
# Mean coordinate error per element per split.
acc_dist = 0.5
for atom_int, atom_name in zip([1, 6, 7, 8, 9], ['H', 'C', 'N', 'O', 'F']):
    print(atom_name)
    len_18 = len(list(filter(lambda x: x == atom_int, atoms_18)))
    len_19 = len(list(filter(lambda x: x == atom_int, atoms_19)))
    len_23 = len(list(filter(lambda x: x == atom_int, atoms_23)))
    len_29 = len(list(filter(lambda x: x == atom_int, atoms_29)))
    if len_18 > 0:
        print("5-18", "%.2f" % np.mean(np.array(list(filter(lambda x: x[1] == atom_int,
                                                            list(zip(sort_test_18_dist,
                                                                     atoms_18)))))[:,0]))
    else:
        print(None)
    if len_19 > 0:
        print("19", "%.2f" % np.mean(np.array(list(filter(lambda x: x[1] == atom_int,
                                                          list(zip(sort_test_19_dist,
                                                                   atoms_19)))))[:,0]))
    else:
        print(None)
    if len_23 > 0:
        print("23", "%.2f" % np.mean(np.array(list(filter(lambda x: x[1] == atom_int,
                                                          list(zip(sort_test_23_dist,
                                                                   atoms_23)))))[:,0]))
    else:
        print(None)
    if len_29 > 0:
        print("24-29", "%.2f" % np.mean(np.array(list(filter(lambda x: x[1] == atom_int,
                                                             list(zip(sort_test_29_dist,
                                                                      atoms_29)))))[:,0]))
    else:
        print(None)
# MAE for distance
print(np.mean(sort_test_18_dist))
print(np.mean(sort_test_19_dist))
print(np.mean(sort_test_23_dist))
print(np.mean(sort_test_29_dist))
# True or False for correct atom type
# L2 distance between true and predicted one-hot vectors (soft type error).
test_18_atom_type_vector = np.linalg.norm(np.vstack(np.array(sort_guesses)[:,2].tolist()) -\
                                          np.vstack(np.array(sort_guesses)[:,5].tolist()), axis=-1)
test_19_atom_type_vector = np.linalg.norm(np.vstack(np.array(sort_test_guesses)[:,2].tolist()) -\
                                          np.vstack(np.array(sort_test_guesses)[:,5].tolist()), axis=-1)
test_23_atom_type_vector = np.linalg.norm(np.vstack(np.array(sort_test_23_guesses)[:,2].tolist()) -\
                                          np.vstack(np.array(sort_test_23_guesses)[:,5].tolist()), axis=-1)
test_29_atom_type_vector = np.linalg.norm(np.vstack(np.array(sort_test_29_guesses)[:,2].tolist()) -\
                                          np.vstack(np.array(sort_test_29_guesses)[:,5].tolist()), axis=-1)
# Accuracy of atom type (binary)
print(float(np.count_nonzero(test_18_atom_type)) / test_18_atom_type.shape[0])
print(float(np.count_nonzero(test_19_atom_type)) / test_19_atom_type.shape[0])
print(float(np.count_nonzero(test_23_atom_type)) / test_23_atom_type.shape[0])
print(float(np.count_nonzero(test_29_atom_type)) / test_29_atom_type.shape[0])
# MAE atom type
print(np.mean(test_18_atom_type_vector))
print(np.mean(test_19_atom_type_vector))
print(np.mean(test_23_atom_type_vector))
print(np.mean(test_29_atom_type_vector))
# Accuracy by distance
# Fraction of predictions within 0.5 A of the true position (type ignored).
print(len(list(filter(lambda x: x < 0.5, sort_test_18_dist))) / len(list(sort_test_18_dist)))
print(len(list(filter(lambda x: x < 0.5, sort_test_19_dist))) / len(list(sort_test_19_dist)))
print(len(list(filter(lambda x: x < 0.5, sort_test_23_dist))) / len(list(sort_test_23_dist)))
print(len(list(filter(lambda x: x < 0.5, sort_test_29_dist))) / len(list(sort_test_29_dist)))
| missing_point_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="ddS2ZoUzyFKK"
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# -
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2"
# !wget http://ufldl.stanford.edu/housenumbers/train_32x32.mat
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="9hjOmqPK9VFh" outputId="0fe03253-0045-485f-d511-498f833b6383"
# !wget http://ufldl.stanford.edu/housenumbers/extra_32x32.mat
# + colab={} colab_type="code" id="BDfVRAPW9mD1"
# ls
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="X-7jWS6Pzk6h" outputId="3cfa1a8c-9f2b-48cb-b47b-3cb02bc78246"
# !wget http://ufldl.stanford.edu/housenumbers/test_32x32.mat
# + colab={} colab_type="code" id="LnWpgWtR1sF0"
from PIL import Image
import numpy
def convert_img_square(im_pth='', dest_path='', desired_size=224):
    """Scale the image at im_pth so its longer side equals desired_size.

    The aspect ratio is preserved (each side is truncated to int after
    scaling); the result is converted to RGB and saved to dest_path.
    Always returns True.
    """
    source = Image.open(im_pth)
    width, height = source.size  # PIL size is (width, height)
    scale = float(desired_size) / max(width, height)
    scaled = source.resize((int(width * scale), int(height * scale)))
    scaled.convert('RGB').save(dest_path)
    return True
# path = 'train/0/twinjet_s_001442.png'
# dest_path = 't1/test4.jpg'
# orig_arr = convert_img_square(path, dest_path, 499)
# #convert to RGB and Save
# # orig_arr = orig_arr.convert('RGB')
# # orig_arr.save('t1/test2.jpg')
# from IPython.display import Image
# Image(filename='t1/test4.jpg')
# + colab={} colab_type="code" id="1hmmIZ9jbjff"
# + colab={} colab_type="code" id="CrFKG9Sgbs8A"
# + colab={} colab_type="code" id="SvVh5O9kbnUt"
# + colab={} colab_type="code" id="igXkdJTE1v5-"
# ls -l
# + colab={} colab_type="code" id="MtF1nCmV1zw9"
import os
import numpy as np
import struct
import scipy.io as sio
import matplotlib.pyplot as plt

def save_svhn(mat_path="train_32x32.mat", dir_name="./svhn_train"):
    """Export an SVHN .mat archive to per-label folders of PNG images.

    Args (new, defaulted so the original no-arg call still works):
        mat_path: path to the SVHN cropped-digits .mat file.
        dir_name: output root; one sub-folder is created per digit label.
    Existing image files are left untouched.
    """
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    print("Loading matlab train of SVHN")
    mat = sio.loadmat(mat_path)
    data = mat['X']       # image stack, last axis indexes images
    labels = mat['y']     # per-image digit labels
    for i in range(data.shape[3]):
        # Build each path once instead of re-joining it three times.
        out_dir = os.path.join(dir_name, str(labels[i][0]))
        out_path = os.path.join(out_dir, "%05d.png" % i)
        if not os.path.isfile(out_path):
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(out_dir, exist_ok=True)
            plt.imsave(out_path, data[..., i])
    print("Program done!")

save_svhn()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="wCI0358H14H8" outputId="b468ab4c-48c3-49d5-b900-0ad1626aec62"
import os
import numpy as np
import struct
import scipy.io as sio
import matplotlib.pyplot as plt
def save_svhn(dir_name="./svhn_extra", mat_file="extra_32x32.mat"):
    """Export the SVHN 'extra' split from a Matlab .mat file to PNGs.

    Writes one image per sample into ``<dir_name>/<label>/%05d.png``.
    Existing files are skipped so the export is resumable.

    NOTE(review): redefines the ``save_svhn`` from the previous cell —
    notebook-style shadowing kept for cell-by-cell execution.
    """
    os.makedirs(dir_name, exist_ok=True)
    print("Loading matlab data of SVHN")
    mat = sio.loadmat(mat_file)
    data = mat['X']      # image stack, last axis indexes samples
    folders = mat['y']   # per-sample labels, shape (N, 1)
    for i in range(data.shape[3]):
        label_dir = os.path.join(dir_name, str(folders[i][0]))
        out_path = os.path.join(label_dir, "%05d.png" % i)
        if not os.path.isfile(out_path):
            os.makedirs(label_dir, exist_ok=True)
            plt.imsave(out_path, data[..., i])
    print("Program done!")


save_svhn()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="gpN67dZ76EfW" outputId="6c8555fd-2658-48f9-ebdd-f8d0fc379921"
import os
import numpy as np
import struct
import scipy.io as sio
import matplotlib.pyplot as plt
def save_svhn_test(dir_name="./svhn_test", mat_file="test_32x32.mat"):
    """Export the SVHN test split from a Matlab .mat file to PNGs.

    Writes one image per sample into ``<dir_name>/<label>/%05d.png``.
    Existing files are skipped so the export is resumable.
    """
    os.makedirs(dir_name, exist_ok=True)
    print("Loading matlab data of SVHN")
    mat = sio.loadmat(mat_file)
    data = mat['X']      # image stack, last axis indexes samples
    folders = mat['y']   # per-sample labels, shape (N, 1)
    for i in range(data.shape[3]):
        label_dir = os.path.join(dir_name, str(folders[i][0]))
        out_path = os.path.join(label_dir, "%05d.png" % i)
        if not os.path.isfile(out_path):
            os.makedirs(label_dir, exist_ok=True)
            plt.imsave(out_path, data[..., i])
    print("done!")


save_svhn_test()
# -
# ls
# + colab={} colab_type="code" id="505yv8PKCFSF"
####=======================
import glob
import os
import shutil
import random
from PIL import Image
import numpy
#move class folder from classname_# to classname/#
def get_image_parts(image_path):
    """Decompose 'split/classname/filename.ext' into its components.

    Returns (train_or_test, classname, filename_without_extension, filename).
    """
    parts = image_path.split(os.path.sep)
    train_or_test, classname, filename = parts[0], parts[1], parts[2]
    stem = filename.partition('.')[0]
    return train_or_test, classname, stem, filename
# Resize every exported SVHN training PNG to 299x299 and write it into
# train_resized_299/<class>/train_<filename>.
move_folders = ['svhn_train']
dest_folder = 'train_resized_299'
data_file = []  # kept for parity with sibling cells; unused here

# look for all images in sub-folders
for folder in move_folders:
    class_folders = glob.glob(os.path.join(folder, '*'))
    print('folder %s' % class_folders)
    for iid_class in class_folders:
        print(iid_class)
        class_files = glob.glob(os.path.join(iid_class, '*.png'))
        print('moving %d files' % len(class_files))
        # Iterate files directly: the original built `range(0*set, 1*set)`
        # (shadowing the builtin `set`) which covered every file anyway.
        for src in class_files:
            _, classname, _, filename = get_image_parts(src)
            img_directory = os.path.join(dest_folder, classname)
            # create folder if not existed
            os.makedirs(img_directory, exist_ok=True)
            dst = os.path.join(img_directory, 'train_' + filename)
            # convert image (resize longest side to 299, save as RGB)
            convert_img_square(src, dst, 299)
# + colab={} colab_type="code" id="hPB1Rz3a_SUw"
####=======================
import glob
import os
import shutil
import random
from PIL import Image
import numpy
#move class folder from classname_# to classname/#
def get_image_parts(image_path):
    """Decompose 'split/classname/filename.ext' into its components.

    Returns (train_or_test, classname, filename_without_extension, filename).
    """
    parts = image_path.split(os.path.sep)
    train_or_test, classname, filename = parts[0], parts[1], parts[2]
    stem = filename.partition('.')[0]
    return train_or_test, classname, stem, filename
# Resize every exported SVHN 'extra' PNG to 299x299 and write it into
# train_resized_299/<class>/extra_<filename> (merged with the train split).
move_folders = ['svhn_extra']
dest_folder = 'train_resized_299'
data_file = []  # kept for parity with sibling cells; unused here

# look for all images in sub-folders
for folder in move_folders:
    class_folders = glob.glob(os.path.join(folder, '*'))
    print('folder %s' % class_folders)
    for iid_class in class_folders:
        print(iid_class)
        class_files = glob.glob(os.path.join(iid_class, '*.png'))
        print('moving %d files' % len(class_files))
        # Iterate files directly: the original built `range(0*set, 1*set)`
        # (shadowing the builtin `set`) which covered every file anyway.
        for src in class_files:
            _, classname, _, filename = get_image_parts(src)
            img_directory = os.path.join(dest_folder, classname)
            # create folder if not existed
            os.makedirs(img_directory, exist_ok=True)
            dst = os.path.join(img_directory, 'extra_' + filename)
            # convert image (resize longest side to 299, save as RGB)
            convert_img_square(src, dst, 299)
# + colab={} colab_type="code" id="pWIq2AAg-Rjh"
####=======================
import glob
import os
import shutil
import random
from PIL import Image
import numpy
#move class folder from classname_# to classname/#
def get_image_parts(image_path):
    """Decompose 'split/classname/filename.ext' into its components.

    Returns (train_or_test, classname, filename_without_extension, filename).
    """
    parts = image_path.split(os.path.sep)
    train_or_test, classname, filename = parts[0], parts[1], parts[2]
    stem = filename.partition('.')[0]
    return train_or_test, classname, stem, filename
# Resize every exported SVHN test PNG to 299x299 and write it into
# test_resized_299/<class>/<filename> (no prefix — test set kept separate).
move_folders = ['svhn_test']
dest_folder = 'test_resized_299'
data_file = []  # kept for parity with sibling cells; unused here

# look for all images in sub-folders
for folder in move_folders:
    class_folders = glob.glob(os.path.join(folder, '*'))
    print('folder %s' % class_folders)
    for iid_class in class_folders:
        print(iid_class)
        class_files = glob.glob(os.path.join(iid_class, '*.png'))
        print('moving %d files' % len(class_files))
        # Iterate files directly: the original built `range(0*set, 1*set)`
        # (shadowing the builtin `set`) which covered every file anyway.
        for src in class_files:
            _, classname, _, filename = get_image_parts(src)
            img_directory = os.path.join(dest_folder, classname)
            # create folder if not existed
            os.makedirs(img_directory, exist_ok=True)
            dst = os.path.join(img_directory, filename)
            # convert image (resize longest side to 299, save as RGB)
            convert_img_square(src, dst, 299)
# + colab={} colab_type="code" id="u2MwT1Dh-WUG"
# + colab={} colab_type="code" id="gv37mrDqCL8h"
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
# + colab={} colab_type="code" id="Sg09eH64C-kO"
# + colab={} colab_type="code" id="pIlCeOsECoDf"
# !zip -r svhn_train_resized_229.zip svhn_train_resized_229
# !zip -r svhn_test_resized_229.zip svhn_test_resized_229
# + colab={} colab_type="code" id="n_0aMFCoCrnD"
# # %cp svhn_train_resized_229.zip gdrive/My\ Drive/svhn_train_resized_229.zip
# # %cp svhn_test_resized_229.zip gdrive/My\ Drive/svhn_test_resized_229.zip
# + colab={} colab_type="code" id="zVeqXfhNMzQx"
# %cp gdrive/My\ Drive/svhn_train.zip svhn_train_32.zip
# %cp gdrive/My\ Drive/svhn_test.zip svhn_test_32.zip
# + colab={} colab_type="code" id="_g2b9RNYM7XO"
# !unzip -q svhn_train_32.zip
# !unzip -q svhn_test_32.zip
# + colab={"base_uri": "https://localhost:8080/", "height": 513} colab_type="code" id="SU2cgVh2jjks" outputId="da972a51-a4ea-4cd6-e463-7949784eb206"
# !pip3 install -U git+https://github.com/qubvel/efficientnet
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="ZIRj8Ae2jne1" outputId="779932d7-7398-4d4f-effa-5ef345499243"
#MUL 1 - Inception - ST
# from keras.applications import InceptionV3
# from keras.applications import Xception
# from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.models import Model
from keras.layers import concatenate
from keras.layers import Dense, GlobalAveragePooling2D, Input, Embedding, SimpleRNN, LSTM, Flatten, GRU, Reshape
# from keras.applications.inception_v3 import preprocess_input
from efficientnet.keras import preprocess_input
# from keras.applications.xception import preprocess_input
from keras.layers import GaussianNoise
import efficientnet.keras as efn
# Classifier: EfficientNet-B2 backbone (ImageNet weights, global average
# pooling) with a noise-regularized dense head for the 10 SVHN digit classes.
f1_base = efn.EfficientNetB2(include_top=False, weights='imagenet',
                             input_shape=(299, 299, 3),
                             pooling='avg')
# f1_base = Xception(weights='imagenet', include_top=False, input_shape=(299,299,3))
# f1_base = EfficientNetB4((224,224,3), classes=1000, include_top=False, weights='imagenet')
f1_x = f1_base.output
# f1_x = f1_base.layers[-151].output #layer 5
# f1_x = GlobalAveragePooling2D()(f1_x)
# f1_x = Flatten()(f1_x)
# f1_x = Reshape([1,1280])(f1_x)
# f1_x = SimpleRNN(2048,
#                  return_sequences=False,
#                  # dropout=0.8
#                  input_shape=[1,1280])(f1_x)
# Regularization with noise (active at training time only)
f1_x = GaussianNoise(0.1)(f1_x)
f1_x = Dense(1024, activation='relu')(f1_x)
f1_x = Dense(10, activation='softmax')(f1_x)  # 10 SVHN digit classes
model_1 = Model(inputs=[f1_base.input], outputs=[f1_x])
model_1.summary()
# + colab={} colab_type="code" id="uYlOSabnEy1p"
## fix for multi_gpu_model prediction time longer
## fix for multi_gpu_model prediction time longer
from keras.layers import Lambda, concatenate
from keras import Model
import tensorflow as tf

def multi_gpu_model(model, gpus):
    """Replicate `model` on each GPU, splitting every batch across replicas
    and concatenating the outputs back on the CPU.

    Drop-in replacement for keras.utils.multi_gpu_model (see header note).
    `gpus` may be an int (use GPUs 0..gpus-1) or an explicit list of ids.
    """
    if isinstance(gpus, (list, tuple)):
        num_gpus = len(gpus)
        target_gpu_ids = gpus
    else:
        num_gpus = gpus
        target_gpu_ids = range(num_gpus)

    def get_slice(data, i, parts):
        # Slice the batch axis into `parts` chunks; the last replica takes
        # the remainder when batch_size is not divisible by `parts`.
        shape = tf.shape(data)
        batch_size = shape[:1]
        input_shape = shape[1:]
        step = batch_size // parts
        if i == num_gpus - 1:
            size = batch_size - step * i
        else:
            size = step
        size = tf.concat([size, input_shape], axis=0)
        stride = tf.concat([step, input_shape * 0], axis=0)
        start = stride * i
        return tf.slice(data, start, size)

    # One accumulator list per model output.
    all_outputs = []
    for i in range(len(model.outputs)):
        all_outputs.append([])

    # Place a copy of the model on each GPU,
    # each getting a slice of the inputs.
    for i, gpu_id in enumerate(target_gpu_ids):
        with tf.device('/gpu:%d' % gpu_id):
            with tf.name_scope('replica_%d' % gpu_id):
                inputs = []
                # Retrieve a slice of the input.
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_i = Lambda(get_slice,
                                     output_shape=input_shape,
                                     arguments={'i': i,
                                                'parts': num_gpus})(x)
                    inputs.append(slice_i)
                # Apply model on slice
                # (creating a model replica on the target device).
                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                # Save the outputs for merging back together later.
                for o in range(len(outputs)):
                    all_outputs[o].append(outputs[o])

    # Merge outputs on CPU.
    with tf.device('/cpu:0'):
        merged = []
        for name, outputs in zip(model.output_names, all_outputs):
            merged.append(concatenate(outputs,
                                      axis=0, name=name))
        return Model(model.inputs, merged)
# + colab={} colab_type="code" id="zxB7_So2E3Eu"
from keras.callbacks import Callback
import pickle
import sys
import warnings  # needed by the missing-metric warning below


# Stop training on val_acc
class EarlyStoppingByAccVal(Callback):
    """Stop training as soon as the monitored metric reaches `value`.

    Parameters
    ----------
    monitor : str
        Key looked up in the epoch `logs` dict (e.g. 'val_acc').
    value : float
        Threshold; training stops when `monitor` >= `value`.
    verbose : int
        If > 0, print a message when stopping.
    """

    def __init__(self, monitor='val_acc', value=0.00001, verbose=0):
        # Original called super(Callback, self).__init__(), which skipped
        # Callback's own initializer; use the correct class here.
        super(EarlyStoppingByAccVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return  # nothing to compare against; original crashed on None >= value
        if current >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping" % epoch)
            self.model.stop_training = True
# Save large model weights using pickle format instead of h5
class SaveCheckPoint(Callback):
    """Checkpoint the best-val_acc weights via pickle.

    For very heavy architectures the .h5 weight file is unsupported, so the
    weight arrays are pickled instead. Saves when val_acc improves, or when
    val_acc ties the best and val_loss improves.
    """

    def __init__(self, model, dest_folder):
        # Original called super(Callback, self).__init__(), which skipped
        # Callback's own initializer; use the correct class here.
        super(SaveCheckPoint, self).__init__()
        # NOTE(review): Callback also sets self.model via set_model() when
        # registered with fit(); holding it here mirrors the original usage.
        self.model = model
        self.dest_folder = dest_folder  # pickle output path
        # initiate best-so-far trackers
        self.best_val_acc = 0
        self.best_val_loss = sys.maxsize  # get max value

    def _save_weights(self):
        # Pickle the weight arrays; .h5 is unsupported for heavy models.
        print('\nSaving val_acc %f at %s' % (self.best_val_acc, self.dest_folder))
        weigh = self.model.get_weights()
        # Context manager guarantees the file is closed even on error.
        with open(self.dest_folder, 'wb') as fpkl:  # Python 3
            pickle.dump(weigh, fpkl, protocol=pickle.HIGHEST_PROTOCOL)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        val_acc = logs['val_acc']
        val_loss = logs['val_loss']
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            self._save_weights()
        elif val_acc == self.best_val_acc and val_loss < self.best_val_loss:
            # Tie on accuracy: prefer the lower validation loss.
            self.best_val_loss = val_loss
            self._save_weights()
# + colab={} colab_type="code" id="nkmOYEZU0Db3"
import numpy as np
import keras.backend as K
from PIL import Image

def get_cutout_v2(p=0.5, n_holes=2, length=20):
    """Build a Cutout-style augmentation function for ImageDataGenerator's
    preprocessing_function hook: zero out `n_holes` random square patches,
    then apply the backbone's `preprocess_input` (taken from outer scope).

    NOTE(review): `p` is accepted but never used — no probability gate is
    applied, so cutout always runs. Confirm whether that is intended.
    NOTE(review): the `length` parameter is overwritten inside `cutout` by a
    random value in [w//16, w//4), so the default 20 has no effect.
    """
    def cutout(np_img):
        # print(type(image))
        # h = img.size(1)
        # w = img.size(2)
        img = Image.fromarray(((np_img)).astype(np.uint8))
        w, h = img.size
        mask = np.ones((h, w), np.float32)
        # Random patch size per image; assumes square input (see comment).
        length = np.random.randint(low=w//16, high=w//4) #w=h
        for n in range(n_holes):
            # Random patch centre; np.clip keeps the patch inside the image.
            y = np.random.randint(h)
            x = np.random.randint(w)
            y1 = np.clip(y - length // 2, 0, h)
            y2 = np.clip(y + length // 2, 0, h)
            x1 = np.clip(x - length // 2, 0, w)
            x2 = np.clip(x + length // 2, 0, w)
            mask[y1: y2, x1: x2] = 0.
        # mask = torch.from_numpy(mask)
        # ary = np.random.normal(size=(2, 2))
        # mask = K.cast(mask, dtype='float32')
        # mask = mask.expand_as(img)
        # Broadcast the 2-D mask over the channel axis.
        mask = np.expand_dims(mask, axis=2)
        re_img = img * mask
        return preprocess_input(re_img)
        # return re_img
    return cutout
# + colab={} colab_type="code" id="KHW7L6CEaO5s"
# ls -l
# + colab={} colab_type="code" id="IO8JJRbeFEgK"
# mkdir checkpoints
# -
# %mkdir svhn_output
# %mkdir svhn_output/logs
# + colab={"base_uri": "https://localhost:8080/", "height": 445} colab_type="code" id="fQREEQLxE5qu" outputId="751beac8-6384-4318-f387-88fb1690e633"
#Non-Groups
#Split training and validation
#Using Expert Data
# Phase 1: train the EfficientNet-B2 model on 299x299 SVHN crops
# (train+extra merged under train_resized_299), Adam lr=1e-3 with decay.
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras.optimizers import Adam
# from keras.utils import multi_gpu_model
import time, os
from math import ceil
import multiprocessing

# Training-time augmentation; preprocess_input matches the EfficientNet backbone.
train_datagen = ImageDataGenerator(
    # rescale = 1./255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    shear_range=0.3,
    zoom_range=0.3,
    # horizontal_flip=True,
    # vertical_flip=True,##
    # brightness_range=[0.5, 1.5],##
    channel_shift_range=10,##
    fill_mode='nearest',
    # preprocessing_function=get_cutout_v2(),
    preprocessing_function=preprocess_input,
)
# No augmentation for evaluation — only backbone preprocessing.
test_datagen = ImageDataGenerator(
    # rescale = 1./255
    preprocessing_function=preprocess_input
)

NUM_GPU = 3
batch_size = 128
train_set = train_datagen.flow_from_directory('train_resized_299',
                                              target_size = (299, 299),
                                              batch_size = batch_size,
                                              class_mode = 'categorical',
                                              shuffle=True,
                                              seed=7,
                                              # subset="training"
                                              )
# NOTE(review): the official test split is used as validation here.
valid_set = test_datagen.flow_from_directory('test_resized_299',
                                             target_size = (299, 299),
                                             batch_size = batch_size,
                                             class_mode = 'categorical',
                                             shuffle=False,
                                             seed=7,
                                             # subset="validation"
                                             )

model_txt = 'st'
# Helper: Save the model (best val_acc weights only).
savedfilename = os.path.join('checkpoints', 'SVHN_EffB2_299_v2.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
                               monitor='val_acc', verbose=1,
                               save_best_only=True, mode='max',save_weights_only=True)########
# Helper: TensorBoard
tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt))
# Helper: Save results (per-epoch metrics to CSV).
timestamp = time.time()
csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \
    str(timestamp) + '.log'))
# Stop once validation accuracy reaches 99.00%.
earlystopping = EarlyStoppingByAccVal(monitor='val_acc', value=0.9900, verbose=1)

#Using multiple models if more than 1 GPU
if NUM_GPU != 1:
    model_mul = multi_gpu_model(model_1, gpus=NUM_GPU)
else:
    model_mul = model_1

epochs = 20##!!!
lr = 1e-3
decay = lr/epochs  # linear LR decay down over the run
optimizer = Adam(lr=lr, decay=decay)
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])

step_size_train=ceil(train_set.n/train_set.batch_size)
step_size_valid=ceil(valid_set.n/valid_set.batch_size)
# step_size_test=ceil(testing_set.n//testing_set.batch_size)
result = model_mul.fit_generator(
    generator = train_set,
    steps_per_epoch = step_size_train,
    validation_data = valid_set,
    validation_steps = step_size_valid,
    shuffle=True,
    epochs=epochs,
    # callbacks=[earlystopping],
    callbacks=[csv_logger, checkpointer, earlystopping],
    # callbacks=[tb, csv_logger, checkpointer, earlystopping],
    verbose=1)
# +
#Non-Groups
#Split training and validation
#Using Expert Data
# NOTE(review): this cell duplicates the previous training cell verbatim;
# the only difference is the checkpoint filename (…_tmp.hdf5), so it reruns
# phase-1 training from the current weights into a temporary checkpoint.
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras.optimizers import Adam
# from keras.utils import multi_gpu_model
import time, os
from math import ceil
import multiprocessing

train_datagen = ImageDataGenerator(
    # rescale = 1./255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    shear_range=0.3,
    zoom_range=0.3,
    # horizontal_flip=True,
    # vertical_flip=True,##
    # brightness_range=[0.5, 1.5],##
    channel_shift_range=10,##
    fill_mode='nearest',
    # preprocessing_function=get_cutout_v2(),
    preprocessing_function=preprocess_input,
)
test_datagen = ImageDataGenerator(
    # rescale = 1./255
    preprocessing_function=preprocess_input
)

NUM_GPU = 3
batch_size = 128
train_set = train_datagen.flow_from_directory('train_resized_299',
                                              target_size = (299, 299),
                                              batch_size = batch_size,
                                              class_mode = 'categorical',
                                              shuffle=True,
                                              seed=7,
                                              # subset="training"
                                              )
valid_set = test_datagen.flow_from_directory('test_resized_299',
                                             target_size = (299, 299),
                                             batch_size = batch_size,
                                             class_mode = 'categorical',
                                             shuffle=False,
                                             seed=7,
                                             # subset="validation"
                                             )

model_txt = 'st'
# Helper: Save the model (temporary checkpoint for this rerun).
savedfilename = os.path.join('checkpoints', 'SVHN_EffB2_299_v2_tmp.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
                               monitor='val_acc', verbose=1,
                               save_best_only=True, mode='max',save_weights_only=True)########
# Helper: TensorBoard
tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt))
# Helper: Save results.
timestamp = time.time()
csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \
    str(timestamp) + '.log'))
earlystopping = EarlyStoppingByAccVal(monitor='val_acc', value=0.9900, verbose=1)

#Using multiple models if more than 1 GPU
if NUM_GPU != 1:
    model_mul = multi_gpu_model(model_1, gpus=NUM_GPU)
else:
    model_mul = model_1

epochs = 20##!!!
lr = 1e-3
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])

step_size_train=ceil(train_set.n/train_set.batch_size)
step_size_valid=ceil(valid_set.n/valid_set.batch_size)
# step_size_test=ceil(testing_set.n//testing_set.batch_size)
result = model_mul.fit_generator(
    generator = train_set,
    steps_per_epoch = step_size_train,
    validation_data = valid_set,
    validation_steps = step_size_valid,
    shuffle=True,
    epochs=epochs,
    # callbacks=[earlystopping],
    callbacks=[csv_logger, checkpointer, earlystopping],
    # callbacks=[tb, csv_logger, checkpointer, earlystopping],
    verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="1JwAn1jKFZV6" outputId="e047b4e6-fdc2-491f-aadd-f5aa72fa6f7a"
# Restore the best (highest val_acc) weights from the phase-1 checkpoint.
model_mul.load_weights(os.path.join('checkpoints', 'SVHN_EffB2_299_v2.hdf5'))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="qyBzNzsdahbe" outputId="4676c11c-6355-47c8-c044-4817adfa66af"
#Non-Groups
#Split training and validation
#Using Expert Data
# Phase 2: fine-tune at a lower learning rate (1e-4), new checkpoint file.
# Reuses train_set/valid_set, step sizes, csv_logger and earlystopping
# from the earlier training cell.
savedfilename = os.path.join('checkpoints', 'SVHN_EffB2_299_v2_L2.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
                               monitor='val_acc', verbose=1,
                               save_best_only=True, mode='max',save_weights_only=True)########
epochs = 15##!!!
lr = 1e-4
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
result = model_mul.fit_generator(
    generator = train_set,
    steps_per_epoch = step_size_train,
    validation_data = valid_set,
    validation_steps = step_size_valid,
    shuffle=True,
    epochs=epochs,
    # callbacks=[earlystopping, checkpointer],
    callbacks=[csv_logger, checkpointer, earlystopping],
    # callbacks=[tb, csv_logger, checkpointer, earlystopping],
    verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="BDQffVPvamBk" outputId="80fd2662-2141-46e5-8dbb-abc1d6bcaaa5"
# Restore the best phase-2 weights before the final fine-tune pass.
model_mul.load_weights(os.path.join('checkpoints', 'SVHN_EffB2_299_v2_L2.hdf5'))
# +
#Non-Groups
#Split training and validation
#Using Expert Data
# Phase 3: fine-tune at lr=1e-5.
# NOTE(review): checkpoint name says "EffB0" although the model is
# EfficientNet-B2 — presumably a copy-paste leftover; confirm before reuse.
savedfilename = os.path.join('checkpoints', 'SVHN_EffB0_299_v2_L3.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
                               monitor='val_acc', verbose=1,
                               save_best_only=True, mode='max',save_weights_only=True)########
epochs = 15##!!!
lr = 1e-5
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
result = model_mul.fit_generator(
    generator = train_set,
    steps_per_epoch = step_size_train,
    validation_data = valid_set,
    validation_steps = step_size_valid,
    shuffle=True,
    epochs=epochs,
    # callbacks=[earlystopping, checkpointer],
    callbacks=[csv_logger, checkpointer, earlystopping],
    # callbacks=[tb, csv_logger, checkpointer, earlystopping],
    verbose=1)
# -
# Use the phase-2 (L2) checkpoint for final prediction, not the phase-3 one.
model_mul.load_weights(os.path.join('checkpoints', 'SVHN_EffB2_299_v2_L2.hdf5'))
# +
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import time, os
from math import ceil

# PREDICT ON OFFICIAL TEST
# train_set is rebuilt only to recover the class_indices label mapping.
train_datagen = ImageDataGenerator(
    # rescale = 1./255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    shear_range=0.3,
    zoom_range=0.3,
    # horizontal_flip=True,
    # vertical_flip=True,##
    # brightness_range=[0.5, 1.5],##
    channel_shift_range=10,##
    fill_mode='nearest',
    preprocessing_function=preprocess_input,
)
test_datagen1 = ImageDataGenerator(
    # rescale = 1./255,
    preprocessing_function=preprocess_input
)
batch_size = 36
train_set = train_datagen.flow_from_directory('train_resized_299',
                                              target_size = (299, 299),
                                              batch_size = batch_size,
                                              class_mode = 'categorical',
                                              shuffle=True,
                                              seed=7,
                                              # subset="training"
                                              )
# shuffle=False keeps predictions aligned with test_set1.filenames below.
test_set1 = test_datagen1.flow_from_directory('test_resized_299',
                                              target_size = (299, 299),
                                              batch_size = batch_size,
                                              class_mode = 'categorical',
                                              shuffle=False,
                                              seed=7,
                                              # subset="validation"
                                              )
# if NUM_GPU != 1:
predict1=model_mul.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1)
# else:
#     predict1=model.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1)

# Map argmax indices back to directory class names via the inverted mapping.
predicted_class_indices=np.argmax(predict1,axis=1)
labels = (train_set.class_indices)
labels = dict((v,k) for k,v in labels.items())
predictions1 = [labels[k] for k in predicted_class_indices]

import pandas as pd
filenames=test_set1.filenames
results=pd.DataFrame({"file_name":filenames,
                      "predicted1":predictions1,
                      })
results.to_csv('SVHN_Eff_B2_299_v2_L2_2609.csv')
results.head()
# -
# Persist raw softmax scores; assumes the 'pred_npy' directory already exists.
np.save(os.path.join('pred_npy','SVHN_Eff_B2_299_v2_L2_2609.npy'), predict1)
| SVHN/v2/sourcecode/SVHN_ExtraTrain_B2-299_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Lithology
# language: python
# name: ela
# ---
# ## Canberra lithologies case study
#
# Motivated by learning that the ACT is interested in managed aquifer recharge for watering some green spaces.
#
# This notebook does not look at AEM data although sitting under a repository suggesting so.
#
# ## Downloading the data
#
# Not thoroughly documented.
#
# Data was downloaded from the usual places, NGIS and Elvis. NGIS when using the Murrumbidgee catchment was actually not including the bores in the ACT, so needed to download the ACT ones also, and this present notebook will do the merging of the lithology logs. Spatial locations were merged manually, and subsetted, in QGIS
#
# Some of the data output by this present notebook fed into a [lithology log viewer](https://github.com/csiro-hydrogeology/lithology-viewer) that can be run as a dashboard on Binder.
#
# + init_cell=true
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import rasterio
from rasterio.plot import show
import geopandas as gpd
# -
# Only set to True for co-dev of ela from this use case:
ela_from_source = False
ela_from_source = True
# +
# When co-developing 'ela', prepend its source checkout to sys.path so the
# in-tree package shadows any installed version. Location is taken from the
# ELA_SRC environment variable, with per-platform fallbacks.
if ela_from_source:
    if ('ELA_SRC' in os.environ):
        root_src_dir = os.environ['ELA_SRC']
    elif sys.platform == 'win32':
        root_src_dir = r'C:\src\github_jm\pyela'
    else:
        username = os.environ['USER']
        root_src_dir = os.path.join('/home', username, 'src/ela/pyela')
    pkg_src_dir = root_src_dir
    sys.path.insert(0, pkg_src_dir)

# Star imports bring in the ela column-name constants (e.g. EASTING_COL)
# and helpers (get_bbox, cookie_cut_gpd, DepthsRounding, ...) used below.
from ela.textproc import *
from ela.utils import *
from ela.classification import *
from ela.visual import *
from ela.spatial import SliceOperation
# -
# ## Importing data
#
# There are two main sets of information we need: the borehole lithology logs, and the spatial information in the surface elevation (DEM) and geolocation of a subset of bores around Bungendore.
data_path = None
# You probably want to explicitly set `data_path` to the location where you put the folder(s) e.g:
# +
#data_path = '/home/myusername/data' # On Linux, if you now have the folder /home/myusername/data/Bungendore
#data_path = r'C:\data\Lithology' # windows, if you have C:\data\Lithology\Bungendore
# -
# Otherwise a fallback for the pyela developer(s)
# Fallback data root for the pyela developers: ELA_DATA env var first,
# then a per-platform default location.
if data_path is None:
    if ('ELA_DATA' in os.environ):
        data_path = os.environ['ELA_DATA']
    elif sys.platform == 'win32':
        data_path = r'C:\data\Lithology'
    else:
        username = os.environ['USER']
        data_path = os.path.join('/home', username, 'data')
# Display the resolved path (notebook cell output).
data_path
# Input/output directories for this case study.
# NOTE(review): the folder names say 'Brisbane'/'brisbane_river' although
# this is the Canberra study — presumably the data was placed under those
# folders; confirm against the actual data layout. Also note that
# `act_shp_datadir`, used further down for the ACT lithology CSV, is never
# defined in this notebook.
cbr_datadir = os.path.join(data_path, 'Brisbane')
cbr_datadir_out = os.path.join(cbr_datadir, 'out')
ngis_datadir = os.path.join(data_path, 'NGIS')
bidgee_shp_datadir = os.path.join(ngis_datadir, 'shp_brisbane_river')
write_outputs = True
# ## DEM
#
dem = rasterio.open(os.path.join(cbr_datadir,'CLIP.tif'))
fig, ax = plt.subplots(figsize=(12, 12))
show(dem,title='Canberra', cmap='terrain', ax=ax)
# ## Bore data
bore_locations_raw = gpd.read_file(os.path.join(cbr_datadir, 'Bores/act_bores.shp'))
bore_locations_raw.columns
bore_locations_raw.crs, dem.crs
# The DEM raster and the bore location shapefile do not use the same projection (coordinate reference system) so we reproject one of them. We choose the raster's UTM.
bore_locations = bore_locations_raw.to_crs(dem.crs)
# For this location we actually had to download two data sets from the NGIS: the data for the murrumbidgee catchment does not include much of the ones also inside the ACT.
lithology_logs_act = pd.read_csv(os.path.join(act_shp_datadir, 'NGIS_LithologyLog.csv'))
lithology_logs_bidgee = pd.read_csv(os.path.join(bidgee_shp_datadir, 'NGIS_LithologyLog.csv'))
len(lithology_logs_act), len(lithology_logs_bidgee)
lithology_logs = pd.concat([lithology_logs_act, lithology_logs_bidgee])
fig, ax = plt.subplots(figsize=(12, 12))
show(dem,title='Canberra', cmap='terrain', ax=ax)
bore_locations.plot(ax=ax, facecolor='black')
# Let's create a copy of the logs merged, so that we can fall back on to the original one if we mess things up
df = lithology_logs.copy()
df.columns
# +
# These are probably the defaults from the ela package imports, but to be explicit:
# Column names of the NGIS lithology log CSV used throughout this notebook.
DEPTH_FROM_COL = 'FromDepth'
DEPTH_TO_COL = 'ToDepth'
TOP_ELEV_COL = 'TopElev'
BOTTOM_ELEV_COL = 'BottomElev'
LITHO_DESC_COL = 'Description'
HYDRO_CODE_COL = 'HydroCode'
HYDRO_ID_COL = 'HydroID'
BORE_ID_COL = 'BoreID'
# -
# We suspect that there are locations registered for which there is actually no lithology logs recorded. We want to keep boreholes that have at least one row in the lithology logs.
#
# TODO: this should be a feature in the package.
# Keep only bore locations that have at least one lithology log entry:
# intersect the bore IDs present in the logs with those in the shapefile.
df_ids = set(df[BORE_ID_COL].values)
geolog_ids = set(bore_locations[HYDRO_ID_COL].values)
len(df_ids), len(geolog_ids)
keep = df_ids.intersection(geolog_ids)
s = bore_locations[HYDRO_ID_COL]
bore_locations = bore_locations[s.isin(keep)]
# Visually we do have indeed a few less bores:
fig, ax = plt.subplots(figsize=(12, 12))
show(dem,title='Canberra', cmap='terrain', ax=ax)
bore_locations.plot(ax=ax, facecolor='black')
# ### Subset further to a location of interest
#
# Here, we devised how we could reduce the area further for the purpose of a case study as small as possible for submission to a gallery (pyvista). However, we ended up with not enough classified bores and missing data everywhere. Selecting a study area with enough data is necessary, and tricky.
#
#
# +
# max/min bounds
shp_bbox = get_bbox(bore_locations)
shp_bbox
# -
raster_bbox = dem.bounds
raster_bbox
# Clip the bore set to the overlap of the shapefile and raster extents.
x_min,x_max,y_min,y_max = intersecting_bounds([shp_bbox, raster_bbox])
trial = cookie_cut_gpd(bore_locations, x_min, x_max, y_min, y_max)
fig, ax = plt.subplots(figsize=(12, 12))
show(dem,title='Canberra', cmap='terrain', ax=ax)
trial.plot(ax=ax, facecolor='black')
# +
# Tried to use only a further subset but there is not enough data to do the interpolation (too many "none" descriptions)
# Parking this for now.
# bore_locations = trial
# shp_bbox = get_bbox(trial)
# x_min = shp_bbox[0]
# x_max = shp_bbox[2]
# y_min = shp_bbox[1]
# y_max = shp_bbox[3]
# -
# ### Merging the geolocation from the shapefile and lithology records
# The geopandas data frame has a column geometry listing `POINT` objects. 'ela' includes `get_coords_from_gpd_shape` to extract the coordinates into a simpler structure. 'ela' has predefined column names (e.g. EASTING_COL) defined for easting/northing information, which we can use to name our coordinate information.
bore_locations.columns
# +
def get_geoloc_df(bore_locations, additional_columns):
    # Flatten the geopandas POINT geometry into EASTING_COL/NORTHING_COL
    # columns, then copy across the requested attribute columns.
    geoloc = get_coords_from_gpd_shape(bore_locations, colname='geometry', out_colnames=[EASTING_COL, NORTHING_COL])
    for cn in additional_columns:
        geoloc[cn] = bore_locations[cn].values #important to remove indexing otherwise counterintuitive behavior (NaN)
    return geoloc

geoloc = get_geoloc_df(bore_locations, ['Latitude', 'Longitude', HYDRO_ID_COL])
# -
geoloc.info()
# to be reused in experimental notebooks:
geoloc_filename = os.path.join(cbr_datadir_out,'geoloc.pkl')
# Only write the pickle once; delete the file to force a refresh.
if not os.path.exists(geoloc_filename):
    geoloc.to_pickle(geoloc_filename)
# +
# geoloc.to_pickle(geoloc_filename)
# geoloc.to_csv(os.path.join(cbr_datadir_out,'geoloc.csv'))
# -
geoloc[HYDRO_ID_COL].dtype, df[BORE_ID_COL].dtype
# Attach easting/northing (and lat/lon) to every lithology record; inner
# join drops log rows whose bore has no geolocation.
df = pd.merge(df, geoloc, how='inner', left_on=BORE_ID_COL, right_on=HYDRO_ID_COL, sort=False, copy=True, indicator=False, validate=None)
len(df)
df.head()
# ### Round up 'depth to' and 'depth from' columns
#
# We round the depth related columns to the upper integer value and drop the entries where the resulting depths have degenerated to 0. `ela` has a class `DepthsRounding` to facilitate this operations on lithology records with varying column names.
#
# We first clean up height/depths columns to make sure they are numeric.
# TODO: function in the package
def as_numeric(x):
    """Coerce a raw log value to float, mapping None / 'None' to NaN.

    The NGIS CSV depth/elevation columns mix floats, numeric strings and
    the literal string 'None'; this normalizes them all to float.
    """
    if isinstance(x, float):
        return x  # already numeric (includes NaN)
    if x is None or x == 'None':
        return np.nan
    # Original had identical str and fallback branches; one cast suffices.
    return float(x)
# Normalize the four depth/elevation columns to floats ('None' -> NaN).
df[DEPTH_FROM_COL] = df[DEPTH_FROM_COL].apply(as_numeric)
df[DEPTH_TO_COL] = df[DEPTH_TO_COL].apply(as_numeric)
df[TOP_ELEV_COL] = df[TOP_ELEV_COL].apply(as_numeric)
df[BOTTOM_ELEV_COL] = df[BOTTOM_ELEV_COL].apply(as_numeric)
# Round depths to the metre and drop slices that degenerate to 0 thickness.
dr = DepthsRounding(DEPTH_FROM_COL, DEPTH_TO_COL)
"Before rounding heights we have " + str(len(df)) + " records"
df = dr.round_to_metre_depths(df, np.round, True)
"After removing thin sliced entries of less than a metre, we are left with " + str(len(df)) + " records left"
# ## Exploring the descriptive lithology
# Pull the free-text lithology descriptions out as a flat Series
# (reset_index drops the merged frame's index before further processing).
descs = df[LITHO_DESC_COL]
descs = descs.reset_index()
descs = descs[LITHO_DESC_COL]
descs.head()
# The description column as read seems to be objects. Other columns seem to be objects when they should be numeric. We define two functions to clean these.
def clean_desc(x):
    """Normalise a raw lithology description cell to a unicode string.

    Missing descriptions surface as floats (NaN read from file) or as
    None; both are replaced with the empty string. Any other value is
    returned unchanged.
    """
    if isinstance(x, float) or x is None:
        return u''
    # python2 would have needed unicode(x) here.
    return x
y = [clean_desc(x) for x in descs]
from striplog import Lexicon
lex = Lexicon.default()
y = clean_lithology_descriptions(y, lex)
# We get a flat list of all the "tokens" but remove stop words ('s', 'the' and the like)
y = v_lower(y)
vt = v_word_tokenize(y)
flat = np.concatenate(vt)
import nltk
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
exclude = stoplist + ['.',',',';',':','(',')','-']
flat = [word for word in flat if word not in exclude]
len(set(flat))
df_most_common= token_freq(flat, 50)
plot_freq(df_most_common)
df_most_common
# ## Defining lithology classes and finding primary/secondary lithologies
#
# From the list of most common tokens, we may want to define lithology classes as follows:
df[LITHO_DESC_COL] = y
lithologies = [ 'shale', 'clay','granite','soil','sand', 'porphyry','siltstone', 'dacite', 'gravel', 'limestone']
# Prep for visualisation
lithology_color_names = ['lightslategrey', 'olive', 'dimgray', 'chocolate', 'gold', 'tomato', 'teal', 'darkgrey', 'lavender', 'yellow']
# more classes for display of raw logs
lithologies = ['shale', 'clay','granite','soil','sand', 'porphyry','siltstone', 'dacite', 'rhyodacite', 'gravel', 'limestone', 'sandstone', 'slate', 'mudstone', 'rock', 'ignimbrite', 'tuff']
# Prep for visualisation
lithology_color_names = [
'lightslategrey', # Shale
'olive', # clay
'dimgray', # granite
'chocolate', # soil
'gold', # sand
'tomato', # porphyry
'teal', # siltstone
'darkgrey', # dacite
'whitesmoke', # rhyodacite
'powderblue', # gravel
'yellow', #limestone
'papayawhip', #sandstone
'dimgray', #slate
'darkred', #mudstone
'grey', #rock
'khaki', #ignimbrite
'lemonchiffon' #tuff
]
# And to capture any of these we devise a regular expression:
my_lithologies_numclasses = create_numeric_classes(lithologies)
# +
lithologies_dict = dict([(x,x) for x in lithologies])
# Plurals do occur
lithologies_dict['clays'] = 'clay'
lithologies_dict['sands'] = 'sand'
lithologies_dict['shales'] = 'shale'
# lithologies_dict['dacite'] = 'granite'
# lithologies_dict['sandstone'] = 'granite'
# lithologies_dict['slate'] = 'granite'
# lithologies_dict['rock'] = 'granite'
# lithologies_dict['ryodacite'] = 'granite'
# lithologies_dict['mudstone'] = 'sand' # ??
lithologies_dict['topsoil'] = 'soil' # ??
# -
any_litho_markers_re = r'shale|clay|granit|soil|sand|porphy|silt|gravel|dacit|slat|rock|stone|slate|brite|tuff'
regex = re.compile(any_litho_markers_re)
lithologies_adjective_dict = {
'sandy' : 'sand',
'clayey' : 'clay',
'clayish' : 'clay',
'shaley' : 'shale',
'silty' : 'silt',
'pebbly' : 'pebble',
'gravelly' : 'gravel',
'porphyritic': 'porphyry'
}
v_tokens = v_word_tokenize(y)
litho_terms_detected = v_find_litho_markers(v_tokens, regex=regex)
# Let's see if we detect these lithology markers in each bore log entries
zero_mark = [x for x in litho_terms_detected if len(x) == 0 ]
at_least_one_mark = [x for x in litho_terms_detected if len(x) >= 1]
at_least_two_mark = [x for x in litho_terms_detected if len(x) >= 2]
print('There are %s entries with no marker, %s entries with at least one, %s with at least two'%(len(zero_mark),len(at_least_one_mark),len(at_least_two_mark)))
# Note: probably need to think of precanned facilities in ela to assess the detection rate in such EDA. Maybe wordcloud not such a bad idea too.
descs_zero_mark = [y[i] for i in range(len(litho_terms_detected)) if len(litho_terms_detected[i]) == 0 ]
import random
random.sample(descs_zero_mark,20)
# descs_zero_mark[1:20]
flat = flat_list_tokens(descs_zero_mark)
s = ' '.join(flat)
show_wordcloud(s, title = 'Unclassified via regexp')
# Assign a primary lithology per log entry from the detected markers, and a
# secondary one using the adjective map (e.g. 'sandy' -> 'sand') -- exact
# precedence rules live in the ela package; TODO confirm against its docs.
primary_litho = v_find_primary_lithology(litho_terms_detected, lithologies_dict)
secondary_litho = v_find_secondary_lithology(litho_terms_detected, primary_litho, lithologies_adjective_dict, lithologies_dict)
df[PRIMARY_LITHO_COL]=primary_litho
df[SECONDARY_LITHO_COL]=secondary_litho
# Numeric class codes (from create_numeric_classes) for use in interpolation.
df[PRIMARY_LITHO_NUM_COL] = v_to_litho_class_num(primary_litho, my_lithologies_numclasses)
df[SECONDARY_LITHO_NUM_COL] = v_to_litho_class_num(secondary_litho, my_lithologies_numclasses)
# ## Converting depth below ground to Australian Height Datum elevation
#
# While the bore entries have columns for AHD elevations, many appear to be missing data. Since we have a DEM of the region we can correct this.
cd = HeightDatumConverter(dem)
df = cd.add_height(df,
depth_from_col=DEPTH_FROM_COL, depth_to_col=DEPTH_TO_COL,
depth_from_ahd_col=DEPTH_FROM_AHD_COL, depth_to_ahd_col=DEPTH_TO_AHD_COL,
easting_col=EASTING_COL, northing_col=NORTHING_COL, drop_na=False)
df.info()
# to be reused in experimental notebooks:
classified_logs_filename = os.path.join(cbr_datadir_out,'classified_logs.pkl')
if write_outputs or not os.path.exists(classified_logs_filename):
df.to_pickle(classified_logs_filename)
# +
# df.to_pickle(classified_logs_filename)
# df.to_csv(os.path.join(cbr_datadir_out,'classified_logs.csv'))
# -
classified_logs_filename = os.path.join(cbr_datadir_out,'classified_logs.csv')
df_subset = df[[HYDRO_ID_COL, BORE_ID_COL, DEPTH_FROM_COL, DEPTH_TO_COL, LITHO_DESC_COL, 'Lithology_1', 'MajorLithCode']]
# df_subset.to_csv(classified_logs_filename)
#
# ## Interpolate over a regular grid
#
df
grid_res = 200
m = create_meshgrid_cartesian(x_min, x_max, y_min, y_max, grid_res)
dem_array = surface_array(dem, x_min, y_min, x_max, y_max, grid_res)
dem_array[dem_array <= 0.0] = np.nan
dem_array_data = {'bounds': (x_min, x_max, y_min, y_max), 'grid_res': grid_res, 'mesh_xy': m, 'dem_array': dem_array}
# +
import pickle
fp = os.path.join(cbr_datadir_out, 'dem_array_data.pkl')
if write_outputs or not os.path.exists(fp):
with open(fp, 'wb') as handle:
pickle.dump(dem_array_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
# -
# We need to define min and max heights on the Z axis for which we interpolate. We use the KNN algorithm with 10 neighbours. We should use a domain such that there are enough points for each height. Let's visually find heights with at least 10 records.
df.info()
dc = DepthCoverage(df)
r, cc = dc.get_counts()
plt.plot(r, cc)
r = dc.get_range(11)
r
# +
n_neighbours=10
ahd_min=int(r[0])
ahd_max=int(r[1])
z_ahd_coords = np.arange(ahd_min,ahd_max,1)
dim_x,dim_y = m[0].shape
dim_z = len(z_ahd_coords)
dims = (dim_x,dim_y,dim_z)
# -
dims
lithology_3d_array=np.empty(dims)
gi = GridInterpolation(easting_col=EASTING_COL, northing_col=NORTHING_COL)
gi.get_lithology_observations_for_depth(df, ahd_max, 'Depth From (AHD)')
len(df)
gi.interpolate_volume(lithology_3d_array, df, PRIMARY_LITHO_NUM_COL, z_ahd_coords, n_neighbours, m)
# Burn DEM into grid
z_index_for_ahd = z_index_for_ahd_functor(b=-ahd_min)
dem_array.shape, m[0].shape, lithology_3d_array.shape
burn_volume(lithology_3d_array, dem_array, z_index_for_ahd, below=False)
# to be reused in experimental notebooks:
interp_litho_filename = os.path.join(cbr_datadir_out,'3d_primary_litho.pkl')
if write_outputs or not os.path.exists(interp_litho_filename):
with open(interp_litho_filename, 'wb') as handle:
pickle.dump(lithology_3d_array, handle, protocol=pickle.HIGHEST_PROTOCOL)
| case_studies/canberra/brisbane_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Contextualizing iCdR758
# #### Project summary...
# ### Annotation conversion
# +
# Read in gene list from iCdR758
cdr775_genes = []
with open('/home/mjenior/Desktop/tamayo_analysis/cdr775_genes.tsv', 'r') as genre_genes:
for line in genre_genes:
cdr775_genes.append(line.strip())
cdr775_genes = set(cdr775_genes)
# Parse PATRIC lookup table
refseq_dict = {}
refseq = set()
gene_dict = {}
genes = set()
with open('/home/mjenior/Desktop/tamayo_analysis/CdR20291_genes.tsv', 'r') as gene_catalog:
header = gene_catalog.readline()
for line in gene_catalog:
line = line.split()
if len(line) == 0: continue
if not line[0] in cdr775_genes:
continue
else:
refseq_dict[line[1]] = line[0]
refseq |= set([line[1]])
gene_dict[line[2]] = line[0]
genes |= set([line[2]])
# Parse RNASeq results
rough_1 = {}
rough_2 = {}
smooth_2 = {}
smooth_3 = {}
with open('/home/mjenior/Desktop/tamayo_analysis/tamayo_rnaseq.tsv', 'r') as transcription:
header = transcription.readline()
for line in transcription:
line = line.split()
if len(line) == 0: continue
if line[0] in refseq:
gene = refseq_dict[line[0]]
rough_1[gene] = float(line[1])
rough_2[gene] = float(line[2])
smooth_2[gene] = float(line[3])
smooth_3[gene] = float(line[4])
elif line[0] in genes:
gene = gene_dict[line[0]]
rough_1[gene] = float(line[1])
rough_2[gene] = float(line[2])
smooth_2[gene] = float(line[3])
smooth_3[gene] = float(line[4])
else:
continue
# Save to files for easier use later
with open('/home/mjenior/Desktop/tamayo_analysis/rough_1.tsv', 'w') as outFile:
for index in rough_1.keys():
outFile.write(index + '\t' + str(rough_1[index]) + '\n')
with open('/home/mjenior/Desktop/tamayo_analysis/rough_2.tsv', 'w') as outFile:
for index in rough_2.keys():
outFile.write(index + '\t' + str(rough_2[index]) + '\n')
with open('/home/mjenior/Desktop/tamayo_analysis/smooth_2.tsv', 'w') as outFile:
for index in smooth_2.keys():
outFile.write(index + '\t' + str(smooth_2[index]) + '\n')
with open('/home/mjenior/Desktop/tamayo_analysis/smooth_3.tsv', 'w') as outFile:
for index in smooth_3.keys():
outFile.write(index + '\t' + str(smooth_3[index]) + '\n')
# -
# ## Phase Variation
# +
from riptide import *
iCdR758 = cobra.io.load_json_model('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/reconstructions/iCdR758.json')
for rxn in iCdR758.reactions:
if 'EX_' in rxn.id:
rxn.bounds = (-1000.,1000.)
# Read in transcriptomes
rough_1 = riptide.read_transcription_file('/home/mjenior/Desktop/tamayo_analysis/rough_1.tsv')
rough_2 = riptide.read_transcription_file('/home/mjenior/Desktop/tamayo_analysis/rough_2.tsv')
smooth_2 = riptide.read_transcription_file('/home/mjenior/Desktop/tamayo_analysis/smooth_2.tsv')
smooth_3 = riptide.read_transcription_file('/home/mjenior/Desktop/tamayo_analysis/smooth_3.tsv')
# -
# +
def checkFreeMass(raw_model, cytosol='cytosol'):
    """Screen a model for metabolites that can be produced from nothing.

    With every boundary uptake closed, a temporary demand reaction is added
    for each cytosolic metabolite in turn and maximized; any metabolite whose
    demand still carries flux is generated "for free", which points at a
    mass-imbalanced or wrongly reversible reaction in the network.

    :param raw_model: cobra Model to screen (edits are reverted on exit by
        the model's context manager).
    :param cytosol: compartment label of interest.
        NOTE(review): membership is tested with ``in`` on this string, so any
        compartment ID that is a substring of 'cytosol' (e.g. 'c') also
        passes -- confirm this is intended.
    :return: list of metabolite IDs producible with all uptake blocked.
    """
    with raw_model as model:
        # Close all exchanges
        for index in model.boundary:
            model.reactions.get_by_id(index.id).lower_bound = 0.
        # Identify all metabolites that are produced within the network
        demand_metabolites = [x.reactants[0].id for x in model.demands if len(x.reactants) > 0] + [x.products[0].id for x in model.demands if len(x.products) > 0]
        free = []
        for index in model.metabolites:
            if index.id in demand_metabolites:
                continue  # already has a demand reaction; skip
            elif not index.compartment in cytosol:
                continue  # only probe metabolites in the target compartment
            else:
                # Temporarily add a demand for this metabolite and maximize it.
                demand = model.add_boundary(index, type='demand')
                model.objective = demand
                obj_val = model.slim_optimize(error_value=0.)
                if obj_val > 1e-8:  # tolerance above solver noise
                    free.append(index.id)
                model.remove_reactions([demand])
    if len(free) > 0:
        print(str(len(free)) + ' metabolites are generated for free')
    return(free)
# -
iCdR758_free = checkFreeMass(iCdR758)
iCdR758_rough1 = riptide.contextualize(model=iCdR758, transcriptome=rough_1)
iCdR758_rough2 = riptide.contextualize(model=iCdR758, transcriptome=rough_2)
iCdR758_smooth2 = riptide.contextualize(model=iCdR758, transcriptome=smooth_2)
iCdR758_smooth3 = riptide.contextualize(model=iCdR758, transcriptome=smooth_3)
riptide.save_output(riptide_obj=iCdR758_rough1, path='/home/mjenior/Desktop/tamayo_analysis/riptide_rough1')
riptide.save_output(riptide_obj=iCdR758_rough2, path='/home/mjenior/Desktop/tamayo_analysis/riptide_rough2')
riptide.save_output(riptide_obj=iCdR758_smooth2, path='/home/mjenior/Desktop/tamayo_analysis/riptide_smooth2')
riptide.save_output(riptide_obj=iCdR758_smooth3, path='/home/mjenior/Desktop/tamayo_analysis/riptide_smooth3')
# ### Analysis
# #### Growth rate
# +
from scipy import stats
rough_biomass = list(iCdR758_rough1.flux_samples['biomass']) + list(iCdR758_rough2.flux_samples['biomass'])
rough_growth = [(1. / numpy.median(x)) * 3600. for x in rough_biomass]
print('Rough doubling time: ' + str(round(numpy.median(rough_growth), 2)) + ' minutes')
smooth_biomass = list(iCdR758_smooth2.flux_samples['biomass']) + list(iCdR758_smooth3.flux_samples['biomass'])
smooth_growth = [(1. / numpy.median(x)) * 3600. for x in smooth_biomass]
print('Smooth doubling time: ' + str(round(numpy.median(smooth_growth), 2)) + ' minutes')
t_stat, p_val = stats.shapiro(rough_growth)
t_stat, p_val = stats.shapiro(smooth_growth)
t_stat, p_val = stats.wilcoxon(rough_growth, smooth_growth)
print('p-value: ' + str(round(p_val, 3)))
# -
# #### Substrate utilization / Secretion
# +
# Inferring media condition
def find_growth_substrates(riptide):
    """Infer the growth substrates used by a contextualized model.

    An exchange reaction with a negative median sampled flux is importing
    its metabolite, so that metabolite is treated as a growth substrate.

    :param riptide: RIPTiDe result object (has .model and .flux_samples).
    :return: pandas DataFrame with 'id' and 'name' columns.
    """
    exchange_ids = {rxn.id for rxn in riptide.model.reactions if 'EX_' in rxn.id}
    records = []
    for rxn_id in exchange_ids:
        # Negative median flux = net uptake from the environment.
        if numpy.median(riptide.flux_samples[rxn_id]) < 0.0:
            metabolite = riptide.model.reactions.get_by_id(rxn_id).reactants[0]
            records.append([metabolite.id, metabolite.name])
    print(str(len(records)) + ' growth substrates found')
    frame = pandas.DataFrame.from_records(records)
    frame.columns = ['id','name']
    return frame
def find_byproducts(riptide):
    """List metabolites secreted by a contextualized model.

    An exchange reaction with a positive median sampled flux is exporting
    its metabolite, so that metabolite is reported as a byproduct.

    :param riptide: RIPTiDe result object (has .model and .flux_samples).
    :return: pandas DataFrame with 'id' and 'name' columns.
    """
    exchange_ids = {rxn.id for rxn in riptide.model.reactions if 'EX_' in rxn.id}
    records = []
    for rxn_id in exchange_ids:
        # Positive median flux = net secretion to the environment.
        if numpy.median(riptide.flux_samples[rxn_id]) > 0.0:
            metabolite = riptide.model.reactions.get_by_id(rxn_id).reactants[0]
            records.append([metabolite.id, metabolite.name])
    print(str(len(records)) + ' secreted byproducts found')
    frame = pandas.DataFrame.from_records(records)
    frame.columns = ['id','name']
    return frame
def find_element_sources(riptide):
    """Tally elemental supply contributed by each exchanged metabolite.

    For every boundary reaction with a non-trivial absolute median flux,
    multiplies the metabolite's elemental composition by that flux, and
    reports the single largest carbon and nitrogen contributors.

    :param riptide: RIPTiDe result object (has .model and .flux_samples).
    :return: dict mapping metabolite ID -> {element symbol: supply} for every
        metabolite whose |median flux| exceeds the tolerance.
    """
    # Isolate exchange reactions (boundary stubs: reactant-only or product-only)
    exchanges = []
    for rxn in riptide.model.reactions:
        if len(rxn.reactants) == 0 or len(rxn.products) == 0:
            exchanges.append(rxn.id)
    sources = {}
    c_source = ['cpd_id', 0.0]  # running max: [metabolite ID, carbon supply]
    n_source = ['cpd_id', 0.0]  # running max: [metabolite ID, nitrogen supply]
    # Parse exchange flux samples for imported metabolites
    for rxn in exchanges:
        flux = abs(numpy.median(riptide.flux_samples[rxn]))
        if flux > 1e-6:  # ignore solver noise
            # NOTE(review): assumes the boundary reaction has a reactant; a
            # product-only boundary reaction would raise IndexError here.
            metabolite = riptide.model.reactions.get_by_id(rxn).reactants[0]
            sources[metabolite.id] = {}
            # Multiply elemental components by median flux absolute value
            for element in metabolite.elements.keys():
                element_supply = round(float(metabolite.elements[element]) * flux, 3)
                sources[metabolite.id][element] = element_supply
                # Identify largest sources of carbon and nitrogen
                if element == 'C' and element_supply > c_source[1]:
                    c_source = [metabolite.id, element_supply]
                elif element == 'N' and element_supply > n_source[1]:
                    n_source = [metabolite.id, element_supply]
    print('Primary carbon source: ' + riptide.model.metabolites.get_by_id(c_source[0]).name + ' (' + str(c_source[1]) + ')')
    print('Primary nitrogen source: ' + riptide.model.metabolites.get_by_id(n_source[0]).name + ' (' + str(n_source[1]) + ')')
    return sources
# -
rough1_substrates = find_growth_substrates(iCdR758_rough1)
rough1_sources = find_element_sources(iCdR758_rough1)
rough1_byproducts = find_byproducts(iCdR758_rough1)
rough2_substrates = find_growth_substrates(iCdR758_rough2)
rough2_sources = find_element_sources(iCdR758_rough2)
rough2_byproducts = find_byproducts(iCdR758_rough2)
smooth2_substrates = find_growth_substrates(iCdR758_smooth2)
smooth2_sources = find_element_sources(iCdR758_smooth2)
smooth2_byproducts = find_byproducts(iCdR758_smooth2)
smooth3_substrates = find_growth_substrates(iCdR758_smooth3)
smooth3_sources = find_element_sources(iCdR758_smooth3)
smooth3_byproducts = find_byproducts(iCdR758_smooth3)
# +
rough_substrates = set(rough1_substrates['id']).union(set(rough2_substrates['id']))
smooth_substrates = set(smooth3_substrates['id']).union(set(smooth2_substrates['id']))
rough_only_substrates = rough_substrates.difference(smooth_substrates)
smooth_only_substrates = smooth_substrates.difference(rough_substrates)
print('Rough only:')
for x in rough_only_substrates:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_substrates:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# +
rough_substrates = set(rough1_substrates['id']).intersection(set(rough2_substrates['id']))
smooth_substrates = set(smooth3_substrates['id']).intersection(set(smooth2_substrates['id']))
rough_only_substrates = rough_substrates.difference(smooth_substrates)
smooth_only_substrates = smooth_substrates.difference(rough_substrates)
print('Rough only:')
for x in rough_only_substrates:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_substrates:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# +
rough_byproducts = set(rough1_byproducts['id']).union(set(rough2_byproducts['id']))
smooth_byproducts = set(smooth3_byproducts['id']).union(set(smooth2_byproducts['id']))
rough_only_byproducts = rough_byproducts.difference(smooth_byproducts)
smooth_only_byproducts = smooth_byproducts.difference(rough_byproducts)
print('Rough only:')
for x in rough_only_byproducts:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_byproducts:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# +
rough_byproducts = set(rough1_byproducts['id']).intersection(set(rough2_byproducts['id']))
smooth_byproducts = set(smooth3_byproducts['id']).intersection(set(smooth2_byproducts['id']))
rough_only_byproducts = rough_byproducts.difference(smooth_byproducts)
smooth_only_byproducts = smooth_byproducts.difference(rough_byproducts)
print('Rough only:')
for x in rough_only_byproducts:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_byproducts:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# -
# #### Gene Essentiality
import cobra
import cobra.flux_analysis
# Gene essentiality: a gene counts as essential when knocking it out drops
# the objective below 80% of the model's optimal value.
minGrowth = iCdR758.slim_optimize() * 0.8
base_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR758, threshold=minGrowth)
base_essential_genes = set([x.id for x in base_essential_genes])
print(str(len(base_essential_genes)) + ' essential genes found')
# Repeat for each context-specific (RIPTiDe) model; the 80% threshold is
# relative to each model's own optimum.
minGrowth = iCdR758_rough1.model.slim_optimize() * 0.8
rough1_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR758_rough1.model, threshold=minGrowth)
rough1_essential_genes = set([x.id for x in rough1_essential_genes])
print(str(len(rough1_essential_genes)) + ' essential genes found')
minGrowth = iCdR758_rough2.model.slim_optimize() * 0.8
rough2_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR758_rough2.model, threshold=minGrowth)
rough2_essential_genes = set([x.id for x in rough2_essential_genes])
print(str(len(rough2_essential_genes)) + ' essential genes found')
minGrowth = iCdR758_smooth2.model.slim_optimize() * 0.8
smooth2_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR758_smooth2.model, threshold=minGrowth)
smooth2_essential_genes = set([x.id for x in smooth2_essential_genes])
print(str(len(smooth2_essential_genes)) + ' essential genes found')
minGrowth = iCdR758_smooth3.model.slim_optimize() * 0.8
smooth3_essential_genes = cobra.flux_analysis.find_essential_genes(iCdR758_smooth3.model, threshold=minGrowth)
smooth3_essential_genes = set([x.id for x in smooth3_essential_genes])
print(str(len(smooth3_essential_genes)) + ' essential genes found')
# +
# Filter against base model
rough1_essential_genes = rough1_essential_genes.difference(base_essential_genes)
rough2_essential_genes = rough2_essential_genes.difference(base_essential_genes)
smooth2_essential_genes = smooth2_essential_genes.difference(base_essential_genes)
smooth3_essential_genes = smooth3_essential_genes.difference(base_essential_genes)
# Find agreement within groups
rough = rough1_essential_genes.union(rough2_essential_genes)
smooth = smooth2_essential_genes.union(smooth3_essential_genes)
# Contrast groups
rough_only_essential = rough.difference(smooth)
smooth_only_essential = smooth.difference(rough)
# Display results
print('Rough only:')
for x in rough_only_essential:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_essential:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
# +
# Filter against base model
rough1_essential_genes = rough1_essential_genes.difference(base_essential_genes)
rough2_essential_genes = rough2_essential_genes.difference(base_essential_genes)
smooth2_essential_genes = smooth2_essential_genes.difference(base_essential_genes)
smooth3_essential_genes = smooth3_essential_genes.difference(base_essential_genes)
# Find agreement within groups
rough = rough1_essential_genes.intersection(rough2_essential_genes)
smooth = smooth2_essential_genes.intersection(smooth3_essential_genes)
# Contrast groups
rough_only_essential = rough.difference(smooth)
smooth_only_essential = smooth.difference(rough)
# Display results
print('Rough only:')
for x in rough_only_essential:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_essential:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
# -
# #### Topology
# +
# Genes
r_genes = set([x.id for x in iCdR758_rough1.model.genes]).union(set([x.id for x in iCdR758_rough2.model.genes]))
s_genes = set([x.id for x in iCdR758_smooth2.model.genes]).union(set([x.id for x in iCdR758_smooth3.model.genes]))
print(len(r_genes.difference(s_genes)))
print(len(s_genes.difference(r_genes)))
print(len(r_genes.intersection(s_genes)))
# +
# Reactions
r_reactions = set([x.id for x in iCdR758_rough1.model.reactions]).union(set([x.id for x in iCdR758_rough2.model.reactions]))
s_reactions = set([x.id for x in iCdR758_smooth2.model.reactions]).union(set([x.id for x in iCdR758_smooth3.model.reactions]))
print(len(r_reactions.difference(s_reactions)))
print(len(s_reactions.difference(r_reactions)))
print(len(r_reactions.intersection(s_reactions)))
# +
# Metabolites
r_metabolites = set([x.id for x in iCdR758_rough1.model.metabolites]).union(set([x.id for x in iCdR758_rough2.model.metabolites]))
s_metabolites = set([x.id for x in iCdR758_smooth2.model.metabolites]).union(set([x.id for x in iCdR758_smooth3.model.metabolites]))
print(len(r_metabolites.difference(s_metabolites)))
print(len(s_metabolites.difference(r_metabolites)))
print(len(r_metabolites.intersection(s_metabolites)))
# +
# Compare gene pruning between groups
rough_pruned = iCdR758_rough1.pruned['genes'].union(iCdR758_rough2.pruned['genes'])
smooth_pruned = iCdR758_smooth2.pruned['genes'].union(iCdR758_smooth3.pruned['genes'])
rough_only_genes = smooth_pruned.difference(rough_pruned)
smooth_only_genes = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_genes:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_genes:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
# +
# Compare gene pruning between groups
rough_pruned = iCdR758_rough1.pruned['genes'].intersection(iCdR758_rough2.pruned['genes'])
smooth_pruned = iCdR758_smooth2.pruned['genes'].intersection(iCdR758_smooth3.pruned['genes'])
rough_only_genes = smooth_pruned.difference(rough_pruned)
smooth_only_genes = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_genes:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_genes:
print(x + '\t' + iCdR758.genes.get_by_id(x).name)
# +
# Reactions
rough_pruned = iCdR758_rough1.pruned['reactions'].union(iCdR758_rough2.pruned['reactions'])
smooth_pruned = iCdR758_smooth2.pruned['reactions'].union(iCdR758_smooth3.pruned['reactions'])
rough_only_reactions = smooth_pruned.difference(rough_pruned)
smooth_only_reactions = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_reactions:
print(x + '\t' + iCdR758.reactions.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_reactions:
print(x + '\t' + iCdR758.reactions.get_by_id(x).name)
# +
# Reactions
rough_pruned = iCdR758_rough1.pruned['reactions'].intersection(iCdR758_rough2.pruned['reactions'])
smooth_pruned = iCdR758_smooth2.pruned['reactions'].intersection(iCdR758_smooth3.pruned['reactions'])
rough_only_reactions = smooth_pruned.difference(rough_pruned)
smooth_only_reactions = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_reactions:
print(x + '\t' + iCdR758.reactions.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_reactions:
print(x + '\t' + iCdR758.reactions.get_by_id(x).name)
# +
# Metabolites
rough_pruned = iCdR758_rough1.pruned['metabolites'].union(iCdR758_rough2.pruned['metabolites'])
smooth_pruned = iCdR758_smooth2.pruned['metabolites'].union(iCdR758_smooth3.pruned['metabolites'])
rough_only_metabolites = smooth_pruned.difference(rough_pruned)
smooth_only_metabolites = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_metabolites:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_metabolites:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# +
# Metabolites
rough_pruned = iCdR758_rough1.pruned['metabolites'].intersection(iCdR758_rough2.pruned['metabolites'])
smooth_pruned = iCdR758_smooth2.pruned['metabolites'].intersection(iCdR758_smooth3.pruned['metabolites'])
rough_only_metabolites = smooth_pruned.difference(rough_pruned)
smooth_only_metabolites = rough_pruned.difference(smooth_pruned)
print('Rough only:')
for x in rough_only_metabolites:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
print('\nSmooth only:')
for x in smooth_only_metabolites:
print(x + '\t' + iCdR758.metabolites.get_by_id(x).name)
# -
# +
import re
import copy
import cobra
def pathway_trace(model, substrate, threshold=0.8):
    """Trace active reactions routing flux from an extracellular substrate.

    Runs a pFBA (minimum total flux at near-optimal growth) and returns the
    substrate's exchange reaction(s) that carry flux in that solution.

    :param model: cobra Model (left untouched; a deep copy is modified).
    :param substrate: metabolite ID of the extracellular growth substrate.
    :param threshold: fraction of the optimal objective flux enforced as a
        minimum before minimizing total flux (default 0.8).
    :return: list of active reaction IDs associated with the substrate.

    NOTE(review): the original draft ended in non-parsing pseudocode (and
    referenced an unrelated global model ``iCdG791``); the network-walking
    step is left as a TODO below, so only the substrate's active exchange
    reaction(s) are returned for now.
    """
    # Initialize duplicate model so constraint/objective edits do not leak.
    temp = copy.deepcopy(model)
    substrate_rxns = set([x.id for x in temp.metabolites.get_by_id(substrate).reactions])
    # Boundary reactions have reactants only or products only.
    exchanges = set()
    for rxn in temp.reactions:
        if len(rxn.reactants) == 0 or len(rxn.products) == 0:
            exchanges |= set([rxn.id])
    # Set high previous objective flux as constraint
    objVal = temp.slim_optimize()
    obj_constraint = temp.problem.Constraint(temp.objective.expression, lb=objVal*threshold, ub=objVal)
    temp.add_cons_vars([obj_constraint])
    temp.solver.update()
    # Assemble pfba objective: minimize the sum of all absolute fluxes.
    # NOTE(review): `symengine` must already be in the namespace (it comes
    # in via `from riptide import *` earlier in this notebook) -- confirm.
    pfba_expr = symengine.RealDouble(0)
    for rxn in temp.reactions:
        pfba_expr += 1.0 * rxn.forward_variable
        pfba_expr += 1.0 * rxn.reverse_variable
    temp.objective = temp.problem.Objective(pfba_expr, direction='min', sloppy=True)
    temp.solver.update()
    # Identify active reactions and the substrate's active exchange(s).
    solution = temp.optimize()
    active_rxns = set([rxn.id for rxn in temp.reactions if abs(solution.fluxes[rxn.id]) > 1e-6])
    exchanges = exchanges.intersection(active_rxns)
    pathway = list(substrate_rxns.intersection(exchanges))
    # TODO: walk the network from each pathway reaction -- for every
    # metabolite of the current reaction, follow the next active reaction
    # carrying the largest flux (excluding the current one) and append its
    # ID to `pathway` until a sink/objective reaction is reached.
    return pathway
# -
# +
import cobra
import copy
import pandas
def find_blocked(model, threshold=1e-5):
    """Return IDs of reactions unable to carry flux in either direction.

    Performs an unconstrained FVA-style scan: each reaction is maximized
    and minimized in turn on a deep copy of the model, and is reported as
    blocked when both extreme fluxes fall below the tolerance.

    :param model: cobra Model to scan (a deep copy is mutated instead).
    :param threshold: absolute flux below which a reaction is "blocked".
    :return: list of blocked reaction IDs.
    """
    working = copy.deepcopy(model)
    blocked_ids = []
    for rxn in working.reactions:
        working.objective = rxn.id
        working.objective_direction = 'max'
        upper = working.slim_optimize()
        working.objective_direction = 'min'
        lower = working.slim_optimize()
        if abs(upper) < threshold and abs(lower) < threshold:
            blocked_ids.append(rxn.id)
    return blocked_ids
# +
import cobra
import copy
import pandas
def FVA(model, fraction=0.001):
    """Manual flux variability analysis.

    Constrains the model's original objective to at least ``fraction`` of
    its optimum, then maximizes and minimizes every reaction in turn.

    :param model: cobra Model to analyze (a deep copy is mutated instead).
    :param fraction: minimum fraction of the optimal objective to enforce.
    :return: pandas DataFrame indexed by reaction ID with columns
        'id', 'minimum', 'maximum'.
    """
    working = copy.deepcopy(model)
    # Pin the previous objective near its optimum before scanning.
    opt = working.slim_optimize()
    pinned = working.problem.Constraint(working.objective.expression, lb=opt*fraction, ub=opt)
    working.add_cons_vars([pinned])
    working.solver.update()
    # Maximize then minimize each reaction.
    records = []
    rxn_ids = []
    for rxn in working.reactions:
        working.objective = rxn.id
        working.objective_direction = 'max'
        upper = working.slim_optimize()
        working.objective_direction = 'min'
        lower = working.slim_optimize()
        rxn_ids.append(rxn.id)
        records.append([rxn.id, lower, upper])
    return pandas.DataFrame.from_records(records, columns=['id','minimum','maximum'], index=rxn_ids)
# -
iCdR758_fva = FVA(iCdR758)
iCdR758_fva
from cobra.flux_analysis import flux_variability_analysis
fva = flux_variability_analysis(iCdR758)
fva
set(rough1_substrates['name']).difference(set(rough2_substrates['name']))
rough1_substrates
# +
# Parse AUCRF results
aucrf = [['EX_cpd03170_e',14.41],['rxn07124_c',14.39],['EX_cpd00339_e',13.76],['ID008_c',13.64],
['rxn12566_c',9.61],['rxn20606_c',9.52],['rxn00293_c',4.57],['rxn00704_c',4.20],
['EX_cpd00076_e',3.76],['rxn05655_c',3.38]]
for x in aucrf: print(iCdR758.reactions.get_by_id(x[0]).name)
# -
top = ['rxn07124_c', 'ID008_c', 'EX_cpd03170_e', 'EX_cpd00339_e', 'rxn12566_c', 'rxn20606_c']
for x in top:
print(x, iCdR758.reactions.get_by_id(x).name)
# +
def find_element_sources(riptide):
    """Tally elemental supply contributed by each exchanged metabolite.

    Duplicate of the definition earlier in this notebook, re-declared for
    the following analysis cells. For every boundary reaction with a
    non-trivial absolute median flux, multiplies the metabolite's elemental
    composition by that flux and reports the single largest carbon and
    nitrogen contributors.

    :param riptide: RIPTiDe result object (has .model and .flux_samples).
    :return: dict mapping metabolite ID -> {element symbol: supply}.
    """
    # Isolate exchange reactions (boundary stubs: reactant-only or product-only)
    exchanges = []
    for rxn in riptide.model.reactions:
        if len(rxn.reactants) == 0 or len(rxn.products) == 0:
            exchanges.append(rxn.id)
    sources = {}
    c_source = ['cpd_id', 0.0]  # running max: [metabolite ID, carbon supply]
    n_source = ['cpd_id', 0.0]  # running max: [metabolite ID, nitrogen supply]
    # Parse exchange flux samples for imported metabolites
    for rxn in exchanges:
        flux = abs(numpy.median(riptide.flux_samples[rxn]))
        if flux > 1e-6:  # ignore solver noise
            # NOTE(review): assumes the boundary reaction has a reactant; a
            # product-only boundary reaction would raise IndexError here.
            metabolite = riptide.model.reactions.get_by_id(rxn).reactants[0]
            sources[metabolite.id] = {}
            # Multiply elemental components by median flux absolute value
            for element in metabolite.elements.keys():
                element_supply = round(float(metabolite.elements[element]) * flux, 3)
                sources[metabolite.id][element] = element_supply
                # Identify largest sources of carbon and nitrogen
                if element == 'C' and element_supply > c_source[1]:
                    c_source = [metabolite.id, element_supply]
                elif element == 'N' and element_supply > n_source[1]:
                    n_source = [metabolite.id, element_supply]
    print('Primary carbon source: ' + riptide.model.metabolites.get_by_id(c_source[0]).name + ' (' + str(c_source[1]) + ')')
    print('Primary nitrogen source: ' + riptide.model.metabolites.get_by_id(n_source[0]).name + ' (' + str(n_source[1]) + ')')
    return sources
# -
rough1_sources = find_element_sources(iCdR758_rough1)
rough2_sources = find_element_sources(iCdR758_rough2)
smooth2_sources = find_element_sources(iCdR758_smooth2)
smooth3_sources = find_element_sources(iCdR758_smooth3)
iCdR758.metabolites.cpd00076_e
transport_rxns = []
for rxn in iCdR758.reactions:
if len(set([cpd.compartment for cpd in rxn.metabolites])) > 1:
transport_rxns.append(rxn.id)
print(len(transport_rxns))
| notebooks/.ipynb_checkpoints/cmrRST Phase Variation R20291-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python
# name: synapse_pyspark
# ---
# ## Model Deployment with Spark Serving
# In this example, we try to predict incomes from the *Adult Census* dataset. Then we will use Spark serving to deploy it as a realtime web service.
# First, we import needed packages:
import sys
import numpy as np
import pandas as pd
# Now let's read the data and split it to train and test sets:
# NOTE(review): the original URL was corrupted by text redaction; restored to
# the canonical MMLSpark public-dataset location.
data = spark.read.parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet")
# Keep only the three feature columns and the label.
data = data.select(["education", "marital-status", "hours-per-week", "income"])
# Fixed seed so the 75/25 split is reproducible across runs.
train, test = data.randomSplit([0.75, 0.25], seed=123)
train.limit(10).toPandas()
# `TrainClassifier` can be used to initialize and fit a model, it wraps SparkML classifiers.
# You can use `help(mmlspark.TrainClassifier)` to view the different parameters.
#
# Note that it implicitly converts the data into the format expected by the algorithm. More specifically it:
# tokenizes, hashes strings, one-hot encodes categorical variables, assembles the features into a vector
# etc. The parameter `numFeatures` controls the number of hashed features.
# TrainClassifier wraps a SparkML classifier and featurizes the input
# implicitly (tokenize/hash/one-hot/assemble); numFeatures is the hash width.
from mmlspark.train import TrainClassifier
from pyspark.ml.classification import LogisticRegression
model = TrainClassifier(model=LogisticRegression(), labelCol="income", numFeatures=256).fit(train)
# After the model is trained, we score it against the test dataset and view metrics.
# NOTE(review): TrainedClassifierModel is imported but never referenced below.
from mmlspark.train import ComputeModelStatistics, TrainedClassifierModel
prediction = model.transform(test)
prediction.printSchema()
# ComputeModelStatistics derives standard classification metrics from the scored frame.
metrics = ComputeModelStatistics().transform(prediction)
metrics.limit(10).toPandas()
# First, we will define the webservice input/output.
# For more information, you can visit the [documentation for Spark Serving](https://github.com/Azure/mmlspark/blob/master/docs/mmlspark-serving.md)
# +
from pyspark.sql.types import *
from mmlspark.io import *
import uuid
# Continuous structured stream: each HTTP request hitting port 8898 is parsed
# into a row matching the test-set schema.
serving_inputs = spark.readStream.server() \
    .address("localhost", 8898, "my_api") \
    .option("name", "my_api") \
    .load() \
    .parseRequest("my_api", test.schema)
# Score each incoming row and reply with the predicted label column only.
serving_outputs = model.transform(serving_inputs) \
    .makeReply("scored_labels")
# Start the streaming query; the checkpoint dir is made unique per run via uuid1.
server = serving_outputs.writeStream \
    .server() \
    .replyTo("my_api") \
    .queryName("my_query") \
    .option("checkpointLocation", "file:///tmp/checkpoints-{}".format(uuid.uuid1())) \
    .start()
# -
# Test the webservice
import requests
data = u'{"education":" 10th","marital-status":"Divorced","hours-per-week":40.0}'
r = requests.post(data=data, url="http://localhost:8898/my_api")
print("Response {}".format(r.text))
import requests
data = u'{"education":" Masters","marital-status":"Married-civ-spouse","hours-per-week":40.0}'
r = requests.post(data=data, url="http://localhost:8898/my_api")
print("Response {}".format(r.text))
import time
# NOTE(review): this sleep runs BEFORE stopping the server, so it actually
# gives in-flight requests time to finish rather than "setting up".
time.sleep(20) # wait for server to finish setting up (just to be safe)
server.stop()
spark.stop()
| MachineLearning/SparkServing - Deploying a Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Standalone Convergence Checker for the numerical vKdV solver
#
# Copied from Standalone Convergence Checker for the numerical KdV solver - just add bathy
#
# Does not save or require any input data
# +
import xarray as xr
from iwaves.kdv.kdvimex import KdVImEx#from_netcdf
from iwaves.kdv.vkdv import vKdV
from iwaves.kdv.solve import solve_kdv
from iwaves.utils.plot import vKdV_plot
import iwaves.utils.initial_conditions as ics
import numpy as np
from scipy.interpolate import PchipInterpolator as pchip
import matplotlib.pyplot as plt
# %matplotlib inline
from matplotlib import rcParams
# Set font sizes
# Global matplotlib defaults applied to every figure in this notebook.
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Bitstream Vera Sans']
rcParams['font.serif'] = ['Bitstream Vera Sans']
rcParams["font.size"] = "14"
rcParams['axes.labelsize']='large'
# +
# CONSTANTS FOR WHOLE NOTEBOOK
d = 252.5  # water column depth [m] (z grid spans 0 to -d)
L_d = 4.0e5  # horizontal domain length [m]
Nz = 100  # number of vertical grid points
# Functions
def run_kdv(args):
    """Build and solve a single vKdV scenario.

    Parameters
    ----------
    args : tuple
        (rho_params, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw),
        as assembled by the convergence-sweep loops below.

    Returns
    -------
    mykdv : the vKdV solver object returned by ``solve_kdv``
    Bda : the final amplitude array returned by ``solve_kdv``

    Notes
    -----
    Changes from the original: removed unused locals (``dz``, ``ii``) and
    dead commented-out code; behavior is otherwise unchanged.
    """
    rho_params, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw = args

    ####################################################
    # Inputs
    # NOTE(review): the incoming `mode` argument is overwritten here, so the
    # solver always runs mode 0 regardless of the caller -- confirm intended.
    mode = 0
    Nz = 100  # shadows the notebook-level Nz with the same value

    ntout = 1800.0  # output interval passed to solve_kdv

    # Vertical and horizontal grids
    z = np.linspace(0, -d, Nz)
    x = np.arange(-2*dx, L_d+dx, dx)

    # Bathymetry profile on the horizontal grid
    h = ics.depth_tanh2(bathy_params, x)

    kdvargs = dict(
        verbose=False,
        a0=a0,
        Lw=Lw,
        mode=mode,
        dt=dt,
        nu_H=nu_H,
        ekdv=False,
        wavefunc=ics.eta_fullsine,
        x=x,
        Nsubset=10,
        nonlinear=False,
        nonhydrostatic=False,
    )

    # Density profile from the double-tanh parameterisation
    rhoz = ics.rho_double_tanh_rayson(rho_params, z)

    ######
    ## Call the vKdV run function
    mykdv, Bda = solve_kdv(rhoz, z, runtime,
                           solver='vkdv', h=h, ntout=ntout, outfile=None, **kdvargs)

    print('Done with dx={} and dt={}'.format(dx, dt))

    return mykdv, Bda
# +
# Preview the bathymetry profile used by every run below.
dx = 10
x = np.arange(-2*dx,L_d+dx,dx)
# [step mid-point, step width, deep depth, shallow depth] -- presumably;
# confirm against ics.depth_tanh2's signature.
bathy_params = [L_d*0.5, 50000, d+50, d-50]
h = ics.depth_tanh2(bathy_params, x)
plt.figure(figsize=(9,5))
plt.plot(x, h, 'k')
plt.ylabel('h (m)')
plt.xlabel('x (m)')
plt.title('vKdV bathy')
# +
# Double-tanh density-profile fit parameters for two stratifications.
#betas = [1023.7, 1.12, 105, 52, 155, 43] # ~April 5
#betas = [1023.5, 1.22, 67, 55, 157, 52] # ~March 1
betas_w = [1023.8229810318612,
           0.9865506702797462,
           143.5428700089361,
           46.1265812512485,
           136.66278860120943,
           41.57014327398592] # 15 July 2016
betas_s =[1023.6834358117951,
          1.2249066117658955,
          156.78804559089772,
          53.66835548728355,
          73.14183287436342,
          40.21031777315428] # 1st April 2017
a0 = 20.  # initial wave amplitude
mode =0
nu_H = 0  # horizontal viscosity disabled
runtime = 2.5*86400.  # 2.5 days [s]
# Going to make Lw an input for the vKdV as it will really speed things up.
dx = 100
dt = 10
z = np.linspace(0, -d, Nz)
rhoz_w = ics.rho_double_tanh_rayson(betas_w, z)
rhoz_s = ics.rho_double_tanh_rayson(betas_s, z)
# Compute the wavelength scale once here and pass it into every run.
Lw_w = ics.get_Lw(rhoz_w, z, z0=max(h), mode=0)
Lw_s = ics.get_Lw(rhoz_s, z, z0=max(h), mode=0)
print(Lw_w)
print(Lw_s)
# +
# Spatial-resolution sweep. NOTE: dxs is reassigned, only the LAST list runs.
dxs =[1600,800,400,200,100,75,50,37.5,25]
dxs =[800,400,200,100,75,50,35]
dxs =[800,200,100,50]
dt = 8.
all_kdv_dx_w = []
all_kdv_dx_s = []
for dx in dxs:
    print(' ')
    print('Running dx={}'.format(dx))
    print(' ')
    # One run per stratification ("_w" and "_s" parameter sets) at this dx.
    mykdv, B = run_kdv( (betas_w, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_w))
    all_kdv_dx_w.append(mykdv)
    mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s))
    all_kdv_dx_s.append(mykdv)
    print(' ')
    print('Completed dx={}'.format(dx))
    print(' ')
# +
# Final amplitude snapshots for each dx ("_s" runs): full domain, then zoom.
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dx_s:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
# plt.xlim((162200, 163600))
plt.legend()
plt.show()
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dx_s:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
# plt.xlim((162200, 163600))
plt.ylim((-65, 40))
plt.xlim((250000, 300000))
plt.legend()
# +
# Same two views for the "_w" runs.
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dx_w:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
plt.legend()
plt.show()
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dx_w:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
plt.legend()
plt.ylim((-40, 10))
plt.xlim((250000, 300000))
# +
# Compute the errors
# Interpolate every solution onto a common 10 m grid, then measure each run's
# L2 distance from the finest-resolution run (last element of dxs).
X = np.arange(0,L_d, 10.)
nx = X.shape[0]
ndx = len(dxs)
solns = np.zeros((ndx, nx))
for ii, mykdv in enumerate(all_kdv_dx_w):
    Fx = pchip(mykdv.x, mykdv.B)
    solns[ii,:] = Fx(X)
# Compute the error between each solution
#err = np.diff(solns, axis=0)
err = solns - solns[-1,:]
err_rms_w = np.linalg.norm(err, ord=2, axis=1) # L2-norm
#err_rms_w = np.sqrt(np.mean(err**2,axis=1))
solns = np.zeros((ndx, nx))
for ii, mykdv in enumerate(all_kdv_dx_s):
    Fx = pchip(mykdv.x, mykdv.B)
    solns[ii,:] = Fx(X)
# Compute the error between each solution
#err = np.diff(solns, axis=0)
err = solns - solns[-1,:]
err_rms_s = np.linalg.norm(err, ord=2, axis=1) # L2-norm
#err_rms_s = np.sqrt(np.mean(err**2,axis=1))
# +
# Convergence plot vs dx; dashed/dotted lines are 2nd/1st-order reference slopes.
plt.figure(figsize=(9,8))
plt.loglog(dxs[:-1],err_rms_s[:-1],'ko')
plt.loglog(dxs[:-1],err_rms_w[:-1],'s', color='0.5')
plt.xlim(2e1,2e3)
plt.ylim(1e-2,2e3)
plt.grid(b=True)
x0 = np.array([50,100.])
plt.plot(x0, 100/x0[0]**2*x0**2, 'k--')
plt.plot(x0, 100/x0[0]**1*x0**1, 'k:')
plt.ylabel('L2-norm Error [m]')
plt.xlabel('$\Delta x$ [m]')
# KdV coefficient ranges for the legend (from the first run of each sweep).
alpha_s = -2*all_kdv_dx_s[0].c1*all_kdv_dx_s[0].r10
beta_s = -1*all_kdv_dx_s[0].r01
alpha_w = -2*all_kdv_dx_w[0].c1*all_kdv_dx_w[0].r10
beta_w = -1*all_kdv_dx_w[0].r01
# NOTE(review): four artists are plotted but only three legend labels are
# given; labels pair with the first three artists -- confirm intended.
plt.legend((r'~\Deltax$^1$',
            r'$\alpha$ = (%3.4f,%3.4f), $\beta$ = (%3.4f,%3.4f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),
            r'$\alpha$ = (%3.4f,%3.4f), $\beta$ = (%3.4f,%3.4f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')
# +
# Delta t comparison
# Temporal-resolution sweep at fixed dx=50 m, mirroring the dx sweep above.
dts = [20,10.,5,2.5,1.25,0.6,0.3]
dx = 50.
all_kdv_dt_w = []
all_kdv_dt_s = []
for dt in dts:
    print(' ')
    print('Running dt={}'.format(dt))
    print(' ')
    mykdv, B = run_kdv( (betas_w, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_w))
    all_kdv_dt_w.append(mykdv)
    mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s))
    all_kdv_dt_s.append(mykdv)
    print(' ')
    print('Completed dt={}'.format(dt))
    print(' ')
# +
# Snapshots for each dt ("_s" runs): full domain, then zoom.
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dt_s:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.legend()
plt.show()
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dt_s:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.legend()
plt.ylim((-50, 30))
plt.xlim((195000, 210000))
# +
# Same two views for the "_w" runs.
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dt_w:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.legend()
plt.show()
plt.figure(figsize=(9,5))
for mykdv in all_kdv_dt_w:
    plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.legend()
plt.ylim((-30, 1))
plt.xlim((175000, 205000))
# +
# Compute the errors
# Same common-grid L2 error as the dx sweep, now relative to the smallest dt.
X = np.arange(0,L_d, 10.)
nx = X.shape[0]
ndx = len(dts)
solns = np.zeros((ndx, nx))
for ii, mykdv in enumerate(all_kdv_dt_w):
    print(ii)
    Fx = pchip(mykdv.x, mykdv.B)
    solns[ii,:] = Fx(X)
# Compute the error between each solution
#err = np.diff(solns, axis=0)
err = solns - solns[-1,:]
err_rms_w_t = np.linalg.norm(err, ord=2, axis=1) # L2-norm
#err_rms_w = np.sqrt(np.mean(err**2,axis=1))
solns = np.zeros((ndx, nx))
for ii, mykdv in enumerate(all_kdv_dt_s):
    print(ii)
    Fx = pchip(mykdv.x, mykdv.B)
    solns[ii,:] = Fx(X)
# Compute the error between each solution
#err = np.diff(solns, axis=0)
err = solns - solns[-1,:]
err_rms_s_t = np.linalg.norm(err, ord=2, axis=1) # L2-norm
#err_rms_s = np.sqrt(np.mean(err**2,axis=1))
# +
# Two-panel summary figure: (a) convergence vs dx, (b) convergence vs dt,
# each with 2nd-order (dashed) and 1st-order (dotted) reference slopes.
plt.figure(figsize=(12,8))
ax=plt.subplot(121)
plt.loglog(dxs[:-1],err_rms_s[:-1],'ko', markersize=6)
plt.loglog(dxs[:-1],err_rms_w[:-1],'s', color='0.5', markersize=4)
plt.xlim(2e1,2e3)
plt.ylim(1e0,2e3)
plt.grid(b=True)
x0 = np.array([50,100.])
plt.plot(x0, 100/x0[0]**2*x0**2, 'k--')
plt.plot(x0, 100/x0[0]**1*x0**1, 'k:')
plt.ylabel('L2-norm Error [m]')
plt.xlabel('$\Delta x$ [m]')
# Coefficient ranges shown in the legend (dx-sweep runs).
alpha_s = -2*all_kdv_dx_s[0].c1*all_kdv_dx_s[0].r10
beta_s = -1*all_kdv_dx_s[0].r01
alpha_w = -2*all_kdv_dx_w[0].c1*all_kdv_dx_w[0].r10
beta_w = -1*all_kdv_dx_w[0].r01
plt.legend((r'$\alpha$ = (%3.3f, %3.3f), $\beta$ = (%3.0f, %3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),
            r'$\alpha$ = (%3.3f, %3.3f), $\beta$ = (%3.0f, %3.0f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')
plt.text(0.05,0.95,'(a)',transform=ax.transAxes)
ax=plt.subplot(122)
plt.loglog(dts[:-1],err_rms_s_t[:-1],'kd', markersize=6)
plt.loglog(dts[:-1],err_rms_w_t[:-1],'s', color='0.5', markersize=4)
# NOTE(review): a lower x-limit of 0 is invalid on a log axis -- confirm the
# intended lower bound.
plt.xlim(0,0.5e2)
plt.ylim(1e-2,1e3)
plt.grid(b=True)
x0 = np.array([5,20])
plt.plot(x0, 10/x0[0]**2*x0**2, 'k--')
plt.plot(x0, 10/x0[0]**1*x0**1, 'k:')
#plt.ylabel('L2-norm Error [m]')
plt.xlabel('$\Delta t$ [s]')
plt.text(0.05,0.95,'(b)',transform=ax.transAxes)
# Coefficient ranges for the dt-sweep legend (overwrites the dx-sweep values).
alpha_s = -2*all_kdv_dt_s[0].c1*all_kdv_dt_s[0].r10
beta_s = -1*all_kdv_dt_s[0].r01
alpha_w = -2*all_kdv_dt_w[0].c1*all_kdv_dt_w[0].r10
beta_w = -1*all_kdv_dt_w[0].r01
plt.legend((r'$\alpha$ = (%3.3f, %3.3f), $\beta$ = (%3.0f, %3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),
            r'$\alpha$ = (%3.3f, %3.3f), $\beta$ = (%3.0f, %3.0f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')
plt.savefig('../FIGURES/vkdv_convergence_dxdt.png',dpi=150)
plt.savefig('../FIGURES/vkdv_convergence_dxdt.pdf',dpi=150)
# -
| tests/DELETE standalone_vkdv_convergence linearHydrostatic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow2_p36)
# language: python
# name: conda_tensorflow2_p36
# ---
# !pip install -qU horovod
# + [markdown] papermill={"duration": 0.011952, "end_time": "2021-06-01T00:13:00.123603", "exception": false, "start_time": "2021-06-01T00:13:00.111651", "status": "completed"} tags=[]
# # Identify a CPU bottleneck caused by a callback process with Amazon SageMaker Debugger
#
# In this notebook we demonstrate how to identify a training bottleneck that is caused by a TensorFlow Keras callback.
# To simulate this type of bottleneck, we will program the callback associated with the tensor monitoring feature of Amazon SageMaker Debugger, to collect an excessive number of tensors, and at a high frequency.
# + [markdown] papermill={"duration": 0.011776, "end_time": "2021-06-01T00:13:00.147143", "exception": false, "start_time": "2021-06-01T00:13:00.135367", "status": "completed"} tags=[]
# ### Install sagemaker
# To use the new Debugger profiling features, ensure that you have the latest version of SageMaker SDK installed. The following cell updates the library and restarts the Jupyter kernel to apply the updates.
# + papermill={"duration": 0.018328, "end_time": "2021-06-01T00:13:00.177237", "exception": false, "start_time": "2021-06-01T00:13:00.158909", "status": "completed"} tags=[]
import sys
import IPython

# One-shot guard: set to True on the first run to upgrade the SageMaker SDK;
# the kernel restart afterwards makes the new version take effect.
install_needed = False  # should only be True once
if install_needed:
    print("installing deps and restarting kernel")
    # !{sys.executable} -m pip install -U sagemaker
    IPython.Application.instance().kernel.do_shutdown(True)
# + [markdown] papermill={"duration": 0.011722, "end_time": "2021-06-01T00:13:00.200769", "exception": false, "start_time": "2021-06-01T00:13:00.189047", "status": "completed"} tags=[]
# ## 1. Prepare training dataset
#
# ### Tensorflow Datasets package
#
# First of all, set the notebook kernel to Tensorflow 2.x.
#
# We will use CIFAR-10 dataset for this experiment. To download CIFAR-10 datasets and convert it into TFRecord format, install `tensorflow-datasets` package, run `demo/generate_cifar10_tfrecords`, and upload tfrecord files to your S3 bucket.
# + jupyter={"outputs_hidden": true} papermill={"duration": 22.404515, "end_time": "2021-06-01T00:13:22.617136", "exception": false, "start_time": "2021-06-01T00:13:00.212621", "status": "completed"} tags=[]
# !python demo/generate_cifar10_tfrecords.py --data-dir=./data
# + papermill={"duration": 4.663508, "end_time": "2021-06-01T00:13:27.334960", "exception": false, "start_time": "2021-06-01T00:13:22.671452", "status": "completed"} tags=[]
import sagemaker

# Upload the locally generated tfrecords to the session's default S3 bucket.
s3_bucket = sagemaker.Session().default_bucket()
dataset_prefix = "data/cifar10-tfrecords"
desired_s3_uri = f"s3://{s3_bucket}/{dataset_prefix}"
dataset_location = sagemaker.s3.S3Uploader.upload(local_path="data", desired_s3_uri=desired_s3_uri)
print(f"Dataset uploaded to {dataset_location}")
# + [markdown] papermill={"duration": 0.051662, "end_time": "2021-06-01T00:13:27.438637", "exception": false, "start_time": "2021-06-01T00:13:27.386975", "status": "completed"} tags=[]
# ## 2. Create a Training Job with Profiling Enabled<a class="anchor" id="option-1"></a>
#
# We will use the standard [SageMaker Estimator API for Tensorflow](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.html#tensorflow-estimator) to create a training job. To enable profiling, we create a `ProfilerConfig` object and pass it to the `profiler_config` parameter of the `TensorFlow` estimator. For this demo, we set the profiler to probe the system once every 500 milliseconds.
# + [markdown] papermill={"duration": 0.051632, "end_time": "2021-06-01T00:13:27.541950", "exception": false, "start_time": "2021-06-01T00:13:27.490318", "status": "completed"} tags=[]
# ### Set a profiler configuration
# + papermill={"duration": 0.058237, "end_time": "2021-06-01T00:13:27.652143", "exception": false, "start_time": "2021-06-01T00:13:27.593906", "status": "completed"} tags=[]
from sagemaker.debugger import ProfilerConfig, FrameworkProfile

# Probe system metrics every 500 ms; detailed framework profiling is limited
# to 2 steps starting at step 5.
profiler_config = ProfilerConfig(
    system_monitor_interval_millis=500,
    framework_profile_params=FrameworkProfile(
        local_path="/opt/ml/output/profiler/", start_step=5, num_steps=2
    ),
)
# + [markdown] papermill={"duration": 0.052106, "end_time": "2021-06-01T00:13:27.756284", "exception": false, "start_time": "2021-06-01T00:13:27.704178", "status": "completed"} tags=[]
# ### Configure Debugger hook
# We configure the debugger hook to collect an excessive number of tensors, every 50 steps.
# + papermill={"duration": 0.058783, "end_time": "2021-06-01T00:13:27.867516", "exception": false, "start_time": "2021-06-01T00:13:27.808733", "status": "completed"} tags=[]
import os
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig

# Deliberately heavy tensor collection (outputs/gradients/weights/layers saved
# every 50 steps) to provoke the callback bottleneck this notebook studies.
debugger_hook_config = DebuggerHookConfig(
    hook_parameters={"save_interval": "50"},
    collection_configs=[
        CollectionConfig(name="outputs"),
        CollectionConfig(name="gradients"),
        CollectionConfig(name="weights"),
        CollectionConfig(name="layers"),
    ],
)
# + [markdown] papermill={"duration": 0.052574, "end_time": "2021-06-01T00:13:27.972348", "exception": false, "start_time": "2021-06-01T00:13:27.919774", "status": "completed"} tags=[]
# ### Define hyperparameters
#
# The start-up script is set to [train_tf_bottleneck.py](./demo/train_tf_bottleneck.py). Define hyperparameters such as number of epochs, and batch size.
# + papermill={"duration": 0.058554, "end_time": "2021-06-01T00:13:28.085393", "exception": false, "start_time": "2021-06-01T00:13:28.026839", "status": "completed"} tags=[]
# Hyperparameters forwarded to demo/train_tf_bottleneck.py.
hyperparameters = {"epoch": 2, "batch_size": 128}
# + [markdown] papermill={"duration": 0.052114, "end_time": "2021-06-01T00:13:28.190111", "exception": false, "start_time": "2021-06-01T00:13:28.137997", "status": "completed"} tags=[]
# ### Get the image URI
# The image that we will use depends on the region that you are running this notebook in.
# + papermill={"duration": 0.071134, "end_time": "2021-06-01T00:13:28.313374", "exception": false, "start_time": "2021-06-01T00:13:28.242240", "status": "completed"} tags=[]
import boto3

# Region-specific AWS Deep Learning Container for TensorFlow 2.3.1 GPU training.
session = boto3.session.Session()
region = session.region_name
image_uri = f"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.3.1-gpu-py37-cu110-ubuntu18.04"
# + [markdown] papermill={"duration": 0.052806, "end_time": "2021-06-01T00:13:28.419138", "exception": false, "start_time": "2021-06-01T00:13:28.366332", "status": "completed"} tags=[]
# ### Define SageMaker Tensorflow Estimator
# To enable profiling, you need to pass the Debugger profiling configuration (`profiler_config`), a list of Debugger rules (`rules`), and the image URI (`image_uri`) to the estimator. Debugger enables monitoring and profiling while the SageMaker estimator requests a training job.
# + papermill={"duration": 0.51737, "end_time": "2021-06-01T00:13:28.989048", "exception": false, "start_time": "2021-06-01T00:13:28.471678", "status": "completed"} tags=[]
import sagemaker
from sagemaker.tensorflow import TensorFlow

job_name = "network-bottleneck"
instance_count = 1
instance_type = "ml.p2.xlarge"
entry_script = "train_tf_bottleneck.py"

# Estimator with Debugger profiling and the tensor hook attached; Pipe input
# mode streams the S3 dataset instead of downloading it up-front.
estimator = TensorFlow(
    role=sagemaker.get_execution_role(),
    image_uri=image_uri,
    base_job_name=job_name,
    instance_type=instance_type,
    instance_count=instance_count,
    entry_point=entry_script,
    source_dir="demo",
    profiler_config=profiler_config,
    debugger_hook_config=debugger_hook_config,
    script_mode=True,
    hyperparameters=hyperparameters,
    input_mode="Pipe",
)
# + [markdown] papermill={"duration": 0.052871, "end_time": "2021-06-01T00:13:29.095305", "exception": false, "start_time": "2021-06-01T00:13:29.042434", "status": "completed"} tags=[]
# > If you see an error, `TypeError: __init__() got an unexpected keyword argument 'instance_type'`, that means SageMaker Python SDK is out-dated. Please update your SageMaker Python SDK to 2.x by executing the below command and restart this notebook.
#
# ```bash
# pip install --upgrade sagemaker
# ```
# + [markdown] papermill={"duration": 0.052919, "end_time": "2021-06-01T00:13:29.201150", "exception": false, "start_time": "2021-06-01T00:13:29.148231", "status": "completed"} tags=[]
# ### Start training job
#
# The following `estimator.fit()` with `wait=False` argument initiates the training job in the background. You can proceed to run the dashboard or analysis notebooks.
# + papermill={"duration": 0.66101, "end_time": "2021-06-01T00:13:29.915670", "exception": false, "start_time": "2021-06-01T00:13:29.254660", "status": "completed"} tags=[]
remote_inputs = {"train": dataset_location + "/train"}
# NOTE(review): wait=True blocks until the job finishes, although the
# surrounding text describes launching in the background with wait=False --
# confirm which is intended.
estimator.fit(remote_inputs, wait=True)
# + [markdown] papermill={"duration": 0.053011, "end_time": "2021-06-01T00:13:30.022207", "exception": false, "start_time": "2021-06-01T00:13:29.969196", "status": "completed"} tags=[]
# ## 3. Monitor the system resource utilization using SageMaker Studio
#
# SageMaker Studio provides the visualization tool for Sagemaker Debugger where you can find the analysis report and the system and framework resource utilization history.
#
# To access this information in SageMaker Studio, click on the last icon on the left to open `SageMaker Components and registries` and choose `Experiments and trials`. You will see the list of training jobs. Right click on the job you want to investigate shows a pop-up menu, then click on `Open Debugger for insights` which opens a new tab for SageMaker Debugger.
#
# There are two tabs, `Overview` and `Nodes`. `Overview` gives profiling summaries for quick review, and `Nodes` gives a detailed utilization information on all nodes.
# + [markdown] papermill={"duration": 0.05283, "end_time": "2021-06-01T00:13:30.127965", "exception": false, "start_time": "2021-06-01T00:13:30.075135", "status": "completed"} tags=[]
# ## 4. SageMaker Debugger profiling analysis utilities
# We can use the profiling analysis utilities to gain deeper insights into what the source of the issue is.
# For this step, we will rely on the bokeh and smdebug packages
# + papermill={"duration": 10.153683, "end_time": "2021-06-01T00:13:40.334396", "exception": false, "start_time": "2021-06-01T00:13:30.180713", "status": "completed"} tags=[]
# ! pip install bokeh==2.1.1
# ! pip install smdebug
# + [markdown] papermill={"duration": 0.064631, "end_time": "2021-06-01T00:13:40.464137", "exception": false, "start_time": "2021-06-01T00:13:40.399506", "status": "completed"} tags=[]
# Use smdebug to extract gpu and framework metrics
# + papermill={"duration": 2.455191, "end_time": "2021-06-01T00:13:42.983612", "exception": true, "start_time": "2021-06-01T00:13:40.528421", "status": "failed"} tags=[]
import boto3
from smdebug.profiler.analysis.notebook_utils.training_job import TrainingJob
from smdebug.profiler.analysis.utils.profiler_data_to_pandas import PandasFrame

# Pull the profiler output of the job launched above into pandas frames.
training_job_name = estimator.latest_training_job.name
region = boto3.Session().region_name
tj = TrainingJob(training_job_name, region)
pf = PandasFrame(tj.profiler_s3_output_path)

# extract gpu metrics
system_metrics_df = pf.get_all_system_metrics()
gpus = system_metrics_df[system_metrics_df["dimension"] == "GPUUtilization"]
timestamps = gpus["timestamp_us"].to_numpy()
values = gpus["value"].to_numpy()

# extract framework metrics (per-step start/end times)
framework_metrics_df = pf.get_all_framework_metrics(
    selected_framework_metrics=["Step:ModeKeys.TRAIN", "Step:ModeKeys.GLOBAL"]
)
train_steps = framework_metrics_df[
    framework_metrics_df["framework_metric"].isin(["Step:ModeKeys.TRAIN", "Step:ModeKeys.GLOBAL"])
]
start_step = train_steps["start_time_us"].to_numpy()
end_step = train_steps["end_time_us"].to_numpy()
step_num = train_steps["step"].to_numpy()
# -
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Use bokeh to plot the gpu metrics and the training progression on the same graph. This enables us to correlate between the two. We can see that the drops in gpu utilization coincide with every 50th step, which are marked in yellow. These are precisely the steps in which we have chosen to capture all of the graph tensors.
# 
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
import numpy as np
from bokeh.models import ColumnDataSource, CustomJS, Div, HoverTool, HBar
from bokeh.models.glyphs import Circle, Line
from bokeh.plotting import figure, show
plot = figure(
plot_height=400,
plot_width=1400,
x_range=(timestamps[0], timestamps[-1]),
y_range=(-1, 110),
tools="crosshair,xbox_select,pan,reset,save,xwheel_zoom",
)
x_range = plot.x_range
plot.xgrid.visible = False
plot.ygrid.visible = False
colors = np.where(step_num % 50 == 0, "yellow", "purple")
# pad framework metrics to match length of system metrics
pad = values.size - step_num.size
source = ColumnDataSource(
data=dict(
x=timestamps,
y=values,
left=np.pad(start_step, (0, pad)),
right=np.pad(end_step, (0, pad)),
color=np.pad(colors, (0, pad)),
)
)
callback = CustomJS(
args=dict(s1=source, div=Div(width=250, height=100, height_policy="fixed")),
code="""
console.log('Running CustomJS callback now.');
var inds = s1.selected.indices;
console.log(inds);
var line = "<span style=float:left;clear:left;font_size=13px><b> Selected index range: [" + Math.min.apply(Math,inds) + "," + Math.max.apply(Math,inds) + "]</b></span>\\n";
console.log(line)
var text = div.text.concat(line);
var lines = text.split("\\n")
if (lines.length > 35)
lines.shift();
div.text = lines.join("\\n");""",
)
plot.js_on_event("selectiongeometry", callback)
line = Line(x="x", y="y", line_color="white")
circle = Circle(x="x", y="y", fill_alpha=0, line_width=0)
hbar = HBar(
y=105, height=5, right="right", left="left", fill_color="color", line_cap="round", line_width=0
)
p = plot.add_glyph(source, line)
p = plot.add_glyph(source, circle)
p = plot.add_glyph(source, hbar)
# create tooltip for hover tool
hover = HoverTool(renderers=[p], tooltips=[("index", "$index"), ("(x,y)", "($x, $y)")])
plot.xaxis.axis_label = "Time in ms"
plot.yaxis.axis_label = "GPU Utilization"
plot.add_tools(hover)
show(plot, notebook_handle=True)
| sagemaker-debugger/tensorflow_profiling/callback_bottleneck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## GloVe embedding
#
# Download GloVe pre-trained embedding glove.6B.zip from: https://nlp.stanford.edu/projects/glove/
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# -
# load the whole embedding into memory as {word: 1-D float32 vector}
embeddings = dict()
# Use a context manager so the file is closed even if a line fails to parse,
# and pin the encoding (GloVe files are UTF-8) instead of relying on the
# platform default.
with open('glove.6B.200d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        embedding = np.array(values[1:], dtype='float32')
        embeddings[word] = embedding
print('Loaded %s word vectors.' % len(embeddings))
# Word-analogy demo: vector('beijing') - vector('china') + vector('philippines')
# should land near the Philippine capital.
x = embeddings['beijing']
# print(x)
y = embeddings['china']
# print(y)
z = embeddings['philippines']
# print(z)
# Analogy query vector (reuses the name x; consumed by the search loop below).
x = np.subtract(x, y)
x = np.add(x, z)
# Scan the vocabulary for the embedding most similar to the analogy vector x,
# printing each new best candidate as it is found.
predict = None
# Start at -inf so the search also works when every dot product is negative;
# with the original 0 initializer, predict could remain None in that case.
max_similarity = -np.inf
for word, embedding in embeddings.items():
    # NOTE(review): a raw dot product favours long vectors; cosine similarity
    # (normalising by both norms) is the standard metric here, and the query
    # words themselves are not excluded -- consider both refinements.
    similarity = np.dot(x, embedding)
    if similarity > max_similarity:
        predict = word
        print("Similarity: ", similarity, "Prediction: ", word)
        max_similarity = similarity
| keras/embedding/glove_embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ccxt_env)
# language: python
# name: ccxt_env
# ---
# # Split data files into chunks ordered by days, weeks, ...
#
#
import numpy as np
from datetime import datetime
from quickndirtybot import io
# +
filename_in = '../data/gdax_data'

# split by conversion: strftime pattern defining the chunk key;
# only the LAST assignment takes effect (weekly chunks).
splitby = '_%Y-%m-%d'
splitby = '_%Y-%U'

data = io.load_csv(filename_in + '.csv')

# get datetimes from millisecond timestamps in column 0
time = [datetime.fromtimestamp(x/1000) for x in data[:, 0]]
categories = [ti.strftime(splitby) for ti in time]

lastidx = 0
for i, (line, lastline) in enumerate(zip(categories[1:], categories[:-1])):
    if line != lastline:
        # Rows lastidx..i (inclusive) all share category `lastline`.
        # BUGFIX: the original sliced data[lastidx:i], silently dropping the
        # last row of every chunk.
        io.save_csv(data[lastidx:i + 1, :], filename_in + lastline + '.csv')
        lastidx = i + 1
# Final chunk. Using categories[-1] instead of the loop variable also fixes a
# NameError when data has a single row (the loop body never runs).
io.save_csv(data[lastidx:, :], filename_in + categories[-1] + '.csv')
# -
| notebooks/.ipynb_checkpoints/220180415_split_data_into_chunks-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="TA21Jo5d9SVq"
#
#
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/DEID_EHR_DATA.ipynb)
#
#
#
# + [markdown] id="CzIdjHkAW8TB"
# # **De-identify Structured Data**
# + [markdown] id="6uDmeHEFW7_h"
# To run this yourself, you will need to upload your license keys to the notebook. Just Run The Cell Below in order to do that. Also You can open the file explorer on the left side of the screen and upload `license_keys.json` to the folder that opens.
# Otherwise, you can look at the example outputs at the bottom of the notebook.
#
#
# + [markdown] id="wIeCOiJNW-88"
# ## 1. Colab Setup
# + [markdown] id="HMIDv74CYN0d"
# Import license keys
# + id="ttHPIV2JXbIM"
import json
import os

from google.colab import files

# Prompt the user to upload license_keys.json, then parse it.
license_keys = files.upload()

with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)

# Defining license key-value pairs as local variables
# NOTE(review): locals().update() is only reliable at module (notebook) scope.
locals().update(license_keys)

# Adding license key-value pairs to environment variables
os.environ.update(license_keys)
# + [markdown] id="rQtc1CHaYQjU"
# Install dependencies
# + id="CGJktFHdHL1n"
# Installing pyspark and spark-nlp
# ! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
# ! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
# + [markdown] id="Hj5FRDV4YSXN"
# Import dependencies into Python
# + id="qUWyj8c6JSPP"
import pandas as pd
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from tabulate import tabulate
import sparknlp
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
# + [markdown] id="ed6Htm7qDQB3"
# Start the Spark session
# + id="eaSM8-xhDRa4" outputId="a32c2047-23d5-474c-cf47-156dec4509f0" colab={"base_uri": "https://localhost:8080/", "height": 254}
# manually start session (uncomment to tune driver memory / serializer limits)
# params = {"spark.driver.memory" : "16G",
# "spark.kryoserializer.buffer.max" : "2000M",
# "spark.driver.maxResultSize" : "2000M"}

# spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)

print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())

# Start a licensed Spark NLP for Healthcare session with default parameters.
spark = sparknlp_jsl.start(license_keys['SECRET'])
# Display the SparkSession (notebook rich repr).
spark
# + [markdown] id="tGmN8zv1Zcdf"
# ## 2. Download Structured PHI Data and Create a `DataFrame`
# + id="XnbZdOsNlDhu"
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/hipaa-table-001.txt
# + colab={"base_uri": "https://localhost:8080/"} id="Bj1Z2G7Tk7NI" outputId="fc19fbe2-adb0-461f-c79a-4e754207a0e0"
# Load the tab-separated structured PHI sample into a Spark DataFrame.
df = spark.read.format("csv") \
    .option("sep", "\t") \
    .option("inferSchema", "true") \
    .option("header", "true") \
    .load("hipaa-table-001.txt")

# Rename PATIENT -> NAME so the column matches the obfuscation mapping below.
df = df.withColumnRenamed("PATIENT","NAME")
df.show(truncate=False)
# + [markdown] id="1gmrjqHSGcJx"
# ## 3. De-identify using Obfuscation Method
# + id="ZBfKNzlWldBj"
from sparknlp_jsl.structured_deidentification import StructuredDeidentification
# + [markdown] id="Sw_IqjgicEPY"
# We will obfuscate `NAME` column as `PATIENT`, `AGE` column as `AGE` and `TEL` column as `PHONE`.
# + [markdown] id="2z6FZ5qluWNM"
# We can shift n days in the structured deidentification through "days" parameter when the column is a Date.
# + id="VztWV7pCoi8r"
# Column -> entity-label mapping; `faker` generates synthetic replacements.
# `columnsSeed` fixes the per-column random seed so runs are reproducible;
# `days=5` shifts date columns by 5 days (only affects Date-typed columns).
obfuscator = StructuredDeidentification(spark,{"NAME" : "PATIENT", "AGE" : "AGE", "TEL" : "PHONE"},
                                        obfuscateRefSource='faker',
                                        columnsSeed={"NAME": 23, "DOB": 23},
                                        days=5)
obfuscator_df = obfuscator.obfuscateColumns(df)
# + colab={"base_uri": "https://localhost:8080/"} id="KbbnDrfhd25M" outputId="cbdaff2f-bbcc-4210-a844-f015f564195c"
obfuscator_df.select("NAME", "AGE", "TEL").show(truncate=False)
# + [markdown] id="jT9xz9big4I3"
# The annotator does not have fake `DATE` chunks by default. Let's do it manually. We can create a `faker` dictionary for `DOB` column as `DATE` label then we obfuscate `DOB` column as well.
# + id="wSs270YjocHi"
# Custom reference file: one "value#LABEL" entry per line. These are the
# fake DATE values the obfuscator will draw from for the DOB column.
obfuscator_unique_ref_test = '''2022-11-1#DATE
2033-10-30#DATE
2011-8-22#DATE
2005-11-1#DATE
2008-10-30#DATE
2044-8-22#DATE
2022-04-1#DATE
2033-05-30#DATE
2011-09-22#DATE
2005-12-1#DATE
2008-02-30#DATE
2044-03-22#DATE
2055-11-1#DATE
2066-10-30#DATE
2077-8-22#DATE
2088-11-1#DATE
2099-10-30#DATE
2100-8-22#DATE
2111-04-1#DATE
2122-05-30#DATE
2133-09-22#DATE
2144-12-1#DATE
2155-02-30#DATE
2166-03-22#DATE'''
with open('obfuscator_unique_ref_test.txt', 'w') as f:
    f.write(obfuscator_unique_ref_test)
# + id="gqFtN0x5leZF"
# Second pass: also obfuscate DOB as DATE, drawing replacements from the
# reference file written above. NOTE: the absolute /content/ path assumes a
# Colab runtime — TODO confirm when running elsewhere.
obfuscator = StructuredDeidentification(spark,{"NAME":"PATIENT","AGE":"AGE", "DOB":"DATE", "TEL":"PHONE"}, obfuscateRefFile="/content/obfuscator_unique_ref_test.txt")
obfuscator_df = obfuscator.obfuscateColumns(df)
# + colab={"base_uri": "https://localhost:8080/"} id="QDNY4d8FgSyt" outputId="0b537967-2f68-420e-8839-71be633b4acb"
obfuscator_df.select("NAME", "DOB", "AGE", "TEL").show(truncate=False)
| tutorials/streamlit_notebooks/healthcare/DEID_EHR_DATA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Develop the NTE
# +
# NOTE: this notebook is Python 2 (builtin `reload`, print statements below).
import pickle
import pandas as pd
import os
import nte
reload(nte)  # pick up any edits to the local nte module during development
from nte import *
# set up your pool
from multiprocessing import Pool
pool = Pool(processes=12)
# -
# ## Setup the Paths and Pool
# Load the mapping vehicle_id -> list of CSV paths (pickled dict).
with open('./Data/NOxPaths.pickle', 'r') as handle:
    paths = pickle.load(handle)
# Path translators: Windows drive prefix -> macOS mount, and slash flipping.
mac = lambda x: x.replace("E:","/Volumes/Fleet Storage")
wnd = lambda x: x.replace("/","\\")
# set up your pool
pool = Pool(processes=12)
# ## Read in data from csvs
v_id = 12105
# Only the first 10 files are used here, presumably to keep the demo fast
# — TODO confirm before running a full analysis.
vehicle_paths = [mac(x) for x in paths[v_id]][:10]
# +
# %%time
# have your pool map the file names to dataframes
dfs = pool.map(read_csv, vehicle_paths)
# filter the dfs if longer than 500 rows (optional)
dfs = [df for df in dfs if len(df) > 500]
# -
# lets concat the entire vehicles data (optional)
df = pd.concat(dfs)
# Drop the old per-file row indices and renumber 0..N-1.
df = df.reset_index()
df = df.drop(['index'], axis=1)
from sys import getsizeof
# how many gb of memory are being used?
# (getsizeof is shallow; this under-reports nested object memory)
sum(map(getsizeof, dfs)) / 1e9
# ## Results and Intermediate Results
#
# Once we run get_nte_proportions, we will have columns with True and False for each criteria, the gNOx for each moment, the work for each moment, and the window.
#
# - ```criteria='torq_criteria'```: only meets torq criteria
# - ```criteria='criterion'```: means all criteria must be met
#
# ### Results
# +
# names of criteria for exploration
# Boolean columns added by get_nte_proportion; each flags whether a sample
# meets one Not-To-Exceed (NTE) zone criterion.
criteria_cols = ['engine_speed_criteria','torq_criteria',
                 'engine_air_temp_criteria','coolant_temp_criteria',
                 'scr_temp_criteria']
other_computed_cols = ['work', 'gNOx']
all_nox_cols = other_computed_cols + criteria_cols + ['criterion']
# -
# %%time
# get the proportion
# criteria='criterion' requires ALL individual criteria to hold;
# cutoff=.3 is the NTE gNOx/work threshold — TODO confirm against nte module.
prop_in_nte, prop_nte_testable = get_nte_proportion(df,
                                                    cutoff=.3,
                                                    criteria='criterion')
print "percent in nte:", prop_in_nte
print "percent in nte testable", prop_nte_testable
# since we ran the computation all_nox_cols exist
df[all_nox_cols].head()
df[all_nox_cols].agg(sum)
# ### Intermediate Results
# If you want to get the ratio of emissions to positive work for each window and its corresponding length:
# %%time
lt = get_percent_NTE_valid(df, 'torq_criteria')
# index corresponds to the window that satisfies 'window' col in df
# tuple := (ratio, length of window)
lt.head()
| _jupyter/nte.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Confidence interval
import numpy as np

# Draw 1000 integers uniformly from [0, 400).
sample = np.random.randint(0, 400, 1000)
sample
# Sample mean.
n = len(sample)
average = sample.sum() / n
average
# Range of the sample (max - min).
R = sample.max() - sample.min()
R
# Squared deviations from the mean (displayed only).
deviations = sample - average
deviations ** 2
# Unbiased sample variance (Bessel's correction: n - 1 denominator).
disp = (deviations ** 2).sum() / (n - 1)
disp
# Sample standard deviation.
mean_quad = np.sqrt(disp)
mean_quad
| Statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mne-python
# language: python
# name: mne-python
# ---
import matplotlib.pyplot as plt
import numpy as np
import glob,os
import pandas as pd
import hddm
import kabuki
import seaborn as sns
# Enable the %%R magic used by the plausible-values cells further down.
# %load_ext rpy2.ipython
plt.style.use('seaborn-ticks')
# EMG/behavioural markers for the speed-accuracy tradeoff (SAT) experiments.
data = pd.read_csv('../Raw_data/markers/MRK_SAT.csv')
# ## Recovering successfull fit
# +
# Load every saved chain of model M6 for experiment 2 and merge them into a
# single model object so traces from all chains can be analysed together.
fit1 = []
for f in os.listdir("DDM/Fits/ModelSelection/"):
    if "Exp2" in f:
        if "M6" in f:
            print(f)
            fit1.append(hddm.load("DDM/Fits/ModelSelection/%s"%f))
fit1 = kabuki.utils.concat_models(fit1)
# -
# ## QPplot
# Every cell in this section takes extremely long time and requires at least 16GB of RAM...
# Generate posterior-predictive datasets from the fitted model (slow, RAM-heavy).
ppc_data = hddm.utils.post_pred_gen(fit1)
ppc_data.to_csv('DDM/simulated_data_exp2.csv')
# Re-read from disk to flatten the hierarchical index into plain columns.
gen_data = pd.read_csv('DDM/simulated_data_exp2.csv')
gen_data.reset_index(inplace=True)
# hddm encodes response side in the sign of rt; recover magnitude in ms.
gen_data.rt = np.abs(gen_data.rt)*1000
# The 'node' string encodes condition/contrast/response/participant,
# dot-separated — presumably "wfpt.<cond>.0.<contrast>.<resp>)" — the
# parsing below (x[5:], "0."+x, =="0)") depends on that exact format;
# TODO confirm against an actual node value.
gen_data[["condition","Con1","contraste","expdResp","participant"]] = gen_data['node'].str.split('.', expand=True)
gen_data.drop(['index','node','Con1'], axis=1, inplace=True)
# Contrast was split on '.', so prepend "0." to restore the decimal value.
gen_data.contraste = [float("0."+x) for x in gen_data.contraste]
gen_data.expdResp = gen_data.apply(lambda x: "Left" if x["expdResp"]=="0)" else "Right", axis=1)
gen_data.condition = [x[5:] for x in gen_data.condition]
gen_data["givenResp"] = gen_data.apply(lambda x: "Left" if x["response"]==0 else "Right", axis=1)
# Recode response as accuracy (1 = given matches expected side).
gen_data.response = gen_data.apply(lambda x: 1 if x["givenResp"]==x["expdResp"] else 0, axis=1)
df2 = data[data.exp == 2]
# Quantile-probability plot: observed RT quantiles ('x' marks) against
# posterior-predictive simulations (gray dots), one panel per SAT condition.
fig, ax = plt.subplots(2,1, figsize=[5,7], dpi=300)
for SAT, SAT_dat in df2.groupby('condition'):
    # NOTE(review): Prec, RTQuantiles and subject are never used, and
    # `contrast` is never appended to — the "contrast" column of QPdf below
    # is therefore empty/NaN. Looks like leftover scaffolding; verify.
    Prec, RTQuantiles, subject, contrast = [],[],[],[]
    meanPrec, meanRT = [],[]
    synmeanPrec, synmeanRT, samp_idx = [],[],[]
    for con, con_dat in SAT_dat.groupby("contraste"):
        for corr, corr_dat in con_dat.groupby("response"):
            # Proportion of (in)correct responses at this contrast level.
            meanPrec.append(float(len(corr_dat.response))/len(con_dat))
            corr_dat["quantile"] = pd.qcut(corr_dat.rt, 5)
            # NOTE(review): the result of this .replace is discarded (not
            # assigned, not inplace) — the statement has no effect.
            corr_dat["quantile"].replace(corr_dat["quantile"].unique().sort_values(), corr_dat.groupby("quantile").mean().rt.values)
            # Mean RT inside each of the 5 quantile bins.
            mean_quantiles = []
            for quant, quant_dat in corr_dat.groupby("quantile"):
                mean_quantiles.append(quant_dat.rt.mean())
            meanRT.append(mean_quantiles)
            for i in np.arange(250): #Using samples from the synthetic data
                syn = gen_data[(gen_data["sample"] == i) & (gen_data["condition"] == SAT) & \
                (gen_data["contraste"] == con)]
                corr_syn = syn[syn.response == corr].copy()
                corr_syn["quantile"] = pd.qcut(corr_syn.rt, 5)
                # NOTE(review): same no-op .replace as above.
                corr_syn["quantile"].replace(corr_syn["quantile"].unique().sort_values(), corr_syn.groupby("quantile").mean().rt.values)
                synmeanPrec.append(float(len(corr_syn.response))/len(syn))
                mean_quantiles = []
                for quant, quant_dat in corr_syn.groupby("quantile"):
                    mean_quantiles.append(quant_dat.rt.mean())
                synmeanRT.append(mean_quantiles)
                samp_idx.append(i)
    # Observed quantiles/proportions, sorted by response proportion.
    QPdf = pd.DataFrame([meanRT, meanPrec, contrast]).T
    QPdf.columns=["RTQuantiles","Precision","contrast"]
    QPdf = QPdf.sort_values(by="Precision")
    # Simulated counterpart, keyed by posterior sample index.
    synQPdf = pd.DataFrame([synmeanRT, synmeanPrec, samp_idx]).T
    synQPdf.columns=["RTQuantiles","Precision","sample"]
    synQPdf = synQPdf.sort_values(by="Precision")
    # One gray level per RT quantile (darker = higher quantile).
    color = ['#999999','#777777', '#555555','#333333','#111111']
    x = [x for x in QPdf["Precision"].values]
    y = [y for y in QPdf["RTQuantiles"].values]
    if SAT =="Accuracy":
        curax = ax[0]
    else:
        curax = ax[1]
    # Draw the observed quantiles as text 'x' marks (the scatter call only
    # reserves axis space; markers are effectively invisible at s=0.0001).
    for _x, _y in zip( x, y):
        n = 0
        for xp, yp in zip([_x] * len(_y), _y):
            n += 1
            curax.scatter([xp],[yp], marker=None, s = 0.0001)
            curax.text(xp-.01, yp-10, 'x', fontsize=12, color=color[n-1])#substracted values correct text offset
    # Overlay each posterior-predictive sample as faint gray dots.
    for samp, samp_dat in synQPdf.groupby("sample"):
        curax.plot( [i for i in samp_dat["Precision"].values], [j for j in samp_dat["RTQuantiles"].values],'.',
                   color='gray', markerfacecolor="w", markeredgecolor="gray", alpha=.2)
    curax.set_xlabel("Response proportion")
    curax.set_ylabel("RT quantiles (ms)")
    curax.set_xlim(0,1)
    curax.vlines(.5,0,2000,linestyle=':')
    if SAT == "Accuracy":
        curax.set_ylim([250, 1300])
    else :
        curax.set_ylim([200, 800])
plt.tight_layout()
plt.savefig('DDM/QPplot_exp2.png')
plt.show()
# ## Printing parameter summary table
# Summary table of group-level parameters: drop subject-level ("subj") and
# dispersion ("std") rows, keep posterior mean and the 95% credible bounds.
stats = fit1.gen_stats()
table = stats[stats.apply(lambda row: False if "subj" in row.name else (False if "std" in row.name else True), axis=1)][["mean", '2.5q', '97.5q']].T
# Optional pretty LaTeX column names, kept for reference:
#col_names = [r"$a$ Acc", r"$a$ Spd", r"$v$ Acc 1", r"$v$ Acc 3", r"$v$ Acc 4", r"$v$ Spd 1",
#             r"$v$ Spd 3", r"$v$ Spd 4", r"$T_{er}$ Acc", r"$T_{er}$ Spd ",
#             r"$sv$", r"$sz$ Acc", r"$sz$ Spd", r"$st$", r"$z$"]
#table.columns = col_names
table = np.round(table, decimals=2)
print(table)#.to_latex())
# Posterior densities of the main DDM parameters, Accuracy (black) vs
# Speed (gray): boundary a, non-decision time Ter, starting-point range sz,
# and drift rate v per contrast level.
traces = fit1.get_traces()
fig, ax = plt.subplots(1,4, figsize=(15,3), dpi=300)
traces["a(Accuracy)"].plot(kind='density', ax=ax[0], color='k', label="Accuracy")
traces["a(Speed)"].plot(kind='density', ax=ax[0], color="gray", label="Speed")
ax[0].set_xlabel(r'$a$ values')
ax[0].set_xlim(0.6, 1.41)
traces["t(Accuracy)"].plot(kind='density', ax=ax[1], color='k', label='_nolegend_')
traces["t(Speed)"].plot(kind='density', ax=ax[1], color="gray", label='_nolegend_')
ax[1].set_xlabel(r'$T_{er}$ values')
ax[1].set_ylabel('')
ax[1].set_xlim(0.225, 0.375)
traces["sz(Accuracy)"].plot(kind='density', ax=ax[2], color='k', label='_nolegend_')
traces["sz(Speed)"].plot(kind='density', ax=ax[2], color="gray", label='_nolegend_')
ax[2].set_xlabel(r'$s_z$ values')
ax[2].set_ylabel('')
ax[2].set_xlim(-0.05, 0.78)
# Drift rates: line style encodes contrast level (solid/dash-dot/dashed).
traces["v(Accuracy.0.01)"].plot(kind='density', ax=ax[3], color='k', label="1")
traces["v(Accuracy.0.07)"].plot(kind='density', ax=ax[3], color='k', ls="-.", label="2")
traces["v(Accuracy.0.15)"].plot(kind='density', ax=ax[3], color='k', ls="--", label="3")
traces["v(Speed.0.01)"].plot(kind='density', ax=ax[3], color='gray', label='_nolegend_')
traces["v(Speed.0.07)"].plot(kind='density', ax=ax[3], color='gray', ls="-.", label='_nolegend_')
traces["v(Speed.0.15)"].plot(kind='density', ax=ax[3], color='gray', ls="--", label='_nolegend_')
ax[3].set_xlabel(r'$v$ values')
ax[3].set_ylabel('')
ax[3].set_xlim(-2, 6)
plt.tight_layout()
plt.savefig("../Manuscript/plots/DDMpar2.png")
plt.show()
# Estimating the effect size of SAT by substracting traces
# Posterior of the Accuracy-minus-Speed difference in non-decision time:
# report its mean and 95% credible interval.
ter_diff = traces["t(Accuracy)"] - traces["t(Speed)"]
print(np.mean(ter_diff))
print(np.percentile(ter_diff, 2.5))
print(np.percentile(ter_diff, 97.5))
#
#
# # Regressing MT over the Ter parameter across participants
# Computing plausible values
# For each posterior iteration: correlate the 32 subject-level Ter values
# (16 Accuracy + 16 Speed) with mean movement time per subject/condition.
corr_Acc, corr_Spd, Ters_Acc, Ters_Spd = [],[],[],[]
traces = fit1.get_traces()
mts = data[data.exp==2].groupby(['condition','participant']).mt.mean().values#same index as below
for iteration in traces.iterrows():
    # NOTE(review): the [:16]/[16:] split hard-codes 16 participants per
    # condition and assumes the trace columns are ordered Accuracy-first,
    # matching the groupby ordering of `mts` — verify if N changes.
    Ter_Acc = iteration[1][['t_subj' in s for s in iteration[1].index]][:16]
    Ter_Spd = iteration[1][['t_subj' in s for s in iteration[1].index]][16:]
    corr_Acc.append(np.corrcoef(Ter_Acc, mts[:16])[0,1])
    corr_Spd.append(np.corrcoef(Ter_Spd, mts[16:])[0,1])
    Ters_Acc.append(Ter_Acc*1000)  # seconds -> ms
    Ters_Spd.append(Ter_Spd*1000)
# Potting raw data
# +
# Per-subject Ter (posterior mean with 95% CI error bars) against mean MT,
# one series per SAT condition.
plt.errorbar(x=mts[:16], y=np.mean(Ters_Acc, axis=0), yerr=np.abs([np.mean(Ters_Acc, axis=0),np.mean(Ters_Acc, axis=0)] - np.asarray((np.percentile(Ters_Acc, 97.5, axis=0),np.percentile(Ters_Acc, 2.5, axis=0)))),fmt='o')
plt.errorbar(x=mts[16:], y=np.mean(Ters_Spd, axis=0), yerr=np.abs([np.mean(Ters_Spd, axis=0),np.mean(Ters_Spd, axis=0)] - np.asarray((np.percentile(Ters_Spd, 97.5, axis=0),np.percentile(Ters_Spd, 2.5, axis=0)))),fmt='o')
#plt.savefig('testexp1.png')
plt.show()
# -
# Plotting plausible value distribution
# Distribution of per-iteration Ter-MT correlations for each condition.
plt.hist(corr_Acc)
plt.hist(corr_Spd)
# Taking the code for plausible population correlation from the DMC package (Heathcote, Lin, reynolds, Strickland, Gretton and Matzke, 2019)
# + language="R"
#
# ### Plausible values ----
#
# posteriorRho <- function(r, n, npoints=100, kappa=1)
# # Code provided by <NAME>, March 2016, from <NAME>
# # Reformatted into a single funciton. kappa=1 implies uniform prior.
# # Picks smart grid of npoints points concentrating around the density peak.
# # Returns approxfun for the unnormalized density.
# {
#
# .bf10Exact <- function(n, r, kappa=1) {
# # Ly et al 2015
# # This is the exact result with symmetric beta prior on rho
# # with parameter alpha. If kappa = 1 then uniform prior on rho
# #
# if (n <= 2){
# return(1)
# } else if (any(is.na(r))){
# return(NaN)
# }
# # TODO: use which
# check.r <- abs(r) >= 1 # check whether |r| >= 1
# if (kappa >= 1 && n > 2 && check.r) {
# return(Inf)
# }
#
# log.hyper.term <- log(hypergeo::genhypergeo(U=c((n-1)/2, (n-1)/2),
# L=((n+2/kappa)/2), z=r^2))
# log.result <- log(2^(1-2/kappa))+0.5*log(pi)-lbeta(1/kappa, 1/kappa)+
# lgamma((n+2/kappa-1)/2)-lgamma((n+2/kappa)/2)+log.hyper.term
# real.result <- exp(Re(log.result))
# return(real.result)
# }
#
# .jeffreysApproxH <- function(n, r, rho) {
# result <- ((1 - rho^(2))^(0.5*(n - 1)))/((1 - rho*r)^(n - 1 - 0.5))
# return(result)
# }
#
# .bf10JeffreysIntegrate <- function(n, r, kappa=1) {
# # Jeffreys' test for whether a correlation is zero or not
# # Jeffreys (1961), pp. 289-292
# # This is the exact result, see EJ
# ##
# if (n <= 2){
# return(1)
# } else if ( any(is.na(r)) ){
# return(NaN)
# }
#
# # TODO: use which
# if (n > 2 && abs(r)==1) {
# return(Inf)
# }
# hyper.term <- Re(hypergeo::genhypergeo(U=c((2*n-3)/4, (2*n-1)/4), L=(n+2/kappa)/2, z=r^2))
# log.term <- lgamma((n+2/kappa-1)/2)-lgamma((n+2/kappa)/2)-lbeta(1/kappa, 1/kappa)
# result <- sqrt(pi)*2^(1-2/kappa)*exp(log.term)*hyper.term
# return(result)
# }
#
#
# # 1.0. Built-up for likelihood functions
# .aFunction <- function(n, r, rho) {
# #hyper.term <- Re(hypergeo::hypergeo(((n-1)/2), ((n-1)/2), (1/2), (r*rho)^2))
# hyper.term <- Re(hypergeo::genhypergeo(U=c((n-1)/2, (n-1)/2), L=(1/2), z=(r*rho)^2))
# result <- (1-rho^2)^((n-1)/2)*hyper.term
# return(result)
# }
#
# .bFunction <- function(n, r, rho) {
# #hyper.term.1 <- Re(hypergeo::hypergeo((n/2), (n/2), (1/2), (r*rho)^2))
# #hyper.term.2 <- Re(hypergeo::hypergeo((n/2), (n/2), (-1/2), (r*rho)^2))
# #hyper.term.1 <- Re(hypergeo::genhypergeo(U=c(n/2, n/2), L=(1/2), z=(r*rho)^2))
# #hyper.term.2 <- Re(hypergeo::genhypergeo(U=c(n/2, n/2), L=(-1/2), z=(r*rho)^2))
# #result <- 2^(-1)*(1-rho^2)^((n-1)/2)*exp(log.term)*
# # ((1-2*n*(r*rho)^2)/(r*rho)*hyper.term.1-(1-(r*rho)^2)/(r*rho)*hyper.term.2)
# #
# hyper.term <- Re(hypergeo::genhypergeo(U=c(n/2, n/2), L=(3/2), z=(r*rho)^2))
# log.term <- 2*(lgamma(n/2)-lgamma((n-1)/2))+((n-1)/2)*log(1-rho^2)
# result <- 2*r*rho*exp(log.term)*hyper.term
# return(result)
# }
#
# .hFunction <- function(n, r, rho) {
# result <- .aFunction(n, r, rho) + .bFunction(n, r, rho)
# return(result)
# }
#
# .scaledBeta <- function(rho, alpha, beta){
# result <- 1/2*dbeta((rho+1)/2, alpha, beta)
# return(result)
# }
#
# .priorRho <- function(rho, kappa=1) {
# .scaledBeta(rho, 1/kappa, 1/kappa)
# }
#
# fisherZ <- function(r) log((1+r)/(1-r))/2
#
# inv.fisherZ <- function(z) {K <- exp(2*z); (K-1)/(K+1)}
#
#
# # Main body
#
# # Values spaced around mode
# qs <- qlogis(seq(0,1,length.out=npoints+2)[-c(1,npoints+2)])
# rho <- c(-1,inv.fisherZ(fisherZ(r)+qs/sqrt(n)),1)
# # Get heights
# if (!is.na(r) && !r==0) {
# d <- .bf10Exact(n, r, kappa)*.hFunction(n, r, rho)*.priorRho(rho, kappa)
# } else if (!is.na(r) && r==0) {
# d <- .bf10JeffreysIntegrate(n, r, kappa)*
# .jeffreysApproxH(n, r, rho)*.priorRho(rho, kappa)
# } else return(NA)
# # Unnormalized approximation funciton for density
# approxfun(rho,d)
# }
#
#
# postRav <- function(r, n, spacing=.01, kappa=1,npoints=100,save=FALSE)
# # r is a vector, returns average density. Can also save unnormalized pdfs
# {
# funs <- sapply(r,posteriorRho,n=n,npoints=npoints,kappa=kappa)
# rho <- seq(-1,1,spacing)
# result <- apply(matrix(unlist(lapply(funs,function(x){
# out <- x(rho); out/sum(out)
# })),nrow=length(rho)),1,mean)
# names(result) <- seq(-1,1,spacing)
# attr(result,"n") <- n
# attr(result,"kappa") <- kappa
# if (save) attr(result,"updfs") <- funs
# result
# }
#
#
# postRav.Density <- function(result)
# # Produces density class object
# {
# x.vals <- as.numeric(names(result))
# result <- result/(diff(range(x.vals))/length(x.vals))
# out <- list(x=x.vals,y=result,has.na=FALSE,
# data.name="postRav",call=call("postRav"),
# bw=mean(diff(x.vals)),n=attr(result,"n"))
# class(out) <- "density"
# out
# }
#
# postRav.mean <- function(pra) {
# # Average value of object produced by posteriorRhoAverage
# sum(pra*as.numeric(names(pra)))
# }
#
# postRav.p <- function(pra,lower=-1,upper=1) {
# # probability in an (inclusive) range of posteriorRhoAverage object
# x.vals <- as.numeric(names(pra))
# sum(pra[x.vals <= upper & x.vals >= lower])
# }
#
# postRav.ci <- function(pra,interval=c(.025,.975))
# {
# cs <- cumsum(pra)
# rs <- as.numeric(names(pra))
# tmp <- approx(cs,rs,interval)
# out <- tmp$y
# names(out) <- interval
# out
# }
#
# -
# Computing plausible population correlation for both SAT conditions
# + magic_args="-i corr_Acc -o x4_1,y4_1" language="R"
# rhohat = postRav(corr_Acc, 16)
# print(postRav.mean(rhohat))
# print(postRav.ci(rhohat))
# d = postRav.Density(rhohat)
# plot(d)
# x4_1 = d$x
# y4_1 = d$y
#
# + magic_args="-i corr_Spd -o x4_2,y4_2" language="R"
# rhohat = postRav(corr_Spd, 16)
# print(postRav.mean(rhohat))
# print(postRav.ci(rhohat))
# d = postRav.Density(rhohat)
# plot(d)
# x4_2 = d$x
# y4_2 = d$y
#
#
# -
# ### Plotting for both experiment
# Pre-computed experiment-1 panels, saved by the companion exp1 notebook
# — presumably with the same column layout as the exp2 data built above.
plot1data = pd.read_csv("plot1data.csv")
plot3data = pd.read_csv("plot3data.csv")
# +
import matplotlib.gridspec as gridspec
# 2x2 figure: top row = per-subject Ter vs MT scatter (exp1 left, exp2
# right); bottom row = plausible population correlation densities.
plt.figure(dpi=300)
gs = gridspec.GridSpec(2, 2,
                       width_ratios=[2, 2, ],
                       height_ratios=[2, 1])
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax1.errorbar(x=plot1data.x1_1, y=plot1data.y1_1, yerr=np.array([plot1data.yerr1_1u.values, plot1data.yerr1_1b.values]),fmt='.', color="k", label="Accuracy")
ax1.errorbar(x=plot1data.x1_2, y=plot1data.y1_2, yerr=np.array([plot1data.yerr1_1u.values, plot1data.yerr1_1b.values]),fmt='.', color="gray", label="Speed")
ax2.errorbar(x=mts[:16], y=np.mean(Ters_Acc, axis=0), yerr=np.abs([np.mean(Ters_Acc, axis=0),np.mean(Ters_Acc, axis=0)] - np.asarray((np.percentile(Ters_Acc, 97.5, axis=0),np.percentile(Ters_Acc, 2.5, axis=0)))),fmt='.', color="k")
ax2.errorbar(x=mts[16:], y=np.mean(Ters_Spd, axis=0), yerr=np.abs([np.mean(Ters_Spd, axis=0),np.mean(Ters_Spd, axis=0)] - np.asarray((np.percentile(Ters_Spd, 97.5, axis=0),np.percentile(Ters_Spd, 2.5, axis=0)))),fmt='.', color="gray")
ax3.plot(plot3data.x3_1,plot3data.y3_1, color="k", label="Accuracy")
ax3.plot(plot3data.x3_2,plot3data.y3_2, color="gray", label="Speed")
# x4_*/y4_* come from the %%R plausible-values cells above.
ax4.plot(x4_1,y4_1, color="k")
ax4.plot(x4_2,y4_2, color="gray")
ax1.legend(loc=0)
ax1.set_ylim(148, 400)
ax2.set_ylim(148, 400)
ax3.set_ylim(0, 3)
ax4.set_ylim(0, 3)
ax2.set_yticks([])
ax4.set_yticks([])
ax1.set_ylabel(r"$T_{er}$ (ms)")
ax1.set_xlabel("MT (ms)")
ax2.set_xlabel("MT (ms)")
ax3.set_ylabel("Density")
ax3.set_xlabel(r"$r$ value")
ax4.set_xlabel(r"$r$ value")
plt.tight_layout()
plt.savefig("../Manuscript/plots/TerMTcorr.eps")
# -
# # Joint fit with MT
# Except if high amount of RAM (>18 Gb), kernel should be restarted and only the first cell run before running cells below
# +
# Load all saved chains of the joint (RT + MT regression) model for exp 2
# and merge them. RAM-heavy — see the warning in the markdown above.
fit_joint2 = []
for f in os.listdir("DDM/Fits/"):
    if os.path.isfile("DDM/Fits/%s"%f) and "Exp2" in f:
        fit_joint2.append(hddm.load("DDM/Fits/%s"%f))
fit_joint = kabuki.utils.concat_models(fit_joint2)
# -
# Group-level estimate of the MT regression coefficient on Ter.
stats = fit_joint.gen_stats()
stats[stats.index=="t_mt"]
# ## Testing wether var in MT ~ Ter can be explained by r(PMT,MT)
# +
# NOTE(review): this import shadows the `stats` DataFrame created two cells
# above (fit_joint.gen_stats()) with the scipy.stats module — intentional
# here, but fragile if cells are re-run out of order.
import scipy.stats as stats
df = dffull = pd.read_csv('../Raw_data/markers/MRK_SAT.csv')
df = df[df.exp==2]
df = df[np.isfinite(df.pmt)].reset_index(drop=True)#Removing unmarked EMG trials
# Per participant x SAT condition: Spearman correlation between movement
# time (mt) and premotor time (pmt).
r, part, SAT = [],[],[]
for xx, subj_dat in df.groupby(['participant', 'condition']):
    subj_dat = subj_dat[np.isfinite(subj_dat['mt'])]
    r.append(stats.spearmanr(subj_dat.mt, subj_dat.pmt)[0])
    part.append(xx[0])
    SAT.append(xx[1])
dfcorr = pd.concat([pd.Series(r), pd.Series(part),pd.Series(SAT)], axis=1)
dfcorr.columns = ['correl','participant','SAT']
PMTMTcorr = dfcorr.groupby('participant').correl.mean().values #averaging across SAT conditions
# -
# For each posterior iteration, correlate subject-level t_mt coefficients
# with each subject's observed PMT-MT correlation.
corr, t_mts = [],[]
traces = fit_joint.get_traces()
for iteration in traces.iterrows():
    t_mt = iteration[1][['t_mt_subj' in s for s in iteration[1].index]]
    corr.append(np.corrcoef(t_mt, PMTMTcorr)[0,1])
    t_mts.append(t_mt)
# Posterior mean t_mt per subject with 95% CI, against PMT-MT correlation.
plt.errorbar(x=PMTMTcorr, y=np.mean(t_mts, axis=0), yerr=np.abs([np.mean(t_mts, axis=0),np.mean(t_mts, axis=0)] - np.asarray((np.percentile(t_mts, 97.5, axis=0),np.percentile(t_mts, 2.5, axis=0)))),fmt='o')
# Distribution of the per-iteration correlations (plausible values).
plt.hist(corr)
# Computing population plausible values
# + magic_args="-i corr -o x4,y4" language="R"
# rhohat = postRav(corr, 16)
# print(postRav.mean(rhohat))
# print(postRav.ci(rhohat))
# d = postRav.Density(rhohat)
# plot(d)
# x4 = d$x
# y4 = d$y
#
#
# -
# ## Plotting for both experiments
# Pre-computed experiment-1 panels for the t_mt figure (saved elsewhere).
plot1data_tmt = pd.read_csv('plot1data_tmt.csv')
plot2data_tmt = pd.read_csv('plot2data_tmt.csv')
# +
import matplotlib.gridspec as gridspec
# 2x2 figure mirroring the Ter-MT figure above, but for the MT regression
# coefficient: scatter on top (exp1 left, exp2 right), plausible population
# correlation densities below.
plt.figure(dpi=300)
gs = gridspec.GridSpec(2, 2,
                       width_ratios=[2, 2, ],
                       height_ratios=[2, 1])
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax1.errorbar(x=plot1data_tmt.x1, y=plot1data_tmt.y1, yerr=np.array([plot1data_tmt.yerr1u.values, plot1data_tmt.yerr1b.values]),fmt='.', color="k")
ax2.errorbar(x=PMTMTcorr, y=np.mean(t_mts, axis=0), yerr=np.abs([np.mean(t_mts, axis=0),np.mean(t_mts, axis=0)] - np.asarray((np.percentile(t_mts, 97.5, axis=0),np.percentile(t_mts, 2.5, axis=0)))),fmt='.', color="k")
ax3.plot(plot2data_tmt.x2,plot2data_tmt.y2, color="k", label="Accuracy")
# x4/y4 come from the %%R plausible-values cell above.
ax4.plot(x4,y4, color="k")
ax3.set_ylim(0, 4)
ax4.set_ylim(0, 4)
ax2.set_yticks([])
ax4.set_yticks([])
ax1.set_ylabel(r"$\beta_{MT}$")
ax1.set_xlabel("PMT-MT correlation")
ax2.set_xlabel("PMT-MT correlation")
ax3.set_ylabel("Density")
ax3.set_xlabel(r"$r$ value")
ax4.set_xlabel(r"$r$ value")
plt.tight_layout()
plt.savefig("../Manuscript/plots/tmt.eps")
| Analysis/7-Testing_DDM_exp2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# NOTE(review): bare `pip install` is notebook-cell magic, not valid Python —
# this line only works inside IPython/Jupyter.
pip install citipy
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
# NOTE(review): declared but the export cell below writes "weathercities.csv"
# instead — confirm which path is intended.
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# -
# ## Generate Cities List
# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []

# Sample 1500 coordinate pairs uniformly over the globe.
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)

# Map every coordinate pair to its nearest city, keeping only the first
# occurrence of each name (preserves discovery order).
for city_lat, city_lng in lat_lngs:
    nearest = citipy.nearest_city(city_lat, city_lng).city_name
    if nearest not in cities:
        cities.append(nearest)

# Print the city count to confirm sufficient count
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it'sbeing processed (with the city number and city name).
#
# +
# Base OpenWeatherMap current-weather endpoint; a city name is appended to
# query_url per request. Temperatures are requested in Fahrenheit.
units = "imperial"
base_url = "http://api.openweathermap.org/data/2.5/weather"
query_url = f"{base_url}?appid={weather_api_key}&units={units}&q="

# Progress counters for the retrieval log (reset again before the loop).
counter = 0
set_term = 1
print(query_url)
# +
# Parallel result columns; index i of every list describes the same city.
second_city = []
cloudiness = []
dates = []
humidity = []
lat = []
long = []
temp = []
wind_speed = []
countries = []
counter = 0   # record number within the current set (log cosmetics only)
set_term = 1  # set number for the progress log

# loops for creating dataframe columns
for city in cities:
    try:
        response = requests.get(query_url + city).json()
        # BUG FIX: extract every field *before* appending anything. The old
        # code appended field-by-field inside the try, so a KeyError partway
        # through left the parallel lists with unequal lengths and
        # misaligned rows, breaking the DataFrame construction later.
        city_cloudiness = response['clouds']['all']
        city_country = response['sys']['country']
        city_date = response['dt']
        city_humidity = response['main']['humidity']
        city_lat = response['coord']['lat']
        city_lng = response['coord']['lon']
        city_temp = response['main']['temp_max']
        city_wind = response['wind']['speed']
    except Exception:
        print("City not found. Skipping...")
        continue
    cloudiness.append(city_cloudiness)
    countries.append(city_country)
    dates.append(city_date)
    humidity.append(city_humidity)
    lat.append(city_lat)
    long.append(city_lng)
    temp.append(city_temp)
    wind_speed.append(city_wind)
    if counter > 48:
        counter = 1
        set_term += 1
        second_city.append(city)
    else:
        counter += 1
        second_city.append(city)
    print(f"Processing Record {counter} of Set {set_term} | {city}")
print("------------------------------\nData Retrieval Complete\n------------------------------")
# -
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
# Assemble the parallel result lists into one table, one row per city.
column_names = ["City", "Cloudiness", "Country", "Date", "Humidity",
                "Lat", "Lng", "Max Temp", "Wind Speed"]
column_values = [second_city, cloudiness, countries, dates, humidity,
                 lat, long, temp, wind_speed]
weather_dict = dict(zip(column_names, column_values))

# Build the DataFrame and show non-null counts per column as a sanity check.
weather_df = pd.DataFrame(weather_dict)
weather_df.count()
# -

# print the dataframe
weather_df

# Export data into a csv
weather_df.to_csv("weathercities.csv", index=False, header=True)
# ### Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# #### Latitude vs. Temperature Plot
# Scatter of max temperature against latitude for every retrieved city.
# Note: savefig must precede plt.show(), which clears the current figure.
plt.scatter(x=weather_df['Lat'], y=weather_df['Max Temp'], alpha=1, edgecolors ="black")
plt.grid()
plt.title("City Latitude Vs. Max Temperature 2/1/20")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature(F)")
plt.savefig("Lat v. Temp.png")
plt.show()
# #### Latitude vs. Cloudiness Plot
# Cloudiness (%) against latitude.
plt.scatter(x=weather_df['Lat'], y=weather_df['Cloudiness'], alpha=1, edgecolors ="black")
plt.grid()
plt.title("City Latitude Vs. Cloudiness 2/1/20")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.savefig("Lat v. Cloudiness.png")
plt.show()
# #### Latitude vs. Humidity Plot
# Humidity (%) against latitude.
plt.scatter(x=weather_df['Lat'], y=weather_df['Humidity'], alpha=1, edgecolors ="black")
plt.grid()
plt.title("City Latitude Vs. Humidity 2/1/20")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.savefig("Lat v. Humidity.png")
plt.show()
# #### Latitude vs. Wind Speed Plot
# Wind speed (mph) against latitude.
plt.scatter(x=weather_df['Lat'], y=weather_df['Wind Speed'], alpha=1, edgecolors ="black")
plt.grid()
plt.title("City Latitude Vs. Wind Speed 2/1/20")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (MPH)")
plt.savefig("Lat v. Wind Speed.png")
plt.show()
# ## Linear Regression
# OPTIONAL: Create a function to create Linear Regression plots
# +
# Split the city table by hemisphere. Cities with |lat| <= 0.01 fall in
# neither frame (same thresholds as before).
northern_mask = weather_df["Lat"] > 0.01
southern_mask = weather_df["Lat"] < -0.01
North_Hemisphere = weather_df.loc[northern_mask]
South_Hemisphere = weather_df.loc[southern_mask]
#North_Hemisphere
South_Hemisphere
# -
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
# Northern Hemisphere: regress max temperature on latitude and overlay the
# fitted line and its equation on a scatter plot.
lat_vals = North_Hemisphere['Lat']
temp_vals = North_Hemisphere['Max Temp']
slope, intercept, rvalue, pvalue, stderr = linregress(lat_vals, temp_vals)
regress_values = lat_vals * slope + intercept
line_equation = f"y = {round(slope, 2)}x + {round(intercept, 2)}"
plt.scatter(lat_vals, temp_vals)
plt.plot(lat_vals, regress_values, "red")
plt.annotate(line_equation, (5, 10), fontsize=15, color="red")
plt.ylim(0, 100)
plt.xlim(0, 80)
plt.ylabel("Max. Temp")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("Northern Hem Max Temp vs Lat.png")
# -
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
#Create a Scatter Plot for Southern Hemisphere - Max Temp vs. Latitude Linear Regression
x = South_Hemisphere['Lat']
y = South_Hemisphere['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
# BUG FIX: this cell used to compute `line_eq` but annotate the stale
# `line_equation` from the Northern-hemisphere cell, so the wrong equation
# was printed on the plot. Compute and annotate the same variable.
line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x,y)
plt.plot(x,regress_values,"red")
plt.annotate(line_equation,(-40,20),fontsize=15,color="red")
plt.ylim(0,115)
plt.xlim(-60, 20)
plt.ylabel("Max. Temp")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("Southern Hem Max Temp vs Lat.png")
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
#Create a Scatter Plot for Northern Hemisphere - Humidity % vs. Latitude Linear Regression
x = North_Hemisphere ['Lat']
y = North_Hemisphere ['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x,y)
plt.plot(x,regress_values,"red")
plt.annotate(line_equation,(40,10),fontsize=15,color="red")
plt.ylim(0,110)
plt.xlim(-5, 90)
# BUG FIX: axis labels were swapped/wrong ("Max. Temp" / "Humidity");
# the y-axis is Humidity and the x-axis is Latitude here.
plt.ylabel("Humidity")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("Northern Hem Lat vs Humidity.png")
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
#Create a Scatter Plot for Southern Hemisphere - Humidity % vs. Latitude Linear Regression
x = South_Hemisphere['Lat']
y = South_Hemisphere['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
# BUG FIX: same `line_eq` vs `line_equation` mismatch as the Southern
# temperature cell — the annotation showed the previous cell's equation.
line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x,y)
plt.plot(x,regress_values,"red")
plt.annotate(line_equation,(-40,10),fontsize=15,color="red")
plt.ylim(0,110)
plt.xlim(-60, 20)
plt.ylabel("Humidity")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("Southern Hem Humidity vs Lat.png")
# -
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
#Scatter Plot for the Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
x = North_Hemisphere ['Lat']
y = North_Hemisphere ['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x,y)
plt.plot(x,regress_values,"red")
plt.annotate(line_equation,(5,-15),fontsize=15,color="red")
plt.ylim(-20,110)
plt.xlim(-5, 85)
plt.xlabel("Lat")
plt.ylabel("Cloudiness")
plt.show()
plt.savefig("Northern Hem Lat vs Cloudiness.png")
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Scatter plot + linear regression: Southern Hemisphere cloudiness vs. latitude.
x = South_Hemisphere['Lat']
y = South_Hemisphere['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
plt.scatter(x, y)
plt.plot(x, regress_values, "red")
plt.annotate(line_equation, (-60, -20), fontsize=15, color="red")
plt.ylim(-25, 110)
plt.xlim(-75, 15)
plt.xlabel("Lat")
plt.ylabel("Cloudiness")
# BUG FIX: save before show — plt.show() finalizes/clears the current figure,
# so the subsequent savefig() wrote a blank image.
plt.savefig("Southern Hem Lat vs Cloudiness.png")
plt.show()
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Scatter plot + linear regression: Northern Hemisphere wind speed vs. latitude.
x = North_Hemisphere['Lat']
y = North_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
plt.scatter(x, y)
plt.plot(x, regress_values, "red")
plt.annotate(line_equation, (5, -2), fontsize=15, color="red")
plt.ylim(-5, 27)
plt.xlim(-5, 90)
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
# BUG FIX: save before show — plt.show() finalizes/clears the current figure,
# so the subsequent savefig() wrote a blank image.
plt.savefig("Northern Hem Lat vs Wind Speed.png")
plt.show()
# -
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Scatter plot + linear regression: Southern Hemisphere wind speed vs. latitude.
x = South_Hemisphere['Lat']
y = South_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
plt.scatter(x, y)
plt.plot(x, regress_values, "red")
plt.annotate(line_equation, (-40, -5), fontsize=15, color="red")
plt.ylim(-10, 30)
plt.xlim(-60, 8)
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
# BUG FIX: save before show — plt.show() finalizes/clears the current figure,
# so the subsequent savefig() wrote a blank image.
plt.savefig("Southern Hem Lat vs Wind Speed.png")
plt.show()
# -
| WeatherPy/.ipynb_checkpoints/WeatherPy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# import seaborn as sns
# import matplotlib.pyplot as plt
import os
from sklearn.model_selection import train_test_split
from feature_engineering import transformation
# plt.style.use('seaborn-colorblind')
# # %matplotlib inline
#from feature_cleaning import rare_values as ra
# -
# ## Load Dataset
# +
# Only the columns needed for the transformation demos below.
use_cols = [
    'Pclass', 'Sex', 'Age', 'Fare', 'SibSp',
    'Survived'
]
data = pd.read_csv('./data/titanic.csv', usecols=use_cols)
# -
data.head(3)
# Note that we include target variable in the X_train
# because we need it to supervise our discretization
# this is not the standard way of using train-test-split
X_train, X_test, y_train, y_test = train_test_split(data, data.Survived, test_size=0.3,
                                                    random_state=0)
X_train.shape, X_test.shape
# ## Logarithmic transformation
# add the new created feature
X_train_copy = X_train.copy(deep=True)
# BUG FIX: the transform was applied to X_train instead of X_train_copy, making
# the deep copy above pointless (and risking mutation of X_train itself).
X_train_copy = transformation.log_transform(data=X_train_copy, cols=['Fare'])
print(X_train_copy.head(6))
# ## Reciprocal transformation
# add the new created feature
X_train_copy = X_train.copy(deep=True)
# Drop zero fares first: 1/x is undefined at x == 0.
X_train_copy = X_train_copy[X_train_copy.Fare!=0] # Warning that x should not be 0
#X_train_copy[X_train_copy.Fare==0]
X_train_copy = transformation.reciprocal_transform(data=X_train_copy,cols=['Fare'])
print(X_train_copy.head(6))
# ## Square root transformation
# add the new created feature
X_train_copy = X_train.copy(deep=True)
# BUG FIX: pass X_train_copy, not X_train — otherwise the copy is unused.
X_train_copy = transformation.square_root_transform(data=X_train_copy, cols=['Fare'])
print(X_train_copy.head(6))
# ## Exponential transformation
# add the new created feature
X_train_copy = X_train.copy(deep=True)
# BUG FIX: pass X_train_copy, not X_train — otherwise the copy is unused.
X_train_copy = transformation.exp_transform(data=X_train_copy, cols=['Fare'], coef=0.2)
print(X_train_copy.head(6))
# ## Box-cox transformation
# NOTE(review): sklearn's PowerTransformer defaults to method='yeo-johnson',
# not 'box-cox' — confirm whether the heading or the method argument is intended.
from sklearn.preprocessing import PowerTransformer
pt = PowerTransformer().fit(X_train[['Fare']])
X_train_copy = X_train.copy(deep=True)
X_train_copy['Fare_boxcox'] = pt.transform(X_train_copy[['Fare']])
print(X_train_copy.head(6))
transformation.diagnostic_plots(X_train_copy,'Fare_boxcox')
# ## Quantile transformation
# Map Fare through its empirical quantiles onto a normal distribution.
from sklearn.preprocessing import QuantileTransformer
qt = QuantileTransformer(output_distribution='normal').fit(X_train[['Fare']])
X_train_copy = X_train.copy(deep=True)
X_train_copy['Fare_qt'] = qt.transform(X_train_copy[['Fare']])
print(X_train_copy.head(6))
transformation.diagnostic_plots(X_train_copy,'Fare_qt')
| feature-engineering-and-data-transformations/using-toy-exmaples/3.4_Demo_Feature_Transformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/syusuke9999/FashionMNIST_VGG16/blob/main/FashionMNIST_VGG16.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="T-s6bVfwPQ40" colab={"base_uri": "https://localhost:8080/", "height": 372} outputId="2bb0966b-ce94-4029-a23e-d309a7281c6d"
import cv2
import keras
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.applications import VGG16
import numpy as np
import logging
logging.getLogger('tensorflow').disabled = True
# + id="F1hPM12mPT04"
# %matplotlib inline
# + id="d_CXBBsbPXss"
import matplotlib.pyplot as plt
# + [markdown] id="w6-R722jPfvE"
# **Parameter settings**
# + id="eIlHOyCCPZ5B"
batch_size = 128  # samples per gradient update
num_class = 10    # Fashion-MNIST has 10 classes
epochs = 100
image_size = 32   # VGG16 requires inputs of at least 32x32
# + id="WVuN4zRDPoaK"
# Load the Fashion-MNIST dataset from keras.datasets.fashion_mnist.
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
# + id="vhfGKEF8X9n9"
# Keep the class labels in a list (index == integer label).
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] id="sxqOZZt9Puyn"
# #### The pretrained VGG16 model only accepts images of at least 32x32 pixels with RGB color channels,
# #### so use a list comprehension with OpenCV functions to upscale each image and expand it to 3 RGB channels.
# #### Each img is taken from x_train, resized and converted to RGB in one step, converted to numpy.ndarray form, and assigned back to x_train.
# + id="Zvw7-SP9Pr7G"
x_train = np.array(
    [cv2.cvtColor(cv2.resize(img, (image_size, image_size)),
     cv2.COLOR_GRAY2RGB) for img in x_train])
# + [markdown] id="sJBKOAE-P0a1"
# ### Apply the same operation to x_test
# + id="LB80E9cqPxZD"
x_test = np.array(
    [cv2.cvtColor(cv2.resize(img, (image_size, image_size)),
     cv2.COLOR_GRAY2RGB) for img in x_test])
# + id="PqCueUGqP59w"
# Preview the first ten training images with their integer labels.
for i in range(10):
    plt.subplot(2, 5, i+1)
    plt.title("Label: " + str(i))
    plt.imshow(x_train[i].reshape(image_size, image_size, 3), cmap=None)
# + id="IVIhnSQOP7d8"
# As preprocessing, normalize the image data to values between 0 and 1.
x_train, x_test = x_train / 255.0 , x_test / 255.0
# + id="aU-2nnV3MifA"
x_test.shape
# + id="bOkj1DdZP9Gi"
# Choose the optimizer.
opt = Adam()
# + id="QSBPxsvwQB0d"
# Use a model pretrained on 'imagenet' (weights='imagenet').
# We classify into 10 classes here, so drop the final (top) fully connected layers (include_top=False).
# The input needs at least 32x32 pixels and 3 RGB channels, so specify the
# input shape to match the data prepared above (input_shape=(image_size, image_size, 3)).
VGG16TrainedModel = VGG16(weights='imagenet', include_top=False,
                          input_shape=(32, 32, 3))
# + [markdown] id="-UAx5IxTQF34"
# ### Summary of the pretrained VGG16 model
# + id="ktAk8mj3QG6L"
VGG16TrainedModel.summary()
# + [markdown] id="oCe2v2ilQMTh"
# ### Build the fully connected head
# + id="ytMHLhIaQKCK"
top_model = Sequential()
top_model.add(Flatten(input_shape=VGG16TrainedModel.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(num_class, activation='softmax'))
# + [markdown] id="47n8Uq08QT1W"
# ### Join VGG16 with the fully connected head
# + id="4Bp5ePdQQRr5"
# Combine the pretrained VGG-16 convolutional network with the layers created above.
Customized_VGG_Model = keras.Model(inputs=VGG16TrainedModel.input, outputs=top_model(VGG16TrainedModel.output))
# + id="14tPy-fOQZjE"
# Freeze the first 16 layers of the combined model so only later layers train.
# NOTE(review): the original comment said "up to the 13th layer" but the code
# freezes layers[:16] — confirm which was intended.
for layer in Customized_VGG_Model.layers[:16]:
    layer.trainable = False
# + id="VqRWVd3FVe4-"
### Summary of the model used for this transfer learning
# + id="5gZKmUGUVngr"
# BUG FIX: the original line was `Customized_VGG_Model..summary()` (double dot),
# a SyntaxError that made this cell unrunnable.
Customized_VGG_Model.summary()
# + [markdown] id="L8AiUrAiQLgT"
#
# + [markdown] id="Jj7IXpMBQc92"
#
# + id="Qr4qyMzHQe-_"
# Sparse categorical cross-entropy: labels are integer class ids (not one-hot).
Customized_VGG_Model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# + id="Zg14RRRAQkf4"
Customized_VGG_Model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
# + id="IDqm2qgqQ0jC"
# Report loss/accuracy on the held-out test set.
Customized_VGG_Model.evaluate(x_test, y_test)
# + id="FCBk4JeTYczn"
# Show test image #99 and the class name predicted for it.
plt.imshow(x_test[99:100].reshape(image_size, image_size, 3), cmap=None)
# + id="C7sMfN0eXoGb"
class_names[Customized_VGG_Model.predict(x_test[99:100]).argmax()]
| FashionMNIST_VGG16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import ast
import os
# Sanity-check the working directory, then load the two CSV inputs to be joined.
os.listdir(os.getcwd())
director = pd.read_csv('directorparsed.csv')
meta = pd.read_csv('movies_metadata_small.csv')
meta
director
# +
# Join each director row to the first metadata row sharing its id, keeping
# each id at most once.
list_o_tuples = []
numcount = 1
# PERF FIX: the original nested scan was O(len(director) * len(meta)) with an
# O(n) list-membership test; pre-index meta by id (first occurrence wins) and
# use a set, giving two linear passes with identical matching behavior.
used_id = set()
meta_row_by_id = {}
for j in range(len(meta.index)):
    a_id = meta.iloc[j]['id']
    if a_id not in meta_row_by_id:
        meta_row_by_id[a_id] = j
for i in range(len(director.index)):
    b_id = director.iloc[i]['id']
    if b_id in meta_row_by_id and b_id not in used_id:
        used_id.add(b_id)
        j = meta_row_by_id[b_id]
        new_director = director.iloc[i]['director']
        genres = meta.iloc[j]['genres']
        title = meta.iloc[j]['title']
        revenue = meta.iloc[j]['revenue']
        vote_count = meta.iloc[j]['vote_count']
        vote_average = meta.iloc[j]['vote_average']
        list_o_tuples.append([b_id, new_director, title, genres, revenue, vote_count, vote_average])
        print(numcount)
        numcount += 1
# FIX: the completion message used to print once per director row; print it once.
print("It's done bruh!")
# -
list_o_tuples
cl = ["id", "director", "title", "genres", "revenue", "vote_count", "vote_average"]
ultimate = pd.DataFrame(list_o_tuples, columns=cl)
ultimate.to_csv('finalparsed.csv')
test = pd.read_csv('completed_movie_database_for_PSIT.csv')
# Sum total revenue per director.
sample = {}
already = []
for x in range(len(test.index)):
    name = test.iloc[x]['director']
    networth = test.iloc[x]['revenue']
    if name not in already:
        # BUG FIX: `already` was never updated, so this branch always ran and
        # each director's total was overwritten by their latest movie's revenue
        # instead of being accumulated.
        already.append(name)
        sample[name] = networth
    else:
        sample[name] += networth
        print(sample.get(name))
    print("yay")
# Rank directors by total revenue, highest first.
sorted_d = sorted(sample.items(), key=lambda x: x[1], reverse=True)
sorted_d
# NOTE(review): '<NAME>' below is an anonymization placeholder left by data
# scrubbing; each line originally looked up one specific director's films.
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
# 11th
test.loc[test['director'] == '<NAME>', ['title', 'revenue', "vote_count", "vote_average"]]
# 12th
test
import pygal
from pygal.style import DarkStyle
# Horizontal bar chart of the top-25 directors by total revenue.
bar_chart = pygal.HorizontalBar(x_label_rotation=30, style=DarkStyle)
bar_chart.title = "Top 25 director by revenue in all movie"
counter = 0
for a, b in sorted_d:
    # BUG FIX: `counter > 25` let 26 directors through; stop once 25 are added.
    if counter >= 25:
        break
    bar_chart.add(a, b)
    counter += 1
bar_chart.render_to_file('chart.svg')
print("done")
| Notebook/completed_movie_database_for_PSIT_parser.ipynb |
# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 18:51:39 2019
@author: satida
"""
import persian
import pickle
import os
# Resolve Alphabet.pkl relative to this module and load the Persian alphabet
# ordering used by the comparison classes below.
# NOTE(review): __file__ is undefined when this is run as a notebook cell —
# confirm the intended execution context.
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'Alphabet.pkl')
with open(filename, 'rb') as f:
    alphabet = pickle.load(f)
def SortFa(words):
    """Sort *words* by the Persian alphabet order and return them as plain strings."""
    ordered = sorted(FarsiComparison(item) for item in words)
    return [str(item) for item in ordered]
def tokenizer(text: str, invalid_char, condition_char, stop_words, alphabet):
    """Split *text* on spaces and return cleaned tokens.

    Whole words matching invalid_char/condition_char/stop_words (or a lone
    space) are dropped. Within surviving words, newlines and tabs become
    spaces, then every character that is invalid or outside *alphabet* is
    removed.
    """
    allowed = "".join(alphabet)
    tokens = []
    # Normalize non-breaking spaces and strip LTR marks before splitting.
    normalized = text.replace("\xa0", " ").replace('\u200e', "")
    for raw_word in normalized.split(" "):
        if raw_word in invalid_char or raw_word in condition_char or raw_word in stop_words or raw_word == " ":
            continue
        cleaned = raw_word.replace("\n", " ").replace("\t", " ")
        cleaned = "".join(ch for ch in cleaned if ch not in invalid_char and ch in allowed)
        tokens.append(cleaned)
    return tokens
def DollerControl(word):
    """Return every rotation of *word*, with '$' marking the original start.

    Commas are stripped from each rotated form.
    """
    rotations = []
    for split_at in range(len(word)):
        rotated = word[split_at:] + "$" + word[:split_at]
        rotations.append(rotated.replace(",", ""))
    return rotations
def deleteDoller(word: str):
    """Undo a DollerControl rotation: drop the '$' and restore the original order."""
    marker = word.index("$")
    return word[marker + 1:] + word[:marker]
class FarsiComparisonTranslate:
    """Comparison/translation helper over a comma-separated string of entries.

    NOTE(review): this class appears unfinished — see the inline notes below.
    """
    def __init__(self,ref,String,mode):
        self.ref = ref
        # Uses the module-level `alphabet` loaded from Alphabet.pkl.
        self.alphabet = alphabet
        self.mode = mode
        self.String=String
    def __gt__(self,op2):
        ref2=op2.ref
        # NOTE(review): `Greater` is defined but never called, so __gt__ always
        # falls through and returns None; `ref2` is also unused. It also reads
        # `self.word`, which this class never sets — confirm intended behavior.
        def Greater(self,op1,op2):
            for i in range(len(op1)) :
                try:
                    ch=op2[i]
                except:
                    return True
                if self.alphabet.index(self.word[i]) > self.alphabet.index(ch):
                    return True
                elif self.alphabet.index(self.word[i]) < self.alphabet.index(ch):
                    return False
                elif self.alphabet.index(self.word[i]) == self.alphabet.index(ch):
                    continue
            return False
    def __eq__(self,op2):
        # NOTE(review): compares `self.word`, which __init__ never assigns.
        if op2.word==self.word:
            return True
        else :
            return False
    def NormalTranslate(self,ref):
        # Collect characters of self.String after position `ref` until the next
        # comma; returns None if no comma follows.
        result =""
        for i in range(ref+1 ,len(self.String)):
            if self.String[i] != ",":
                result += self.String[i]
            else :
                return result
    def FullTranslate(self,ref2):
        # NOTE(review): calls self.String(...) although String is assigned like
        # data in __init__; `block1`/`block2` are unused and nothing is
        # returned — this method looks incomplete.
        try :
            block1=self.String(self.ref)
        except:
            block1=self.ref
        block2=self.String(ref2)
    def __str__(self):
        # NOTE(review): `self.word` is never set by this class.
        return self.word
class FarsiComparison:
    """Wrap a word so comparisons follow the Persian alphabet ordering.

    Relies on the module-level ``alphabet`` loaded from Alphabet.pkl.
    """

    def __init__(self, word):
        self.word = word
        self.alphabet = alphabet

    def __gt__(self, op2):
        # Character-by-character comparison; if the other word runs out of
        # characters first, this word is the greater one.
        for pos in range(len(self.word)):
            try:
                other_ch = op2.word[pos]
            except:
                return True
            mine = self.alphabet.index(self.word[pos])
            theirs = self.alphabet.index(other_ch)
            if mine > theirs:
                return True
            if mine < theirs:
                return False
        # Equal prefix and self is not longer: not greater.
        return False

    def __eq__(self, op2):
        return op2.word == self.word

    def __str__(self):
        return self.word

    def plusWord(self, char):
        # Successor of *char* in the alphabet ordering.
        idx = self.alphabet.index(char)
        return self.alphabet[idx + 1]
| Satida/Utils/Utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This cell is added by sphinx-gallery
# !pip install mrsimulator --quiet
# %matplotlib inline
import mrsimulator
print(f'You are using mrsimulator v{mrsimulator.__version__}')
# -
#
# # ¹³C MAS NMR of Glycine (CSA) [960 Hz] (TO REMOVE)
#
# The following is a sideband least-squares fitting example of a
# $^{13}\text{C}$ MAS NMR spectrum of Glycine spinning at 960 Hz.
# The following experimental dataset is a part of DMFIT [#f1]_ examples.
# We thank Dr. <NAME> for sharing the dataset.
#
#
# +
import csdmpy as cp
import matplotlib.pyplot as plt
from lmfit import Minimizer
from mrsimulator import Simulator, SpinSystem, Site
from mrsimulator.methods import BlochDecaySpectrum
from mrsimulator import signal_processing as sp
from mrsimulator.utils import spectral_fitting as sf
from mrsimulator.utils import get_spectral_dimensions
# -
# ## Import the dataset
#
#
# +
# Download the experimental 13C MAS spectrum of glycine (CSDM format).
host = "https://nmr.cemhti.cnrs-orleans.fr/Dmfit/Help/csdm/"
filename = "13C MAS 960Hz - Glycine.csdf"
experiment = cp.load(host + filename)
# standard deviation of noise from the dataset
sigma = 3.982936
# For spectral fitting, we only focus on the real part of the complex dataset
experiment = experiment.real
# Convert the coordinates along each dimension from Hz to ppm.
_ = [item.to("ppm", "nmr_frequency_ratio") for item in experiment.dimensions]
# plot of the dataset.
plt.figure(figsize=(8, 4))
ax = plt.subplot(projection="csdm")
ax.plot(experiment, color="black", linewidth=0.5, label="Experiment")
ax.set_xlim(280, -10)
plt.grid()
plt.tight_layout()
plt.show()
# -
# ## Create a fitting model
# **Spin System**
#
#
# +
# Initial-guess parameters for glycine's two carbon sites (carboxyl and alpha).
C1 = Site(
    isotope="13C",
    isotropic_chemical_shift=176.0,  # in ppm
    shielding_symmetric={"zeta": 70, "eta": 0.6},  # zeta in Hz
)
C2 = Site(
    isotope="13C",
    isotropic_chemical_shift=43.0,  # in ppm
    shielding_symmetric={"zeta": 30, "eta": 0.5},  # zeta in Hz
)
spin_systems = [SpinSystem(sites=[C1], name="C1"), SpinSystem(sites=[C2], name="C2")]
# -
# **Method**
#
#
# +
# Get the spectral dimension parameters from the experiment.
spectral_dims = get_spectral_dimensions(experiment)
MAS = BlochDecaySpectrum(
    channels=["13C"],
    magnetic_flux_density=7.05,  # in T
    rotor_frequency=960,  # in Hz
    spectral_dimensions=spectral_dims,
    experiment=experiment,  # experimental dataset
)
# Optimize the script by pre-setting the transition pathways for each spin system from
# the method.
# NOTE: the loop variable `sys` shadows the stdlib `sys` module name.
for sys in spin_systems:
    sys.transition_pathways = MAS.get_transition_pathways(sys)
# -
# **Guess Model Spectrum**
#
#
# +
# Simulation
# ----------
sim = Simulator(spin_systems=spin_systems, methods=[MAS])
# Keep per-spin-system spectra separate so they can be processed independently.
sim.config.decompose_spectrum = "spin_system"
sim.run()
# Post Simulation Processing
# --------------------------
processor = sp.SignalProcessor(
    operations=[
        sp.IFFT(),
        sp.apodization.Exponential(FWHM="20 Hz", dv_index=0), # spin system 0
        sp.apodization.Exponential(FWHM="200 Hz", dv_index=1), # spin system 1
        sp.FFT(),
        sp.Scale(factor=100),
    ]
)
processed_data = processor.apply_operations(data=sim.methods[0].simulation).real
# Plot of the guess Spectrum
# --------------------------
plt.figure(figsize=(8, 4))
ax = plt.subplot(projection="csdm")
ax.plot(experiment, color="black", linewidth=0.5, label="Experiment")
ax.plot(processed_data, linewidth=2, alpha=0.6)
ax.set_xlim(280, -10)
plt.grid()
plt.legend()
plt.tight_layout()
plt.show()
# -
# ## Least-squares minimization with LMFIT
# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick
# setup of the fitting parameters.
#
#
# Build LMFIT parameters from the simulator/processor; rotor_frequency is fitted too.
# NOTE(review): pretty_print() prints directly and returns None, so the outer
# print() emits an extra "None" line — confirm whether that wrapper is wanted.
params = sf.make_LMFIT_params(sim, processor, include={"rotor_frequency"})
print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"]))
# **Solve the minimizer using LMFIT**
#
#
minner = Minimizer(sf.LMFIT_min_function, params, fcn_args=(sim, processor, sigma))
result = minner.minimize()
result
# ## The best fit solution
#
#
# +
# Overlay experiment, residuals, and the best-fit spectrum.
best_fit = sf.bestfit(sim, processor)[0]
residuals = sf.residuals(sim, processor)[0]
plt.figure(figsize=(8, 4))
ax = plt.subplot(projection="csdm")
ax.plot(experiment, color="black", linewidth=0.5, label="Experiment")
ax.plot(residuals, color="gray", linewidth=0.5, label="Residual")
ax.plot(best_fit, linewidth=2, alpha=0.6)
ax.set_xlim(280, -10)
plt.grid()
plt.legend()
plt.tight_layout()
plt.show()
# -
# .. [#f1] D.Massiot, F.Fayon, M.Capron, I.King, <NAME>, B.Alonso, J.O.Durand,
# B.Bujoli, Z.Gan, G.Hoatson, 'Modelling one and two-dimensional solid-state NMR
# spectra.', Magn. Reson. Chem. **40** 70-76 (2002)
# `DOI: 10.1002/mrc.984 <https://doi.org/10.1002/mrc.984>`_
#
#
| docs/notebooks/fitting/1D_fitting/plot_2_13C_glycine_960Hz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MuData quickstart
# [](https://colab.research.google.com/github/PmBio/mudata/blob/master/docs/source/notebooks/quickstart_mudata.ipynb)
# Introducing multimodal data — `MuData` — objects built on top of [AnnData](https://anndata.readthedocs.io/en/latest/index.html), `mudata` library naturally enriches the Python ecosystem for data analysis to enable multimodal data analysis. Be sure to check tools that take advantage of this data format such as [muon](https://muon.readthedocs.io/) — the Python framework for multimodal omics analysis.
#
# This notebooks provides an introduction to multimodal data objects.
import mudata as md
from mudata import MuData
# ## Multimodal objects
# To see how multimodal objects behave, we will simulate some data first:
# +
import numpy as np
np.random.seed(1)
# Simulate a linear latent-factor model: n observations, d features, k factors.
n, d, k = 1000, 100, 10
z = np.random.normal(loc=np.arange(k), scale=np.arange(k)*2, size=(n,k))
w = np.random.normal(size=(d,k))
# Observed matrix y = z @ w.T, shape (n, d).
y = np.dot(z, w.T)
y.shape
# -
# Creating an `AnnData` object from the matrix will allow us to add annotations to its different dimensions (_"observations"_, e.g. samples, and measured _"variables"_):
# +
from anndata import AnnData
# First modality: n observations x d variables.
adata = AnnData(y)
adata.obs_names = [f"obs_{i+1}" for i in range(n)]
adata.var_names = [f"var_{j+1}" for j in range(d)]
adata
# -
# We will go ahead and create a second object with data for the _same observations_ but for _different variables_:
# +
# Second modality: same observations, d2 different variables from the same factors.
d2 = 50
w2 = np.random.normal(size=(d2,k))
y2 = np.dot(z, w2.T)
adata2 = AnnData(y2)
adata2.obs_names = [f"obs_{i+1}" for i in range(n)]
adata2.var_names = [f"var2_{j+1}" for j in range(d2)]
adata2
# -
# We can now wrap these two objects into a `MuData` object:
mdata = MuData({"A": adata, "B": adata2})
mdata
# _Observations_ and _variables_ of the `MuData` object are global, which means that observations with the identical name (`.obs_names`) in different modalities are considered to be the same observation. This also means variable names (`.var_names`) should be unique.
#
# This is reflected in the object description above: `mdata` has 1000 _observations_ and 150=100+50 _variables_.
# ### Variable mappings
# Upon construction of a `MuData` object, a global binary mapping between _observations_ and individual modalities is created as well as between _variables_ and modalities.
#
# Since all the observations are the same across modalities in `mdata`, all the values in the _observations_ mappings are set to `True`:
# All observations are shared, so both modality masks sum to n.
np.sum(mdata.obsm["A"]) == np.sum(mdata.obsm["B"]) == n
# For variables, those are 150-long vectors, e.g. for the `A` modality — with 100 `True` values followed by 50 `False` values:
mdata.varm['A']
# ### Object references
# Importantly, individual modalities are stored as references to the original objects.
# Add some unstructured data to the original object
adata.uns["misc"] = {"adata": True}
# Access modality A via the .mod attribute
mdata.mod["A"].uns["misc"]
# This is also why the `MuData` object has to be updated in order to reflect the latest changes to the modalities it includes:
adata2.var_names = ["var_ad2_" + e.split("_")[1] for e in adata2.var_names]
print(f"Outdated variables names: ...,", ", ".join(mdata.var_names[-3:]))
mdata.update()
print(f"Updated variables names: ...,", ", ".join(mdata.var_names[-3:]))
# ### Common observations
# While `mdata` is comprised of the same observations for both modalities, it is not always the case in the real world where some data might be missing. By design, `mudata` accounts for these scenarios since there's no guarantee observations are the same — or even intersecting — for a `MuData` instance.
# It's worth noting that other tools might provide convenience functions for some common scenarios of dealing with missing data, such as `intersect_obs()` [implemented in muon](https://muon.readthedocs.io/en/latest/api/generated/muon.pp.intersect_obs.html?highlight=intersect_obs).
# ### Rich representation
# Some notebook environments such as Jupyter/IPython allow for the [rich object representation](https://ipython.readthedocs.io/en/stable/config/integrating.html). This is what `mudata` uses in order to provide an optional HTML representation that allows to interactively explore `MuData` objects. While the dataset in our example is not the most comprehensive one, here is how it looks like:
# Interactive HTML representation (display() is the IPython built-in).
with md.set_options(display_style = "html", display_html_expand = 0b000):
    display(mdata)
# Running `md.set_options(display_style = "html")` will change the setting for the current Python session.
#
# The flag `display_html_expand` has three bits that correspond to
#
# 1. `MuData` attributes,
# 1. modalities,
# 1. `AnnData` attributes,
#
# and indicates if the fields should be expanded by default (`1`) or collapsed under the `<summary>` tag (`0`).
# ### .h5mu files
# `MuData` objects were designed to be serialized into `.h5mu` files. Modalities are stored under their respective names in the `/mod` HDF5 group of the `.h5mu` file. Each individual modality, e.g. `/mod/A`, is stored in the same way as it would be stored in the `.h5ad` file.
# +
import tempfile
# Create a temporary file
temp_file = tempfile.NamedTemporaryFile(mode="w", suffix=".h5mu", prefix="muon_getting_started_")
mdata.write(temp_file.name)
# Re-open the file in backed (read-only, on-disk) mode.
mdata_r = md.read(temp_file.name, backed=True)
mdata_r
# -
# Individual modalities are backed as well — inside the `.h5mu` file:
mdata_r["A"].isbacked
# The rich representation would also reflect the _backed_ state of `MuData` objects when they are loaded from `.h5mu` files in the read-only mode and would point to the respective file:
with md.set_options(display_style = "html", display_html_expand = 0b000):
    display(mdata_r)
# ## Multimodal methods
# When the `MuData` object is prepared, it is up to multimodal methods to be used to make sense of the data. The most simple and naïve approach is to concatenate matrices from multiple modalities to perform e.g. dimensionality reduction.
# Naive concatenation of both modalities along the variable axis: (n, 150).
x = np.hstack([mdata.mod["A"].X, mdata.mod["B"].X])
x.shape
# We can write a simple function to run principal component analysis on such a concatenated matrix. `MuData` object provides a place to store multimodal embeddings — `MuData.obsm`. It is similar to how the embeddings generated on invidual modalities are stored, only this time it is saved inside the `MuData` object rather than in `AnnData.obsm`.
def simple_pca(mdata):
    """Run a 2-component PCA on the horizontally concatenated modalities.

    Operates in place: the embedding is stored in ``mdata.obsm["X_pca"]``.
    """
    from sklearn import decomposition
    stacked = np.hstack([modality.X for modality in mdata.mod.values()])
    reducer = decomposition.PCA(n_components=2)
    embedding = reducer.fit_transform(stacked)
    # By convention, methods operate in place and store embeddings in .obsm.
    mdata.obsm["X_pca"] = embedding
    return
# Compute and store the multimodal PCA embedding on the MuData object (in place).
simple_pca(mdata)
print(mdata)
# In reality, however, having different modalities often means that the features between them come from different generative processes and are not comparable.
#
# This is where special multimodal integration methods come into play. For omics technologies, these methods are frequently addressed as _multi-omics integration methods_. `MuData` objects make it easy for the new methods to be easily applied to such data, and some of them [are implemented in muon](https://muon.readthedocs.io/en/latest/omics/multi.html).
#
| docs/source/notebooks/quickstart_mudata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementation of a Devito viscoacoustic equations
#
# ## This tutorial is contributed by SENAI CIMATEC (2020)
#
# This tutorial is based on:
#
# <br>**Linear inversion in layered viscoacoustic media using a time‐domain method** (1994)
# <br><NAME> and <NAME>
# <br>SEG Technical Program Expanded Abstracts
# <br>https://doi.org/10.1190/1.1822695
#
# <br>**True-amplitude prestack depth migration** (2007)
# <br><NAME> and <NAME>
# <br>GEOPHYSICS Technical Papers
# <br>https://doi.org/10.1190/1.2714334
#
# <br>**Attenuation compensation for least-squares reverse time migration using the viscoacoustic-wave equation** (2014)
# <br><NAME> and <NAME>
# <br>GEOPHYSICS Technical Papers
# <br>https://doi.org/10.1190/geo2013-0414.1
#
# <br>**Multiscale viscoacoustic waveform inversion with the second generation wavelet transform and adaptive time–space domain finite-difference method** (2014)
# <br><NAME>, <NAME>,and <NAME>
# <br>Geophysical Journal International, Volume 197, Issue 2, 1 May 2014, Pages 948–974
# <br>https://doi.org/10.1093/gji/ggu024
#
# <br>**Viscoacoustic prestack reverse time migration based on the optimal time-space domain high-order finite-difference method** (2014)
# <br><NAME>, <NAME>, and <NAME>
# <br>Appl. Geophys. 11, 50–62.
# <br>https://doi.org/10.1007/s11770-014-0414-8
#
# <br>**A stable and efficient approach of Q reverse time migration** (2018)
# <br><NAME>, <NAME>, and <NAME>
# <br>GEOPHYSICS Technical Papers
# <br>https://doi.org/10.1190/geo2018-0022.1
# ## Introduction
#
# The conversion of mechanical energy to heat, occurs during the propagation of seismic waves on the subsurface, due to the viscosity of the rocks. The presence of oil and gas in these rocks causes seismic attenuations. Thus, associated effects, such as dispersion and dissipation, can significantly affect the amplitudes, as well as the phase of the seismic pulse. However, in the seismic exploration, the subsurface has still been considered as an ideal elastic/acoustic medium, that is, disregarding its mitigating effect. In practice, the propagation of seismic waves on the subsurface is in many ways different from propagation in an ideal solid.
#
# For example, some subsurface rocks have anisotropic properties, are heterogeneous, porous and so on. The acoustic/elastic wave equation is not sensitive enough to describe propagation in these more complicated mediums. Generally, the viscosity of materials in the subsurface causes energy dissipation and consequently a decrease in amplitude, in addition to modifying the frequency content of the waves. This phenomenon of energy dissipation of the wave is called seismic absorption or attenuation.
#
# The goal of this tutorial is to perform a seismic modeling taking into account the viscosity of the medium, so that it is possible to more accurately simulate the seismic data and consequently build images with better resolution in the processing of this data, in addition to extracting more detailed information on rocky materials through seismic inversion.
#
# This tutorial follow three main viscoacoustic approaches in time-space domain:
#
# - Blanch and Symes (1995) / Dutta and Schuster (2014)
#
# - Ren et al. (2014)
#
# - Deng and McMechan (2007)
# <h1><center>Table of symbols</center></h1>
#
# | Symbol | Description
# | :--- | :---
# |$f$ |Frequency |
# |$f_o$ |Reference frequency |
# |$\omega$ |Angular frenquency |
# |$\omega_0$ |Angular Reference Frequency |
# |$v$ |Velocity model |
# |$v_0$ |Reference velocity at $\omega_0$ |
# |$\kappa$ |Bulk modulus |
# |$g$ |Absorption coefficient |
# |$\tau$ |Relaxation time |
# |$\tau_\sigma$ |Stress relaxation parameter |
# |$\tau_\epsilon$ |Strain relaxation parameter |
# |$Q$ |Quality factor |
# |$\eta$ |Viscosity |
# |$\rho$ |Density |
# |$\nabla$ |Nabla operator |
# |$P({\bf x},t)$ |Pressure field |
# |$r({\bf x},t)$ |Memory variable |
# |${\bf v}({\bf x},t)$ |Particle velocity |
# |$S({\bf x}_s,t)$ |Source |
# # Seismic modelling with Devito
# Before start with the viscoacoustic approaches we will describe a setup of seismic modelling with Devito in a simple 2D case. We will create a physical model of our domain and define a single source and an according set of receivers to model for the forward model. But first, we initialize some basic utilities.
# +
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from devito import *
from examples.seismic.source import RickerSource, WaveletSource, TimeAxis
from examples.seismic import ModelViscoacoustic, plot_image, setup_geometry, plot_velocity
# +
# Grid size in points.
nx = 300
nz = 300
# Define a physical size
shape = (nx, nz)
spacing = (20., 20.)
origin = (0., 0.)
nlayers = 3
nbl = 50          # number of absorbing boundary layers
space_order = 8   # spatial finite-difference order
dtype = np.float32
# Model physical parameters:
vp = np.zeros(shape)
qp = np.zeros(shape)
rho = np.zeros(shape)
# Define a velocity profile. The velocity is in km/s
vp_top = 1.5
vp_bottom = 3.5
# Define a velocity profile in km/s
v = np.empty(shape, dtype=dtype)
v[:] = vp_top # Top velocity (background)
vp_i = np.linspace(vp_top, vp_bottom, nlayers)
for i in range(1, nlayers):
    v[..., i*int(shape[-1] / nlayers):] = vp_i[i] # Bottom velocity
# Derive attenuation (Q) and density from the velocity model.
qp[:] = 3.516*((v[:]*1000.)**2.2)*10**(-6) # Li's empirical formula
rho[:] = 0.31*(v[:]*1000.)**0.25 # Gardner's relation
# + tags=["nbval-ignore-output"]
#NBVAL_IGNORE_OUTPUT
model = ModelViscoacoustic(space_order=space_order, vp=v, qp=qp, b=1/rho,
                           origin=origin, shape=shape, spacing=spacing,
                           nbl=nbl)
# +
#NBVAL_IGNORE_OUTPUT
# Plot the three physical parameter fields (velocity, Q, density) side by side.
aspect_ratio = model.shape[0]/model.shape[1]

plt_options_model = {'cmap': 'jet', 'extent': [model.origin[0], model.origin[0] + model.domain_size[0],
                                               model.origin[1] + model.domain_size[1], model.origin[1]]}

fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
slices = [slice(model.nbl, -model.nbl), slice(model.nbl, -model.nbl)]

# One (image data, colour limits, panel title) triple per panel; the velocity
# field is cropped to strip the absorbing boundary layer.
panels = [
    (np.transpose(model.vp.data[slices]), 1.5, 3.5, r"V (km/s)"),
    (np.transpose(qp), 15, 220, "Q"),
    (np.transpose(rho), 1.9, 2.4, r"Density $\rho$ (g/cm^3)"),
]
for axis, (img, lo, hi, title) in zip(ax, panels):
    handle = axis.imshow(img, vmin=lo, vmax=hi, **plt_options_model)
    fig.colorbar(handle, ax=axis)
    axis.set_title(title, fontsize=20)
    axis.set_xlabel('X (m)', fontsize=20)
    axis.set_ylabel('Depth (m)', fontsize=20)
    axis.set_aspect('auto')

plt.tight_layout()
# +
f0 = 0.005 # peak/dominant frequency
b = model.b  # buoyancy (1/density) symbol from the model
rho = 1./b  # density; NOTE: rebinds the earlier NumPy array to a Devito expression
# velocity model
vp = model.vp  # velocity symbol (also rebinds the earlier NumPy array)
lam = vp * vp * rho  # bulk modulus kappa = rho * vp^2
# Stress relaxation parameter tau_sigma (t_s) and strain relaxation
# parameter tau_epsilon (t_ep) of the SLS model; tt = tau_eps/tau_sigma - 1.
# NOTE(review): the markdown formulas above divide by 2*pi*f0*Q while the
# code divides by f0 only — presumably the convention used by
# Dutta & Schuster (2014); confirm against the reference.
t_s = (sp.sqrt(1.+1./model.qp**2)-1./model.qp)/f0
t_ep = 1./(f0**2*t_s)
tt = (t_ep/t_s) - 1.
s = model.grid.stepping_dim.spacing  # symbolic time-step spacing (dt)
damp = model.damp  # absorbing-boundary damping mask
# -
# Time step in ms and time range:
t0, tn = 0., 2000.  # start/end time in ms
dt = model.critical_dt  # time step from the CFL stability condition
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# +
from examples.seismic import Receiver
def src_rec(p, model):
    """Build source-injection and receiver-interpolation expressions for `p`.

    A Ricker wavelet source is placed at the centre of the domain, 8 m deep;
    one receiver per horizontal grid point is spread evenly along x at the
    same depth. Returns the combined expression list, the source and the
    receiver objects.
    """
    src = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)
    src.coordinates.data[0, :] = np.array(model.domain_size) * .5
    src.coordinates.data[0, -1] = 8.

    nrec = shape[0]
    rec = Receiver(name='rec', grid=model.grid, npoint=nrec, time_range=time_range)
    rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=nrec)
    rec.coordinates.data[:, 1] = 8.

    injection = src.inject(field=p.forward, expr=(s*src))
    interpolation = rec.interpolate(expr=p)
    return injection + interpolation, src, rec
# -
# Auxiliary functions for plotting data:
def plot_receiver(rec):
    """Display a receiver gather as a time-vs-offset image with time gain."""
    resampled = rec.resample(num=1001)
    # Quadratic-in-time gain matrix to balance the amplitude of late arrivals.
    gain = np.diag(np.linspace(1.0, 2.5, 1001)**2.0)
    first = resampled.coordinates.data[0, 0]
    last = resampled.coordinates.data[-1, 0]
    extent = [first, last, 1e-3*tn, t0]
    aspect = last/(1e-3*tn)/.5
    plt.figure(figsize=(10, 10))
    plt.imshow(np.dot(gain, resampled.data), vmin=-.01, vmax=.01, cmap="seismic",
               interpolation='lanczos', extent=extent, aspect=aspect)
    plt.ylabel("Time (s)", fontsize=20)
    plt.xlabel("Receiver position (m)", fontsize=20)
def plot_v_and_p(model, v, p):
    """Plot the particle-velocity components and pressure over the model."""
    slices = [slice(model.nbl, -model.nbl), slice(model.nbl, -model.nbl)]
    scale = .5*1e-3
    plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],
                                    model.origin[1] + model.domain_size[1], model.origin[1]]}
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 7))

    # One (wavefield snapshot, title) pair per panel: v_x, v_z and P.
    panels = [(v[0].data[0], r"$v_{x}$"), (v[1].data[0], r"$v_{z}$"), (p.data[0], r"$P$")]
    background = np.transpose(model.vp.data[slices])
    for axis, (field, title) in zip(ax, panels):
        axis.imshow(np.transpose(field[slices]), vmin=-scale, vmax=scale, cmap="RdGy", **plt_options_model)
        # Velocity model blended underneath for geological context.
        axis.imshow(background, vmin=1.5, vmax=3.5, cmap="jet", alpha=.5, **plt_options_model)
        axis.set_aspect('auto')
        axis.set_xlabel('X (m)', fontsize=20)
        axis.set_title(title, fontsize=20)
    ax[0].set_ylabel('Depth (m)', fontsize=20)
# ## Equation based on standard linear solid (SLS) rheological model
#
# The equations of motion for a viscoacoustic medium can be written as:
#
# \begin{equation}
# \left\{
# \begin{array}{lcl}
# \frac{\partial P}{\partial t} + \kappa (\tau + 1)(\nabla \cdot {\bf v}) + r = S({\bf x}_{s}, t) \\
# \frac{\partial {\bf v}}{\partial t} + \frac{1}{\rho}\nabla{P} = 0 \\
# \frac{\partial r}{\partial t} + \frac{1}{\tau_{\sigma}} [r + \tau \kappa (\nabla \cdot {\bf v})] = 0.
# \end{array}
# \right.
# \end{equation}
#
# Where $\tau = \tau_{\epsilon}/\tau_{\sigma} -1$ represents the magnitude of $Q$. $\tau_{\epsilon}$ and $\tau_{\sigma}$ are, respectively, the stress and strain relaxation parameters, given by:
#
# \begin{equation}
# \tau_\sigma = \frac{\sqrt{Q^2+1}-1}{2 \pi f_0 Q}
# \end{equation}
# and
# \begin{equation}
# \tau_\epsilon= \frac{\sqrt{Q^2+1}+1}{2\pi f_0 Q}
# \end{equation}
#
# Stencil created from Blanch and Symes (1995) / Dutta and Schuster (2014)
def SLS(model, p, r, v):
    """First-order SLS viscoacoustic stencil (Blanch & Symes 1995 /
    Dutta & Schuster 2014): updates for particle velocity v, memory
    variable r and pressure p, damped at the absorbing boundaries."""
    kappa = rho * (vp * vp)  # bulk modulus
    # Particle-velocity update from dv/dt + b grad(p) = 0.
    eq_v = Eq(v.forward, damp * solve(v.dt + b * grad(p), v.forward))
    # Memory-variable update from dr/dt + (r + tau*kappa*div(v))/tau_sigma = 0.
    eq_r = Eq(r.forward, damp * solve(r.dt + (1. / t_s) * (r + tt * kappa * div(v.forward)),
                                      r.forward))
    # Pressure update from dp/dt + kappa*(tau+1)*div(v) + r = 0.
    eq_p = Eq(p.forward, damp * solve(p.dt + kappa * (tt + 1.) * div(v.forward) + r.forward,
                                      p.forward))
    return [eq_v, eq_r, eq_p]
# Seismic Modelling from Blanch and Symes (1995) / Dutta and Schuster (2014) viscoacoustic wave equation.
def modelling_SLS(model):
    """Run forward modelling with the SLS viscoacoustic wave equation."""
    grid = model.grid
    # Unknowns: staggered particle velocity plus node-centred pressure and
    # memory variable.
    v = VectorTimeFunction(name="v", grid=grid, time_order=1, space_order=space_order)
    p = TimeFunction(name="p", grid=grid, time_order=1, space_order=space_order,
                     staggered=NODE)
    r = TimeFunction(name="r", grid=grid, time_order=1, space_order=space_order,
                     staggered=NODE)
    # Source injection and receiver interpolation expressions.
    src_rec_expr, src, rec = src_rec(p, model)
    op = Operator(SLS(model, p, r, v) + src_rec_expr, subs=model.spacing_map)
    op(time=time_range.num-1, dt=dt, src=src, rec=rec)
    return rec, v, p
#NBVAL_IGNORE_OUTPUT
# Forward-model with the SLS formulation and plot the results.
rec, v, p = modelling_SLS(model)
#NBVAL_IGNORE_OUTPUT
plot_receiver(rec)
# NOTE(review): rtol=10 makes this a very loose sanity check (tolerance ~160).
assert np.isclose(np.linalg.norm(rec.data), 16, rtol=10)
#NBVAL_IGNORE_OUTPUT
plot_v_and_p(model, v, p)
# Tight regression check on the horizontal particle-velocity norm.
assert np.isclose(norm(v[0]), 1.87797, atol=1e-3, rtol=0)
# ## Equation based on Kelvin-Voigt (KV) rheological model
#
# The viscoacoustic wave equation in time domain is written as:
#
# \begin{equation}
# \frac{\partial^{2}P}{\partial{t^2}} - v^{2}\nabla^{2}{P} - \eta\nabla^{2}\left(\frac{\partial P}{\partial t}\right) = S({\bf x}_{s}, t),
# \end{equation}
#
# where $\eta = \frac{v^2}{\omega_{0}Q}$ represents the viscosity of the medium.
#
# Considering the variable density $\rho$, the equation can be rewritten as:
#
# \begin{equation}
# \frac{\partial^{2}P}{\partial{t^2}} - \kappa \nabla \cdot \frac{1}{\rho} \nabla{P} - \eta \rho \nabla \cdot \frac{1}{\rho} \nabla \left(\frac{\partial{P}}{\partial{t}}\right) = S({\bf x}_{s}, t).
# \end{equation}
#
# The equation can be written using a first order formulation, given by:
#
# \begin{equation}
# \left\{
# \begin{array}{ll}
# \frac{\partial P}{\partial t} + \kappa \nabla \cdot {\bf v} - \eta \rho \nabla \cdot \frac{1}{\rho} \nabla{P} = S({\bf x}_{s}, t) \\
# \frac{\partial {\bf v}}{\partial t} + \frac{1}{\rho} \nabla{P} = 0
# \end{array}
# \right.
# \end{equation}
# Stencil created from Ren et al. (2014) viscoacoustic wave equation.
def KV(model, p, v):
    """First-order Kelvin-Voigt viscoacoustic stencil (Ren et al. 2014):
    updates for particle velocity v and pressure p with a viscous
    Laplacian term proportional to 1/Q, damped at the boundaries."""
    w0 = 2. * np.pi * f0  # reference angular frequency
    # Particle-velocity update from dv/dt + b grad(p) = 0.
    eq_v = Eq(v.forward, damp * solve(v.dt + b * grad(p), v.forward))
    # Pressure update including the attenuation term
    # eta * rho * div(b grad(p)) with eta = vp^2 / (w0 * Q).
    eq_p = Eq(p.forward, damp * solve(
        p.dt + lam * div(v.forward)
        - (lam / (w0 * model.qp)) * div(b * grad(p, shift=.5), shift=-.5),
        p.forward))
    return [eq_v, eq_p]
# Seismic Modelling from Ren et al. (2014) viscoacoustic wave equation.
def modelling_KV(model):
    """Run forward modelling with the Kelvin-Voigt viscoacoustic equation."""
    grid = model.grid
    # Unknowns: staggered particle velocity and node-centred pressure.
    v = VectorTimeFunction(name="v", grid=grid, time_order=1, space_order=space_order)
    p = TimeFunction(name="p", grid=grid, time_order=1, space_order=space_order,
                     staggered=NODE)
    # Source injection and receiver interpolation expressions.
    src_rec_expr, src, rec = src_rec(p, model)
    op = Operator(KV(model, p, v) + src_rec_expr, subs=model.spacing_map)
    op(time=time_range.num-1, dt=dt, src=src, rec=rec)
    return rec, v, p
#NBVAL_IGNORE_OUTPUT
# Forward-model with the Kelvin-Voigt formulation and plot the results.
rec, v, p = modelling_KV(model)
#NBVAL_IGNORE_OUTPUT
plot_receiver(rec)
# NOTE(review): rtol=10 makes this a very loose sanity check (tolerance ~150).
assert np.isclose(np.linalg.norm(rec.data), 15, rtol=10)
#NBVAL_IGNORE_OUTPUT
plot_v_and_p(model, v, p)
# Tight regression check on the horizontal particle-velocity norm.
assert np.isclose(norm(v[0]), 1.0639238, atol=1e-3, rtol=0)
# ## Equation based on Maxwell rheological model
#
# The viscoacoustic wave equation for the propagating pressure $P$ in the time-space domain:
#
# \begin{equation}
# \frac{1}{v^2}\frac{\partial^{2}P}{\partial{t^2}} - \nabla^{2}P + \frac{g}{v}\frac{\partial P}{\partial{t}} = S({\bf x}_{s}, t),
# \end{equation}
#
# where $g$ is the absorption coefficient, given by:
#
# \begin{equation}
# g = \frac{2\pi f_{0}}{vQ},
# \end{equation}
#
# The equation can be written using a first order formulation, given by:
#
# \begin{equation}
# \left\{
# \begin{array}{lcl}
# \frac{\partial P}{\partial t} + \kappa (\nabla \cdot {\bf v}) + \frac{2\pi f_{0}}{Q}P= S({\bf x}_{s}, t) \\
# \frac{\partial {\bf v}}{\partial t} + \frac{1}{\rho}\nabla{P} = 0 \\
# \end{array}
# \right.
# \end{equation}
#
# Stencil created from Deng and McMechan (2007) viscoacoustic wave equation.
def Maxwell(model, p, v):
    """First-order Maxwell viscoacoustic stencil (Deng & McMechan 2007):
    updates for particle velocity v and pressure p with an absorption
    term (w0/Q) * p, damped at the absorbing boundaries."""
    w0 = 2. * np.pi * f0  # reference angular frequency
    # Particle-velocity update from dv/dt + b grad(p) = 0.
    eq_v = Eq(v.forward, damp * solve(v.dt + b * grad(p), v.forward))
    # Pressure update from dp/dt + kappa*div(v) + (w0/Q)*p = 0.
    eq_p = Eq(p.forward, damp * solve(p.dt + lam * div(v.forward) + (w0 / model.qp) * p,
                                      p.forward))
    return [eq_v, eq_p]
# Seismic Modelling from Deng and McMechan (2007) viscoacoustic wave equation.
def modelling_Maxwell(model):
    """Run forward modelling with the Maxwell viscoacoustic wave equation."""
    grid = model.grid
    # Unknowns: staggered particle velocity and node-centred pressure.
    v = VectorTimeFunction(name="v", grid=grid, time_order=1, space_order=space_order)
    p = TimeFunction(name="p", grid=grid, time_order=1, space_order=space_order,
                     staggered=NODE)
    # Source injection and receiver interpolation expressions.
    src_rec_expr, src, rec = src_rec(p, model)
    op = Operator(Maxwell(model, p, v) + src_rec_expr, subs=model.spacing_map)
    op(time=time_range.num-1, dt=dt, src=src, rec=rec)
    return rec, v, p
#NBVAL_IGNORE_OUTPUT
# Forward-model with the Maxwell formulation and plot the results.
rec, v, p = modelling_Maxwell(model)
#NBVAL_IGNORE_OUTPUT
plot_receiver(rec)
# NOTE(review): rtol=10 makes this a very loose sanity check (tolerance ~160).
assert np.isclose(np.linalg.norm(rec.data), 16, rtol=10)
#NBVAL_IGNORE_OUTPUT
plot_v_and_p(model, v, p)
# Tight regression check on the horizontal particle-velocity norm.
assert np.isclose(norm(v[0]), 1.1323929, atol=1e-3, rtol=0)
# # More references
#
# [1] https://academic.oup.com/gji/article/197/2/948/616510
#
# [2] https://link.springer.com/article/10.1007/s11770-014-0414-8
#
# [3] https://janth.home.xs4all.nl/Software/fdelmodcManual.pdf
| examples/seismic/tutorials/11_viscoacoustic.ipynb |