```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'prepare/mesolitica-tpu.json'
b2_application_key_id = os.environ['b2_application_key_id']
b2_application_key = os.environ['b2_application_key']
from google.cloud import storage
client = storage.Client()
bucket = client.bucket('mesolitica-tpu-general')
best = '759900'
directory = 't5-base-v2'
!rm -rf output out {directory}
!mkdir {directory}
model = best
blob = bucket.blob(f'{directory}/model.ckpt-{model}.data-00000-of-00002')
blob.download_to_filename(f'{directory}/model.ckpt-{model}.data-00000-of-00002')
blob = bucket.blob(f'{directory}/model.ckpt-{model}.data-00001-of-00002')
blob.download_to_filename(f'{directory}/model.ckpt-{model}.data-00001-of-00002')
blob = bucket.blob(f'{directory}/model.ckpt-{model}.index')
blob.download_to_filename(f'{directory}/model.ckpt-{model}.index')
blob = bucket.blob(f'{directory}/model.ckpt-{model}.meta')
blob.download_to_filename(f'{directory}/model.ckpt-{model}.meta')
blob = bucket.blob(f'{directory}/checkpoint')
blob.download_to_filename(f'{directory}/checkpoint')
blob = bucket.blob(f'{directory}/operative_config.gin')
blob.download_to_filename(f'{directory}/operative_config.gin')
with open(f'{directory}/checkpoint', 'w') as fopen:
    fopen.write(f'model_checkpoint_path: "model.ckpt-{model}"')
from b2sdk.v1 import *
info = InMemoryAccountInfo()
b2_api = B2Api(info)
application_key_id = b2_application_key_id
application_key = b2_application_key
b2_api.authorize_account("production", application_key_id, application_key)
file_info = {'how': 'good-file'}
b2_bucket = b2_api.get_bucket_by_name('malaya-model')
tar = 't5-base-2021-07-28.tar.gz'
os.system(f'tar -czvf {tar} {directory}')
outPutname = f'pretrained/{tar}'
b2_bucket.upload_local_file(
local_file=tar,
file_name=outPutname,
file_infos=file_info,
)
import tensorflow as tf
import tensorflow_datasets as tfds
import t5
# !pip3 install t5==0.5.0
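# Overview: the cells below rebuild the trained T5 model on CPU with mesh-tensorflow,
# export a SavedModel, freeze and weight-quantize the graph, smoke-test it on a few
# Malay inputs, and upload the resulting .pb files to Backblaze B2.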
model = t5.models.MtfModel(
model_dir=directory,
tpu=None,
tpu_topology=None,
model_parallelism=1,
batch_size=1,
sequence_length={"inputs": 512, "targets": 512},
learning_rate_schedule=0.003,
save_checkpoints_steps=5000,
keep_checkpoint_max=3,
iterations_per_loop=100,
mesh_shape="model:1,batch:1",
mesh_devices=["cpu:0"]
)
string = """
Amanah Kedah berpendapat jika ada Adun Pakatan Harapan atau Bersatu negeri itu mahu berpaling tadah memberikan sokongan kepada kumpulan Muafakat Nasional, mereka perku membuat kenyataan rasmi mengenainya.
Pengerusi Amanah Kedah, Phahrolrazi Mohd Zawawi, berkata disebabkan tiada mana-mana Adun membuat kenyataan berhubung isu itu maka kerajaan negeri berpendapat tiada apa-apa yang berlaku.
Ditemui media selepas mengadakan pertemuan tertutup lebih sejam dengan Menteri Besar, Mukhriz Mahathir, hari ini Phahrolrazi berkata pihaknya juga mendapati kerajaan negeri masih berfungsi seperti biasa.
"Kami bincang keadaan semasa, ada juga kita sentuh (cubaan menukar kerajaan negeri), tetapi kita lihat kerajaan masih berfungsi.
"Tidak ada apa-apa kenyataan dari pihak sana (pembangkang) bahawa mereka sudah cukup majoriti setakat ini," katanya seperti dipetik BH Online.
Spekulasi mengenai pertukaran kerajaan menjadi kencang sejak semalam ekoran berlaku pertemuan tertutup pemimpin PAS dan Umno Kedah di Alor Setar semalam.
Turut hadir Setiausaha Agung PAS yang juga Menteri di Jabatan Perdana Menteri, Takiyuddin Hassan, dan Menteri Besar Terengganu, Dr Ahmad Samsuri Mokhtar.
Cuba jatuhkan sejak dulu
Perkembangan itu berlaku kesan tindakan PKR memecat dan menggantung sejumlah anggota mereka baru-baru ini dan dipercayai memberi kesan terhadap pendirian wakil rakyat parti itu di Kedah.
Turut disebut-sebut akan beralih arah dalam perjalanan politik mereka ialah Adun Bersatu.
Untuk rekod berdasarkan pecahan parti PAS menguasai kerusi terbesar dalam DUN dan lazimnya pemimpin parti itu akan menjadi pilihan menjadi menteri besar jika berlaku pertukaran kerajaan.
Menurut Phahrolrazi, jika ada mana-mana wakil rakyat Bersatu atau PH mahu melompat, mereka wajar menyatakannya secara rasmi.
Tanpa kenyataan begitu, katanya, Amanah beranggapan isu perubahan kerajaan negeri masih bersifat spekulasi.
Timbalan Pengerusi Amanah Kedah, Dr Ismail Salleh, pula berkata ada kemungkinan Adun Bersatu, PH atau exco negeri tu yang sudah diumpan untuk membelakangkan mandat rakyat.
Beliau yang juga exco Kedah berkata memang sejak dulu lagi PAS cuba menjatuhkan kerajaan negeri dengan memujuk Adun PH serta Bersatu bertindak seperti rakan mereka di Perak, Johor dan Selangor.
"""
# https://harakahdaily.net/index.php/2020/05/22/apa-ph-buat-isu-kenaikan-harga-barang/
string2 = """
SEKARANG PH tengah mainkan isu harga barang untuk memburukkan Kerajaan PN. Kata Ahli Majlis Setiausaha PH (Saifuddin Nasution, Khalid Samad dan Ong Kian Ming) harga barang mentah dan basah menunjukkan ada peningkatan di banyak tempat. Ini membebankan rakyat kata mereka.
Saya setuju, Kerajaan PN perlu pantau dan ambil apa-apa tindakan yang perlu jika ada peningkatan harga barang basah atau mentah.
Tapi dalam masa yang sama saya fikir elok juga kalau kita imbas balik apa pula tindakan yang diambil oleh Kerajaan PH ketika rakyat tertekan dengan harga barang yang melonjak gila-gila masa mereka jadi Kerajaan Persekutuan selama 22 bulan dulu.
Mari kita semak balik.
(1) Kenaikan harga telur
Saifuddin Nasution – “Kementerian akan siasat.”
Salahuddin Ayub – “Ianya adalah perkara biasa disebabkan kenaikan bahan makanan ayam.”
(2) Kenaikan harga bawang 3 kali ganda
Saifuddin Nasution – “KPNHEP akan pantau kenaikan ini. Rakyat perlu tukar kepada bawang Holland.”
(3) Kenaikan harga ikan kembung
Salahuddin Ayub – “Saya pelik mengapa harga ikan kembung naik sedangkan ada 7 bilion ikan kembung dalam laut.”
(4) Kenaikan harga barangan kawalan
Saifuddin Nasution – “KPNHEP tidak menerima sebarang aduan kenaikan harga barang kawalan. Kita akan siasat.”
(5) Kenaikan harga barang mentah dan basah menjelang Aidilfitri 2019
Saifuddin Nasution – “KPNHEP puas hati dengan trend semasa harga barang keperluan. Tiada kenaikan mendadak (kenaikan sedikit demi sedikit itu perkara biasa).”
(6) Kenaikan harga barang keperluan pada penghujung 2018 dan sepanjang 2019
Lim Guan Eng – “Kenaikan harga barang disebakan banyak faktor seperti penyusutan nilai Ringgit, musim perayaan, kegiatan penyeludupan, kemarau di negara pengeluar selain sikap peniaga yang mahu mengaut keuntungan berlebihan. Ianya tidak dapat dielakkan.”
Yang saya listkan di atas hanya sebahagian kecil daripada kenyataan Menteri-menteri PH. Ada banyak lagi.
Kesimpulannya, PH tak buat apa-apa pun berkaitan kenaikan harga barang keperluan yang meresahkan rakyat semasa mereka jadi Kerajaan. Tapi sekarang tiba-tiba pula mereka jadi prihatin dan mahu jadi hero rakyat.
"""
# https://www.malaysiakini.com/news/526581
string3 = """
KATA MP | Saya membaca kenyataan beberapa orang penganalisis politik seperti yang dilaporkan dalam media - misalnya di Malaysiakini - mengenai perkembangan semasa politik dalam negara.
Mereka, antara lain, menyarankan agar Pakatan Harapan (PH) memberi tumpuan kepada PRU15 untuk mengambil kerajaan dan bukannya melakukan hal itu pada masa sekarang.
Seorang pemimpin Umno pula dilaporkan oleh akhbar Sinar Harian dengan bangga menggunakan ucapan titah yang di-Pertuan Agong semasa hari perasmian sidang parlimen pada 18 Mei lalu bagi menunjukkan kononnya kerajaan yang ada sekarang ialah "kerajaan Tuanku".
Pertamanya, saya ingin ucapkan terima kasihlah kepada para penganalisis politik tersebut atas analisis mereka. Maaf - dan dengan penuh hormat - jika saya katakan bahawa analisis anda semua bernilai amat amat rendah dan hampir mencapai tahap tidak ada nilai.
Saya tidak pasti di mana anda semua simpan disiplin ilmu anda.
Keduanya, sekiranya kritikan anda semua itu benar-benar jujur, cadangan anda semua (bahawa perlu tunggu hingga PRU15 untuk ambil alih kerajaan) itu sewajarnya ditujukan kepada Perikatan Nasional (PN) yang membentuk kerajaan sekarang tanpa sebarang mandat rakyat dalam sebarang pilihan raya.
Ia diakui sendiri oleh Perdana Menteri Muhyiddin Yassin semasa berucap secara langsung di televisyen baru-baru ini. Bagi saya ia sesuai dinobatkan sebagai Ucapan Mak Cik Kiah.
Dibentuk secara sah, bermoral
Ketiga, jika PH kembali berkuasa pada masa ini, hakikatnya ia berkuasa semula dengan mandat rakyat yang diberi oleh rakyat menerusi PRU14.
Mandat tersebut tidak pernah ditarik balik! Bukankah mandat itu untuk tempoh lima tahun? Justeru, siapa yang tidak hormat kepada mandat rakyat tersebut?
Keempat, meminta agar PH menunggu sehingga PRU15 nampaknya berdasarkan andaian atau premis bahawa kerajaan PN ini wajar dibiarkan mentadbir beberapa tahun lagi.
Pun begitu ada beberapa fakta yang perlu diperhatikan, iaitu:
Ia merupakan kerajaan yang memiliki saiz kabinat yang lebih besar berbanding PH.
Kerajaan PN meriah mengagih-agihkan ghanimah kepada aktor-aktor politik seperti pelantikan sebagai duta khas bertaraf menteri dan pelantikan dalam syarikat berkaitan kerajaan (GLC) bagi 'membeli' kesetiaan
Ia merupakan kerajaan yang lebih teruja 'membebaskan' insan yang terpalit dengan skandal mega 1MDB.
Sedarkah para penganalisis berkenaan mengenai semua realiti ini, selain banyak isu lain membabitkan imej negara, ekonomi, pelaburan asing, keyakinan pelabur dan sebagainya?
Kelima, kerajaan PH yang ditumbangkan oleh PN sekarang adalah kerajaan yang tidak ada sedikit pun 'daki' ketakabsahan (legitimacy) sama ada dari sudut perlembagaan, proses demokrasi atau dari sudut moral dan agama.
Di mana suara mereka sebelum ini
Tiada sedikit pun elemen pengkhianatan dalam pembentukan kerajaan PH. Ia dibentuk secara sah, secara bermoral dan beretika sambil menjunjung tinggi semangat dan ruh Perlembagaan Persekutuan.
Ia adalah sepenuhnya kerajaan rakyat yang mewakili semua bangsa, etnik dan agama.
Ya, benar, kerajaan PH itu tidaklah sempurna. Ada kelemahan di sini sana. Namun siapa yang boleh menafikan kesungguhan kerajaan PH bagi membetulkan kerosakan amat teruk yang ditinggalkan oleh kerajaan sebelum ini?
Kesungguhan itu boleh dilihat oleh ramai orang di dalam atau di luar negara. Semasa saya bertugas di luar negara atas kapasiti anggota pentadbiran kerajaan PH, ramai rakan sejawat saya di luar yang bukan sahaja memuji kita malah lebih daripada itu mereka berjanji untuk membantu.
Saya masih ingat mantan perdana menteri - Dr Mahathir Mohamad - sering mengatakan bahawa hanya setelah beberapa bulan PH mengambil alih tampuk pemerintahan negara, usaha menukar imej negara ini yang dikenali sebagai negara kleptokrat mula menunjukkan hasilnya.
Keenam, saya tertanya-tanya di mana suara para penganalisis politik itu semua ketika PN mengambil alih kerajaan PH secara tidak bermaruah hingga digelar kerajaan pintu belakang atau kerajaan tebuk atap?
Ya, di mana suara mereka ketika Langkah Sheraton digerakkan?
Kenapa - pada masa itu - kita tidak dengar mereka menasihati PN agar menunggu sehingga PRU15 untuk menjadi kerajaan?
Tentang isu "kerajaan Tuanku" atau "kerajaan beta" itu, maaflah, takkanlah serendah itu tahap politik mereka? Kenapa terlalu paranoid dengan lafaz itu?
Dalam sistem demokrasi ala Westminster yang menjunjung prinsip raja berperlembagaan, tiada sebarang keanehan pun dengan istilah tersebut. Ia fenomena yang lumrah. Saya rasa aneh apabila ada yang sanggup mengeksploitasi istilah berkenaan.
Amat tidak wajar dan amat tidak beretika sama sekali untuk mengheret institusi diraja - sama seperti tidak wajar mengeksploitasi isu agama dan perkauman - demi kelangsungan jangka hayat politik yang kita tahu sedang begitu nazak sekarang.
"""
string4 = """
ORANG ramai tidak perlu panik atau bimbang dengan berita tular di media sosial mendakwa ayam yang dibeli di pasar raya menjadi `ejen' pembawa virus Covid-19 dan boleh membahayakan nyawa.
Ini berdasarkan kenyataan dikeluarkan Jabatan Perkhidmatan Veteriner Putrajaya (JPVP) pada 9 Mei lalu, menjelaskan bahawa ayam ternakan tidak boleh dijangkiti virus Covid-19, walaupun pekerja kilang berkenaan atau mereka yang mengurus atau memproses ayam mempunyai kontak Covid-19.
Jurucakap jabatan itu berkata, virus covid-19 juga tidak mampu bertahan lama pada daging ayam sekiranya pekerja syarikat ayam terbabit disahkan positif Covid-19.
Katanya, dalam proses penyembelihan dan pemprosesan yang dilakukan terhadap ayam, virus Covid-19 yang mungkin ada pada ayam akan mati dan ayam berkenaan selamat dimakan.
"Kenyataan ini juga disokong dengan laporan dikeluarkan Pertubuhan Kesihatan Sedunia (WHO) pada 7 April 2020, yang menjelaskan bahawa makanan yang diproses selamat dimakan.
"Malah, laporan itu juga mengesahkan bahawa ujian Covid-19 yang dibuat terhadap ayam dari negara China menunjukkan semuanya negatif penyakit berkenaan," katanya di sini, tadi.
Isu kebimbangan orang ramai mengenai kedudukan ayam timbul selepas wujud kluster baharu Covid-19 di sebuah kilang pemprosesan ayam di Pedas Rembau Negeri Sembilan, pada 2 Mei lalu.
Kebimbangan itu semakin bertambah apabila isu berkenaan viral dalam laman sosial hingga mendapat pelbagai pandangan daripada netizen.
Dalam laporan yang dikeluarkan Kementerian Kesihatan Malaysia (KKM) peringkat awal mendapati 60 pekerja loji pemprosesan ayam di kilang terbabit positif Covid-19.
Difahamkan kilang berkenaan adalah loji pemprosesan ayam komersial di bawah seliaan JPV dan dikatakan memproses dan membekalkan sehingga 54,000 ekor ayam sehari daripada jumlah keseluruhan bekalan ayam segar seluruh negara purata sebanyak 1.6 juta ekor sehari.
Sumber ayam hidup diperoleh daripada 79 ladang ayam kontrak di bawah kawal selia syarikat dan 23 ladang persendirian.
Syarikat itu juga membekalkan ayam yang telah diproses ke beberapa pasar raya.
Kluster baharu Covid-19 di kilang berkenaan mula dikesan apabila seorang kakitangan syarikat mengalami gejala demam, batuk, selsema, sesak nafas dan sakit kepala pada 5 April lalu.
Pekerja terbabit telah menjalani saringan ujian Covid-19 pada 10 April dan keputusan ujian disahkan positif dua hari kemudian.
Sehingga 8 Mei lalu, seramai 786 orang kakitangan dan ahli keluarga kilang berkenaan telah dijalankan saringan ujian Covid-19 dan daripada jumlah terbabit, 60 didapati positif Covid-19.
Seramai 286 negatif dan 440 orang masih menunggu keputusan dan pekerja yang disahkan popsitif Covid-19 telah dikuarantin.
Susulan itu, KKM mengeluarkan arahan menghentikan operasi kilang terbabit selama 14 hari berkuatkuasa dari 7 hingga 20 Mei ini.
Dalam laporan yang dikeluarkan KKM semalam, terdapat pertambahan 43 kes, membabitkan warga pekerja kilang berkenan menjadikan jumlah kes positif Covid-19 kepada 131 kes.
"Daripada jumlah itu 122 kes melibatkan bukan warganegara," kata Ketua Pengarah Kesihatan, Dr Noor Hisham Abdullah.
"""
import re
# minimum cleaning, just simply to remove newlines.
def cleaning(string):
    string = string.replace('\n', ' ')
    string = re.sub(r'[ ]+', ' ', string).strip()
    return string
string = cleaning(string)
string2 = cleaning(string2)
string3 = cleaning(string3)
string4 = cleaning(string4)
# with tf.io.gfile.GFile('test.txt', "w") as f:
# f.write("ringkasan: %s\n" % string2)
# f.write("ringkasan: %s\n" % string3)
# f.write("ringkasan: %s\n" % string4)
!rm -rf output/*
import gin
from t5.data import sentencepiece_vocabulary
DEFAULT_SPM_PATH = 'prepare/sp10m.cased.ms-en.model'
DEFAULT_EXTRA_IDS = 100
model_dir = directory
def get_default_vocabulary():
    return sentencepiece_vocabulary.SentencePieceVocabulary(
        DEFAULT_SPM_PATH, DEFAULT_EXTRA_IDS)
with gin.unlock_config():
    gin.parse_config_file(t5.models.mtf_model._operative_config_path(model_dir))
    gin.bind_parameter("Bitransformer.decode.beam_size", 1)
    gin.bind_parameter("Bitransformer.decode.temperature", 0)
    gin.bind_parameter("utils.get_variable_dtype.slice_dtype", "float32")
    gin.bind_parameter(
        "utils.get_variable_dtype.activation_dtype", "float32")
vocabulary = t5.data.SentencePieceVocabulary(DEFAULT_SPM_PATH)
estimator = model.estimator(vocabulary, disable_tpu=True)
estimator.__dict__
import os
checkpoint_step = t5.models.mtf_model._get_latest_checkpoint_from_dir(model_dir)
model_ckpt = "model.ckpt-" + str(checkpoint_step)
checkpoint_path = os.path.join(model_dir, model_ckpt)
checkpoint_step, model_ckpt, checkpoint_path
from mesh_tensorflow.transformer import dataset as transformer_dataset
def serving_input_fn():
    inputs = tf.placeholder(
        dtype=tf.string,
        shape=[None],
        name="inputs")
    batch_size = tf.shape(inputs)[0]
    padded_inputs = tf.pad(inputs, [(0, tf.mod(-tf.size(inputs), batch_size))])
    dataset = tf.data.Dataset.from_tensor_slices(padded_inputs)
    dataset = dataset.map(lambda x: {"inputs": x})
    dataset = transformer_dataset.encode_all_features(dataset, vocabulary)
    dataset = transformer_dataset.pack_or_pad(
        dataset=dataset,
        length=model._sequence_length,
        pack=False,
        feature_keys=["inputs"]
    )
    dataset = dataset.batch(tf.cast(batch_size, tf.int64))
    features = tf.data.experimental.get_single_element(dataset)
    return tf.estimator.export.ServingInputReceiver(
        features=features, receiver_tensors=inputs)
out = estimator.export_saved_model('output', serving_input_fn, checkpoint_path=checkpoint_path)
config = tf.ConfigProto()
config.allow_soft_placement = True
sess = tf.Session(config = config)
meta_graph_def = tf.saved_model.loader.load(
sess,
[tf.saved_model.tag_constants.SERVING],
out)
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'base/model.ckpt')
strings = [
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('encoder' in n.op
or 'decoder' in n.name
or 'shared' in n.name
or 'inputs' in n.name
or 'output' in n.name
or 'SentenceTokenizer' in n.name
or 'self/Softmax' in n.name)
and 'adam' not in n.name
and 'Assign' not in n.name
]
def freeze_graph(model_dir, output_node_names):
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exist. Please specify an export "
            'directory: %s' % model_dir
        )
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names,
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('base', strings)
import struct
unknown = b'\xff\xff\xff\xff'
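# 0xFFFFFFFF is -1 encoded as a little-endian int32; load_graph() below patches it
# into Reshape shape constants so the leading (batch) dimension stays dynamic
# instead of being frozen at the export-time batch size.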
def load_graph(frozen_graph_filename):
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Rewrite training-only ops so the frozen graph can be used for inference.
    for node in graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'AssignAdd':
            node.op = 'Add'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'Assign':
            node.op = 'Identity'
            if 'use_locking' in node.attr: del node.attr['use_locking']
            if 'validate_shape' in node.attr: del node.attr['validate_shape']
            if len(node.input) == 2:
                node.input[0] = node.input[1]
                del node.input[1]
        if 'Reshape/shape' in node.name or 'Reshape_1/shape' in node.name:
            b = node.attr['value'].tensor.tensor_content
            arr_int = [int.from_bytes(b[i:i + 4], 'little') for i in range(0, len(b), 4)]
            if len(arr_int):
                arr_byte = [unknown] + [struct.pack('<i', i) for i in arr_int[1:]]
                arr_byte = b''.join(arr_byte)
                node.attr['value'].tensor.tensor_content = arr_byte
            if len(node.attr['value'].tensor.int_val):
                node.attr['value'].tensor.int_val[0] = -1
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph
g = load_graph('base/frozen_model.pb')
i = g.get_tensor_by_name('import/inputs:0')
o = g.get_tensor_by_name('import/SelectV2_3:0')
i, o
test_sess = tf.Session(graph = g)
%%time
o_ = test_sess.run(o, feed_dict = {i: [f'ringkasan: {string}',
f'ringkasan: {string2}',
f'ringkasan: {string3}',
f'ringkasan: {string4}',
f'tajuk: {string}',
f'tajuk: {string2}',
f'tajuk: {string3}',
f'tajuk: {string4}',
'terjemah Inggeris ke Melayu: PETALING JAYA: Former prime minister Najib Razak has questioned whether the government knows how to manage the Covid-19 pandemic, outlining several seemingly contradictory announcements it has made.',
'terjemah Melayu ke Inggeris: PETALING JAYA: Pertemuan bekas Perdana Menteri, Datuk Seri Najib Tun Razak dan Timbalan Perdana Menteri, Datuk Seri Ismail Sabri Yaakob hari ini adalah bagi membincangkan isu berkaitan hala tuju dan dasar negara.']})
o_
import sentencepiece as spm
sp_model = spm.SentencePieceProcessor()
sp_model.Load(DEFAULT_SPM_PATH)
for k in range(len(o_)):
    print(k, sp_model.DecodeIds(o_[k].tolist()))
from tensorflow.tools.graph_transforms import TransformGraph
transforms = ['add_default_attributes',
'remove_nodes(op=Identity, op=CheckNumerics)',
'fold_batch_norms',
'fold_old_batch_norms',
'quantize_weights(minimum_size=1536000)',
#'quantize_weights(fallback_min=-10240, fallback_max=10240)',
'strip_unused_nodes',
'sort_by_execution_order']
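# quantize_weights(minimum_size=1536000) converts only tensors with at least that
# many elements to 8-bit, so the large embedding/projection matrices shrink while
# small tensors keep full float precision.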
pb = 'base/frozen_model.pb'
input_graph_def = tf.GraphDef()
with tf.gfile.FastGFile(pb, 'rb') as f:
    input_graph_def.ParseFromString(f.read())
transformed_graph_def = TransformGraph(input_graph_def,
['inputs'],
['SelectV2_3'], transforms)
with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:
    f.write(transformed_graph_def.SerializeToString())
g = load_graph('base/frozen_model.pb.quantized')
i = g.get_tensor_by_name('import/inputs:0')
o = g.get_tensor_by_name('import/SelectV2_3:0')
i, o
test_sess = tf.InteractiveSession(graph = g)
o_ = test_sess.run(o, feed_dict = {i: [string3]})[0]
sp_model.DecodeIds(o_.tolist())
file = 'base/frozen_model.pb'
outPutname = 'abstractive-summarization-v2/t5/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'base/frozen_model.pb.quantized'
outPutname = 'abstractive-summarization-v2/t5-quantized/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'base/frozen_model.pb'
outPutname = 'knowledge-graph-triplet/t5/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'base/frozen_model.pb.quantized'
outPutname = 'knowledge-graph-triplet/t5-quantized/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'base/frozen_model.pb'
outPutname = 'paraphrase-v2/t5/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'base/frozen_model.pb.quantized'
outPutname = 'paraphrase-v2/t5-quantized/model.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
```
**This notebook is an exercise in the [Machine Learning Explainability](https://www.kaggle.com/learn/machine-learning-explainability) course. You can reference the tutorial at [this link](https://www.kaggle.com/dansbecker/partial-plots).**
---
## Set Up
Today you will create partial dependence plots and practice building insights with data from the [Taxi Fare Prediction](https://www.kaggle.com/c/new-york-city-taxi-fare-prediction) competition.
We have again provided code to do the basic loading, review and model-building. Run the cell below to set everything up:
```
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Environment Set-Up for feedback system.
from learntools.core import binder
binder.bind(globals())
from learntools.ml_explainability.ex3 import *
print("Setup Complete")
# Data manipulation code below here
data = pd.read_csv('../input/new-york-city-taxi-fare-prediction/train.csv', nrows=50000)
# Remove data with extreme outlier coordinates or negative fares
data = data.query('pickup_latitude > 40.7 and pickup_latitude < 40.8 and ' +
'dropoff_latitude > 40.7 and dropoff_latitude < 40.8 and ' +
'pickup_longitude > -74 and pickup_longitude < -73.9 and ' +
'dropoff_longitude > -74 and dropoff_longitude < -73.9 and ' +
'fare_amount > 0'
)
y = data.fare_amount
base_features = ['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude']
X = data[base_features]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
first_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(train_X, train_y)
print("Data sample:")
data.head()
data.describe()
```
## Question 1
Here is the code to plot the partial dependence plot for pickup_longitude. Run the following cell.
```
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
feat_name = 'pickup_longitude'
pdp_dist = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name)
pdp.pdp_plot(pdp_dist, feat_name)
plt.show()
```
Why does the partial dependence plot have this U-shape?
Does your explanation suggest what shape to expect in the partial dependence plots for the other features?
Create all other partial plots in a for-loop below (copying the appropriate lines from the code above).
```
for feat_name in base_features:
    pdp_dist = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name)
    # plot it
    pdp.pdp_plot(pdp_dist, feat_name)
    plt.show()
```
Do the shapes match your expectations for what shapes they would have? Can you explain the shape now that you've seen them?
Uncomment the following line to check your intuition.
```
# Check your answer (Run this code cell to receive credit!)
q_1.solution()
```
## Question 2
Now you will run a 2D partial dependence plot. As a reminder, here is the code from the tutorial.
```
inter1 = pdp.pdp_interact(model=my_model, dataset=val_X, model_features=feature_names, features=['Goal Scored', 'Distance Covered (Kms)'])
pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=['Goal Scored', 'Distance Covered (Kms)'], plot_type='contour')
plt.show()
```
Create a 2D plot for the features `pickup_longitude` and `dropoff_longitude`, plotted appropriately.
What do you expect it to look like?
```
# Add your code here
inter1 = pdp.pdp_interact(model=first_model, dataset=val_X, model_features=base_features, features=['pickup_longitude', 'dropoff_longitude'])
pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=['pickup_longitude', 'dropoff_longitude'], plot_type='contour')
plt.show()
```
Uncomment the line below to see the solution and explanation for how one might reason about the plot shape.
```
# Check your answer (Run this code cell to receive credit!)
q_2.solution()
```
## Question 3
Consider a ride starting at longitude -73.92 and ending at longitude -74. Using the graph from the last question, estimate how much money the rider would have saved if they'd started the ride at longitude -73.98 instead?
```
savings_from_shorter_trip = 15
# Check your answer
q_3.check()
```
For a solution or hint, uncomment the appropriate line below.
```
# q_3.hint()
# q_3.solution()
```
## Question 4
In the PDPs you've seen so far, location features have primarily served as a proxy to capture distance traveled. In the permutation importance lessons, you added the features `abs_lon_change` and `abs_lat_change` as a more direct measure of distance.
Create these features again here. You only need to fill in the top two lines. Then run the following cell.
**After you run it, identify the most important difference between this partial dependence plot and the one you got without absolute value features. The code to generate the PDP without absolute value features is at the top of this code cell.**
---
```
# This is the PDP for pickup_longitude without the absolute difference features. Included here to help compare it to the new PDP you create
feat_name = 'pickup_longitude'
pdp_dist_original = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name)
pdp.pdp_plot(pdp_dist_original, feat_name)
plt.show()
# create new features
data['abs_lon_change'] = abs(data.dropoff_longitude - data.pickup_longitude)
data['abs_lat_change'] = abs(data.dropoff_latitude - data.pickup_latitude)
#data['abs_lat_change'] = ____
features_2 = ['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'abs_lat_change',
'abs_lon_change']
X = data[features_2]
new_train_X, new_val_X, new_train_y, new_val_y = train_test_split(X, y, random_state=1)
second_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(new_train_X, new_train_y)
feat_name = 'pickup_longitude'
pdp_dist = pdp.pdp_isolate(model=second_model, dataset=new_val_X, model_features=features_2, feature=feat_name)
pdp.pdp_plot(pdp_dist, feat_name)
plt.show()
# Check your answer
q_4.check()
```
Uncomment the lines below to see a hint or the solution (including an explanation of the important differences between the plots).
```
# q_4.hint()
# q_4.solution()
```
## Question 5
Consider a scenario where you have only 2 predictive features, which we will call `feat_A` and `feat_B`. Both features have minimum values of -1 and maximum values of 1. The partial dependence plot for `feat_A` increases steeply over its whole range, whereas the partial dependence plot for feature B increases at a slower rate (less steeply) over its whole range.
Does this guarantee that `feat_A` will have a higher permutation importance than `feat_B`? Why or why not?
After you've thought about it, uncomment the line below for the solution.
```
# Check your answer (Run this code cell to receive credit!)
q_5.solution()
```
## Question 6
The code cell below does the following:
1. Creates two features, `X1` and `X2`, having random values in the range [-2, 2].
2. Creates a target variable `y`, which is always 1.
3. Trains a `RandomForestRegressor` model to predict `y` given `X1` and `X2`.
4. Creates a PDP plot for `X1` and a scatter plot of `X1` vs. `y`.
Do you have a prediction about what the PDP plot will look like? Run the cell to find out.
Modify the initialization of `y` so that our PDP plot has a positive slope in the range [-1,1], and a negative slope everywhere else. (Note: *you should only modify the creation of `y`, leaving `X1`, `X2`, and `my_model` unchanged.*)
```
import numpy as np
from numpy.random import rand
n_samples = 20000
# Create array holding predictive feature
X1 = 4 * rand(n_samples) - 2
X2 = 4 * rand(n_samples) - 2
# Create y. you should have X1 and X2 in the expression for y
y = -2 * X1 * (X1<-1) + X1 - 2 * X1 * (X1>1) - X2
# create dataframe because pdp_isolate expects a dataFrame as an argument
my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y})
predictors_df = my_df.drop(['y'], axis=1)
my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y)
pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1')
# visualize your results
pdp.pdp_plot(pdp_dist, 'X1')
plt.show()
# Check your answer
q_6.check()
```
Uncomment the lines below for a hint or solution
```
# q_6.hint()
#q_6.solution()
```
## Question 7
Create a dataset with 2 features and a target, such that the pdp of the first feature is flat, but its permutation importance is high. We will use a RandomForest for the model.
*Note: You only need to supply the lines that create the variables X1, X2 and y. The code to build the model and calculate insights is provided*.
```
import eli5
from eli5.sklearn import PermutationImportance
n_samples = 20000
# Create array holding predictive feature
X1 = 4 * rand(n_samples) - 2
X2 = 4 * rand(n_samples) - 2
# Create y. you should have X in the expression for y
y = X1 * X2
# create dataframe because pdp_isolate expects a dataFrame as an argument
my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y})
predictors_df = my_df.drop(['y'], axis=1)
my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y)
pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1')
pdp.pdp_plot(pdp_dist, 'X1')
plt.show()
perm = PermutationImportance(my_model).fit(predictors_df, my_df.y)
# Check your answer
q_7.check()
# show the weights for the permutation importance you just calculated
eli5.show_weights(perm, feature_names = ['X1', 'X2'])
# Uncomment the following lines for the hint or solution
# q_7.hint()
#q_7.solution()
```
## Keep Going
Partial dependence plots can be really interesting. We have a [discussion thread](https://www.kaggle.com/learn-forum/65782) to talk about what real-world topics or questions you'd be curious to see addressed with partial dependence plots.
Next, learn how **[SHAP values](https://www.kaggle.com/dansbecker/shap-values)** help you understand the logic for each individual prediction.
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161307) to chat with other Learners.*
System of differential equations: two equations for the velocity components $u,v$ and one equation for pressure:
$$\frac{\partial u}{\partial t}+u\frac{\partial u}{\partial x}+v\frac{\partial u}{\partial y} = -\frac{1}{\rho}\frac{\partial p}{\partial x}+\nu \left(\frac{\partial^2 u}{\partial x^2}+\frac{\partial^2 u}{\partial y^2} \right) $$
$$\frac{\partial v}{\partial t}+u\frac{\partial v}{\partial x}+v\frac{\partial v}{\partial y} = -\frac{1}{\rho}\frac{\partial p}{\partial y}+\nu\left(\frac{\partial^2 v}{\partial x^2}+\frac{\partial^2 v}{\partial y^2}\right) $$
$$\frac{\partial^2 p}{\partial x^2}+\frac{\partial^2 p}{\partial y^2} = -\rho\left(\frac{\partial u}{\partial x}\frac{\partial u}{\partial x}+2\frac{\partial u}{\partial y}\frac{\partial v}{\partial x}+\frac{\partial v}{\partial y}\frac{\partial v}{\partial y} \right)$$
Discretizing the $u$-momentum equation, we get:
$$
\begin{split}
& \frac{u_{i,j}^{n+1}-u_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i,j-1}^{n}}{\Delta y} = \\
& \qquad -\frac{1}{\rho}\frac{p_{i+1,j}^{n}-p_{i-1,j}^{n}}{2\Delta x}+\nu\left(\frac{u_{i+1,j}^{n}-2u_{i,j}^{n}+u_{i-1,j}^{n}}{\Delta x^2}+\frac{u_{i,j+1}^{n}-2u_{i,j}^{n}+u_{i,j-1}^{n}}{\Delta y^2}\right)
\end{split}
$$
Rearranging the terms, the momentum equation in the $u$ direction:
$$
\begin{split}
u_{i,j}^{n+1} = u_{i,j}^{n} & - u_{i,j}^{n} \frac{\Delta t}{\Delta x} \left(u_{i,j}^{n}-u_{i-1,j}^{n}\right) - v_{i,j}^{n} \frac{\Delta t}{\Delta y} \left(u_{i,j}^{n}-u_{i,j-1}^{n}\right) \\
& - \frac{\Delta t}{\rho 2\Delta x} \left(p_{i+1,j}^{n}-p_{i-1,j}^{n}\right) \\
& + \nu \left(\frac{\Delta t}{\Delta x^2} \left(u_{i+1,j}^{n}-2u_{i,j}^{n}+u_{i-1,j}^{n}\right) + \frac{\Delta t}{\Delta y^2} \left(u_{i,j+1}^{n}-2u_{i,j}^{n}+u_{i,j-1}^{n}\right)\right)
\end{split}
$$
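Written as code, this update is a single vectorized assignment over the interior grid points (the $v$-momentum update that follows has the same pattern). Below is a minimal standalone sketch using the same slicing convention as the `cavity_flow` function later in this notebook; the tiny grid and random field values are placeholders for illustration, not part of the actual simulation.
```
import numpy as np

# Toy setup just to make the statement runnable; the real solver builds these arrays below.
nx = ny = 5
dx = dy = 2 / (nx - 1)
dt, rho, nu = .001, 1, .1
un = np.random.rand(ny, nx)   # u at time level n
vn = np.random.rand(ny, nx)   # v at time level n
p = np.random.rand(ny, nx)    # pressure
u = un.copy()

# u-momentum update on the interior points, term by term as in the equation above.
u[1:-1, 1:-1] = (un[1:-1, 1:-1] -
                 un[1:-1, 1:-1] * dt / dx * (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
                 vn[1:-1, 1:-1] * dt / dy * (un[1:-1, 1:-1] - un[0:-2, 1:-1]) -
                 dt / (2 * rho * dx) * (p[1:-1, 2:] - p[1:-1, 0:-2]) +
                 nu * (dt / dx**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
                       dt / dy**2 * (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])))
```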
Discretizing the $v$-momentum equation, we get:
$$
\begin{split}
&\frac{v_{i,j}^{n+1}-v_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i,j-1}^{n}}{\Delta y} = \\
& \qquad -\frac{1}{\rho}\frac{p_{i,j+1}^{n}-p_{i,j-1}^{n}}{2\Delta y}
+\nu\left(\frac{v_{i+1,j}^{n}-2v_{i,j}^{n}+v_{i-1,j}^{n}}{\Delta x^2}+\frac{v_{i,j+1}^{n}-2v_{i,j}^{n}+v_{i,j-1}^{n}}{\Delta y^2}\right)
\end{split}
$$
Rearranging the terms, the momentum equation in the $v$ direction:
$$
\begin{split}
v_{i,j}^{n+1} = v_{i,j}^{n} & - u_{i,j}^{n} \frac{\Delta t}{\Delta x} \left(v_{i,j}^{n}-v_{i-1,j}^{n}\right) - v_{i,j}^{n} \frac{\Delta t}{\Delta y} \left(v_{i,j}^{n}-v_{i,j-1}^{n}\right) \\
& - \frac{\Delta t}{\rho 2\Delta y} \left(p_{i,j+1}^{n}-p_{i,j-1}^{n}\right) \\
& + \nu \left(\frac{\Delta t}{\Delta x^2} \left(v_{i+1,j}^{n}-2v_{i,j}^{n}+v_{i-1,j}^{n}\right) + \frac{\Delta t}{\Delta y^2} \left(v_{i,j+1}^{n}-2v_{i,j}^{n}+v_{i,j-1}^{n}\right)\right)
\end{split}
$$
Finally, the discretized pressure-Poisson equation can be written as:
$$
\begin{split}
& \frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2}+\frac{p_{i,j+1}^{n}-2p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2} = \\
& \qquad \rho \left[ \frac{1}{\Delta t}\left(\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}+\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right) -\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x} - 2\frac{u_{i,j+1}-u_{i,j-1}}{2\Delta y}\frac{v_{i+1,j}-v_{i-1,j}}{2\Delta x} - \frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right]
\end{split}
$$
Now, we rearrange the pressure-Poisson equation:
$$
\begin{split}
p_{i,j}^{n} = & \frac{\left(p_{i+1,j}^{n}+p_{i-1,j}^{n}\right) \Delta y^2 + \left(p_{i,j+1}^{n}+p_{i,j-1}^{n}\right) \Delta x^2}{2\left(\Delta x^2+\Delta y^2\right)} \\
& -\frac{\rho\Delta x^2\Delta y^2}{2\left(\Delta x^2+\Delta y^2\right)} \\
& \times \left[\frac{1}{\Delta t}\left(\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}+\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right)-\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x} -2\frac{u_{i,j+1}-u_{i,j-1}}{2\Delta y}\frac{v_{i+1,j}-v_{i-1,j}}{2\Delta x}-\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right]
\end{split}
$$
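In the solver, this rearranged form is applied as repeated Jacobi-style sweeps. A minimal standalone sketch of one sweep follows; here `b` stands for $\rho$ times the bracketed source term (as computed by `build_up_b` in the code below), and the tiny random grid is only for illustration. The full `pressure_poisson` function repeats the sweep `nit` times and re-imposes the pressure boundary conditions after each pass.
```
import numpy as np

nx = ny = 5
dx = dy = 2 / (nx - 1)
p = np.zeros((ny, nx))        # pressure iterate
b = np.random.rand(ny, nx)    # rho times the bracketed source term

# One Jacobi sweep: interior pressures from the previous iterate pn and the source b.
pn = p.copy()
p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                  (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                 (2 * (dx**2 + dy**2)) -
                 dx**2 * dy**2 / (2 * (dx**2 + dy**2)) * b[1:-1, 1:-1])
```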
The initial condition is $u, v, p = 0$ everywhere, and the boundary conditions are:
$u=1$ at $y=2$ (the "lid");
$u, v=0$ on the other boundaries;
$\frac{\partial p}{\partial y}=0$ at $y=0$;
$p=0$ at $y=2$
$\frac{\partial p}{\partial x}=0$ at $x=0,2$
```
import numpy
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
nx = 41
ny = 41
nt = 500
nit = 50
c = 1
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
x = numpy.linspace(0, 2, nx)
y = numpy.linspace(0, 2, ny)
X, Y = numpy.meshgrid(x, y)
rho = 1
nu = .1
dt = .001
u = numpy.zeros((ny, nx))
v = numpy.zeros((ny, nx))
p = numpy.zeros((ny, nx))
b = numpy.zeros((ny, nx))
def build_up_b(b, rho, dt, u, v, dx, dy):
    b[1:-1, 1:-1] = (rho * (1 / dt *
                    ((u[1:-1, 2:] - u[1:-1, 0:-2]) /
                     (2 * dx) + (v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy)) -
                    ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 -
                    2 * ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) *
                         (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx)) -
                    ((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))
    return b
def pressure_poisson(p, dx, dy, b):
    pn = numpy.empty_like(p)
    pn = p.copy()
    for q in range(nit):
        pn = p.copy()
        p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                          (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                         (2 * (dx**2 + dy**2)) -
                         dx**2 * dy**2 / (2 * (dx**2 + dy**2)) *
                         b[1:-1, 1:-1])
        p[:, -1] = p[:, -2]  # dp/dx = 0 at x = 2
        p[0, :] = p[1, :]    # dp/dy = 0 at y = 0
        p[:, 0] = p[:, 1]    # dp/dx = 0 at x = 0
        p[-1, :] = 0         # p = 0 at y = 2
    return p
def cavity_flow(nt, u, v, dt, dx, dy, p, rho, nu):
    un = numpy.empty_like(u)
    vn = numpy.empty_like(v)
    b = numpy.zeros((ny, nx))
    for n in range(nt):
        un = u.copy()
        vn = v.copy()
        b = build_up_b(b, rho, dt, u, v, dx, dy)
        p = pressure_poisson(p, dx, dy, b)
        u[1:-1, 1:-1] = (un[1:-1, 1:-1] -
                         un[1:-1, 1:-1] * dt / dx *
                         (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
                         vn[1:-1, 1:-1] * dt / dy *
                         (un[1:-1, 1:-1] - un[0:-2, 1:-1]) -
                         dt / (2 * rho * dx) * (p[1:-1, 2:] - p[1:-1, 0:-2]) +
                         nu * (dt / dx**2 *
                               (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
                               dt / dy**2 *
                               (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])))
        v[1:-1, 1:-1] = (vn[1:-1, 1:-1] -
                         un[1:-1, 1:-1] * dt / dx *
                         (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
                         vn[1:-1, 1:-1] * dt / dy *
                         (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -
                         dt / (2 * rho * dy) * (p[2:, 1:-1] - p[0:-2, 1:-1]) +
                         nu * (dt / dx**2 *
                               (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
                               dt / dy**2 *
                               (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))
        u[0, :] = 0
        u[:, 0] = 0
        u[:, -1] = 0
        u[-1, :] = 1    # set velocity on cavity lid equal to 1
        v[0, :] = 0
        v[-1, :] = 0
        v[:, 0] = 0
        v[:, -1] = 0
    return u, v, p
u = numpy.zeros((ny, nx))
v = numpy.zeros((ny, nx))
p = numpy.zeros((ny, nx))
b = numpy.zeros((ny, nx))
nt = 700
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, nu)
fig = pyplot.figure(figsize=(11,7), dpi=100)
# plotting the pressure field as a contour
pyplot.contourf(X, Y, p, alpha=0.5, cmap=cm.viridis)
pyplot.colorbar()
# plotting the pressure field outlines
pyplot.contour(X, Y, p, cmap=cm.viridis)
# plotting velocity field
pyplot.quiver(X[::2, ::2], Y[::2, ::2], u[::2, ::2], v[::2, ::2])
pyplot.xlabel('X')
pyplot.ylabel('Y');
fig = pyplot.figure(figsize=(11, 7), dpi=100)
pyplot.contourf(X, Y, p, alpha=0.5, cmap=cm.viridis)
pyplot.colorbar()
pyplot.contour(X, Y, p, cmap=cm.viridis)
pyplot.streamplot(X, Y, u, v)
pyplot.xlabel('X')
pyplot.ylabel('Y');
```
# MLP GenCode
Wen et al 2019 used DNN to distinguish GenCode mRNA/lncRNA.
Based on K-mer frequencies, K={1,2,3}, they reported 99% accuracy.
Their CNN used 2 Conv2D layers of 32 filters of width 3x3, max pool 2x2, 25% drop, dense 128.
Can we reproduce that with MLP layers instead of CNN?
Extract features as list of K-mer frequencies for K={1,2,3}.
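As a concrete illustration of the feature representation: for K={1,2,3} there are 4 + 16 + 64 = 84 possible A/C/G/T k-mers, so each transcript becomes an 84-dimensional frequency vector. The sketch below shows that counting in plain Python; the notebook itself uses the project's `KmerTools` helper (loaded further down), whose exact API may differ from this toy version.
```
from itertools import product

def kmer_frequencies(seq, max_k=3, alphabet='ACGT'):
    """Frequencies of all k-mers for k = 1..max_k, flattened into one list (4+16+64 = 84 values)."""
    freqs = []
    for k in range(1, max_k + 1):
        kmers = [''.join(p) for p in product(alphabet, repeat=k)]
        counts = {km: 0 for km in kmers}
        total = max(len(seq) - k + 1, 1)
        for i in range(len(seq) - k + 1):
            window = seq[i:i + k]
            if window in counts:   # skip windows containing N or other symbols
                counts[window] += 1
        freqs.extend(counts[km] / total for km in kmers)
    return freqs

print(len(kmer_frequencies('ATGGCGTACGTAGC')))   # 84
```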
```
import time
def show_time():
    t = time.time()
    print(time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t)))
show_time()
PC_TRAINS=10000
NC_TRAINS=10000
PC_TESTS=5000
NC_TESTS=5000 # Wen et al 2019 used 8000 and 2000 of each class
PC_LENS=(200,4000)
NC_LENS=(200,4000) # Wen et al 2019 used 250-3500 for lncRNA only
MAX_K = 3
INPUT_SHAPE=(None,84) # 4^3 + 4^2 + 4^1
NEURONS=128
DROP_RATE=0.2
EPOCHS=100
SPLITS=5
FOLDS=5 # make this 5 for serious testing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Flatten,TimeDistributed
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
import sys
IN_COLAB = False
try:
    from google.colab import drive
    IN_COLAB = True
except:
    pass
if IN_COLAB:
    print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
    PATH='/content/drive/'
    #drive.mount(PATH,force_remount=True)  # hardly ever need this
    drive.mount(PATH)  # Google will require login credentials
    DATAPATH=PATH+'My Drive/data/'  # must end in "/"
    import requests
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py')
    with open('GenCodeTools.py', 'w') as f:
        f.write(r.text)
    from GenCodeTools import GenCodeLoader
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py')
    with open('KmerTools.py', 'w') as f:
        f.write(r.text)
    from KmerTools import KmerTools
else:
    print("CoLab not working. On my PC, use relative paths.")
    DATAPATH='data/'  # must end in "/"
    sys.path.append("..")  # append parent dir in order to use sibling dirs
    from SimTools.GenCodeTools import GenCodeLoader
    from SimTools.KmerTools import KmerTools
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
```
## Data Load
Restrict mRNA to those transcripts with a recognized ORF.
```
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
# Full GenCode ver 38 human is 106143 pc + 48752 nc and loads in 7 sec.
# Expect fewer transcripts if special filtering is used.
loader=GenCodeLoader()
loader.set_label(1)
loader.set_check_utr(True)
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
loader.set_label(0)
loader.set_check_utr(False)
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
trivial=False
if trivial:
    print("Trivialize the data...")
    dummy='AAAA'*200
    for i in range(0,len(pcdf)):
        pcdf['sequence']=dummy
    dummy='GGGG'*200
    for i in range(0,len(ncdf)):
        ncdf['sequence']=dummy
show_time()
```
## Data Prep
```
def dataframe_length_filter(df,low_high):
    (low,high)=low_high
    # The pandas query language is strange,
    # but this is MUCH faster than loop & drop.
    return df[ (df['seqlen']>=low) & (df['seqlen']<=high) ]
def dataframe_shuffle(df):
    # The ignore_index option is new in Pandas 1.3.
    # The default (False) replicates the old behavior: shuffle the index too.
    # The new option seems more logical than the old behavior.
    # After shuffling, df.iloc[0] has index == 0.
    # return df.sample(frac=1,ignore_index=True)
    return df.sample(frac=1)  # Use this till CoLab upgrades Pandas
def dataframe_extract_sequence(df):
    return df['sequence'].tolist()
pc_all = dataframe_extract_sequence(
dataframe_shuffle(
dataframe_length_filter(pcdf,PC_LENS)))
nc_all = dataframe_extract_sequence(
dataframe_shuffle(
dataframe_length_filter(ncdf,NC_LENS)))
show_time()
print("PC seqs pass filter:",len(pc_all))
print("NC seqs pass filter:",len(nc_all))
# Garbage collection to reduce RAM footprint
pcdf=None
ncdf=None
# Any portion of a shuffled list is a random selection
pc_train=pc_all[:PC_TRAINS]
nc_train=nc_all[:NC_TRAINS]
# Test sets come from the portion of the shuffled lists after the training examples.
pc_test=pc_all[PC_TRAINS:PC_TRAINS+PC_TESTS]
nc_test=nc_all[NC_TRAINS:NC_TRAINS+NC_TESTS]
# Garbage collection
pc_all=None
nc_all=None
def prepare_x_and_y(seqs1,seqs0):
    len1=len(seqs1)
    len0=len(seqs0)
    labels1=np.ones(len1,dtype=np.int8)
    labels0=np.zeros(len0,dtype=np.int8)
    all_labels = np.concatenate((labels1,labels0))
    seqs1 = np.asarray(seqs1)
    seqs0 = np.asarray(seqs0)
    all_seqs = np.concatenate((seqs1,seqs0),axis=0)
    #return all_seqs,all_labels  # test unshuffled
    # Shuffle sequences and labels in unison so each pair stays aligned.
    X,y = shuffle(all_seqs,all_labels)  # sklearn.utils.shuffle
    return X,y
Xseq,y=prepare_x_and_y(pc_train,nc_train)
show_time()
def seqs_to_kmer_freqs(seqs,max_K):
    tool = KmerTools()  # from SimTools
    empty = tool.make_dict_upto_K(max_K)
    collection = []
    for seq in seqs:
        counts = empty
        counts = tool.update_count_one_K(counts,max_K,seq,True)
        counts = tool.harvest_counts_from_K(counts,max_K)
        fdict = tool.count_to_frequency(counts,max_K)
        freqs = list(fdict.values())
        collection.append(freqs)
    return np.asarray(collection)
Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K)
show_time()
print("X shape",np.shape(Xfrq))
print(type(Xfrq),"of",type(Xfrq[0]),"of",type(Xfrq[0][0]))
print("y shape",np.shape(y))
```
## Neural network
```
def make_DNN():
    dt=np.float32
    print("make_DNN")
    print("input shape:",INPUT_SHAPE)
    dnn = Sequential()
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt))
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt))
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt))
    dnn.add(Dropout(DROP_RATE))
    dnn.add(Dense(1,activation="sigmoid",dtype=dt))
    dnn.compile(optimizer='adam',
                loss=BinaryCrossentropy(from_logits=False),
                metrics=['accuracy'])  # add to default metrics=loss
    dnn.build(input_shape=INPUT_SHAPE)
    return dnn
model = make_DNN()
print(model.summary())
def do_cross_validation(X,y):
    cv_scores = []
    fold=0
    mycallbacks = [ModelCheckpoint(
        filepath=MODELPATH, save_best_only=True,
        monitor='val_accuracy', mode='max')]
    splitter = KFold(n_splits=SPLITS)  # this does not shuffle
    for train_index,valid_index in splitter.split(X):
        if fold < FOLDS:
            fold += 1
            X_train=X[train_index]  # inputs for training
            y_train=y[train_index]  # labels for training
            X_valid=X[valid_index]  # inputs for validation
            y_valid=y[valid_index]  # labels for validation
            print("MODEL")
            # Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()
            print("FIT")  # model.fit() implements learning
            start_time=time.time()
            history=model.fit(X_train, y_train,
                              epochs=EPOCHS,
                              verbose=1,  # ascii art while learning
                              callbacks=mycallbacks,  # called at end of each epoch
                              validation_data=(X_valid,y_valid))
            end_time=time.time()
            elapsed_time=(end_time-start_time)
            print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
            # print(history.history.keys())  # all these keys will be shown in figure
            pd.DataFrame(history.history).plot(figsize=(8,5))
            plt.grid(True)
            plt.gca().set_ylim(0,1)  # any losses > 1 will be off the scale
            plt.show()
do_cross_validation(Xfrq,y)
# TO DO: run trained model on (pc_test,nc_test)
# and draw the AUC.
# Borrow code from other notebooks.
```
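One possible sketch for the TO DO above: featurize the held-out `pc_test`/`nc_test` sequences the same way as the training data, reload the best checkpoint saved by `ModelCheckpoint`, and plot a ROC curve. This assumes the `MODELPATH` checkpoint exists and reuses the helper functions defined earlier; it is a starting point, not the notebook's finished evaluation.
```
from sklearn.metrics import roc_curve, roc_auc_score
from keras.models import load_model

# Test-set features, built exactly like the training features.
Xseq_test, y_test = prepare_x_and_y(pc_test, nc_test)
Xfrq_test = seqs_to_kmer_freqs(Xseq_test, MAX_K)

# Reload the best model saved during cross-validation and score the test set.
best_model = load_model(MODELPATH)
y_score = best_model.predict(Xfrq_test).ravel()

auc = roc_auc_score(y_test, y_score)
fpr, tpr, _ = roc_curve(y_test, y_score)
plt.plot(fpr, tpr, label='AUC = %.4f' % auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()
```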
<a href="https://colab.research.google.com/github/justin-hsieh/DS-Unit-2-Applied-Modeling/blob/master/assignment_applied_modeling_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science, Unit 2: Predictive Modeling
# Applied Modeling, Module 1
You will use your portfolio project dataset for all assignments this sprint.
## Assignment
Complete these tasks for your project, and document your decisions.
- [ ] Choose your target. Which column in your tabular dataset will you predict?
- [ ] Choose which observations you will use to train, validate, and test your model. And which observations, if any, to exclude.
- [ ] Determine whether your problem is regression or classification.
- [ ] Choose your evaluation metric.
- [ ] Begin with baselines: majority class baseline for classification, or mean baseline for regression, with your metric of choice.
- [ ] Begin to clean and explore your data.
- [ ] Choose which features, if any, to exclude. Would some features "leak" information from the future?
## Reading
- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
- [How Shopify Capital Uses Quantile Regression To Help Merchants Succeed](https://engineering.shopify.com/blogs/engineering/how-shopify-uses-machine-learning-to-help-our-merchants-grow-their-business)
- [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), **by Lambda DS3 student** Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video
- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
```
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error,r2_score,mean_squared_error
df = pd.read_csv('openpowerlifting.csv')
drops = ['Squat4Kg', 'Bench4Kg', 'Deadlift4Kg','Country','Place','Squat1Kg',
'Squat2Kg','Squat3Kg','Bench1Kg','Bench2Kg','Bench3Kg','Deadlift1Kg',
'Deadlift2Kg','Deadlift3Kg']
df = df.drop(columns=drops)
#df.dropna(inplace=True)
df.shape
df.head()
df.isna().sum()
df.columns
df['Equipment'].value_counts()
df.Sex.value_counts()
# Mean baselines for each candidate regression target.
# Drop missing values per column first; the metric functions fail on NaNs.
squat = df['Best3SquatKg'].dropna()
squat_mean = [squat.mean()] * len(squat)
print(mean_absolute_error(squat, squat_mean))
print(r2_score(squat, squat_mean))
bench = df['Best3BenchKg'].dropna()
bench_mean = [bench.mean()] * len(bench)
print(mean_absolute_error(bench, bench_mean))
print(r2_score(bench, bench_mean))
deadlift = df['Best3DeadliftKg'].dropna()
deadlift_mean = [deadlift.mean()] * len(deadlift)
print(mean_absolute_error(deadlift, deadlift_mean))
print(r2_score(deadlift, deadlift_mean))
```
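As a possible next step for the train/validate/test task in the assignment list above, here is a minimal sketch of a random split with one candidate target and a few simple features. The target choice, feature list, and split fractions are assumptions for illustration, not decisions this notebook has committed to.
```
from sklearn.model_selection import train_test_split

# Example only: one candidate target and a handful of simple features.
target = 'Best3DeadliftKg'
features = ['Sex', 'Equipment', 'BodyweightKg', 'Age']
example = df.dropna(subset=[target, 'BodyweightKg', 'Age']).copy()

X = pd.get_dummies(example[features])
y = example[target]

# Roughly 64% train / 16% validation / 20% test.
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, test_size=0.20, random_state=42)
print(X_train.shape, X_val.shape, X_test.shape)
```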
# Assignment 3 - basic classifiers
Math practice and coding application for main classifiers introduced in Chapter 3 of the Python machine learning book.
## Weighting
Note that this assignment is more difficult than the previous ones, and thus has a higher weighting 3 and longer duration (3 weeks). Each one of the previous two assignments has a weighting 1.
Specifically, the first 3 assignments contribute to your continuous assessment as follows:
Assignment weights: $w_1 = 1, w_2 = 1, w_3 = 3$
Assignment grades: $g_1, g_2, g_3$
Weighted average: $\frac{1}{\sum_i w_i} \times \sum_i \left(w_i \times g_i \right)$
Future assignments will be added analogously.
# RBF kernel (20 points)
Show that a Gaussian RBF kernel can be expressed as a dot product:
$$
K(\mathbf{x}, \mathbf{y})
= e^\frac{-|\mathbf{x} - \mathbf{y}|^2}{2}
= \phi(\mathbf{x})^T \phi(\mathbf{y})
$$
by spelling out the mapping function $\phi$.
For simplicity
* you can assume both $\mathbf{x}$ and $\mathbf{y}$ are 2D vectors
$
x =
\begin{pmatrix}
x_1 \\
x_2
\end{pmatrix}
, \;
y =
\begin{pmatrix}
y_1 \\
y_2
\end{pmatrix}
$
* we use a scalar unit variance here
even though the proof can be extended for vectors $\mathbf{x}$ $\mathbf{y}$ and general covariance matrices.
Hint: use Taylor series expansion of the exponential function
## Answer
We denote $e^x$ as exp($x$). Since $
\mathbf x =
\begin{pmatrix}
x_1 \\
x_2
\end{pmatrix}
, \;
\mathbf y =
\begin{pmatrix}
y_1 \\
y_2
\end{pmatrix}
$, we have
$$
\begin{align}
K(\mathbf{x}, \mathbf{y}) = \text{exp}(\frac{-||\mathbf{x} - \mathbf{y}||^2}{2}) & = \text{exp}(\frac{-||(x_1-y_1, x_2-y_2)||^2}{2} )\\
& = \text{exp}(\frac{-(x_1-y_1)^2-(x_2-y_2)^2}{2})\\
& = \text{exp}(\frac{-{x_1}^2-{x_2}^2-{y_1}^2-{y_2}^2+2 x_1 y_1+2 x_2 y_2}{2}) \\
& = \text{exp}(\frac{-||\mathbf{x}||^2}{2}) \text{ exp}(\frac{-||\mathbf{y}||^2}{2}) \text{ exp}(x_1 y_1 + x_2 y_2)
\end{align}
$$
<br>By the Taylor series expansion of the exponential about $a=0$, $e^x$ can be expressed as $\sum_{n=0}^{\infty} \frac{x^n}{n!} = 1 + x + \frac{x^2}{2!} + \cdots$ for $x \in \mathbb{R}$. Therefore,<br><br>
$$K(\mathbf{x}, \mathbf{y}) = \text{exp}(\frac{-||\mathbf{x}||^2}{2}) \text{ exp}(\frac{-||\mathbf{y}||^2}{2}) \sum_{n=0}^{\infty} \frac{(x_1 y_1 + x_2 y_2)^n}{n!}$$
<br>By binomial expansion, we have
$$ (x_1 y_1 + x_2 y_2)^n = \sum_{i = 0}^{n} \binom{n}{i} (x_1 y_1)^{n-i} (x_2 y_2)^i = \sum_{i = 0}^{n} \sqrt{\binom{n}{i}} (x_1^{n-i} x_2^i) \sqrt{\binom{n}{i}} (y_1^{n-i} y_2^i)$$
<br>We let $\xi_{n}(\mathbf{x}) = \xi_{n}(x_1, x_2) = \left[\sqrt{\binom{n}{i}} (x_1^{n-i} x_2^i) \right]_{i=0}^{n} = \left[\sqrt{\binom{n}{0}} (x_1^{n} x_2^0), \sqrt{\binom{n}{1}} (x_1^{n-1} x_2^1), ..., \sqrt{\binom{n}{n}} (x_1^{0} x_2^n)\right] \in \mathbb{R}^{n+1}$. <br>
<br>Hence we have <br>
$$ \left\{
\begin{aligned}
\phi(\mathbf{x}) & = \text{exp}(\frac{-||\mathbf{x}||^2}{2}) \left[1, \frac{\xi_{1}(\mathbf{x})}{\sqrt{1!}}, \frac{\xi_{2}(\mathbf{x})}{\sqrt{2!}} , \frac{\xi_{3}(\mathbf{x})}{\sqrt{3!}} , ... \right]^T \\
\phi(\mathbf{y}) & = \text{exp}(\frac{-||\mathbf{y}||^2}{2}) \left[1, \frac{\xi_{1}(\mathbf{y})}{\sqrt{1!}}, \frac{\xi_{2}(\mathbf{y})}{\sqrt{2!}} , \frac{\xi_{3}(\mathbf{y})}{\sqrt{3!}} , ... \right]^T
\end{aligned}
\right.
$$
<br>The mapping function is therefore $\phi(\mathbf{x}) = \text{exp}(\frac{-||\mathbf{x}||^2}{2}) \left[1, \frac{\xi_{1}(\mathbf{x})}{\sqrt{1!}}, \frac{\xi_{2}(\mathbf{x})}{\sqrt{2!}} , \frac{\xi_{3}(\mathbf{x})}{\sqrt{3!}} , ... \right]^T$, where $\xi_{n}(\mathbf{x}) = \left[\sqrt{\binom{n}{0}} (x_1^{n} x_2^0), \sqrt{\binom{n}{1}} (x_1^{n-1} x_2^1), ..., \sqrt{\binom{n}{n}} (x_1^{0} x_2^n)\right]$.
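A quick numerical sanity check of this factorization: truncate the series at a moderate order and compare $\phi(\mathbf{x})^T \phi(\mathbf{y})$ with the closed-form kernel. The truncation order and test vectors below are arbitrary; this is only an illustration of the derivation, not part of the assignment.
```
import numpy as np
from math import comb, factorial, exp

def phi(x, order=20):
    """Truncated feature map for the 2D unit-variance RBF kernel derived above."""
    x1, x2 = x
    feats = []
    for n in range(order + 1):
        xi_n = [np.sqrt(comb(n, i)) * x1**(n - i) * x2**i for i in range(n + 1)]
        feats.extend(v / np.sqrt(factorial(n)) for v in xi_n)
    return exp(-(x1**2 + x2**2) / 2) * np.array(feats)

x = np.array([0.3, -0.7])
y = np.array([1.1, 0.4])
print(exp(-np.sum((x - y)**2) / 2))   # K(x, y) evaluated directly
print(phi(x) @ phi(y))                # phi(x)^T phi(y), truncated series
```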
# Kernel SVM complexity (10 points)
How would the complexity (in terms of number of parameters) of a trained kernel SVM change with the amount of training data, and why?
Note that the answer may depend on the specific kernel used as well as the amount of training data.
Consider specifically the following types of kernels $K(\mathbf{x}, \mathbf{y})$.
* linear:
$$
K\left(\mathbf{x}, \mathbf{y}\right) = \mathbf{x}^T \mathbf{y}
$$
* polynomial with degree $q$:
$$
K\left(\mathbf{x}, \mathbf{y}\right) =
(\mathbf{x}^T\mathbf{y} + 1)^q
$$
* RBF with distance function $D$:
$$
K\left(\mathbf{x}, \mathbf{y} \right) = e^{-\frac{D\left(\mathbf{x}, \mathbf{y} \right)}{2s^2}}
$$
## Answer
For all examples, we assume $\mathbf{x}, \mathbf{y} \in \mathbb{R}^\text{d}$.
### Linear:
For the linear kernel, the mapping function is $\phi(\mathbf{x}) = \mathbf{x}$, which maps $\mathbb{R}^\text{d}$ to $\mathbb{R}^\text{d}$, so the dimensionality of the data is unchanged.<br>
There are no explicit kernel parameters, so the time cost grows linearly with the dimension of the data, and if the amount of data grows $n$-fold the time cost grows by $O(n)$. Neither a change in dimension nor in the amount of data affects the number of parameters.<br>
### Polynomial with degree $q$:
For simplicity we write $1 = x_{d+1} y_{d+1}$. Then
$$K\left(\mathbf{x}, \mathbf{y}\right) =(\mathbf{x}^T\mathbf{y} + 1)^q = (\sum_{i=1}^{d+1} x_i y_i)^q = \sum_{k_1 + k_2 + ... + k_{d+1} = q} \binom{q}{k_1, k_2, ..., k_{d+1}} \prod_{t=1}^{d+1} (x_t y_t)^{k_t} = \sum_{\sum_{i=1}^{d+1} k_i = q} \frac{q!}{\prod_{i=1}^{d+1} k_i!} \prod_{t=1}^{d+1} (x_t y_t)^{k_t}$$
by Multinomial theorem. Therefore the mapping function is
$$\phi(\mathbf{x}) = \left[\sqrt{\frac{q!}{\prod_{i=1}^{d+1} k_i!}} \prod_{t=1}^{d+1} (x_t)^{k_t}\right]_{\sum_{i=1}^{d+1} k_i = q}^T,$$
which maps $\mathbb{R}^\text{d}$ to $\mathbb{R}^\binom{q+(d+1)-1}{(d+1)-1} = \mathbb{R}^\binom{q+d}{d} = \mathbb{R}^\frac{(q+d)!}{q! d!}$, computed using the stars and bars method. <br>
* If $q=1$, only one constant dimension is added, where $x_{d+1} = 1$. In this case the effective dimension is unchanged. <br>
* If $q \ge 2$, then the dimension increases from $d$ to $\binom{q+d}{d}$, where the effective dimension is $\binom{q+d}{d} - 1$ since we always have an $x_{d+1}^q = 1$ term.<br><br>
Now we consider the parameters.
* For each entry in $K\left(\mathbf{x}, \mathbf{y}\right)$, we have a parameter $\frac{q!}{\prod_{i=1}^{d+1} k_i!} = \binom{q}{k_1, k_2, ..., k_{d+1}}$, which takes $O(q \prod_{t=1}^{d+1} k_t)$ to compute by brute force. Given the dimension analysis above, the larger the dimension or the degree $q$, the more parameters and the greater the time complexity of the kernel function.<br>
* However, since $q$ and the $k_i$ are identical for any set of input data, increasing the amount of data does not change the number of parameters to be calculated (they only need to be calculated once), although multiplying them into each term of $\mathbf{x}$ and $\mathbf{y}$ takes constant time per example.<br>
* If we compute $\mathbf{x}^T \mathbf{y} + 1$ first and then apply the power, the parameter analysis is the same as for the linear kernel, except that we need an extra power of $q$ after $\mathbf{x}^T \mathbf{y} + 1$.<br>
### RBF with distance function $D$:
Assume $D(\mathbf{x}, \mathbf{y}) = \omega(\mathbf{x}) \omega(\mathbf{y})$. For $K(\mathbf{x}, \mathbf{y} ) = e^{-\frac{D\left(\mathbf{x}, \mathbf{y} \right)}{2s^2}} = e^{-\frac{\omega(\mathbf{x}) \omega(\mathbf{y})}{2s^2}} = e^{-\frac{1}{2s^2} \omega(\mathbf{x}) \omega(\mathbf{y})}$, we have the mapping function as
$$
\phi(\mathbf{x}) = e^{-\frac{1}{4s^2}} \left[1, \frac{\omega(\mathbf{x})}{\sqrt {1!}}, \frac{\omega(\mathbf{x})^2}{\sqrt {2!}}, \frac{\omega(\mathbf{x})^3}{\sqrt {3!}}, ... \right]^T,
$$
which maps $\mathbb{R}^\text{d}$ to $\mathbb{R}^\infty$. That is, RBF essentially projects the original vector to an infinite dimensional space.<br><br>
Now we consider the parameters.
* We first clarify that although RBF maps $\mathbb{R}^\text{d}$ to $\mathbb{R}^\infty$, the dimension actually used is determined by the explicit function $K(\mathbf{x}, \mathbf{y})$, because we do not have to materialize the mapping function; we can compute the kernel $K$ directly.<br>
* Since computing $\text{exp}(\cdot)$ is a simple scalar operation, the main cost with respect to the dimension of the data lies in the distance function $D(\mathbf{x}, \mathbf{y})$, which varies with the distance function we choose. For simple metrics such as the taxicab or Euclidean distance, the cost is relatively small; for more complex metrics the time cost can be large.<br>
* Also, if all input data share the same set of parameters, the parameters only need to be computed once and can be applied to each input in constant time. However, if the parameters change with the input for a specific kernel, then the number of parameters, and hence the time complexity, grows linearly with the amount of data.
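One way to see the practical effect on a trained model (an illustrative sketch, not part of the written answer, assuming scikit-learn is available) is to count the support vectors a kernel SVM keeps as the training set grows: the RBF model typically retains more support vectors with more data, while a linear SVM can always be collapsed into a single weight vector of fixed dimension $d$.
```
# Illustrative experiment: number of support vectors vs. amount of training data.
from sklearn.svm import SVC
from sklearn.datasets import make_classification

for n in [200, 500, 1000, 2000]:
    X, y = make_classification(n_samples=n, n_features=10, random_state=0)
    linear_svs = SVC(kernel='linear', C=1.0).fit(X, y).n_support_.sum()
    rbf_svs = SVC(kernel='rbf', C=1.0).fit(X, y).n_support_.sum()
    print(f'n={n:5d}  linear SVs: {linear_svs:4d}  rbf SVs: {rbf_svs:4d}')
```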
# Gaussian density Bayes (30 points)
$$
p\left(\Theta | \mathbf{X}\right)
=
\frac{p\left(\mathbf{X} | \Theta\right) p\left(\Theta\right)}{p\left(\mathbf{X}\right)}
$$
Assume both the likelihood and prior have Gaussian distributions:
$$
\begin{align}
p(\mathbf{X} | \Theta)
&=
\frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)
\\
p(\Theta)
&=
\frac{1}{\sqrt{2\pi}\sigma_0} \exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right)
\end{align}
$$
Derive $\Theta$ from the dataset $\mathbf{X}$ via the following methods:
### ML (maximum likelihood) estimation
$$
\Theta_{ML} = argmax_{\Theta} p(\mathbf{X} | \Theta)
$$
### MAP estimation
$$
\begin{align}
\Theta_{MAP}
&=
argmax_{\Theta} p(\Theta | \mathbf{X})
\\
&=
argmax_{\Theta} p(\mathbf{X} | \Theta) p(\Theta)
\end{align}
$$
### Bayes estimation
$$
\begin{align}
\Theta_{Bayes}
&=
E(\Theta | \mathbf{X})
\\
&=
\int \Theta p(\Theta | \mathbf{X}) d\Theta
\end{align}
$$
## Answer
### 1. ML (maximum likelihood) estimation
To maximize $p(\mathbf{X} | \Theta)$, we set $\nabla_\Theta p(\mathbf{X} | \Theta) = \nabla_\Theta \left(\frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)\right) = 0$. <br>
By the chain rule we get<br>
$$
\begin{align}
\nabla_\Theta p(\mathbf{X} | \Theta) & = \frac{1}{(2\pi)^{N/2}\sigma^N} \frac{\partial \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)}{\partial \left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)} \frac{\partial \left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)}{\partial \Theta}\\
0 & = \frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \left( - \frac{\sum_{t=1}^N -2(\mathbf{x}^{(t)} - \Theta)}{2\sigma^2} \right) \\
\end{align}
$$
Note that $p(\mathbf{X} | \Theta) = \frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)$ is non-zero, because $e^y$ is always positive for $y \in \mathbb{R}$, and the constant $\frac{1}{(2\pi)^{N/2}\sigma^N}$ is positive. <br>
Then we have<br>
$$
\begin{align}
0 & = \frac{\sum_{t=1}^N 2(\mathbf{x}^{(t)} - \Theta)}{2\sigma^2} = \frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)}{\sigma^2} \\
0 & = \frac{(\sum_{t=1}^N \mathbf{x}^{(t)}) - N\Theta}{\sigma^2} \\
N\Theta & = \sum_{t=1}^N \mathbf{x}^{(t)} \\
\Theta & = \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}
\end{align}
$$
Hence $$\Theta_{ML} = argmax_{\Theta} p(\mathbf{X} | \Theta) = \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}.$$
### 2. MAP estimation
To maximize $p(\mathbf{X} | \Theta) p(\Theta)$, we set $\nabla_\Theta (p(\mathbf{X} | \Theta) p(\Theta)) = p(\Theta)\nabla_\Theta p(\mathbf{X} | \Theta) + p(\mathbf{X} | \Theta)\nabla_\Theta p(\Theta) = 0$. <br><br>
We get $p(\Theta)\nabla_\Theta p(\mathbf{X} | \Theta) = - p(\mathbf{X} | \Theta)\nabla_\Theta p(\Theta) $ and therefore <br><br>
$$
\begin{align}
\frac{1}{\sqrt{2\pi}\sigma_0} \exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right) \nabla_\Theta \left(\frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)\right) & \\
= - \frac{1}{(2\pi)^{N/2}\sigma^N} \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \nabla_\Theta & \left(\frac{1}{\sqrt{2\pi}\sigma_0} \exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right)\right)
\end{align}
$$
<br><br>By removing the constants we get<br><br>
$$
\begin{align}
\exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right) \nabla_\Theta \left(\exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)\right) & = - \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \nabla_\Theta \left(\exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right)\right) \\
\end{align}
$$
<br>By the chain rule we get<br><br>
$$
\begin{align}
\exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right) \frac{\partial \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)}{\partial \left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)} \frac{\partial \left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right)}{\partial \Theta} & = - \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \frac{\partial \exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right)}{\partial \left(-\frac{(\Theta - \mu_0)^2}{2\sigma_0^2}\right)} \frac{\partial \left(-\frac{(\Theta - \mu_0)^2}{2\sigma_0^2}\right)}{\partial \Theta}\\
\exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right) \exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \left( - \frac{\sum_{t=1}^N -2(\mathbf{x}^{(t)} - \Theta)}{2\sigma^2} \right) & = -\exp\left(-\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)^2}{2\sigma^2}\right) \exp\left( -\frac{(\Theta - \mu_0)^2}{2\sigma_0^2} \right) \left(-\frac{2(\Theta - \mu_0)}{2\sigma_0^2}\right)
\end{align}
$$
<br>Since $e^y$ is always positive for $y \in \mathbb{R}$, the first two terms of both sides are non-zero. Dividing them on both sides we have<br>
$$
\begin{align}
- \frac{\sum_{t=1}^N -2(\mathbf{x}^{(t)} - \Theta)}{2\sigma^2} & = \frac{2(\Theta - \mu_0)}{2\sigma_0^2}\\
\frac{\sum_{t=1}^N (\mathbf{x}^{(t)} - \Theta)}{\sigma^2} & = \frac{\Theta -\mu_0}{\sigma_0^2}\\
\frac{(\sum_{t=1}^N \mathbf{x}^{(t)}) - N\Theta}{\sigma^2} & = \frac{\Theta -\mu_0}{\sigma_0^2}\\
(\sum_{t=1}^N \mathbf{x}^{(t)})\sigma_0^2 - N\Theta\sigma_0^2 & = \sigma^2 \Theta - \sigma^2 \mu_0 \\
(\sum_{t=1}^N \mathbf{x}^{(t)})\sigma_0^2 + \sigma^2 \mu_0 & = (N\sigma_0^2 + \sigma^2)\Theta \\
\end{align}
$$
<br>Hence <br>
$$
\begin{align}
\Theta_{MAP} & = argmax_{\Theta} p(\Theta | \mathbf{X}) \\
& = argmax_{\Theta} p(\mathbf{X} | \Theta) p(\Theta) \\
& = \frac{(\sum_{t=1}^N \mathbf{x}^{(t)})\sigma_0^2 + \sigma^2 \mu_0}{N\sigma_0^2 + \sigma^2} \\
\end{align}
$$<br>
Furthermore, since $\Theta_{ML} = \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}$, we have<br>
$$\Theta_{MAP} = \frac{(\sum_{t=1}^N \mathbf{x}^{(t)})\sigma_0^2 + \sigma^2 \mu_0}{N\sigma_0^2 + \sigma^2}
= \frac{N\Theta_{ML}\sigma_0^2 + \sigma^2 \mu_0}{N\sigma_0^2 + \sigma^2} = \frac{N/\sigma^2}{N/\sigma^2 + 1/\sigma_0^2} \Theta_{ML} +\frac{1/\sigma_0^2}{N/\sigma^2 + 1/\sigma_0^2} \mu_0$$
### 3. Bayes estimation
For $\Theta_{Bayes} = E(\Theta | \mathbf{X}) = \int \Theta p(\Theta | \mathbf{X}) d\Theta$, since $p(\Theta | \mathbf{X}) = \frac{p(\mathbf{X}| \Theta) p(\Theta)}{p(\mathbf{X})}$ and $p(\mathbf{X})$ is a constant for given $\mathbf{X}$, our interest is in $p(\mathbf{X}| \Theta) p(\Theta)$. We denote $\phi(x, \mu, \sigma^2)$ as the normal density with argument $x$, mean $\mu$ and variance $\sigma^2$. Then $$p(\mathbf{X}| \Theta) p(\Theta) = \phi(\Theta, \mu_0, \sigma_0^2) \prod_{i=1}^N \phi(\Theta, \mathbf{x}^{(i)}, \sigma^2).$$
Notice that $$\phi(x, \mu_1, \sigma_1^2) \phi(x, \mu_2, \sigma_2^2) = \phi(\mu_1, \mu_2, \sigma_1^2 + \sigma_2^2) \phi(x, \mu_i, \sigma_i^2)$$ where $$\mu_i = \frac{1 / \sigma_1^2}{1/ \sigma_1^2 + 1/ \sigma_2^2}\mu_1 + \frac{1 / \sigma_2^2}{1/ \sigma_1^2 + 1/ \sigma_2^2}\mu_2 \text{ and } \sigma_i^2 = \frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2 + \sigma_2^2}.$$<br>
We will prove the formula later on. Using this formula, we have $$\phi(\Theta, \mathbf{x}^{(a)}, \sigma^2) \phi(\Theta, \mathbf{x}^{(b)}, \sigma^2) = \phi(\mathbf{x}^{(a)}, \mathbf{x}^{(b)}, 2\sigma^2) \phi(\Theta, \frac{\mathbf{x}^{(a)}+\mathbf{x}^{(b)}}{2}, \frac{\sigma^2}{2}) = C_0 \phi(\Theta, \frac{\mathbf{x}^{(a)}+\mathbf{x}^{(b)}}{2}, \frac{\sigma^2}{2}),$$ where $C_0$ is some constant since all variables of $\phi(\mathbf{x}^{(a)}, \mathbf{x}^{(b)}, 2\sigma^2)$ are set. Following similar steps we get
$$\prod_{i=1}^N \phi(\Theta, \mathbf{x}^{(i)}, \sigma^2) = C_1 \phi(\Theta, \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}, \frac{\sigma^2}{N}),$$ where $C_1$ is some constant.<br>
Hence, $$p(\mathbf{X}| \Theta) p(\Theta) = C_1 \phi(\Theta, \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}, \frac{\sigma^2}{N}) \phi(\Theta, \mu_0, \sigma_0^2) = C_2 \phi(\Theta, \mu_\text{new}, \sigma_\text{new}^2),$$ where $C_2$ is some constant and by the formula, $\mu_\text{new} = \frac{N/\sigma^2}{N/\sigma^2 + 1/\sigma_0^2} \Theta_{ML} +\frac{1/\sigma_0^2}{N/\sigma^2 + 1/\sigma_0^2} \mu_0 $, where $\Theta_{ML} = \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}.$ <br>
Notice that for a given normal distribution, multiplying the probability density function by a constant will not change its mean value. Therefore the expectation of $p(\Theta | \mathbf{X})$ is exactly the expectation of the non-constant normal distribution part. Hence,
$$
E(\Theta | \mathbf{X}) = \mu_\text{new} = \frac{N/\sigma^2}{N/\sigma^2 + 1/\sigma_0^2} \Theta_{ML} +\frac{1/\sigma_0^2}{N/\sigma^2 + 1/\sigma_0^2} \mu_0 = \Theta_{MAP},
$$
where $\Theta_{ML} = \frac{\sum_{t=1}^N \mathbf{x}^{(t)}}{N}.$ <br>
Finally we want to prove the formula $\phi(x, \mu_1, \sigma_1^2) \phi(x, \mu_2, \sigma_2^2) = \phi(\mu_1, \mu_2, \sigma_1^2 + \sigma_2^2) \phi(x, \mu_i, \sigma_i^2)$:<br><br>
$$
\begin{align}
\phi(x, \mu_1, \sigma_1^2) \phi(x, \mu_2, \sigma_2^2) & = \frac{1}{\sqrt{2\pi}\sigma_1} \exp\left( -\frac{(x - \mu_1)^2}{2\sigma_1^2} \right) \frac{1}{\sqrt{2\pi}\sigma_2} \exp\left( -\frac{(x - \mu_2)^2}{2\sigma_2^2} \right) \\
& = \frac{1}{2\pi \sigma_1 \sigma_2} \exp\left( -\frac{(x - \mu_1)^2}{2\sigma_1^2} - \frac{(x - \mu_2)^2}{2\sigma_2^2}\right) \\
& = \frac{1}{2\pi \sigma_1 \sigma_2} \exp\left( -\frac{(\sigma_1^2+\sigma_2^2) x^2 -2(\mu_1 \sigma_2^2 +\mu_2 \sigma_1^2)x +(\mu_1^2 \sigma_2^2+\mu_2^2 \sigma_1^2)}{2\sigma_1^2 \sigma_2^2} \right)\\
& = \frac{1}{2\pi \sigma_1 \sigma_2} \exp\left( -\frac{x^2 -2\frac{\mu_1 \sigma_2^2 +\mu_2 \sigma_1^2}{\sigma_1^2+\sigma_2^2}x + \frac{\mu_1^2 \sigma_2^2+\mu_2^2 \sigma_1^2}{\sigma_1^2+\sigma_2^2}}{2\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}} \right) \\
& = \frac{1}{2\pi \sigma_1 \sigma_2} \exp\left( -\frac{x^2 -2\frac{\mu_1 \sigma_2^2 +\mu_2 \sigma_1^2}{\sigma_1^2+\sigma_2^2}x + \frac{\mu_1^2 \sigma_2^4+\mu_2^2 \sigma_1^4 + 2\mu_1 \sigma_2^2 \mu_2 \sigma_1^2}{(\sigma_1^2+\sigma_2^2)^2}}{2\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}} \right) \\
& \times \exp\left(\frac{ - (\sigma_1^2+\sigma_2^2)(\mu_1^2 \sigma_2^2+\mu_2^2 \sigma_1^2) + (\mu_1^2 \sigma_2^4+\mu_2^2 \sigma_1^4 + 2\mu_1 \sigma_2^2 \mu_2 \sigma_1^2) }{2 \sigma_1^2 \sigma_2^2 (\sigma_1^2+\sigma_2^2)}\right) \\
& = \frac{1}{2\pi \sqrt{\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}} \sqrt{\sigma_1^2+\sigma_2^2}} \exp\left( -\frac{(x -\frac{\mu_1 \sigma_2^2 +\mu_2 \sigma_1^2}{\sigma_1^2+\sigma_2^2})^2}{2\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}} \right) \exp\left( -\frac{(\mu_1 - \mu_2)^2}{2 (\sigma_1^2 + \sigma_2^2)} \right)\\
& = \frac{1}{\sqrt{2\pi} \sqrt{\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}}} \exp\left( -\frac{(x -\frac{\mu_1 \sigma_2^2 +\mu_2 \sigma_1^2}{\sigma_1^2+\sigma_2^2})^2}{2\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}} \right) \frac{1}{\sqrt{2\pi} \sqrt{\sigma_1^2+\sigma_2^2}} \exp\left( -\frac{(\mu_1 - \mu_2)^2}{2 (\sigma_1^2 + \sigma_2^2)} \right)\\
& = \phi(x, \mu_i, \sigma_i^2) \phi(\mu_1, \mu_2, \sigma_1^2 + \sigma_2^2)\\
\end{align}
$$
where $$\mu_i = \frac{1 / \sigma_1^2}{1/ \sigma_1^2 + 1/ \sigma_2^2}\mu_1 + \frac{1 / \sigma_2^2}{1/ \sigma_1^2 + 1/ \sigma_2^2}\mu_2 \text{ and } \sigma_i^2 = \frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2 + \sigma_2^2}.$$<br>
Hence we complete the proof and we validate that
$$\Theta_{Bayes} = \mu_\text{new} = \frac{N/\sigma^2}{N/\sigma^2 + 1/\sigma_0^2} \Theta_{ML} +\frac{1/\sigma_0^2}{N/\sigma^2 + 1/\sigma_0^2} \mu_0.$$
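The closed-form results can also be verified numerically. The following sketch uses arbitrary illustrative values for $\mu_0$, $\sigma_0$, $\sigma$ and a small synthetic sample, and compares the closed-form estimates against a brute-force posterior evaluated on a grid:
```
# Numerical check of Theta_ML, Theta_MAP and Theta_Bayes (illustrative values only).
import numpy as np

rng = np.random.default_rng(0)
mu0, sigma0, sigma, N = 2.0, 1.5, 1.0, 20
X = rng.normal(3.0, sigma, size=N)

theta_ml = X.mean()
theta_map = (X.sum() * sigma0**2 + sigma**2 * mu0) / (N * sigma0**2 + sigma**2)

# Brute-force posterior on a grid: p(theta | X) proportional to p(X | theta) p(theta)
theta = np.linspace(-5, 10, 200001)
log_post = (-((X[:, None] - theta) ** 2).sum(0) / (2 * sigma**2)
            - (theta - mu0) ** 2 / (2 * sigma0**2))
post = np.exp(log_post - log_post.max())
post /= np.trapz(post, theta)

theta_bayes = np.trapz(theta * post, theta)   # posterior mean
theta_mode = theta[np.argmax(post)]           # posterior mode (MAP)
print(theta_ml, theta_map, theta_bayes, theta_mode)  # MAP and Bayes match the closed form
```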
# Hand-written digit classification (40 points)
In the textbook sample code we applied different scikit-learn classifers for the Iris data set.
In this exercise, we will apply the same set of classifiers over a different data set: hand-written digits.
Please write down the code for different classifiers, choose their hyper-parameters, and compare their performance via the accuracy score as in the Iris dataset.
Which classifier(s) perform(s) the best and worst, and why?
The classifiers include:
* perceptron
* logistic regression
* SVM
* decision tree
* random forest
* KNN
* naive Bayes
The dataset is available as part of scikit learn, as follows.
```
%load_ext watermark
%watermark -a '' -u -d -v -p numpy,pandas,matplotlib,scipy,sklearn
%matplotlib inline
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
```
## Load data
```
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data # training data
y = digits.target # training label
print(X.shape)
print(y.shape)
```
## Visualize data
```
import matplotlib.pyplot as plt
import pylab as pl
num_rows = 4
num_cols = 5
fig, ax = plt.subplots(nrows=num_rows, ncols=num_cols, sharex=True, sharey=True)
ax = ax.flatten()
for index in range(num_rows*num_cols):
img = digits.images[index]
label = digits.target[index]
ax[index].imshow(img, cmap='Greys', interpolation='nearest')
ax[index].set_title('digit ' + str(label))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
```
## Data Preprocessing
Hint: How do you divide the data into training and test sets? Apply other techniques we have learned if needed.
You could take a look at the Iris data set case in the textbook.
```
from sklearn.preprocessing import StandardScaler
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
print ('scikit-learn version: ' + str(Version(sklearn_version)))
# 1. Standardize features by removing the mean and scaling to unit variance
X_std = StandardScaler().fit_transform(X) # fit_transform(X) will fit to data, then transform it.
print ('1. Complete removing the mean and scaling to unit variance.')
# 2. splitting data into 70% training and 30% test data:
split_ratio = 0.3
X_train, X_test, y_train, y_test = train_test_split(X_std, y, test_size=split_ratio, random_state=0)
print('2. Complete splitting with ' + str(y_train.shape[0]) + \
'(' + str(int((1-split_ratio)*100)) +'%) training data and ' + \
str(y_test.shape[0]) + '(' + str(int(split_ratio*100)) +'%) test data.')
```
## Classifier #1 Perceptron
```
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
# Training
ppn = Perceptron(n_iter=800, eta0=0.1, random_state=0)  # note: n_iter was renamed to max_iter in scikit-learn >= 0.21
ppn.fit(X_train, y_train)
# Testing
y_pred = ppn.predict(X_test)
# Results
print('Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #2 Logistic Regression
```
from sklearn.linear_model import LogisticRegression
# Training
lr = LogisticRegression(C=1.0, random_state=0) # we observe that changing C from 0.0001 to 1000 has ignorable effect
lr.fit(X_train, y_train)
# Testing
y_pred = lr.predict(X_test)
# Results
print('Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #3 SVM
```
from sklearn.svm import SVC
# 1. Using linear kernel
# Training
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train, y_train)
# Testing
y_pred = svm.predict(X_test)
# Results
print('1. Using linear kernel:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
# 2. Using rbf kernel
# Training
svm = SVC(kernel='rbf', C=1.0, random_state=0)
svm.fit(X_train, y_train)
# Testing
y_pred = svm.predict(X_test)
# Results
print('2. Using rbf kernel:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #4 Decision Tree
```
from sklearn.tree import DecisionTreeClassifier
# 1. Using entropy criterion
# Training
tree = DecisionTreeClassifier(criterion='entropy', random_state=0)
tree.fit(X_train, y_train)
# Testing
y_pred = tree.predict(X_test)
# Results
print('1. Using entropy criterion:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
# 2. Using Gini criterion
# Training
tree = DecisionTreeClassifier(criterion='gini', random_state=0)
tree.fit(X_train, y_train)
# Testing
y_pred = tree.predict(X_test)
# Results
print('2. Using Gini criterion:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #5 Random Forest
```
from sklearn.ensemble import RandomForestClassifier
# 1. Using entropy criterion
# Training
forest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)
forest.fit(X_train, y_train)
# Testing
y_pred = forest.predict(X_test)
# Results
print('1. Using entropy criterion:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
# 2. Using Gini criterion
# Training
forest = RandomForestClassifier(criterion='gini', n_estimators=10, random_state=1, n_jobs=2)
forest.fit(X_train, y_train)
# Testing
y_pred = forest.predict(X_test)
# Results
print('2. Using Gini criterion:')
print(' Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print(' Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #6 KNN
```
from sklearn.neighbors import KNeighborsClassifier
# Training
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train, y_train)
# Testing
y_pred = knn.predict(X_test)
# Results
print('Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
## Classifier #7 Naive Bayes
```
from sklearn.naive_bayes import GaussianNB
# Training
gnb = GaussianNB()
gnb.fit(X_train, y_train)
# Testing
y_pred = gnb.predict(X_test)
# Results
print('Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))
```
Hence in this example, the accuracy of the predictions is ranked as (in descending order):
* SVM (rbf kernel) - 0.985 <br>
* SVM (linear kernel) - 0.974 <br>
* KNN - 0.972 <br>
* Logistic Regression - 0.954 <br>
* Random forest (Gini criterion) - 0.950 <br>
* Random forest (entropy criterion) - 0.941 <br>
* Perceptron - 0.928 <br>
* Decision Tree (entropy criterion) - 0.869 <br>
* Decision Tree (Gini criterion) - 0.857 <br>
* Naive Bayes - 0.772 <br>
The best is SVM (using the rbf kernel), because SVM maximizes the margin to the nearest samples (the support vectors), which is an effective way of classifying spatially separated samples. SVM is also more robust against outliers and offers slack variables to handle samples that are not linearly separable. Moreover, with the rbf kernel the data is implicitly mapped to an infinite-dimensional space, so the samples are very likely to be separable by some hyperplane.
The worst is Naive Bayes, because it assumes that all features are conditionally independent given the class, which leads to high bias and low variance. When the input features are far from independent, the assumption fails and the accuracy suffers. Naive Bayes also does not handle outliers or noise well, so atypical samples may be misclassified.
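The ranking above was read off from the individual cells; a compact way to reproduce it in one place (assuming `X_train`, `X_test`, `y_train`, `y_test` from the preprocessing cell are still in scope; exact numbers may vary slightly across scikit-learn versions) is:
```
# Refit every classifier in a loop and sort by test accuracy.
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

classifiers = {
    'SVM (rbf kernel)': SVC(kernel='rbf', C=1.0, random_state=0),
    'SVM (linear kernel)': SVC(kernel='linear', C=1.0, random_state=0),
    'KNN': KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski'),
    'Logistic Regression': LogisticRegression(C=1.0, random_state=0),
    'Random forest (Gini)': RandomForestClassifier(criterion='gini', n_estimators=10, random_state=1),
    'Random forest (entropy)': RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1),
    'Perceptron': Perceptron(eta0=0.1, random_state=0),
    'Decision tree (entropy)': DecisionTreeClassifier(criterion='entropy', random_state=0),
    'Decision tree (Gini)': DecisionTreeClassifier(criterion='gini', random_state=0),
    'Naive Bayes': GaussianNB(),
}
scores = {name: accuracy_score(y_test, clf.fit(X_train, y_train).predict(X_test))
          for name, clf in classifiers.items()}
for name, acc in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(f'{name}: {acc:.3f}')
```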
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
#export
from fastai import *
from fastai.vision import *
```
## Data
```
PATH = Path('../data/coco')
ANNOT_PATH = PATH/'annotations'
train_ds = ObjectDetectDataset.from_json(PATH/'train2017', ANNOT_PATH/'train_sample.json')
tfms = get_transforms()
train_tds = DatasetTfm(train_ds, tfms[0], tfm_y=True, size=224)
x,y = train_tds[5]
x.show(y=y, classes=train_ds.classes, figsize=(6,4))
size = 224
tfms = ([flip_lr(p=0.5), crop_pad(size=size)], [crop_pad(size=size)])
train_tds = DatasetTfm(train_ds, tfms[0], tfm_y=True, size=size, padding_mode='zeros', do_crop=False)
x,y = train_tds[0]
x.show(y=y, classes=train_ds.classes, figsize=(6,4))
y.data
x.size
#export
def bb_pad_collate(samples:BatchSamples, pad_idx:int=0, pad_first:bool=True) -> Tuple[FloatTensor, Tuple[LongTensor, LongTensor]]:
"Function that collect samples and adds padding."
max_len = max([len(s[1].data[1]) for s in samples])
bboxes = torch.zeros(len(samples), max_len, 4)
labels = torch.zeros(len(samples), max_len).long() + pad_idx
imgs = []
for i,s in enumerate(samples):
imgs.append(s[0].data[None])
bbs, lbls = s[1].data
bboxes[i,-len(lbls):] = bbs
labels[i,-len(lbls):] = lbls
return torch.cat(imgs,0), (bboxes,labels)
train_dl = DataLoader(train_tds, 64, shuffle=False, collate_fn=bb_pad_collate)
def show_sample(dl, rows, start=0):
x,y = next(iter(dl))
x = x[start:start+rows*rows].cpu()
_,axs = plt.subplots(rows,rows,figsize=(9,9))
for i, ax in enumerate(axs.flatten()):
img = Image(x[i])
idxs = y[1][start+i].nonzero()[:,0]
if len(idxs) != 0:
bbs,lbls = y[0][start+i][idxs],y[1][start+i][idxs]
h,w = img.size
bbs = ((bbs+1) * torch.tensor([h/2,w/2, h/2, w/2])).long()
bbox = ImageBBox.create(bbs, *img.size, lbls)
img.show(ax=ax, y=bbox, classes=dl.dataset.classes)
else: img.show(ax=ax)
plt.tight_layout()
show_sample(train_dl, 3, 18)
train_ds, valid_ds = ObjectDetectDataset.from_json(PATH/'train2017', ANNOT_PATH/'train_sample.json', valid_pct=0.2)
data = DataBunch.create(train_ds, valid_ds, path=PATH, ds_tfms=tfms, tfms=imagenet_norm, collate_fn=bb_pad_collate,
num_workers=8, bs=16, size=128, tfm_y=True, padding_mode='zeros', do_crop=False)
def show_sample(dl, rows, denorm=None):
x,y = next(iter(dl))
x = x[:rows*rows].cpu()
if denorm: x = denorm(x)
_,axs = plt.subplots(rows,rows,figsize=(9,9))
for i, ax in enumerate(axs.flatten()):
img = Image(x[i])
idxs = y[1][i].nonzero()[:,0]
if len(idxs) != 0:
bbs,lbls = y[0][i][idxs],y[1][i][idxs]
h,w = img.size
bbs = ((bbs.cpu()+1) * torch.tensor([h/2,w/2, h/2, w/2])).long()
bbox = ImageBBox.create(bbs, *img.size, lbls)
img.show(ax=ax, y=bbox, classes=dl.dataset.classes)
else: img.show(ax=ax)
plt.tight_layout()
show_sample(data.train_dl, 3, denorm=imagenet_denorm)
```
## Model
```
#export
def _get_sfs_idxs(sizes:Sizes) -> List[int]:
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sfs_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0])
if feature_szs[0] != feature_szs[1]: sfs_idxs = [0] + sfs_idxs
return sfs_idxs
encoder = create_body(tvm.resnet50(True), -2)
#export
class LateralUpsampleMerge(nn.Module):
def __init__(self, ch, ch_lat, hook):
super().__init__()
self.hook = hook
self.conv_lat = conv2d(ch_lat, ch, ks=1, bias=True)
def forward(self, x):
return self.conv_lat(self.hook.stored) + F.interpolate(x, scale_factor=2)
#export
class RetinaNet(nn.Module):
"Implements RetinaNet from https://arxiv.org/abs/1708.02002"
def __init__(self, encoder:Model, n_classes, final_bias=0., chs=256, n_anchors=9, flatten=True):
super().__init__()
self.n_classes,self.flatten = n_classes,flatten
imsize = (256,256)
sfs_szs,x,hooks = model_sizes(encoder, size=imsize)
sfs_idxs = _get_sfs_idxs(sfs_szs)
self.encoder = encoder
self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
self.p6top7 = nn.Sequential(nn.ReLU(), conv2d(chs, chs, stride=2, bias=True))
self.merges = nn.ModuleList([LateralUpsampleMerge(chs, szs[1], hook)
for szs,hook in zip(sfs_szs[-2:-4:-1], hooks[-2:-4:-1])])
self.smoothers = nn.ModuleList([conv2d(chs, chs, 3, bias=True) for _ in range(3)])
self.classifier = self._head_subnet(n_classes, n_anchors, final_bias, chs=chs)
self.box_regressor = self._head_subnet(4, n_anchors, 0., chs=chs)
def _head_subnet(self, n_classes, n_anchors, final_bias=0., n_conv=4, chs=256):
layers = [conv2d_relu(chs, chs, bias=True) for _ in range(n_conv)]
layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
layers[-1].bias.data.zero_().add_(final_bias)
layers[-1].weight.data.fill_(0)
return nn.Sequential(*layers)
def _apply_transpose(self, func, p_states, n_classes):
if not self.flatten:
sizes = [[p.size(0), p.size(2), p.size(3)] for p in p_states]
return [func(p).permute(0,2,3,1).view(*sz,-1,n_classes) for p,sz in zip(p_states,sizes)]
else:
return torch.cat([func(p).permute(0,2,3,1).contiguous().view(p.size(0),-1,n_classes) for p in p_states],1)
def forward(self, x):
c5 = self.encoder(x)
p_states = [self.c5top5(c5.clone()), self.c5top6(c5)]
p_states.append(self.p6top7(p_states[-1]))
for merge in self.merges: p_states = [merge(p_states[0])] + p_states
for i, smooth in enumerate(self.smoothers[:3]):
p_states[i] = smooth(p_states[i])
return [self._apply_transpose(self.classifier, p_states, self.n_classes),
self._apply_transpose(self.box_regressor, p_states, 4),
[[p.size(2), p.size(3)] for p in p_states]]
encoder = create_body(tvm.resnet50(True), -2)
model = RetinaNet(encoder, 6, -4)
model.eval()
x = torch.randn(2,3,256,256)
output = model(x)
[y.size() for y in output[:2]], output[2]
```
## Anchors
We need to create the corresponding anchors in this order:
```
torch.arange(1,17).long().view(4,4)
#export
def create_grid(size):
"Create a grid of a given `size`."
H, W = size if is_tuple(size) else (size,size)
grid = FloatTensor(H, W, 2)
linear_points = torch.linspace(-1+1/W, 1-1/W, W) if W > 1 else tensor([0.])
grid[:, :, 1] = torch.ger(torch.ones(H), linear_points).expand_as(grid[:, :, 0])
linear_points = torch.linspace(-1+1/H, 1-1/H, H) if H > 1 else tensor([0.])
grid[:, :, 0] = torch.ger(linear_points, torch.ones(W)).expand_as(grid[:, :, 1])
return grid.view(-1,2)
```
Convention: (-1.,-1.) to (1.,1.); the first coordinate is y, the second is x (as for the bboxes). -1 is left/top, 1 is right/bottom.
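For instance, a 2x2 grid under this convention looks like:
```
# Each row of the grid is (y, x), with y first; expected output shown below.
create_grid((2, 2))
# tensor([[-0.5000, -0.5000],
#         [-0.5000,  0.5000],
#         [ 0.5000, -0.5000],
#         [ 0.5000,  0.5000]])
```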
```
#export
def show_anchors(ancs, size):
_,ax = plt.subplots(1,1, figsize=(5,5))
ax.set_xticks(np.linspace(-1,1, size[1]+1))
ax.set_yticks(np.linspace(-1,1, size[0]+1))
ax.grid()
ax.scatter(ancs[:,1], ancs[:,0]) #y is first
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xlim(-1,1)
ax.set_ylim(1,-1) #-1 is top, 1 is bottom
for i, (x, y) in enumerate(zip(ancs[:, 1], ancs[:, 0])): ax.annotate(i, xy = (x,y))
size = (4,4)
show_anchors(create_grid(size), size)
#export
def create_anchors(sizes, ratios, scales, flatten=True):
"Create anchor of `sizes`, `ratios` and `scales`."
aspects = [[[s*math.sqrt(r), s*math.sqrt(1/r)] for s in scales] for r in ratios]
aspects = torch.tensor(aspects).view(-1,2)
anchors = []
for h,w in sizes:
#4 here to have the anchors overlap.
sized_aspects = 4 * (aspects * torch.tensor([2/h,2/w])).unsqueeze(0)
base_grid = create_grid((h,w)).unsqueeze(1)
n,a = base_grid.size(0),aspects.size(0)
ancs = torch.cat([base_grid.expand(n,a,2), sized_aspects.expand(n,a,2)], 2)
anchors.append(ancs.view(h,w,a,4))
return torch.cat([anc.view(-1,4) for anc in anchors],0) if flatten else anchors
ratios = [1/2,1,2]
#scales = [1,2**(-1/3), 2**(-2/3)]
scales = [1,2**(1/3), 2**(2/3)]
sizes = [(2**i,2**i) for i in range(5)]
sizes.reverse()
anchors = create_anchors(sizes, ratios, scales)
anchors.size()
#[anc.size() for anc in anchors]
import matplotlib.cm as cmx
import matplotlib.colors as mcolors
from cycler import cycler
def get_cmap(N):
color_norm = mcolors.Normalize(vmin=0, vmax=N-1)
return cmx.ScalarMappable(norm=color_norm, cmap='Set3').to_rgba
num_color = 12
cmap = get_cmap(num_color)
color_list = [cmap(float(x)) for x in range(num_color)]
def draw_outline(o, lw):
o.set_path_effects([patheffects.Stroke(
linewidth=lw, foreground='black'), patheffects.Normal()])
def draw_rect(ax, b, color='white'):
patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
draw_outline(patch, 4)
def draw_text(ax, xy, txt, sz=14, color='white'):
text = ax.text(*xy, txt,
verticalalignment='top', color=color, fontsize=sz, weight='bold')
draw_outline(text, 1)
def show_boxes(boxes):
"Show the `boxes` (size by 4)"
_, ax = plt.subplots(1,1, figsize=(5,5))
ax.set_xlim(-1,1)
ax.set_ylim(1,-1)
for i, bbox in enumerate(boxes):
bb = bbox.numpy()
rect = [bb[1]-bb[3]/2, bb[0]-bb[2]/2, bb[3], bb[2]]
draw_rect(ax, rect, color=color_list[i%num_color])
draw_text(ax, [bb[1]-bb[3]/2,bb[0]-bb[2]/2], str(i), color=color_list[i%num_color])
show_boxes(anchors[-9:])
#export
def activ_to_bbox(acts, anchors, flatten=True):
"Extrapolate bounding boxes on anchors from the model activations."
if flatten:
acts.mul_(acts.new_tensor([[0.1, 0.1, 0.2, 0.2]]))
centers = anchors[...,2:] * acts[...,:2] + anchors[...,:2]
sizes = anchors[...,2:] * torch.exp(acts[...,2:])  # last two activations drive the sizes
return torch.cat([centers, sizes], -1)
else: return [activ_to_bbox(act,anc) for act,anc in zip(acts, anchors)]
size=(3,4)
anchors = create_grid(size)
anchors = torch.cat([anchors, torch.tensor([2/size[0],2/size[1]]).expand_as(anchors)], 1)
activations = 0.1 * torch.randn(size[0]*size[1], 4)
bboxes = activ_to_bbox(activations, anchors)
show_boxes(bboxes)
#export
def cthw2tlbr(boxes):
"Convert center/size format `boxes` to top/left bottom/right corners."
top_left = boxes[:,:2] - boxes[:,2:]/2
bot_right = boxes[:,:2] + boxes[:,2:]/2
return torch.cat([top_left, bot_right], 1)
#export
def intersection(anchors, targets):
"Compute the sizes of the intersections of `anchors` by `targets`."
ancs, tgts = cthw2tlbr(anchors), cthw2tlbr(targets)
a, t = ancs.size(0), tgts.size(0)
ancs, tgts = ancs.unsqueeze(1).expand(a,t,4), tgts.unsqueeze(0).expand(a,t,4)
top_left_i = torch.max(ancs[...,:2], tgts[...,:2])
bot_right_i = torch.min(ancs[...,2:], tgts[...,2:])
sizes = torch.clamp(bot_right_i - top_left_i, min=0)
return sizes[...,0] * sizes[...,1]
show_boxes(anchors)
targets = torch.tensor([[0.,0.,2.,2.], [-0.5,-0.5,1.,1.], [1/3,0.5,0.5,0.5]])
show_boxes(targets)
intersection(anchors, targets)
#export
def IoU_values(anchors, targets):
"Compute the IoU values of `anchors` by `targets`."
inter = intersection(anchors, targets)
anc_sz, tgt_sz = anchors[:,2] * anchors[:,3], targets[:,2] * targets[:,3]
union = anc_sz.unsqueeze(1) + tgt_sz.unsqueeze(0) - inter
return inter/(union+1e-8)
IoU_values(anchors, targets)
```
Manually checked that those are right.
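For instance, the first target `[0., 0., 2., 2.]` covers the whole `[-1, 1] x [-1, 1]` square, so every 2/3 x 1/2 anchor is fully contained in it and its IoU is simply the ratio of the anchor area to the target area:
```
# Hand check of one entry of the IoU matrix above.
anchor_area = (2 / 3) * (1 / 2)
target_area = 2 * 2
iou_by_hand = anchor_area / (anchor_area + target_area - anchor_area)  # = 1/12
print(iou_by_hand, IoU_values(anchors, targets)[0, 0].item())
```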
```
#export
def match_anchors(anchors, targets, match_thr=0.5, bkg_thr=0.4):
"Match `anchors` to targets. -1 is match to background, -2 is ignore."
ious = IoU_values(anchors, targets)
matches = anchors.new(anchors.size(0)).zero_().long() - 2
vals,idxs = torch.max(ious,1)
matches[vals < bkg_thr] = -1
matches[vals > match_thr] = idxs[vals > match_thr]
#Overwrite matches with each target getting the anchor that has the max IoU.
#vals,idxs = torch.max(ious,0)
#If idxs contains repetition, this doesn't bug and only the last is considered.
#matches[idxs] = targets.new_tensor(list(range(targets.size(0)))).long()
return matches
```
Last example
```
match_anchors(anchors, targets)
```
With anchors very close to the targets.
```
size=(3,4)
anchors = create_grid(size)
anchors = torch.cat([anchors, torch.tensor([2/size[0],2/size[1]]).expand_as(anchors)], 1)
activations = 0.1 * torch.randn(size[0]*size[1], 4)
bboxes = activ_to_bbox(activations, anchors)
match_anchors(anchors,bboxes)
```
With anchors in the grey area.
```
anchors = create_grid((2,2))
anchors = torch.cat([anchors, torch.tensor([1.,1.]).expand_as(anchors)], 1)
targets = anchors.clone()
anchors = torch.cat([anchors, torch.tensor([[-0.5,0.,1.,1.8]])], 0)
match_anchors(anchors,targets)
#export
def tlbr2cthw(boxes):
"Convert top/left bottom/right format `boxes` to center/size corners."
center = (boxes[:,:2] + boxes[:,2:])/2
sizes = boxes[:,2:] - boxes[:,:2]
return torch.cat([center, sizes], 1)
#export
def bbox_to_activ(bboxes, anchors, flatten=True):
"Return the target of the model on `anchors` for the `bboxes`."
if flatten:
t_centers = (bboxes[...,:2] - anchors[...,:2]) / anchors[...,2:]
t_sizes = torch.log(bboxes[...,2:] / anchors[...,2:] + 1e-8)
return torch.cat([t_centers, t_sizes], -1).div_(bboxes.new_tensor([[0.1, 0.1, 0.2, 0.2]]))
else: return [bbox_to_activ(bb,anc) for bb,anc in zip(bboxes, anchors)]
#export
def encode_class(idxs, n_classes):
target = idxs.new_zeros(len(idxs), n_classes).float()
mask = idxs != 0
i1s = LongTensor(list(range(len(idxs))))
target[i1s[mask],idxs[mask]-1] = 1
return target
encode_class(LongTensor([1,2,0,1,3]),3)
#export
class RetinaNetFocalLoss(nn.Module):
def __init__(self, gamma:float=2., alpha:float=0.25, pad_idx:int=0, scales:Collection[float]=None,
ratios:Collection[float]=None, reg_loss:LossFunction=F.smooth_l1_loss):
super().__init__()
self.gamma,self.alpha,self.pad_idx,self.reg_loss = gamma,alpha,pad_idx,reg_loss
self.scales = ifnone(scales, [1,2**(-1/3), 2**(-2/3)])
self.ratios = ifnone(ratios, [1/2,1,2])
def _change_anchors(self, sizes:Sizes) -> bool:
if not hasattr(self, 'sizes'): return True
for sz1, sz2 in zip(self.sizes, sizes):
if sz1[0] != sz2[0] or sz1[1] != sz2[1]: return True
return False
def _create_anchors(self, sizes:Sizes, device:torch.device):
self.sizes = sizes
self.anchors = create_anchors(sizes, self.ratios, self.scales).to(device)
def _unpad(self, bbox_tgt, clas_tgt):
i = torch.min(torch.nonzero(clas_tgt-self.pad_idx))
return tlbr2cthw(bbox_tgt[i:]), clas_tgt[i:]-1+self.pad_idx
def _focal_loss(self, clas_pred, clas_tgt):
encoded_tgt = encode_class(clas_tgt, clas_pred.size(1))
ps = torch.sigmoid(clas_pred)
weights = encoded_tgt * (1-ps) + (1-encoded_tgt) * ps
alphas = (1-encoded_tgt) * self.alpha + encoded_tgt * (1-self.alpha)
weights.pow_(self.gamma).mul_(alphas)
clas_loss = F.binary_cross_entropy_with_logits(clas_pred, encoded_tgt, weights, reduction='sum')
return clas_loss
def _one_loss(self, clas_pred, bbox_pred, clas_tgt, bbox_tgt):
bbox_tgt, clas_tgt = self._unpad(bbox_tgt, clas_tgt)
matches = match_anchors(self.anchors, bbox_tgt)
bbox_mask = matches>=0
if bbox_mask.sum() != 0:
bbox_pred = bbox_pred[bbox_mask]
bbox_tgt = bbox_tgt[matches[bbox_mask]]
bb_loss = self.reg_loss(bbox_pred, bbox_to_activ(bbox_tgt, self.anchors[bbox_mask]))
else: bb_loss = 0.
matches.add_(1)
clas_tgt = clas_tgt + 1
clas_mask = matches>=0
clas_pred = clas_pred[clas_mask]
clas_tgt = torch.cat([clas_tgt.new_zeros(1).long(), clas_tgt])
clas_tgt = clas_tgt[matches[clas_mask]]
return bb_loss + self._focal_loss(clas_pred, clas_tgt)/torch.clamp(bbox_mask.sum(), min=1.)
def forward(self, output, bbox_tgts, clas_tgts):
clas_preds, bbox_preds, sizes = output
if self._change_anchors(sizes): self._create_anchors(sizes, clas_preds.device)
n_classes = clas_preds.size(2)
return sum([self._one_loss(cp, bp, ct, bt)
for (cp, bp, ct, bt) in zip(clas_preds, bbox_preds, clas_tgts, bbox_tgts)])/clas_tgts.size(0)
```
Alternative to the L1 smooth loss used in online implementations
```
#export
class SigmaL1SmoothLoss(nn.Module):
def forward(self, output, target):
reg_diff = torch.abs(target - output)
reg_loss = torch.where(torch.le(reg_diff, 1/9), 4.5 * torch.pow(reg_diff, 2), reg_diff - 1/18)
return reg_loss.mean()
```
Sketch to test the loss
```
LongTensor([[[0,0,64,128,0], [32,64,128,128,1]], [[128,96,256,192,2], [96,192,128,256,3]]]).float().cuda()
tgt_clas = LongTensor([[1,2], [3,4]])
tgt_bbox = FloatTensor([[[0,0,128,64], [64,32,128,128]], [[96,128,192,256], [192,96,256,128]]])
tgt_bbox = tgt_bbox / 128 - 1.
y = [tgt_bbox.cuda(), tgt_clas.cuda()]
clas = torch.load(PATH/'models'/'tst_clas.pth')
regr = torch.load(PATH/'models'/'tst_regr.pth')
sizes = [[32, 32], [16, 16], [8, 8], [4, 4], [2, 2]]
output = [logit(clas), regr, sizes]
crit(output, *y)
```
Checking the output
```
#export
def unpad(tgt_bbox, tgt_clas, pad_idx=0):
i = torch.min(torch.nonzero(tgt_clas-pad_idx))
return tlbr2cthw(tgt_bbox[i:]), tgt_clas[i:]-1+pad_idx
idx = 0
clas_pred,bbox_pred,sizes = output[0][idx].cpu(), output[1][idx].cpu(), output[2]
bbox_tgt, clas_tgt = y[0][idx].cpu(),y[1][idx].cpu()
bbox_tgt, clas_tgt = unpad(bbox_tgt, clas_tgt)
bbox_tgt
anchors = create_anchors(sizes, ratios, scales)
ious = IoU_values(anchors, bbox_tgt)
matches = match_anchors(anchors, bbox_tgt)
ious[-9:]
(matches==-2).sum(), (matches==-1).sum(), (matches>=0).sum()
bbox_mask = matches>=0
bbox_pred = bbox_pred[bbox_mask]
bbox_tgt = bbox_tgt[matches[bbox_mask]]
bb_loss = F.smooth_l1_loss(bbox_pred, bbox_to_activ(bbox_tgt, anchors[bbox_mask]))
F.smooth_l1_loss(bbox_pred, bbox_to_activ(bbox_tgt, anchors[bbox_mask]))
tst_loss = SigmaL1SmoothLoss()
tst_loss(bbox_pred, bbox_to_activ(bbox_tgt, anchors[bbox_mask]))
crit.reg_loss(bbox_pred, bbox_to_activ(bbox_tgt, anchors[bbox_mask]))
matches.add_(1)
clas_tgt += 1
clas_mask = matches>=0
clas_pred = clas_pred[clas_mask]
clas_tgt = torch.cat([clas_tgt.new_zeros(1).long(), clas_tgt])
clas_tgt = clas_tgt[matches[clas_mask]]
```
Focal loss
```
alpha, gamma, n_classes = 0.25, 2., 6
encoded_tgt = encode_class(clas_tgt, n_classes)
ps = torch.sigmoid(clas_pred)
weights = encoded_tgt * (1-ps) + (1-encoded_tgt) * ps
alphas = encoded_tgt * alpha + (1-encoded_tgt) * (1-alpha)
weights.pow_(gamma).mul_(alphas)
clas_loss = F.binary_cross_entropy_with_logits(clas_pred, encoded_tgt, weights, reduction='sum') / bbox_mask.sum()
clas_loss
```
Let's look at the objects misclassified.
```
clas_pred[clas_tgt.nonzero().squeeze()]
F.binary_cross_entropy_with_logits(clas_pred[clas_tgt.nonzero().squeeze()], encoded_tgt[clas_tgt.nonzero().squeeze()], weights[clas_tgt.nonzero().squeeze()], reduction='sum') / bbox_mask.sum()
```
They account for half the loss!
## Training
```
n_classes = 6
encoder = create_body(tvm.resnet50(True), -2)
model = RetinaNet(encoder, n_classes,final_bias=-4)
crit = RetinaNetFocalLoss(scales=scales, ratios=ratios)
learn = Learner(data, model, loss_fn=crit)
learn.split([model.encoder[6], model.c5top5])
learn.freeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 1e-4)
learn.save('sample')
```
## Inference
```
learn.load('sample')
img,target = next(iter(data.valid_dl))
with torch.no_grad():
output = model(img)
torch.save(img, PATH/'models'/'tst_input.pth')
def _draw_outline(o:Patch, lw:int):
"Outline bounding box onto image `Patch`."
o.set_path_effects([patheffects.Stroke(
linewidth=lw, foreground='black'), patheffects.Normal()])
def draw_rect(ax:plt.Axes, b:Collection[int], color:str='white', text=None, text_size=14):
"Draw bounding box on `ax`."
patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(*b[:2], text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1)
def show_preds(img, output, idx, detect_thresh=0.3, classes=None):
clas_pred,bbox_pred,sizes = output[0][idx].cpu(), output[1][idx].cpu(), output[2]
anchors = create_anchors(sizes, ratios, scales)
bbox_pred = activ_to_bbox(bbox_pred, anchors)
clas_pred = torch.sigmoid(clas_pred)
detect_mask = clas_pred.max(1)[0] > detect_thresh
bbox_pred, clas_pred = bbox_pred[detect_mask], clas_pred[detect_mask]
t_sz = torch.Tensor([*img.size])[None].float()
bbox_pred[:,:2] = bbox_pred[:,:2] - bbox_pred[:,2:]/2
bbox_pred[:,:2] = (bbox_pred[:,:2] + 1) * t_sz/2
bbox_pred[:,2:] = bbox_pred[:,2:] * t_sz
bbox_pred = bbox_pred.long()
_, ax = plt.subplots(1,1)
for bbox, c in zip(bbox_pred, clas_pred.argmax(1)):
img.show(ax=ax)
txt = str(c.item()) if classes is None else classes[c.item()+1]
draw_rect(ax, [bbox[1],bbox[0],bbox[3],bbox[2]], text=txt)
idx = 0
img = data.valid_ds[idx][0]
classes = data.train_ds.classes
show_preds(img, output, idx, detect_thresh=0.2, classes=classes)
#export
def nms(boxes, scores, thresh=0.5):
idx_sort = scores.argsort(descending=True)
boxes, scores = boxes[idx_sort], scores[idx_sort]
to_keep, indexes = [], torch.LongTensor(range_of(scores))
while len(scores) > 0:
#pdb.set_trace()
to_keep.append(idx_sort[indexes[0]])
iou_vals = IoU_values(boxes, boxes[:1]).squeeze()
mask_keep = iou_vals <= thresh
if len(mask_keep.nonzero()) == 0: break
idx_first = mask_keep.nonzero().min().item()
boxes, scores, indexes = boxes[mask_keep], scores[mask_keep], indexes[mask_keep]
return LongTensor(to_keep)
#export
def process_output(output, i, detect_thresh=0.25):
clas_pred,bbox_pred,sizes = output[0][i], output[1][i], output[2]
anchors = create_anchors(sizes, ratios, scales).to(clas_pred.device)
bbox_pred = activ_to_bbox(bbox_pred, anchors)
clas_pred = torch.sigmoid(clas_pred)
detect_mask = clas_pred.max(1)[0] > detect_thresh
bbox_pred, clas_pred = bbox_pred[detect_mask], clas_pred[detect_mask]
bbox_pred = tlbr2cthw(torch.clamp(cthw2tlbr(bbox_pred), min=-1, max=1))
scores, preds = clas_pred.max(1)
return bbox_pred, scores, preds
def show_preds(img, output, idx, detect_thresh=0.25, classes=None):
bbox_pred, scores, preds = process_output(output, idx, detect_thresh)
to_keep = nms(bbox_pred, scores)
bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()
t_sz = torch.Tensor([*img.size])[None].float()
bbox_pred[:,:2] = bbox_pred[:,:2] - bbox_pred[:,2:]/2
bbox_pred[:,:2] = (bbox_pred[:,:2] + 1) * t_sz/2
bbox_pred[:,2:] = bbox_pred[:,2:] * t_sz
bbox_pred = bbox_pred.long()
_, ax = plt.subplots(1,1)
for bbox, c, scr in zip(bbox_pred, preds, scores):
img.show(ax=ax)
txt = str(c.item()) if classes is None else classes[c.item()+1]
draw_rect(ax, [bbox[1],bbox[0],bbox[3],bbox[2]], text=f'{txt} {scr:.2f}')
idx = 0
img = data.valid_ds[idx][0]
show_preds(img, output, idx, detect_thresh=0.2, classes=data.classes)
#export
def get_predictions(output, idx, detect_thresh=0.05):
bbox_pred, scores, preds = process_output(output, idx, detect_thresh)
to_keep = nms(bbox_pred, scores)
return bbox_pred[to_keep], preds[to_keep], scores[to_keep]
get_predictions(output, 0)
```
## mAP
```
#export
def compute_ap(precision, recall):
"Compute the average precision for `precision` and `recall` curve."
recall = np.concatenate(([0.], list(recall), [1.]))
precision = np.concatenate(([0.], list(precision), [0.]))
for i in range(len(precision) - 1, 0, -1):
precision[i - 1] = np.maximum(precision[i - 1], precision[i])
idx = np.where(recall[1:] != recall[:-1])[0]
ap = np.sum((recall[idx + 1] - recall[idx]) * precision[idx + 1])
return ap
#export
def compute_class_AP(model, dl, n_classes, iou_thresh=0.5, detect_thresh=0.05, num_keep=100):
tps, clas, p_scores = [], [], []
classes, n_gts = LongTensor(range(n_classes)),torch.zeros(n_classes).long()
with torch.no_grad():
for input,target in progress_bar(dl):
output = model(input)
for i in range(target[0].size(0)):
bbox_pred, preds, scores = get_predictions(output, i, detect_thresh)
tgt_bbox, tgt_clas = unpad(target[0][i], target[1][i])
ious = IoU_values(bbox_pred, tgt_bbox)
max_iou, matches = ious.max(1)
detected = []
for i in range_of(preds):
if max_iou[i] >= iou_thresh and matches[i] not in detected and tgt_clas[matches[i]] == preds[i]:
detected.append(matches[i])
tps.append(1)
else: tps.append(0)
clas.append(preds.cpu())
p_scores.append(scores.cpu())
n_gts += (tgt_clas.cpu()[:,None] == classes[None,:]).sum(0)
tps, p_scores, clas = torch.tensor(tps), torch.cat(p_scores,0), torch.cat(clas,0)
fps = 1-tps
idx = p_scores.argsort(descending=True)
tps, fps, clas = tps[idx], fps[idx], clas[idx]
aps = []
#return tps, clas
for cls in range(n_classes):
tps_cls, fps_cls = tps[clas==cls].float().cumsum(0), fps[clas==cls].float().cumsum(0)
if tps_cls[-1] != 0:
precision = tps_cls / (tps_cls + fps_cls + 1e-8)
recall = tps_cls / (n_gts[cls] + 1e-8)
aps.append(compute_ap(precision, recall))
else: aps.append(0.)
return aps
L = compute_class_AP(learn.model, tst_dl, 6)
L[0]
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Reducer/mean_std_image.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/mean_std_image.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Reducer/mean_std_image.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/mean_std_image.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically installs its dependencies, including earthengine-api and folium.
```
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
```
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# Load a Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318')
# Combine the mean and standard deviation reducers.
reducers = ee.Reducer.mean().combine(**{
'reducer2': ee.Reducer.stdDev(),
'sharedInputs': True
})
# Use the combined reducer to get the mean and SD of the image.
stats = image.reduceRegion(**{
'reducer': reducers,
'bestEffort': True,
})
# Display the dictionary of band means and SDs.
print(stats.getInfo())
```
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
```
import pandas as pd
pd.DataFrame({'Yes': [50, 21], 'No': [131, 2]})
pd.DataFrame({'Bob': ['I Like it.', 'It was awful'], 'Sue': ['Pretty good.', 'Bland']})
pd.DataFrame({'Bob': ['I like it.', 'It was awful'],
'Sue': ['Pretty good', 'Bland']},
index=['Product A', 'Product B'])
pd.Series([1, 2, 3, 4, 5])
pd.Series([30, 40, 50], index=['2015 Sales', '2016 Sales', '2017 Sales'], name='Product A')
wine_reviews = pd.read_csv('winemag-data_first150k.csv')
wine_reviews.shape
wine_reviews.head()
wine_reviews = pd.read_csv('winemag-data_first150k.csv', index_col=0)
wine_reviews.head()
wic = pd.read_excel('xls_files_all/WICAgencies2013ytd.xls',
sheet_name='Total Women')
wic.head()
import sqlite3
conn = sqlite3.connect('FPA_FOD_20170508.sqlite')
fires = pd.read_sql_query('SELECT * FROM fires', conn)
fires.head()
wine_reviews.head().to_csv('wine_reviews.csv')
wic.to_excel('wlc.xlsx', sheet_name='Total Women')
conn = sqlite3.connect('fires.sqlite')
fires.head().to_sql('fires', conn)
import pandas as pd
reviews = pd.read_csv('winemag-data-130k-v2.csv', index_col=0)
pd.set_option('display.max_rows', 5)
reviews
reviews.country
reviews['country']
reviews['country'][0]
reviews.iloc[0]
reviews.iloc[:, 0]
reviews.iloc[:3, 0]
reviews.iloc[1:3, 0]
reviews.iloc[[0,1,2],0]
reviews.iloc[-5:]
reviews.loc[0, 'country']
reviews.loc[:, ['taster_name', 'taster_twitter_handle', 'points']]
reviews.set_index('title')
reviews.country == 'Italy'
reviews.loc[reviews.country == 'Italy']
reviews.loc[(reviews.country == 'Italy') & (reviews.points >= 90)]
reviews.loc[(reviews.country == 'Italy') | (reviews.points >= 90)]
reviews.loc[reviews.country.isin(['Italy', 'France'])]
reviews.loc[reviews.price.notnull()]
reviews['critic'] = 'everyone'
reviews['critic']
reviews['index_backwards'] = range(len(reviews), 0, -1)
reviews['index_backwards']
import pandas as pd
pd.set_option('max_rows', 5)
import numpy as np
reviews = pd.read_csv('winemag-data-130k-v2.csv', index_col=0)
reviews.head()
reviews.points.describe()
reviews.taster_name.describe()
reviews.points.mean()
reviews.taster_name.unique()
reviews.taster_name.value_counts()
reviews_points_mean = reviews.points.mean()
reviews.points.map(lambda p: p - reviews_points_mean)
def remean_points(srs):
srs.points = srs.points - reviews_points_mean
return srs
reviews.apply(remean_points, axis='columns')
review_points_mean = reviews.points.mean()
reviews.points - review_points_mean
reviews.country + '-' + reviews.region_1
import pandas as pd
reviews = pd.read_csv('winemag-data-130k-v2.csv', index_col=0)
pd.set_option('display.max_rows',5)
reviews.groupby('points').points.count()
reviews.groupby('points').price.min()
reviews.head()
reviews.groupby('winery').apply(lambda df: df.title.iloc[0])
reviews.groupby(['country', 'province']).apply(lambda df: df.loc[df.points.idxmax()])
reviews.groupby(['country']).price.agg([len, min, max])
countries_reviewed = reviews.groupby(['country', 'province']).description.agg([len])
countries_reviewed
mi = _.index  # `_` is the output of the previous cell, i.e. countries_reviewed
type(mi)
countries_reviewed.reset_index()
countries_reviewed = countries_reviewed.reset_index()
countries_reviewed.sort_values(by='len')
countries_reviewed.sort_values(by='len', ascending=False)
countries_reviewed.sort_index()
countries_reviewed.sort_values(by=['country', 'len'])
import pandas as pd
reviews = pd.read_csv("winemag-data-130k-v2.csv", index_col=0)
pd.set_option('max_rows', 5)
reviews.price.dtype
reviews.dtypes
reviews.points.astype('float64')
reviews.index.dtype
reviews[reviews.country.isnull()]
reviews.region_2.fillna('Unknown')
reviews.taster_twitter_handle.replace("@kerinokeefe",'@kerino')
import pandas as pd
pd.set_option('max_rows', 5)
reviews = pd.read_csv("winemag-data-130k-v2.csv", index_col=0)
reviews
reviews.rename(columns={'points':'score'})
reviews.rename(index={0: 'firstEntry', 1: 'secondEntry'})
reviews.rename_axis('wines', axis='rows').rename_axis('fields', axis='columns')
canadian_youtube = pd.read_csv('CAvideos.csv')
british_youtube = pd.read_csv('GBvideos.csv')
pd.concat([canadian_youtube, british_youtube])
left = canadian_youtube.set_index(['title', 'trending_date'])
right = british_youtube.set_index(['title', 'trending_date'])
left.join(right, lsuffix='_CAN', rsuffix='_UK')
import pandas as pd
pd.set_option('max_rows', 5)
wine = pd.read_csv("winemag-data-130k-v2.csv", index_col=0)
ramen = pd.read_csv("ramen-ratings.csv", index_col=0)
stars = ramen['Stars']
na_stars = stars.replace('Unrated', None).dropna()
float_stars = na_stars.astype('float64')
float_stars.head()
(ramen['Stars']
.replace('Unrated', None)
.dropna()
.astype('float64')
.head())
wine.head()
wine.assign(
region_1=wine.apply(lambda srs: srs.region_1 if pd.notnull(srs.region_1) else srs.province,
axis='columns')
)
wine['region_1'] = wine.apply(
lambda srs: srs.region_1 if pd.notnull(srs.region_1) else srs.province,
axis='columns'
)
def name_index(df):
df.index.name = 'review_id'
return df
wine.pipe(name_index)
```
| github_jupyter |
# Feature transformation with Amazon SageMaker Processing and SparkML
Typically a machine learning (ML) process consists of a few steps: first, gathering data with various ETL jobs; then pre-processing the data and featurizing the dataset by incorporating standard techniques or prior knowledge; and finally training an ML model using an algorithm.
Often, distributed data processing frameworks such as Spark are used to pre-process data sets in order to prepare them for training. In this notebook we'll use Amazon SageMaker Processing, and leverage the power of Spark in a managed SageMaker environment to run our preprocessing workload. Then, we'll take our preprocessed dataset and train a regression model using XGBoost.
## Contents
1. [Objective](#Objective:-predict-the-age-of-an-Abalone-from-its-physical-measurement)
1. [Setup](#Setup)
1. [Using Amazon SageMaker Processing to execute a SparkML Job](#Using-Amazon-SageMaker-Processing-to-execute-a-SparkML-Job)
1. [Downloading dataset and uploading to S3](#Downloading-dataset-and-uploading-to-S3)
1. [Build a Spark container for running the preprocessing job](#Build-a-Spark-container-for-running-the-preprocessing-job)
1. [Run the preprocessing job using Amazon SageMaker Processing](#Run-the-preprocessing-job-using-Amazon-SageMaker-Processing)
1. [Inspect the preprocessed dataset](#Inspect-the-preprocessed-dataset)
1. [Train a regression model using the Amazon SageMaker XGBoost algorithm](#Train-a-regression-model-using-the-SageMaker-XGBoost-algorithm)
1. [Retrieve the XGBoost algorithm image](#Retrieve-the-XGBoost-algorithm-image)
1. [Set XGBoost model parameters and dataset details](#Set-XGBoost-model-parameters-and-dataset-details)
1. [Train the XGBoost model](#Train-the-XGBoost-model)
## Objective: predict the age of an Abalone from its physical measurement
The dataset is available from [UCI Machine Learning](https://archive.ics.uci.edu/ml/datasets/abalone). The aim of this task is to determine the age of an abalone (a kind of shellfish) from its physical measurements. At its core, it's a regression problem. The dataset contains several features - `sex` (categorical), `length` (continuous), `diameter` (continuous), `height` (continuous), `whole_weight` (continuous), `shucked_weight` (continuous), `viscera_weight` (continuous), `shell_weight` (continuous) and `rings` (integer). Our goal is to predict the variable `rings`, which is a good approximation for age (age is `rings` + 1.5).
Use SparkML to process the dataset (apply one or many feature transformers) and upload the transformed dataset to Amazon S3 so that it can be used for training with XGBoost.
## Setup
Let's start by specifying:
* The S3 bucket and prefixes that you use for training and model data. Use the default bucket specified by the Amazon SageMaker session.
* The IAM role ARN used to give processing and training access to the dataset.
```
import sagemaker
from time import gmtime, strftime
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
bucket = sagemaker_session.default_bucket()
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
prefix = 'sagemaker/spark-preprocess-demo/' + timestamp_prefix
input_prefix = prefix + '/input/raw/abalone'
input_preprocessed_prefix = prefix + '/input/preprocessed/abalone'
model_prefix = prefix + '/model'
```
## Using Amazon SageMaker Processing to execute a SparkML job
### Downloading dataset and uploading to Amazon Simple Storage Service (Amazon S3)
The Amazon SageMaker team downloaded the abalone dataset from the University of California, Irvine repository and uploaded it to an S3 bucket. In this notebook, you download the dataset from that bucket and upload it to your own bucket so that Amazon SageMaker can access it.
```
# Fetch the dataset from the SageMaker bucket
!wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/abalone/abalone.csv
# Uploading the training data to S3
sagemaker_session.upload_data(path='abalone.csv', bucket=bucket, key_prefix=input_prefix)
```
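Optionally, take a quick peek at the raw file with pandas. This is just a sanity check and not part of the original pipeline; the column names follow the description in the Objective section, since the CSV has no header row.
```
import pandas as pd

# Quick sanity check of the downloaded file (the CSV has no header row)
columns = ['sex', 'length', 'diameter', 'height', 'whole_weight',
           'shucked_weight', 'viscera_weight', 'shell_weight', 'rings']
abalone = pd.read_csv('abalone.csv', header=None, names=columns)
abalone['age'] = abalone['rings'] + 1.5  # age approximation mentioned above
print(abalone.head())
```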
### Build a Spark container for running the preprocessing job
An example Spark container is included in the `./container` directory of this example. The container handles the bootstrapping of all Spark configuration, and serves as a wrapper around the `spark-submit` CLI. At a high level the container provides:
* A set of default Spark/YARN/Hadoop configurations
* A bootstrapping script for configuring and starting up Spark master/worker nodes
* A wrapper around the `spark-submit` CLI to submit a Spark application
After the container build and push process is complete, use the Amazon SageMaker Python SDK to submit a managed, distributed Spark application that performs our dataset preprocessing.
Build the example Spark container.
```
%cd container
!docker build -t sagemaker-spark-example .
%cd ../
```
Create an Amazon Elastic Container Registry (Amazon ECR) repository for the Spark container and push the image.
```
import boto3
account_id = boto3.client('sts').get_caller_identity().get('Account')
region = boto3.session.Session().region_name
ecr_repository = 'sagemaker-spark-example'
tag = ':latest'
spark_repository_uri = '{}.dkr.ecr.{}.amazonaws.com/{}'.format(account_id, region, ecr_repository + tag)
# Create ECR repository and push docker image
!$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email)
!aws ecr create-repository --repository-name $ecr_repository
!docker tag {ecr_repository + tag} $spark_repository_uri
!docker push $spark_repository_uri
```
### Run the preprocessing job using Amazon SageMaker Processing
Next, use the Amazon SageMaker Python SDK to submit a processing job. Use the Spark container that was just built, and a SparkML script for preprocessing in the job configuration.
Create the SparkML preprocessing script.
```
%%writefile preprocess.py
from __future__ import print_function
from __future__ import unicode_literals
import time
import sys
import os
import shutil
import csv
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.sql.types import StructField, StructType, StringType, DoubleType
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler
from pyspark.sql.functions import *
def csv_line(data):
r = ','.join(str(d) for d in data[1])
return str(data[0]) + "," + r
def main():
spark = SparkSession.builder.appName("PySparkAbalone").getOrCreate()
# Convert command line args into a map of args
args_iter = iter(sys.argv[1:])
args = dict(zip(args_iter, args_iter))
# This is needed to save RDDs which is the only way to write nested Dataframes into CSV format
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
# Defining the schema corresponding to the input data. The input data does not contain the headers
schema = StructType([StructField("sex", StringType(), True),
StructField("length", DoubleType(), True),
StructField("diameter", DoubleType(), True),
StructField("height", DoubleType(), True),
StructField("whole_weight", DoubleType(), True),
StructField("shucked_weight", DoubleType(), True),
StructField("viscera_weight", DoubleType(), True),
StructField("shell_weight", DoubleType(), True),
StructField("rings", DoubleType(), True)])
# Downloading the data from S3 into a Dataframe
total_df = spark.read.csv(('s3a://' + os.path.join(args['s3_input_bucket'], args['s3_input_key_prefix'],
'abalone.csv')), header=False, schema=schema)
#StringIndexer on the sex column which has categorical value
sex_indexer = StringIndexer(inputCol="sex", outputCol="indexed_sex")
#one-hot-encoding is being performed on the string-indexed sex column (indexed_sex)
sex_encoder = OneHotEncoder(inputCol="indexed_sex", outputCol="sex_vec")
#vector-assembler will bring all the features to a 1D vector for us to save easily into CSV format
assembler = VectorAssembler(inputCols=["sex_vec",
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight"],
outputCol="features")
    # The pipeline comprises the steps added above
pipeline = Pipeline(stages=[sex_indexer, sex_encoder, assembler])
# This step trains the feature transformers
model = pipeline.fit(total_df)
# This step transforms the dataset with information obtained from the previous fit
transformed_total_df = model.transform(total_df)
# Split the overall dataset into 80-20 training and validation
(train_df, validation_df) = transformed_total_df.randomSplit([0.8, 0.2])
# Convert the train dataframe to RDD to save in CSV format and upload to S3
train_rdd = train_df.rdd.map(lambda x: (x.rings, x.features))
train_lines = train_rdd.map(csv_line)
train_lines.saveAsTextFile('s3a://' + os.path.join(args['s3_output_bucket'], args['s3_output_key_prefix'], 'train'))
# Convert the validation dataframe to RDD to save in CSV format and upload to S3
validation_rdd = validation_df.rdd.map(lambda x: (x.rings, x.features))
validation_lines = validation_rdd.map(csv_line)
validation_lines.saveAsTextFile('s3a://' + os.path.join(args['s3_output_bucket'], args['s3_output_key_prefix'], 'validation'))
if __name__ == "__main__":
main()
```
Run a processing job using the Docker image and preprocessing script you just created. When invoking the `spark_processor.run()` function, pass the Amazon S3 input and output paths as arguments that are required by our preprocessing script to determine input and output location in Amazon S3. Here, you also specify the number of instances and instance type that will be used for the distributed Spark job.
```
from sagemaker.processing import ScriptProcessor, ProcessingInput
spark_processor = ScriptProcessor(base_job_name='spark-preprocessor',
image_uri=spark_repository_uri,
command=['/opt/program/submit'],
role=role,
instance_count=2,
instance_type='ml.r5.xlarge',
max_runtime_in_seconds=1200,
env={'mode': 'python'})
spark_processor.run(code='preprocess.py',
arguments=['s3_input_bucket', bucket,
's3_input_key_prefix', input_prefix,
's3_output_bucket', bucket,
's3_output_key_prefix', input_preprocessed_prefix],
logs=False)
```
#### Inspect the preprocessed dataset
Take a look at a few rows of the transformed dataset to make sure the preprocessing was successful.
```
print('Top 5 rows from s3://{}/{}/train/'.format(bucket, input_preprocessed_prefix))
!aws s3 cp --quiet s3://$bucket/$input_preprocessed_prefix/train/part-00000 - | head -n5
```
## Train a regression model using the SageMaker XGBoost algorithm
Use the Amazon SageMaker XGBoost algorithm to train on this dataset. You already know the Amazon S3 location where the preprocessed training data was uploaded as part of the processing job output.
### Retrieve the XGBoost algorithm image
Retrieve the XGBoost built-in algorithm image so that you can use it in the training job.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sagemaker_session.boto_region_name, 'xgboost', repo_version="0.90-1")
print(training_image)
```
### Set XGBoost model parameters and dataset details
Next, configure an Estimator for the XGBoost algorithm and the input dataset. The notebook is parameterized so that the same data location used in the SparkML script can now be passed to XGBoost Estimator as well.
```
s3_train_data = 's3://{}/{}/{}'.format(bucket, input_preprocessed_prefix, 'train/part')
s3_validation_data = 's3://{}/{}/{}'.format(bucket, input_preprocessed_prefix, 'validation/part')
s3_output_location = 's3://{}/{}/{}'.format(bucket, prefix, 'xgboost_model')
xgb_model = sagemaker.estimator.Estimator(training_image,
role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
train_volume_size = 20,
train_max_run = 3600,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sagemaker_session)
xgb_model.set_hyperparameters(objective = "reg:linear",
eta = .2,
gamma = 4,
max_depth = 5,
num_round = 10,
subsample = 0.7,
silent = 0,
min_child_weight = 6)
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated',
content_type='text/csv', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
```
### Train the XGBoost model
```
xgb_model.fit(inputs=data_channels, logs=True)
```
### Summary
Voila! You completed the first portion of the machine learning pipeline using Amazon SageMaker Processing for feature transformation and Amazon SageMaker XGBoost for training a regression model.
| github_jupyter |
```
'''Trains a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function
# from keras.models import Sequential, Model
# from keras.layers.embeddings import Embedding
# from keras.layers import Input, Activation, Dense, Permute, Dropout, add, dot, concatenate
# from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np, pandas as pd
import re
# def pad_sequences(seq, *args, **kwargs):
# """NOP dropin for Keras pad_sequences"""
# return seq
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences
that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file,
retrieve the stories,
and then convert the sentences into a single story.
If max_length is supplied,
any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
# let's not forget that index 0 is reserved
y = np.zeros(len(word_idx) + 1)
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return (pad_sequences(X, maxlen=story_maxlen),
pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))
try:
path = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
vocab = set()
for story, q, answer in train_stories + test_stories:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories,
word_idx,
story_maxlen,
query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories,
word_idx,
story_maxlen,
query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')
ts = test_stories[:20]
ts
stories, queries, answers = list(zip(*test_stories))
df = pd.DataFrame(test_stories, columns=['story', 'query', 'answer'])
df['story'] = df['story'].str.join(' ')
df['story'].unique().shape
df.shape
```
| github_jupyter |
## Exploring the Dataframe
Exploring the indexes (rows) of a certain dataframe
Method 1:
```
list(dfObj.index.values)
```
Method 2:
```
list(dfObj.index)
```
Good! Now we can inspect this new dataframe:
```
# returns a tuple with number of rows/columns
DF.shape
```
In order to get basic information about the DataFrame:
```
DF.info()
```
In order to get a more detailed report on memory usage:
```
DF.info(memory_usage='deep')
```
And we can also take a look at the first rows of the dataframe:
```
DF.head(3) #only the 3 first lines are shown
```
In order to know the column names:
```
DF.columns
```
If we want to check a particular column from the dataframe ('RSI' for example):
```
RSI=DF[['RSI']]
```
If we want to select 2 non-consecutive columns:
```
a=DF[['RSI','Ranging']]
```
### Selecting using .iloc and .loc
Extracted from:
https://www.shanelynn.ie/select-pandas-dataframe-rows-and-columns-using-iloc-loc-and-ix/
#### .iloc<br>
Single selection:<br>
* Rows:<br>
data.iloc[0] # first row of data frame (Aleshia Tomkiewicz) - Note a Series data type output.<br>
data.iloc[1] # second row of data frame (Evan Zigomalas)<br>
data.iloc[-1] # last row of data frame (Mi Richan)<br>
* Columns:<br>
data.iloc[:,0] # first column of data frame (first_name)<br>
data.iloc[:,1] # second column of data frame (last_name)<br>
data.iloc[:,-1] # last column of data frame (id)<br>
Multiple selection:<br>
<br>
data.iloc[0:5] # first five rows of dataframe<br>
data.iloc[:, 0:2] # first two columns of data frame with all rows<br>
data.iloc[[0,3,6,24], [0,5,6]] # 1st, 4th, 7th, 25th row + 1st 6th 7th columns.<br>
data.iloc[0:5, 5:8] # first 5 rows and 5th, 6th, 7th columns of data frame (county -> phone1)<br>
data.iloc[:, [0,1]] <br>
#### .loc<br>
Single selection:<br>
a=DF.loc[:,'Direction']<br>
Multiple selection:<br>
a=DF.loc[:,['Direction','RSI']]
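To make these selectors concrete, here is a small runnable sketch on a made-up DataFrame that reuses the column names from this section (the values are purely illustrative):
```
import pandas as pd

# Hypothetical data, just to exercise .iloc and .loc
DF = pd.DataFrame({'Direction': ['up', 'down', 'up'],
                   'RSI': [55.2, 31.8, 70.1],
                   'Ranging': [True, False, False]})

print(DF.iloc[0])                       # first row -> Series
print(DF.iloc[:, 0:2])                  # first two columns, all rows -> DataFrame
print(DF.loc[:, 'Direction'])           # single column by label -> Series
print(DF.loc[:, ['Direction', 'RSI']])  # list of labels -> DataFrame
```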
### Objects returned by .iloc and .loc
* If only one row is selected then we will get a Pandas series:<br>
data.iloc[0]
* If we use list selector then we get a Dataframe:<br>
data.iloc[[0]]
* If we select multiple rows then we get a Dataframe:<br>
data.iloc[0:5]
```
ix='RSI'
DF.loc[:,ix]
```
### Setting the value of a certain cell in the dataframe
#### By index:
```
import pandas as pd
df=pd.DataFrame(index=['A','B','C'], columns=['x','y'])
df.at['C', 'x'] = 10
```
#### By position:
```
df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],columns=['A', 'B', 'C'])
df.iat[1, 2] = 10
```
### Logical selection
And for example, if we want to select all records for which the 'Reversed' column is TRUE:
```
reversed_true=DF.loc[DF['Reversed']==True]
```
And if we want to select based on either the value of one column or a different one:
```
DF.loc[(DF['Reversed']==True) | (DF['Divergence']==True)]
```
Now, if we want the counts (frequencies) for a certain categorical variable we have to enter the following:
```
DF['Currency Pair'].value_counts()
```
And if we want to have proportions instead of counts we do:
```
DF['Currency Pair'].value_counts(normalize=True)
```
And if we want percentages we do:
```
DF['Currency Pair'].value_counts(normalize=True)*100
```
Now, if we want to copy the entire dataframe:
```
newDF = DF.copy()
newDF.head(3)
```
| github_jupyter |
Rough replication of Izhikevich's 2007 paper,
Solving the Distal Reward Problem through linkage of STDP and Dopamine Signaling
https://www.izhikevich.org/publications/dastdp.pdf
Eugene M. Izhikevich(2007) Solving the Distal Reward Problem through linkage of STDP and Dopamine Signaling. Cerebral Cortex Advance Access, Jan 13, 2007. doi:10.1093/cercor/bhl152
```
import numpy as np
from spikey.snn import *
from spikey.core import GenericLoop, RLCallback
from spikey.games import Logic
from spikey.viz import print_rates, print_common_action
np.random.seed(0)
class rand_state(Logic):
"""
A customization of the Logic game, sets the game state randomly in 0..N at each timestep.
"""
NECESSARY_KEYS = Logic.extend_keys({"n_states": "Number of input groups."})
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.observation_space = list(range(self._n_states))
def _get_state(self) -> np.ndarray:
# Randomly sample a state integer on game start and at every update
return np.random.randint(self._n_states)
```
## Classical Conditioning
In the original experiment there are N distinct input neuron groups, all pointing towards a single output group. The goal is to condition the output neurons to fire heavily in response to certain input groups, while largely ignoring others. This is accomplished by rewarding the network when the desired input groups fire to strengthen that group's connections to the outputs.
Converting this description for use in the framework is straightforward, but a frame of reference helps if it's your first time.
1. Divide experiment into network and game mechanics.
In this experiment the game is very simple: at each step a state in 0..N is randomly chosen that corresponds to the input group that is to fire (see rand_state in the cell above). The network will handle its own topology, input firings and reward scheme.
2. Set up network inputs.
First we split the set of input neurons into N groups; each will fire at a set rate when its respective state is active. In Spikey we accomplish this with the RateMap input type, with its state_rate_map parameter as an ndarray of all zeros except the diagonal, which is set to the desired firing rate (=0.2). state_rate_map can be a dictionary, ndarray or any other object that can be indexed by the state, used as group_rates = state_rate_map[state]. In this case, if state = 0, then group_rates = [.2, 0, 0, ...], which means group 0 will fire at a rate of 20% and all other groups will remain quiescent. RateMap automatically divides the set of inputs into groups based on the size of the group rates vector.
3. Set the topology of the network.
Here we have a single fully connected feedforward layer, with each input connected to each output. Using the Manual weight part, we specify the network topology as a matrix in the shape (n_inputs+n_body, n_body) with n_body = n_hidden + n_output. For our purposes this looks like,
```
n_neurons
------------------
| connected | n_inputs
- - - - - - - - -
| unconnected | n_neurons
------------------
```
with connected = uniform(0, 1) and unconnected = 0.
4. Set up the reward scheme and network readout.
In this experiment reward is given solely based on the game state and ignores the network output. Therefore the readout function was arbitrarily chosen to be the simplest possible, a threshold function. A custom rewarder is set up in the cell below, giving reward when the state is in the list 0, 3, 6 or 9.
```
class StateRewarder(reward.template.Reward):
FIRE_STATES = [0, 3, 6, 9]
def __call__(self, state, action, state_next):
# Give reward when state in desired states
if state in self.FIRE_STATES:
return self._reward_mult
else:
return self._punish_mult
training_params = {
'n_episodes': 5,
'len_episode': 100,
}
N_STATES = 10
N_INPUTS = 100
N_NEURONS = 50
N_OUTPUTS = N_NEURONS
w_matrix = np.vstack(( # Feedforward, no hidden layers
np.random.uniform(0, .5, (N_INPUTS, N_NEURONS)),
np.zeros((N_NEURONS, N_NEURONS)),
))
# When state is 1 neuron group 1 fires, ...
state_rate_map = np.zeros((N_STATES, N_STATES))
for state in range(N_STATES):
state_rate_map[state, state] = .2
class network_template(RLNetwork):
parts = {
"inputs": input.RateMap,
"neurons": neuron.Neuron,
"synapses": synapse.LTP,
"weights": weight.Manual,
"readout": readout.Threshold,
"rewarder": StateRewarder,
}
keys = {
"n_inputs": N_INPUTS,
"n_neurons": N_NEURONS,
"n_outputs": N_OUTPUTS,
"matrix": w_matrix,
"magnitude": 1,
"potential_decay": .05,
"n_states": N_STATES,
"refractory_period": 0,
"firing_threshold": 8,
"processing_time": 100,
"learning_rate": .1,
"max_weight": 2,
"stdp_window": 100,
"reward_mult": 1,
"punish_mult": 0,
"action_threshold": .0, # Does not matter
"state_rate_map": state_rate_map,
}
# Control, without learning
training_loop = GenericLoop(network_template, rand_state, measure_rates=True, **training_params)
training_loop.reset(**{'learning_rate': 0, 'n_episodes': 1})
network, game, results, info = training_loop()
print(f"{training_loop.callback.results['total_time']:.2f}s")
print_rates(callback=training_loop.callback)
# Real test
training_loop = GenericLoop(network_template, rand_state, measure_rates=True, **training_params)
network, game, results, info = training_loop()
print("Firing states:", StateRewarder.FIRE_STATES)
print(f"{training_loop.callback.results['total_time']:.2f}s")
print_rates(callback=training_loop.callback)
```
## Classical Conditioning with Ordinal Output
On top of the last experiment, here the network outputs are split into two groups. The network's output is equal to the index of the highest-firing group, e.g. if group 0 fires more than any other group the network outputs a 0. The network is conditioned to output a 0 for states 2, 3, 6 and 8 and a 1 otherwise.
A variation of the population vector readout was used, defined in the cell below. The base population vector readout returns a relative firing rate per output group, e.g. [.25, .75]; our custom MaxGroup readout takes this output and returns the index of the max group, e.g. 0 or 1.
A custom rewarder was used to reward the network when the correct group fires the most.
```
class MaxGroup(readout.PopulationVector):
def __call__(self, output_spike_train: np.bool) -> np.float:
# Network reads out index of highest firing output group
population_vector = super().__call__(output_spike_train)
return np.argmax(population_vector)
class OrdinalRewarder(reward.template.Reward):
A_STATES = [2, 3, 6, 8]
def __call__(self, state, action, state_next):
# Expect network to output A(0) when current state in states listed,
# otherwise B(0)
if action == (state in self.A_STATES):
return self._reward_mult
else:
return self._punish_mult
training_params = {
'n_episodes': 10,
'len_episode': 100,
}
N_STATES = 10
N_INPUTS = 100
N_NEURONS = 60
N_OUTPUTS = N_NEURONS
w_matrix = np.vstack(( # Feedforward, no hidden layers
np.random.uniform(0, 1, (N_INPUTS, N_NEURONS)),
np.zeros((N_NEURONS, N_NEURONS)),
))
# When state is 1 neuron group 1 fires, ...
state_rate_map = np.zeros((N_STATES, N_STATES), dtype=float)
for state in range(N_STATES):
state_rate_map[state, state] = .2
class network_template(RLNetwork):
parts = {
"inputs": input.RateMap,
"neurons": neuron.Neuron,
"synapses": synapse.LTP,
"weights": weight.Manual,
"readout": MaxGroup,
"rewarder": OrdinalRewarder,
}
keys = {
"n_inputs": N_INPUTS,
'n_neurons': N_NEURONS,
"n_outputs": N_OUTPUTS,
"matrix": w_matrix,
"magnitude": 1,
"potential_decay": .05,
"n_states": N_STATES,
"refractory_period": 0,
"firing_threshold": 8,
"processing_time": 100,
"learning_rate": .1,
"max_weight": 2,
"stdp_window": 100,
"reward_mult": 1,
"punish_mult": 0,
"n_actions": 2,
"state_rate_map": state_rate_map,
}
# Control, without learning
training_loop = GenericLoop(network_template, rand_state, measure_rates=True, **training_params)
training_loop.reset(params={'learning_rate': 0, 'n_episodes': 1})
network, game, results, info = training_loop()
print(f"{training_loop.callback.results['total_time']:.2f}s")
print_common_action(callback=training_loop.callback)
# Real test
training_loop = GenericLoop(network_template, rand_state, measure_rates=True, **training_params)
network, game, results, info = training_loop()
print("A States:", OrdinalRewarder.A_STATES)
print(f"{training_loop.callback.results['total_time']:.2f}s")
print("Initial Responses")
print_common_action(callback=training_loop.callback, episode=0)
print("\nFinal Responses")
print_common_action(callback=training_loop.callback, episode=-1)
```
| github_jupyter |
## OpenAI Gym Available Environment
Gym comes with a diverse suite of environments that range from easy to difficult and involve many different kinds of data. View the [full list of environments](https://gym.openai.com/envs) to get the bird's-eye view; a quick way to list what is registered locally is shown right after the categories below.
- [Classic control](https://gym.openai.com/envs#classic_control) and [toy text](https://gym.openai.com/envs#toy_text): complete small-scale tasks, mostly from the RL literature. They’re here to get you started.
- [Algorithmic](https://gym.openai.com/envs#algorithmic): perform computations such as adding multi-digit numbers and reversing sequences. One might object that these tasks are easy for a computer. The challenge is to learn these algorithms purely from examples. These tasks have the nice property that it’s easy to vary the difficulty by varying the sequence length.
- [Atari](https://gym.openai.com/envs#atari): play classic Atari games.
- [2D and 3D robots](https://gym.openai.com/envs#mujoco): control a robot in simulation. These tasks use the MuJoCo physics engine, which was designed for fast and accurate robot simulation.
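The following snippet lists a few locally registered environments. It uses the registry API of the older gym versions assumed elsewhere in this notebook; newer gym/gymnasium releases expose the registry differently.
```
import gym
from gym import envs

# List a few registered environment ids (older gym registry API)
all_specs = list(envs.registry.all())
print(len(all_specs), 'registered environments')
print(sorted(spec.id for spec in all_specs)[:5])
```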
Don't forget to set matplotlib to inline
```
import matplotlib
%matplotlib inline
```
# CartPole-v1 example
## Initialize environment
```
import gym
env = gym.make('CartPole-v1')
_ = env.reset()
print("observation_space=",env.observation_space, "action_space=",env.action_space)
```
## Initialize agent
```
from blobrl.agents import DQN
from blobrl.explorations import EpsilonGreedy
agent = DQN(observation_space=env.observation_space, action_space=env.action_space, greedy_exploration=EpsilonGreedy(0.1))
```
## Train
Create Trainer
```
from blobrl import Trainer
trainer = Trainer(environment=env, agent=agent, log_dir="./logs")
```
Start train
```
trainer.train(max_episode=500, nb_evaluation=0, render=False)
trainer.evaluate()
env.close()
```
# MountainCar-v0 example
```
import gym
env = gym.make('MountainCar-v0')
_ = env.reset()
print("observation_space=",env.observation_space, "action_space=",env.action_space)
```
## Initialize agent
```
from blobrl.agents import DQN
from blobrl.memories import ExperienceReplay
from blobrl.explorations import AdaptativeEpsilonGreedy
import torch.optim as optim
memory = ExperienceReplay(max_size=50000, gamma=0.99) # gamma is discount reward factor (default is 0)
greedy_exploration = AdaptativeEpsilonGreedy(epsilon_max=0.8, epsilon_min=0.05, gamma=0.997)
agent = DQN(observation_space=env.observation_space, action_space=env.action_space, memory=memory, gamma=0.85, greedy_exploration=greedy_exploration)
agent.optimizer = optim.Adam(agent.network.parameters(), lr=0.001)
```
## Train
Create Trainer
```
from blobrl import Trainer
trainer = Trainer(environment=env, agent=agent, log_dir="./logs")
```
Start train
```
trainer.train(max_episode=1000, nb_evaluation=0, render=False)
trainer.evaluate()
env.close()
```
# FrozenLake-v0 example
```
import gym
env = gym.make('FrozenLake-v0')
_ = env.reset()
print("observation_space=",env.observation_space, "action_space=",env.action_space)
```
## Initialize agent
```
from blobrl.agents import DQN
agent = DQN(observation_space=env.observation_space, action_space=env.action_space)
```
## Train
Create Trainer
```
from blobrl import Trainer
trainer = Trainer(environment=env, agent=agent, log_dir="./logs")
```
Start train
```
trainer.train(max_episode=1000, nb_evaluation=0, render=False)
trainer.evaluate()
env.close()
```
# Assault-v0 example
```
import gym
env = gym.make('Assault-v0')
_ = env.reset()
print("observation_space=",env.observation_space, "action_space=",env.action_space)
```
## Initialize agent
```
from blobrl.agents import DQN
agent = DQN(observation_space=env.observation_space, action_space=env.action_space)
```
## Train
Create Trainer
```
from blobrl import Trainer
trainer = Trainer(environment=env, agent=agent, log_dir="./logs")
```
Start train
```
trainer.train(max_episode=200, nb_evaluation=0, render=False)
trainer.evaluate()
env.close()
```
| github_jupyter |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 L.A. Barba, G.F. Forsyth, C.D. Cooper.
# Spreading out
Welcome back! This is the third lesson of the course [Module 4](https://github.com/numerical-mooc/numerical-mooc/tree/master/lessons/04_spreadout), _Spreading out: parabolic PDEs,_ where we study the numerical solution of diffusion problems.
In the first two notebooks, we looked at the 1D heat equation, and solved it numerically using [*explicit*](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_01_Heat_Equation_1D_Explicit.ipynb) and [*implicit*](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb) schemes. We learned that implicit schemes are unconditionally stable, and we are free to choose any time step. —Wait: _any time step?_ Remember, we still want to capture the physics of the problem accurately. So although stability concerns do not limit the time step, it still has to be small enough to satisfy any accuracy concerns.
We are now ready to graduate to two dimensions! In the remaining lessons of this course module, we will study the 2D heat equation and reaction-diffusion equation. Like before, we start with explicit methods (this lesson) and then move to implicit methods (next lesson). Let's get started.
## 2D Heat conduction
The equation of heat conduction in 2D is:
$$
\begin{equation}
\rho c_p \frac{\partial T}{\partial t} = \frac{\partial}{\partial x} \left( \kappa_x \frac{\partial T}{\partial x} \right) + \frac{\partial}{\partial y} \left(\kappa_y \frac{\partial T}{\partial y} \right)
\end{equation}
$$
where $\rho$ is the density, $c_p$ is the heat capacity and $\kappa$ is the thermal conductivity.
If the thermal conductivity $\kappa$ is constant, then we can take it outside of the spatial derivative and the equation simplifies to:
$$
\begin{equation}
\frac{\partial T}{\partial t} = \alpha \left(\frac{\partial^2 T}{\partial x^2} + \frac{\partial^2 T}{\partial y^2} \right)
\end{equation}
$$
where $\alpha = \frac{\kappa}{\rho c_p}$ is the thermal diffusivity. The thermal diffusivity describes the ability of a material to conduct heat vs. storing it.
Does that equation have a familiar look to it? That's because it's the same as the diffusion equation. There's a reason that $\alpha$ is called the thermal *diffusivity*! We're going to set up an interesting problem where 2D heat conduction is important, and set about to solve it with explicit finite-difference methods.
### Problem statement
Removing heat out of micro-chips is a big problem in the computer industry. We are at a point in technology where computers can't run much faster because the chips might start failing due to the high temperature. This is a big deal! Let's study the problem more closely.
We want to understand how heat is dissipated from the chip with a very simplified model. Say we consider the chip as a 2D plate of size $1{\rm cm}\times 1{\rm cm}$, made of Silicon: $\kappa = 159{\rm W/m C}$, $c_p = 0.712\cdot 10^3 {\rm J/kg C}$, $\rho = 2329{\rm kg/m}^3$, and diffusivity $\alpha \approx 10^{-4}{\rm m}^2{/\rm s}$. Silicon melts at $1414{\rm C}$, but chips should of course operate at much smaller temperatures. The maximum temperature allowed depends on the processor make and model; in many cases, the maximum temperature is somewhere between $60{\rm C}$ and $\sim70{\rm C}$, but better CPUs are recommended to operate at a [maximum of $80{\rm C}$](http://www.pugetsystems.com/blog/2009/02/26/intel-core-i7-temperatures/) (like the Intel Core i7, for example).
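As a quick sanity check of the diffusivity value quoted above (an aside, not part of the original lesson):
```
# alpha = kappa / (rho * cp) for silicon, using the values quoted above
kappa = 159.0     # W/(m C)
cp = 0.712e3      # J/(kg C)
rho = 2329.0      # kg/m^3

alpha = kappa / (rho * cp)
print('alpha = {:.2e} m^2/s'.format(alpha))  # ~9.6e-5, i.e. roughly 1e-4 m^2/s
```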
We're going to set up a somewhat artificial problem, just to demonstrate an interesting numerical solution. Say the chip is in a position where on two edges (top and right) it is in contact with insulating material. On the other two edges the chip is touching other components that have a constant temperature of $T=100{\rm C}$ when the machine is operating. Initially, the chip is at room temperature $(20{\rm C})$. *How long does it take for the center of the chip to reach $70{\rm C}$?*
<img src='./figures/2dchip.svg' width='400px'>
#### Figure 1: Simplified microchip problem setup.
Let's use what we have learned to tackle this problem!
## 2D Finite differences
Everything you learned about finite-difference schemes in [Notebook 1 of Module 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/02_spacetime/02_01_1DConvection.ipynb) still applies, but now there are two spatial dimensions. We will need to build a 2D grid of discrete points to compute the solution on.
We will use a 2D Cartesian grid: one that consists of two families of (grid) lines parallel to the two spatial directions. Two lines (of different families) intersect on one and only one grid node (this is called a _structured_ grid). In the $x$ direction, the discretization uses $i=0, \cdots N_x$ lines, and in the $y$ direction we have $j=0, \cdots N_y$ lines. A given node on the grid will now have two spatial coordinates, and we need two indices: for the two lines that intersect at that node. For example, the middle point in the figure below would be $T_{i,j}$.
<img src="./figures/2dgrid.svg">
#### Figure 2. Nodal coordinates in 2 dimensions
### Explicit scheme in 2D
Recall from above that the 2D heat equation is
$$
\frac{\partial T}{\partial t} = \alpha \left(\frac{\partial^2 T}{\partial x^2} + \frac{\partial^2 T}{\partial y^2} \right)
$$
Let's write this out discretized using forward difference in time, and central difference in space, using an explicit scheme. You should be able to write this out yourself, without looking—if you need to look, it means you still need to write more difference equations by your own hand!
$$
\begin{equation}
\frac{T^{n+1}_{i,j} - T^n_{i,j}}{\Delta t} = \alpha \left( \frac{T^n_{i+1, j} - 2T^n_{i,j} + T^n_{i-1,j}}{\Delta x^2} + \frac{T^n_{i, j+1} - 2T^n_{i,j} + T^n_{i,j-1}}{\Delta y^2}\right)
\end{equation}
$$
Rearranging the equation to solve for the value at the next time step, $T^{n+1}_{i,j}$, yields
$$
\begin{equation}
T^{n+1}_{i,j}= T^n_{i,j} + \alpha \left( \frac{\Delta t}{\Delta x^2} (T^n_{i+1, j} - 2T^n_{i,j} + T^n_{i-1,j}) + \\\frac{\Delta t}{\Delta y^2} (T^n_{i, j+1} - 2T^n_{i,j} + T^n_{i,j-1})\right)
\end{equation}
$$
That's a little messier than 1D, but still recognizable.
Up until now, we've used stencils to help visualize how a scheme will advance the solution for one time step. Stencils in 2D are a little harder to draw, but hopefully the figure below will guide your understanding of this method: we are using five grid points at time step $n$ to obtain the solution on one point at time step $n+1$.
<img src="./figures/2d_stencil.svg">
#### Figure 3: 2D Explicit Stencil
Similar to all of the 1D explicit methods we've used, the solution at $T^{n+1}_{i,j}$ is updated using only known values from the current solution at time $n$. This is straightforward to implement in code, but will be subject to stability limitations on the time step that you can choose. We'll study an implicit method in the next lesson.
### Boundary Conditions
Whenever we reach a point that interacts with the boundary, we apply the boundary condition. As in the previous notebook, if the boundary has Dirichlet conditions, we simply impose the prescribed temperature at that point. If the boundary has Neumann conditions, we approximate them with a finite-difference scheme.
Remember, Neumann boundary conditions prescribe the derivative in the normal direction. For example, in the problem described above, we have $\frac{\partial T}{\partial y} = q_y$ in the top boundary and $\frac{\partial T}{\partial x} = q_x$ in the right boundary, with $q_y = q_x = 0$ (insulation).
Thus, at every time step, we need to enforce
$$
\begin{equation}
T_{i,end} = q_y\cdot\Delta y + T_{i,end-1}
\end{equation}
$$
and
$$
\begin{equation}
T_{end,j} = q_x\cdot\Delta x + T_{end-1,j}
\end{equation}
$$
Write the finite-difference discretization of the boundary conditions yourself, and confirm that you can get the expressions above.
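For reference, in array form (anticipating the storage convention described in the Code implementation section below, where rows follow $y$ and columns follow $x$) these updates look like the sketch here; with $q_x = q_y = 0$ they reduce to the insulated-boundary updates used in `ftcs()` later on:
```
import numpy

# Sketch of the Neumann updates in array form (hypothetical 4x4 grid)
dx = dy = 1.0
qx = qy = 0.0
T = numpy.zeros((4, 4))
T[-1, :] = qy * dy + T[-2, :]   # top boundary:   dT/dy = qy
T[:, -1] = qx * dx + T[:, -2]   # right boundary: dT/dx = qx
```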
### Stability
Before doing any coding, let's revisit stability constraints. We saw in the first notebook of this series that the 1D explicit discretization of the diffusion equation was stable as long as $\alpha \frac{\Delta t}{(\Delta x)^2} \leq \frac{1}{2}$. In 2D, this constraint is even tighter, as we need to add them in both directions:
$$
\begin{equation}
\alpha \frac{\Delta t}{(\Delta x)^2} + \alpha \frac{\Delta t}{(\Delta y)^2} < \frac{1}{2}.
\end{equation}
$$
Say that the mesh has the same spacing in $x$ and $y$, $\Delta x = \Delta y = \delta$. In that case, the stability condition is:
$$
\begin{equation}
\alpha \frac{\Delta t}{\delta^2} < \frac{1}{4}
\end{equation}
$$
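As a quick check of what this limit means for the grid used later in this notebook ($\delta = 0.01/20$ m and $\alpha = 10^{-4}\,{\rm m}^2/{\rm s}$), the time step must stay below a fraction of a millisecond:
```
# Maximum stable time step for the grid used later in this notebook
alpha = 1e-4            # m^2/s
delta = 0.01 / 20       # m, i.e. Lx / (nx - 1)
dt_max = delta**2 / (4 * alpha)
print('dt must be below {:.2e} s'.format(dt_max))  # ~6.25e-4 s
```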
## Code implementation
### Array storage
The physical problem has two dimensions, so we also store the temperatures in two dimensions: in a 2D array.
We chose to store it with the $y$ coordinates corresponding to the rows of the array and $x$ coordinates varying with the columns (this is just a code design decision!). If we are consistent with the stencil formula (with $x$ corresponding to index $i$ and $y$ to index $j$), then $T_{i,j}$ will be stored in array format as `T[j,i]`.
This might be a little confusing as most of us are used to writing coordinates in the format $(x,y)$, but our preference is to have the data stored so that it matches the physical orientation of the problem. Then, when we make a plot of the solution, the visualization will make sense to us, with respect to the geometry of our set-up. That's just nicer than to have the plot rotated!
<img src="./figures/rowcolumn.svg" width="400px">
#### Figure 4: Row-column data storage
As you can see on Figure 4 above, if we want to access the value $18$ we would write those coordinates as $(x_2, y_3)$. You can also see that its location is the 3rd row, 2nd column, so its array address would be `T[3,2]`.
Again, this is a design decision. However you can choose to manipulate and store your data however you like; just remember to be consistent!
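As a tiny illustration of this addressing (assuming, purely for illustration, a 4x5 grid that stores the values 1 through 20 row by row):
```
import numpy

# Rows follow the y direction (index j), columns follow the x direction (index i)
T_demo = numpy.arange(1, 21).reshape(4, 5)
j, i = 3, 2
print(T_demo)
print('T[{}, {}] = {}'.format(j, i, T_demo[j, i]))  # -> 18, the value at (x_2, y_3)
```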
### Code time!
Now, to some coding! First, we have a little function that will advance the solution in time with a forward-time, centered-space scheme, and will monitor the center of the plate to tell us when it reaches $70{\rm C}$. Let's start by setting up our Python compute environment.
```
import numpy
from matplotlib import pyplot
%matplotlib inline
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
def ftcs(T0, nt, dt, dx, dy, alpha):
"""
Computes and returns the temperature distribution
after a given number of time steps.
Explicit integration using forward differencing
in time and central differencing in space, with
Neumann conditions (zero-gradient) on top and right
boundaries and Dirichlet conditions on bottom and
left boundaries.
Parameters
----------
T0 : numpy.ndarray
The initial temperature distribution as a 2D array of floats.
nt : integer
Maximum number of time steps to compute.
dt : float
Time-step size.
dx : float
Grid spacing in the x direction.
dy : float
Grid spacing in the y direction.
alpha : float
Thermal diffusivity.
Returns
-------
T : numpy.ndarray
The temperature distribution as a 2D array of floats.
"""
# Define some constants.
sigma_x = alpha * dt / dx**2
sigma_y = alpha * dt / dy**2
# Integrate in time.
T = T0.copy()
ny, nx = T.shape
I, J = int(nx / 2), int(ny / 2) # indices of the center
for n in range(nt):
T[1:-1, 1:-1] = (T[1:-1, 1:-1] +
sigma_x * (T[1:-1, 2:] - 2.0 * T[1:-1, 1:-1] + T[1:-1, :-2]) +
sigma_y * (T[2:, 1:-1] - 2.0 * T[1:-1, 1:-1] + T[:-2, 1:-1]))
# Apply Neumann conditions (zero-gradient).
T[-1, :] = T[-2, :]
T[:, -1] = T[:, -2]
# Check if the center of the domain has reached T = 70C.
if T[J, I] >= 70.0:
break
print('[time step {}] Center at T={:.2f} at t={:.2f} s'
.format(n + 1, T[J, I], (n + 1) * dt))
return T
```
See the [`break`](https://docs.python.org/3/tutorial/controlflow.html) statement? It exits the `for` loop at the closest time iteration when the plate reaches $70{\rm C}$.
In the code cell below, we define our initial conditions according to the problem set up, and choose the discretization parameters. We start with only 20 spatial steps in each coordinate direction and advance for 500 time steps. You should later experiment with these parameters at your leisure!
```
# Set parameters.
Lx = 0.01 # length of the plate in the x direction
Ly = 0.01 # height of the plate in the y direction
nx = 21 # number of points in the x direction
ny = 21 # number of points in the y direction
dx = Lx / (nx - 1) # grid spacing in the x direction
dy = Ly / (ny - 1) # grid spacing in the y direction
alpha = 1e-4 # thermal diffusivity of the plate
# Define the locations along a gridline.
x = numpy.linspace(0.0, Lx, num=nx)
y = numpy.linspace(0.0, Ly, num=ny)
# Compute the initial temperature distribution.
Tb = 100.0 # temperature at the left and bottom boundaries
T0 = 20.0 * numpy.ones((ny, nx))
T0[0, :] = Tb
T0[:, 0] = Tb
```
We don't want our solution blowing up, so let's find a time step with $\frac{\alpha \Delta t}{\Delta x^2} = \frac{\alpha \Delta t}{\Delta y^2} = \frac{1}{4}$.
```
# Set the time-step size based on CFL limit.
sigma = 0.25
dt = sigma * min(dx, dy)**2 / alpha # time-step size
nt = 500 # number of time steps to compute
# Compute the temperature along the rod.
T = ftcs(T0, nt, dt, dx, dy, alpha)
```
### Visualize the results
By now, you're no doubt *very* familiar with the `pyplot.plot` command. It's great for line plots, scatter plots, etc., but what about when we have two spatial dimensions and another value (temperature) to display?
Are you thinking contour plot? We're thinking contour plot. Check out the documentation on [`pyplot.contourf`](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.contour) (the 'f' denotes "filled" contours).
```
# Plot the filled contour of the temperature.
pyplot.figure(figsize=(8.0, 5.0))
pyplot.xlabel('x [m]')
pyplot.ylabel('y [m]')
levels = numpy.linspace(20.0, 100.0, num=51)
contf = pyplot.contourf(x, y, T, levels=levels)
cbar = pyplot.colorbar(contf)
cbar.set_label('Temperature [C]')
pyplot.axis('scaled', adjustable='box');
```
That looks pretty cool! Note that in the call to `pyplot.contourf` you can specify the number of contour levels to display (we chose `51`). Look at that visualization: does it make physical sense to you, considering that the upper and right sides of the chip are insulated, in our problem?
##### Dig deeper
In the problem we just demonstrated, the chip reaches a temperature of $70{\rm C}$ at a given time, but will it keep increasing? That spells trouble.
Imagine that you have a heat sink instead of an insulator acting on the upper and right sides. What should be the heat flux that the heat sink achieves there, so that the temperature does not exceed $70{\rm C}$ at the center of the chip?
---
###### The cell below loads the style of the notebook
```
from IPython.core.display import HTML
css_file = '../../styles/numericalmoocstyle.css'
HTML(open(css_file, 'r').read())
```
| github_jupyter |
# Object Detection Demo
Welcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start.
# Imports
```
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
```
## Env setup
```
# This is needed to display the images.
%matplotlib inline
```
## Object detection imports
Here are the imports from the object detection module.
```
from utils import label_map_util
from utils import visualization_utils as vis_util
```
# Model preparation
## Variables
Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
```
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
```
## Download Model
```
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
```
## Load a (frozen) Tensorflow model into memory.
```
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
```
## Loading label map
Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
```
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
```
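For reference, the resulting `category_index` is just a dictionary keyed by class id; a hand-built (hypothetical, two-class) stand-in with the same structure would look like this:
```
# Hypothetical minimal category index with the same structure as the one built above
category_index_manual = {
    1: {'id': 1, 'name': 'person'},
    2: {'id': 2, 'name': 'bicycle'},
}
print(category_index_manual[1]['name'])
```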
## Helper code
```
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
```
# Detection
```
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
```
| github_jupyter |
# Composite symbols into component
In this example we will show how to make an Inception network by composing single symbols into components.
Inception is currently among the best models: compared to other models, it has far fewer parameters while delivering top performance. However, it is much more complex than a plain sequential feedforward network.
The Inception network in this example follows ```Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." arXiv preprint arXiv:1502.03167 (2015).```
```
import mxnet as mx
```
For a complex network such as the Inception network, building it from single symbols is painful, so we can make a simple ```ComponentFactory``` to simplify the procedure.
Apart from differences in the number of filters, we find 2 major variants of the Inception module, so we can build two factories plus one basic ```Convolution + BatchNorm + ReLU``` factory to simplify the problem.
```
# Basic Conv + BN + ReLU factory
def ConvFactory(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, suffix=''):
conv = mx.symbol.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='conv_%s%s' %(name, suffix))
bn = mx.symbol.BatchNorm(data=conv, name='bn_%s%s' %(name, suffix))
act = mx.symbol.Activation(data=bn, act_type='relu', name='relu_%s%s' %(name, suffix))
return act
```
We can visualize our basic component
```
prev = mx.symbol.Variable(name="Previous Output")
conv_comp = ConvFactory(data=prev, num_filter=64, kernel=(7,7), stride=(2, 2))
mx.viz.plot_network(symbol=conv_comp)
```
The next step is making a component factory with all ```stride=(1, 1)```
```
# param mapping to paper:
# num_1x1 >>> #1x1
# num_3x3red >>> #3x3 reduce
# num_3x3 >>> #3x3
# num_d3x3red >>> double #3x3 reduce
# num_d3x3 >>> double #3x3
# pool >>> Pool
# proj >>> proj
def InceptionFactoryA(data, num_1x1, num_3x3red, num_3x3, num_d3x3red, num_d3x3, pool, proj, name):
# 1x1
c1x1 = ConvFactory(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = ConvFactory(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_proj' % name))
# concat
concat = mx.symbol.Concat(*[c1x1, c3x3, cd3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
# We can also visualize network with feature map shape information
# In this case, we must provide all necessary input shape info as a dict
prev = mx.symbol.Variable(name="Previous Output")
in3a = InceptionFactoryA(prev, 64, 64, 64, 64, 96, "avg", 32, name="in3a")
# shape info
# Note shape info must contain batch size although we ignore batch size in graph to save space
batch_size = 128
shape = {"Previous Output" : (batch_size, 3, 28, 28)}
# plot
mx.viz.plot_network(symbol=in3a, shape=shape)
```
We will make the other factory with ```stride=(2, 2)```
```
# param mapping to paper:
# num_1x1 >>> #1x1 (not exist!)
# num_3x3red >>> #3x3 reduce
# num_3x3 >>> #3x3
# num_d3x3red >>> double #3x3 reduce
# num_d3x3 >>> double #3x3
# pool >>> Pool (not needed, all are max pooling)
# proj >>> proj (not exist!)
def InceptionFactoryB(data, num_3x3red, num_3x3, num_d3x3red, num_d3x3, name):
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type="max", name=('max_pool_%s_pool' % name))
# concat
concat = mx.symbol.Concat(*[c3x3, cd3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
prev = mx.symbol.Variable(name="Previous Output")
in3c = InceptionFactoryB(prev, 128, 160, 64, 96, name='in3c')
mx.viz.plot_network(symbol=in3c)
```
Now we can use these factories to build the whole network
```
# data
data = mx.symbol.Variable(name="data")
# stage 1
conv1 = ConvFactory(data=data, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3), name='conv1')
pool1 = mx.symbol.Pooling(data=conv1, kernel=(3, 3), stride=(2, 2), name='pool1', pool_type='max')
# stage 2
conv2red = ConvFactory(data=pool1, num_filter=64, kernel=(1, 1), stride=(1, 1), name='conv2red')
conv2 = ConvFactory(data=conv2red, num_filter=192, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='conv2')
pool2 = mx.symbol.Pooling(data=conv2, kernel=(3, 3), stride=(2, 2), name='pool2', pool_type='max')
# stage 3
in3a = InceptionFactoryA(pool2, 64, 64, 64, 64, 96, "avg", 32, name='in3a')
in3b = InceptionFactoryA(in3a, 64, 64, 96, 64, 96, "avg", 64, name='in3b')
in3c = InceptionFactoryB(in3b, 128, 160, 64, 96, name='in3c')
# stage 4
in4a = InceptionFactoryA(in3c, 224, 64, 96, 96, 128, "avg", 128, name='in4a')
in4b = InceptionFactoryA(in4a, 192, 96, 128, 96, 128, "avg", 128, name='in4b')
in4c = InceptionFactoryA(in4b, 160, 128, 160, 128, 160, "avg", 128, name='in4c')
in4d = InceptionFactoryA(in4c, 96, 128, 192, 160, 192, "avg", 128, name='in4d')
in4e = InceptionFactoryB(in4d, 128, 192, 192, 256, name='in4e')
# stage 5
in5a = InceptionFactoryA(in4e, 352, 192, 320, 160, 224, "avg", 128, name='in5a')
in5b = InceptionFactoryA(in5a, 352, 192, 320, 192, 224, "max", 128, name='in5b')
# global avg pooling
avg = mx.symbol.Pooling(data=in5b, kernel=(7, 7), stride=(1, 1), name="global_avg", pool_type='avg')
# linear classifier
flatten = mx.symbol.Flatten(data=avg)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=1000)
softmax = mx.symbol.SoftmaxOutput(data=fc1)
# if you like, you can visualize full network structure
mx.viz.plot_network(symbol=softmax, shape={"data" : (128, 3, 224, 224)})
```
| github_jupyter |
```
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('./MNIST_data',one_hot=True)
mnist.train.images.shape
mnist.test.images.shape
mnist.validation.images.shape
train_x,train_y=mnist.train.next_batch(20000)
train_x.shape
test_x,test_y=mnist.test.next_batch(5000)
```
### Visualizing the images
```
import matplotlib.pyplot as plt
plt.rc('image',cmap='binary')
for i in range(10):  # plot 10 images
plt.subplot(2,5,i+1)
plt.imshow(train_x[i].reshape(28,28))
print(train_y[i])
plt.xticks(())
plt.yticks(())
plt.tight_layout()
plt.show()
```
### Fully connected neural network
```
from keras.models import Sequential
from keras.layers import Dense
def model1():
model=Sequential()
model.add(Dense(784,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(10,activation='softmax'))
return model
m=model1()
m.compile(optimizer='adam',loss='categorical_crossentropy')
history=m.fit(train_x,train_y,epochs=30,batch_size=32)
pred=m.predict(test_x)
from sklearn.metrics import accuracy_score
accuracy_score(pred.argmax(1),test_y.argmax(1))
```
## LeNet
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
import random
import numpy as np
# data_trans=transforms.Compose([
# transforms.Resize(32),
# transforms.ToTensor()
# transforms.Normalize(()())?  <- mean and std come from the training set, but the transform itself is applied during both training and evaluation
# ])
```
### Image normalization
Goal: normalize each image by scaling it with (x - mean) / std.
Question: after normalization, is it guaranteed that no outliers below 0 or above 1 remain? Not necessarily.
Question: which data should the normalization statistics be computed on? A. the training set, B. the evaluation set, C. training + evaluation set? Answer: A.
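As a reference point, the hard-coded per-channel mean and std used below can be recomputed from the CIFAR-10 training images. A minimal sketch (it loads the whole training set into memory at once; the printed values should roughly match the constants used in the next cell):
```
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets

# Load the training set with only ToTensor, so pixel values are in [0, 1]
raw_train = datasets.CIFAR10('data', train=True, download=True, transform=transforms.ToTensor())
# Stack all images into one tensor of shape [N, 3, 32, 32]
imgs = torch.stack([img for img, _ in raw_train])
mean = imgs.mean(dim=(0, 2, 3))  # roughly (0.4914, 0.4822, 0.4465)
std = imgs.std(dim=(0, 2, 3))    # roughly (0.2470, 0.2435, 0.2616)
print(mean, std)
```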
```
#np.mean(mnist.test.images)
#np.std(mnist.test.images)
data_trans=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.RandomCrop(32,padding=3),
transforms.ToTensor(),
    transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))  # mean and std come from the training set, but the transform itself is applied during both training and evaluation
])
data_trans_227=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.RandomCrop(32,padding=3),
transforms.Resize(227),
transforms.ToTensor(),
    transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))  # mean and std come from the training set, but the transform itself is applied during both training and evaluation
])
data_test=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))
])
data_test_227=transforms.Compose([
transforms.ToTensor(),
transforms.Resize(227),
transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))
])
train_data=datasets.CIFAR10('data',train=True,download=True,transform=data_trans)
test_data=datasets.CIFAR10('data',train=False,download=True,transform=data_test)  # use the non-augmented test transform defined above
n_train=int(len(train_data)*0.9)
n_validation=len(train_data)-n_train
train_data,valid_data=torch.utils.data.random_split(train_data,[n_train,n_validation])
print(len(train_data),len(valid_data),len(test_data))
batch_size=64
```
The dataset preparation is now complete.
```
train_iterator=torch.utils.data.DataLoader(train_data,shuffle=True,batch_size=batch_size)
valid_iterator=torch.utils.data.DataLoader(valid_data,batch_size=batch_size)
test_iterator=torch.utils.data.DataLoader(test_data,batch_size=batch_size)
class LeNet(nn.Module):
def __init__(self):
super(LeNet,self).__init__()
        # first conv layer conv1: in_channels=3, out_channels=6, kernel_size=5x5, input 32x32 -> output 28x28
self.conv1=nn.Conv2d(3,6,5)
        # second conv layer conv2: in_channels=6, out_channels=16, kernel 5x5, input 14x14 -> output 10x10
self.conv2=nn.Conv2d(6,16,5)
self.fc1=nn.Linear(16*5*5,120)
self.fc2=nn.Linear(120,80)
        self.fc3=nn.Linear(80,10)  # no explicit softmax layer needed: the cross-entropy loss applies softmax internally
def forward(self,x):
x=F.max_pool2d(F.relu(self.conv1(x)),2)
x=F.max_pool2d(F.relu(self.conv2(x)),2)
x=x.view(x.shape[0],-1)
x=F.relu(self.fc1(x))
x=F.relu(self.fc2(x))
x=self.fc3(x)
return x
class AlexNet(nn.Module):
    def __init__(self):  # __init__ defines the network architecture and its key modules
super(AlexNet,self).__init__()
self.feature_block=nn.Sequential(
nn.Conv2d(3,64,kernel_size=11,stride=4,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3,stride=2),
nn.Conv2d(64,192,kernel_size=5,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3,stride=2),
nn.Conv2d(192,384,kernel_size=3,padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384,256,kernel_size=3,padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256,256,kernel_size=3,padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3,stride=2)
)
self.avgpool=nn.AdaptiveAvgPool2d((6,6))
self.class_block=nn.Sequential(
nn.Dropout(),
nn.Linear(256*6*6,4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096,4096),
nn.ReLU(inplace=True),
nn.Linear(4096,10),
)
    def forward(self,x):  # forward data flow
x=self.feature_block(x)
x=self.avgpool(x)
x=x.view(x.size(0),256*6*6)
x=self.class_block(x)
return x
class VGGBlock(nn.Module):
    def __init__(self,in_channels,out_channels,batch_norm):  # later, improved VGG variants add BatchNorm
super(VGGBlock,self).__init__()
stack=[]
stack.append(nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1))
if batch_norm:
stack.append(nn.BatchNorm2d(out_channels))
stack.append(nn.ReLU(inplace=True))
self.model_block=nn.Sequential(*stack)
def forward(self,x):
return self.model_block(x)
class VGGNet11(nn.Module):
    def __init__(self,block,pool,batch_norm):  # block is an abstraction of a network module; pool is the pooling-layer class
super(VGGNet11,self).__init__()
self.feature_block=nn.Sequential(
block(3,64,batch_norm), #32*32
pool(kernel_size=2,stride=2),#16*16
block(64,128,batch_norm),
pool(kernel_size=2,stride=2),#8*8
block(128,256,batch_norm),
block(256,256,batch_norm),
pool(kernel_size=2,stride=2),#4*4
block(256,512,batch_norm),
block(512,512,batch_norm),
pool(kernel_size=2,stride=2),#2*2
block(512,512,batch_norm),
block(512,512,batch_norm),
pool(kernel_size=2,stride=2),#1*1
)
self.classifier=nn.Linear(512,10)
def forward(self,x):
x=self.feature_block(x)
x=x.view(x.shape[0],-1)
x=self.classifier(x)
return x
class VGGNet16(nn.Module):
    def __init__(self,block,pool,batch_norm):  # block is an abstraction of a network module; pool is the pooling-layer class
super(VGGNet16,self).__init__()
self.feature_block=nn.Sequential(
block(3,64,batch_norm), #32*32
block(64,64,batch_norm), #32*32
pool(kernel_size=2,stride=2),#16*16
block(64,128,batch_norm),
block(128,128,batch_norm),
pool(kernel_size=2,stride=2),#8*8
block(128,256,batch_norm),
block(256,256,batch_norm),
pool(kernel_size=2,stride=2),#4*4
block(256,512,batch_norm),
block(512,512,batch_norm),
block(512,512,batch_norm),
pool(kernel_size=2,stride=2),#2*2
block(512,512,batch_norm),
block(512,512,batch_norm),
block(512,512,batch_norm),
pool(kernel_size=2,stride=2),#1*1
)
self.classifier=nn.Linear(512,10)
def forward(self,x):
x=self.feature_block(x)
x=x.view(x.shape[0],-1)
x=self.classifier(x)
return x
class Inception(nn.Module):
def __init__(self,in_planes,n1x1,n3x3red,n3x3,n5x5red,n5x5,pool_planes):
super(Inception,self).__init__()
self.b1=nn.Sequential(
nn.Conv2d(in_planes,n1x1,kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
self.b2=nn.Sequential(
nn.Conv2d(in_planes,n3x3red,kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red,n3x3,kernel_size=3,padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
self.b3=nn.Sequential(
nn.Conv2d(in_planes,n5x5red,kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red,n5x5,kernel_size=5,padding=2),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
self.b4=nn.Sequential(
nn.MaxPool2d(3,stride=1,padding=1),
nn.Conv2d(in_planes,pool_planes,kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self,x):
x1=self.b1(x)
x2=self.b2(x)
x3=self.b3(x)
x4=self.b4(x)
        # concatenate the outputs of the 4 branches along the channel dimension
return torch.cat([x1,x2,x3,x4],1)
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet,self).__init__()
self.feature_block=nn.Sequential(
nn.Conv2d(1,192,kernel_size=3,padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3=Inception(192,64,96,128,16,32,32)
self.b3=Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool=nn.MaxPool2d(3,stride=2,padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool=nn.AvgPool2d(8,stride=1)
self.linear=nn.Linear(1024,10)
def forward(self,x):
out=self.feature_block(x)
out=self.a3(out)
out=self.b3(out)
out=self.maxpool(out)
out=self.a4(out)
out=self.b4(out)
out=self.c4(out)
out=self.d4(out)
out=self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
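# Note: the class below redefines GoogLeNet; the only change is that the stem convolution takes 3-channel (CIFAR-10) input instead of the 1-channel input used above.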
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet,self).__init__()
self.feature_block=nn.Sequential(
nn.Conv2d(3,192,kernel_size=3,padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3=Inception(192,64,96,128,16,32,32)
self.b3=Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool=nn.MaxPool2d(3,stride=2,padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool=nn.AvgPool2d(8,stride=1)
self.linear=nn.Linear(1024,10)
def forward(self,x):
out=self.feature_block(x)
out=self.a3(out)
out=self.b3(out)
out=self.maxpool(out)
out=self.a4(out)
out=self.b4(out)
out=self.c4(out)
out=self.d4(out)
out=self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(ResNetBlock,self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.downsample = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
        out += self.downsample(x)  # the ResNet skip connection: an element-wise tensor addition
out = F.relu(out)
return out
class ResNetLayer(nn.Module):
def __init__(self,block,n_blocks,in_channels,out_channels,stride):
super(ResNetLayer,self).__init__()
        blocks = []  # use a local list so nn.Module.modules() is not shadowed
        blocks.append(block(in_channels,out_channels,stride))
        for _ in range(n_blocks-1):
            blocks.append(block(out_channels,out_channels,1))
        self.blocks=nn.Sequential(*blocks)
def forward(self,x):
return self.blocks(x)
class ResNet18(nn.Module):
def __init__(self,layer,block):
super(ResNet18,self).__init__()
n_blocks=[2,2,2,2]
self.conv1=nn.Conv2d(3,64,kernel_size=3,stride=1,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(64)
self.rb1=layer(block,n_blocks[0],64,64,1)
self.rb2=layer(block,n_blocks[1],64,128,2)
self.rb3=layer(block,n_blocks[2],128,256,2)
self.rb4=layer(block,n_blocks[3],256,512,2)
self.fc=nn.Linear(512,10)
def forward(self,x):
out=F.relu(self.bn1(self.conv1(x)))
out=self.rb1(out)
out=self.rb2(out)
out=self.rb3(out)
out=self.rb4(out)
out=F.avg_pool2d(out,4)
out=out.view(out.shape[0],-1)
out=self.fc(out)
return out
#ResNet34->[3,4,6,3]
import math
class Bottleneck(nn.Module):
def __init__(self,in_planes,growth_rate):
super(Bottleneck,self).__init__()
self.bn1=nn.BatchNorm2d(in_planes)
self.conv1=nn.Conv2d(in_planes,4*growth_rate,kernel_size=1,bias=False)
self.bn2=nn.BatchNorm2d(4*growth_rate)
self.conv2=nn.Conv2d(4*growth_rate,growth_rate,kernel_size=3,padding=1,bias=False)
def forward(self,x):
out=self.conv1(F.relu(self.bn1(x)))#pre-activation
out=self.conv2(F.relu(self.bn2(out)))
out=torch.cat([out,x],1)
return out
class Transition(nn.Module):
def __init__(self,in_planes,out_planes):
super(Transition,self).__init__()
self.bn=nn.BatchNorm2d(in_planes)
self.conv=nn.Conv2d(in_planes,out_planes,kernel_size=1,bias=False)
def forward(self,x):
out=self.conv(F.relu(self.bn(x)))
out=F.avg_pool2d(out,2)
return out
class DenseNet(nn.Module):
def __init__(self,block,nblocks,growth_rate=12,reduction=0.5,num_classes=10):
super(DenseNet,self).__init__()
self.growth_rate=growth_rate
num_planes=2*growth_rate #32
        # initial convolution layer
self.conv1=nn.Conv2d(3,num_planes,kernel_size=3,padding=1,bias=False)
        # first dense block
self.dense1=self._make_dense_layers(block,num_planes,nblocks[0])
num_planes+=nblocks[0]*growth_rate
out_planes=int(math.floor(num_planes*reduction))
self.trans1=Transition(num_planes,out_planes)
num_planes=out_planes
        # second dense block
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate  # channel count before compression
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
        # third dense block
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
        # fourth dense block
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
        # classification layer
self.bn=nn.BatchNorm2d(num_planes)
self.linear=nn.Linear(num_planes,num_classes)
def _make_dense_layers(self,block,in_planes,nblock):
        # block: the Bottleneck class
        # nblock: how many bottleneck layers make up this dense block
layers=[]
for i in range(nblock):
layers.append(block(in_planes,self.growth_rate))
in_planes+=self.growth_rate
return nn.Sequential(*layers)
def forward(self,x):
out=self.conv1(x)
out=self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out=F.avg_pool2d(F.relu(self.bn(out)),4)
out=out.view(out.size(0),-1)
out=self.linear(out)
return out
def DenseNet121():
return DenseNet(Bottleneck,[6,12,24,16],growth_rate=32)
class Block(nn.Module):
expansion = 2
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
super(Block, self).__init__()
group_width = cardinality * bottleneck_width
self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(group_width)
self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn2 = nn.BatchNorm2d(group_width)
self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*group_width)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*group_width:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*group_width)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
super(ResNeXt, self).__init__()
self.cardinality = cardinality
self.bottleneck_width = bottleneck_width
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(num_blocks[0], 1)
self.layer2 = self._make_layer(num_blocks[1], 2)
self.layer3 = self._make_layer(num_blocks[2], 2)
# self.layer4 = self._make_layer(num_blocks[3], 2)
self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)
def _make_layer(self, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
# Increase bottleneck_width by 2 after each stage.
self.bottleneck_width *= 2
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = self.layer4(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNeXt29_2x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64)
import torch.nn.init as init
class Fire(nn.Module):
def __init__(self,inplanes,s1,e1,e3):
super(Fire,self).__init__()
self.inplanes=inplanes
self.squeeze=nn.Conv2d(inplanes,s1,kernel_size=1)
self.squeeze_activation=nn.ReLU(inplace=True)
self.expand1x1=nn.Conv2d(s1,e1,kernel_size=1)
self.expand1x1_activation=nn.ReLU(inplace=True)
        self.expand3x3=nn.Conv2d(s1,e3,kernel_size=3,padding=1)
self.expand3x3_activation=nn.ReLU(inplace=True)
def forward(self,x):
x=self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
],1)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=10):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
# Final convolution is initialized differently form the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
#nn.ReLU(inplace=True),
#nn.AvgPool2d(4, stride=1)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal(m.weight.data, mean=0.0, std=0.01)
else:
init.kaiming_uniform(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
class Block(nn.Module):
"DWConv+PointWiseConv"
def __init__(self,in_planes,out_planes,stride):
super(Block,self).__init__()
self.conv1=nn.Conv2d(in_planes,in_planes,kernel_size=3,stride=stride,padding=1,groups=in_planes,bias=False)
self.bn1=nn.BatchNorm2d(in_planes)
self.conv2=nn.Conv2d(in_planes,out_planes,kernel_size=1,stride=1,padding=0,bias=False)
self.bn2=nn.BatchNorm2d(out_planes)
def forward(self,x):
out=F.relu(self.bn1(self.conv1(x)))
out=F.relu(self.bn2(self.conv2(out)))
return out
class MobileNet(nn.Module):
cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
def __init__(self, num_classes=10):
super(MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(1024, num_classes)
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ShuffleBlock(nn.Module):
def __init__(self,groups):
super(ShuffleBlock,self).__init__()
self.groups=groups
    def forward(self,x):  # channel shuffle: reshape, transpose, reshape back
        '''
        [N,C,H,W] -> split channels into g groups -> [N,g,C/g,H,W] -> swap the group and channel axes -> reshape back to [N,C,H,W] with channels shuffled
        '''
        N,C,H,W=x.size()
        g=self.groups
        return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
class Bottleneck(nn.Module):
def __init__(self, in_planes, out_planes, stride, groups):
super(Bottleneck, self).__init__()
self.stride = stride
        mid_planes = out_planes//4  # integer division so Conv2d receives an int channel count
g = 1 if in_planes==24 else groups
self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.shuffle1 = ShuffleBlock(groups=g)
self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 2:
self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.shuffle1(out)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
res = self.shortcut(x)
out = F.relu(torch.cat([out,res], 1)) if self.stride==2 else F.relu(out+res)
return out
class ShuffleNet(nn.Module):
def __init__(self, cfg):
super(ShuffleNet, self).__init__()
out_planes = cfg['out_planes']
num_blocks = cfg['num_blocks']
groups = cfg['groups']
self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_planes = 24
self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
self.linear = nn.Linear(out_planes[2], 10)
def _make_layer(self, out_planes, num_blocks, groups):
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
cat_planes = self.in_planes if i == 0 else 0
layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups))
self.in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
5//2
class SELayer(nn.Module):
def __init__(self,channel,reduction=16):
super(SELayer,self).__init__()
self.avg_pool=nn.AdaptiveAvgPool2d(1)
self.fc=nn.Sequential(
nn.Linear(channel,channel//reduction,bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel//reduction,channel,bias=False),
nn.Sigmoid()
)
def forward(self,x):
b,c,_,_=x.size()
y=self.avg_pool(x).view(b,c)#b*c*1*1->b*c
y=self.fc(y).view(b,c,1,1)#b*c->b*c*1*1
y=y.expand_as(x)#b*c*1*1->b*c*w*h
return x*y
class SEResNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride,reduction=16):
super(SEResNetBlock,self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.se=SELayer(out_channels,reduction)
self.downsample = nn.Sequential()
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out = self.se(out)
        out += self.downsample(x)  # the ResNet skip connection: an element-wise tensor addition
out = F.relu(out)
return out
class SEResNetLayer(nn.Module):
def __init__(self,block,n_blocks,in_channels,out_channels,stride):
super(SEResNetLayer,self).__init__()
        blocks = []  # use a local list so nn.Module.modules() is not shadowed
        blocks.append(block(in_channels,out_channels,stride))
        for _ in range(n_blocks-1):
            blocks.append(block(out_channels,out_channels,1))
        self.blocks=nn.Sequential(*blocks)
def forward(self,x):
return self.blocks(x)
class SEResNet18(nn.Module):
def __init__(self,layer,block):
super(SEResNet18,self).__init__()
n_blocks=[2,2,2,2]
self.conv1=nn.Conv2d(3,64,kernel_size=3,stride=1,padding=1,bias=False)
self.bn1=nn.BatchNorm2d(64)
self.rb1=layer(block,n_blocks[0],64,64,1)
self.rb2=layer(block,n_blocks[1],64,128,2)
self.rb3=layer(block,n_blocks[2],128,256,2)
self.rb4=layer(block,n_blocks[3],256,512,2)
self.fc=nn.Linear(512,10)
def forward(self,x):
out=F.relu(self.bn1(self.conv1(x)))
out=self.rb1(out)
out=self.rb2(out)
out=self.rb3(out)
out=self.rb4(out)
out=F.avg_pool2d(out,4)
out=out.view(out.shape[0],-1)
out=self.fc(out)
return out
```
At this point, all the network definitions are complete.
## Load a model and train it
```
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dir='models'
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
device
# model=LeNet().to(device)
# model_path=os.path.join(model_dir,'lenet_c10.pt')
# model=AlexNet().to(device)
# model_path=os.path.join(model_dir,'alexnet_mnist.pt')
# model=VGGNet11(VGGBlock,nn.MaxPool2d,True).to(device)
# model_path=os.path.join(model_dir,'vgg11_mnist.pt')
# model=VGGNet16(VGGBlock,nn.MaxPool2d,True).to(device)
# model_path=os.path.join(model_dir,'vgg16_mnist.pt')
# model=GoogLeNet().to(device)
# model_path=os.path.join(model_dir,'googlenet_mnist.pt')
# model=ResNet18(ResNetLayer,ResNetBlock).to(device)
# model_path=os.path.join(model_dir,'resnet_mnist.pt')
# model=DenseNet121().to(device)
# model_path=os.path.join(model_dir,'densenet_c10.pt')
# model=ResNeXt29_2x64d().to(device)
# model_path=os.path.join(model_dir,'resnext_c10.pt')
# model=SqueezeNet().to(device)
# model_path=os.path.join(model_dir,'squeezenet_c10.pt')
# model=MobileNet().to(device)
# model_path=os.path.join(model_dir,'mobilenet_c10.pt')
# cfg={
# 'out_planes':[200,400,800],
# 'num_blocks':[4,8,4],
# 'groups':2
# }
# model=ShuffleNet(cfg).to(device)
# model_path=os.path.join(model_dir,'shufflenet_c10.pt')
model=SEResNet18(SEResNetLayer,SEResNetBlock).to(device)
model_path=os.path.join(model_dir,'seresnet_c10.pt')
optimizer=optim.Adam(model.parameters())
criterion=nn.CrossEntropyLoss()
### How to evaluate results -- computing accuracy
def accu(fx,y):
pred=fx.max(1,keepdim=True)[1]
    correct=pred.eq(y.view_as(pred)).sum()  # number of correct predictions in this batch
acc=correct.float()/pred.shape[0]
return acc
def train(model,device,iterator,optimizer,criterion):
    epoch_loss=0  # accumulated loss over the epoch
    epoch_acc=0  # accumulated accuracy over the epoch
    model.train()  # put the model into training mode
    for (x,y) in iterator:  # iterate over minibatches
        x=x.to(device)
        y=y.to(device)
        optimizer.zero_grad()
        fx=model(x)  # forward pass
        loss=criterion(fx,y)  # training loss
        acc=accu(fx,y)  # training accuracy
        loss.backward()  # backpropagation
        optimizer.step()  # update the parameters
        epoch_loss+=loss.item()
        epoch_acc+=acc.item()
return epoch_loss/len(iterator),epoch_acc/len(iterator)
def evaluate(model,device,iterator,criterion):
epoch_loss=0
epoch_acc=0
model.eval()
with torch.no_grad():
for (x,y) in iterator:
x=x.to(device)
y=y.to(device)
fx=model(x)
loss=criterion(fx,y)
acc=accu(fx,y)
epoch_loss+=loss.item()
epoch_acc+=acc.item()
return epoch_loss/len(iterator),epoch_acc/len(iterator)
```
## Start training
```
epochs=30
best_valid_loss=float('inf')
for epoch in range(epochs):
train_loss,train_acc=train(model,device,train_iterator,optimizer,criterion)
valid_loss,valid_acc=evaluate(model,device,valid_iterator,criterion)
    if valid_loss<best_valid_loss:  # save the best model so far
best_valid_loss=valid_loss
torch.save(model.state_dict(),model_path)
print('Epoch:{0}|Train Loss:{1}|Train Acc:{2}|Val Loss:{3}|Val Acc:{4}'.format(epoch+1,train_loss,train_acc,valid_loss,valid_acc))
model.load_state_dict(torch.load(model_path))
test_loss, test_acc = evaluate(model, device, test_iterator, criterion)
print('| Test Loss: {0} | Test Acc: {1} |'.format(test_loss,test_acc))
```
## Model evaluation
```
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc,precision_recall_curve,average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
prediction=[]
groundtruth=[]
def evaluate2(model,device,iterator,criterion):
epoch_loss=0
epoch_acc=0
model.eval()
with torch.no_grad():
for (x,y) in iterator:
x=x.to(device)
y=y.to(device)
fx=model(x)
loss=criterion(fx,y)
prediction.append(fx)
groundtruth.append(y)
acc=accu(fx,y)
epoch_loss+=loss.item()
epoch_acc+=acc.item()
return epoch_loss/len(iterator),epoch_acc/len(iterator)
model.load_state_dict(torch.load(model_path))
test_loss, test_acc = evaluate2(model, device, test_iterator, criterion)
print('| Test Loss: {0} | Test Acc: {1} |'.format(test_loss,test_acc))
pred=torch.cat(prediction).cpu().numpy()
pred.shape
pred[0]
gt=torch.cat(groundtruth).cpu().numpy()
gt.shape
fpr=dict()
tpr=dict()
prc=dict()
rcl=dict()
roc_auc=dict()
ap=dict()
gt_one_hot=np.eye(10)[gt.reshape(-1)]  # one-hot encode the ground truth
gt_one_hot[0]
pred_soft=F.softmax(torch.cat(prediction).cpu()).cpu().numpy()
pred_soft[0]
for i in range(10):
fpr[i],tpr[i],_=roc_curve(gt_one_hot[:,i],pred_soft[:,i])
roc_auc[i]=auc(fpr[i],tpr[i])
prc[i],rcl[i],_=precision_recall_curve(gt_one_hot[:,i],pred_soft[:,i])
ap[i]=average_precision_score(gt_one_hot[:,i],pred_soft[:,i])
tpr[0].shape
for i in range(10):
plt.plot(fpr[i],tpr[i],lw=2,label='ROC curve for class %d with Area %0.2f'%(i,roc_auc[i]))
plt.legend()
for i in range(10):
plt.plot(rcl[i],prc[i],lw=2,label='PR curve for class %d with Area %0.2f'%(i,ap[i]))
plt.legend()
```
### Micro-averaging
Micro-averaging treats every class 1...k as if it were the same class, so all ground-truth labels and predictions are flattened into a single array before the metric is computed.
```
gt_one_hot.ravel().shape
pred_soft.ravel().shape
fpr['micro'],tpr['micro'],_=roc_curve(gt_one_hot.ravel(),pred_soft.ravel())
prc['micro'],rcl['micro'],_=precision_recall_curve(gt_one_hot.ravel(),pred_soft.ravel())
ap['micro']=average_precision_score(gt_one_hot.ravel(),pred_soft.ravel(),average='micro')
roc_auc['micro']=auc(fpr['micro'],tpr['micro'])
for i in range(10):
plt.plot(rcl[i],prc[i],lw=2,label='PR curve for class %d with Area %0.2f'%(i,ap[i]))
plt.plot(rcl['micro'],prc['micro'],lw=5,linestyle=":",label='PR Micro Curve with Area %0.2f'%(ap['micro']))  # recall on x, precision on y, matching the per-class curves
plt.legend()
for i in range(10):
plt.plot(fpr[i],tpr[i],lw=2,label='ROC curve for class %d with Area %0.2f'%(i,roc_auc[i]))
plt.plot(fpr['micro'],tpr['micro'],lw=5,linestyle=":",label='ROC Micro Curve with Area %0.2f'%(roc_auc['micro']))
plt.legend()
```
### Macro-averaging
Macro-averaging computes the curve for each class separately and then averages them; here each class's curve is interpolated onto a common set of points before averaging.
```
all_fpr=np.unique(np.concatenate([fpr[i] for i in range(10)]))  # all unique fpr values, used as interpolation points
np.concatenate([fpr[i] for i in range(10)]).shape
all_fpr.shape
all_prc=np.unique(np.concatenate([prc[i] for i in range(10)]))  # all unique precision values, used as interpolation points
#interp(all_fpr,fpr[0],tpr[0]).shape
mean_tpr=np.zeros_like(all_fpr)
mean_rcl=np.zeros_like(all_prc)
mean_tpr.shape
mean_tpr
for i in range(10):
mean_tpr+=interp(all_fpr,fpr[i],tpr[i])
mean_rcl+=interp(all_prc,prc[i],rcl[i])
mean_tpr/=10
mean_rcl/=10
fpr['macro']=all_fpr
prc['macro']=all_prc
tpr['macro']=mean_tpr
rcl['macro']=mean_rcl
roc_auc['macro']=auc(fpr['macro'],tpr['macro'])
ap['macro']=average_precision_score(gt_one_hot.ravel(),pred_soft.ravel(),average='macro')
for i in range(10):
plt.plot(rcl[i],prc[i],lw=2,label='PR curve for class %d with Area %0.2f'%(i,ap[i]))
plt.plot(rcl['micro'],prc['micro'],lw=5,linestyle=":",label='PR Micro Curve with Area %0.2f'%(ap['micro']))
plt.plot(rcl['macro'],prc['macro'],lw=5,linestyle=":",label='PR Macro Curve with Area %0.2f'%(ap['macro']))
plt.legend()
for i in range(10):
plt.plot(fpr[i],tpr[i],lw=2,label='ROC curve for class %d with Area %0.2f'%(i,roc_auc[i]))
plt.plot(fpr['micro'],tpr['micro'],lw=5,linestyle=":",label='ROC Micro Curve with Area %0.2f'%(roc_auc['micro']))
plt.plot(fpr['macro'],tpr['macro'],lw=5,linestyle=":",label='ROC Macro Curve with Area %0.2f'%(roc_auc['macro']))
plt.legend()
mAP=np.asanyarray([ap[i] for i in range(10)]).mean()
mAP
```
| github_jupyter |
#Improving Computer Vision Accuracy using Convolutions
In the previous lessons you saw how to do fashion recognition using a Deep Neural Network (DNN) containing three layers -- the input layer (in the shape of the data), the output layer (in the shape of the desired output) and a hidden layer. You experimented with the impact of different hidden-layer sizes, numbers of training epochs, etc. on the final accuracy.
For convenience, here's the entire code again. Run it and take a note of the test accuracy that is printed out at the end.
```
import tensorflow as tf
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images / 255.0
test_images=test_images / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
test_loss = model.evaluate(test_images, test_labels)
```
Your accuracy is probably about 89% on training and 87% on validation...not bad...But how do you make that even better? One way is to use something called Convolutions. I'm not going to go into detail on Convolutions here, but the ultimate concept is that they narrow down the content of the image to focus on specific, distinct details.
If you've ever done image processing using a filter (like this: https://en.wikipedia.org/wiki/Kernel_(image_processing)) then convolutions will look very familiar.
In short, you take an array (usually 3x3 or 5x5) and pass it over the image. By changing the underlying pixels based on the formula within that matrix, you can do things like edge detection. So, for example, if you look at the above link, you'll see a 3x3 that is defined for edge detection where the middle cell is 8, and all of its neighbors are -1. In this case, for each pixel, you would multiply its value by 8, then subtract the value of each neighbor. Do this for every pixel, and you'll end up with a new image that has the edges enhanced.
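To make that concrete, here is a minimal NumPy sketch (not part of the original lesson) that applies exactly that 8/-1 edge-detection kernel to a tiny image with a vertical edge:
```
import numpy as np

# The 3x3 edge-detection kernel described above: centre 8, every neighbour -1
kernel = np.array([[-1, -1, -1],
                   [-1,  8, -1],
                   [-1, -1, -1]])

def convolve2d(image, kernel):
    # Slide the kernel over every interior pixel (no padding, stride 1)
    h, w = image.shape
    out = np.zeros((h - 2, w - 2))
    for y in range(h - 2):
        for x in range(w - 2):
            out[y, x] = np.sum(image[y:y + 3, x:x + 3] * kernel)
    return out

# Toy 5x5 image: dark left half, bright right half (a vertical edge)
img = np.array([[0, 0, 0, 9, 9]] * 5, dtype=float)
print(convolve2d(img, kernel))  # large-magnitude values next to the edge, 0 in flat regions
```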
This is perfect for computer vision, because often it's features that can get highlighted like this that distinguish one item from another, and the amount of information needed is then much less...because you'll just train on the highlighted features.
That's the concept of Convolutional Neural Networks. Add some layers to do convolution before you have the dense layers, and then the information going to the dense layers is more focussed, and possibly more accurate.
Run the below code -- this is the same neural network as earlier, but this time with Convolutional layers added first. It will take longer, but look at the impact on the accuracy:
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images.reshape(60000, 28, 28, 1)
training_images=training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images=test_images/255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(training_images, training_labels, epochs=5)
test_loss = model.evaluate(test_images, test_labels)
```
It's likely gone up to about 93% on the training data and 91% on the validation data.
That's significant, and a step in the right direction!
Try running it for more epochs -- say about 20, and explore the results! But while the results might seem really good, the validation results may actually go down, due to something called 'overfitting' which will be discussed later.
(In a nutshell, 'overfitting' occurs when the network learns the data from the training set really well, but it's too specialised to only that data, and as a result is less effective at seeing *other* data. For example, if all your life you only saw red shoes, then when you see a red shoe you would be very good at identifying it, but blue suede shoes might confuse you...and you know you should never mess with my blue suede shoes.)
Then, look at the code again, and see, step by step how the Convolutions were built:
Step 1 is to gather the data. You'll notice that there's a bit of a change here in that the training data needed to be reshaped. That's because the first convolution expects a single tensor containing everything, so instead of 60,000 28x28x1 items in a list, we have a single 4D list that is 60,000x28x28x1, and the same for the test images. If you don't do this, you'll get an error when training as the Convolutions do not recognize the shape.
```
import tensorflow as tf
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images.reshape(60000, 28, 28, 1)
training_images=training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images=test_images/255.0
```
Next is to define your model. Now instead of the input layer at the top, you're going to add a Convolution. The parameters are:
1. The number of convolutions you want to generate. Purely arbitrary, but good to start with something in the order of 32
2. The size of the Convolution, in this case a 3x3 grid
3. The activation function to use -- in this case we'll use relu, which you might recall is the equivalent of returning x when x>0, else returning 0
4. In the first layer, the shape of the input data.
You'll follow the Convolution with a MaxPooling layer, which is designed to compress the image while maintaining the content of the features that were highlighted by the convolution. By specifying (2,2) for the MaxPooling, the effect is to quarter the size of the image. Without going into too much detail here, the idea is that it creates a 2x2 array of pixels and picks the biggest one, thus turning 4 pixels into 1. It repeats this across the image, and in so doing halves the number of horizontal pixels and halves the number of vertical pixels, effectively reducing the image to 25% of its original size.
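As a small illustration (again not part of the original notebook), this is roughly what a 2x2 max pooling step does to a single feature map:
```
import numpy as np

def max_pool_2x2(image):
    # Keep the largest value in every non-overlapping 2x2 block (stride 2)
    h, w = image.shape
    return image[:h - h % 2, :w - w % 2].reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))

img = np.array([[1, 3, 2, 4],
                [5, 6, 1, 0],
                [7, 2, 9, 1],
                [0, 8, 3, 5]])
print(max_pool_2x2(img))
# [[6 4]
#  [8 9]]
```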
You can call model.summary() to see the size and shape of the network, and you'll notice that after every MaxPooling layer, the image size is reduced in this way.
```
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
```
Add another convolution
```
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2)
```
Now flatten the output. After this you'll just have the same DNN structure as the non convolutional version
```
tf.keras.layers.Flatten(),
```
The same 128-unit dense layer and 10-unit output layer as in the pre-convolution example:
```
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
```
Now compile the model, call the fit method to do the training, and evaluate the loss and accuracy from the test set.
```
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)
```
# Visualizing the Convolutions and Pooling
This code will show us the convolutions graphically. The print(test_labels[:100]) shows us the first 100 labels in the test set, and you can see that the ones at index 0, index 23 and index 28 are all the same value (9). They're all shoes. Let's take a look at the result of running the convolution on each, and you'll begin to see common features between them emerge. Now, when the DNN is training on that data, it's working with a lot less, and it's perhaps finding a commonality between shoes based on this convolution/pooling combination.
```
print(test_labels[:100])
import matplotlib.pyplot as plt
f, axarr = plt.subplots(3,4)
FIRST_IMAGE=0
SECOND_IMAGE=7
THIRD_IMAGE=26
CONVOLUTION_NUMBER = 1
from tensorflow.keras import models
layer_outputs = [layer.output for layer in model.layers]
activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
for x in range(0,4):
f1 = activation_model.predict(test_images[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[0,x].grid(False)
f2 = activation_model.predict(test_images[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[1,x].grid(False)
f3 = activation_model.predict(test_images[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]
axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
axarr[2,x].grid(False)
```
EXERCISES
1. Try editing the convolutions. Change the 32s to either 16 or 64. What impact will this have on accuracy and/or training time?
2. Remove the final Convolution. What impact will this have on accuracy or training time?
3. How about adding more Convolutions? What impact do you think this will have? Experiment with it.
4. Remove all Convolutions but the first. What impact do you think this will have? Experiment with it.
5. In the previous lesson you implemented a callback to check on the loss function and to cancel training once it hit a certain amount. See if you can implement that here! (One possible sketch follows this list.)
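For exercise 5, one possible sketch of such a callback is shown below (the 0.4 loss threshold is an arbitrary choice, not a value from the lesson); passing `callbacks=[StopAtLoss(0.4)]` to `model.fit` would then cancel training once the loss drops below it:
```
import tensorflow as tf

class StopAtLoss(tf.keras.callbacks.Callback):
    # Stop training once the reported loss drops below a chosen threshold
    def __init__(self, threshold=0.4):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        loss = (logs or {}).get('loss')
        if loss is not None and loss < self.threshold:
            print('\nLoss is below {}, cancelling training.'.format(self.threshold))
            self.model.stop_training = True
```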
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images.reshape(60000, 28, 28, 1)
training_images=training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images=test_images/255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)
```
| github_jupyter |
# Sudoku
This tutorial includes everything you need to set up decision optimization engines and build constraint programming models.
Table of contents:
- [Describe the business problem](#Describe-the-business-problem)
* [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
* [Use decision optimization](#Use-decision-optimization)
* [Step 1: Model the Data](#Step-1:-Model-the-data)
* [Step 2: Set up the prescriptive model](#Step-2:-Set-up-the-prescriptive-model)
* [Define the decision variables](#Define-the-decision-variables)
* [Express the business constraints](#Express-the-business-constraints)
* [Express the objective](#Express-the-objective)
* [Solve with Decision Optimization solve service](#Solve-with-Decision-Optimization-solve-service)
* [Step 3: Investigate the solution and run an example analysis](#Step-3:-Investigate-the-solution-and-then-run-an-example-analysis)
* [Summary](#Summary)
****
### Describe the business problem
* Sudoku is a logic-based, combinatorial number-placement puzzle.
* The objective is to fill a 9x9 grid with digits so that each column, each row,
and each of the nine 3x3 sub-grids that compose the grid contains all of the digits from 1 to 9.
* The puzzle setter provides a partially completed grid, which for a well-posed puzzle has a unique solution.
#### References
* See <a href="https://en.wikipedia.org/wiki/Sudoku" target="_blank" rel="noopener noreferrer">https://en.wikipedia.org/wiki/Sudoku</a> for details.
*****
## How decision optimization can help
* Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes.
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
* Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
<br/>
+ For example:
+ Automate complex decisions and trade-offs to better manage limited resources.
+ Take advantage of a future opportunity or mitigate a future risk.
+ Proactively update recommendations based on changing events.
+ Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
## Use decision optimization
### Step 1: Model the data
First import some of the packages you need to use.
```
from docplex.cp.model import *
from sys import stdout
```
#### Grid range
```
GRNG = range(9)
```
#### Different problems
A zero means that the cell is to be filled with the appropriate value.
```
SUDOKU_PROBLEM_1 = ( (0, 0, 0, 0, 9, 0, 1, 0, 0),
(2, 8, 0, 0, 0, 5, 0, 0, 0),
(7, 0, 0, 0, 0, 6, 4, 0, 0),
(8, 0, 5, 0, 0, 3, 0, 0, 6),
(0, 0, 1, 0, 0, 4, 0, 0, 0),
(0, 7, 0, 2, 0, 0, 0, 0, 0),
(3, 0, 0, 0, 0, 1, 0, 8, 0),
(0, 0, 0, 0, 0, 0, 0, 5, 0),
(0, 9, 0, 0, 0, 0, 0, 7, 0),
)
SUDOKU_PROBLEM_2 = ( (0, 7, 0, 0, 0, 0, 0, 4, 9),
(0, 0, 0, 4, 0, 0, 0, 0, 0),
(4, 0, 3, 5, 0, 7, 0, 0, 8),
(0, 0, 7, 2, 5, 0, 4, 0, 0),
(0, 0, 0, 0, 0, 0, 8, 0, 0),
(0, 0, 4, 0, 3, 0, 5, 9, 2),
(6, 1, 8, 0, 0, 0, 0, 0, 5),
(0, 9, 0, 1, 0, 0, 0, 3, 0),
(0, 0, 5, 0, 0, 0, 0, 0, 7),
)
SUDOKU_PROBLEM_3 = ( (0, 0, 0, 0, 0, 6, 0, 0, 0),
(0, 5, 9, 0, 0, 0, 0, 0, 8),
(2, 0, 0, 0, 0, 8, 0, 0, 0),
(0, 4, 5, 0, 0, 0, 0, 0, 0),
(0, 0, 3, 0, 0, 0, 0, 0, 0),
(0, 0, 6, 0, 0, 3, 0, 5, 4),
(0, 0, 0, 3, 2, 5, 0, 0, 6),
(0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0)
)
try:
import numpy as np
import matplotlib.pyplot as plt
VISU_ENABLED = True
except ImportError:
VISU_ENABLED = False
def print_grid(grid):
""" Print Sudoku grid """
for l in GRNG:
if (l > 0) and (l % 3 == 0):
stdout.write('\n')
for c in GRNG:
v = grid[l][c]
stdout.write(' ' if (c % 3 == 0) else ' ')
stdout.write(str(v) if v > 0 else '.')
stdout.write('\n')
def draw_grid(values):
%matplotlib inline
fig, ax = plt.subplots(figsize =(4,4))
min_val, max_val = 0, 9
R = range(0,9)
for l in R:
for c in R:
v = values[c][l]
s = " "
if v > 0:
s = str(v)
ax.text(l+0.5,8.5-c, s, va='center', ha='center')
ax.set_xlim(min_val, max_val)
ax.set_ylim(min_val, max_val)
ax.set_xticks(np.arange(max_val))
ax.set_yticks(np.arange(max_val))
ax.grid()
plt.show()
def display_grid(grid, name):
stdout.write(name)
stdout.write(":\n")
if VISU_ENABLED:
draw_grid(grid)
else:
print_grid(grid)
display_grid(SUDOKU_PROBLEM_1, "PROBLEM 1")
display_grid(SUDOKU_PROBLEM_2, "PROBLEM 2")
display_grid(SUDOKU_PROBLEM_3, "PROBLEM 3")
```
#### Choose your preferred problem (SUDOKU_PROBLEM_1 or SUDOKU_PROBLEM_2 or SUDOKU_PROBLEM_3)
If you change the problem, ensure that you re-run all the cells below this one.
```
problem = SUDOKU_PROBLEM_3
```
### Step 2: Set up the prescriptive model
```
mdl = CpoModel(name="Sudoku")
```
#### Define the decision variables
```
grid = [[integer_var(min=1, max=9, name="C" + str(l) + str(c)) for l in GRNG] for c in GRNG]
```
#### Express the business constraints
Add alldiff constraints for lines
```
for l in GRNG:
mdl.add(all_diff([grid[l][c] for c in GRNG]))
```
Add alldiff constraints for columns
```
for c in GRNG:
mdl.add(all_diff([grid[l][c] for l in GRNG]))
```
Add alldiff constraints for sub-squares
```
ssrng = range(0, 9, 3)
for sl in ssrng:
for sc in ssrng:
mdl.add(all_diff([grid[l][c] for l in range(sl, sl + 3) for c in range(sc, sc + 3)]))
```
Initialize known cells
```
for l in GRNG:
for c in GRNG:
v = problem[l][c]
if v > 0:
grid[l][c].set_domain((v, v))
```
#### Solve with Decision Optimization solve service
```
print("\nSolving model....")
msol = mdl.solve(TimeLimit=10)
```
### Step 3: Investigate the solution and then run an example analysis
```
display_grid(problem, "Initial problem")
if msol:
sol = [[msol[grid[l][c]] for c in GRNG] for l in GRNG]
stdout.write("Solve time: " + str(msol.get_solve_time()) + "\n")
display_grid(sol, "Solution")
else:
stdout.write("No solution found\n")
```
## Summary
You have learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to formulate and solve a Constraint Programming model.
#### References
* <a href="https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html" target="_blank" rel="noopener noreferrer">Decision Optimization CPLEX Modeling for Python documentation</a>
* <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/getting-started/welcome-main.html" target="_blank" rel="noopener noreferrer">Watson Studio documentation</a>
<hr>
Copyright © 2017-2021. This notebook and its source code are released under the terms of the MIT License.
<div style="background:#F5F7FA; height:110px; padding: 2em; font-size:14px;">
<span style="font-size:18px;color:#152935;">Love this notebook? </span>
<span style="font-size:15px;color:#152935;float:right;margin-right:40px;">Don't have an account yet?</span><br>
<span style="color:#5A6872;">Share it with your colleagues and help them discover the power of Watson Studio!</span>
<span style="border: 1px solid #3d70b2;padding:8px;float:right;margin-right:40px; color:#3d70b2;"><a href="https://ibm.co/wsnotebooks" target="_blank" style="color: #3d70b2;text-decoration: none;">Sign Up</a></span><br>
</div>
| github_jupyter |
## graphml2heatmap.ipynb
This notebook transforms a graphml file (of the type produced by https://github.com/SuLab/genewikiworld) and creates a heatmap visualization
```
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
G = nx.read_graphml('../data/wikidata-update-2021-09-06.graphml')
G
nx.draw(G)
# inspect nodes
for node in G.nodes(data=True):
print(node)
print(node[1]['count'])
# inspect edges
for edge in G.edges(data=True):
print(edge)
# transform graph data into data frame
numOfRows=10
data = []
for edge in G.edges(data=True):
data.append([G.nodes[edge[0]]['NodeLabel'],G.nodes[edge[1]]['NodeLabel'],edge[2]['count']])
for node in G.nodes(data=True):
data.append([node[1]['NodeLabel'],"COUNT",node[1]['count']])
df = pd.DataFrame(data,columns=["Node1","Node2","count"])
df
df[df.Node1=="gene"]
# get full list of unique node types
nodes = np.sort(pd.unique(df[['Node1', 'Node2']].values.ravel()))
nodes
# create output dataframe with edge counts
df2 = pd.DataFrame(0,nodes,nodes)
df2
# populate output data frame df2 (combine edges with different predicates and edges in opposite directions)
for i,j in df.iterrows():
if( j['Node1'] > j['Node2'] ):
df2.at[j['Node1'],j['Node2']] = df2.at[j['Node1'],j['Node2']] + j['count']
else:
df2.at[j['Node2'],j['Node1']] = df2.at[j['Node2'],j['Node1']] + j['count']
# remove negative values (from when SPARQL query times out, count is set to -1)
df2[df2 < 0] = 0
df2
def pretty_round(a, digits=0):
    # DataFrame.apply passes each column as a Series, so recurse over its values
    if isinstance(a, pd.Series):
        return [pretty_round(i) for i in a]
    # Only abbreviate integer counts (Python or NumPy ints); anything else gets no label
    if not isinstance(a, (int, np.integer)):
        return None
    i = 0
    while int(a) >= 1e3:
        a /= 1e3
        i += 1
    return str(int(round(a, digits))) + " kMBT"[i]
df2Label = df2.apply(pretty_round)
df2Label
# perform log10 transformation
df3 = np.log10(df2 + 1)
df3
df4 = df3.copy()  # work on a copy so the log-transformed df3 is left untouched
df4[df4==0] = None
df4
# create plot
sns.set_theme(style="white", font_scale=2)
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(17, 15))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
mask = np.zeros_like(df3)
mask[np.triu_indices_from(mask,k=1)] = True
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(df4,
cmap=cmap,
# vmax=100,
mask=mask,
annot=df2Label,
annot_kws={"size": 15.5,"fontweight":"bold"},
fmt="",
center=0,
square=True,
linewidths=2,
linecolor="white",
cbar=False,
)
# create plot
sns.set_theme(style="white", font_scale=2)
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(17, 15))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(260, 260, as_cmap=True)
mask = np.zeros_like(df3)
mask[np.triu_indices_from(mask,k=1)] = True
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(df4,
cmap=cmap,
# vmax=100,
mask=mask,
annot=df2Label,
annot_kws={"size": 15.5,"fontweight":"bold"},
fmt="",
center=0,
square=True,
linewidths=2,
linecolor="white",
cbar=False,
)
```
| github_jupyter |
### Untappd Scraper
Due to the unique requirements of scraping untappd, selenium (headless or otherwise) is our best choice.
1) Login required
2) Must select 'Show More' to see more than a handful of both search results and reviews
3) Odd design that is surprisingly difficult to use requests with
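For reference, a headless setup only changes how the browser object is constructed. The sketch below is illustrative (the driver path and options are assumptions, not part of this notebook, which drives a visible browser further down):
```
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--window-size=1920,1080")
# The chromedriver path here is a placeholder
browser = webdriver.Chrome('path/to/chromedriver', options=options)
```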
```
import os
import glob
import json
import time
import pickle
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
```
### Load config info
Username, Password, and selenium driver path
```
config_path = 'untappd.cfg'
with open(config_path) as rdr:
config = json.load(rdr)
```
### Set Variables
```
LOGIN_URL = 'https://untappd.com/login'
CHUNK_SIZE = 25 ## URLs to scrape per session.
search_pkl = 'ipa_urls.pkl' ## Only perform the search once; results are pickled. If this file exists, the search will not be performed
```
## Log in
1) Create Browser Object
2) Find login elements
3) Fill them out, submit
```
browser = webdriver.Chrome(config['driver_path'])
browser.get(LOGIN_URL)
username = browser.find_element_by_id("username")
password = browser.find_element_by_id("password")
username.send_keys(config['username'])
password.send_keys(config['password'])
browser.find_element_by_xpath("//input[@type='submit']").click()
```
### Note:
Sometimes a prompt to download the app appears along the bottom. For some reason it is hard to identify and click with selenium, but closing it once per session keeps it closed. Go ahead and look for it now, before continuing; if it's there, just click 'x'.
### Identify IPA URLS
If the pickle file exists, load it. Otherwise, perform search.
```
def get_beers_from_search(search_term, browser):
## Create search URL and go
browser.get('https://untappd.com/search?q={}'.format(search_term.strip().replace(' ', '+')))
## Click the show more button
for i in range(25):
try:
browser.find_element_by_xpath("//*[contains(text(), 'Show More')]").click()
time.sleep(2)
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except:
print('Error clicking "Show More" on iteration', i)
time.sleep(5)
## Find beer links on page
results = browser.find_elements_by_css_selector('.beer-item')
urls = []
for result in results:
for url in result.find_elements_by_tag_name('a'):
if url.get_attribute('href').startswith(r'https://untappd.com/beer'):
urls.append(url.get_attribute('href'))
print(len(urls), 'beers found for search', search_term)
return urls
def write_pkl(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f)
def read_pkl(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
if not os.path.isfile(search_pkl):
print('Pickle file not found. Performing search.')
beer_urls = set()
search_terms = ['ipa', 'dipa', 'double ipa', 'hazy ipa']
for search_term in search_terms:
urls = set(get_beers_from_search(search_term, browser))
beer_urls = beer_urls.union(urls)
print('total url count:', len(beer_urls))
## Write out our pickle file
write_pkl('ipa_urls.pkl', beer_urls)
else:
## Read pickled data
print("Reading pickled URLS")
beer_urls = read_pkl('ipa_urls.pkl')
```
### Create Function for identifying/naming files
```
def create_naming_funct(file_format):
"""file_format is intended to come in as folder/filename_{}.extension, where the {} will be replaced by a number (0000, 00001, etc)"""
file_format = file_format
def identify_checkpoint():
nonlocal file_format
existing_files = glob.glob(file_format.format('*'))
return len(existing_files), file_format.format(str(len(existing_files)).zfill(5))
return identify_checkpoint
beer_checkpointer = create_naming_funct('data/beer_info_{}.json')
review_checkpointer = create_naming_funct('data/reviews_{}.json')
user_checkpointer = create_naming_funct('data/users_{}.json')
url_checkpointer = create_naming_funct('checkpoints/run_checkpoint_{}.pkl')
```
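Each checkpointer closure returns how many files of that kind already exist together with the next free filename. For example (filenames illustrative):
```
# If data/beer_info_00000.json ... data/beer_info_00002.json already exist:
count, next_file = beer_checkpointer()
print(count, next_file)  # -> 3 data/beer_info_00003.json
```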
### Once we have a set of URLS to iterate over, we can begin scraping reviews.
We'll have to load each page, push the show more button a bunch, and scrape the reviews.
```
def get_beer_info(browser, url):
classnames = 'name,brewery,style,abv,ibu,rating,raters,date'.split(',')
browser.get(url)
beer_id = url.split('/')[-1]
## Populate the beer info
beer_info = {}
beer_info['id'] = beer_id
try:
element = browser.find_element_by_class_name('beer-descrption-read-more')
if not element:
print('didnt find element. pausing')
time.sleep(10)
except:
print('Exception caught. pausing')
time.sleep(10)
## If there is no "Show More" button, catch that error
try:
browser.find_element_by_class_name('beer-descrption-read-more').find_element_by_link_text('Show More').click()
time.sleep(0.5)
except:
pass
beer_info['description'] = browser.find_element_by_class_name('beer-descrption-read-less').text[:-10]
for classname in classnames:
beer_info[classname] = browser.find_element_by_class_name(classname).text
if classname == 'name':
beer_info[classname] = browser.find_element_by_class_name(classname).find_element_by_tag_name('h1').text
return beer_info
def scrape_reviews(browser, beer_id):
## Show more reviews!
## Click the show more button
fail_count = 0
for i in range(50):
try:
browser.find_elements_by_xpath("//*[contains(text(), 'Show More')]")[1].click()
time.sleep(2)
except:
fail_count += 1
if fail_count > 2:
break
time.sleep(4)
## Get reviews
user_reviews = []
user_reviews_elems = browser.find_element_by_id('main-stream').find_elements_by_class_name('checkin')
for user_review_elem in user_reviews_elems:
rating_dict = {}
rating_dict['beer_id'] = beer_id
rating_dict['user_id'] = user_review_elem.find_element_by_class_name('user').get_attribute('href')
try:
rating = None
rating_spans = user_review_elem.find_element_by_class_name('rating-serving').find_elements_by_tag_name('span')
for span in rating_spans:
if span.get_attribute('class').startswith('rating small'):
rating = span.get_attribute('class').split(' ')[-1][1:]
except:
continue
rating_dict['comment'] = None
try:
rating_dict['comment'] = user_review_elem.find_element_by_class_name('comment-text').text
except:
pass
if rating:
if len(rating) > 1:
rating = rating[0] + '.' + rating[1:]
rating = float(rating)
rating_dict['rating'] = rating
user_reviews.append(rating_dict)
return user_reviews
## Identify checkpoints
if url_checkpointer()[0] == 0:
print("Creating first checkpoint")
url_list = list(beer_urls)
write_pkl(url_checkpointer()[1], url_list)
else:
next_checkpoint = url_checkpointer()
previous_checkpoint = next_checkpoint[1].replace(str(next_checkpoint[0]).zfill(5), str(next_checkpoint[0]-1).zfill(5))
print('Reading checkpoint file', previous_checkpoint)
url_list = read_pkl(previous_checkpoint)
for x in range(1):
reviews = []
beers = []
start = time.time()
for i in range(CHUNK_SIZE):
url = url_list.pop()
print(str(i) + ':', url, end=' ')
beer_info = get_beer_info(browser, url)
beer_reviews = scrape_reviews(browser, beer_info['id'])
reviews.extend(beer_reviews)
beers.append(beer_info)
time.sleep(20)
print('Reviews found:', len(beer_reviews), len(json.dumps(beer_reviews)), 'Total Review Count:', len(reviews))
with open(beer_checkpointer()[1], 'w') as wtr:
json.dump(beers, wtr)
with open(review_checkpointer()[1], 'w') as wtr:
json.dump(reviews, wtr)
write_pkl(url_checkpointer()[1], url_list)
print('Run', x, 'took', time.time()-start, 'seconds. pausing.')
    time.sleep(240) # 4 minute pause between chunks
```
### Get user information!
Identify all user urls, and figure out what information we can scrape
```
user_urls = set([review['user_id'] for review in reviews])
len(user_urls)
user_data = []
for user_url in list(user_urls)[:100]:
browser.get(user_url)
user_info = browser.find_element_by_class_name('user-info')
user_dict = {}
user_dict['name'] = user_info.find_element_by_class_name('info').find_element_by_tag_name('h1').text
user_dict['username'] = user_info.find_element_by_class_name('username').text
    # NOTE: this reads the 'username' element again; if Untappd exposes a dedicated
    # location element, its class name should be substituted here.
    user_dict['location'] = user_info.find_element_by_class_name('username').text
    user_dict['location'] = None if len(user_dict['location']) == 0 else user_dict['location']
user_dict['social'] = {}
social_list = user_info.find_element_by_class_name('social').find_elements_by_tag_name('a')
for social in social_list:
user_dict['social'][social.text] = social.get_attribute('href')
user_dict['stats'] = {}
stats_list = user_info.find_element_by_class_name('stats').find_elements_by_tag_name('a')
for stat in stats_list:
user_dict['stats'][stat.find_element_by_class_name('title').text] = int(stat.find_element_by_class_name('stat').text.replace(',', ''))
user_data.append(user_dict)
with open(user_checkpointer()[1], 'w') as wtr:
json.dump(user_data, wtr)
len(user_data)
# Leftover inspection cell: raw text of the last review element scraped above
user_review_elem.text
```
| github_jupyter |
```
import numpy as np
import cPickle as pickle
import scipy
import combo
import os
import urllib
import matplotlib.pyplot as plt
%matplotlib inline
def download():
if not os.path.exists('data/s5-210.csv'):
if not os.path.exists('data'):
os.mkdir('data')
urllib.urlretrieve('http://www.tsudalab.org/files/s5-210.csv', 'data/s5-210.csv')
def load_data():
download()
A = np.asarray( np.loadtxt('data/s5-210.csv',skiprows=1,delimiter=',') )
X = A[:,0:3]
t = -A[:,3]
return X, t
# Load the data
# X is the N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate.
# t is the N-dimensional vector that represents the corresponding negative energy of search candidates.
# ( It is of course unknown in practice. )
X, t = load_data()
# Normalize the mean and standard deviation along each column of X to 0 and 1, respectively
X = combo.misc.centering( X )
# Declare the class for calling the simulator.
# In this tutorial, we simply refer to the value of t.
# If you want to apply combo to other problems, you have to customize this class.
class simulator:
def __init__( self ):
_, self.t = load_data()
def __call__( self, action ):
return self.t[action]
# Design of policy
# Declaring the policy by
policy = combo.search.discrete.policy(test_X=X)
# test_X is the set of candidates which is represented by numpy.array.
# Each row vector represents the feature vector of the corresponding candidate
# set the seed parameter
policy.set_seed( 0 )
# If you want to perform the initial random search before starting the Bayesian optimization,
# the random sampling is performed by
res = policy.random_search(max_num_probes=2, num_search_each_probe=10, simulator=simulator())
# Input:
# max_num_probes: number of random search
# num_search_each_probe: number of probes
# simulator: simulator which is defined above
# output: combo.search.discrete.results.history (class)
# multiple probe Bayesian search
# The single query version of COMBO is performed by
res = policy.bayes_search(max_num_probes=8, num_search_each_probe=10, simulator=simulator(), score='EI',
interval=2, num_rand_basis=0)
# Input
# max_num_probes: number of searching by Bayesian optimization
# num_search_each_probe: number of probes
# simulator: the class of simulator which is defined above
# score: the type of acquisition function. TS, EI and PI are available
# interval: the timing for learning the hyper parameters.
#           In this case, the hyper parameters are learned every 2 steps
# If you set the negative value to interval, the hyper parameter learning is not performed
# If you set zero to interval, the hyper parameter learning is performed only at the first step
# num_rand_basis: the number of basis function. If you choose 0, ordinary Gaussian process runs
# The result of searching is summarized in the class combo.search.discrete.results.history()
# res.fx: observed negative energy at each step
# res.chosed_actions: history of chosen actions
# fbest, best_action= res.export_sequence_best_fx(): current best fx and current best action
# that has been observed until each probe
# res.num_runs: number of probes
# res.total_num_search: total number of actions
print 'f(x)='
print res.fx[0:res.total_num_search]
best_fx, best_action = res.export_sequence_best_fx()
print 'current best at each probe'
print best_fx
print 'current best action at each probe='
print best_action
print 'history of chosen actions='
print res.chosed_actions[0:res.total_num_search]
```
| github_jupyter |
# Strategy analysis example
Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.
The following assumes you work with SampleStrategy, have data for the 5m timeframe from Binance, and have downloaded it into the data directory in the default location.
## Setup
```
from pathlib import Path
from freqtrade.configuration import Configuration
# Customize these according to your needs.
# Initialize empty configuration object
config = Configuration.from_files([])
# Optionally, use existing configuration file
# config = Configuration.from_files(["config.json"])
# Define some constants
config["timeframe"] = "5m"
# Name of the strategy class
config["strategy"] = "SampleStrategy"
# Location of the data
data_location = Path(config['user_data_dir'], 'data', 'binance')
# Pair to analyze - Only use one pair here
pair = "BTC/USDT"
# Load data using values set above
from freqtrade.data.history import load_pair_history
candles = load_pair_history(datadir=data_location,
timeframe=config["timeframe"],
pair=pair,
data_format = "hdf5",
)
# Confirm success
print("Loaded " + str(len(candles)) + f" rows of data for {pair} from {data_location}")
candles.head()
```
## Load and run strategy
* Rerun each time the strategy file is changed
```
# Load strategy using values set above
from freqtrade.resolvers import StrategyResolver
strategy = StrategyResolver.load_strategy(config)
# Generate buy/sell signals using strategy
df = strategy.analyze_ticker(candles, {'pair': pair})
df.tail()
```
### Display the trade details
* Note that using `data.head()` would also work, however most indicators have some "startup" data at the top of the dataframe.
* Some possible problems
* Columns with NaN values at the end of the dataframe
* Columns used in `crossed*()` functions with completely different units
* Comparison with full backtest
* having 200 buy signals as output for one pair from `analyze_ticker()` does not necessarily mean that 200 trades will be made during backtesting.
    * Assuming you use only one condition such as `df['rsi'] < 30` as the buy condition, this will generate multiple consecutive "buy" signals for each pair (until RSI rises back above 29). The bot will only buy on the first of these signals (and only if a trade slot ("max_open_trades") is still available), or on one of the later signals in the run, as soon as a slot becomes available; see the sketch below.
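A small sketch (not part of the strategy) that makes this concrete by counting how many separate runs of consecutive buy signals the dataframe contains:
```
# Count distinct runs of consecutive buy signals; this is a rough proxy for the
# number of entries the bot could actually take, unlike the raw signal count.
buy = df['buy'].fillna(0).astype(int)
signal_runs = ((buy == 1) & (buy.shift(1, fill_value=0) == 0)).sum()
print(f"{buy.sum()} raw buy signals in {signal_runs} distinct signal runs")
```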
```
# Report results
print(f"Generated {df['buy'].sum()} buy signals")
data = df.set_index('date', drop=False)
data.tail()
```
## Load existing objects into a Jupyter notebook
The following cells assume that you have already generated data using the cli.
They will allow you to drill deeper into your results, and perform analysis which otherwise would make the output very difficult to digest due to information overload.
### Load backtest results to pandas dataframe
Analyze a trades dataframe (also used below for plotting)
```
from freqtrade.data.btanalysis import load_backtest_data, load_backtest_stats
# if backtest_dir points to a directory, it'll automatically load the last backtest file.
backtest_dir = config["user_data_dir"] / "backtest_results"
# backtest_dir can also point to a specific file
# backtest_dir = config["user_data_dir"] / "backtest_results/backtest-result-2020-07-01_20-04-22.json"
# You can get the full backtest statistics by using the following command.
# This contains all information used to generate the backtest result.
stats = load_backtest_stats(backtest_dir)
strategy = 'SampleStrategy'
# All statistics are available per strategy, so if `--strategy-list` was used during backtest, this will be reflected here as well.
# Example usages:
print(stats['strategy'][strategy]['results_per_pair'])
# Get pairlist used for this backtest
print(stats['strategy'][strategy]['pairlist'])
# Get market change (average change of all pairs from start to end of the backtest period)
print(stats['strategy'][strategy]['market_change'])
# Maximum drawdown
print(stats['strategy'][strategy]['max_drawdown'])
# Maximum drawdown start and end
print(stats['strategy'][strategy]['drawdown_start'])
print(stats['strategy'][strategy]['drawdown_end'])
# Get strategy comparison (only relevant if multiple strategies were compared)
print(stats['strategy_comparison'])
# Load backtested trades as dataframe
trades = load_backtest_data(backtest_dir)
# Show value-counts per pair
trades.groupby("pair")["sell_reason"].value_counts()
```
### Load live trading results into a pandas dataframe
In case you did already some trading and want to analyze your performance
```
from freqtrade.data.btanalysis import load_trades_from_db
# Fetch trades from database
trades = load_trades_from_db("sqlite:///tradesv3.sqlite")
# Display results
trades.groupby("pair")["sell_reason"].value_counts()
```
## Analyze the loaded trades for trade parallelism
This can be useful to find the best `max_open_trades` parameter, when used with backtesting in conjunction with `--disable-max-market-positions`.
`analyze_trade_parallelism()` returns a timeseries dataframe with an "open_trades" column, specifying the number of open trades for each candle.
```
from freqtrade.data.btanalysis import analyze_trade_parallelism
# Analyze the above
parallel_trades = analyze_trade_parallelism(trades, '5m')
parallel_trades.plot()
```
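As a small follow-up (a sketch, not in the original notebook), the peak of the `open_trades` column indicates how many trade slots this backtest would actually have used:
```
# Highest number of concurrently open trades observed in the backtest
print("Maximum concurrently open trades:", parallel_trades['open_trades'].max())
```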
## Plot results
Freqtrade offers interactive plotting capabilities based on plotly.
```
from freqtrade.plot.plotting import generate_candlestick_graph
# Limit graph period to keep plotly quick and reactive
# Filter trades to one pair
trades_red = trades.loc[trades['pair'] == pair]
data_red = data['2019-06-01':'2019-06-10']
# Generate candlestick graph
graph = generate_candlestick_graph(pair=pair,
data=data_red,
trades=trades_red,
indicators1=['sma20', 'ema50', 'ema55'],
indicators2=['rsi', 'macd', 'macdsignal', 'macdhist']
)
# Show graph inline
# graph.show()
# Render graph in a separate window
graph.show(renderer="browser")
```
## Plot average profit per trade as distribution graph
```
import plotly.figure_factory as ff
hist_data = [trades.profit_ratio]
group_labels = ['profit_ratio'] # name of the dataset
fig = ff.create_distplot(hist_data, group_labels,bin_size=0.01)
fig.show()
```
Feel free to submit an issue or Pull Request enhancing this document if you would like to share ideas on how to best analyze the data.
| github_jupyter |
```
import gc
gc.collect()
import sys
sys.path.insert(0, '../')
import logging
logging.basicConfig(level=logging.ERROR)
from datetime import datetime, timedelta
from cryptotrader.exchange_api.poloniex import Poloniex
from cryptotrader.envs.trading import BacktestDataFeed, BacktestEnvironment
from cryptotrader.envs.utils import make_balance, convert_to
from cryptotrader.agents import apriori
from cryptotrader.utils import array_normalize, simplex_proj
from bokeh.io import output_notebook
output_notebook()
%matplotlib inline
# %load_ext line_profiler
# Simulation Params
test_name = 'Momentum_agent'
obs_steps = 200 # Observation steps, number of candles required by the agent for calculations
period = 120 # Observation period in minutes, also trading frequency
pairs = ["USDT_BTC", "USDT_ETH", "USDT_LTC", "USDT_XRP", "USDT_XMR", "USDT_ETC", "USDT_ZEC", "USDT_DASH"] # Universe, some survivor bias here...
fiat_symbol = 'USDT' # Quote symbol
init_funds = make_balance(crypto=1 / len(pairs), fiat=0.0, pairs=pairs) # Initial equally distributed portfolio
data_dir = './data' # Data directory for offline testing
## Environment setup
# Data feed setup
tapi = Poloniex()
tapi = BacktestDataFeed(tapi, period, pairs=pairs, balance=init_funds, load_dir=data_dir)
# Download new data from the exchange
# tapi.download_data(end=datetime.timestamp(datetime.utcnow() - timedelta(days=100)),
# start=datetime.timestamp(datetime.utcnow() - timedelta(days=300)))
# # And save it to disk, if you want to
# tapi.save_data(data_dir + '/train')
# Or load data from disk
tapi.load_data('/train')
# Environment setup
env = BacktestEnvironment(period, obs_steps, tapi, fiat_symbol, test_name)
obs = env.reset()
# Agent setup
agent = apriori.MomentumTrader(ma_span=[3,50], std_span=50, activation=simplex_proj)
# Training run
# Optimization params
nb_steps = 1000
batch_size = 1
nb_max_episode_steps = 12
# Params search space
hp = {
'ma1': [2, env.obs_steps],
'ma2': [2, env.obs_steps],
'std_span': [2, env.obs_steps],
'alpha_up': [1e-1, 1],
'alpha_down': [1e-1, 1]
}
search_space = {'mean_type':{'simple': hp,
'exp': hp,
'kama': hp
}
}
# Params constraints
constrains = [lambda mean_type, ma1, ma2, std_span, alpha_up, alpha_down: ma1 < ma2]
# Optimization session, this may take some time
opt_params, info = agent.fit(env, nb_steps, batch_size, search_space, constrains, nb_max_episode_steps=nb_max_episode_steps)
print("\n", opt_params,"\n", env.status)
# Saved previous found params for comparison...
# {'mean_type': 'kama', 'alpha_down': 0.4128425807825884, 'alpha_up': 0.37140222586733046, 'ma1': 133, 'ma2': 234, 'std_span': 39}
# Run on training data
agent.test(env, verbose=True)
# Display results
env.plot_results();
# Validation run
# Download data
# tapi.download_data(end=datetime.timestamp(datetime.now() - timedelta(days=50)),
# start=datetime.timestamp(datetime.now() - timedelta(days=100)))
# And save it to disk, if you want to
# tapi.save_data(data_dir + '/eval')
# or load from disk
env.tapi.load_data('/eval')
# Run evaluation
agent.test(env, verbose=True)
# Show results
env.plot_results();
# Test run
# Download data
# tapi.download_data(end=datetime.timestamp(datetime.now()),
# start=datetime.timestamp(datetime.now() - timedelta(days=50)))
# And save it to disk, if you want to
# tapi.save_data(data_dir + '/test')
# Or load form disk
env.tapi.load_data('/test')
# Run test
agent.test(env, verbose=True)
# Show results
env.plot_results();
```
| github_jupyter |
___
<a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a>
___
# SF Salaries Exercise - Solutions
Welcome to a quick exercise for you to practice your pandas skills! We will be using the [SF Salaries Dataset](https://www.kaggle.com/kaggle/sf-salaries) from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along.
** Import pandas as pd.**
```
import pandas as pd
```
** Read Salaries.csv as a dataframe called sal.**
```
sal = pd.read_csv('Salaries.csv')
```
** Check the head of the DataFrame. **
```
sal.head()
```
** Use the .info() method to find out how many entries there are.**
```
sal.info() # 148654 Entries
```
**What is the average BasePay ?**
```
sal['BasePay'].mean()
```
** What is the highest amount of OvertimePay in the dataset ? **
```
sal['OvertimePay'].max()
```
** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). **
```
sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['JobTitle']
```
** How much does JOSEPH DRISCOLL make (including benefits)? **
```
sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['TotalPayBenefits']
```
** What is the name of highest paid person (including benefits)?**
```
sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].max()] #['EmployeeName']
# or
# sal.loc[sal['TotalPayBenefits'].idxmax()]
```
** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?**
```
sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].min()] #['EmployeeName']
# or
# sal.loc[sal['TotalPayBenefits'].idxmin()]['EmployeeName']
## ITS NEGATIVE!! VERY STRANGE
```
** What was the average (mean) BasePay of all employees per year? (2011-2014) ? **
```
sal.groupby('Year').mean()['BasePay']
```
** How many unique job titles are there? **
```
sal['JobTitle'].nunique()
```
** What are the top 5 most common jobs? **
```
sal['JobTitle'].value_counts().head(5)
```
** How many Job Titles were represented by only one person in 2013? (i.e. Job Titles with only one occurrence in 2013?) **
```
sum(sal[sal['Year']==2013]['JobTitle'].value_counts() == 1) # pretty tricky way to do this...
```
** How many people have the word Chief in their job title? (This is pretty tricky) **
```
def chief_string(title):
if 'chief' in title.lower():
return True
else:
return False
sum(sal['JobTitle'].apply(lambda x: chief_string(x)))
```
** Bonus: Is there a correlation between length of the Job Title string and Salary? **
```
sal['title_len'] = sal['JobTitle'].apply(len)
sal[['title_len','TotalPayBenefits']].corr() # No correlation.
```
# Great Job!
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
from tensorflow.python.framework import ops
ops.reset_default_graph()
sess = tf.Session()
housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
cols_used = ['CRIM', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'TAX', 'PTRATIO', 'B', 'LSTAT']
num_features = len(cols_used)
housing_file = requests.get(housing_url)
housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1]
y_vals = np.transpose([np.array([y[13] for y in housing_data])])
x_vals = np.array([[x for i,x in enumerate(y) if housing_header[i] in cols_used] for y in housing_data])
# Min-max scale each feature column to [0, 1]
x_vals = (x_vals - x_vals.min(0)) / x_vals.ptp(0)
# 80/20 train/test split
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# k-nearest-neighbors regression: k neighbors, whole test set in a single batch
k = 4
batch_size=len(x_vals_test)
x_data_train = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
x_data_test = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
y_target_train = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target_test = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# L1 (Manhattan) distance between every test point and every training point
distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=2)
# Indices of the k nearest neighbors (largest negated distances)
top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k)
# Distance-weighted average of the k neighbors' target values
x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1)
x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32))
x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1)
top_k_yvals = tf.gather(y_target_train, top_k_indices)
prediction = tf.squeeze(tf.matmul(x_val_weights,top_k_yvals), squeeze_dims=[1])
# Mean squared error over the test batch
mse = tf.div(tf.reduce_sum(tf.square(tf.subtract(prediction, y_target_test))), batch_size)
num_loops = int(np.ceil(len(x_vals_test)/batch_size))
for i in range(num_loops):
min_index = i*batch_size
max_index = min((i+1)*batch_size,len(x_vals_train))
x_batch = x_vals_test[min_index:max_index]
y_batch = y_vals_test[min_index:max_index]
predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch,
y_target_train: y_vals_train, y_target_test: y_batch})
batch_mse = sess.run(mse, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch,
y_target_train: y_vals_train, y_target_test: y_batch})
print('Batch #' + str(i+1) + ' MSE: ' + str(np.round(batch_mse,3)))
bins = np.linspace(5, 50, 45)
plt.hist(predictions, bins, alpha=0.5, label='Prediction')
plt.hist(y_batch, bins, alpha=0.5, label='Actual')
plt.title('Histogram of Predicted and Actual Values')
plt.xlabel('Med Home Value in $1,000s')
plt.ylabel('Frequency')
plt.legend(loc='upper right')
plt.show()
```
| github_jupyter |
# Converting *Exact* ADM Initial Data in the Spherical or Cartesian Basis to BSSN Initial Data in the Desired Curvilinear Basis
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
### This module is meant for use only with initial data that can be represented exactly in ADM form, either in the Spherical or Cartesian basis. I.e., the ADM variables are given $\left\{\gamma_{ij}, K_{ij}, \alpha, \beta^i\right\}$ *exactly* as functions of $(r,\theta,\phi)$ or $(x,y,z)$, respectively. If instead the initial data are given only numerically (e.g., through an initial data solver), then [the Numerical-ADM-Spherical/Cartesian-to-BSSNCurvilinear module](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb) will need to be used instead.
**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**
### NRPy+ Source Code for this module: [BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py)
## Introduction:
Given the ADM variables:
$$\left\{\gamma_{ij}, K_{ij}, \alpha, \beta^i\right\}$$
in the Spherical or Cartesian basis, and as functions of $(r,\theta,\phi)$ or $(x,y,z)$, respectively, this module documents their conversion to the BSSN variables
$$\left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}, \alpha, \beta^i, B^i\right\},$$
in the desired curvilinear basis (given by `reference_metric::CoordSystem`). Then it rescales the resulting BSSNCurvilinear variables (as defined in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb)) into the form needed for solving Einstein's equations with the BSSN formulation:
$$\left\{h_{i j},a_{i j},\phi, K, \lambda^{i}, \alpha, \mathcal{V}^i, \mathcal{B}^i\right\}.$$
We will use as our core example in this module UIUC initial data, which are ([as documented in their NRPy+ initial data module](Tutorial-ADM_Initial_Data-UIUC_BlackHole.ipynb)) given in terms of ADM variables in Spherical coordinates.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
1. [Step 2](#cylindrical): Desired output BSSN Curvilinear coordinate system set to Cylindrical, as a proof-of-principle
1. [Step 3](#admxx0xx1xx2): Converting ADM variables to functions of (`xx0,xx1,xx2`)
1. [Step 4](#adm_jacobian): Applying Jacobian transformations to get in the correct `xx0,xx1,xx2` basis
1. [Step 5](#adm2bssn): Call functions within [`BSSN.BSSN_in_terms_of_ADM`](../edit/BSSN/BSSN_in_terms_of_ADM.py) ([**tutorial**](Tutorial-BSSN_in_terms_of_ADM.ipynb)) to perform the ADM-to-BSSN conversion
1. [Step 6](#code_validation): Code Validation against `BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear` NRPy+ module
1. [Step 7](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```
# Step P1: Import needed NRPy+ core modules:
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import NRPy_param_funcs as par # NRPy+: Parameter interface
import sys # Standard Python module for multiplatform OS-level functions
```
<a id='cylindrical'></a>
# Step 2: Desired output BSSN Curvilinear coordinate system set to Cylindrical, as a proof-of-principle \[Back to [top](#toc)\]
$$\label{cylindrical}$$
```
# The ADM & BSSN formalisms only work in 3D; they are 3+1 decompositions of Einstein's equations.
# To implement axisymmetry or spherical symmetry, simply set all spatial derivatives in
# the relevant angular directions to zero; DO NOT SET DIM TO ANYTHING BUT 3.
# Step P1: Set spatial dimension (must be 3 for BSSN)
DIM = 3
# Set the desired *output* coordinate system to Cylindrical:
par.set_parval_from_str("reference_metric::CoordSystem","Cylindrical")
rfm.reference_metric()
# Import UIUC Black Hole initial data
import BSSN.UIUCBlackHole as uibh
uibh.UIUCBlackHole(ComputeADMGlobalsOnly=True)
Sph_r_th_ph_or_Cart_xyz = [uibh.r,uibh.th,uibh.ph]
alphaSphorCart = uibh.alphaSph
betaSphorCartU = uibh.betaSphU
BSphorCartU = uibh.BSphU
gammaSphorCartDD = uibh.gammaSphDD
KSphorCartDD = uibh.KSphDD
```
<a id='admxx0xx1xx2'></a>
# Step 3: Converting ADM variables to functions of ${\rm xx0},{\rm xx1},{\rm xx2}$ \[Back to [top](#toc)\]
$$\label{admxx0xx1xx2}$$
ADM variables are given as functions of $(r,\theta,\phi)$ or $(x,y,z)$. We convert them to functions of `(xx0,xx1,xx2)` using SymPy's `subs()` function.
```
# Step 3: All input quantities are in terms of r,th,ph or x,y,z. We want them in terms
# of xx0,xx1,xx2, so here we call sympify_integers__replace_rthph() to replace
# r,th,ph or x,y,z, respectively, with the appropriate functions of xx0,xx1,xx2
# as defined for this particular reference metric in reference_metric.py's
# xxSph[] or xx_to_Cart[], respectively:
# UIUC Black Hole initial data are given in Spherical coordinates.
CoordType_in = "Spherical"
# Make sure that rfm.reference_metric() has been called.
# We'll need the variables it defines throughout this module.
if rfm.have_already_called_reference_metric_function == False:
print("Error. Called Convert_Spherical_ADM_to_BSSN_curvilinear() without")
print(" first setting up reference metric, by calling rfm.reference_metric().")
sys.exit(1)
# Note that substitution only works when the variable is not an integer. Hence the
# if isinstance(...,...) stuff:
def sympify_integers__replace_rthph_or_Cartxyz(obj, rthph_or_xyz, rthph_or_xyz_of_xx):
if isinstance(obj, int):
return sp.sympify(obj)
else:
return obj.subs(rthph_or_xyz[0], rthph_or_xyz_of_xx[0]).\
subs(rthph_or_xyz[1], rthph_or_xyz_of_xx[1]).\
subs(rthph_or_xyz[2], rthph_or_xyz_of_xx[2])
r_th_ph_or_Cart_xyz_of_xx = []
if CoordType_in == "Spherical":
r_th_ph_or_Cart_xyz_of_xx = rfm.xxSph
elif CoordType_in == "Cartesian":
r_th_ph_or_Cart_xyz_of_xx = rfm.xx_to_Cart
else:
print("Error: Can only convert ADM Cartesian or Spherical initial data to BSSN Curvilinear coords.")
sys.exit(1)
alphaSphorCart = sympify_integers__replace_rthph_or_Cartxyz(
alphaSphorCart, Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
for i in range(DIM):
betaSphorCartU[i] = sympify_integers__replace_rthph_or_Cartxyz(
betaSphorCartU[i], Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
BSphorCartU[i] = sympify_integers__replace_rthph_or_Cartxyz(
BSphorCartU[i], Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
for j in range(DIM):
gammaSphorCartDD[i][j] = sympify_integers__replace_rthph_or_Cartxyz(
gammaSphorCartDD[i][j], Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
KSphorCartDD[i][j] = sympify_integers__replace_rthph_or_Cartxyz(
KSphorCartDD[i][j], Sph_r_th_ph_or_Cart_xyz, r_th_ph_or_Cart_xyz_of_xx)
```
<a id='adm_jacobian'></a>
# Step 4: Applying Jacobian transformations to get in the correct `xx0,xx1,xx2` basis \[Back to [top](#toc)\]
$$\label{adm_jacobian}$$
All ADM initial data quantities are now functions of `xx0,xx1,xx2`, but they are still in the Spherical or Cartesian basis. We can now directly apply Jacobian transformations to get them in the correct `xx0,xx1,xx2` basis. The following discussion holds for either Spherical or Cartesian input data, so for simplicity let's just assume the data are given in Spherical coordinates.
All ADM tensors and vectors are in the Spherical coordinate basis $x^i_{\rm Sph} = (r,\theta,\phi)$, but we need them in the curvilinear coordinate basis $x^i_{\rm rfm}=$`(xx0,xx1,xx2)` set by the `"reference_metric::CoordSystem"` variable. Empirically speaking, it is far easier to write `(x(xx0,xx1,xx2),y(xx0,xx1, xx2),z(xx0,xx1,xx2))` than the inverse, so we will compute the Jacobian matrix
$$
{\rm Jac\_dUSph\_dDrfmUD[i][j]} = \frac{\partial x^i_{\rm Sph}}{\partial x^j_{\rm rfm}},
$$
via exact differentiation (courtesy SymPy), and the inverse Jacobian
$$
{\rm Jac\_dUrfm\_dDSphUD[i][j]} = \frac{\partial x^i_{\rm rfm}}{\partial x^j_{\rm Sph}},
$$
using NRPy+'s `generic_matrix_inverter3x3()` function. In terms of these, the transformation of BSSN tensors from Spherical to `"reference_metric::CoordSystem"` coordinates may be written:
\begin{align}
\beta^i_{\rm rfm} &= \frac{\partial x^i_{\rm rfm}}{\partial x^\ell_{\rm Sph}} \beta^\ell_{\rm Sph}\\
B^i_{\rm rfm} &= \frac{\partial x^i_{\rm rfm}}{\partial x^\ell_{\rm Sph}} B^\ell_{\rm Sph}\\
\gamma^{\rm rfm}_{ij} &=
\frac{\partial x^\ell_{\rm Sph}}{\partial x^i_{\rm rfm}}
\frac{\partial x^m_{\rm Sph}}{\partial x^j_{\rm rfm}} \gamma^{\rm Sph}_{\ell m}\\
K^{\rm rfm}_{ij} &=
\frac{\partial x^\ell_{\rm Sph}}{\partial x^i_{\rm rfm}}
\frac{\partial x^m_{\rm Sph}}{\partial x^j_{\rm rfm}} K^{\rm Sph}_{\ell m}
\end{align}
```
# Step 2: All ADM initial data quantities are now functions of xx0,xx1,xx2, but
# they are still in the Spherical or Cartesian basis. We can now directly apply
# Jacobian transformations to get them in the correct xx0,xx1,xx2 basis:
# alpha is a scalar, so no Jacobian transformation is necessary.
alpha = alphaSphorCart
Jac_dUSphorCart_dDrfmUD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
Jac_dUSphorCart_dDrfmUD[i][j] = sp.simplify(sp.diff(r_th_ph_or_Cart_xyz_of_xx[i],rfm.xx[j]))
Jac_dUrfm_dDSphorCartUD, dummyDET = ixp.generic_matrix_inverter3x3(Jac_dUSphorCart_dDrfmUD)
betaU = ixp.zerorank1()
BU = ixp.zerorank1()
gammaDD = ixp.zerorank2()
KDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
betaU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * betaSphorCartU[j]
BU[i] += Jac_dUrfm_dDSphorCartUD[i][j] * BSphorCartU[j]
for k in range(DIM):
for l in range(DIM):
gammaDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i]*Jac_dUSphorCart_dDrfmUD[l][j] * gammaSphorCartDD[k][l]
KDD[i][j] += Jac_dUSphorCart_dDrfmUD[k][i]*Jac_dUSphorCart_dDrfmUD[l][j] * KSphorCartDD[k][l]
```
<a id='adm2bssn'></a>
# Step 5: Call functions within [`BSSN.BSSN_in_terms_of_ADM`](../edit/BSSN/BSSN_in_terms_of_ADM.py) ([**tutorial**](Tutorial-BSSN_in_terms_of_ADM.ipynb)) to perform the ADM-to-BSSN conversion \[Back to [top](#toc)\]
$$\label{adm2bssn}$$
All ADM quantities were input into this function in the Spherical or Cartesian basis, as functions of $r,\theta,\phi$ or $x,y,z$, respectively. In [Step 3](#admxx0xx1xx2) and [Step 4](#adm_jacobian) above, we converted them to the `xx0,xx1,xx2` basis, and as functions of `xx0,xx1,xx2`. Here we convert ADM 3-metric, extrinsic curvature, and gauge quantities in the `xx0,xx1,xx2` (a.k.a. "rfm") basis to their BSSN Curvilinear counterparts, in the same basis.
```
import BSSN.BSSN_in_terms_of_ADM as BitoA
BitoA.gammabarDD_hDD( gammaDD)
BitoA.trK_AbarDD_aDD( gammaDD,KDD)
BitoA.LambdabarU_lambdaU__exact_gammaDD(gammaDD)
BitoA.cf_from_gammaDD( gammaDD)
BitoA.betU_vetU( betaU,BU)
hDD = BitoA.hDD
trK = BitoA.trK
aDD = BitoA.aDD
lambdaU = BitoA.lambdaU
cf = BitoA.cf
vetU = BitoA.vetU
betU = BitoA.betU
```
<a id='code_validation'></a>
# Step 6: Code Validation against `BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear` module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for BrillLindquist initial data between
1. this tutorial and
2. the NRPy+ [BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py) module.
By default, we analyze these expressions in Spherical coordinates, though other coordinate systems may be chosen.
```
import BSSN.UIUCBlackHole as uibh
import BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear as ADMtoBSSN
uibh.UIUCBlackHole()
mod_cf,mod_hDD,mod_lambdaU,mod_aDD,mod_trK,mod_alpha,mod_vetU,mod_betU = \
ADMtoBSSN.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Spherical",uibh.Sph_r_th_ph,
uibh.gammaSphDD, uibh.KSphDD, uibh.alphaSph, uibh.betaSphU, uibh.BSphU)
print("Consistency check between this tutorial notebook and BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear NRPy+ module: ALL SHOULD BE ZERO.")
print("cf - mod_cf = " + str(cf - mod_cf))
print("trK - mod_trK = " + str(trK - mod_trK))
print("alpha - mod_alpha = " + str(alpha - mod_alpha))
for i in range(DIM):
print("vetU["+str(i)+"] - mod_vetU["+str(i)+"] = " + str(vetU[i] - mod_vetU[i]))
print("betU["+str(i)+"] - mod_betU["+str(i)+"] = " + str(betU[i] - mod_betU[i]))
print("lambdaU["+str(i)+"] - mod_lambdaU["+str(i)+"] = " + str(lambdaU[i] - mod_lambdaU[i]))
for j in range(DIM):
print("hDD["+str(i)+"]["+str(j)+"] - mod_hDD["+str(i)+"]["+str(j)+"] = "
+ str(hDD[i][j] - mod_hDD[i][j]))
print("aDD["+str(i)+"]["+str(j)+"] - mod_aDD["+str(i)+"]["+str(j)+"] = "
+ str(aDD[i][j] - mod_aDD[i][j]))
# If you wish to generate & analyze C code output, uncomment the following:
# import os, shutil # Standard Python modules for multiplatform OS-level functions
# import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# # Step P2: Create C code output directory:
# Ccodesdir = os.path.join("BSSN_Exact_ADM_validation/")
# # First remove C code output directory if it exists
# # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# # !rm -r ScalarWaveCurvilinear_Playground_Ccodes
# shutil.rmtree(Ccodesdir, ignore_errors=True)
# # Then create a fresh directory
# cmd.mkdir(Ccodesdir)
# with open(os.path.join(Ccodesdir,"UIUCBlackHole-CylindricalTest.h"),"w") as file:
# file.write(uibh.returnfunction)
```
<a id='latex_pdf_output'></a>
# Step 7: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear")
```
| github_jupyter |
## Probabilistic tractography
Probabilistic fiber tracking is a way of reconstructing the white matter
structural connectivity using diffusion MRI data. Much like deterministic fiber
tracking, the probabilistic approach follows the trajectory of a possible
pathway in a step-wise fashion and propagating streamlines based on the local
orientations reconstructed at each voxel.
In probabilistic tracking, however, the tracking direction at each point along
the path is chosen at random from a distribution of possible directions, and
thus is no longer deterministic. The distribution at each point is different and
depends on the observed diffusion data at that point. The distribution of
tracking directions at each point can be represented as a probability mass
function (PMF) if the possible tracking directions are restricted to a set of
points distributed on a sphere.
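As a toy illustration of that idea (plain NumPy, not DIPY code), sampling one tracking direction from a PMF defined over a small set of candidate directions might look like this:
```
import numpy as np

# Toy set of candidate directions (unit vectors) and ODF values sampled on them
directions = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
odf_values = np.array([0.2, 0.5, 0.3])

pmf = odf_values / odf_values.sum()  # normalize into a probability mass function
chosen = directions[np.random.choice(len(directions), p=pmf)]
print(chosen)
```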
Like their deterministic counterparts, probabilistic tracking methods start
propagating streamlines from a *seed map*, which contains a number of
coordinates per voxel to initiate the procedure. The higher the number of seeds
per voxel (i.e. the seed density), the larger the number of long-range
connections that can potentially be recovered. However, this comes at the cost of a longer
running time.
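As a quick illustration of how the seed count scales (a sketch assuming DIPY's `seeds_from_mask`, where an integer density `d` places `d**3` seeds per voxel):
```
import numpy as np
from dipy.tracking import utils

toy_mask = np.ones((2, 2, 2))    # 8 voxels
toy_affine = np.eye(4)
seeds_d1 = utils.seeds_from_mask(toy_mask, affine=toy_affine, density=1)
seeds_d2 = utils.seeds_from_mask(toy_mask, affine=toy_affine, density=2)
print(len(seeds_d1), len(seeds_d2))  # expected: 8 and 64
```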
This episode builds on top of the results of the CSD local orientation
reconstruction method presented in a previous episode.
We will first get the necessary diffusion data, and compute the local
orientation information using the CSD method:
```
import os
import nibabel as nib
import numpy as np
import bids
from bids.layout import BIDSLayout
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response_ssst)
from dipy.tracking import utils
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
bids.config.set_option('extension_initial_dot', True)
# Get the diffusion files
dwi_layout = BIDSLayout(
'../../data/ds000221/derivatives/uncorrected_topup_eddy/', validate=False)
gradient_layout = BIDSLayout(
'../../data/ds000221/sub-010006/ses-01/dwi/', validate=False)
subj = '010006'
dwi_fname = dwi_layout.get(subject=subj, suffix='dwi',
extension='nii.gz', return_type='file')[0]
bval_fname = gradient_layout.get(
subject=subj, suffix='dwi', extension='bval', return_type='file')[0]
bvec_fname = dwi_layout.get(
subject=subj, extension='eddy_rotated_bvecs', return_type='file')[0]
dwi_img = nib.load(dwi_fname)
affine = dwi_img.affine
bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
gtab = gradient_table(bvals, bvecs)
```
We will now create the seeding mask and the seeds using an estimate of the
white matter tissue based on the FA values obtained from the diffusion tensor:
```
from dipy.reconst import dti
from dipy.segment.mask import median_otsu
from dipy.tracking import utils
dwi_data = dwi_img.get_fdata()
# Specify the volume index to the b0 volumes
dwi_data, dwi_mask = median_otsu(dwi_data, vol_idx=[0], numpass=1)
dti_model = dti.TensorModel(gtab)
# This step may take a while
dti_fit = dti_model.fit(dwi_data, mask=dwi_mask)
# Create the seeding mask
fa_img = dti_fit.fa
seed_mask = fa_img.copy()
seed_mask[seed_mask >= 0.2] = 1
seed_mask[seed_mask < 0.2] = 0
# Create the seeds
seeds = utils.seeds_from_mask(seed_mask, affine=affine, density=1)
```
We will now estimate the FRF and set the CSD model to feed the local orientation
information to the streamline propagation object:
```
response, ratio = auto_response_ssst(gtab, dwi_data, roi_radii=10, fa_thr=0.7)
sh_order = 2
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order)
csd_fit = csd_model.fit(dwi_data, mask=seed_mask)
```
Tracking methods are provided with a criterion to stop propagating streamlines
beyond non-white matter tissues. One way to do this is to use the Generalized
Fractional Anisotropy (GFA). Much like the Fractional Anisotropy issued by the
DTI model measures anisotropy, the GFA uses samples of the ODF to quantify the
anisotropy of tissues, and hence, it provides an estimation of the underlying
tissue type.
```
from scipy import ndimage # To rotate image for visualization purposes
import matplotlib.pyplot as plt
from dipy.reconst.shm import CsaOdfModel
csa_model = CsaOdfModel(gtab, sh_order=sh_order)
gfa = csa_model.fit(dwi_data, mask=seed_mask).gfa
stopping_criterion = ThresholdStoppingCriterion(gfa, .25)
# Create the directory to save the results
out_dir = '../../data/ds000221/derivatives/dwi/tractography/sub-%s/ses-01/dwi/' % subj
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Save the GFA
gfa_img = nib.Nifti1Image(gfa.astype(np.float32), affine)
nib.save(gfa_img, os.path.join(out_dir, 'gfa.nii.gz'))
# Plot the GFA
%matplotlib inline
fig, ax = plt.subplots(1, 3, figsize=(10, 10))
ax[0].imshow(ndimage.rotate(gfa[:, gfa.shape[1]//2, :], 90, reshape=False))
ax[1].imshow(ndimage.rotate(gfa[gfa.shape[0]//2, :, :], 90, reshape=False))
ax[2].imshow(ndimage.rotate(gfa[:, :, gfa.shape[-1]//2], 90, reshape=False))
fig.savefig(os.path.join(out_dir, "gfa.png"), dpi=300, bbox_inches="tight")
plt.show()
```
The GFA threshold stopping criterion value must be adjusted to the data in
order to avoid creating a mask that will exclude white matter areas (which
would result in streamlines being unable to propagate to other white matter
areas). Visually inspecting the GFA map provides a useful sanity check on the chosen value.
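A quick numeric complement to the visual check (a small sketch using the arrays computed above; 0.25 matches the threshold used earlier):
```
# Fraction of seed-mask voxels that fall below the GFA stopping threshold
wm = seed_mask.astype(bool)
excluded_fraction = np.mean(gfa[wm] < 0.25)
print("Fraction of seed-mask voxels below the GFA threshold:", excluded_fraction)
```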
The Fiber Orientation Distribution (FOD) of the CSD model estimates the
distribution of small fiber bundles within each voxel. We can use this
distribution for probabilistic fiber tracking. One way to do this is to
represent the FOD using a discrete sphere. This discrete FOD can be used by the
``ProbabilisticDirectionGetter`` as a PMF for sampling tracking directions. We
need to clip the FOD to use it as a PMF because the latter cannot have negative
values. Ideally, the FOD should be strictly positive, but because of noise
and/or model failures sometimes it can have negative values.
The set of possible directions to choose to propagate a streamline is restricted
by a cone angle $\theta$, named `max_angle` in `DIPY`'s
`ProbabilisticDirectionGetter::from_pmf` method.
Another relevant parameter of the propagation is the step size, which dictates
how much the propagation will advance to the next point. Note that it is a real
number, since the tracking procedure operates in physical coordinates.
Note that the `LocalTracking` class accepts a `StoppingCriterion` class instance
as its second argument, and thus a different criterion can be used if the GFA
criterion does not fit into our framework, or if different data is available in
our workflow.
```
from dipy.direction import ProbabilisticDirectionGetter
from dipy.data import small_sphere
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_tractogram
fod = csd_fit.odf(small_sphere)
pmf = fod.clip(min=0)
prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
sphere=small_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, 'tractogram_probabilistic_dg_pmf.trk'))
```
We can easily generate the anatomical views of the generated tractogram using the `generate_anatomical_volume_figure` helper function:
```
# NBVAL_SKIP
from fury import actor, colormap
from utils.visualization_utils import generate_anatomical_volume_figure
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Compute the slices to be shown
slices = tuple(elem // 2 for elem in dwi_data.shape[:-1])
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(out_dir, "tractogram_probabilistic_dg_pmf.png"),
dpi=300, bbox_inches="tight")
plt.show()
```
One disadvantage of using a discrete PMF to represent possible tracking
directions is that it tends to take up a lot of memory (RAM). The size of the
PMF, the FOD in this case, must be equal to the number of possible tracking
directions on the hemisphere, and every voxel has a unique PMF. In this case
the data is ``(81, 106, 76)`` and ``small_sphere`` has 181 directions so the
FOD is ``(81, 106, 76, 181)``. One way to avoid sampling the PMF and holding it
in memory is to build the direction getter directly from the spherical harmonic
(SH) representation of the FOD. By using this approach, we can also use a
larger sphere, like ``default_sphere`` which has 362 directions on the
hemisphere, without having to worry about memory limitations.
```
from dipy.data import default_sphere
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, 'tractogram_probabilistic_dg_sh.trk'))
```
We will visualize the tractogram using the three usual anatomical views:
```
# NBVAL_SKIP
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(out_dir, "tractogram_probabilistic_dg_sh.png"),
dpi=300, bbox_inches="tight")
plt.show()
```
Not all model fits have the ``shm_coeff`` attribute because not all models use
this basis to represent the data internally. However we can fit the ODF of any
model to the spherical harmonic basis using the ``peaks_from_model`` function.
```
from dipy.direction import peaks_from_model
peaks = peaks_from_model(csd_model, dwi_data, default_sphere, .5, 25,
mask=seed_mask, return_sh=True, parallel=True)
```
It is always good practice to (save and) visualize the peaks as a check towards ensuring that the orientation information conforms to what is expected prior to the tracking process.
```
# Save the peaks
from dipy.io.peaks import reshape_peaks_for_visualization
nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peaks),
affine), os.path.join(out_dir, 'peaks.nii.gz'))
```
As usual, we will use `fury` to visualize the peaks:
```
# NBVAL_SKIP
from utils.visualization_utils import generate_anatomical_slice_figure
# Visualize the peaks
# Build the representation of the data
peaks_actor = actor.peak_slicer(peaks.peak_dirs, peaks.peak_values)
# Compute the slices to be shown
slices = tuple(elem // 2 for elem in dwi_data.shape[:-1])
# Generate the figure
fig = generate_anatomical_slice_figure(slices, peaks_actor)
fig.savefig(os.path.join(out_dir, "peaks.png"), dpi=300, bbox_inches="tight")
plt.show()
fod_coeff = peaks.shm_coeff
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(fod_coeff, max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, "tractogram_probabilistic_dg_sh_pmf.trk"))
```
We will again visualize the tractogram using the three usual anatomical views:
```
# NBVAL_SKIP
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(
out_dir, "tractogram_probabilistic_dg_sh_pmf.png"), dpi=300, bbox_inches="tight")
plt.show()
```
## Tip: Making sure your tractogram is well aligned with the data
If for whatever reason the anatomical and diffusion images were not correctly aligned, you may find that your tractogram is not well aligned with the anatomical data. This can also result from the different formats in which a tractogram is saved/loaded, some conventions placing the origin at the voxel corner and others at the center of the voxel. Visualizing the computed features is always recommended, and there are tools that help verify that the matrices specifying the orientation and positioning of the data are correct.
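From within Python, `nibabel` can display the same information directly (a minimal check; the filename is illustrative):
```
import nibabel as nib

print(nib.load('dwi.nii.gz').affine)  # voxel-to-world affine of the diffusion image
```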
`MRtrix`'s `mrinfo` command can be used to visualize the affine matrix of a `NIfTI` file as:
`mrinfo dwi.nii.gz`
which would output something like:
```
************************************************
Image: "/data/dwi.nii.gz"
************************************************
Dimensions: 90 x 108 x 90 x 33
Voxel size: 2 x 2 x 2 x 1
Data strides: [ -1 -2 3 4 ]
Format: NIfTI-1.1 (GZip compressed)
Data type: signed 16 bit integer (little endian)
Intensity scaling: offset = 0, multiplier = 1
Transform: 1 -0 0 -178
-0 1 0 -214
-0 -0 1 -0
```
Similarly, for your tractograms, you may use the command `track_info` from `TrackVis`' `Diffusion Toolkit` set of command-line tools:
`track_info tractogram.trk`
which would output something like:
```
ID string: TRACK
Version: 2
Dimension: 180 216 180
Voxel size: 1 1 1
Voxel order: LPS
Voxel order original: LPS
Voxel to RAS matrix:
-1.0000 0.0000 0.0000 0.5000
0.0000 -1.0000 0.0000 0.5000
0.0000 0.0000 1.0000 -0.5000
0.0000 0.0000 0.0000 1.0000
Image Orientation: 1.0000/0.0000/0.0000/0.0000/1.0000/0.0000
Orientation patches: none
Number of scalars: 0
Number of properties: 0
Number of tracks: 200433
```
Note that a `TRK` file contains orientation and positioning information. If you choose to store your tractograms in the `TCK` format, this information will not be contained in the file. To see the file header information you may use the `MRtrix` `tckinfo` command:
`tckinfo tractogram.tck`
which would output something like:
```
***********************************
Tracks file: "/data/tractogram.tck"
count: 0000200433
dimensions: (180, 216, 180)
voxel_order: LPS
voxel_sizes: (1.0, 1.0, 1.0)
```

# Introduction to [`astropy`](http://astropy.readthedocs.org/en/stable/)
with [Brett Morris](http://staff.washington.edu/bmmorris/)
**Dependencies**: astropy, astroquery, astroplan
### Outline
1. `astropy.units`
2. `astropy.time`
3. `astropy.coordinates`
4. `astropy.cosmology`
5. `astropy.table`
6. `astropy`-affiliated packages: `astroquery` & `astroplan`
7. `astropy.io.fits`
8. `astropy.io.ascii`
9. Exercises
***
## 1) [`astropy.units`](http://astropy.readthedocs.org/en/latest/units/): Problem sets are about to get easier
One of the modules most central to `astropy` is the `units` module, which will save you lots of time.
```
import astropy.units as u
import numpy as np
height = u.Quantity(1.778, unit=u.meter)
# or equivalently:
height = 1.778*u.m
height
```
If you're self-abusive, imperial units are supported:
```
from astropy.units.imperial import foot
height.to(foot)
```
What is the light-travel time across one Brett? ($\Delta t = \Delta x / c$)
```
from astropy.constants import c
dt = height/c
dt
```
Metric prefixes accepted (try `M` for mega, `p` for pico, etc.)
```
dt.to(u.ns)
```
A quantity has two attributes:
```
dt.value, dt.unit
```
Vector quantities are where it's at:
```
distances = u.Quantity([1.3, 1.5, 1.7], unit=u.lightyear)
distances.to(u.m)
```
Quantities are either Python built-in types (float, int) or numpy arrays with metadata. To get at the underlying numbers, use the `value` attribute:
```
distances.value, type(distances.value)
```
If the result of what you're computing is unitless, but you arrived there by combining lots of units, then you might need to use the `float` function to turn your unitful answer into a dimensionless quantity by resolving all of the unit equivalences:
```
expansion_rate = 67 * u.km / u.s / u.Mpc
duration = 1 * u.Gyr
expansion_rate * duration
float(expansion_rate * duration)
```
There are a bunch of useful quantities stored in `astropy.constants`, which will save you frustration in problem sets:
```
from astropy.constants import R_sun, R_earth, R_jup
from astropy.constants import M_sun, M_earth, m_e
from astropy.constants import G, h, k_B
print(M_sun)
```
You can use these constants like units:
```
# Calculate the black hole mass in units of solar masses:
black_hole_mass = 12e31 * u.kg
black_hole_mass.to(M_sun)
```
The above result should be read as "60 (solar masses)". To see the quantity without its unit, use `value`:
```
black_hole_mass.to(M_sun).value
```
***
## 2) [`astropy.time`](http://astropy.readthedocs.org/en/latest/time/index.html): Time objects for humans
There are many distinct and confusing time systems used in astronomy, and the `astropy.time` module provides a convenient means of translating between them – never code your own JD-to-ISO time converter or try to remember whether or not the difference between JD and MJD has a 0.5 in it again!
```
# The astropy.time.Time object contains a time in a specified format
from astropy.time import Time
# If the input format is not specified, it will guess. Here's an ISO formatted string:
Time('2005-01-01 12:34:56')
```
Here's a Julian Date:
```
t = Time(2453372.0242592595, format='jd')
t
```
Convert between time formats by calling `t.iso`, `t.mjd`, etc.
```
t.iso
```
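The same object also settles the JD-vs-MJD question: MJD is defined as JD minus 2400000.5 days, which you can verify directly:
```
# MJD = JD - 2400000.5
t.mjd, t.jd - t.mjd
```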
By default, the scale (or time standard) is set to **UTC**, which is defined to keep an integer number of seconds per day. There are other time standards like **UT1** which are defined by the rotation of the Earth (see [my blog post on time standards](http://bmmorris.blogspot.com/2015/06/ut1-utc-and-astropy.html) for more background). Converting between the two can be messy, but not with astropy:
```
print('Available time scales: {0}'.format(', '.join(Time.SCALES)))
t.scale
t.ut1
```
If converting between UTC and UT1 raises an `IndexError` like this,
```
IndexError: (some) times are outside of range covered by IERS table.
```
it's because you need more up-to-date Earth rotation data since the Earth's rate of rotation is constantly changing. See the `astropy.time` docs on [Transformation offsets](http://astropy.readthedocs.org/en/stable/time/index.html#transformation-offsets) to update your Earth rotation data.
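A minimal sketch of that update (assuming network access; the exact recipe can differ between astropy versions):
```
# Download a fresh IERS-A table so UTC <-> UT1 conversions cover recent dates
from astropy.utils.data import download_file
from astropy.utils import iers
iers.IERS.iers_table = iers.IERS_A.open(download_file(iers.IERS_A_URL, cache=True))
```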
```
t.ut1.iso
```
Lastly, arrays of times can be generated from numpy arrays:
```
Time.now() + np.linspace(0, 1, 10)*u.year
```
`Time` objects are also great for plotting a time series. For example, try using the `plot_date` attribute with `plt.plot_date`, or `decimalyear` with `plt.plot`:
```
%matplotlib inline
import matplotlib.pyplot as plt
times = Time.now() - np.linspace(0, 10, 100) * u.year
fluxes = 0.01 * np.random.randn(len(times)) + 1
plt.plot_date(times.plot_date, fluxes)
plt.xlabel('Date')
plt.ylabel('Flux')
plt.show()
```
***
## 3) [`astropy.coordinates`](http://astropy.readthedocs.org/en/latest/coordinates/index.html)

Convert the position of your target from one coordinate system to another without opening a reference book!
Let's define the galactic center in the natural coordinate system:
```
from astropy.coordinates import SkyCoord
gal_center = SkyCoord(l=0*u.deg, b=0*u.deg, frame='galactic')
print(gal_center)
```
Now let's say you have to tell an observer where that is in ICRS coordinates: what is that position in RA/Dec?
```
gal_center.icrs
```
You can resolve targets by name with the `from_name` class method
```
sgr_a = SkyCoord.from_name('Sgr A*')
print(sgr_a)
```
Let's represent these coordinates in various formats with `.degree`, `.hourangle`:
```
sgr_a.ra.degree
```
and experiment with the string outputs you'd use in a proposal, like `dms`, `hmsdms`, `decimal`:
```
sgr_a.to_string(style='hmsdms', sep=':')
```
With a specified location on Earth, you can compute alt/az coordinates for any `SkyCoord`
```
from astropy.coordinates import EarthLocation, AltAz
# Define Earth location:
longitude, latitude, elevation = (-122.3331*u.deg, 47.6097*u.deg, 0*u.m)
seattle = EarthLocation.from_geodetic(longitude, latitude, elevation)
# Define alt/az frame:
alt_az_frame = AltAz(obstime=Time('2005-06-07 08:09:10'), location=seattle)
# Transform the coordinate to the new reference frame, and print
sgr_a_altaz = sgr_a.transform_to(alt_az_frame)
sgr_a_altaz.to_string(style='hmsdms')
```
***
## 4) [`astropy.cosmology`](http://astropy.readthedocs.org/en/latest/cosmology/): No more JavaScript cosmology calculators for you!
First, choose a cosmology (e.g.: `Planck13`, `WMAP9`) and get $H_0$:
```
from astropy.cosmology import Planck13 as cosmo
cosmo.H(z=0)
cosmo.angular_diameter_distance(z=1)
cosmo.luminosity_distance(z=1)
```
In cosmology class you'll still have to learn to solve these from scratch, but you can double check yourself like so:
```
%matplotlib inline
import matplotlib.pyplot as plt
z = np.linspace(0, 10, 50)
# Compute some parameters
t_lookback = cosmo.lookback_time(z)
T_cmb = cosmo.Tcmb(z)
D_A = cosmo.angular_diameter_distance(z)
D_L = cosmo.luminosity_distance(z)
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
ax[0, 0].plot(z, t_lookback)
ax[0, 0].set(title='Look-back Time', xlabel='$z$',
ylabel=r'$t_{{lookback}}(z)$ [{0}]'.format(t_lookback.unit))
ax[0, 1].plot(z, T_cmb)
ax[0, 1].set(title='$T_{CMB}$', xlabel='$z$',
ylabel='$T_{{CMB}}(z)$ [{0}]'.format(T_cmb.unit))
ax[1, 0].plot(z, D_A)
ax[1, 0].set(title='Angular Diameter Distance', xlabel='$z$',
ylabel=r'$D_A(z)$ [{0}]'.format(D_A.unit))
ax[1, 1].plot(z, D_L)
ax[1, 1].set(title='Luminosity Distance', xlabel='$z$',
ylabel=r'$D_L(z)$ [{0}]'.format(D_L.unit))
fig.subplots_adjust(wspace=0.3)
fig.suptitle('Cosmology: {0}'.format(cosmo.name), fontsize=18);
```
# 5) `astropy.table`: Table objects for physical scientists
What makes a table object specific to physical scientists, you ask? **Units** my friend, units. In general, [pandas](http://pandas.pydata.org) has the most mature table-like data structures in Python, but the astropy table is nifty, so let's see how it works.
A table can be constructed a bunch of ways. Let's initialize one from an array:
```
from astropy.table import Table
example_data = np.random.randint(0, 100, 50).reshape((10, 5))
column_names = ['a', 'b', 'c', 'd', 'e']
table = Table(example_data, names=column_names)
table
```
As you can see, the astropy table has some special powers inside IPython notebooks, and gets rendered nicely.
The first row tells you the names of each column. You can access a column of data from a table by treating the table like a dictionary:
```
table['a']
```
This column object has a `.data` attribute which you can use to get at the `numpy` array underneath:
```
table['a'].data
```
This gets at the heart of what an astropy table is. It's essentially an ordered dictionary of columns. Each column is a numpy array _with metadata_. That metadata is what makes the table useful, because those columns, for example, can have units!
```
table['a'].unit = u.km
table['b'].unit = u.lightyear
table['c'].unit = u.kg
table['d'].unit = u.s
table['e'].unit = u.Mpc
table
```
Now you can do operations on each column as though it were an array with units:
```
table['b'].to(u.pc)
```
and you can get each element as a quantity, or not:
```
# Not a quantity:
table['a'][2], type(table['a'][2])
# A quantity:
table['a'].quantity[2]
```
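If you do end up needing pandas, the conversion is a single call each way (note that `to_pandas` drops the unit metadata); a minimal sketch:
```
# Round-trip between an astropy Table and a pandas DataFrame
df = table.to_pandas()
table_again = Table.from_pandas(df)
```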
***
## 6a) Affiliated Package: [`astroquery`](http://astroquery.readthedocs.org)
Since `astropy` is a collection of fundamental tools that are easy to use, lots of packages have been built on top of `astropy`, but not necessarily merged into `astropy` core. One of those is `astroquery`, which allows you to query astronomical databases with ease.
Let's query for the SIMBAD entry for a planet hosting star, HD 189733:
```
from astroquery.simbad import Simbad
Simbad.query_object('HD 189733')
```
Let's query Vizier for the famous list of standard stars from [Landolt (1992)](http://adsabs.harvard.edu/abs/1992AJ....104..340L). The [`astropy.table`](http://astropy.readthedocs.org/en/latest/table/) that is returned to you will have the same information as [this Vizier query page](http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=II/183A/table2).
```
from astroquery.vizier import Vizier
landolt_table = Vizier.get_catalogs('Landolt 1992')[0]
landolt_table
```
## 6b) Affiliated Package: [`astroplan`](https://astroplan.readthedocs.org/en/latest/)
`astroplan` is an `astropy`-affiliated package that helps you calculate when objects are observable. Here's a quick example for determining which targets are visible right now from Apache Point Observatory:
```
from astroplan import Observer, FixedTarget
# Targets are stored as `astroplan.FixedTarget` objects
target_names = ['Polaris', 'Sirius', 'Vega', 'Rigel']
targets = [FixedTarget.from_name(target) for target in target_names]
# Observatories are `astroplan.Observer` objects
observatory = Observer.at_site("Apache Point")
# Which targets are visible right now?
observatory.target_is_up(Time.now(), targets)
```
Now let's see which of those targets are visible over a time range of the next ten days, given the following constraints:
* Observations must occur between civil twilights
* The altitude of the target must be $20^\circ < $alt$ < 85^\circ$
```
from astroplan import AtNightConstraint, AltitudeConstraint, observability_table
time_range = Time.now() + np.array([0, 10])*u.day
constraints = [AtNightConstraint.twilight_civil(),
AltitudeConstraint(min=20*u.deg, max=85*u.deg)]
observability_table(constraints, observatory, targets, time_range=time_range)
```
Let's track one of those targets (Vega, i.e. `targets[2]`) across the sky for the next ten hours in a plot:
```
from astroplan.plots import plot_sky
# Plot at times:
plot_times = Time.now() + np.linspace(0, 10, 10)*u.hour
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_sky(targets[2], observatory, plot_times)
```
For a more detailed run through of astroplan, [here's another notebook to check out](https://gist.github.com/bmorris3/19374760eb11271850ec).
# 7) `astropy.io.fits`: Reading and writing FITS files
Astronomers (unfortunately) use FITS files a lot, so let's practice using FITS files with astropy. The following command will download a FITS image for us to work with:
```
from astropy.utils.data import download_file
url = 'http://staff.washington.edu/bmmorris/images/example.fits'
example_fits_path = download_file(url)
```
We can read in FITS files in two ways. The first is generic, and allows us to see the different extensions.
```
from astropy.io import fits
hdus = fits.open(example_fits_path)
print(hdus)
first_hdu = hdus[0]
plt.imshow(np.log(first_hdu.data), cmap=plt.cm.binary)
plt.title(first_hdu.header['OBJECT'])
plt.show()
```
Alternatively, if you know what HDU you want to access, and you want just the header or the data, you can use the following:
```
image = fits.getdata(example_fits_path)
header = fits.getheader(example_fits_path)
```
You can access particular header cards from the `astropy.io.fits.header.Header` object like a dictionary:
```
date_obs = header['DATE-OBS']
exp_time = header['EXPTIME']
print("Observation time: {0}".format(date_obs))
print("Exposure duration: {0} s".format(exp_time))
```
To see the available keywords within the header, do:
```
list(header.keys())[:10]
```
If you want to write some results to a FITS file, you can do so like this:
```
# Create a 2D, 10 x 10 random number array:
example_data = np.random.randn(100).reshape((10, 10))
fits.writeto('example_data.fits', example_data, header=header, overwrite=True)
```
***
# 8) Reading and writing ascii text tables
Perhaps a collaborator will send you some IDL-generated text tables to work with, and you'll want to open them with Python. Sometimes the easiest way to do this will be with `astropy.io.ascii`. Let's create an example table in the cell below:
```
%%writefile example_table.txt
a b c
0 2 2
3 4 6
34 4 1
6 36 5
86 7 3
```
```
from astropy.io import ascii
table = ascii.read('example_table.txt')
table
```
Well that was easy! We didn't have to specify anything, and it did most of the work. We can now take this `astropy.table.Table` object and make it even more useful, by giving the columns units, etc.:
```
table['a'].unit = u.kg
table
```
We can write this table out using the [very very flexible `ascii.write` function](http://docs.astropy.org/en/v0.2.1/io/ascii/index.html):
```
# Directly output a table in LaTeX format:
ascii.write(table, 'latex_table.tex', Writer=ascii.Latex)
# Output a CSV file:
ascii.write(table, 'csv_table.csv', format='csv')
# Write a table with the column names at the top, in a comment
ascii.write(table, 'table_with_header.txt', Writer=ascii.CommentedHeader)
```
***
# Exercises
**1)** Get the light travel time to the sun in minutes, given its distance *right now* (hint: check out [`astropy.coordinates.get_sun`](http://astropy.readthedocs.org/en/latest/api/astropy.coordinates.get_sun.html?highlight=get_sun#astropy.coordinates.get_sun)).
**2)** Using your current distance from the Sun in #1, calculate which is greater: the force of gravity between you and the Sun right now, or between you and a bowling ball-sized chunk of neutron star placed 12 kilometers away.
Let's assume your mass is 60 kg. Use `astropy.constants` to get the gravitational constant $G$ and the mass of the sun $M_\odot$. Let's say bowling balls have $r \sim 22$ cm, and neutron stars have a density of $\rho \sim 3.7 \times 10^{17} $kg m$^{-3}$.
**4)** Calculate the Schwarzschild radius in units of solar radii of Sgr A*, the Milky Way's supermassive black hole with $M = 4.31 \times 10^6 M_\odot$, given
$$r_\mathrm{s} = \frac{2 G M}{c^2}$$
and the distance to the galactic center $d_{center} = 7.94$ kpc. Also calculate the angular size of the event horizon on the sky in microarcseconds.
**5)** Represent your birthday in the following time formats: ISO, JD, MJD and decimal year, all with the UTC time standard (default).
**6)** Using the table of Landolt standards which we generated above (`landolt_table`), find the name of the star with the brightest _V_ magnitude (smallest number), and find its position in galactic coordinates (hint: [`SkyCoord` docs](http://docs.astropy.org/en/stable/coordinates/#transformation)).
# Numerical representation of words and texts
In this notebook we will present ways of representing textual values numerically. We will use pandas; if you want to understand a bit about pandas, [see this notebook](pandas.ipynb).
In machine learning we often need a numerical representation of a given value. For example:
```
import pandas as pd
df_jogos = pd.DataFrame([ ["boa","nublado","não"],
["boa","chuvoso","não"],
["média","nublado","sim"],
["fraca","chuvoso","não"]],
columns=["disposição","tempo","jogar volei?"])
df_jogos
```
If we want to map each column (now called an attribute) to a value, the simplest way to do the transformation is to map that attribute directly to a numeric value. See the example below:
In this example, we have two attributes, the player's disposition and the weather, and we want to predict whether the player will play volleyball or not. Both the attributes and the class can be mapped to numbers. In addition, the `disposição` attribute represents a scale, which makes this kind of transformation well suited to it.
```
from typing import Dict
def mapeia_atributo_para_int(df_data:pd.DataFrame, coluna:str, dic_nom_to_int: Dict[int,str]):
for i,valor in enumerate(df_data[coluna]):
valor_int = dic_nom_to_int[valor]
df_data[coluna].iat[i] = valor_int
df_jogos = pd.DataFrame([ ["boa","nublado","sim"],
["boa","chuvoso","não"],
["média","ensolarado","sim"],
["fraca","chuvoso","não"]],
columns=["disposição","tempo","jogar volei?"])
dic_disposicao = {"boa":3,"média":2,"fraca":1}
mapeia_atributo_para_int(df_jogos, "disposição", dic_disposicao)
dic_tempo = {"ensolarado":3,"nublado":2,"chuvoso":1}
mapeia_atributo_para_int(df_jogos, "tempo", dic_tempo)
dic_volei = {"sim":1, "não":0}
mapeia_atributo_para_int(df_jogos, "jogar volei?", dic_volei)
df_jogos
```
## Binarizing categorical attributes
We can binarize the categorical attributes so that each attribute value becomes a column that receives `0` when that value is absent and `1` otherwise. In our example:
```
from preprocessamento_atributos import BagOfItems
df_jogos = pd.DataFrame([ [4, "boa","nublado","sim"],
[3,"boa","chuvoso","não"],
[2,"média","ensolarado","sim"],
[1,"fraca","chuvoso","não"]],
columns=["id","disposição","tempo","jogar volei?"])
dic_disposicao = {"boa":3,"média":2,"fraca":1}
bag_of_tempo = BagOfItems(0)
#see the implementation of this method in preprocessamento_atributos.py
df_jogos_bot = bag_of_tempo.cria_bag_of_items(df_jogos,["tempo"])
df_jogos_bot
```
Since the test set contains several values that you do not know in advance, doing it this way means attributes that occur only in the test set could be completely zero in the training set and therefore useless. For example:
```
df_jogos_treino = df_jogos[:2]
df_jogos_treino
df_jogos_teste = df_jogos[2:]
df_jogos_teste
```
## Real-world example
Consider this real example of movies and their actors ([obtained from Kaggle](https://www.kaggle.com/rounakbanik/the-movies-dataset)):
```
import pandas as pd
df_amostra = pd.read_csv("movies_amostra.csv")
df_amostra
```
In this example, the columns representing the main actors can be binarized. In our case, we can put all the actors into a "Bag of Items". The actors are represented by the columns `ator_1`, `ator_2`, ..., `ator_5`. Below is a suggestion of how to do this on the dataset:
```
import pandas as pd
from preprocessamento_atributos import BagOfItems
obj_bag_of_actors = BagOfItems(min_occur=3)
#boa=bag of actors ;)
df_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
df_amostra_boa
```
Note that we end up with many attributes, one per actor. Even though it is generally better to have fewer, more informative attributes, a machine learning method may still be able to use this many attributes effectively. In particular, the [linear SVM](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) and [RandomForest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) are methods that tend to do well on this kind of data.
This is the most practical way to do it; however, in machine learning we usually split our data into at least training and test sets, where the training set is the data you have full access to and the test set should reproduce a sample of the real world. Suppose the training set contains rare actors that never occur in the test set; in that case those attributes would be useless for the test set. This can make the result less representative of the real world - here, the difference is most likely almost negligible. But if we want to do it the "more correct" way, we must consider only the training data for this:
```
#assuming 80% of the sample is the training set
df_treino_amostra = df_amostra.sample(frac=0.8, random_state = 2)
df_teste_amostra = df_amostra.drop(df_treino_amostra.index)
#min_occur=3 sets the minimum number of occurrences for an actor to be considered,
#since an actor who appeared in only a few movies may be less relevant for predicting the genre
obj_bag_of_actors = BagOfItems(min_occur=3)
df_treino_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_treino_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
df_teste_amostra_boa = obj_bag_of_actors.aplica_bag_of_items(df_teste_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
```
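As a rough sketch of the kind of model mentioned above (this assumes the dataset has a target column, here called `genero`, which is not shown in this excerpt, and keeps only the numeric columns as features):
```
from sklearn.svm import LinearSVC

# Hypothetical target column "genero"; keep only numeric feature columns
X_treino = df_treino_amostra_boa.select_dtypes(include="number")
y_treino = df_treino_amostra["genero"]
clf = LinearSVC().fit(X_treino, y_treino)
```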
## Bag of Words representation
We often have texts that may be relevant to a given machine learning task, so we need a way to represent them for our machine learning method.
The most common way to do this is the `Bag of Words`, in which each word is an attribute and its value is the word's frequency in the text (or some other value indicating how important that word is in the text).
For example, suppose we have the sentences `A casa é grande` and `A casa é verde verde`, where each sentence is a different instance. The representation would be as follows:
```
dic_bow = {"a":[1,1],
"casa":[1,1],
"é":[1,1],
"verde":[0,2]
}
df_bow = pd.DataFrame.from_dict(dic_bow)
df_bow
```
In the approach above we used a term's frequency to define its importance in the text; however, some terms have a very high frequency and low importance - articles and prepositions, for instance - because they do not discriminate the text.
One way to measure the discriminative power of words is the `TF-IDF` metric. To compute it, we first compute the frequency of a term in the document (TF) and then multiply it by the IDF.
The formula to compute the TF-IDF of term $i$ in document (or instance) $j$ is the following:
\begin{equation}
TFIDF_{ij} = TF_{ij} \times IDF_i
\end{equation}
\begin{equation}
TF_{ij} = log(f_{ij})
\end{equation}
where $f_{ij}$ is the frequency of term $i$ in document $j$. The `log` is used to smooth very high values, and the $IDF$ (_Inverse Document Frequency_) of term $i$ is computed as follows:
\begin{equation}
IDF_i = log(\frac{N}{n_i})
\end{equation}
where $N$ is the number of documents in the collection and $n_i$ is the number of documents in which term $i$ occurs. The more discriminative a term is, the fewer documents it is expected to occur in and, consequently, the higher its $IDF$ will be.
For example, consider the words `de`, `bebida`, and `cerveja`. `cerveja` (beer) is more discriminative than `bebida` (drink), and `bebida` is more discriminative than the preposition `de` (of). Less discriminative terms will most likely be more frequent. For example, in a collection of 1000 documents, `de` might occur in 900 documents, `bebida` in 500, and `cerveja` in 100. Doing the calculation, we see that the more discriminative a term is, the higher its IDF:
```
import math
N = 1000
n_de = 900
n_bebida = 500
n_cerveja = 100
IDF_de = math.log(N/n_de)
IDF_bebida = math.log(N/n_bebida)
IDF_cerveja = math.log(N/n_cerveja)
print(f"IDF_de: {IDF_de}\tIDF_bebida:{IDF_bebida}\tIDF_cerveja:{IDF_cerveja}")
```
The `scikit-learn` library also provides a [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) class that transforms a text into a vector of attributes, using TF-IDF as the value for each term's relevance. See an example on the `resumo` column of our movie dataset:
```
import pandas as pd
from preprocessamento_atributos import BagOfWords
df_amostra = pd.read_csv("datasets/movies_amostra.csv")
bow_amostra = BagOfWords()
df_bow_amostra = bow_amostra.cria_bow(df_amostra,"resumo")
df_bow_amostra
```
Since there are many attributes, it may look as if the result was not generated correctly. But by filtering the words of a particular summary you can verify that it is fine:
```
df_bow_amostra[["in","lake", "high"]]
```
Do not feel limited to these representations. You can try more compact representations, for example: to preprocess the movie crew data (actors, director, and writer), compute the number of comedy movies crew members took part in and then the number of action movies. In this case, since you are using the class label, you must use **only** the training data. For the summary, you can use keywords: for example, build a list of keywords related to "action" and count how many of those keywords appear in the summary.
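A minimal sketch of that last idea (the keyword list below is only an illustration, not part of the original material):
```
# Count how many "action" keywords appear in each summary
palavras_acao = ["war", "fight", "battle", "chase", "explosion"]
df_amostra["qtd_acao"] = df_amostra["resumo"].fillna("").str.lower().apply(
    lambda texto: sum(texto.count(p) for p in palavras_acao))
df_amostra[["resumo", "qtd_acao"]].head()
```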
```
import torch
from RzLinear import RzLinear
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import rz_linear
idx = rz_linear.get_idx(random_numbers, input_dim, output_dim, chunk_size, weight_size, TILED)
```
# Check out the mapping
```
weight_size = 1000
input_dim = 128
output_dim = 128
chunk_size = 4
#hashed_weight = nn.Parameter(torch.from_numpy(np.random.uniform(-1/np.sqrt(input_dim), 1/np.sqrt(input_dim), size=((weight_size,))).astype(np.float32)))
hashed_weight = nn.Parameter(torch.from_numpy(np.arange(weight_size).astype(np.float32)))
rzlinear = RzLinear(input_dim, output_dim, chunk_size, hashed_weight).to("cuda:0");
input_v = torch.eye(input_dim).to("cuda:0")
output_v = rzlinear(input_v)
#print(output_v[:16,:16])
#print(output_v[16:32,16:32])
#print(output_v[16:96,:16])
#plt.hist(np.array(output_v.detach().cpu().view(-1)))
#np.max(np.array(output_v.detach().cpu().view(-1)))
output_v[1,:].long()
for i in range(int(input_dim/16)):
for j in range(int(input_dim/16)):
x = output_v[i*16:(i+1)*16, j*16:(j+1)*16].reshape(-1)
print(x[-1] - x[0])
```
# Check out the randomness
```
weight_size = 100000
input_dim = 1000
output_dim = 1000
chunk_size = 2
#hashed_weight = nn.Parameter(torch.from_numpy(np.random.uniform(-1/np.sqrt(input_dim), 1/np.sqrt(input_dim), size=((weight_size,))).astype(np.float32)))
hashed_weight = nn.Parameter(torch.from_numpy(np.arange(weight_size).astype(np.float32)))
rzlinear = RzLinear(input_dim, output_dim, chunk_size, hashed_weight).to("cuda:0");
input_v = torch.eye(input_dim).to("cuda:0")
output_v = rzlinear(input_v)
print(output_v.shape)
plt.hist(np.array(output_v.detach().cpu()).reshape(-1), bins = int(input_dim/10))
plt.show()
#output_v[1,:].long()
#idx = rz_linear.get_idx(rzlinear.random_numbers, input_dim, output_dim, chunk_size, weight_size, True)
#plt.hist(np.array(idx.detach().cpu()).reshape(-1), bins = int(input_dim/10))
output_v
idx
```
# Check out the correctness of the forward pass
```
weight_size = 1000000
input_dim = 1000
output_dim = 1000
chunk_size = 5
hashed_weight = nn.Parameter(torch.from_numpy(np.arange(weight_size).astype(np.float32)))
rzlinear = RzLinear(input_dim, output_dim, chunk_size, hashed_weight).to("cuda:0");
input_v = torch.eye(input_dim).to("cuda:0")
idx_matrix = rzlinear(input_v).long()
idx_matrix.long()
hashed_weight = nn.Parameter(torch.from_numpy(np.random.uniform(-1,1, size=(weight_size,)).astype(np.float32)))
rzlinear = RzLinear(input_dim, output_dim, chunk_size, hashed_weight).to("cuda:0");
```
```
out = rzlinear(input_v)
matrix = hashed_weight[idx_matrix]
ground_truth = torch.matmul(input_v, matrix)
if torch.norm(out - ground_truth) == 0:
print("All OK")
else:
print("Issue in forward pass")
```
# Check Backprop
```
from RzLinear import RzLinearFunction
import torch
from RzLinear import RzLinear
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
TILED = True
weight_size = 1000
input_dim = 64
output_dim = 64
chunk_size = 1
seed = 1024
r = np.random.RandomState(seed)
x = r.randint(0, 2038074743, (50,))
x = x + 1*(x%2==0);
random_numbers = torch.from_numpy(np.concatenate([np.array([2038074743]), x])).long().cuda(0) # set of 50 random numbers to use
print(random_numbers[:5])
hashed_weight = nn.Parameter(torch.from_numpy(np.arange(weight_size).astype(np.float32))).to("cuda:0")
input_v = torch.eye(input_dim).cuda(0)
#hashed_weight
RzLinearFunction.forwardproxy(hashed_weight, input_v ,random_numbers, input_dim, output_dim, chunk_size, TILED)
def myFunc(hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size, tiled):
out = RzLinearFunction.forwardproxy(hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size , tiled)
return out, torch.sum(out)
out, val = myFunc(hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size, TILED )
torch.cuda.synchronize()
print("loss", val)
grad = out * 0 + 1
torch.sum(grad)
wt_grad, in_grad = RzLinearFunction.backwardproxy(grad, hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size, TILED)
torch.cuda.synchronize()
torch.sum(wt_grad)
epsilon = 0.001
_, f0 = myFunc(hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size, TILED)
hwt_grad = torch.empty_like(hashed_weight, dtype=torch.float32)
for i in range(len(hashed_weight)):
#for i in [10]:
hwt = hashed_weight.clone()
hwt[i] += epsilon
_, fi = myFunc(hwt, input_v, random_numbers, input_dim, output_dim, chunk_size,TILED)
hwt_grad[i] = (fi - f0) / epsilon
#print(i,np.float(fi.cpu()),np.float(f0.cpu()), hwt_grad[i], wt_grad[i])
print ("error norm", torch.norm(hwt_grad - wt_grad))
#print(torch.max(torch.abs(hwt_grad - wt_grad)))
#print(hwt_grad[hwt_grad != 0][:10])
#print(wt_grad[hwt_grad != 0][:10])
print(hwt_grad[wt_grad != 0])
print(wt_grad[wt_grad != 0])
epsilon = 1e-4
_, f0 = myFunc(hashed_weight, input_v, random_numbers, input_dim, output_dim, chunk_size, TILED )
int_grad = torch.empty_like(input_v)
for i in range(int_grad.shape[0]):
for j in range(int_grad.shape[1]):
inputt = input_v.clone()
inputt[i][j] += epsilon
_, fi = myFunc(hashed_weight, inputt, random_numbers, input_dim, output_dim, chunk_size, TILED)
int_grad[i][j] = (fi - f0) / epsilon
print ("error norm", torch.norm(int_grad - in_grad))
print(int_grad[:5,:5])
print(in_grad[:5,:5])
print(torch.max(torch.abs(int_grad - in_grad)))
```
```
from google.colab import drive
drive.mount('/content/drive')
cd drive/My\ Drive/CPN Dataset
ls
pip install np_utils
```
**TRAINING/TESTING MODEL**
```
#libraries required to train the model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import tensorflow as tf
import cv2, os, gc, glob
from tqdm import tqdm
from tensorflow.keras import layers, models
import keras
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPool2D
from keras.layers import Activation, Dropout, BatchNormalization, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder
dataset_dir="/content/drive/My Drive/CPN Dataset"
imgpath=[]
for (dirnames, foldernames, filenames) in os.walk(dataset_dir): #get the full path name of a type of file into all subdirectories with walk
print(dirnames,foldernames,filenames)
for filename in filenames:
if (filename[-3:]=='png'): #using list slicing we can fetch the last 'n' elements from list
imgpath.append(os.path.join(dirnames, filename)) #os.path.join combines one or more path names into a single path and then paths will be appended to list imgpath
#initialize the list of image data and target labels
data=[]
target=[]
resize=150
dic={'Viral Pneumonia': 'Pneumonia', 'Normal': 'Normal', 'COVID': 'Covid-19'} #dictionary to map file label name with target label name
for imgpaths in tqdm(imgpath): #tqdm is used to make terminal progress bar
label=imgpaths.split(os.path.sep)[-2]
image=cv2.imread(imgpaths)
image=cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image=cv2.resize(image, (resize, resize)) /255 #resize the image and scale pixel values to [0,1]
#updates the respective data and target label lists
data.append(image)
target.append(dic[label])
print(len(data)) #to know length of data
#box plot and counting the lables
df=pd.DataFrame(target,columns=['labels'])
sns.countplot(df['labels'])
plt.show()
size=[]
for file in imgpath: #for loop to get image paths
img=plt.imread(file) #used to read an image from a file into an array
size.append(img.shape) #image.shape returns a tuple of the number of rows, columns, and channels (if the image is color).
pd.Series(size).value_counts() #it retures a series containing counts of unique values
plt.figure(figsize=(10,10)) #to change the size of the images
for c , i in enumerate(list(np.random.randint(0,len(imgpath),20))) : #enumerate() to get a counter and the value from the iterable at the same time and random.randit() to generate randome images from image path
plt.subplot(4,5,c+1)
plt.imshow(data[i] , cmap='gray')
plt.title(target[i])
plt.axis('off')
plt.show()
#encode labels as integer
le=LabelEncoder() #labelencoder used to normalize labels
labels=le.fit_transform(target) #also be used to transform non-numerical labels to numerical labels
labels=to_categorical(labels)
print(le.classes_)
print(labels[0])
#splitting data into training and testing (80-20 ratio)
(trainX, testX, trainY, testY)=train_test_split(data, labels, test_size=0.20, stratify=labels, random_state=42)
trainX=np.array(trainX)
testX=np.array(testX)
trainY=np.array(trainY)
testY=np.array(testY)
print(trainX.shape)
print(testX.shape)
print(trainY.shape)
print(testY.shape)
trainX[0]
from tensorflow.keras import backend as K
K.clear_session()
#CNN Model
s=150
model=Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(s,s,3), kernel_initializer='he_normal'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation="relu"))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), activation="relu"))
model.add(MaxPool2D(pool_size=(2,2), strides=(1,1)))
model.add(Dropout(0.25))
#fully connected
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(3, activation="softmax"))
#compile
model.compile(optimizer=Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
print(model.summary())
data=[]
imgpath=[]
df=[]
size=[]
#will train the model for up to 20 epochs
epochs=20
batch_size=64
ThisModel=model.fit(trainX, trainY, batch_size, steps_per_epoch=len(trainX) // batch_size, validation_data=(testX, testY), validation_steps=len(testX) // batch_size, epochs=epochs, verbose=1)
model.save('/content/drive/My Drive/CPN Dataset/CPN_Model.h5')
#loss/accuracy graph
n=epochs
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, n), ThisModel.history["loss"], label="train_loss")
plt.plot(np.arange(0, n), ThisModel.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, n), ThisModel.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, n), ThisModel.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy for Classification between COVID-19,Pneumonia and Normal")
plt.xlabel("Epochs")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right", bbox_to_anchor=(1.25, 1))
plt.savefig("plot.png")
#to check test accuracy
modelLoss, modelAccuracy=model.evaluate(testX, testY, verbose=0)
print('Test Loss is: {}'.format(modelLoss))
print('Test Accuracy is: {}'.format(modelAccuracy ))
modelLoss=[]
modelAccuracy=[]
#classification report on train
predIdxs = model.predict (trainX, batch_size=64)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(trainY.argmax(axis=1), predIdxs, target_names=le.classes_, digits=3))
predIdxs=[]
#classification report on test
predIdxs=model.predict(testX, batch_size=64)
predIdxs=np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=le.classes_, digits=3))
#confusion matrix
from sklearn.metrics import confusion_matrix
confusionmatrix=confusion_matrix(testY.argmax(axis=1), predIdxs)
print(confusionmatrix)
#heatmap
df_cm=pd.DataFrame(confusionmatrix, columns=le.classes_, index=le.classes_)
df_cm.index.name='Actual'
df_cm.columns.name='Predicted'
plt.figure(figsize=(5,5))
sns.heatmap(df_cm/np.sum(df_cm), fmt='.2%', annot=True, annot_kws={'size':16})
plt.show()
```
**PREDICTING THE OUTPUT**
```
#loading the model
from tensorflow import keras
model=keras.models.load_model('/content/drive/MyDrive/CPN Dataset/CPN_Model.h5')
#necessary libraries
from numpy import asarray
from PIL import Image
from tensorflow.keras.preprocessing import image
import numpy as np
import cv2
#image paths
covid_img_path="/content/drive/MyDrive/CPN Dataset/COVID/COVID-19.png"
normal_img_path="/content/drive/MyDrive/CPN Dataset/Normal/Normal-500.png"
pneumonia_img_path="/content/drive/MyDrive/CPN Dataset/Viral Pneumonia/Viral Pneumonia-1000.png"
img_paths=[covid_img_path, normal_img_path, pneumonia_img_path]
print(img_paths)
#preprocessing the image
resize=150
images=[]
for img_path in img_paths:
img_path=cv2.imread(img_path)
img_path=cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)
img_path=cv2.resize(img_path, (resize, resize)) /255
images.append(img_path)
#printing image shape
count=0
for _ in images:
images[count]=np.array(images[count])
images[count]=np.expand_dims(images[count], axis=0)
print(images[count].shape)
count+=1
#prediction
prediction=[]
for i in images:
prediction.append(model.predict(i))
#output of prediction
output=[]
for p in prediction:
output.append(np.argmax(p,axis=1))
#0: COVID-19
#1: Normal
#2: Pneumonia
title=[]
for out in output:
if out==0:
title.append('COVID-19')
elif out==1:
title.append('Normal')
else:
title.append('Pneumonia')
print(title)
#printing the images with predicted output
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
i=0
for img in images:
plt.subplot(1,3,i+1)
plt.imshow(np.squeeze(img) , cmap='gray')
plt.title(title[i])
plt.axis('off')
i+=1
plt.show()
```
# Multi model comparison
```
import numpy as np
from fmskill.model import ModelResult
from fmskill import PointObservation, TrackObservation, Connector
```
## Define observations
```
fldr = '../tests/testdata/SW/'
o1 = PointObservation(fldr + 'HKNA_Hm0.dfs0', item=0, x=4.2420, y=52.6887, name="HKNA")
o2 = PointObservation(fldr + "eur_Hm0.dfs0", item=0, x=3.2760, y=51.9990, name="EPL")
o3 = TrackObservation(fldr + "Alti_c2_Dutch.dfs0", item=3, name="c2")
```
## Define models
```
mr1 = ModelResult(fldr + 'HKZN_local_2017_DutchCoast.dfsu', name='SW_1', item=0)
mr2 = ModelResult(fldr + 'HKZN_local_2017_DutchCoast_v2.dfsu', name='SW_2', item=0)
```
## Connect observations and model results
```
con = Connector([o1, o2, o3], [mr1, mr2])
con
con.modelresults
con.plot_observation_positions();
con.plot_temporal_coverage();
cc = con.extract() # returns a collection of comparisons
cc["EPL"] # select a single comparer from the collection like this
```
## Perform analysis
You can perform simple filtering on a specific `observation` or a specific `model`. You can refer to observations and models by their _name_ or _index_.
The main analysis methods are:
* skill()
* mean_skill()
* scatter()
* taylor()
```
cc.skill()
cc.skill(observation="c2")
cc.mean_skill(model=0, observation=[0,"c2"])
cc.scatter(model='SW_1', cmap='OrRd')
cc.taylor(normalize_std=True, aggregate_observations=False)
```
### Time series plot (specifically for point comparisons)
If you select a comparison from the collection which is a PointComparer, you can do a time series plot:
```
cc['EPL'].plot_timeseries(figsize=(12,4));
```
## Filtering on time
Use the `start` and `end` arguments to do your analysis on part of the time series
```
cc.skill(model="SW_1", end='2017-10-28')
cc.scatter(model='SW_2', start='2017-10-28', cmap='OrRd', figsize=(6,7))
```
## Filtering on area
You can do your analysis in a specific `area` by providing a bounding box or a closed polygon:
```
bbox = np.array([0.5,52.5,5,54])
polygon = np.array([[6,51],[0,55],[0,51],[6,51]])
ax = con.plot_observation_positions();
ax.plot([bbox[0],bbox[2],bbox[2],bbox[0],bbox[0]],[bbox[1],bbox[1],bbox[3],bbox[3],bbox[1]]);
ax.plot(polygon[:,0],polygon[:,1]);
cc.skill(model="SW_1", area=bbox)
cc.scatter(model="SW_2", area=polygon, backend='plotly')
```
## Skill object
The skill() and mean_skill() methods return a skill object that can visualize results in various ways. The primary methods of the skill object are:
* style()
* plot_bar()
* plot_line()
* plot_grid()
* sel()
```
s = cc.skill()
s.style()
s.style(columns='rmse')
s.plot_bar('rmse');
s = cc.skill(by=['model','freq:12H'], metrics=['bias','rmse','si'])
s.style()
s.plot_line('rmse', title='Hm0 rmse [m]');
s.plot_grid('si', fmt='0.1%', title='Hm0 Scatter index');
```
### The sel() method can subset the skill object
A new skill object will be returned
```
s = cc.skill()
s.style()
s.sel(model='SW_1').style()
s.sel(observation='HKNA').style()
s.sel('rmse>0.25').style()
s.sel('rmse>0.3', columns=['rmse','mae']).style()
```
```
import numpy as np
import pandas as pd
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
import IPython.display as ipd
import librosa.display
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import pandas as pd
```
Either run this cell to replace all the missing values with NaN,
```
data = pd.read_csv("./diabetic_data.csv")
data.replace('?',np.nan,inplace=True)
```
or run the following two cells for a different data preprocessing
```
data = pd.read_csv("./diabetic_data.csv")
data["race_age_gender"] = data["race"].map(str) + data["gender"].map(str) + data["age"].map(str)
data["num_medications" + "_log"] = np.log(data["num_medications"])
drop_Idx = set(data[(data['diag_1'] == '?') & (data['diag_2'] == '?') & (data['diag_3'] == '?')].index)
drop_Idx = drop_Idx.union(set(data['diag_1'][data['diag_1'] == '?'].index))
drop_Idx = drop_Idx.union(set(data['diag_2'][data['diag_2'] == '?'].index))
drop_Idx = drop_Idx.union(set(data['diag_3'][data['diag_3'] == '?'].index))
drop_Idx = drop_Idx.union(set(data['race'][data['race'] == '?'].index))
drop_Idx = drop_Idx.union(set(data[data['discharge_disposition_id'] == 11].index))
drop_Idx = drop_Idx.union(set(data['gender'][data['gender'] == 'Unknown/Invalid'].index))
new_Idx = list(set(data.index) - set(drop_Idx))
data = data.iloc[new_Idx]
# re-encoding admission type, discharge type and admission source into fewer categories
data['admission_type_id'] = data['admission_type_id'].replace(2,1)
data['admission_type_id'] = data['admission_type_id'].replace(7,1)
data['admission_type_id'] = data['admission_type_id'].replace(6,5)
data['admission_type_id'] = data['admission_type_id'].replace(8,5)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(6,1)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(8,1)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(9,1)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(13,1)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(3,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(4,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(5,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(14,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(22,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(23,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(24,2)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(12,10)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(15,10)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(16,10)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(17,10)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(25,18)
data['discharge_disposition_id'] = data['discharge_disposition_id'].replace(26,18)
data['admission_source_id'] = data['admission_source_id'].replace(2,1)
data['admission_source_id'] = data['admission_source_id'].replace(3,1)
data['admission_source_id'] = data['admission_source_id'].replace(5,4)
data['admission_source_id'] = data['admission_source_id'].replace(6,4)
data['admission_source_id'] = data['admission_source_id'].replace(10,4)
data['admission_source_id'] = data['admission_source_id'].replace(22,4)
data['admission_source_id'] = data['admission_source_id'].replace(25,4)
data['admission_source_id'] = data['admission_source_id'].replace(15,9)
data['admission_source_id'] = data['admission_source_id'].replace(17,9)
data['admission_source_id'] = data['admission_source_id'].replace(20,9)
data['admission_source_id'] = data['admission_source_id'].replace(21,9)
data['admission_source_id'] = data['admission_source_id'].replace(13,11)
data['admission_source_id'] = data['admission_source_id'].replace(14,11)
data['A1Cresult'] = data['A1Cresult'].replace('>7', 1)
data['A1Cresult'] = data['A1Cresult'].replace('>8', 1)
data['A1Cresult'] = data['A1Cresult'].replace('Norm', 0)
data['A1Cresult'] = data['A1Cresult'].replace('None', -99)
data['max_glu_serum'] = data['max_glu_serum'].replace('>200', 1)
data['max_glu_serum'] = data['max_glu_serum'].replace('>300', 1)
data['max_glu_serum'] = data['max_glu_serum'].replace('Norm', 0)
data['max_glu_serum'] = data['max_glu_serum'].replace('None', -99)
data['level1_diag1'] = data['diag_1']
data['level2_diag1'] = data['diag_1']
data['level1_diag2'] = data['diag_2']
data['level2_diag2'] = data['diag_2']
data['level1_diag3'] = data['diag_3']
data['level2_diag3'] = data['diag_3']
data.loc[data['diag_1'].str.contains('V'), ['level1_diag1', 'level2_diag1']] = 0
data.loc[data['diag_1'].str.contains('E'), ['level1_diag1', 'level2_diag1']] = 0
data.loc[data['diag_2'].str.contains('V'), ['level1_diag2', 'level2_diag2']] = 0
data.loc[data['diag_2'].str.contains('E'), ['level1_diag2', 'level2_diag2']] = 0
data.loc[data['diag_3'].str.contains('V'), ['level1_diag3', 'level2_diag3']] = 0
data.loc[data['diag_3'].str.contains('E'), ['level1_diag3', 'level2_diag3']] = 0
data['level1_diag1'] = data['level1_diag1'].replace('?', -1)
data['level2_diag1'] = data['level2_diag1'].replace('?', -1)
data['level1_diag2'] = data['level1_diag2'].replace('?', -1)
data['level2_diag2'] = data['level2_diag2'].replace('?', -1)
data['level1_diag3'] = data['level1_diag3'].replace('?', -1)
data['level2_diag3'] = data['level2_diag3'].replace('?', -1)
data['level1_diag1'] = data['level1_diag1'].astype(float)
data['level2_diag1'] = data['level2_diag1'].astype(float)
data['level1_diag2'] = data['level1_diag2'].astype(float)
data['level2_diag2'] = data['level2_diag2'].astype(float)
data['level1_diag3'] = data['level1_diag3'].astype(float)
data['level2_diag3'] = data['level2_diag3'].astype(float)
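# The loops below collapse the raw ICD-9 diagnosis codes into broad disease groups
# (group names inferred from the standard ICD-9 chapter ranges used here):
#   1: circulatory (390-459, 785)    2: respiratory (460-519, 786)
#   3: digestive (520-579, 787)      4: diabetes (250.xx)
#   5: injury/poisoning (800-999)    6: musculoskeletal (710-739)
#   7: genitourinary (580-629, 788)  8: neoplasms (140-239)
#   0: everything else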
for index, row in data.iterrows():
if (row['level1_diag1'] >= 390 and row['level1_diag1'] < 460) or (np.floor(row['level1_diag1']) == 785):
data.loc[index, 'level1_diag1'] = 1
elif (row['level1_diag1'] >= 460 and row['level1_diag1'] < 520) or (np.floor(row['level1_diag1']) == 786):
data.loc[index, 'level1_diag1'] = 2
elif (row['level1_diag1'] >= 520 and row['level1_diag1'] < 580) or (np.floor(row['level1_diag1']) == 787):
data.loc[index, 'level1_diag1'] = 3
elif (np.floor(row['level1_diag1']) == 250):
data.loc[index, 'level1_diag1'] = 4
elif (row['level1_diag1'] >= 800 and row['level1_diag1'] < 1000):
data.loc[index, 'level1_diag1'] = 5
elif (row['level1_diag1'] >= 710 and row['level1_diag1'] < 740):
data.loc[index, 'level1_diag1'] = 6
elif (row['level1_diag1'] >= 580 and row['level1_diag1'] < 630) or (np.floor(row['level1_diag1']) == 788):
data.loc[index, 'level1_diag1'] = 7
elif (row['level1_diag1'] >= 140 and row['level1_diag1'] < 240):
data.loc[index, 'level1_diag1'] = 8
else:
data.loc[index, 'level1_diag1'] = 0
if (row['level1_diag2'] >= 390 and row['level1_diag2'] < 460) or (np.floor(row['level1_diag2']) == 785):
data.loc[index, 'level1_diag2'] = 1
elif (row['level1_diag2'] >= 460 and row['level1_diag2'] < 520) or (np.floor(row['level1_diag2']) == 786):
data.loc[index, 'level1_diag2'] = 2
elif (row['level1_diag2'] >= 520 and row['level1_diag2'] < 580) or (np.floor(row['level1_diag2']) == 787):
data.loc[index, 'level1_diag2'] = 3
elif (np.floor(row['level1_diag2']) == 250):
data.loc[index, 'level1_diag2'] = 4
elif (row['level1_diag2'] >= 800 and row['level1_diag2'] < 1000):
data.loc[index, 'level1_diag2'] = 5
elif (row['level1_diag2'] >= 710 and row['level1_diag2'] < 740):
data.loc[index, 'level1_diag2'] = 6
elif (row['level1_diag2'] >= 580 and row['level1_diag2'] < 630) or (np.floor(row['level1_diag2']) == 788):
data.loc[index, 'level1_diag2'] = 7
elif (row['level1_diag2'] >= 140 and row['level1_diag2'] < 240):
data.loc[index, 'level1_diag2'] = 8
else:
data.loc[index, 'level1_diag2'] = 0
if (row['level1_diag3'] >= 390 and row['level1_diag3'] < 460) or (np.floor(row['level1_diag3']) == 785):
data.loc[index, 'level1_diag3'] = 1
elif (row['level1_diag3'] >= 460 and row['level1_diag3'] < 520) or (np.floor(row['level1_diag3']) == 786):
data.loc[index, 'level1_diag3'] = 2
elif (row['level1_diag3'] >= 520 and row['level1_diag3'] < 580) or (np.floor(row['level1_diag3']) == 787):
data.loc[index, 'level1_diag3'] = 3
elif (np.floor(row['level1_diag3']) == 250):
data.loc[index, 'level1_diag3'] = 4
elif (row['level1_diag3'] >= 800 and row['level1_diag3'] < 1000):
data.loc[index, 'level1_diag3'] = 5
elif (row['level1_diag3'] >= 710 and row['level1_diag3'] < 740):
data.loc[index, 'level1_diag3'] = 6
elif (row['level1_diag3'] >= 580 and row['level1_diag3'] < 630) or (np.floor(row['level1_diag3']) == 788):
data.loc[index, 'level1_diag3'] = 7
elif (row['level1_diag3'] >= 140 and row['level1_diag3'] < 240):
data.loc[index, 'level1_diag3'] = 8
else:
data.loc[index, 'level1_diag3'] = 0
for index, row in data.iterrows():
if (row['level2_diag1'] >= 390 and row['level2_diag1'] < 399):
data.loc[index, 'level2_diag1'] = 1
elif (row['level2_diag1'] >= 401 and row['level2_diag1'] < 415):
data.loc[index, 'level2_diag1'] = 2
elif (row['level2_diag1'] >= 415 and row['level2_diag1'] < 460):
data.loc[index, 'level2_diag1'] = 3
elif (np.floor(row['level2_diag1']) == 785):
data.loc[index, 'level2_diag1'] = 4
elif (row['level2_diag1'] >= 460 and row['level2_diag1'] < 489):
data.loc[index, 'level2_diag1'] = 5
elif (row['level2_diag1'] >= 490 and row['level2_diag1'] < 497):
data.loc[index, 'level2_diag1'] = 6
elif (row['level2_diag1'] >= 500 and row['level2_diag1'] < 520):
data.loc[index, 'level2_diag1'] = 7
elif (np.floor(row['level2_diag1']) == 786):
data.loc[index, 'level2_diag1'] = 8
elif (row['level2_diag1'] >= 520 and row['level2_diag1'] < 530):
data.loc[index, 'level2_diag1'] = 9
elif (row['level2_diag1'] >= 530 and row['level2_diag1'] < 544):
data.loc[index, 'level2_diag1'] = 10
elif (row['level2_diag1'] >= 550 and row['level2_diag1'] < 554):
data.loc[index, 'level2_diag1'] = 11
elif (row['level2_diag1'] >= 555 and row['level2_diag1'] < 580):
data.loc[index, 'level2_diag1'] = 12
elif (np.floor(row['level2_diag1']) == 787):
data.loc[index, 'level2_diag1'] = 13
elif (np.floor(row['level2_diag1']) == 250):
data.loc[index, 'level2_diag1'] = 14
elif (row['level2_diag1'] >= 800 and row['level2_diag1'] < 1000):
data.loc[index, 'level2_diag1'] = 15
elif (row['level2_diag1'] >= 710 and row['level2_diag1'] < 740):
data.loc[index, 'level2_diag1'] = 16
elif (row['level2_diag1'] >= 580 and row['level2_diag1'] < 630):
data.loc[index, 'level2_diag1'] = 17
elif (np.floor(row['level2_diag1']) == 788):
data.loc[index, 'level2_diag1'] = 18
elif (row['level2_diag1'] >= 140 and row['level2_diag1'] < 240):
data.loc[index, 'level2_diag1'] = 19
elif row['level2_diag1'] >= 240 and row['level2_diag1'] < 280 and (np.floor(row['level2_diag1']) != 250):
data.loc[index, 'level2_diag1'] = 20
elif (row['level2_diag1'] >= 680 and row['level2_diag1'] < 710) or (np.floor(row['level2_diag1']) == 782):
data.loc[index, 'level2_diag1'] = 21
elif (row['level2_diag1'] >= 290 and row['level2_diag1'] < 320):
data.loc[index, 'level2_diag1'] = 22
else:
data.loc[index, 'level2_diag1'] = 0
if (row['level2_diag2'] >= 390 and row['level2_diag2'] < 399):
data.loc[index, 'level2_diag2'] = 1
elif (row['level2_diag2'] >= 401 and row['level2_diag2'] < 415):
data.loc[index, 'level2_diag2'] = 2
elif (row['level2_diag2'] >= 415 and row['level2_diag2'] < 460):
data.loc[index, 'level2_diag2'] = 3
elif (np.floor(row['level2_diag2']) == 785):
data.loc[index, 'level2_diag2'] = 4
elif (row['level2_diag2'] >= 460 and row['level2_diag2'] < 489):
data.loc[index, 'level2_diag2'] = 5
elif (row['level2_diag2'] >= 490 and row['level2_diag2'] < 497):
data.loc[index, 'level2_diag2'] = 6
elif (row['level2_diag2'] >= 500 and row['level2_diag2'] < 520):
data.loc[index, 'level2_diag2'] = 7
elif (np.floor(row['level2_diag2']) == 786):
data.loc[index, 'level2_diag2'] = 8
elif (row['level2_diag2'] >= 520 and row['level2_diag2'] < 530):
data.loc[index, 'level2_diag2'] = 9
elif (row['level2_diag2'] >= 530 and row['level2_diag2'] < 544):
data.loc[index, 'level2_diag2'] = 10
elif (row['level2_diag2'] >= 550 and row['level2_diag2'] < 554):
data.loc[index, 'level2_diag2'] = 11
elif (row['level2_diag2'] >= 555 and row['level2_diag2'] < 580):
data.loc[index, 'level2_diag2'] = 12
elif (np.floor(row['level2_diag2']) == 787):
data.loc[index, 'level2_diag2'] = 13
elif (np.floor(row['level2_diag2']) == 250):
data.loc[index, 'level2_diag2'] = 14
elif (row['level2_diag2'] >= 800 and row['level2_diag2'] < 1000):
data.loc[index, 'level2_diag2'] = 15
elif (row['level2_diag2'] >= 710 and row['level2_diag2'] < 740):
data.loc[index, 'level2_diag2'] = 16
elif (row['level2_diag2'] >= 580 and row['level2_diag2'] < 630):
data.loc[index, 'level2_diag2'] = 17
elif (np.floor(row['level2_diag2']) == 788):
data.loc[index, 'level2_diag2'] = 18
elif (row['level2_diag2'] >= 140 and row['level2_diag2'] < 240):
data.loc[index, 'level2_diag2'] = 19
elif row['level2_diag2'] >= 240 and row['level2_diag2'] < 280 and (np.floor(row['level2_diag2']) != 250):
data.loc[index, 'level2_diag2'] = 20
elif (row['level2_diag2'] >= 680 and row['level2_diag2'] < 710) or (np.floor(row['level2_diag2']) == 782):
data.loc[index, 'level2_diag2'] = 21
elif (row['level2_diag2'] >= 290 and row['level2_diag2'] < 320):
data.loc[index, 'level2_diag2'] = 22
else:
data.loc[index, 'level2_diag2'] = 0
if (row['level2_diag3'] >= 390 and row['level2_diag3'] < 399):
data.loc[index, 'level2_diag3'] = 1
elif (row['level2_diag3'] >= 401 and row['level2_diag3'] < 415):
data.loc[index, 'level2_diag3'] = 2
elif (row['level2_diag3'] >= 415 and row['level2_diag3'] < 460):
data.loc[index, 'level2_diag3'] = 3
elif (np.floor(row['level2_diag3']) == 785):
data.loc[index, 'level2_diag3'] = 4
elif (row['level2_diag3'] >= 460 and row['level2_diag3'] < 489):
data.loc[index, 'level2_diag3'] = 5
elif (row['level2_diag3'] >= 490 and row['level2_diag3'] < 497):
data.loc[index, 'level2_diag3'] = 6
elif (row['level2_diag3'] >= 500 and row['level2_diag3'] < 520):
data.loc[index, 'level2_diag3'] = 7
elif (np.floor(row['level2_diag3']) == 786):
data.loc[index, 'level2_diag3'] = 8
elif (row['level2_diag3'] >= 520 and row['level2_diag3'] < 530):
data.loc[index, 'level2_diag3'] = 9
elif (row['level2_diag3'] >= 530 and row['level2_diag3'] < 544):
data.loc[index, 'level2_diag3'] = 10
elif (row['level2_diag3'] >= 550 and row['level2_diag3'] < 554):
data.loc[index, 'level2_diag3'] = 11
elif (row['level2_diag3'] >= 555 and row['level2_diag3'] < 580):
data.loc[index, 'level2_diag3'] = 12
elif (np.floor(row['level2_diag3']) == 787):
data.loc[index, 'level2_diag3'] = 13
elif (np.floor(row['level2_diag3']) == 250):
data.loc[index, 'level2_diag3'] = 14
elif (row['level2_diag3'] >= 800 and row['level2_diag3'] < 1000):
data.loc[index, 'level2_diag3'] = 15
elif (row['level2_diag3'] >= 710 and row['level2_diag3'] < 740):
data.loc[index, 'level2_diag3'] = 16
elif (row['level2_diag3'] >= 580 and row['level2_diag3'] < 630):
data.loc[index, 'level2_diag3'] = 17
elif (np.floor(row['level2_diag3']) == 788):
data.loc[index, 'level2_diag3'] = 18
elif (row['level2_diag3'] >= 140 and row['level2_diag3'] < 240):
data.loc[index, 'level2_diag3'] = 19
elif row['level2_diag3'] >= 240 and row['level2_diag3'] < 280 and (np.floor(row['level2_diag3']) != 250):
data.loc[index, 'level2_diag3'] = 20
elif (row['level2_diag3'] >= 680 and row['level2_diag3'] < 710) or (np.floor(row['level2_diag3']) == 782):
data.loc[index, 'level2_diag3'] = 21
elif (row['level2_diag3'] >= 290 and row['level2_diag3'] < 320):
data.loc[index, 'level2_diag3'] = 22
else:
data.loc[index, 'level2_diag3'] = 0
from math import log10
class ExploratoryDataAnalysis:
def __init__(self,data):
self.data = data
def get_feature_class_count(self,col):
count_map = {}
for x in col:
if(x in count_map):
count_map[x] = count_map[x] + 1
else:
count_map[x] = 1
return log10(len(count_map))
def get_missing_counts(self,col):
return len(col) - col.count()
def plot_class_counts(self,plot_missing_feature_count = False):
class_counts = [self.get_feature_class_count(self.data[column]) for column in self.data]
data = [go.Histogram(x=self.data.columns, y=class_counts)]
trace = go.Bar(
x=self.data.columns,
y=class_counts
)
layout = go.Layout(
title='Feature counts | Total number of rows=' + str(len(self.data)),
xaxis = dict(title='Features'),
yaxis = dict(title='Log(Number of unique values)')
)
py.iplot(go.Figure(data=[trace], layout=layout))
def plot_class_count(self):
count_NO = 0
count_less_30 = 0
for x in self.data["readmitted"]:
if(x == "NO"):
count_NO = count_NO + 1
elif (x == "<30"):
count_less_30 = count_less_30 + 1
trace = go.Bar(
x=[">30","<30","NO"],
y=[(len(self.data) - count_NO - count_less_30),count_less_30,count_NO]
)
layout = go.Layout(
title='Class counts | Total number of rows=' + str(len(self.data)),
xaxis = dict(title='Class'),
yaxis = dict(title='Number of rows')
)
py.iplot(go.Figure(data=[trace], layout=layout))
def plot_missing_count(self):
class_counts = [len(self.data[column]) for column in self.data]
missing_counts = [self.get_missing_counts(self.data[column]) for column in self.data]
data = [go.Histogram(x=self.data.columns, y=class_counts)]
trace = go.Bar(
x=self.data.columns,
y=class_counts
)
trace_missing = go.Bar(
x=self.data.columns,
y=missing_counts
)
layout = go.Layout(
title='Feature counts | Total number of rows=' + str(len(self.data)),
xaxis = dict(title='Features'),
yaxis = dict(title='Log(Number of unique values)')
)
py.iplot(go.Figure(data=[trace,trace_missing], layout=layout))
EDA = ExploratoryDataAnalysis(data)
EDA.plot_class_counts()
EDA.plot_class_count()
EDA.plot_missing_count()
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import hstack,vstack
class OneHotEncoderWithFeaturePrunning:
    def __init__(self, threshold_count=50, dummy_variable="UNK"):
        self.enc = CountVectorizer(analyzer='char_wb', lowercase=False)
        self.frequency_map = {}
        self.threshold_count = threshold_count
        self.dummy_variable = dummy_variable
    def _process_frequencies(self, data):
        # count how many times each category appears
        for value in data:
            self.frequency_map[value] = self.frequency_map.get(value, 0) + 1
    def _process_rare_data(self, data):
        # replace categories seen at most `threshold_count` times with the dummy value
        return [self.dummy_variable if self.frequency_map.get(value, 0) <= self.threshold_count else value
                for value in data]
    def fit(self, data):
        self._process_frequencies(data)
        return self.enc.fit(self._process_rare_data(data))
    def fit_transform(self, data):
        self._process_frequencies(data)
        return self.enc.fit_transform(self._process_rare_data(data))
    def transform(self, data):
        return self.enc.transform(self._process_rare_data(data))
    def get_encoder(self):
        return self.enc
class FeatureEngineering:
def get_encoding(self,column):
categorical_encoder = OneHotEncoderWithFeaturePrunning()
return categorical_encoder.fit_transform(column.astype('str')), categorical_encoder
def fit(self,data,y):
self.categorical_indices={
"race":True,
"gender": True,
"age":True,
"admission_type_id":True,
"discharge_disposition_id":True,
"admission_source_id": True,
"time_in_hospital": False,
# "medical_specialty":True,
"num_lab_procedures":False,
"num_procedures":False,
"num_medications":False,
"number_outpatient":True,
"number_emergency":True,
"number_inpatient":True,
# "diag_1":True,
# "diag_2":True,
# "diag_3":True,
"number_diagnoses":False,
"max_glu_serum":True,
"A1Cresult":True,
"metformin":True,
"repaglinide":True,
"nateglinide":True,
"chlorpropamide":True,
"glimepiride":True,
"acetohexamide":True,
"glipizide":True,
"glyburide":True,
"tolbutamide":True,
"pioglitazone":True,
"rosiglitazone":True,
"acarbose":True,
"miglitol":True,
"troglitazone":True,
"tolazamide":True,
# "examide":True,
# "citoglipton":True,
"insulin":True,
"glyburide-metformin":True,
"glipizide-metformin":True,
"metformin-rosiglitazone":True,
"metformin-pioglitazone":True,
"change":True,
"diabetesMed":True
# "race_age_gender":True
# "level2_diag3":True,
# "level2_diag1":True,
# "level2_diag2":True,
# "level1_diag3":True,
# "level1_diag1":True,
# "level1_diag2":True
}
x = None
# data = self.pre_process_columns(data)
self.encoder = {}
for label, column in data.items():
if(label in self.categorical_indices):
if(self.categorical_indices[label] == True):
encoded_data,self.encoder[label] = self.get_encoding(column)
x = encoded_data if x is None else hstack([x,encoded_data],format="csr")
else:
x = column if x is None else hstack([x,((pd.DataFrame(column.astype('float64'))).to_sparse())],format="csr")
return x.toarray()
def fit_transform(self,data,y):
return self.fit(data,y)
def transform(self,data):
x = None
# data = self.pre_process_columns(data)
for label, column in data.items():
if(label in self.categorical_indices):
if(self.categorical_indices[label] == True):
encoded_data = (self.encoder[label]).transform(column.astype('str'))
x = encoded_data if x is None else hstack([x,encoded_data])
else:
x = column if x is None else hstack([x,(pd.DataFrame(column).to_sparse())])
return x.toarray()
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import hstack,vstack
class FeatureEngineeringForTrees:
def get_encoding(self,column):
categorical_encoder = LabelEncoder()
return categorical_encoder.fit_transform(column.astype('str')), categorical_encoder
def fit(self,data,y):
self.categorical_indices={
"race":True,
"gender": True,
"age":True,
"admission_type_id":True,
"discharge_disposition_id":True,
"admission_source_id": True,
"time_in_hospital": False,
# "medical_specialty":True,
"num_lab_procedures":False,
"num_procedures":False,
"num_medications":False,
"number_outpatient":True,
"number_emergency":True,
"number_inpatient":True,
# "diag_1":True,
# "diag_2":True,
# "diag_3":True,
"number_diagnoses":False,
"max_glu_serum":True,
"A1Cresult":True,
"metformin":True,
"repaglinide":True,
"nateglinide":True,
"chlorpropamide":True,
"glimepiride":True,
"acetohexamide":True,
"glipizide":True,
"glyburide":True,
"tolbutamide":True,
"pioglitazone":True,
"rosiglitazone":True,
"acarbose":True,
"miglitol":True,
"troglitazone":True,
"tolazamide":True,
# "examide":True,
# "citoglipton":True,
"insulin":True,
"glyburide-metformin":True,
"glipizide-metformin":True,
"metformin-rosiglitazone":True,
"metformin-pioglitazone":True,
"change":True,
"diabetesMed":True
# "race_age_gender":True
# "level2_diag3":True,
# "level2_diag1":True,
# "level2_diag2":True,
# "level1_diag3":True,
# "level1_diag1":True,
# "level1_diag2":True
}
x = None
# data = self.pre_process_columns(data)
self.encoder = {}
for label, column in data.items():
if(label in self.categorical_indices):
if(self.categorical_indices[label] == True):
encoded_data,self.encoder[label] = self.get_encoding(column)
encoded_data = encoded_data.reshape(-1,1)
x = encoded_data if x is None else np.hstack((x,encoded_data))
else:
x = column if x is None else np.hstack((x,column.values.reshape(-1,1)))
return x
def fit_transform(self,data,y):
return self.fit(data,y)
def transform(self,data):
x = None
# data = self.pre_process_columns(data)
for label, column in data.items():
if(label in self.categorical_indices):
if(self.categorical_indices[label] == True):
encoded_data,self.encoder[label] = self.get_encoding(column)
encoded_data = encoded_data.reshape(-1,1)
x = encoded_data if x is None else np.hstack((x,encoded_data))
else:
x = column if x is None else np.hstack((x,column.values.reshape(-1,1)))
return x
from sklearn.metrics import confusion_matrix
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn import metrics
from inspect import signature
from sklearn.metrics import average_precision_score
def print_confusion_matrix(confusion_matrix, class_names, figsize = (6,2), fontsize=14):
df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names)
fig = plt.figure(figsize=figsize)
try:
heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
except ValueError:
raise ValueError("Confusion matrix values must be integers.")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def print_prauc(y_test,y_score):
average_precision = average_precision_score(y_test, y_score)
print('Average precision-recall score: {0:0.2f}'.format(
average_precision))
precision, recall, _ = precision_recall_curve(y_test, y_score)
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
plt.show()
def print_raoc(y_test,y_score):
fpr, tpr, threshold = metrics.roc_curve(y_test, y_score)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
def print_result(y_test1, y_pred1, y_pred_proba1=0):
cm = confusion_matrix(y_test1, y_pred1, labels=[0,1])
print_confusion_matrix(cm, [0, 1])
print(classification_report(y_test1, y_pred1))
from sklearn.metrics import confusion_matrix, classification_report
def generate_results(clf, X_test, y_test):
    pred = clf.predict(X_test)
    print_result(y_test, pred)
    # use decision_function when the estimator provides it, otherwise fall back to predicted probabilities
    if hasattr(clf, 'decision_function'):
        score = clf.decision_function(X_test)
    else:
        score = clf.predict_proba(X_test)[:, 1]
    print_prauc(y_test, score)
    print_raoc(y_test, score)
from keras.models import Sequential
from keras.layers import Dense, Activation, ZeroPadding2D,Conv2D,Flatten,Conv1D,MaxPooling2D,BatchNormalization
from sklearn.metrics import roc_auc_score
import tensorflow as tf
def auroc(y_true, y_pred):
return tf.py_func(roc_auc_score, (y_true, y_pred), tf.double)
model = Sequential([
Dense(200, activation="softmax"),
Dense(50, activation="softmax"),
Dense(10, activation="softmax"),
Dense(2, activation="softmax"),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit(X_train,y_train,validation_split=0, epochs=150, batch_size=100)
y = (data["readmitted"].astype('str')).apply(lambda x: 0 if x == "NO" else 1)
y = to_categorical(y, num_classes=None, dtype='float32')
X_train, X_test, y_train, y_test = split_data(data,y)
feature_engineering = FeatureEngineering()
encoded_data = feature_engineering.fit(X_train,y_train)
encoded_data_test = feature_engineering.transform(X_test)
model.fit(encoded_data,y_train,epochs=300)
y_pred = model.predict_proba(encoded_data_test)
from sklearn.metrics import roc_auc_score
import tensorflow as tf
def auroc(y_true, y_pred):
return tf.py_func(roc_auc_score, (y_true, y_pred), tf.double)
print(roc_auc_score(y_test,y_pred))
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from keras.utils import to_categorical
def split_data(x,label):
X_train, X_test, y_train, y_test = train_test_split(x, label, test_size=0.20)
return X_train, X_test, y_train, y_test
# y = (data["readmitted"].astype('str')).apply(lambda x: 0 if x == "NO" else (1 if x == "<30" else 2))
y = (data["readmitted"].astype('str')).apply(lambda x: 0 if x == "NO" else 1)
X_train, X_test, y_train, y_test = split_data(data,y)
# pipeline.score(X_train,y_train)
pipeline = make_pipeline(FeatureEngineering(),DecisionTreeClassifier())
pipeline.fit(X_train,y_train)
pipeline.score(X_train,y_train)
pipeline.score(X_test,y_test)
generate_results(pipeline,X_test,y_test)
feature_engineering = FeatureEngineeringForTrees()
encoded_training_data = feature_engineering.fit_transform(X_train,y_train)
clf = DecisionTreeClassifier()
clf.fit(encoded_training_data,y_train)
encoded_test_data = feature_engineering.transform(X_test)
import shap
shap.initjs()
explainer = shap.TreeExplainer(clf)
shap_values = explainer.shap_values(encoded_test_data,y_test)
shap.force_plot(explainer.expected_value[0], shap_values[0][0])
shap.summary_plot(shap_values,encoded_test_data)
shap.force_plot(explainer.expected_value[0], shap_values[0][:100])
```
| github_jupyter |
```
#importing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
#loading dataset
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
#sklearn.datasets is a dictionary style dataset
cancer.keys()
print(cancer['DESCR'])
df = pd.DataFrame(cancer['data'], columns = cancer['feature_names'])
df.head()
cancer['target_names']
cancer['target_names']
#Standard scale
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df)
scaled_data = scaler.transform(df)
#PCA
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
scaled_data.shape
x_pca.shape
plt.figure(figsize=(8,6))
plt.scatter( x_pca[:,0] , x_pca[:,1] , c=cancer['target'] , cmap = 'plasma' )
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.show()
```
**PCA Description**
1. Here, based on just the 1st PC and 2nd PC, we see a very clear separation between what the malignant and benign tumors look like.
2. This is just based off 2 PCs vs the 30 dimensions of data.
3. Meaning this can be seen as a compression algorithm of sorts: we keep a lot of the information and explain much of the variance with just 2 columns of data (a quick numeric check follows after this list).
**Now let us understand what it means by 1st PC and 2nd PC**
4. Unfortunately, this great power of dimensionality reduction comes at the cost of being able to understand what each of these components represents. They don't relate 1-1 to a specific feature in our data.
5. The components really correspond to combinations of the original features.
6. The components themselves are stored as an attribute of the PCA object.
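The original notebook doesn't show it, but a quick check of how much variance those two components actually retain (assuming the `pca` object fitted above) could look like this:
```
# Quick check (not in the original notebook): variance retained by the 2 components
print(pca.explained_variance_ratio_)        # roughly [0.44, 0.19] for this dataset
print(pca.explained_variance_ratio_.sum())  # ~0.63 of the total variance is kept
```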
```
pca.components_
#Why the underscore though ?
#Let's visualise this relationship with a heatmap
df_comp = pd.DataFrame(pca.components_ , columns = cancer['feature_names'])
df_comp
# Making the figure bigger by manually adjusting its size
plt.figure(figsize=(12,6))
sns.heatmap(df_comp, cmap = 'plasma');
```
* And now we have a heatmap that shows the relationship between the various features and the principal components themselves. Basically, each PC is shown here as a row.
* The hotter the color, the more correlated it is to specific features in the columns.
* That's really the best explanation for what the principal components represent. They're combinations of all these features, and we can also see which features were specifically important to PC 1 vs PC 2 (one versus the other).
# Now we can feed this reduced version (x_pca) into a classification algorithm.
We can perform logistic regression on x_pca instead of doing it on the entire dataframe of features.
Also notice that x_pca is easily separable by a straight line, so an SVM is also a great choice for this.
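As an illustration of that idea (this cell is not part of the original notebook; it assumes `x_pca` and `cancer` from the cells above), a minimal sketch with logistic regression could be:
```
# Sketch: train a classifier on the 2 principal components only
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

X_train, X_test, y_train, y_test = train_test_split(
    x_pca, cancer['target'], test_size=0.3, random_state=42)

clf = LogisticRegression()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # accuracy using just 2 of the 30 original dimensions
```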
# Diving deeper in PCA
# Done
* StatQuest: Principal Component Analysis (PCA), Step-by-Step
* https://www.youtube.com/watch?v=FgakZw6K1QQ
* StatQuest: PCA in Python
* https://www.youtube.com/watch?v=Lsue2gEM9D0&t=623s
* https://statquest.org/2018/01/08/statquest-pca-in-python/
* Standard Deviation - Explained and Visualized
* https://www.youtube.com/watch?v=MRqtXL2WX2M
* Creating Heatmap
# To Do for further exploration
* Read wiki
OR
* Intro to statistical learning - PCA
* Find top 3 principal components
* 3D Analysis
```
```
| github_jupyter |
<small><i>This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com). Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_tutorial/).</i></small>
# An Introduction to scikit-learn: Machine Learning in Python
## Goals of this Tutorial
- **Introduce the basics of Machine Learning**, and some skills useful in practice.
- **Introduce the syntax of scikit-learn**, so that you can make use of the rich toolset available.
## Schedule:
**Preliminaries: Setup & introduction** (15 min)
* Making sure your computer is set-up
**Basic Principles of Machine Learning and the Scikit-learn Interface** (45 min)
* What is Machine Learning?
* Machine learning data layout
* Supervised Learning
- Classification
- Regression
- Measuring performance
* Unsupervised Learning
- Clustering
- Dimensionality Reduction
- Density Estimation
* Evaluation of Learning Models
* Choosing the right algorithm for your dataset
**Supervised learning in-depth** (1 hr)
* Support Vector Machines
* Decision Trees and Random Forests
**Unsupervised learning in-depth** (1 hr)
* Principal Component Analysis
* K-means Clustering
* Gaussian Mixture Models
**Model Validation** (1 hr)
* Validation and Cross-validation
## Preliminaries
This tutorial requires the following packages:
- Python version 2.7 or 3.4+
- `numpy` version 1.8 or later: http://www.numpy.org/
- `scipy` version 0.15 or later: http://www.scipy.org/
- `matplotlib` version 1.3 or later: http://matplotlib.org/
- `scikit-learn` version 0.15 or later: http://scikit-learn.org
- `ipython`/`jupyter` version 3.0 or later, with notebook support: http://ipython.org
- `seaborn`: version 0.5 or later, used mainly for plot styling
The easiest way to get these is to use the [conda](http://store.continuum.io/) environment manager.
I suggest downloading and installing [miniconda](http://conda.pydata.org/miniconda.html).
The following command will install all required packages:
```
$ conda install numpy scipy matplotlib scikit-learn ipython-notebook
```
Alternatively, you can download and install the (very large) Anaconda software distribution, found at https://store.continuum.io/.
### Checking your installation
You can run the following code to check the versions of the packages on your system:
(in IPython notebook, press `shift` and `return` together to execute the contents of a cell)
```
from __future__ import print_function
import IPython
print('IPython:', IPython.__version__)
import numpy
print('numpy:', numpy.__version__)
import scipy
print('scipy:', scipy.__version__)
import matplotlib
print('matplotlib:', matplotlib.__version__)
import sklearn
print('scikit-learn:', sklearn.__version__)
```
## Useful Resources
- **scikit-learn:** http://scikit-learn.org (see especially the narrative documentation)
- **matplotlib:** http://matplotlib.org (see especially the gallery section)
- **Jupyter:** http://jupyter.org (also check out http://nbviewer.jupyter.org)
| github_jupyter |
<div align="center">
<h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
Applied ML · MLOps · Production
<br>
Join 30K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with ML.
<br>
</div>
<br>
<div align="center">
<a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-30K-brightgreen"></a>
<a target="_blank" href="https://github.com/GokuMohandas/MadeWithML"><img src="https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star"></a>
<a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
<a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
<br>
🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub
</div>
<br>
<hr>
# Python
In this lesson, we'll learn about the basics of Python programming.
<div align="left">
<a target="_blank" href="https://madewithml.com/courses/foundations/python/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
<a href="https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/02_Python.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
<a href="https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/02_Python.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
</div>
# Variables
Variables are containers for holding data and they're defined by a name and value.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/python/variables.png" width="220">
</div>
```
# Integer variable
x = 5
print (x)
print (type(x))
```
We can change the value of a variable by simply assigning a new value to it.
```
# String variable
x = "hello"
print (x)
print (type(x))
```
There are many different types of variables: integers, floats, strings, boolean etc.
```
# int variable
x = 5
print (x, type(x))
# float variable
x = 5.0
print (x, type(x))
# text variable
x = "5"
print (x, type(x))
# boolean variable
x = True
print (x, type(x))
```
We can also do operations with variables.
```
# Variables can be used with each other
a = 1
b = 2
c = a + b
print (c)
```
We should always know what types of variables we're dealing with so we can do the right operations with them. Here's a common mistake that can happen if we're using the wrong variable type.
```
# int variables
a = 5
b = 3
print (a + b)
# string variables
a = "5"
b = "3"
print (a + b)
```
# Lists
Lists are an ordered, mutable (changeable) collection of values that are *comma separated* and enclosed by *square brackets*. A list can be comprised of many different types of variables (below is a list with an integer, string and a float).
```
# Creating a list
x = [3, "hello", 1.2]
print (x)
# Length of a list
len(x)
```
You can add to a list by using the **append** function.
```
# Adding to a list
x.append(7)
print (x)
print (len(x))
# Replacing items in a list
x[1] = "bye"
print (x)
# Operations
y = [2.4, "world"]
z = x + y
print (z)
```
# Indexing and Slicing
Indexing and slicing from lists allow us to retrieve specific values within lists. Note that indices can be positive (starting from 0) or negative (-1 and lower, where -1 is the last item in the list).
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/python/indexing.png" width="300">
</div>
```
# Indexing
x = [3, "hello", 1.2]
print ("x[0]: ", x[0])
print ("x[1]: ", x[1])
print ("x[-1]: ", x[-1]) # the last item
print ("x[-2]: ", x[-2]) # the second to last item
# Slicing
print ("x[:]: ", x[:]) # all indices
print ("x[1:]: ", x[1:]) # index 1 to the end of the list
print ("x[1:2]: ", x[1:2]) # index 1 to index 2 (not including index 2)
print ("x[:-1]: ", x[:-1]) # index 0 to last index (not including last index)
```
# Tuples
Tuples are collections that are ordered and immutable (unchangeable). You will use these to store values that will never be changed.
```
# Creating a tuple
x = (3.0, "hello") # tuples start and end with ()
print (x)
# Adding values to a tuple
x = x + (5.6, 4)
print (x)
# Try to change (it won't work and you'll get an error)
# x[0] = 1.2
```
# Dictionaries
Dictionaries are an unordered, mutable and indexed collection of key-value pairs. You can retrieve values based on the key and a dictionary cannot have two of the same keys.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/python/dictionaries.png" width="320">
</div>
```
# Creating a dictionary
person = {'name': 'Goku',
'eye_color': 'brown'}
print (person)
print (person['name'])
print (person['eye_color'])
# Changing the value for a key
person['eye_color'] = 'green'
print (person)
# Adding new key-value pairs
person['age'] = 24
print (person)
# Length of a dictionary
print (len(person))
```
# If statements
We can use `if` statements to conditionally do something. The conditions are defined by the words `if`, `elif` (which stands for else if) and `else`. We can have as many `elif` statements as we want. The indented code below each condition is the code that will execute if the condition is `True`.
```
# If statement
x = 4
if x < 1:
score = 'low'
elif x <= 4: # elif = else if
score = 'medium'
else:
score = 'high'
print (score)
# If statement with a boolean
x = True
if x:
print ("it worked")
```
# Loops
### For Loops
A `for` loop can iterate over a collection of values (lists, tuples, dictionaries, etc.) The indented code is executed for each item in the collection of values.
```
# For loop
veggies = ["carrots", "broccoli", "beans"]
for veggie in veggies:
print (veggie)
```
When the loop encounters the `break` command, the loop will terminate immediately. If there were more items in the list, they will not be processed.
```
# `break` from a for loop
veggies = ["carrots", "broccoli", "beans"]
for veggie in veggies:
if veggie == "broccoli":
break
print (veggie)
```
When the loop encounters the `continue` command, the loop will skip all other operations for that item in the list only. If there were more items in the list, the loop will continue normally.
```
# `continue` to the next iteration
veggies = ["carrots", "broccoli", "beans"]
for veggie in veggies:
if veggie == "broccoli":
continue
print (veggie)
```
### While Loops
A `while` loop can perform repeatedly as long as a condition is `True`. We can use `continue` and `break` commands in `while` loops as well.
```
# While loop
x = 3
while x > 0:
x -= 1 # same as x = x - 1
print (x)
```
# Functions
Functions are a way to modularize reusable pieces of code. They're defined by the keyword `def` which stands for definition and they can have the following components.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/python/functions.png" width="350">
</div>
```
# Define the function
def add_two(x):
"""Increase x by 2.""" # explains what this function will do
x += 2
return x
```
Here are the components that may be required when we want to use the function. We need to ensure that the function name and the input parameters match how we defined the function above.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/python/calling_functions.png" width="350">
</div>
```
# Use the function
score = 0
new_score = add_two(x=score)
print (new_score)
```
A function can have as many input parameters and outputs as we want.
```
# Function with multiple inputs
def join_name(first_name, last_name):
"""Combine first name and last name."""
joined_name = first_name + " " + last_name
return joined_name
# Use the function
first_name = "Goku"
last_name = "Mohandas"
joined_name = join_name(first_name=first_name,
last_name=last_name)
print (joined_name)
```
> It's good practice to always use keyword arguments when calling a function so that it's very clear what input variable belongs to what function input parameter. On a related note, you will often see the terms `*args` and `**kwargs` which stand for arguments and keyword arguments. You can extract them when they are passed into a function. The significance of the `*` is that any number of arguments and keyword arguments can be passed into the function.
```
def f(*args, **kwargs):
x = args[0]
y = kwargs.get('y')
print (f"x: {x}, y: {y}")
f(5, y=2)
```
# Classes
Classes are object constructors and are a fundamental component of object oriented programming in Python. They are composed of a set of functions that define the class and its operations.
### `__init__` function
The `__init__` function is used when an instance of the class is initialized.
```
# Creating the class
class Pet(object):
"""Class object for a pet."""
def __init__(self, species, name):
"""Initialize a Pet."""
self.species = species
self.name = name
# Creating an instance of a class
my_dog = Pet(species="dog",
name="Scooby")
print (my_dog)
print (my_dog.name)
```
### `__str__` function
The `print (my_dog)` command printed something not so relevant to us. Let's fix that with the `__str__` function.
```
# Creating the class
class Pet(object):
"""Class object for a pet."""
def __init__(self, species, name):
"""Initialize a Pet."""
self.species = species
self.name = name
def __str__(self):
"""Output when printing an instance of a Pet."""
return f"{self.species} named {self.name}"
# Creating an instance of a class
my_dog = Pet(species="dog",
name="Scooby")
print (my_dog)
print (my_dog.name)
```
> Classes can be customized with **magic** functions like `__init__` and `__str__`, to enable powerful operations. We'll be exploring additional built-in functions in subsequent notebooks (like `__len__`, `__iter__` and `__getitem__`, etc.) but if you're curious, here is a [tutorial](https://rszalski.github.io/magicmethods/) on more magic methods.
### Object methods
```
# Creating the class
class Pet(object):
"""Class object for a pet."""
def __init__(self, species, name):
"""Initialize a Pet."""
self.species = species
self.name = name
def __str__(self):
"""Output when printing an instance of a Pet."""
return f"{self.species} named {self.name}"
def change_name(self, new_name):
"""Change the name of your Pet."""
self.name = new_name
# Creating an instance of a class
my_dog = Pet(species="dog", name="Scooby")
print (my_dog)
print (my_dog.name)
# Using a class's function
my_dog.change_name(new_name="Scrappy")
print (my_dog)
print (my_dog.name)
```
### Inheritance
Inheritance allows us to inherit all the properties and methods from another class (the parent). Notice how we inherited the initialized variables from the parent `Pet` class like species and name. We also inherited the `change_name` function. But for the `__str__` function, we define our own version to overwrite the `Pet` `__str__` function. We can similarly overwrite any object functions as well.
```
class Dog(Pet):
def __init__(self, name, breed):
super().__init__(species="dog", name=name)
self.breed = breed
def __str__(self):
return f"{self.breed} named {self.name}"
scooby = Dog(breed="Great Dane", name="Scooby")
print (scooby)
scooby.change_name('Scooby Doo')
print (scooby)
```
### Methods
There are two important decorator methods to know about when it comes to classes: `@classmethod` and `@staticmethod`. We'll learn about decorators in the next section below but these specific methods pertain to classes so we'll cover them here.
```
class Dog(Pet):
def __init__(self, name, breed):
super().__init__(species="dog", name=name)
self.breed = breed
def __str__(self):
return f"{self.breed} named {self.name}"
@classmethod
def from_dict(cls, d):
return cls(name=d["name"], breed=d["breed"])
@staticmethod
def is_cute(breed):
return True # all animals are cute!
```
A `@classmethod` allows us to create class instances by passing in the uninstantiated class itself (`cls`). This is a great way to create (or load) classes from objects (ie. dictionaries).
```
# Create instance
d = {"name": "Cassie", "breed": "Border Collie"}
cassie = Dog.from_dict(d=d)
print(cassie)
```
A `@staticmethod` can be called from an uninstantiated class object so we can do things like this:
```
# Static method
Dog.is_cute(breed="Border Collie")
```
# Decorators
Recall that functions allow us to modularize code and reuse them. However, we'll often want to add some functionality before or after the main function executes and we may want to do this for many different functions. Instead of adding more code to the original function, we can use decorators!
* **decorators**: augment a function with pre/post-processing. Decorators wrap around the main function and allow us to operate on its inputs and/or outputs.
Suppose we have a function called `operations` which increments the input value x by 1.
```
def operations(x):
"""Basic operations."""
x += 1
return x
operations(x=1)
```
Now let's say we want to increment our input x by 1 before and after the `operations` function executes and, to illustrate this example, let's say the increments have to be separate steps. Here's how we would do it by changing the original code:
```
def operations(x):
"""Basic operations."""
x += 1
x += 1
x += 1
return x
operations(x=1)
```
We were able to achieve what we want but we now increased the size of our `operations` function and if we want to do the same incrementing for any other function, we have to add the same code to all of those as well ... not very efficient. To solve this, let's create a decorator called `add` which increments `x` by 1 before and after the main function `f` executes.
### Creating a decorator function
The decorator function accepts a function `f` which is the function we wish to wrap around, in our case, it's `operations()`. The output of the decorator is its `wrapper` function which receives the arguments and keyword arguments passed to function `f`.
Inside the `wrapper` function, we can:
1. extract the input parameters passed to function `f`.
2. make any changes we want to the function inputs.
3. function `f` is executed
4. make any changes to the function outputs
5. `wrapper` function returns some value(s), which is what the decorator returns as well since it returns `wrapper`.
```
# Decorator
def add(f):
def wrapper(*args, **kwargs):
"""Wrapper function for @add."""
x = kwargs.pop('x') # .get() if not altering x
x += 1 # executes before function f
x = f(*args, **kwargs, x=x)
x += 1 # executes after function f
return x
return wrapper
```
We can use this decorator by simply adding it to the top of our main function preceded by the `@` symbol.
```
@add
def operations(x):
"""Basic operations."""
x += 1
return x
operations(x=1)
```
Suppose we wanted to debug and see what function actually executed with `operations()`.
```
operations.__name__, operations.__doc__
```
The function name and docstring are not what we're looking for but it appears this way because the `wrapper` function is what was executed. In order to fix this, Python offers `functools.wraps` which carries the main function's metadata.
```
from functools import wraps
# Decorator
def add(f):
@wraps(f)
def wrap(*args, **kwargs):
"""Wrapper function for @add."""
x = kwargs.pop('x')
x += 1
x = f(*args, **kwargs, x=x)
x += 1
return x
return wrap
@add
def operations(x):
"""Basic operations."""
x += 1
return x
operations.__name__, operations.__doc__
```
Awesome! We were able to decorate our main function `operations()` to achieve the customization we wanted without actually altering the function. We can reuse our decorator for other functions that may need the same customization!
> This was a dummy example to show how decorators work but we'll be using them heavily during our [MLOps](https://madewithml.com/#mlops) lessons. A simple scenario would be using decorators to create uniform JSON responses from each API endpoint without including the bulky code in each endpoint.
# Callbacks
Decorators allow for customized operations before and after the main function's execution but what about in between? Suppose we want to conditionally/situationally do some operations. Instead of writing a whole bunch of if-statements and making our functions bulky, we can use callbacks!
* **callbacks**: conditional/situational processing within the function.
Our callbacks will be classes that have functions with key names that will execute at various periods during the main function's execution. The function names are up to us but we need to invoke the same callback functions within our main function.
```
# Callback
class x_tracker(object):
def __init__(self, x):
self.history = []
def at_start(self, x):
self.history.append(x)
def at_end(self, x):
self.history.append(x)
```
We can pass in as many callbacks as we want and because they have appropriately named functions, they will be invoked at the appropriate times.
```
def operations(x, callbacks=[]):
"""Basic operations."""
for callback in callbacks:
callback.at_start(x)
x += 1
for callback in callbacks:
callback.at_end(x)
return x
x = 1
tracker = x_tracker(x=x)
operations(x=x, callbacks=[tracker])
tracker.history
```
# Putting it all together
decorators + callbacks = powerful customization *before*, *during* and *after* the main function’s execution without increasing its complexity. We will be using this duo to create powerful ML training scripts that are highly customizable in future lessons.
```
from functools import wraps
# Decorator
def add(f):
@wraps(f)
def wrap(*args, **kwargs):
"""Wrapper function for @add."""
x = kwargs.pop('x') # .get() if not altering x
x += 1 # executes before function f
x = f(*args, **kwargs, x=x)
# can do things post function f as well
return x
return wrap
# Callback
class x_tracker(object):
def __init__(self, x):
self.history = [x]
def at_start(self, x):
self.history.append(x)
def at_end(self, x):
self.history.append(x)
# Main function
@add
def operations(x, callbacks=[]):
"""Basic operations."""
for callback in callbacks:
callback.at_start(x)
x += 1
for callback in callbacks:
callback.at_end(x)
return x
x = 1
tracker = x_tracker(x=x)
operations(x=x, callbacks=[tracker])
tracker.history
```
| github_jupyter |
# Part 1 - Scalars and Vectors
For the questions below it is not sufficient to simply provide answers to the questions; you must solve the problems and show your work using Python (the NumPy library will help a lot!). Translate the vectors and matrices into their appropriate Python representations and use NumPy or functions that you write yourself to demonstrate the result or property.
## 1.1 Create a two-dimensional vector and plot it on a graph
```
import numpy as np
import matplotlib.pyplot as plt
vector = np.array([1,6])
plt.arrow(0, 0, *vector, head_width=.1, head_length=0.1)
plt.xlim(0,max(vector)+1)
plt.ylim(0,max(vector)+1);
```
## 1.2 Create a three-dimensional vector and plot it on a graph
```
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
vector = np.array([3, 2, 3])
ax.quiver(*(0,0,0), *vector)
ax.set_xlim([0, max(vector) + 1])
ax.set_ylim([0, max(vector) + 1])
ax.set_zlim([0, max(vector) + 1])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show();
```
## 1.3 Scale the vector you created in 1.1 by $5$, $\pi$, and $-e$ and plot all four vectors (original + 3 scaled vectors) on a graph. What do you notice about these vectors?
```
vector = np.array([1,6])
quiver = np.array([vector, vector*5, vector*np.pi, vector*-np.exp(1)])
for v in quiver:
plt.arrow(0, 0, *v, head_width=1, head_length=1)
plt.xlim(quiver.min()-1,quiver.max()+1)
plt.ylim(quiver.min()-1,quiver.max()+1);
```
## 1.4 Graph vectors $\vec{a}$ and $\vec{b}$ and plot them on a graph
\begin{align}
\vec{a} = \begin{bmatrix} 5 \\ 7 \end{bmatrix}
\qquad
\vec{b} = \begin{bmatrix} 3 \\4 \end{bmatrix}
\end{align}
```
a = np.array([5, 7])
b = np.array([3, 4])
quiver = np.array([a, b])
for v in quiver:
plt.arrow(0, 0, *v, head_width=0.5, head_length=0.5)
plt.xlim(0,quiver.max()+1)
plt.ylim(0,quiver.max()+1);
```
## 1.5 find $\vec{a} - \vec{b}$ and plot the result on the same graph as $\vec{a}$ and $\vec{b}$. Is there a relationship between vectors $\vec{a} \thinspace, \vec{b} \thinspace \text{and} \thinspace \vec{a-b}$
```
a = np.array([5, 7])
b = np.array([3, 4])
quiver = np.array([a, b, a-b])
for v in quiver:
plt.arrow(0, 0, *v, head_width=0.5, head_length=0.5)
plt.xlim(0,quiver.max()+1)
plt.ylim(0,quiver.max()+1);
```
$\vec{a}-\vec{b}$ is the vector that connects the tip of $\vec{b}$ to the tip of $\vec{a}$; its length is the distance between the two tips.
## 1.6 Find $c \cdot d$
\begin{align}
\vec{c} = \begin{bmatrix}7 & 22 & 4 & 16\end{bmatrix}
\qquad
\vec{d} = \begin{bmatrix}12 & 6 & 2 & 9\end{bmatrix}
\end{align}
```
np.dot(np.array([ 7, 22, 4, 16]),
np.array([12, 6, 2, 9]))
```
## 1.7 Find $e \times f$
\begin{align}
\vec{e} = \begin{bmatrix} 5 \\ 7 \\ 2 \end{bmatrix}
\qquad
\vec{f} = \begin{bmatrix} 3 \\4 \\ 6 \end{bmatrix}
\end{align}
```
np.cross(np.array([5, 7, 2]),
np.array([3, 4, 6]))
```
## 1.8 Find $||g||$ and then find $||h||$. Which is longer?
\begin{align}
\vec{g} = \begin{bmatrix} 1 \\ 1 \\ 1 \\ 8 \end{bmatrix}
\qquad
\vec{h} = \begin{bmatrix} 3 \\ 3 \\ 3 \\ 3 \end{bmatrix}
\end{align}
```
(np.linalg.norm(np.array([1, 1, 1, 8])),
np.linalg.norm(np.array([3, 3, 3, 3])))
```
## 1.9 Show that the following vectors are orthogonal (perpendicular to each other):
\begin{align}
\vec{g} = \begin{bmatrix} 1 \\ 0 \\ -1 \end{bmatrix}
\qquad
\vec{h} = \begin{bmatrix} 1 \\ \sqrt{2} \\ 1 \end{bmatrix}
\end{align}
```
np.dot(np.array([1, 0, -1]),
np.array([1, 2**(1/2), 1]))
```
# Part 2 - Matrices
## 2.1 What are the dimensions of the following matrices? Which of the following can be multiplied together? See if you can find all of the different legal combinations.
\begin{align}
A = \begin{bmatrix}
1 & 2 \\
3 & 4 \\
5 & 6
\end{bmatrix}
\qquad
B = \begin{bmatrix}
2 & 4 & 6 \\
\end{bmatrix}
\qquad
C = \begin{bmatrix}
9 & 6 & 3 \\
4 & 7 & 11
\end{bmatrix}
\qquad
D = \begin{bmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{bmatrix}
\qquad
E = \begin{bmatrix}
1 & 3 \\
5 & 7
\end{bmatrix}
\end{align}
```
A = np.array([[1, 2],
[3, 4],
[5, 6]])
B = np.array([[2, 4, 6]])
C = np.array([[9, 6, 3],
[4, 7, 11]])
D = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
E = np.array([[1, 3],
[5, 7]])
quiver = [A, B, C, D, E]
# You can multiply any two matrices where the number of columns of the first matrix
# is equal to the number of rows of the second matrix.
for M1 in quiver:
for M2 in quiver:
if M1 is M2:
continue
if M1.shape[1] == M2.shape[0]:
print('Can multiply:', M1, M2, sep='\n')
print('\n')
if M1.shape[0] == M2.shape[1]:
print('Can multiply:', M2, M1, sep='\n')
print('\n')
```
## 2.2 Find the following products: CD, AE, and BA. What are the dimensions of the resulting matrices? How does that relate to the dimensions of their factor matrices?
```
# the result of multiplying two matrices will have the shape of
# the first matrix's rows and the second matrix's columns
CD = np.matmul(C, D)
print(CD)
AE = np.matmul(A, E)
print(AE)
BA = np.matmul(B, A)
print(BA)
```
## 2.3 Find $F^{T}$. How are the numbers along the main diagonal (top left to bottom right) of the original matrix and its transpose related? What are the dimensions of $F$? What are the dimensions of $F^{T}$?
\begin{align}
F =
\begin{bmatrix}
20 & 19 & 18 & 17 \\
16 & 15 & 14 & 13 \\
12 & 11 & 10 & 9 \\
8 & 7 & 6 & 5 \\
4 & 3 & 2 & 1
\end{bmatrix}
\end{align}
```
F = np.array([[20, 19, 18, 17],
[16, 15, 14, 13],
[12, 11, 10, 9],
[ 8, 7, 6, 5],
[ 4, 3, 2, 1]])
print(F)
print(np.transpose(F))
```
# Part 3 - Square Matrices
## 3.1 Find $IG$ (be sure to show your work) 😃
\begin{align}
G=
\begin{bmatrix}
12 & 11 \\
7 & 10
\end{bmatrix}
\end{align}
The product of any square matrix with its corresponding identity matrix is the same matrix.
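A quick numeric check of that statement (an illustrative cell, not part of the original assignment):
```
# I times G returns G unchanged
G = np.array([[12, 11],
              [ 7, 10]])
I = np.identity(2)
print(np.matmul(I, G))
```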
## 3.2 Find $|H|$ and then find $|J|$.
\begin{align}
H=
\begin{bmatrix}
12 & 11 \\
7 & 10
\end{bmatrix}
\qquad
J=
\begin{bmatrix}
0 & 1 & 2 \\
7 & 10 & 4 \\
3 & 2 & 0
\end{bmatrix}
\end{align}
```
H = np.array([[12, 11],
[ 7, 10]])
np.linalg.det(H)
J = np.array([[ 0, 1, 2],
[ 7,10, 4],
[ 3, 2, 0]])
np.linalg.det(J)
```
## 3.3 Find $H^{-1}$ and then find $J^{-1}$
```
np.linalg.inv(H)
np.linalg.inv(J)
```
## 3.4 Find $HH^{-1}$ and then find $J^{-1}J$. Is $HH^{-1} == J^{-1}J$? Why or Why not?
```
np.matmul(H, np.linalg.inv(H))
np.matmul(np.linalg.inv(J), J)
```
No, they are not equal: both products are identity matrices, but they have different dimensions (2x2 vs. 3x3).
# Stretch Goals:
A reminder that these challenges are optional. If you finish your work quickly we welcome you to work on them. If there are other activities that you feel will help your understanding of the above topics more, feel free to work on those. Topics from the Stretch Goals sections will never end up on Sprint Challenges. You don't have to do these in order, you don't have to do all of them.
- Write a function that can calculate the dot product of any two vectors of equal length that are passed to it.
- Write a function that can calculate the norm of any vector (a minimal sketch of both of these helpers appears after this list).
- Prove to yourself again that the vectors in 1.9 are orthogonal by graphing them.
- Research how to plot a 3d graph with animations so that you can make the graph rotate (this will be easier in a local notebook than in google colab)
- Create and plot a matrix on a 2d graph.
- Create and plot a matrix on a 3d graph.
- Plot two vectors that are not collinear on a 2d graph. Calculate the determinant of the 2x2 matrix that these vectors form. How does this determinant relate to the graphical interpretation of the vectors?
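For the first two stretch goals, a minimal sketch (one possible solution, written in plain Python rather than NumPy) could look like this:
```
def dot_product(u, v):
    """Dot product of two vectors of equal length."""
    assert len(u) == len(v), "vectors must have the same length"
    return sum(a * b for a, b in zip(u, v))

def norm(v):
    """Euclidean norm (length) of a vector."""
    return sum(a * a for a in v) ** 0.5

print(dot_product([1, 0, -1], [1, 2**0.5, 1]))  # 0.0, i.e. the vectors from 1.9 are orthogonal
print(norm([1, 1, 1, 8]), norm([3, 3, 3, 3]))   # compare with 1.8
```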
| github_jupyter |
# Introduction
This notebook describes the fine-tuning process of a CNN. The base network is [MobileNetV2](https://ai.googleblog.com/2018/04/mobilenetv2-next-generation-of-on.html).
# Data upload
The dataset needs to be on the Colab virtual machine. Using a dataset located on Google Drive would result in a slow training process, because the virtual machine would need to request and transfer the data from Google Drive during training.
```
from google.colab import drive
drive.mount('/content/gdrive')
```
The dataset is stored on Google Drive as a zip file and is copied into the virtual machine.
```
!cp '/content/gdrive/MyDrive/[MIRCV]FoodWebSearch/food.zip' .
!unzip -q food.zip
!rm food.zip
```
# Initialization
The dataset directory has the following structure:
```
images/
...class_a/
......image_1.jpg
......image_2.jpg
...class_b/
......image_3.jpg
......image_4.jpg
```
To train and test the model, we need three subsets: train, validation and test. To split the dataset, we use the [split-folders](https://pypi.org/project/split-folders/) package.
```
!pip install split-folders tqdm
```
Training a CNN on CPU can be infeasible, so we check whether GPU hardware acceleration is active for the notebook.
```
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import splitfolders
# constants
DATA_DIR = '/content/food-101/images'
SETS_DIR = '/content/sets'
BATCH_SIZE = 256
# check hardware acceleration
device_name = tf.test.gpu_device_name()
print('GPU: ', device_name)
```
It may happen that, during the unzip process, some images get corrupted. A corrupted image will cause an error during training, so we need to remove it. We use a piece of code, slightly modified from the Keras documentation, that deletes the macOS configuration file and the corrupted images.
```
# remove corrupted images
num_skipped = 0
for folder_name in os.listdir(DATA_DIR):
if folder_name == '.DS_Store':
ds_store = os.path.join(DATA_DIR, folder_name)
os.remove(ds_store)
print("Removed: ", ds_store)
else:
folder_path = os.path.join(DATA_DIR, folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
os.remove(fpath)
print("Removed: ", fpath)
print("Removed %d images" % num_skipped)
```
We split the dataset into train, validation and test set. We use 70% of images for the training set, 15% for the validation set and 15% for the test set.
```
# split data
splitfolders.ratio(DATA_DIR, output=SETS_DIR, seed=123, ratio=(.7, .15, .15), group_prefix=None)
```
We use the `image_dataset_from_directory` function to create a Dataset object directly from the image directory. We take the class labels from the directory structure and transform them into integers to use them in the sparse categorical cross-entropy loss. We use buffered prefetching to yield data from disk without having I/O becoming blocking.
```
train = SETS_DIR + '/train/'
val = SETS_DIR + '/val/'
test = SETS_DIR + '/test/'
train_set = tf.keras.preprocessing.image_dataset_from_directory(
train,
labels='inferred',
label_mode='int',
seed=123,
shuffle=True,
image_size=(224, 224),
batch_size=BATCH_SIZE)
val_set = tf.keras.preprocessing.image_dataset_from_directory(
val,
labels='inferred',
label_mode='int',
seed=123,
shuffle=True,
image_size=(224, 224),
batch_size=BATCH_SIZE)
test_set = tf.keras.preprocessing.image_dataset_from_directory(
test,
labels='inferred',
label_mode='int',
seed=123,
shuffle=True,
image_size=(224, 224),
batch_size=BATCH_SIZE)
# use buffered prefetching so we can yield data
# from disk without having I/O becoming blocking
train_set = train_set.prefetch(buffer_size=BATCH_SIZE)
val_set = val_set.prefetch(buffer_size=BATCH_SIZE)
test_set = test_set.prefetch(buffer_size=BATCH_SIZE)
```
The MobileNetV2 needs input images with pixel normalized in [-1;1] (they vary in [0;255]) and size 224x224. To normalize the images, we use the function taken from exercise 6 of the laboratory.
```
def preprocess(images, labels):
images = tf.keras.applications.mobilenet_v2.preprocess_input(images)
return images, labels
train_set = train_set.map(preprocess, deterministic=True)
val_set = val_set.map(preprocess, deterministic=True)
test_set = test_set.map(preprocess, deterministic=True)
```
# Training
The CNN has as base network the MobileNetV2 and an MLP with one hidden layer for classification. To use the pre-trained MobileNetV2, we load it removing the fully-connected layer on top.
Since the classification task is a multi-class classification problem with 101 classes, the output layer has 101 neurons with softmax activation function. To avoid overfitting, we use data augmentation. We perform data augmentation directly into the network using the first two layers that are active only during training. Furthermore, we apply global average pooling to the MobileNetV2.
```
mobilenetv2 = tf.keras.applications.MobileNetV2(
weights='imagenet',
include_top=False,
input_shape=(224,224,3),
)
mobilenetv2.trainable = False
model = tf.keras.models.Sequential([
# data augmentation
tf.keras.Input(shape=(224,224,3)),
tf.keras.layers.experimental.preprocessing.RandomFlip(mode='horizontal', seed=123),
tf.keras.layers.experimental.preprocessing.RandomRotation(factor=0.2, seed=123, fill_mode='nearest'),
# basenet
mobilenetv2,
tf.keras.layers.GlobalAveragePooling2D(),
# classifier
tf.keras.layers.Dense(256, activation='relu', name='dense_hidden'),
tf.keras.layers.Dense(101, activation='softmax', name='output')
], name='food_classifier')
model.summary()
```
We perform the training in two steps: first, we freeze the base network and train the classifier, then we un-freeze the last two blocks of the base network to fine-tune them. The MLP has random weights at the beginning, so the two-step training avoids destroying the pre-trained layers by backpropagating large errors through them.
To train the model, we use the sparse categorical cross-entropy loss and the RMSprop optimizer. We perform early stopping to stop the training when the validation loss stops improving, saving the model with the best validation accuracy.
```
model.compile(loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-4, momentum=0.7),
metrics=['accuracy'])
callbacks = [
# early stopping
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=2,
restore_best_weights=True),
# model checkpoints (save the best model)
tf.keras.callbacks.ModelCheckpoint(
monitor='val_accuracy',
save_best_only=True,
mode='max',
filepath='/content/gdrive/MyDrive/model/food_classifier.h5'),
]
history = model.fit(
train_set,
epochs=100,
verbose=1,
callbacks=callbacks,
validation_data=val_set,
batch_size=BATCH_SIZE)
```
To fine-tune the network, we un-freeze the last two blocks of the base network, which gives us a slightly better result in terms of validation accuracy.
```
# unfreeze the last two blocks of the MobileNetV2 (from block_15_expand onwards)
mobilenetv2.trainable = True
train = False
for layer in mobilenetv2.layers:
if layer.name == 'block_15_expand':
train = True
if train:
layer.trainable = True
else:
layer.trainable = False
model.summary()
```
We perform the fine-tuning with a lower learning rate to avoid overfitting. We again use early stopping.
```
model.compile(loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(lr=1e-5, momentum=0.7),
metrics=['accuracy'])
ft_history = model.fit(
train_set,
epochs=100,
verbose=1,
callbacks=callbacks,
validation_data=val_set,
batch_size=BATCH_SIZE
)
```
# Evaluation
We plot the loss functions and the accuracy functions of both training steps with Matplotlib to have visual feedback.
```
# data from training with freezed mobilenet
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
# data from fine-tuning training
ft_acc = ft_history.history['accuracy']
ft_val_acc = ft_history.history['val_accuracy']
ft_loss = ft_history.history['loss']
ft_val_loss = ft_history.history['val_loss']
# create figure
fig, (acc_g,loss_g) = plt.subplots(1,2, figsize=(15,6))
# range for x axis
epochs = range(len(acc))
epochs_1 = range(len(ft_acc))
# plot accuracy curves
acc_g.plot(epochs, acc, color='tab:orange', marker='x', linestyle='-', label='Classifier training', linewidth=1)
acc_g.plot(epochs, val_acc, color='tab:blue', marker='x', linestyle='-', label='Classifier validation', linewidth=1)
acc_g.plot(epochs_1, ft_acc, color='tab:olive', marker='^', linestyle='-', label='Fine-tuning training', linewidth=1)
acc_g.plot(epochs_1, ft_val_acc, color='tab:cyan', marker='^', linestyle='-', label='Fine-tuning validation', linewidth=1)
# plot loss curves
loss_g.plot(epochs, loss, color='tab:orange', marker='x', linestyle='-', label='Classifier training ', linewidth=1)
loss_g.plot(epochs, val_loss, color='tab:blue', marker='x', linestyle='-', label='Classifier validation', linewidth=1)
loss_g.plot(epochs_1, ft_loss, color='tab:olive', marker='^', linestyle='-', label='Fine-tuning training ', linewidth=1)
loss_g.plot(epochs_1, ft_val_loss, color='tab:cyan', marker='^', linestyle='-', label='Fine-tuning validation', linewidth=1)
# accuracy graph style options
acc_g.grid(axis='y', linestyle='--', linewidth=0.5)
acc_g.set_xlabel('Epochs')
acc_g.set_ylabel('Accuracy')
acc_g.set_title('Accuracy', fontdict={'fontsize':18}, pad=20)
acc_g.legend(loc='lower right')
# loss graph style options
loss_g.grid(axis='y', linestyle='--', linewidth=0.5)
loss_g.set_xlabel('Epochs')
loss_g.set_ylabel('Loss')
loss_g.set_title('Loss', fontdict={'fontsize':18}, pad=20)
loss_g.legend(loc='upper right')
# layout adjustment
plt.tight_layout()
# save graph
fig.savefig('train_plots.png')
```
Finally, we evaluate the model on the test set. The data augmentation layers are switched off in inference mode.
```
model = tf.keras.models.load_model('/content/gdrive/MyDrive/model/food_classifier.h5')
test_loss, test_accuracy = model.evaluate(test_set)
print("Test loss: ", test_loss)
print("Test accuracy: ", test_accuracy)
```
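As a usage note, a hypothetical single-image inference sketch is shown below; the file name, the 224x224 target size, and the absence of any extra preprocessing are assumptions, not taken from this notebook.
```
import numpy as np
import tensorflow as tf

# load one image and add a batch dimension
img = tf.keras.preprocessing.image.load_img('some_dish.jpg', target_size=(224, 224))
x = tf.keras.preprocessing.image.img_to_array(img)[np.newaxis, ...]

# the RandomFlip/RandomRotation layers are inactive in inference mode
probs = model.predict(x)
print("Predicted class index:", int(np.argmax(probs, axis=-1)[0]))
```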
| github_jupyter |
# Introduction to Pandas
## Series
```
# importing the pandas library
import pandas as pd
# Python list of Strings
subjects = ["pyhsics", "chemistry", "biology", "mathematics"]
a = pd.Series(subjects)
a
print(type(a))
```
### Creating a Series object with Integers
```
numbers = [10, 20, 30, 40, 50]
b = pd.Series(numbers)
b
print(type(b))
```
### Creating a Series object with Booleans
```
# Python list of booleans
boolean_values = [True, False, True, False, False]
c = pd.Series(boolean_values)
c
print(type(c))
```
### Creating a Series from Tuple
```
# Python tuple
text = ("hello", "welcome", "to", "pandas")
print(type(text))
x = pd.Series(text)
x
print(type(x))
```
### Creating a Series from dictionary
```
text1 = {"Name": "John", "Age": 30, "City": "New York"}
type(text1)
d = pd.Series(text1)
d
d.index
print(type(d))
```
## Series Attributes
```
friends = ["john", "peter", "sam", "ravi", "pavan", "dainel"]
s = pd.Series(friends)
s.head(3)
# prints the values of series object
s.values
# displays the index values
s.index
# Returns the shape; the Series has 6 elements, so its shape is (6,)
s.shape
```
## Series Methods
```
sample = pd.Series([10, 20, 40, 62, 38, 20, 40, 70, 10, 40])
# displays the count of elements in the sample
sample.count()
# displays the minimum value
sample.min()
# displays the max value
sample.max()
# displays the mean(average)
sample.mean()
# displays the standard deviation
sample.std()
sample.sort_values()
sample.mean()
```
## DataFrames
### Create a dataframe from the dictionary
```
data = [33, 66, 99, 45]
print(type(data))
df = pd.DataFrame(data)
df
print(type(df))
df = pd.DataFrame(data, columns=["age"])
df
data1 = {
"Students": ["John", "Peter", "Sam", "Jaine", "Suzain"],
"Score": [70, 63, 77, 35, 90],
}
print(type(data1))
df = pd.DataFrame(data1)
print(type(df))
df
students_score = [
["John", 80],
["Peter", 79],
["Sam", 90],
["Jaine", 76],
["Suzain", 85],
]
df = pd.DataFrame(students_score, columns=["Name", "score"])
df
```
### Creating a DataFrame object with List of Dictionaries
```
stock_details = [
{"satyam": 128.0, "infosys": 1329, "google": 5000},
{"satyam": 126, "infosys": 1322, "google": 5002},
]
print(type(stock_details))
df = pd.DataFrame(stock_details)
print(type(df))
df
data1 = {
"Students": ["John", "Peter", "Sam", "Jaine", "Suzain"],
"Score": [70, 63, 77, 35],
}
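# Note: the two lists above have different lengths (5 students vs 4 scores);
# passing data1 to pd.DataFrame would raise a ValueError ("All arrays must be of the same length").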
```
## Real time dataset
```
import pandas as pd
df = pd.read_csv("datasets/movies.csv")
# df is the dataframe object
print(type(df))
# displays the first 3 rows
df.head(3)
# Default displays the first 5 rows
df.head()
# displays the last 3 rows
df.tail(3)
# Default displays the last 5 rows
df.tail()
# displays the number of rows and columns
df.shape
# Dataframe has 9742 rows and 3 columns
# the RangeIndex starts at 0 and stops at 9742 (exclusive), with a step of 1
df.index
# displays the count of each column
df.count()
# displays the column names or headers
df.columns
df.dtypes
df.size
```
| github_jupyter |
```
#### Import libraries and dependencies #########
import sys
import pandas as pd
import seaborn as sns
sns.set(style="darkgrid", font_scale=1.5)
from itertools import groupby
cd '/content/drive/MyDrive/Data Developer'
ls
data = pd.read_csv('ExampleData.txt', sep='\t')
data.info()
data.isnull().sum()
## Drop all null values
data1 = data.dropna()
data1.shape
data1 = data1.reset_index(drop=True)
data1.info()
## The Distribution and Quantiles of each numeric column
data1.describe()
data1.head(3)
## Plot control measure against the corresponding treatment
# importing the required module
import matplotlib.pyplot as plt
# Initialise the subplot function using number of rows and columns
figure, axis = plt.subplots(2, 2)
# For Control and treatment 1
axis[0, 0].plot(data1.control_1, data1.treatment_1)
axis[0, 0].set_title("Control_1 graph!")
# For Control and treatment 2
axis[0, 1].plot(data1.control_2, data1.treatment_2)
axis[0, 1].set_title("Control_2 graph!")
# For Control and treatment 3
axis[1, 0].plot(data1.control_3, data1.treatment_3)
axis[1, 0].set_title("Control_3 graph!")
# Combine all the operations and display
plt.show()
## Histogram on a log scale to know the data distribution
sns.displot(
data=data1,
x="control_1",
kind="hist",
aspect=1.4,
log_scale=10
)
sns.displot(
data=data1,
x="treatment_1",
kind="hist",
aspect=1.4,
log_scale=10
)
```
The above plots show us the distribution of the effect of control and treatment on each protein.
From this we can see that the data is close to a normal distribution.
```
#Getting new columns with sample difference to know the effect
new_dict = {'protein_id': data1.protein_id ,
'treat_minus_control1' : data1.treatment_1 - data1.control_1 ,
'treat_minus_control2': data1.treatment_2 - data1.control_2 ,
'treat_minus_control3' : data1.treatment_3 - data1.control_3}
new_df = pd.DataFrame(new_dict)
## Count negative values
new_df1 = new_df[['treat_minus_control1', 'treat_minus_control2' , 'treat_minus_control3']]
new_df1.lt(0).sum()
new_df1.head(3)
## A positive difference means that the treatment was effective to some extent
a = len(new_df1.query('treat_minus_control1 > 0'))
b = len(new_df1.query('treat_minus_control2 > 0'))
c = len(new_df1.query('treat_minus_control3 > 0'))
print(a, b, c)
## Separating proteins with positive treat_minus_control differences
all_positive = new_df[(new_df[['treat_minus_control1', 'treat_minus_control2' , 'treat_minus_control3']] > 0).all(1)]
all_negative = new_df[(new_df[['treat_minus_control1', 'treat_minus_control2' , 'treat_minus_control3']] < 0).all(1)]
control_1_positive = new_df[(new_df[['treat_minus_control1']] > 0).all(1)]
control_2_positive = new_df[(new_df[['treat_minus_control2']] > 0).all(1)]
control_3_positive = new_df[(new_df[['treat_minus_control3']] > 0).all(1)]
print(control_1_positive.shape)
print(control_2_positive.shape)
print(control_3_positive.shape)
print(all_positive.shape)
print(all_negative.shape)
```
### KEY FINDINGS
- From the above attempt to analyze the control and treated samples, we can conclude that Treatment 2 was effective on the largest number of samples, giving a positive result.
- Individually, 446 samples show a positive difference for treatment 1, 581 for treatment 2, and 237 for treatment 3.
- There are 86 proteins with a positive effect under all three treatments, while 315 proteins did not show a positive effect under any of the three treatments.
| github_jupyter |
# Splunk - Data Connector
## Description
The data provider module of msticpy provides functions for defining data sources, connectors to them, and queries against them, as well as the ability to return query results from the defined data sources.
For more information on Data Providers, check the documentation:
- Data Provider: https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html
In this notebook we will demonstrate the Splunk data connector feature of msticpy.
This feature is built on top of the [Splunk Enterprise SDK for Python](https://dev.splunk.com/enterprise/docs/devtools/python/sdk-python/) with some customizations and enhancements.
### Installation
```
# Only run first time to install/upgrade msticpy to latest version
#%pip install --upgrade msticpy[splunk]
```
### Authentication
Authentication for the Splunk data provider is handled by specifying credentials directly in the connect call or by specifying them in the msticpy config file.
For more information on how to create a new user with appropriate roles and permissions, follow the Splunk Docs [Addandeditusers](https://docs.splunk.com/Documentation/Splunk/8.0.5/Security/Addandeditusers) and [Aboutusersandroles](https://docs.splunk.com/Documentation/Splunk/8.0.5/Security/Aboutusersandroles). The user should have permission to run at least their own searches, or more depending upon the actions to be performed by the user.
Once you have created a user account with the appropriate roles, you will need the following details when connecting:
- host = "localhost" (Splunk server FQDN hostname to connect to; for a locally installed Splunk you can specify localhost)
- port = 8089 (Splunk REST API port)
- username = "admin" (username to connect to the Splunk instance)
- password = "yourpassword" (password of the user specified in username)
Once you have these details, you can specify them in `msticpyconfig.yaml` as shown in the example below:
```
SplunkApp:
Args:
host: "{Splunk server FQDN or localhost}"
port: "{default 8089}"
username: "{username with search permissions to connect}"
password: "{password of the user specified}"
```
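If the provider settings are stored in `msticpyconfig.yaml` as above, the connect call can also be made without passing credentials in code. This is only a sketch and assumes that msticpy picks the settings up from the config file and that `QueryProvider` has been imported as shown later in this notebook:
```
splunk_prov = QueryProvider('Splunk')
splunk_prov.connect()  # assumption: credentials are read from msticpyconfig.yaml
```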
```
# Check we are running Python 3.6 or later
import sys
MIN_REQ_PYTHON = (3,6)
if sys.version_info < MIN_REQ_PYTHON:
print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')
print('or later is selected as the active kernel.')
sys.exit("Python %s.%s or later is required.\n" % MIN_REQ_PYTHON)
#imports
import pandas as pd
import msticpy.nbtools as nbtools
#data library imports
from msticpy.data.data_providers import QueryProvider
print('Imports Complete')
```
## Instantiating a query provider
You can instantiate a data provider for Splunk by specifying the credentials in the connect call or in the msticpy config file.
<br> If the details are correct and authentication is successful, it will show as connected.
```
splunk_prov = QueryProvider('Splunk')
splunk_prov.connect(host=<hostname>, username=<username>, password=<password>)
```
## Listing available queries
Upon connecting to the Splunk data environment, we can take a look at what query options are available to us by running `QUERY_PROVIDER.list_queries()`.
For more information, refer to the documentation: [Listing available queries](https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html#listing-available-queries).
This will display all the saved searches from the connected Splunk instance, as well as pre-built custom queries for common operations such as listing datatypes, saved searches, alerts, and audit trail information.
```
splunk_prov.list_queries()
```
In order to get help for a specific query, you can execute `QUERY_PROVIDER.<QueryName>('?')`.
For more information, refer to the documentation - [Getting Help for a query](https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html#getting-help-for-a-query)
```
splunk_prov.SplunkGeneral.get_events_parameterized('?')
```
If you want to print the query prior to executing, pass 'print' as an argument
```
splunk_prov.SplunkGeneral.get_events_parameterized('print')
```
If you have set the arguments and would like to validate the query, use the example below:
```
splunk_prov.SplunkGeneral.get_events_parameterized('print',
index="botsv2",
source="WinEventLog:Microsoft-Windows-Sysmon/Operational",
timeformat="%Y-%m-%d %H:%M:%S",
start="2017-08-25 00:00:00",
end="2017-08-25 10:00:00"
)
```
## Running pre-defined query
In order to run a pre-defined query, execute it by name, either setting values for the available arguments or running with the default arguments.
For more information, refer to the documentation - [Running a pre-defined query](https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html#running-an-pre-defined-query)
```
splunk_prov.SplunkGeneral.get_events_parameterized(
index="botsv2",
source="WinEventLog:Microsoft-Windows-Sysmon/Operational",
start="2017-08-25 00:00:00.000000",
end="2017-08-25 10:00:00.000000"
)
```
By default, Splunk query results are limited to 100 rows. You can specify the `count=0` argument to return all the results.
The default value for the `add_query_items` argument is `| head 100`, which you can reset as shown in the example below while retrieving all results.
```
splunk_prov.SplunkGeneral.get_events_parameterized(
index="botsv2",
source="WinEventLog:Microsoft-Windows-Sysmon/Operational",
start="2017-08-25 00:00:00.000000",
end="2017-08-25 10:00:00.000000",
add_query_items='',
count=0
)
```
## Running an ad-hoc Splunk query
You can also define your own Splunk query and run it via the Splunk provider with `QUERY_PROVIDER.exec_query(<query>)`.
For more information, check the documentation: [Running an Ad-hoc Query](https://msticpy.readthedocs.io/en/latest/data_acquisition/DataProviders.html#running-an-ad-hoc-query)
```
splunk_query = '''
search index="blackhat" sourcetype="network" earliest=0
| table TimeGenerated, TotalBytesSent
'''
df = splunk_prov.exec_query(splunk_query)
df.head()
```
## References
- Splunk Enterprise SDK for Python: https://dev.splunk.com/enterprise/docs/devtools/python/sdk-python/
- Splunk Community : https://community.splunk.com/t5/Community/ct-p/en-us
- Splunk Documentation: https://docs.splunk.com/Documentation
| github_jupyter |
# Analyze Time Table
Analyze the time table of COUNT DOWN JAPAN/ROCK'IN ON JAPAN
Hypotheses:
* Attendance probability: depends on past attendances.
* As the count of past attendances increases, the attendance probability increases.
* Time table probability: depends on the past or recent time tables.
* Attending time (day) will be different from the past / recent fes time table.
## Preparation
```
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
def set_path():
root = os.path.join(os.path.realpath("."), "../")
if root not in sys.path:
sys.path.append(root)
return root
DATA_DIR = os.path.join(set_path(), "data/raw")
if not os.path.exists(DATA_DIR):
raise Exception("Data dir does not exist. Please Run 'Get Fes dataset'first.")
```
## Analyze Time Table
```
df_cdj = pd.read_csv(os.path.join(DATA_DIR, "cdj.csv"))
df_cdj.head(3)
df_rinj = pd.read_csv(os.path.join(DATA_DIR, "rinj.csv"))
df_rinj.head(3)
```
### Attendance Probability
```
def cross_by_year(df, discount_rate=0.9):
ct = pd.crosstab(df["artist"], df["year"], rownames=["artist"], colnames=["year"])
def discount(x):
_exp = np.arange(len(x))[::-1]
rates = np.array([discount_rate] * len(x))
v = np.power(rates, _exp)
return x * v
ct = pd.DataFrame(ct)
d_ct = ct.apply(discount, axis=1)
return ct, d_ct
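# Example of the discounting above: with discount_rate=0.9 and three year-columns,
# the weights applied are [0.81, 0.9, 1.0], so the most recent year counts the most.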
ct, ct_d = cross_by_year(df_cdj)
ct_d.head(5)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
def try_prediction(target, feature=None, test_size=0.33):
X = feature
if feature is None:
X = target
result = {}
for i, c in enumerate(X.columns[1:], start=1):
p = X.columns[i - 1]
_X = X[p].values.reshape(-1, 1)
y = target[c]
X_train, X_test, y_train, y_test = train_test_split(_X, y, test_size=test_size)
model = LogisticRegression(solver="liblinear", multi_class="ovr").fit(X_train, y_train)
score = model.score(X_test, y_test)
result["{}=>{}".format(p, c)] = score
result = pd.Series(result)
result.plot.bar()
return result
predict_from_previous = try_prediction(ct)
print(predict_from_previous.mean())
predict_from_cumsum = try_prediction(ct, ct_d.cumsum(axis=1))
print(predict_from_cumsum.mean())
ct_rinj, ct_d_rinj = cross_by_year(df_rinj)
predict_from_previous_rinj = try_prediction(ct_rinj)
print(predict_from_previous_rinj.mean())
predict_from_cumsum_rinj = try_prediction(ct_rinj, ct_d_rinj.cumsum(axis=1))
print(predict_from_cumsum_rinj.mean())
```
## Time Table Probability
```
def make_day_feature(df):
categorical_day = pd.get_dummies(df["day_index"], prefix="day_category")
feature_added = pd.concat([df, categorical_day], axis=1)
return feature_added
f_cdj = make_day_feature(df_cdj)
f_cdj.head(3)
f_rinj = make_day_feature(df_rinj)
def predict_day(df, from_year, target_year, cumsum=False, test_size=0.33, feature=None):
f_columns = [c for c in df.columns if c.startswith("day_category")]
feature = feature
if feature is None:
feature = df
if not cumsum:
X = feature[feature["year"] == from_year][["artist"] + f_columns]
else:
X = feature[feature["year"] <= from_year][["artist"] + f_columns].groupby(["artist"])[f_columns].sum().reset_index()
y = df[df["year"] == target_year][["artist", "day_index"]]
Xy = y.merge(X, left_on="artist", right_on="artist", how="left").fillna(0)
X = Xy[f_columns]
y = Xy["day_index"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
model = LogisticRegression(solver="liblinear", multi_class="ovr", class_weight="balanced").fit(X_train, y_train)
score = model.score(X_test, y_test)
return score
def predict_day_all(df, cumsum=False, feature=None):
years = df["year"].value_counts().index.values.astype(int)
years.sort()
result = {}
for i, y in enumerate(years[1:], start=1):
previous = years[i - 1]
target = y
score = predict_day(df, previous, target, cumsum=cumsum, feature=feature)
result["{} => {}".format(previous, target)] = score
result = pd.Series(result)
result.plot.bar()
return result
```
#### COUNT DOWN JAPAN
```
predict_from_previous_day = predict_day_all(f_cdj)
print(predict_from_previous_day.mean())
predict_from_previous_day_cum = predict_day_all(f_cdj, cumsum=True)
print(predict_from_previous_day_cum.mean())
```
#### ROCK IN JAPAN
```
predict_from_previous_day_rinj = predict_day_all(f_rinj)
print(predict_from_previous_day_rinj.mean())
predict_from_previous_day_rinj = predict_day_all(f_rinj, cumsum=True)
print(predict_from_previous_day_rinj.mean())
```
### COUNT DOWN JAPAN from ROCK IN JAPAN
```
predict_from_rinj = predict_day_all(f_cdj, feature=f_rinj)
print(predict_from_rinj.mean())
predict_from_rinj = predict_day_all(f_cdj, feature=f_rinj, cumsum=True)
print(predict_from_rinj.mean())
```
### ROCK IN JAPAN from COUNT DOWN JAPAN
```
predict_from_cdj = predict_day_all(f_rinj, feature=f_cdj)
print(predict_from_cdj.mean())
predict_from_cdj = predict_day_all(f_rinj, feature=f_cdj, cumsum=True)
print(predict_from_cdj.mean())
```
## Next Year
```
def make_predict_model(df, from_year, cumsum=False, feature=None, test_size=0.33,):
years = df["year"].value_counts().index.values.astype(int)
years.sort()
years = years[:(years.tolist().index(from_year))]
f_columns = [c for c in df.columns if c.startswith("day_category")]
feature = feature
if feature is None:
feature = df
Xs = []
ys = []
def make_feature(year):
if cumsum:
X = feature[feature["year"] <= year][["artist"] + f_columns].groupby(["artist"])[f_columns].sum().reset_index()
else:
X = feature[feature["year"] == year][["artist"] + f_columns]
return X
for y in years[1:]:
X = make_feature(y - 1)
y = df[df["year"] == y][["artist", "day_index"]]
Xy = y.merge(X, left_on="artist", right_on="artist", how="left").fillna(0)
X = Xy[f_columns]
y = Xy["day_index"]
Xs.append(X)
ys.append(y)
X = pd.concat(Xs, axis=0)
y = pd.concat(ys, axis=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
model = LogisticRegression(solver="liblinear", multi_class="ovr", class_weight="balanced").fit(X_train, y_train)
score = model.score(X_test, y_test)
nX = make_feature(from_year)
prediction = model.predict_proba(nX[f_columns])
prediction = pd.DataFrame(prediction, index=nX["artist"])
return score, prediction
score, prediction_cdj = make_predict_model(f_cdj, 2017, cumsum=True)
prediction_cdj[prediction_cdj.max(axis=1) >= 0.6].style.highlight_max(axis=1)
score, prediction_rinj = make_predict_model(f_rinj, 2018, cumsum=True)
print(score)
prediction_rinj[prediction_rinj.max(axis=1) >= 0.6].style.highlight_max(axis=1)
score, prediction_rinj_from_cdj = make_predict_model(f_rinj, 2018, cumsum=True, feature=f_cdj)
prediction_rinj_from_cdj[prediction_rinj_from_cdj.max(axis=1) >= 0.6].style.highlight_max(axis=1)
```
| github_jupyter |
```
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import gala as ga
import astropy.coordinates as coord
import astropy.units as u
from astropy.visualization import quantity_support
quantity_support()
from astropy.table import Table, join
from astroquery.gaia import Gaia
import sys
sys.path.append("../src/helpers")
import kicks
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
plt.rc('font', family='serif')
plt.rcParams['text.usetex'] = False
fs = 24
# update various fontsizes to match
params = {'figure.figsize': (12, 8),
'legend.fontsize': fs,
'axes.labelsize': fs,
'xtick.labelsize': 0.9 * fs,
'ytick.labelsize': 0.9 * fs,
'axes.linewidth': 1.1,
'xtick.major.size': 7,
'xtick.minor.size': 4,
'ytick.major.size': 7,
'ytick.minor.size': 4}
plt.rcParams.update(params)
# read in the table from Zari+2021 and remove badly measured stars
zari = Table.read("../src/data/filtered_sample.fits", hdu=1)
zari = zari[zari["parallax"] > 0.5]
# create coordinate objects for the table and the BH from Sahu+2022
c_zari = coord.SkyCoord(
ra=zari["ra"] * u.deg,
dec=zari["dec"] * u.deg,
distance=coord.Distance(parallax=zari["parallax"] * u.mas),
)
c_BH = coord.SkyCoord(
ra="17:51:40.2082",
dec="-29:53:26.502",
unit=(u.hourangle, u.degree),
distance=1.58*u.kpc,
)
# shorten table to only nearby stars
matching_zari_table = zari[c_BH.separation_3d(c_zari) < 0.1 * u.kpc]
# start an astroquery job to get all of the matching Gaia data for these stars
j = Gaia.launch_job(
query="SELECT * FROM gaiaedr3.gaia_source as gaia JOIN tap_upload.zari as zari ON zari.source_id=gaia.source_id",
upload_resource=matching_zari_table,
upload_table_name="zari", verbose=True
)
zari_gaia = j.get_results()
zari_gaia.write("../src/data/zari_matching.dat", format="ascii")
galcen_frame = coord.Galactocentric()
coords = coord.SkyCoord(
ra=zari_gaia["ra"],
dec=zari_gaia["dec"],
unit=(u.deg, u.deg),
distance=coord.Distance(parallax=zari_gaia["parallax"].data * u.mas),
pm_ra_cosdec=zari_gaia["pmra"].data * u.mas/u.yr * np.cos(zari_gaia["dec"].data * u.deg),
pm_dec=zari_gaia["pmdec"].data * u.mas/u.yr,
radial_velocity=zari_gaia["dr2_radial_velocity"].data * u.km/u.s
).transform_to(galcen_frame)
w0s = ga.dynamics.PhaseSpacePosition(coords.data)
missing_velocity = np.isnan(coords.v_x.value)
pot = ga.potential.MilkyWayPotential()
orbits = pot.integrate_orbit(w0s[np.logical_not(missing_velocity)], dt=-1 * u.Myr, n_steps=4000)
w0_BH = ga.dynamics.PhaseSpacePosition(coord.SkyCoord(
ra="17:51:40.2082",
dec="-29:53:26.502",
unit=(u.hourangle, u.degree),
distance=1.58*u.kpc,
pm_ra_cosdec=-4.36*u.mas/u.yr,# * cosdec,
pm_dec=3.06*u.mas/u.yr,
radial_velocity=0*u.km/u.s
).transform_to(galcen_frame).data)
orbit_BH = pot.integrate_orbit(w0_BH, dt=-1 * u.Myr, n_steps=4000)
fig, axes = plt.subplots(2, 1, figsize=(12, 16))
axes[0].hist(coords.z.to(u.pc), bins="fd", label="Zari+2021 Matching Stars")
axes[0].set_xlabel("Galactocentric Height [pc]")
axes[0].legend(loc="upper left")
orbits.cylindrical.plot(["rho", "z"], axes=[axes[1]])
orbit_BH.cylindrical.plot(["rho", "z"], axes=[axes[1]], color="black")
axes[1].set_xlim(5, 9.5)
plt.show()
```
# How big of a kick do we need?
```
w0_test = w0s[np.logical_not(missing_velocity)][5]
orbit_test_no_kick = pot.integrate_orbit(w0_test, dt=1 * u.Myr, n_steps=4000)
mean_max_heights = []
max_max_heights = []
kick_vels = np.arange(5, 100 + 5, 5)
for kick in kick_vels:
random_directions = [kicks.integrate_orbits_with_kicks(w0=w0_test,
kicks=[kick * u.km / u.s],
kick_times=[10 * u.Myr],
dt=1 * u.Myr,
n_steps=4000).z.max().to(u.kpc).value for _ in range(100)] * u.kpc
print(kick, np.mean(random_directions))
mean_max_heights.append(np.mean(random_directions))
max_max_heights.append(np.max(random_directions))
fig, ax = plt.subplots()
ax.scatter(kick_vels, mean_max_heights, label="Mean of 100 runs")
ax.scatter(kick_vels, max_max_heights, label="Max of 100 runs")
ax.axhline(orbit_BH.z.max(), color="grey", linestyle="dotted")
ax.annotate("BH max height", xy=(5, orbit_BH.z.max()), va="bottom", fontsize=0.7*fs, color="grey")
ax.set_xlabel("Kick Velocity [km / s]")
ax.set_ylabel("Maximum Height [kpc]")
ax.legend(loc="upper left", markerscale=2, handletextpad=0, fontsize=0.8*fs)
ax.set_xlim(left=0)
plt.show()
```
| github_jupyter |
# Getting Started
This notebook demonstrates the OpenSCM Two Layer Model repository's basic functionality.
We start with imports; their purpose will become clearer throughout the notebook.
```
import inspect
import numpy as np
from openscm_units import unit_registry
from scmdata import ScmRun
import openscm_twolayermodel
from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel
from openscm_twolayermodel.base import Model
```
As with most Python packages, the version of ``openscm_twolayermodel`` being used can always be checked as shown below. This is very helpful for debugging.
```
# NBVAL_IGNORE_OUTPUT
openscm_twolayermodel.__version__
```
OpenSCM Two Layer Model has two key classes: `ImpulseResponseModel` and `TwoLayerModel`. These are implementations of the two major variants of the two-layer model found in the literature. We can see that they both have a common base class using the `inspect` package.
```
inspect.getmro(ImpulseResponseModel)
inspect.getmro(TwoLayerModel)
```
These classes can both be used in the same way. We demonstrate the most basic usage here; more comprehensive usage is demonstrated in other notebooks.
The first thing we need is our effective radiative forcing driver. This should be an [`ScmRun`](https://scmdata.readthedocs.io/en/latest/data.html#the-scmrun-class) instance.
```
run_length = 200
driver = ScmRun(
data=np.arange(run_length) * 4 / 70,
index=1850 + np.arange(run_length),
columns={
"unit": "W/m^2",
"model": "idealised",
"scenario": "1pctCO2",
"region": "World",
"variable": "Effective Radiative Forcing",
},
)
driver
# NBVAL_IGNORE_OUTPUT
driver.lineplot()
```
Then we can initialise instances of our models and run them.
```
# NBVAL_IGNORE_OUTPUT
two_layer = TwoLayerModel(lambda0=4 / 3 * unit_registry("W/m^2/delta_degC"))
res_two_layer = two_layer.run_scenarios(driver)
impulse_response = ImpulseResponseModel(d1=10 * unit_registry("yr"))
res_impulse_response = impulse_response.run_scenarios(driver)
res = res_two_layer.append(res_impulse_response)
res.head()
```
Now we can plot our outputs and compare (of course, we can make these two models the same if we're clever about how we set the parameters; see the impulse response equivalence notebook).
```
# NBVAL_IGNORE_OUTPUT
res.filter(variable="Surface Temperature*").lineplot(
hue="climate_model", style="variable"
)
# NBVAL_IGNORE_OUTPUT
res.filter(variable="Heat*").lineplot(hue="climate_model", style="variable")
```
| github_jupyter |
```
## import certain packages
import math
from datetime import datetime, date
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame
## define Cox_Ross_Rubinstein binomial model
def Cox_Ross_Rubinstein_Tree (S,K,T,r,sigma,N, Option_type):
# Underlying price (per share): S;
# Strike price of the option (per share): K;
# Time to maturity (years): T;
# Continuously compounding risk-free interest rate: r;
# Volatility: sigma;
# Number of binomial steps: N;
# The factor by which the price rises (assuming it rises) = u ;
# The factor by which the price falls (assuming it falls) = d ;
# The probability of a price rise = pu ;
# The probability of a price fall = pd ;
# discount rate = disc ;
u=math.exp(sigma*math.sqrt(T/N));
d=math.exp(-sigma*math.sqrt(T/N));
pu=((math.exp(r*T/N))-d)/(u-d);
pd=1-pu;
disc=math.exp(-r*T/N);
St = [0] * (N+1)
C = [0] * (N+1)
St[0]=S*d**N;
for j in range(1, N+1):
St[j] = St[j-1] * u/d;
for j in range(1, N+1):
if Option_type == 'P':
C[j] = max(K-St[j],0);
elif Option_type == 'C':
C[j] = max(St[j]-K,0);
for i in range(N, 0, -1):
for j in range(0, i):
C[j] = disc*(pu*C[j+1]+pd*C[j]);
return C[0]
## define Jarrow_Rudd binomial model
def Jarrow_Rudd_Tree (S,K,T,r,sigma,N, Option_type):
# Underlying price (per share): S;
# Strike price of the option (per share): K;
# Time to maturity (years): T;
# Continuously compounding risk-free interest rate: r;
# Volatility: sigma;
# Steps: N;
# The factor by which the price rises (assuming it rises) = u ;
# The factor by which the price falls (assuming it falls) = d ;
# The probability of a price rise = pu ;
# The probability of a price fall = pd ;
# discount rate = disc ;
u=math.exp((r-(sigma**2/2))*T/N+sigma*math.sqrt(T/N));
d=math.exp((r-(sigma**2/2))*T/N-sigma*math.sqrt(T/N));
pu=0.5;
pd=1-pu;
disc=math.exp(-r*T/N);
St = [0] * (N+1)
C = [0] * (N+1)
St[0]=S*d**N;
for j in range(1, N+1):
St[j] = St[j-1] * u/d;
for j in range(1, N+1):
if Option_type == 'P':
C[j] = max(K-St[j],0);
elif Option_type == 'C':
C[j] = max(St[j]-K,0);
for i in range(N, 0, -1):
for j in range(0, i):
C[j] = disc*(pu*C[j+1]+pd*C[j]);
return C[0]
```
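As a quick sanity check of the two functions (the input values below are arbitrary illustrative assumptions, not taken from this notebook), both trees should converge to very similar prices for the same option:
```
# hypothetical example: S=100, K=100, T=1 year, r=5%, sigma=20%, 500 steps, call option
print(Cox_Ross_Rubinstein_Tree(100, 100, 1.0, 0.05, 0.20, 500, 'C'))
print(Jarrow_Rudd_Tree(100, 100, 1.0, 0.05, 0.20, 500, 'C'))
```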
##### Input
```
## input the current stock price and check if it is a number.
S = input("What is the current stock price? ");
while True:
try:
S = float(S)
break
except:
print("The current stock price has to be a NUMBER.")
S = input("What is the current stock price? ")
## input the strike price and check if it is a number.
K = input("What is the strike price? ");
while True:
try:
K = float(K)
break
except:
print("The the strike price has to be a NUMBER.")
K = input("What is the strike price? ")
## input the expiration_date and calculate the days between today and the expiration date.
while True:
expiration_date = input("What is the expiration date of the options? (mm-dd-yyyy) ")
try:
expiration_date = datetime.strptime(expiration_date, "%m-%d-%Y")
except ValueError as e:
print("error: %s\nTry again." % (e,))
else:
break
T = (expiration_date - datetime.utcnow()).days / 365
## input the continuously compounding risk-free interest rate and check if it is a number.
r = input("What is the continuously compounding risk-free interest rate in percentage(%)? ");
while True:
try:
r = float(r)
break
except:
print("The continuously compounding risk-free interest rate has to be a NUMBER.")
r = input("What is the continuously compounding risk-free interest rate in percentage(%)? ")
## input the volatility and check if it is a number.
sigma = input("What is the volatility in percentage(%)? ");
while True:
    try:
        sigma = float(sigma)
        if sigma > 100 or sigma < 0:
            print("The range of sigma has to be in [0,100].")
            sigma = input("What is the volatility in percentage(%)? ")
            continue  # loop again so the new value is re-validated
        break
    except:
        print("The volatility has to be a NUMBER.")
        sigma = input("What is the volatility in percentage(%)? ")
data = {'Symbol': ['S', 'K', 'T', 'r', 'sigma'],
'Input': [S, K, T , r , sigma]}
input_frame = DataFrame(data, columns=['Symbol', 'Input'],
index=['Underlying price', 'Strike price', 'Time to maturity', 'Risk-free interest rate', 'Volatility'])
input_frame
```
##### Output
```
r = r/100; sigma = sigma/100;
binomial_model_pricing = {'Option' : ['Call', 'Put', 'Call', 'Put'],
'Price': [Cox_Ross_Rubinstein_Tree(S, K, T, r, sigma,1000,'C'), Cox_Ross_Rubinstein_Tree(S, K, T, r, sigma,1000,'P'),
Jarrow_Rudd_Tree(S, K, T, r, sigma,1000,'C'), Jarrow_Rudd_Tree(S, K, T, r, sigma,1000,'P')]}
binomial_model_pricing_frame = DataFrame(binomial_model_pricing, columns=[ 'Option', 'Price'],
index = ['Cox-Ross-Rubinstein','Cox-Ross-Rubinstein', 'Jarrow-Rudd', 'Jarrow-Rudd'])
binomial_model_pricing_frame
```
##### Plot call / put options price with different steps
```
## call option with different steps
runs1 = list(range(50,5000,50))
CRR1 = []
JR1 = []
for i in runs1:
CRR1.append(Cox_Ross_Rubinstein_Tree(S, K, T, r, sigma,i ,'C'))
JR1.append(Jarrow_Rudd_Tree(S, K, T, r, sigma,i ,'C'))
plt.plot(runs1, CRR1, label='Cox_Ross_Rubinstein')
plt.plot(runs1, JR1, label='Jarrow_Rudd')
plt.legend(loc='upper right')
plt.show()
## put option with different steps
runs2 = list(range(50,5000,50))
CRR2 = []
JR2 = []
for i in runs2:
CRR2.append(Cox_Ross_Rubinstein_Tree(S, K, T, r, sigma,i ,'P'))
JR2.append(Jarrow_Rudd_Tree(S, K, T, r, sigma, i, 'P'))
plt.plot(runs2, CRR2, label='Cox_Ross_Rubinstein')
plt.plot(runs2, JR2, label='Jarrow_Rudd')
plt.legend(loc='upper right')
plt.show()
```
| github_jupyter |
Deep Learning
=============
Assignment 3
------------
Previously in `2_fullyconnected.ipynb`, you trained a logistic regression and a neural network model.
The goal of this assignment is to explore regularization techniques.
```
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
# Some personal imports
import matplotlib.pyplot as plt
%matplotlib inline
```
First reload the data we generated in _notmnist.ipynb_.
```
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
```
Reformat into a shape that's more adapted to the models we're going to train:
- data as a flat matrix,
- labels as float 1-hot encodings.
```
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
```
---
Problem 1
---------
Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor `t` using `nn.l2_loss(t)`. The right amount of regularization should improve your validation / test accuracy.
---
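Concretely, `tf.nn.l2_loss(W)` returns $\frac{1}{2}\sum_{ij} W_{ij}^2$, so with regularization strength $\beta$ the objective used below becomes $L = L_{\text{cross-entropy}} + \beta \cdot \frac{1}{2}\lVert W \rVert_2^2$.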
Let's start with the logistic model:
```
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
beta_regul = tf.placeholder(tf.float32)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + beta_regul * tf.nn.l2_loss(weights)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
The L2 regularization introduces a new meta parameter that should be tuned. Since I do not have any idea of what the right value for this meta parameter should be, I will plot the accuracy against the meta parameter value (on a logarithmic scale).
```
num_steps = 3001
regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]
accuracy_val = []
for regul in regul_val:
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
accuracy_val.append(accuracy(test_prediction.eval(), test_labels))
plt.semilogx(regul_val, accuracy_val)
plt.grid(True)
plt.title('Test accuracy by regularization (logistic)')
plt.show()
```
Let's see if the same technique will improve the prediction of the 1-layer neural network:
```
batch_size = 128
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
beta_regul = tf.placeholder(tf.float32)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal([image_size * image_size, num_hidden_nodes]))
biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes, num_labels]))
biases2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
logits = tf.matmul(lay1_train, weights2) + biases2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \
beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)
lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
Finally something above 90%! I will also plot the final accuracy by the L2 parameter to find the best value.
```
num_steps = 3001
regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]
accuracy_val = []
for regul in regul_val:
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
accuracy_val.append(accuracy(test_prediction.eval(), test_labels))
plt.semilogx(regul_val, accuracy_val)
plt.grid(True)
plt.title('Test accuracy by regularization (1-layer net)')
plt.show()
```
---
Problem 2
---------
Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?
---
```
num_steps = 101
num_batches = 3
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
#offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
offset = step % num_batches
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 2 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
Since there are far too many parameters and no regularization, the accuracy on the training batches reaches 100%. The generalization capability is poor, as shown by the validation and test accuracy.
---
Problem 3
---------
Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides `nn.dropout()` for that, but you have to make sure it's only inserted during training.
What happens to our extreme overfitting case?
---
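One common way to make sure dropout is only active during training (this is not the approach used below, which simply builds separate prediction paths without dropout for the validation and test sets) is to feed the keep probability through a placeholder so it can be set to 1.0 at evaluation time. A minimal sketch, assuming the TF1-style graph used in this notebook:
```
keep_prob = tf.placeholder(tf.float32)  # feed 0.5 while training, 1.0 while evaluating
hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
hidden_dropped = tf.nn.dropout(hidden, keep_prob)
```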
```
batch_size = 128
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal([image_size * image_size, num_hidden_nodes]))
biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes, num_labels]))
biases2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
drop1 = tf.nn.dropout(lay1_train, 0.5)
logits = tf.matmul(drop1, weights2) + biases2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)
lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)
num_steps = 101
num_batches = 3
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
#offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
offset = step % num_batches
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 2 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
The first conclusion is that 100% accuracy on the minibatches is harder to achieve or to keep. As a result, the test accuracy is improved by 6%; the final net is more capable of generalization.
---
Problem 4
---------
Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is [97.1%](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html?showComment=1391023266211#c8758720086795711595).
One avenue you can explore is to add multiple layers.
Another one is to use learning rate decay:
global_step = tf.Variable(0) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(0.5, step, ...)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
---
Let's do a first try with 2 hidden layers. Note how the parameters are initialized compared to the previous cases: the weights are drawn with a standard deviation of sqrt(2 / n_inputs), which keeps the variance of the ReLU activations roughly constant across layers.
```
batch_size = 128
num_hidden_nodes1 = 1024
num_hidden_nodes2 = 100
beta_regul = 1e-3
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
global_step = tf.Variable(0)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal(
[image_size * image_size, num_hidden_nodes1],
stddev=np.sqrt(2.0 / (image_size * image_size)))
)
biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))
biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))
weights3 = tf.Variable(
tf.truncated_normal([num_hidden_nodes2, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes2)))
biases3 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)
logits = tf.matmul(lay2_train, weights3) + biases3
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \
beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2) + tf.nn.l2_loss(weights3))
# Optimizer.
learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.65, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)
valid_prediction = tf.nn.softmax(tf.matmul(lay2_valid, weights3) + biases3)
lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)
test_prediction = tf.nn.softmax(tf.matmul(lay2_test, weights3) + biases3)
num_steps = 9001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
This is getting really good. Let's try one layer deeper (a `keep_prob` is defined in the next cell, but dropout itself is not applied in this version).
```
batch_size = 128
num_hidden_nodes1 = 1024
num_hidden_nodes2 = 256
num_hidden_nodes3 = 128
keep_prob = 0.5
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
global_step = tf.Variable(0)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal(
[image_size * image_size, num_hidden_nodes1],
stddev=np.sqrt(2.0 / (image_size * image_size)))
)
biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))
biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))
weights3 = tf.Variable(
tf.truncated_normal([num_hidden_nodes2, num_hidden_nodes3], stddev=np.sqrt(2.0 / num_hidden_nodes2)))
biases3 = tf.Variable(tf.zeros([num_hidden_nodes3]))
weights4 = tf.Variable(
tf.truncated_normal([num_hidden_nodes3, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes3)))
biases4 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)
lay3_train = tf.nn.relu(tf.matmul(lay2_train, weights3) + biases3)
logits = tf.matmul(lay3_train, weights4) + biases4
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
learning_rate = tf.train.exponential_decay(0.5, global_step, 4000, 0.65, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)
lay3_valid = tf.nn.relu(tf.matmul(lay2_valid, weights3) + biases3)
valid_prediction = tf.nn.softmax(tf.matmul(lay3_valid, weights4) + biases4)
lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)
lay3_test = tf.nn.relu(tf.matmul(lay2_test, weights3) + biases3)
test_prediction = tf.nn.softmax(tf.matmul(lay3_test, weights4) + biases4)
num_steps = 18001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
Huge! That's my best score on this dataset. I have also tried more parameters (and dropout), but it does not help:
```
batch_size = 128
num_hidden_nodes1 = 1024
num_hidden_nodes2 = 512
num_hidden_nodes3 = 256
keep_prob = 0.5

graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    global_step = tf.Variable(0)
    # Variables.
    weights1 = tf.Variable(
        tf.truncated_normal(
            [image_size * image_size, num_hidden_nodes1],
            stddev=np.sqrt(2.0 / (image_size * image_size)))
    )
    biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))
    weights2 = tf.Variable(
        tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))
    biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))
    weights3 = tf.Variable(
        tf.truncated_normal([num_hidden_nodes2, num_hidden_nodes3], stddev=np.sqrt(2.0 / num_hidden_nodes2)))
    biases3 = tf.Variable(tf.zeros([num_hidden_nodes3]))
    weights4 = tf.Variable(
        tf.truncated_normal([num_hidden_nodes3, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes3)))
    biases4 = tf.Variable(tf.zeros([num_labels]))
    # Training computation.
    lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
    drop1 = tf.nn.dropout(lay1_train, keep_prob)
    lay2_train = tf.nn.relu(tf.matmul(drop1, weights2) + biases2)
    drop2 = tf.nn.dropout(lay2_train, keep_prob)
    lay3_train = tf.nn.relu(tf.matmul(drop2, weights3) + biases3)
    drop3 = tf.nn.dropout(lay3_train, keep_prob)
    logits = tf.matmul(drop3, weights4) + biases4
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # Optimizer.
    learning_rate = tf.train.exponential_decay(0.5, global_step, 5000, 0.80, staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
    lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)
    lay3_valid = tf.nn.relu(tf.matmul(lay2_valid, weights3) + biases3)
    valid_prediction = tf.nn.softmax(tf.matmul(lay3_valid, weights4) + biases4)
    lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
    lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)
    lay3_test = tf.nn.relu(tf.matmul(lay2_test, weights3) + biases3)
    test_prediction = tf.nn.softmax(tf.matmul(lay3_test, weights4) + biases4)

num_steps = 20001

with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
```
| github_jupyter |
## Dependencies
```
import json, glob
import numpy as np
import pandas as pd
import tensorflow as tf
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
input_base_path = '/kaggle/input/54-tweet-train-3fold-roberta-base-bce/'
with open(input_base_path + 'config.json') as json_file:
    config = json.load(json_file)
config
vocab_path = input_base_path + 'vocab.json'
merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
lowercase=True, add_prefix_space=True)
```
# Pre process
```
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
test["text"] = test["text"].apply(lambda x: x.strip())
x_test = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    last_state = sequence_output[0]
    x_start = layers.Dropout(.1)(last_state)
    x_start = layers.Conv1D(1, 1)(x_start)
    x_start = layers.Flatten()(x_start)
    y_start = layers.Activation('sigmoid', name='y_start')(x_start)
    x_end = layers.Dropout(.1)(last_state)
    x_end = layers.Conv1D(1, 1)(x_end)
    x_end = layers.Flatten()(x_end)
    y_end = layers.Activation('sigmoid', name='y_end')(x_end)
    model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
    return model
```
# Make predictions
```
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
    print(model_path)
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
```
# Post process
```
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
test["selected_text"].fillna(test["text"], inplace=True)
```
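For reference, here is a rough, purely illustrative sketch of what a `decode`-style helper might do. The real `decode` comes from `tweet_utility_scripts` (not shown in this notebook), so its exact signature and behaviour may differ:
```
# Hypothetical sketch only: map predicted token positions back to a substring.
# The actual helper used above lives in tweet_utility_scripts.
def decode_sketch(pred_start, pred_end, text, question_size, tokenizer):
    encoded = tokenizer.encode(text)
    # Shift the predicted positions to account for the sentiment/question tokens
    # that were prepended to the model input.
    start = max(pred_start - question_size, 0)
    end = max(pred_end - question_size, start)
    # Turn the selected token span back into text.
    return tokenizer.decode(encoded.ids[start:end + 1])
```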
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
## Label Maker with Dask and Planetary Computer
This notebook shows how to run [label-maker](https://github.com/developmentseed/label-maker-dask) with [dask](https://dask.org/) using [Planetary Computer](https://planetarycomputer.microsoft.com/). Label Maker is a library for creating machine-learning ready data by pairing satellite images with [OpenStreetMap](https://www.openstreetmap.org/) (OSM) vector data. It fetches data from both sources and then divides them into smaller image chips based on [slippy map conventions](https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames).
### Environment Setup
We'll add our dependencies and use dask locally for ease of setup. For running a remote cluster, see the setup in [the dask example](../quickstarts/scale-with-dask.ipynb)
```
!pip install -q label-maker-dask
import planetary_computer as pc
import pystac
from label_maker_dask import LabelMakerJob
from dask.distributed import Client
client = Client()
```
### Finding Source Imagery
You can use any tiled imagery (WMS/TMS) endpoint or Cloud-Optimized GeoTIFF file as the imagery input to `label-maker-dask`. In this case, we follow the [Sentinel 2 L2A Example](../datasets/sentinel-2-l2a/sentinel-2-l2a-example.ipynb) to get an asset URL and sign it with our Planetary Computer SAS token.
```
item = pystac.read_file(
"https://planetarycomputer.microsoft.com/api/stac/v1/collections/sentinel-2-l2a/items/S2A_MSIL2A_20190724T112121_R037_T29SMC_20201005T185645" # noqa: E501
)
asset_href = item.assets["visual"].href
signed_href = pc.sign(asset_href)
```
### Label-Maker-Dask
Now that we have everything setup, we can supply the parameters to define our `label-maker` job:
- `zoom`: *int*. The [zoom level](https://wiki.openstreetmap.org/wiki/Zoom_levels) used to create images. This functions as a rough proxy for resolution. Value should be given as an int on the interval `[0, 19]`
- `bounds`: *List[float]*. The bounding box to create images from. This should be given in the form: `[xmin, ymin, xmax, ymax]` as longitude and latitude values between `[-180, 180]` and `[-90, 90]`, respectively. Values should use the WGS84 datum, with longitude and latitude units in decimal degrees.
- `classes`: *List*. The training classes. Each class is defined as dict object with two required keys:
- `name`: *str*. The class name.
- `filter`: *List[str]*. A [Mapbox GL Filter](https://www.mapbox.com/mapbox-gl-js/style-spec#other-filter) to define any vector features matching this class. Filters are applied with the standalone [featureFilter](https://github.com/mapbox/mapbox-gl-js/tree/main/src/style-spec/feature_filter#api) from Mapbox GL JS.
- `imagery`: *str*. Details at https://developmentseed.org/label-maker/parameters.html#parameters
- `ml_type`: *str*. One of 'classification', 'object-detection', or 'segmentation'. More details at https://developmentseed.org/label-maker/parameters.html#parameters
- `label_source`: *str*. A template string for a tile server providing OpenStreetMap QA tiles. Planetary Computer hosts a tile server supporting this format at https://qa-tiles-server-dev.ds.io/services/z17/tiles/{z}/{x}/{y}.pbf
Once the job is defined, we can use the `build_job` and `execute_job` methods to fetch our labels and imagery.
```
lmj = LabelMakerJob(
zoom=15,
bounds=[-9.232635498046, 38.70265930723, -9.0966796875, 38.78138720209],
classes=[
{"name": "Roads", "filter": ["has", "highway"]},
{"name": "Buildings", "filter": ["has", "building"]},
],
imagery=signed_href,
ml_type="segmentation",
label_source="https://qa-tiles-server-dev.ds.io/services/z17/tiles/{z}/{x}/{y}.pbf",
)
lmj.build_job()
# a quick check on the number of image chips/tiles
lmj.n_tiles()
lmj.execute_job()
lmj.results[2]
```
### What Next?
The `results` property has viewing options for the various machine learning types, but we're really interested in passing the images and labels to a machine learning framework. We can setup the learning process similar to this:
```python
model = ...
X = np.stack([result.image for result in lmj.results])
y = np.stack([result.label for result in lmj.results])
model.fit(X, y)
```
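As a slightly more concrete (and still hypothetical) sketch, assuming 256 x 256 RGB chips and per-pixel integer class masks for the segmentation job configured above, a tiny Keras model could be wired up as follows; the actual chip size and mask format depend on your zoom level and `ml_type`:
```python
import numpy as np
import tensorflow as tf

# Assumed shapes: images (256, 256, 3) and integer class masks (256, 256).
num_classes = 3  # background, Roads, Buildings

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(256, 256, 3)),
    tf.keras.layers.Conv2D(32, 3, padding="same", activation="relu"),
    tf.keras.layers.Conv2D(32, 3, padding="same", activation="relu"),
    tf.keras.layers.Conv2D(num_classes, 1, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

X = np.stack([result.image for result in lmj.results])
y = np.stack([result.label for result in lmj.results])
model.fit(X, y, batch_size=8, epochs=1)
```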
### Resources
Check out these other notebooks for help running your own cluster or finding your own images:
- [Scale with Dask](../quickstarts/scale-with-dask.ipynb)
- [Reading Data from the STAC API](../quickstarts/reading-stac.ipynb)
| github_jupyter |
# Challenge 1
For this challenge, we will work with the [Black Friday](https://www.kaggle.com/mehdidag/black-friday) data set, which gathers data about purchase transactions in a retail store.
We will use it to practice exploring data sets with pandas. You can do the whole analysis in this notebook, but the answers must be placed in the indicated locations.
> Note: please do not change the names of the answer functions.
## Analysis _set up_
```
import pandas as pd
import numpy as np
black_friday = pd.read_csv("black_friday.csv")
```
## Start your analysis from here
```
black_friday.sample(5)
black_friday.info()
black_friday.describe()
black_friday.isna().sum()
```
## Question 1
How many observations and how many columns are there in the dataset? Answer in the format of a tuple `(n_observacoes, n_colunas)`.
```
def q1():
    # Answer: (537577, 12)
    return black_friday.shape
```
## Question 2
How many women aged between 26 and 35 are there in the dataset? Answer as a single scalar.
```
def q2():
    # Answer: 49348
    young_woman = black_friday.query("Gender == 'F' & Age == '26-35'")
    return young_woman.shape[0]
```
## Question 3
How many unique users are there in the dataset? Answer as a single scalar.
```
def q3():
    # Answer: 5891
    return black_friday['User_ID'].nunique()
```
## Question 4
How many different data types exist in the dataset? Answer as a single scalar.
```
def q4():
    # Answer: 3
    return black_friday.dtypes.nunique()
```
## Question 5
What percentage of the records has at least one null value (`None`, `NaN`, etc.)? Answer as a single scalar between 0 and 1.
```
def q5():
    # Answer: 0.3055897108693266
    observations_total = black_friday.shape[0]
    observations_wo_na = black_friday.dropna().shape[0]
    return 1 - observations_wo_na / observations_total
```
## Question 6
How many null values are there in the variable (column) with the largest number of nulls? Answer as a single scalar.
```
def q6():
    # Answer: 373299
    return black_friday.isnull().sum().max()
```
## Question 7
What is the most frequent value (not counting nulls) in `Product_Category_3`? Answer as a single scalar.
```
def q7():
    # Answer: 16.0
    return black_friday["Product_Category_3"].value_counts().idxmax()
```
## Question 8
What is the new mean of the `Purchase` variable (column) after normalization? Answer as a single scalar.
```
def q8():
    # Answer: 0.3925748592124437
    min_at_0 = black_friday["Purchase"] - black_friday["Purchase"].min()
    max_difference = black_friday["Purchase"].max() - black_friday["Purchase"].min()
    normalized_purchase = min_at_0 / max_difference
    return normalized_purchase.mean()
```
## Question 9
How many occurrences of the `Purchase` variable lie between -1 and 1 inclusive after standardization? Answer as a single scalar.
```
def q9():
    # Answer: the count of standardized Purchase values within [-1, 1]
    purchase = black_friday["Purchase"]
    standardized_purchase = (purchase - purchase.mean()) / purchase.std()
    mask = (standardized_purchase >= -1) & (standardized_purchase <= 1)
    return standardized_purchase[mask].shape[0]
```
## Question 10
Can we state that if an observation is null in `Product_Category_2`, it is also null in `Product_Category_3`? Answer with a bool (`True`, `False`).
```
def q10():
    # Answer: True
    null_in_both = (black_friday["Product_Category_2"].isnull() & black_friday["Product_Category_3"].isnull()).sum()
    null_in_2_total = black_friday["Product_Category_2"].isnull().sum()
    return bool(null_in_both == null_in_2_total)
```
| github_jupyter |
# Chaining using Ground Truth Streaming Labeling Jobs
You can use a streaming labeling job to perpetually send new data objects to Amazon SageMaker Ground Truth to be labeled. Ground Truth streaming labeling jobs remain active until they are manually stopped or have been idle for more than 10 days. You can intermittently send new data objects to workers while the labeling job is active.
Use this notebook to create a Ground Truth streaming labeling job using any of the [built-in task types](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html). You can make the necessary parameter changes for a custom workflow. You can either configure the notebook to create a labeling job using your own input data, or run the notebook in *default* mode and use the provided image input data. **To use your own input data, set `DEFAULT` to `False`**.
Chaining is a powerful feature that you can use to send the output of one streaming labeling job to another streaming labeling job. This opens up multiple ways to set up jobs so that data flows between them in real time: the output of Job 1 can flow to Job 2, the output of Job 2 can flow to Job 3, and in general the output of Job n-1 can flow to Job n.
In this notebook, we show how you can use two such streaming jobs in a chained fashion. If you set `DEFAULT` to `True`, we set up Job 1 as an "Object Detection" job, where workers draw bounding boxes around objects, and Job 2 as an "Object Detection Adjustment" job, where workers adjust the bounding boxes drawn in Job 1.
```
DEFAULT=True
```
To read more about streaming labeling jobs, see the Amazon SageMaker documentation on [Ground Truth Streaming Labeling Jobs](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-streaming-labeling-job.html).
To learn more about each step in this notebook, refer to [Create a Streaming Labeling Job](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-streaming-create-job.html).
## Get latest version of AWS python SDK
```
!pip install -q --upgrade pip
!pip install awscli -q --upgrade
!pip install botocore -q --upgrade
!pip install boto3 -q --upgrade
!pip install sagemaker -q --upgrade
# NOTE: Restart Kernel after the above command
import boto3
import botocore
import json
import time
import sagemaker
import re
import os
```
## Prerequisites
You will create some of the resources you need to launch a Ground Truth streaming labeling job in this notebook. You must create the following resources before executing this notebook:
* A work team. A work team is a group of workers that complete labeling tasks. If you want to preview the worker UI and execute the labeling task you will need to create a private work team, add yourself as a worker to this team, and provide the work team ARN below. If you do not want to use a private or vendor work team ARN, set `private_work_team` to `False` to use the Amazon Mechanical Turk workforce. To learn more about private, vendor, and Amazon Mechanical Turk workforces, see [Create and Manage Workforces
](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management.html).
* **IMPORTANT**: 3D point cloud and video frame labeling jobs only support private and vendor workforces. If you plan to use 3D point cloud or video frame input data, specify a private or vendor workforce below for WORKTEAM_ARN.
```
private_work_team = True # Set it to false if using Amazon Mechanical Turk Workforce
if(private_work_team):
    WORKTEAM_ARN = '<<ADD WORK TEAM ARN HERE>>'
else:
    region = boto3.session.Session().region_name
    WORKTEAM_ARN = f'arn:aws:sagemaker:{region}:394669845002:workteam/public-crowd/default'
print(f'This notebook will use the work team ARN: {WORKTEAM_ARN}')
# Make sure workteam arn is populated if private work team is chosen
assert (WORKTEAM_ARN != '<<ADD WORK TEAM ARN HERE>>')
```
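If you are not sure of your private work team ARN, you can list the work teams visible to this role, assuming it has the `sagemaker:ListWorkteams` permission. This is an optional convenience and is not required by the rest of the notebook:
```
# Optional: list work teams visible to this role to find the ARN to paste above.
sm = boto3.client('sagemaker')
for workteam in sm.list_workteams()['Workteams']:
    print(workteam['WorkteamName'], '->', workteam['WorkteamArn'])
```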
* The IAM execution role you used to create this notebook instance must have the following permissions:
* AWS managed policy [AmazonSageMakerGroundTruthExecution](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/AmazonSageMakerGroundTruthExecution). Run the following code-block to see your IAM execution role name. This [GIF](add-policy.gif) demonstrates how to add this policy to an IAM role in the IAM console. You can also find instructions in the IAM User Guide: [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console).
* When you create your role, you specify Amazon S3 permissions. Make sure that your IAM role has access to the S3 bucket that you plan to use in this example. If you do not specify an S3 bucket in this notebook, the default bucket in the AWS region you are running this notebook instance will be used. If you do not require granular permissions, you can attach [AmazonS3FullAccess](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/AmazonS3FullAccess) to your role.
```
role = sagemaker.get_execution_role()
role_name = role.split('/')[-1]
print('IMPORTANT: Make sure this execution role has the AWS managed policy AmazonSageMakerGroundTruthExecution attached.')
print('********************************************************************************')
print('The IAM execution role name:', role_name)
print('The IAM execution role ARN:', role)
print('********************************************************************************')
# Make sure the bucket is in the same region as this notebook.
BUCKET = '<< YOUR S3 BUCKET NAME >>'
sess = sagemaker.Session()
s3 = boto3.client('s3')
if(BUCKET=='<< YOUR S3 BUCKET NAME >>'):
    BUCKET = sess.default_bucket()
region = boto3.session.Session().region_name
bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
assert bucket_region == region, f'Your S3 bucket {BUCKET} and this notebook need to be in the same region.'
print(f'IMPORTANT: make sure the role {role_name} has the access to read and write to this bucket.')
print('********************************************************************************************************')
print(f'This notebook will use the following S3 bucket: {BUCKET}')
print('********************************************************************************************************')
```
## SNS topics for Input and Output
You can send data objects to your streaming labeling job using Amazon Simple Notification Service (Amazon SNS). Amazon SNS is a web service that coordinates and manages the delivery of messages to and from endpoints (for example, an email address or AWS Lambda function). An Amazon SNS topic acts as a communication channel between two or more endpoints. You use Amazon SNS to send, or publish, new data objects to the topic specified in the CreateLabelingJob parameter SnsTopicArn in InputConfig.
The following cells will create a name for your labeling job and use this name to create Amazon SNS input and output topics. This labeling job name and these topics will be used in your CreateLabelingJob request later in this notebook.
```
# Job Name
LABELING_JOB_NAME = 'GroundTruth-streaming-' + str(int(time.time()))
print('Your labeling job name will be :', LABELING_JOB_NAME)
# Make sure role has "Sns:CreateTopic" access
sns = boto3.client('sns')
# Create Input Topic
input_response = sns.create_topic(Name= LABELING_JOB_NAME + '-Input')
INPUT_SNS_TOPIC = input_response['TopicArn']
print('input_sns_topic :', INPUT_SNS_TOPIC)
# Create Output Topic
output_response = sns.create_topic(Name= LABELING_JOB_NAME + '-Output')
OUTPUT_SNS_TOPIC = output_response['TopicArn']
print('output_sns_topic :', OUTPUT_SNS_TOPIC)
```
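Once the labeling job created later in this notebook is running, you can feed it by publishing data objects to the input topic. A minimal sketch, with a placeholder S3 path that you would replace with an object your execution role can read:
```
# Publish one new data object to the streaming job's input topic.
# The S3 key below is a placeholder; replace it with a real object.
request = json.dumps({'source-ref': f's3://{BUCKET}/images/example-image.jpg'})
sns.publish(TopicArn=INPUT_SNS_TOPIC, Message=request)
```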
## Choose Labeling Job Type
Ground Truth supports a variety of built-in task types which streamline the process of creating image, text, video, video frame, and 3D point cloud labeling jobs. You can use this notebook on *default* mode if you do not want to bring your own input data and input manifest file.
If you have input data and an input manifest file in an S3 bucket, set `DEFAULT` to `False` and choose the **Labeling Job Task Type** you want to use below and specify the S3 URI of your input manifest file below. The S3 URI looks similar to `s3://your-bucket/path-to-input-manifest/input-manifest.manifest`. To learn more about each task type, see [Built-in Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html).
### Choose Labeling Job Built-In Task Type
Copy one of the following task types and use it to set the value for `task_type`. If you set **`DEFAULT`** to `True` at the beginning of this notebook, the image bounding box task type will be used by default.
To create a custom labeling workflow, set `CUSTOM` to `True` and specify your custom lambda functions to pre-process your input data and process output data in the section **Create Custom Labeling Workflow** below.
```
## Choose from following:
## Bounding Box
## Image Classification (Single Label)
## Image Classification (Multi-label)
## Image Semantic Segmentation
## Text Classification (Single Label)
## Text Classification (Multi-label)
## Named Entity Recognition
## Video Classification
## Video Frame Object Detection
## Video Frame Object Tracking
## 3D Point Cloud Object Detection
## 3D Point Cloud Object Tracking
## 3D Point Cloud Semantic Segmentation
task_type = "<<COPY AND PASTE TASK TYPE FROM LIST ABOVE>>"
if(DEFAULT):
    task_type = "Bounding Box"
print(f'Your task type: {task_type}')
task_type_map = {
"Bounding Box" : "BoundingBox",
"Image Classification (Single Label)" : "ImageMultiClass",
"Image Classification (Multi-label)" : "ImageMultiClassMultiLabel",
"Image Semantic Segmentation" : "SemanticSegmentation",
"Text Classification (Single Label)" : "TextMultiClass",
"Text Classification (Multi-label)" : "TextMultiClassMultiLabel",
"Named Entity Recognition" : "NamedEntityRecognition",
"Video Classification" : "VideoMultiClass",
"Video Frame Object Detection" : "VideoObjectDetection",
"Video Frame Object Tracking" : "VideoObjectTracking",
"3D Point Cloud Object Detection" : "3DPointCloudObjectDetection",
"3D Point Cloud Object Tracking" : "3DPointCloudObjectTracking",
"3D Point Cloud Semantic Segmentation" : "3DPointCloudSemanticSegmentation"
}
arn_region_map = {'us-west-2': '081040173940',
'us-east-1': '432418664414',
'us-east-2': '266458841044',
'eu-west-1': '568282634449',
'eu-west-2': '487402164563',
'ap-northeast-1': '477331159723',
'ap-northeast-2': '845288260483',
'ca-central-1': '918755190332',
'eu-central-1': '203001061592',
'ap-south-1': '565803892007',
'ap-southeast-1': '377565633583',
'ap-southeast-2': '454466003867'
}
task_type_suffix = task_type_map[task_type]
region_account = arn_region_map[region]
PRE_HUMAN_TASK_LAMBDA = f'arn:aws:lambda:{region}:{region_account}:function:PRE-{task_type_suffix}'
POST_ANNOTATION_LAMBDA = f'arn:aws:lambda:{region}:{region_account}:function:ACS-{task_type_suffix}'
print(PRE_HUMAN_TASK_LAMBDA)
print(POST_ANNOTATION_LAMBDA)
```
3D point cloud and video frame task types have special requirements. The following variables will be used to configure your labeling job for these task types. To learn more, see the following topics in the documentation:
* [3D Point Cloud Labeling Jobs Overview](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud-general-information.html)
* [Video Frame Labeling Job Overview](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-video-overview.html)
```
point_cloud_task = re.search(r'Point Cloud', task_type) is not None
video_frame_task = re.search(r'Video Frame', task_type) is not None
```
### Create Custom Labeling Workflow
If you want to create a custom labeling workflow, you can create your own lambda functions to pre-process your input data and post-process the labels returned from workers. To learn more, see [Step 3: Processing with AWS Lambda](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step3.html).
To use this notebook to run a custom flow, set `CUSTOM` to `True` and specify your pre- and post-processing lambdas below.
```
CUSTOM = False
if(CUSTOM):
    PRE_HUMAN_TASK_LAMBDA = '<ADD-PRE-PROCESSING-LAMBDA-ARN>'
    POST_ANNOTATION_LAMBDA = '<ADD-POST-PROCESSING-LAMBDA-ARN>'
```
## Specify Labels
You specify the labels that you want workers to use to annotate your data in a label category configuration file. When you create a 3D point cloud or video frame labeling job, you can add label category attributes to your label category configuration file. Workers can assign one or more attributes to annotations to give more information about that object.
For all task types, you can use the following cell to identify the labels you use for your labeling job. To create a label category configuration file with label category attributes, see [Create a Labeling Category Configuration File with Label Category Attributes
](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-label-cat-config-attributes.html) in the Amazon SageMaker developer guide.
```
# Add label categories of your choice
LABEL_CATEGORIES = []
if(DEFAULT):
    LABEL_CATEGORIES = ['Pedestrian', 'Street Car', 'Biker']
```
The following cell will create a label category configuration file using the labels specified above.
**IMPORTANT**: Make sure you have added label categories above and they appear under `labels` when you run the following cell.
```
# Specify labels above and this notebook will upload a label category configuration file to S3.
json_body = {
    "document-version": "2018-11-28",
    'labels': [{'label': label} for label in LABEL_CATEGORIES]
}
with open('class_labels.json', 'w') as f:
    json.dump(json_body, f)
print("Your label category configuration file:")
print("\n",json.dumps(json_body, indent=2))
s3.upload_file('class_labels.json', BUCKET, 'class_labels.json')
LABEL_CATEGORIES_S3_URI = f's3://{BUCKET}/class_labels.json'
print(f'You should now see class_labels.json in {LABEL_CATEGORIES_S3_URI}')
```
## Create A Worker Task Template
Part or all of your images will be annotated by human annotators. It is essential to provide good instructions. Good instructions are:
1. Concise. We recommend limiting verbal/textual instruction to two sentences and focusing on clear visuals.
2. Visual. In the case of object detection, we recommend providing several labeled examples with different numbers of boxes.
When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions.
NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in S3 Documentation.
### Specify Resources Used for Human Task UI
The human task user interface (UI) is the interface that human workers use to label your data. Depending on the type of labeling job you create, you will specify a resource that is used to generate the human task UI in the `UiConfig` parameter of `CreateLabelingJob`.
For 3D point cloud and video frame labeling tasks, you will specify a pre-defined `HumanTaskUiARN`. For all other labeling job task types, you will specify a `UiTemplateS3Uri`.
#### Bounding Box Image Labeling Job (Default)
If you set `DEFAULT` to `True`, use the following to create a worker task template and upload it to your S3 bucket. Ground Truth uses this template to generate your human task UI.
```
from IPython.core.display import HTML, display
def make_template(save_fname='instructions.template'):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
<crowd-bounding-box
name="boundingBox"
src="{{{{ task.input.taskObject | grant_read_access }}}}"
header="Dear Annotator, please draw a tight box around each object you see (if there are more than 8 objects, draw boxes around at least 8)."
labels="{{{{ task.input.labels | to_json | escape }}}}"
>
<full-instructions header="Please annotate each object">
<ol>
<li><strong>Inspect</strong> the image</li>
<li><strong>Determine</strong> if the specified label is/are visible in the picture.</li>
<li><strong>Outline</strong> each instance of the specified label in the image using the provided “Box” tool.</li>
</ol>
</full-instructions>
<short-instructions>
<ul>
<li>Boxes should fit tightly around each object</li>
<li>Do not include parts of the object that are overlapping or that cannot be seen, even though you think you can interpolate the whole shape.</li>
<li>Avoid including shadows.</li>
<li>If the target is off screen, draw the box up to the edge of the image.</li>
</ul>
</short-instructions>
</crowd-bounding-box>
</crowd-form>
""".format()
    with open(save_fname, 'w') as f:
        f.write(template)
if(DEFAULT):
    make_template(save_fname='instructions.template')
if(DEFAULT):
    result = s3.upload_file('instructions.template', BUCKET, 'instructions.template')
```
#### Image, Text, and Custom Labeling Jobs (Non Default)
For all image and text based built-in task types, you can find a sample worker task template on that task type page. Find the page for your task type on [Built-in Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html). You will see an example template under the section **Create a {Insert-Task-Type} Job (API)**.
Update `<full-instructions></full-instructions>` and `<short-instructions></short-instructions>`. Add your template to the following code block and run the code blocks below to generate your worker task template and upload it to your S3 bucket.
For custom labeling workflows, you can provide a custom HTML worker task template using Crowd HTML Elements. To learn more, see [Step 2: Creating your custom labeling task template](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html).
Ground Truth uses this template to generate your human task UI.
**Important**: If you use the following `make_template` function to create and upload a worker task template to Amazon S3, you must add an extra pair of `{}` brackets around each Liquid element. For example, if the template contains `{{ task.input.labels | to_json | escape }}`, this line should look as follows in the `make_template` variable `template`: `{{{{ task.input.labels | to_json | escape }}}}`.
```
from IPython.core.display import HTML, display
def make_template(save_fname='instructions.template'):
    template = r"""
<<<ADD-TEMPLATE-HTML-CODE-HERE>>>
""".format()
    with open(save_fname, 'w') as f:
        f.write(template)
# This will upload your template to S3 if you are not running in DEFAULT mode, and if your task type
# does not use video frames or 3D point clouds.
if(not DEFAULT and not video_frame_task and not point_cloud_task):
    make_template(save_fname='instructions.template')
    s3.upload_file('instructions.template', BUCKET, 'instructions.template')
```
#### 3D Point Cloud and Video Frame Task Types
If you are creating a 3D point cloud or video frame task type, your worker UI is configured by Ground Truth. If you chose one of these task types above, the following cell will specify the correct template.
```
import re
if(not DEFAULT):
    if (point_cloud_task):
        task_type_suffix_humanuiarn = task_type_suffix.split('3D')[-1]
        HUMAN_UI_ARN = f'arn:aws:sagemaker:{region}:394669845002:human-task-ui/{task_type_suffix_humanuiarn}'
    if (video_frame_task):
        HUMAN_UI_ARN = f'arn:aws:sagemaker:{region}:394669845002:human-task-ui/{task_type_suffix}'
    print(f'The Human Task UI ARN is: {HUMAN_UI_ARN}')
```
## (Optional) Create an Input Manifest File
You can optionally specify an input manifest file Amazon S3 URI in ManifestS3Uri when you create the streaming labeling job. Ground Truth sends each data object in the manifest file to workers for labeling as soon as the labeling job starts.
Each line in an input manifest file is an entry containing an object, or a reference to an object, to label. An entry can also contain labels from previous jobs and for some task types, additional information.
To learn how to create an input manifest file, see [Use an Input Manifest File](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-input-data-input-manifest.html). Copy the S3 URI of the file below.
```
# [Optional] The path in Amazon S3 to your input manifest file.
INPUT_MANIFEST = ''
```
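For illustration, here is a minimal sketch of how such a manifest could be built and uploaded for an image task. The object keys are placeholders, and `INPUT_MANIFEST` is left unchanged so the default flow is not affected:
```
# A hypothetical manifest with three placeholder image URIs, one JSON object per line.
example_uris = [f's3://{BUCKET}/images/image-{i}.jpg' for i in range(3)]
with open('example.manifest', 'w') as f:
    for uri in example_uris:
        f.write(json.dumps({'source-ref': uri}) + '\n')
s3.upload_file('example.manifest', BUCKET, 'example.manifest')
# To use it, you would set: INPUT_MANIFEST = f's3://{BUCKET}/example.manifest'
```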
## Specify Parameters for Labeling Job
If you set `DEFAULT` to `False`, you must specify the following parameters. These will be used to configure and create your labeling job. If you set `DEFAULT` to `True`, default parameters will be used.
To learn more about these parameters, use the following documentation:
* [TaskTitle](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskTitle)
* [TaskDescription](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskDescription)
* [TaskKeywords](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskKeywords)
```
TASK_TITLE = '<<ADD-TASK-TITLE>>'
if(DEFAULT):
    TASK_TITLE = 'Add bounding boxes to detect objects in an image'
TASK_DESCRIPTION = '<<ADD-TASK-DESCRIPTION>>'
if(DEFAULT):
    TASK_DESCRIPTION = 'Categorize images into classes using bounding boxes'
# Keywords for your task, in a string-array. ex) ['image classification', 'image dataset']
TASK_KEYWORDS = ['<<ADD-KEYWORDS>>']
if(DEFAULT):
    TASK_KEYWORDS = ['bounding box', 'image dataset']
# The path in Amazon S3 to your worker task template or human task UI
HUMAN_UI = []
if(point_cloud_task or video_frame_task):
    HUMAN_TASK_UI_ARN = HUMAN_UI_ARN
    HUMAN_UI.append(HUMAN_TASK_UI_ARN)
    UI_CONFIG_PARAM = 'HumanTaskUiArn'
else:
    UI_TEMPLATE_S3_URI = f's3://{BUCKET}/instructions.template'
    HUMAN_UI.append(UI_TEMPLATE_S3_URI)
    UI_CONFIG_PARAM = 'UiTemplateS3Uri'
print(f'{UI_CONFIG_PARAM} resource that will be used: {HUMAN_UI[0]}')
# The ARN for your SNS input topic.
INPUT_TOPIC_ARN = INPUT_SNS_TOPIC
# The ARN for your SNS output topic.
OUTPUT_TOPIC_ARN = OUTPUT_SNS_TOPIC
# If you want to store your output manifest in a different folder, provide an OUTPUT_PATH.
OUTPUT_FOLDER_PREFIX = '/gt-streaming-demo-output'
OUTPUT_BUCKET = 's3://' + BUCKET + OUTPUT_FOLDER_PREFIX
print("Your output data will be stored in:", OUTPUT_BUCKET)
# An IAM role with the AmazonSageMakerGroundTruthExecution policy attached.
# This must be the same role that you used to create this notebook instance.
ROLE_ARN = role
```
## Use the CreateLabelingJob API to create a streaming labeling job [Job 1]
```
if(re.search(r'Semantic Segmentation', task_type) is not None or re.search(r'Object Tracking', task_type) is not None or video_frame_task):
    LABEL_ATTRIBUTE_NAME = LABELING_JOB_NAME + '-ref'
else:
    LABEL_ATTRIBUTE_NAME = LABELING_JOB_NAME
human_task_config = {
"PreHumanTaskLambdaArn": PRE_HUMAN_TASK_LAMBDA,
"MaxConcurrentTaskCount": 100, # Maximum of 100 objects will be available to the workteam at any time
"NumberOfHumanWorkersPerDataObject": 1, # We will obtain and consolidate 1 human annotationsfor each image.
"TaskAvailabilityLifetimeInSeconds": 21600, # Your workteam has 6 hours to complete all pending tasks.
"TaskDescription": TASK_DESCRIPTION,
# If using public workforce, specify "PublicWorkforceTaskPrice"
"WorkteamArn": WORKTEAM_ARN,
"AnnotationConsolidationConfig": {
"AnnotationConsolidationLambdaArn": POST_ANNOTATION_LAMBDA
},
"TaskKeywords": TASK_KEYWORDS,
"TaskTimeLimitInSeconds": 600, # Each image must be labeled within 10 minutes.
"TaskTitle": TASK_TITLE,
"UiConfig": {
UI_CONFIG_PARAM : HUMAN_UI[0]
}
}
# If you are using the Amazon Mechanical Turk workforce, specify the amount you want to pay a
# worker to label a data object. See https://aws.amazon.com/sagemaker/groundtruth/pricing/ for recommendations.
if (not private_work_team):
    human_task_config["PublicWorkforceTaskPrice"] = {
        "AmountInUsd": {
            "Dollars": 0,
            "Cents": 3,
            "TenthFractionsOfACent": 6,
        }
    }
    human_task_config["WorkteamArn"] = WORKTEAM_ARN
else:
    human_task_config["WorkteamArn"] = WORKTEAM_ARN
ground_truth_request = {
"InputConfig": {
"DataSource": {
"SnsDataSource": {
"SnsTopicArn": INPUT_TOPIC_ARN
}
}
},
"HumanTaskConfig" : human_task_config,
"LabelAttributeName": LABEL_ATTRIBUTE_NAME,
"LabelCategoryConfigS3Uri" : LABEL_CATEGORIES_S3_URI,
"LabelingJobName": LABELING_JOB_NAME,
"OutputConfig": {
"S3OutputPath": OUTPUT_BUCKET,
"SnsTopicArn": OUTPUT_TOPIC_ARN
},
"RoleArn": ROLE_ARN
}
if(INPUT_MANIFEST != ''):
    ground_truth_request["InputConfig"]["DataSource"]["S3DataSource"] = {"ManifestS3Uri": INPUT_MANIFEST}
```
#### DataAttributes
You should not share explicit, confidential, or personal information or protected health information with the Amazon Mechanical Turk workforce.
If you are using Amazon Mechanical Turk workforce, you must verify that your data is free of personal, confidential, and explicit content and protected health information using this code cell.
```
if (not private_work_team):
    ground_truth_request["InputConfig"]["DataAttributes"] = {"ContentClassifiers": ["FreeOfPersonallyIdentifiableInformation","FreeOfAdultContent"]}
print("Your create labeling job request:\n",json.dumps(ground_truth_request,indent=4))
sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_labeling_job(**ground_truth_request)
```
## Use the DescribeLabelingJob API to describe a streaming labeling job
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME)
```
Wait until the labeling job status equals InProgress before moving forward in this notebook
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME)['LabelingJobStatus']
```
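As a small convenience, instead of re-running the cell above by hand, you can poll until the job leaves the `Initializing` state:
```
# Poll until the labeling job reaches InProgress (or a terminal state).
while True:
    status = sagemaker_client.describe_labeling_job(
        LabelingJobName=LABELING_JOB_NAME)['LabelingJobStatus']
    print('LabelingJobStatus:', status)
    if status != 'Initializing':
        break
    time.sleep(30)
```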
## Check for LabelingJobStatus and interpreting describe response
* If you specified "S3DataSource.ManifestS3Uri" in the above request, the objects in the S3 file will automatically make their way to the labeling job. You will see the labeling job's counters increment as the objects from the file are processed.
* Streaming jobs create an Amazon SQS queue in your account. You can check for the existence of a queue named "GroundTruth-LABELING_JOB_NAME" in the console or with the command below.
```
sqs = boto3.client('sqs')
response = sqs.get_queue_url(QueueName='GroundTruth-' + LABELING_JOB_NAME.lower())
print("Queue url is :", response['QueueUrl'])
```
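To watch objects flow through the streaming job, you can also inspect the label counters returned by `DescribeLabelingJob`:
```
# Label counters show how many objects have been received, labeled, or failed so far.
counters = sagemaker_client.describe_labeling_job(
    LabelingJobName=LABELING_JOB_NAME)['LabelCounters']
print(json.dumps(counters, indent=2))
```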
# Job 2 Setup
Use the following section to set up your second labeling job. This labeling job will be chained to the first job that you set up above. This means the output data from the first labeling job will be sent to this labeling job as input data.
Bounding box, semantic segmentation, and all video frame and 3D point cloud labeling job types support an *adjustment* task type, which you can use to have workers modify and add to the annotations created in the first labeling job for the respective task type. You can select one of these adjustment task types below.
If you do not choose an adjustment task type, the output data from this second job will contain any new labels that workers add, as well as the labels added in the first labeling job.
```
# Job Name
LABELING_JOB_NAME2 = 'GroundTruth-streaming-' + str(int(time.time()))
print('Your labeling job 2 name will be :', LABELING_JOB_NAME2)
```
## SNS topics for Input and Output for Job 2
The input SNS topic for Job 2 is the same as the output SNS topic of Job 1; this is how the chaining is set up.
We only need to create a new output SNS topic for Job 2.
```
# Input topic for Job 2: reuse the output topic of Job 1 (no new topic is created)
INPUT_SNS_TOPIC2 = OUTPUT_SNS_TOPIC
print('input_sns_topic of Job 2:', INPUT_SNS_TOPIC2)
# Create Output Topic
output_response = sns.create_topic(Name= LABELING_JOB_NAME2 + '-Output')
OUTPUT_SNS_TOPIC2 = output_response['TopicArn']
print('output_sns_topic of Job 2:', OUTPUT_SNS_TOPIC2)
# The ARN for your SNS input topic.
INPUT_TOPIC_ARN2 = INPUT_SNS_TOPIC2
# The ARN for your SNS output topic.
OUTPUT_TOPIC_ARN2 = OUTPUT_SNS_TOPIC2
```
## Choose Labeling Job Type [Job2]
Ground Truth supports a variety of built-in task types which streamline the process of creating image, text, video, video frame, and 3D point cloud labeling jobs. You can use this notebook on *default* mode if you do not want to bring your own input data and input manifest file.
If you have input data and an input manifest file in an S3 bucket, set `DEFAULT` to `False` and choose the **Labeling Job Task Type** you want to use below and specify the S3 URI of your input manifest file below. The S3 URI looks similar to `s3://your-bucket/path-to-input-manifest/input-manifest.manifest`. To learn more about each task type, see [Built-in Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html).
### Choose Labeling Job Built-In Task Type
Copy one of the following task types and use it to set the value for `task_type2`. If you set **`DEFAULT`** to `True` at the beginning of this notebook, the bounding box adjustment task type will be used by default.
```
## Choose from following:
## Bounding Box
## Image Classification (Single Label)
## Image Classification (Multi-label)
## Image Semantic Segmentation
## Text Classification (Single Label)
## Text Classification (Multi-label)
## Named Entity Recognition
## Video Classification
## Video Frame Object Detection
## Video Frame Object Tracking
## 3D Point Cloud Object Detection
## 3D Point Cloud Object Tracking
## 3D Point Cloud Semantic Segmentation
## Adjustment Semantic Segmentation
## Verification Semantic Segmentation
## Verification Bounding Box
## Adjustment Bounding Box
## Adjustment Video Object Detection
## Adjustment Video Object Tracking
## Adjustment 3D Point Cloud Object Detection
## Adjustment 3D Point Cloud Object Tracking
## Adjustment 3D Point Cloud Semantic Segmentation
task_type2 = "<<COPY AND PASTE TASK TYPE FROM LIST ABOVE>>"
if(DEFAULT):
    task_type2 = "Adjustment Bounding Box"
print(f'Your task type: {task_type2}')
```
The following cells will configure the lambda functions Ground Truth uses to pre-process your input data and output data. These cells will configure your PreHumanTaskLambdaArn and AnnotationConsolidationLambdaArn.
```
task_type_map2 = {
"Bounding Box" : "BoundingBox",
"Image Classification (Single Label)" : "ImageMultiClass",
"Image Classification (Multi-label)" : "ImageMultiClassMultiLabel",
"Image Semantic Segmentation" : "SemanticSegmentation",
"Text Classification (Single Label)" : "TextMultiClass",
"Text Classification (Multi-label)" : "TextMultiClassMultiLabel",
"Named Entity Recognition" : "NamedEntityRecognition",
"Video Classification" : "VideoMultiClass",
"Video Frame Object Detection" : "VideoObjectDetection",
"Video Frame Object Tracking" : "VideoObjectTracking",
"3D Point Cloud Object Detection" : "3DPointCloudObjectDetection",
"3D Point Cloud Object Tracking" : "3DPointCloudObjectTracking",
"3D Point Cloud Semantic Segmentation" : "3DPointCloudSemanticSegmentation",
"Adjustment Semantic Segmentation" : "AdjustmentSemanticSegmentation",
"Verification Semantic Segmentation" : "VerificationSemanticSegmentation",
"Verification Bounding Box" : "VerificationBoundingBox",
"Adjustment Bounding Box" : "AdjustmentBoundingBox",
"Adjustment Video Object Detection" : "AdjustmentVideoObjectDetection",
"Adjustment Video Object Tracking" : "AdjustmentVideoObjectTracking",
"Adjustment 3D Point Cloud Object Detection" : "Adjustment3DPointCloudObjectDetection",
"Adjustment 3D Point Cloud Object Tracking" : "Adjustment3DPointCloudObjectTracking",
"Adjustment 3D Point Cloud Semantic Segmentation" : "Adjustment3DPointCloudSemanticSegmentation",
}
arn_region_map = {'us-west-2': '081040173940',
'us-east-1': '432418664414',
'us-east-2': '266458841044',
'eu-west-1': '568282634449',
'eu-west-2': '487402164563',
'ap-northeast-1': '477331159723',
'ap-northeast-2': '845288260483',
'ca-central-1': '918755190332',
'eu-central-1': '203001061592',
'ap-south-1': '565803892007',
'ap-southeast-1': '377565633583',
'ap-southeast-2': '454466003867'
}
task_type_suffix2 = task_type_map2[task_type2]
region_account = arn_region_map[region]
PRE_HUMAN_TASK_LAMBDA2 = f'arn:aws:lambda:{region}:{region_account}:function:PRE-{task_type_suffix2}'
POST_ANNOTATION_LAMBDA2 = f'arn:aws:lambda:{region}:{region_account}:function:ACS-{task_type_suffix2}'
print(PRE_HUMAN_TASK_LAMBDA2)
print(POST_ANNOTATION_LAMBDA2)
```
3D point cloud and video frame task types have special requirements. The following variables will be used to configure your labeling job for these task types. To learn more, see the following topics in the documentation:
* [3D Point Cloud Labeling Jobs Overview](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud-general-information.html)
* [Video Frame Labeling Job Overview](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-video-overview.html)
```
point_cloud_task = re.search(r'Point Cloud', task_type2) is not None
video_frame_task = re.search(r'Video Frame', task_type2) is not None
```
### Create Custom Labeling Workflow
If you want to create a custom labeling workflow, you can create your own lambda functions to pre-process your input data and post-process the labels returned from workers. To learn more, see [Step 3: Processing with AWS Lambda](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step3.html).
To use this notebook to run a custom flow, set `CUSTOM` to `True` and specify your pre- and post-processing lambdas below.
```
CUSTOM = False
if(CUSTOM):
    PRE_HUMAN_TASK_LAMBDA2 = '<ADD-PRE-PROCESSING-LAMBDA-ARN>'
    POST_ANNOTATION_LAMBDA2 = '<ADD-POST-PROCESSING-LAMBDA-ARN>'
```
## Specify Labels for Job 2
You specify the labels that you want workers to use to annotate your data in a label category configuration file. When you create a 3D point cloud or video frame labeling job, you can add label category attributes to your label category configuration file. Workers can assign one or more attributes to annotations to give more information about that object.
For all task types, you can use the following cell to identify the labels you use for your labeling job. To create a label category configuration file with label category attributes, see [Create a Labeling Category Configuration File with Label Category Attributes
](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-label-cat-config-attributes.html) in the Amazon SageMaker developer guide.
```
# Add label categories of your choice
LABEL_CATEGORIES = []
if(DEFAULT):
    LABEL_CATEGORIES = ['Pedestrian', 'Street Car', 'Biker']
```
The following cell will create a label category configuration file using the labels specified above.
**IMPORTANT**: Make sure you have added label categories above and they appear under `labels` when you run the following cell.
```
# Specify labels above and this notebook will upload a label category configuration file to S3.
json_body = {
    "document-version": "2018-11-28",
    'labels': [{'label': label} for label in LABEL_CATEGORIES]
}
with open('class_labels2.json', 'w') as f:
    json.dump(json_body, f)
print("Your label category configuration file:")
print("\n",json.dumps(json_body, indent=2))
s3.upload_file('class_labels2.json', BUCKET, 'class_labels2.json')
LABEL_CATEGORIES_S3_URI2 = f's3://{BUCKET}/class_labels2.json'
print(f'You should now see class_labels2.json in {LABEL_CATEGORIES_S3_URI2}')
```
## Create A Worker Task Template for Job 2
Part or all of your images will be annotated by human annotators. It is essential to provide good instructions. Good instructions are:
1. Concise. We recommend limiting verbal/textual instruction to two sentences and focusing on clear visuals.
2. Visual. In the case of object detection, we recommend providing several labeled examples with different numbers of boxes.
When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions.
NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in S3 Documentation.
### Specify Resources Used for Human Task UI
The human task user interface (UI) is the interface that human workers use to label your data. Depending on the type of labeling job you create, you will specify a resource that is used to generate the human task UI in the `UiConfig` parameter of `CreateLabelingJob`.
For 3D point cloud and video frame labeling tasks, you will specify a pre-defined `HumanTaskUiARN`. For all other labeling job task types, you will specify a `UiTemplateS3Uri`.
#### Bounding Box Adjustment Labeling Job (Default)
If you set `DEFAULT` to `True`, use the following to create a worker task template and upload it to your S3 bucket. Ground Truth uses this template to generate your human task UI.
```
from IPython.core.display import HTML, display
def make_template(save_fname='instructions2.template'):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
<crowd-bounding-box
name="boundingBox"
src="{{{{ task.input.taskObject | grant_read_access }}}}"
header="Dear Annotator, please adjust box around each object you see."
labels="{{{{ task.input.labels | to_json | escape }}}}"
initial-value="[
{{% for box in task.input.manifestLine.{label_attribute_name_from_prior_job}.annotations %}}
{{% capture class_id %}}{{{{ box.class_id }}}}{{% endcapture %}}
{{% assign label = task.input.manifestLine.{label_attribute_name_from_prior_job}-metadata.class-map[class_id] %}}
{{
label: {{{{label | to_json}}}},
left: {{{{box.left}}}},
top: {{{{box.top}}}},
width: {{{{box.width}}}},
height: {{{{box.height}}}},
}},
{{% endfor %}}
]"
>
<full-instructions header="Bounding box adjustment instructions">
<ol>
<li><strong>Inspect</strong> the image</li>
<li><strong>Determine</strong> if the specified label is/are visible in the picture.</li>
<li><strong>Outline</strong> each instance of the specified label in the image using the provided “Box” tool.</li>
</ol>
</full-instructions>
<short-instructions>
<ul>
<li>Boxes should fit tightly around each object</li>
<li>Do not include parts of the object that are overlapping or that cannot be seen, even though you think you can interpolate the whole shape.</li>
<li>Avoid including shadows.</li>
<li>If the target is off screen, draw the box up to the edge of the image.</li>
</ul>
</short-instructions>
</crowd-bounding-box>
</crowd-form>
""".format(label_attribute_name_from_prior_job=LABEL_ATTRIBUTE_NAME)
    with open(save_fname, 'w') as f:
        f.write(template)
if(DEFAULT):
    make_template(save_fname='instructions2.template')
if(DEFAULT):
    result = s3.upload_file('instructions2.template', BUCKET, 'instructions2.template')
```
#### Image, Text, and Custom Labeling Jobs (Non Default)
For all image and text based built-in task types, you can find a sample worker task template on that task type page. Find the page for your task type on [Built-in Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html). You will see an example template under the section **Create a {Insert-Task-Type} Job (API)**.
The following template shows an example of a Semantic Segmentation adjustment job template. This template can be used to render segmentation masks from a previous labeling job, to have workers adjust or add to the mask in the new labeling job.
For custom labeling workflows, you can provide a custom HTML worker task template using Crowd HTML Elements. To learn more, see [Step 2: Creating your custom labeling task template](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html).
Ground Truth uses this template to generate your human task UI.
**Important**: If you use your own template with the following `make_template` function to create and upload a worker task template to Amazon S3, you must add an extra pair of `{}` brackets around each Liquid element. For example, if the template contains `{{ task.input.labels | to_json | escape }}`, this line should look as follows in the `make_template` variable `template`: `{{{{ task.input.labels | to_json | escape }}}}`. The following semantic segmentation template already includes an extra pair of `{}` brackets around each Liquid element.
```
from IPython.core.display import HTML, display
def make_template(save_fname='instructions2.template'):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
<crowd-semantic-segmentation
initial-value="{{
'src' : '{{{{ task.input.manifestLine.{label_attribute_name_from_prior_job}| grant_read_access }}}}',
'labelMappings': {{
{{% for box in task.input.manifestLine.{label_attribute_name_from_prior_job}-metadata.internal-color-map %}}
{{% if box[1]['class-name'] != 'BACKGROUND' %}}
{{{{ box[1]['class-name'] | to_json }}}}: {{
'color': {{{{ box[1]['hex-color'] | to_json }}}}
}},
{{% endif %}}
{{% endfor %}}
}}
}}"
name="crowd-semantic-segmentation"
src="{{{{ task.input.taskObject | grant_read_access }}}}"
header="highlight bridges"
labels="{{{{ task.input.labels | to_json | escape }}}}"
>
<full-instructions header="Segmentation instructions">
<ol><li><strong>Read</strong> the task carefully and inspect the image.</li><li><strong>Read</strong> the options and review the examples provided to understand more about the labels.</li><li><strong>Choose</strong> the appropriate label that best suits the image.</li></ol>
</full-instructions>
<short-instructions>
<h3><span style="color: rgb(0, 138, 0);">Good example</span></h3><p>Enter description to explain a correctly done segmentation</p><p><img src="https://d7evko5405gb7.cloudfront.net/ae0c1149-12cb-44b6-bff4-6171a09fb83c/src/images/quick-instructions-example-placeholder.png" style="max-width:100%"></p><h3><span style="color: rgb(230, 0, 0);">Bad example</span></h3><p>Enter description of an incorrectly done segmentation</p><p><img src="https://d7evko5405gb7.cloudfront.net/ae0c1149-12cb-44b6-bff4-6171a09fb83c/src/images/quick-instructions-example-placeholder.png" style="max-width:100%"></p>
</short-instructions>
</crowd-semantic-segmentation>
</crowd-form>""".format(label_attribute_name_from_prior_job=LABEL_ATTRIBUTE_NAME)
    with open(save_fname, 'w') as f:
        f.write(template)
# This will upload your template to S3 if you are not running in DEFAULT mode, and if your task type
# does not use video frames or 3D point clouds.
if(not DEFAULT and not video_frame_task and not point_cloud_task):
    make_template(save_fname='instructions2.template')
    s3.upload_file('instructions2.template', BUCKET, 'instructions2.template')
```
#### 3D Point Cloud and Video Frame Task Types
If you are creating a 3D point cloud or video frame task type, your worker UI is configured by Ground Truth. If you chose one of these task types above, the following cell will specify the correct template.
```
import re
if(not DEFAULT):
if (point_cloud_task):
task_type_suffix_humanuiarn = task_type_suffix.split('3D')[-1]
HUMAN_UI_ARN2 = f'arn:aws:sagemaker:{region}:394669845002:human-task-ui/{task_type_suffix_humanuiarn}'
if (video_frame_task):
HUMAN_UI_ARN2 = f'arn:aws:sagemaker:{region}:394669845002:human-task-ui/{task_type_suffix}'
print(f'The Human Task UI ARN is: {HUMAN_UI_ARN2}')
```
## (Optional) Create an Input Manifest File
You can optionally specify an input manifest file Amazon S3 URI in ManifestS3Uri when you create the streaming labeling job. Ground Truth sends each data object in the manifest file to workers for labeling as soon as the labeling job starts.
Each line in an input manifest file is an entry containing an object, or a reference to an object, to label. An entry can also contain labels from previous jobs and for some task types, additional information.
To learn how to create an input manifest file, see [Use an Input Manifest File](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-input-data-input-manifest.html). Copy the S3 URI of the file below.
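If you do provide one, each line of the manifest is a standalone JSON object. The following sketch writes a two-line manifest locally and uploads it to your bucket; the file name and image keys are hypothetical placeholders.
```
# A minimal sketch (hypothetical file and object keys): write an input
# manifest where each line is one JSON object pointing at an object to label,
# then upload it to S3 and point INPUT_MANIFEST at it.
import json

manifest_entries = [
    {"source-ref": f"s3://{BUCKET}/images/image-1.jpg"},
    {"source-ref": f"s3://{BUCKET}/images/image-2.jpg"},
]
with open('input.manifest', 'w') as f:
    for entry in manifest_entries:
        f.write(json.dumps(entry) + '\n')
s3.upload_file('input.manifest', BUCKET, 'input.manifest')
# INPUT_MANIFEST = f's3://{BUCKET}/input.manifest'
```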
As Job 2 is a chained job, you can connect the output manifest of Job 1 to Job 2. In this case, Job 2 may receive the same objects from both the output SNS topic and the output S3 location of Job 1. These are treated as duplicates and ignored when the idempotency key is the same.
For simplicity, leave this field blank unless you really need it!
```
# [Optional] The path in Amazon S3 to your input manifest file.
INPUT_MANIFEST = ''
```
## Specify Parameters for Labeling Job 2
If you set `DEFAULT` to `False`, you must specify the following parameters. These will be used to configure and create your labeling job. If you set `DEFAULT` to `True`, default parameters will be used.
To learn more about these parameters, use the following documentation:
* [TaskTitle](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskTitle)
* [TaskDescription](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskDescription)
* [TaskKeywords](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_HumanTaskConfig.html#sagemaker-Type-HumanTaskConfig-TaskKeywords)
```
TASK_TITLE2 = '<<ADD-TASK-TITLE>>'
if(DEFAULT):
TASK_TITLE2 = 'Adjust Bounding boxes around objects'
TASK_DESCRIPTION2 = '<<ADD-TASK-DESCRIPTION>>'
if(DEFAULT):
TASK_DESCRIPTION2 = 'Adjust bounding boxes around specified objects in your images'
# Keywords for your task, as a list of strings, e.g. ['image classification', 'image dataset']
TASK_KEYWORDS2 = ['<<ADD-KEYWORDS>>']
if(DEFAULT):
TASK_KEYWORDS2 = ['bounding box', 'image dataset']
# The path in Amazon S3 to your worker task template or human task UI
HUMAN_UI2 = []
if(point_cloud_task or video_frame_task):
HUMAN_TASK_UI_ARN2 = HUMAN_UI_ARN2
HUMAN_UI2.append(HUMAN_TASK_UI_ARN2)
UI_CONFIG_PARAM = 'HumanTaskUiArn'
else:
UI_TEMPLATE_S3_URI2 = f's3://{BUCKET}/instructions2.template'
HUMAN_UI2.append(UI_TEMPLATE_S3_URI2)
UI_CONFIG_PARAM = 'UiTemplateS3Uri'
print(f'{UI_CONFIG_PARAM} resource that will be used: {HUMAN_UI2[0]}')
# If you want to store your output manifest in a different folder, provide an OUTPUT_PATH.
OUTPUT_FOLDER_PREFIX = '/gt-streaming-demo-output'
OUTPUT_BUCKET = 's3://' + BUCKET + OUTPUT_FOLDER_PREFIX
print("Your output data will be stored in:", OUTPUT_BUCKET)
# An IAM role with AmazonGroundTruthExecution policies attached.
# This must be the same role that you used to create this notebook instance.
ROLE_ARN = role
```
## Use the CreateLabelingJob API to Create a 2nd Streaming Labeling Job
```
if(re.search(r'Semantic Segmentation', task_type) is not None or re.match(r'Object Tracking', task_type) is not None or video_frame_task):
LABEL_ATTRIBUTE_NAME2 = LABELING_JOB_NAME2 + '-ref'
else:
LABEL_ATTRIBUTE_NAME2 = LABELING_JOB_NAME2
human_task_config = {
"PreHumanTaskLambdaArn": PRE_HUMAN_TASK_LAMBDA2,
"MaxConcurrentTaskCount": 100, # Maximum of 100 objects will be available to the workteam at any time
"NumberOfHumanWorkersPerDataObject": 1, # We will obtain and consolidate 1 human annotationsfor each image.
"TaskAvailabilityLifetimeInSeconds": 21600, # Your workteam has 6 hours to complete all pending tasks.
"TaskDescription": TASK_DESCRIPTION2,
# If using public workforce, specify "PublicWorkforceTaskPrice"
"WorkteamArn": WORKTEAM_ARN,
"AnnotationConsolidationConfig": {
"AnnotationConsolidationLambdaArn": POST_ANNOTATION_LAMBDA2
},
"TaskKeywords": TASK_KEYWORDS2,
"TaskTimeLimitInSeconds": 600, # Each image must be labeled within 10 minutes.
"TaskTitle": TASK_TITLE2,
"UiConfig": {
UI_CONFIG_PARAM : HUMAN_UI2[0]
}
}
# If you are using the Amazon Mechanical Turk workforce, specify the amount you want to pay a
# worker to label a data object. See https://aws.amazon.com/sagemaker/groundtruth/pricing/ for recommendations.
if (not private_work_team):
human_task_config["PublicWorkforceTaskPrice"] = {
"AmountInUsd": {
"Dollars": 0,
"Cents": 3,
"TenthFractionsOfACent": 6,
}
}
human_task_config["WorkteamArn"] = WORKTEAM_ARN
else:
human_task_config["WorkteamArn"] = WORKTEAM_ARN
ground_truth_request2 = {
"InputConfig": {
"DataSource": {
"SnsDataSource": {
"SnsTopicArn": INPUT_TOPIC_ARN2
}
}
},
"HumanTaskConfig" : human_task_config,
"LabelAttributeName": LABEL_ATTRIBUTE_NAME2,
"LabelCategoryConfigS3Uri" : LABEL_CATEGORIES_S3_URI2,
"LabelingJobName": LABELING_JOB_NAME2,
"OutputConfig": {
"S3OutputPath": OUTPUT_BUCKET,
"SnsTopicArn": OUTPUT_TOPIC_ARN2
},
"RoleArn": ROLE_ARN
}
if(INPUT_MANIFEST != ''):
ground_truth_request2["InputConfig"]["DataSource"]["S3DataSource"] = {"ManifestS3Uri": INPUT_MANIFEST}
```
#### DataAttributes
You should not share explicit, confidential, or personal information or protected health information with the Amazon Mechanical Turk workforce.
If you are using the Amazon Mechanical Turk workforce, you must verify that your data is free of personal, confidential, and explicit content and protected health information by running the following code cell.
```
if (not private_work_team):
ground_truth_request2["InputConfig"]["DataAttributes"]={"ContentClassifiers": ["FreeOfPersonallyIdentifiableInformation","FreeOfAdultContent"]}
print("Your create labeling job request:\n",json.dumps(ground_truth_request2,indent=4))
sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_labeling_job(**ground_truth_request2)
```
## Use the DescribeLabelingJob API to describe 2nd Streaming Labeling Job
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME2)
```
## Publish a new object to your first labeling job [Job 1] once it has started
Once you start a labeling job, you can publish a new request to it using Amazon SNS.
### Configure your Request
You will need to specify `REQUEST` in the following format:
**For non-text objects**
First, make sure that your object is located in `s3_bucket_location`
`{"source-ref": "s3_bucket_location"}`
**For text objects**
`{"source": "Lorem ipsum dolor sit amet"}`
Modify one of these examples to specify your request in the next cell.
```
REQUEST = '<Populate your object as shown above>'
```
If you set `DEFAULT` to `True`, use the following cell to upload a sample image to your S3 bucket and send that image to the labeling job.
```
if(DEFAULT):
!wget https://aws-ml-blog.s3.amazonaws.com/artifacts/gt-labeling-job-resources/example-image.jpg
s3.upload_file('example-image.jpg', BUCKET, 'example-image.jpg')
REQUEST = str({"source-ref": f"s3://{BUCKET}/example-image.jpg"})
print(f'Your request: {REQUEST}')
```
### Publish Your Request
First, check the `LabelCounters` variable for your labeling job using `DescribeLabelingJob`. After you publish your request, you'll see `Unlabeled` increase to `1` (or to the number of objects you send to your labeling job).
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME)['LabelCounters']
```
The following will publish your request to your Amazon SNS input topic.
```
# TopicArn is of the first job [Job 1]
print(f'Your Request: {REQUEST}\n')
if(REQUEST != '<Populate your object as shown above>'):
published_message = sns.publish(TopicArn=INPUT_TOPIC_ARN,Message=REQUEST)
print(f'Published Message: {published_message}')
```
You may need to wait 1 to 2 minutes for your request to appear in `LabelCounters`.
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME)['LabelCounters']
```
After your first job finishes, check the status of your chained job. You should see your request appear in `LabelCounters`.
```
sagemaker_client.describe_labeling_job(LabelingJobName=LABELING_JOB_NAME2)['LabelCounters']
```
## Call StopLabelingJob for your previously launched jobs
To stop your streaming jobs, call StopLabelingJob twice: once with `LABELING_JOB_NAME` and once with `LABELING_JOB_NAME2`.
```
sagemaker_client.stop_labeling_job(LabelingJobName=LABELING_JOB_NAME)
sagemaker_client.stop_labeling_job(LabelingJobName=LABELING_JOB_NAME2)
```
# Mlogit Benchmark 2: Kenneth Train's Heating Data
The purpose of this notebook is to:
<ol>
<li> Demonstrate the use of the pyLogit to estimate conditional logit models.</li>
<li> Benchmark the results reported pyLogit against those reported by the mlogit package.</li>
</ol>
The models estimated in this notebook will be the same models detailed in "Kenneth Train’s exercises using the mlogit package for R." In particular, the following models will be estimated:
<ol>
<li> The model with installation cost and operating cost, without intercepts (p.2).
<pre> mlogit(depvar~ic+oc|0, H) </pre>
</li>
<li> The model that imposes the constraint that r = 0.12 (such that wtp = 8.33) (p. 4).
<pre> H$lcc=H$ic+H$oc/0.12
mlcc <- mlogit(depvar~lcc|0, H)
</pre>
</li>
<li> The model with installation cost, operating cost, and all intercepts except that of the "hp" alternative (p.5).
<pre> mc <- mlogit(depvar~ic+oc, H, reflevel = 'hp')
</pre>
</li>
<li> The model with installation cost divided by income, operating cost, and all intercepts except that of the "hp" alternative (p. 7).
<pre> mi <- mlogit(depvar~oc+I(ic/income), H, reflevel = 'hp')
</pre>
</li>
<li> The model with installation costs, operating costs, alternative specific coefficients for income, and all intercepts except that of the "hp" alternative (p.7).
<pre> mi2 <- mlogit(depvar~oc+ic|income, H, reflevel="hp")
</pre>
</li>
</ol>
## 1. Import Needed libraries
```
from collections import OrderedDict # For recording the model specification
import pandas as pd # For file input/output
import numpy as np # For vectorized math operations
import pylogit as pl # For MNL model estimation and
# conversion from wide to long format
```
## 2. Load and look at the required datasets
```
# Load the Heating data, noting that the data is in wide data format
wide_heating_df = pd.read_csv("../data/heating_data_r.csv")
# Look at the raw Heating data
wide_heating_df.head().T
```
## 3. Convert the wide format dataframes to long format
### 3a. Perform needed data cleaning
The column denoting the choice (depvar) contains strings, so we need to convert it into an integer-based column.
```
# Convert the choice column for the Train data into integers
# Note that we will use a 1 to denote 'choice1' and a 2 to
# represent 'choice2'
wide_heating_df["choice"] = wide_heating_df["depvar"].map(dict(zip(['gc', 'gr',
'ec', 'er',
'hp'],
range(1,6))))
```
For the Heating data, all of the alternatives are available in all choice situations. Note that, in general, this is not the case for choice data. As such we need to have columns that denote the availability of each alternative for each individual.
These columns will all be filled with ones for each row in the wide format dataframes because all of the alternatives are always available for each individual.
```
# Create the needed availability columns for the Heating data
for i in range(1, 6):
wide_heating_df["availability_{}".format(i)] = 1
```
### 3b. Convert the Heating dataset to long format
```
# Look at the columns that we need to account for when converting from
# the wide data format to the long data format.
wide_heating_df.columns
##########
# Define lists of the variables pertaining to each variable type
# that we need to account for in the data format transformation
##########
# Determine the name for the alternative ids in the long format
# data frame
heating_alt_id = "alt_id"
# Determine the column that denotes the id of what we're treating
# as individual observations, i.e. the choice situations.
heating_obs_id_col = "idcase"
# Determine what column denotes the choice that was made
heating_choice_column = "choice"
# Create the list of observation specific variables
heating_ind_variables = ["depvar", "income", "agehed", "rooms", "region"]
# Specify the variables that vary across individuals and some or all alternatives
# Note that each "main" key should be the desired name of the column in the long
# data format. The inner keys should be the alternative ids that have some
# value for the "main" key variable.
heating_alt_varying_variables = {"installation_costs": {1: "ic.gc",
2: "ic.gr",
3: "ic.ec",
4: "ic.er",
5: "ic.hp"},
"operating_costs": {1: "oc.gc",
2: "oc.gr",
3: "oc.ec",
4: "oc.er",
5: "oc.hp"},
}
# Specify the availability variables
heating_availability_variables = OrderedDict()
for alt_id, var in zip(range(1, 6),
["availability_{}".format(i) for i in range(1, 6)]):
heating_availability_variables[alt_id] = var
##########
# Actually perform the conversion to long format
##########
long_heating_df = pl.convert_wide_to_long(wide_data=wide_heating_df,
ind_vars=heating_ind_variables,
alt_specific_vars=heating_alt_varying_variables,
availability_vars=heating_availability_variables,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
new_alt_id_name=heating_alt_id)
# Look at the long format Heating data
long_heating_df.head()
```
## 4. Create desired variables
```
# Create the life-cycle cost variable needed for model 2 where
# we assume the discount rate, r, is 0.12.
long_heating_df["life_cycle_cost"] = (long_heating_df["installation_costs"] +
long_heating_df["operating_costs"] / 0.12)
# Create the installation cost divided by income variable
long_heating_df["installation_cost_burden"] = (long_heating_df["installation_costs"] /
long_heating_df["income"])
```
For numeric stability, it is advisable to scale one's variables so that the estimated coefficients are similar in absolute magnitude and, if possible, close to 1 in absolute value (neither extremely small nor extremely large). The variables below are left in their original units so the results can be compared directly with mlogit's output.
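As an illustration only, a minimal sketch of such a rescaling (not used in the models below) might divide the cost variables by 1,000 so their coefficients land closer to unity:
```
# A minimal sketch (not used in the estimations below): rescale the cost
# variables so the estimated coefficients are closer to 1 in magnitude.
long_heating_df["installation_costs_scaled"] = long_heating_df["installation_costs"] / 1000.0
long_heating_df["operating_costs_scaled"] = long_heating_df["operating_costs"] / 1000.0
```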
## 5. Specify and estimate the desired models needed for benchmarking
### 5a. The model with installation cost and operating cost, without intercepts
```
# Create the model specification
model_1_spec = OrderedDict()
model_1_names = OrderedDict()
# Note that for the specification dictionary, the
# keys should be the column names from the long format
# dataframe and the values should be a list with a combination
# of alternative id's and/or lists of alternative id's. There
# should be one element for each beta that will be estimated
# in relation to the given column. Lists of alternative id's
# mean that all of the alternatives in the list will get a
# single beta for them, for the given variable.
# The names dictionary should contain one name for each
# element (that is each alternative id or list of alternative
# ids) in the specification dictionary value for the same
# variable
model_1_spec["installation_costs"] = [range(1, 6)]
model_1_names["installation_costs"] = ["installation_costs"]
model_1_spec["operating_costs"] = [range(1, 6)]
model_1_names["operating_costs"] = ["operating_costs"]
# Create an instance of the MNL model class
model_1 = pl.create_choice_model(data=long_heating_df,
alt_id_col=heating_alt_id,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
specification=model_1_spec,
model_type="MNL",
names=model_1_names)
# Estimate the given model, starting from a point of all zeros
# as the initial values.
model_1.fit_mle(np.zeros(2), method='newton-cg')
# Look at the estimation summaries
model_1.get_statsmodels_summary()
# Look at the 'standard summary' since it includes robust p-values
model_1.print_summaries()
```
#### Compare with mlogit
The call from mlogit was as follows:
<pre>
Call:
mlogit(formula = depvar ~ ic + oc | 0, data = H, method = "nr",
print.level = 0)
Frequencies of alternatives:
ec er gc gr hp
0.071111 0.093333 0.636667 0.143333 0.055556
nr method
4 iterations, 0h:0m:0s
g'(-H)^-1g = 1.56E-07
gradient close to zero
Coefficients :
Estimate Std. Error t-value Pr(>|t|)
ic -0.00623187 0.00035277 -17.665 < 2.2e-16 \*\*\*
oc -0.00458008 0.00032216 -14.217 < 2.2e-16 \*\*\*
\---
Signif. codes: 0 ‘\*\*\*’ 0.001 ‘\*\*’ 0.01 ‘\*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log-Likelihood: -1095.2
</pre>
As can be seen, the estimates, standard errors, t-values, and log-likelihood agree. The p-values differ but this is because mlogit calculates its p-values based on a t-distribution whereas pyLogit uses an asymptotic normal distribution.
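To see how the two conventions differ, one can recompute a two-sided p-value for the same t-value under both distributions. The sketch below assumes 900 choice situations and 2 estimated parameters for the degrees of freedom; both approaches lead to the same conclusion here, and the point is only to show how each value is computed.
```
# A sketch comparing the two-sided p-value implied by an asymptotic normal
# distribution (pyLogit) versus a t-distribution (mlogit) for the same statistic.
# Degrees of freedom are an assumption: 900 observations - 2 estimated parameters.
from scipy import stats

t_value = -17.665   # t-value reported for 'ic' above
dof = 900 - 2

p_normal = 2 * stats.norm.sf(abs(t_value))       # asymptotic-normal p-value
p_student = 2 * stats.t.sf(abs(t_value), dof)    # t-distribution p-value
print("Normal-based p-value: {:.3e}".format(p_normal))
print("t-based p-value:      {:.3e}".format(p_student))
```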
### 5b. The model that imposes the constraint that the discount rate, r = 0.12, still without intercepts.
```
# Create the model specification
model_2_spec = OrderedDict()
model_2_names = OrderedDict()
model_2_spec["life_cycle_cost"] = [range(1, 6)]
model_2_names["life_cycle_cost"] = ["installation_costs"]
# Create an instance of the MNL model class
model_2 = pl.create_choice_model(data=long_heating_df,
alt_id_col=heating_alt_id,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
specification=model_2_spec,
model_type="MNL",
names=model_2_names)
# Estimate the given model, starting from a point of all zeros
# as the initial values.
model_2.fit_mle(np.zeros(1), method='newton-cg')
# Look at the estimation summaries
model_2.get_statsmodels_summary()
```
#### Compare with mlogit
Look at the corresponding results from mlogit:
<pre>
Call:
mlogit(formula = depvar ~ lcc | 0, data = H, method = "nr", print.level = 0)
Frequencies of alternatives:
ec er gc gr hp
0.071111 0.093333 0.636667 0.143333 0.055556
nr method
5 iterations, 0h:0m:0s
g'(-H)^-1g = 9.32E-05
successive function values within tolerance limits
Coefficients :
Estimate Std. Error t-value Pr(>|t|)
lcc -7.1585e-04 4.2761e-05 -16.741 < 2.2e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log-Likelihood: -1248.7
</pre>
As before, all computed values agree except for the p-values, which we already know to be different due to the distribution being used to compute the p-values (t-distribution vs normal distribution).
### 5c. The model with installation cost, operating cost, and all intercepts except that of the "hp" alternative
```
# Create the model specification
model_3_spec = OrderedDict()
model_3_names = OrderedDict()
model_3_spec["intercept"] = range(1, 5)
model_3_names["intercept"] = ["ASC: {}".format(x)
for x in ["gc", "gr", "ec", "er"]]
model_3_spec["installation_costs"] = [range(1, 6)]
model_3_names["installation_costs"] = ["installation_costs"]
model_3_spec["operating_costs"] = [range(1, 6)]
model_3_names["operating_costs"] = ["operating_costs"]
# Create an instance of the MNL model class
model_3 = pl.create_choice_model(data=long_heating_df,
alt_id_col=heating_alt_id,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
specification=model_3_spec,
model_type="MNL",
names=model_3_names)
# Estimate the given model, starting from a point of all zeros
# as the initial values.
model_3.fit_mle(np.zeros(6))
# Look at the estimation summaries
model_3.get_statsmodels_summary()
```
#### Compare with mlogit
Look at the corresponding results from mlogit:
<pre>
Call:
mlogit(formula = depvar ~ ic + oc, data = H, reflevel = "hp",
method = "nr", print.level = 0)
Frequencies of alternatives:
hp ec er gc gr
0.055556 0.071111 0.093333 0.636667 0.143333
nr method
6 iterations, 0h:0m:0s
g'(-H)^-1g = 9.58E-06
successive function values within tolerance limits
Coefficients :
Estimate Std. Error t-value Pr(>|t|)
ec:(intercept) 1.65884594 0.44841936 3.6993 0.0002162 ***
er:(intercept) 1.85343697 0.36195509 5.1206 3.045e-07 ***
gc:(intercept) 1.71097930 0.22674214 7.5459 4.485e-14 ***
gr:(intercept) 0.30826328 0.20659222 1.4921 0.1356640
ic -0.00153315 0.00062086 -2.4694 0.0135333 *
oc -0.00699637 0.00155408 -4.5019 6.734e-06 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log-Likelihood: -1008.2
McFadden R^2: 0.013691
Likelihood ratio test : chisq = 27.99 (p.value = 8.3572e-07)
</pre>
Again, all calculated values except for the p-values and McFadden's $R^2$ seem to agree.
As noted in the mlogit benchmark \# 1 notebook, the mlogit values for McFadden's $R^2$ seem to be incorrect. Based off of the formula: $$\begin{aligned} \textrm{McFadden's }R^2 &= 1 - \frac{\mathscr{L}_M}{\mathscr{L}_0} \\
\textrm{where } \mathscr{L}_M &= \textrm{the fitted log-likelihood} \\
\mathscr{L}_0 &= \textrm{the null log-likelihood}\end{aligned}$$
from "Coefficients of Determination for Multiple Logistic Regression Analysis" by Scott Menard (2000), The American Statistician, 54:1, 17-24, the calculated value of McFadden's R^2 should be 0.303947 as reported by pyLogit.
Note that the null log-likelihood and McFadden's $R^2$ are recomputed below to verify their correctness.
```
# Note that every observation in the Heating dataset
# has 5 available alternatives, therefore the null
# probability is 0.20
null_prob = 0.20
# Calculate how many observations are in the Heating
# dataset
num_heating_obs = wide_heating_df.shape[0]
# Calculate the Heating dataset's null log-likelihood
null_heating_log_likelihood = (num_heating_obs *
np.log(null_prob))
# Determine whether pyLogit's null log-likelihood is correct
correct_null_ll = np.allclose(null_heating_log_likelihood,
model_3.null_log_likelihood)
print "pyLogit's null log-likelihood is correct:", correct_null_ll
# Calculate McFadden's R^2
mcfaddens_r2 = 1 - (model_3.log_likelihood / model_3.null_log_likelihood)
print "McFadden's R^2 is {:.5f}".format(mcfaddens_r2)
```
### 5d. The model with installation cost divided by income, operating cost, and all intercepts except that for "hp"
```
# Create the model specification
model_4_spec = OrderedDict()
model_4_names = OrderedDict()
model_4_spec["intercept"] = range(1, 5)
model_4_names["intercept"] = ["ASC: {}".format(x)
for x in ["gc", "gr", "ec", "er"]]
model_4_spec["installation_cost_burden"] = [range(1, 6)]
model_4_names["installation_cost_burden"] = ["installation_cost_burden"]
model_4_spec["operating_costs"] = [range(1, 6)]
model_4_names["operating_costs"] = ["operating_costs"]
# Create an instance of the MNL model class
model_4 = pl.create_choice_model(data=long_heating_df,
alt_id_col=heating_alt_id,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
specification=model_4_spec,
model_type="MNL",
names=model_4_names)
# Estimate the given model, starting from a point of all zeros
# as the initial values.
model_4.fit_mle(np.zeros(6))
# Look at the estimation summaries
model_4.get_statsmodels_summary()
```
#### Compare with mlogit
Look at the results from mlogit:
<pre>
Call:
mlogit(formula = depvar ~ oc + I(ic/income), data = H, reflevel = "hp",
method = "nr", print.level = 0)
Frequencies of alternatives:
hp ec er gc gr
0.055556 0.071111 0.093333 0.636667 0.143333
nr method
6 iterations, 0h:0m:0s
g'(-H)^-1g = 1.03E-05
successive function values within tolerance limits
Coefficients :
Estimate Std. Error t-value Pr(>|t|)
ec:(intercept) 1.8700773 0.4364248 4.2850 1.827e-05 ***
er:(intercept) 1.9340707 0.3599991 5.3724 7.768e-08 ***
gc:(intercept) 1.9264254 0.2034031 9.4710 < 2.2e-16 ***
gr:(intercept) 0.4047710 0.2011694 2.0121 0.04421 *
oc -0.0071066 0.0015518 -4.5797 4.657e-06 ***
I(ic/income) -0.0027658 0.0018944 -1.4600 0.14430
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log-Likelihood: -1010.2
McFadden R^2: 0.011765
Likelihood ratio test : chisq = 24.052 (p.value = 5.9854e-06)
</pre>
Again, all calculated values except for the p-values and McFadden's $R^2$ seem to agree.
### 5e. The model with installation costs, operating costs, alternative specific coefficients for income, and all intercepts except that for "hp"
```
# Create the model specification
model_5_spec = OrderedDict()
model_5_names = OrderedDict()
model_5_spec["intercept"] = range(1, 5)
model_5_names["intercept"] = ["ASC: {}".format(x)
for x in ["gc", "gr", "ec", "er"]]
model_5_spec["installation_costs"] = [range(1, 6)]
model_5_names["installation_costs"] = ["installation_costs"]
model_5_spec["operating_costs"] = [range(1, 6)]
model_5_names["operating_costs"] = ["operating_costs"]
model_5_spec["income"] = range(1, 5)
model_5_names["income"] = ["income_{}".format(x)
for x in ["gc", "gr", "ec", "er"]]
# Create an instance of the MNL model class
model_5 = pl.create_choice_model(data=long_heating_df,
alt_id_col=heating_alt_id,
obs_id_col=heating_obs_id_col,
choice_col=heating_choice_column,
specification=model_5_spec,
model_type="MNL",
names=model_5_names)
# Estimate the given model, starting from a point of all zeros
# as the initial values.
model_5.fit_mle(np.zeros(10))
# Look at the estimation summaries
model_5.get_statsmodels_summary()
```
#### Compare with mlogit
Look at the output from mlogit:
<pre>
Call:
mlogit(formula = depvar ~ oc + ic | income, data = H, reflevel = "hp",
method = "nr", print.level = 0)
Frequencies of alternatives:
hp ec er gc gr
0.055556 0.071111 0.093333 0.636667 0.143333
nr method
6 iterations, 0h:0m:0s
g'(-H)^-1g = 6.27E-06
successive function values within tolerance limits
Coefficients :
Estimate Std. Error t-value Pr(>|t|)
ec:(intercept) 1.95445797 0.70353833 2.7780 0.0054688 **
er:(intercept) 2.30560852 0.62390478 3.6954 0.0002195 ***
gc:(intercept) 2.05517018 0.48639682 4.2253 2.386e-05 ***
gr:(intercept) 1.14158139 0.51828845 2.2026 0.0276231 *
oc -0.00696000 0.00155383 -4.4792 7.491e-06 ***
ic -0.00153534 0.00062251 -2.4664 0.0136486 *
ec:income -0.06362917 0.11329865 -0.5616 0.5743846
er:income -0.09685787 0.10755423 -0.9005 0.3678281
gc:income -0.07178917 0.08878777 -0.8085 0.4187752
gr:income -0.17981159 0.10012691 -1.7958 0.0725205 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log-Likelihood: -1005.9
McFadden R^2: 0.01598
Likelihood ratio test : chisq = 32.67 (p.value = 1.2134e-05)
</pre>
As before, pyLogit and mlogit agree on all calculated values except for the p-values and McFadden's $R^2$.
# SETUP: Train and optimize a pet detector using Azure ML
This notebook will setup the Azure ML workspace and resources for the pet detector project. The Azure ML resources that we will be using in the pet detector project are:
1. Workspace
1. Experiment
1. Azure Compute Cluster
1. Datastore
1. HyperDrive
1. Azure Container Instance Deployment
Let's begin by defining a set of setup parameters that we'll be using throughout this notebook and in the pet detector project.
## Copying script files to the local directory of this notebook project
There is a set of helper Python modules that we will be using in the demo to make the code in the main demo script easier to read. This section must be run to copy the helper files into the ```scripts``` directory of the machine.
The reason we package these scripts as cells in the notebook is to work around a limitation that we currently have in Azure Notebooks, where files present in the Project directory aren't reflected in the execution directory of the DSVM that we are using as a compute kernel. This will be fixed in the near future, at which point this step will no longer be required.
- scripts/config.py
- scripts/retrain.py
- scripts/image_helpers.py
- scripts/oxford_dataset_helpers.py
```
!mkdir scripts
```
The `config.py` module contains the configuration options that are used in this demo. You will need to fill in the blank values below for your Azure subscription, resource group, and AML Compute Cluster names. You will also need to create an Azure Blob container, named ```default```, in the Azure Storage account for the datastore; it will be used later in the script.
```
%%writefile scripts/config.py
class AMLConfig:
pass
AMLConfig.workspace_name = ''
AMLConfig.experiment_name = ''
AMLConfig.resource_group = ''
AMLConfig.compute_name = ''
AMLConfig.training_script_filename = 'train.py'
AMLConfig.scoring_script_filename = 'score.py'
AMLConfig.subscription_id = ''
AMLConfig.storage_account_name = ''
AMLConfig.storage_account_key = ''
AMLConfig.datastore_name = 'default'
AMLConfig.container_name = 'default'
AMLConfig.images_dir = 'images'
AML = AMLConfig()
```
The ```image_helpers.py``` module contains some helper methods that make it easier to plot images in a grid in a Jupyter notebook.
```
%%writefile scripts/image_helpers.py
from pathlib import Path
from PIL import Image
import matplotlib.pyplot as plt
def get_sample_images_for_each_species(dirname):
d = Path(dirname)
species_dirs = [d for d in d.iterdir() if d.is_dir()]
species_images_and_labels = []
for species_dir in species_dirs:
for image_path in species_dir.iterdir():
image = Image.open(image_path)
image.thumbnail((224, 224), Image.ANTIALIAS)
image_label = species_dir.parts[-1].lower().replace('_', ' ')
species_images_and_labels.append((image, image_label))
break
return species_images_and_labels
def plot_images_in_grid(images_data, number_columns):
f, subplots = plt.subplots(len(images_data) // number_columns + 1, number_columns)
f.set_size_inches(16, 16)
row = 0
col = 0
for record in images_data:
subplot = subplots[row, col]
subplot.imshow(record[0])
subplot.set_axis_off()
subplot.set_title(record[1], color='#008000')
col += 1
if col == number_columns:
row += 1
col = 0
for c in range(col, number_columns):
subplots[row, c].set_axis_off()
```
The following file is a lightly modified version of the original ```retrain.py``` script that is used in many Tensorflow samples. It adds some additional logic to emit validation accuracy training data to the Hyperdrive logs.
```
%%writefile scripts/retrain.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple transfer learning with Inception v4 or Mobilenet models.
With support for TensorBoard.
This example shows how to take a Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
from azureml.core import Run
def hyperdrive_log(parameter_name, value):
if FLAGS.hyperdrive:
Run.get_context().log(parameter_name, value)
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = [
os.path.join(image_dir,item)
for item in gfile.ListDirectory(image_dir)]
sub_dirs = sorted(item for item in sub_dirs
if gfile.IsDirectory(item))
for sub_dir in sub_dirs:
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
'WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, architecture):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
architecture: The name of the model architecture.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + architecture + '.txt'
def create_model_graph(model_info):
""""Creates a graph from saved GraphDef file and returns a Graph object.
Args:
model_info: Dictionary containing information about the model architecture.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
graph_def,
name='',
return_elements=[
model_info['bottleneck_tensor_name'],
model_info['resized_input_tensor_name'],
]))
return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract(data_url):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
Args:
data_url: Web location of the tar file containing the pretrained model.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
#tf.logging.info('Successfully downloaded', filename, statinfo.st_size,
# 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
architecture: The name of the model architecture.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, architecture)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
architecture: The name of the model architecture.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
architecture: The name of the model architecture.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# sending running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck_values)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, input_width, input_height,
input_depth, input_mean, input_std):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
graph.
input_width: Horizontal size of expected input image to model.
input_height: Vertical size of expected input image to model.
input_depth: How many channels the expected input image should have.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
offset_image = tf.subtract(brightened_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
bottleneck_tensor_size):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
bottleneck_tensor_size: How many entries in the bottleneck vector.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[None, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
return
def prepare_file_system():
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def create_model_info(architecture):
"""Given the name of a model architecture, returns information about it.
There are different base image recognition pretrained models that can be
retrained using transfer learning, and this function translates from the name
of a model to the attributes that are needed to download and train with it.
Args:
architecture: Name of a model architecture.
Returns:
Dictionary of information about the model, or None if the name isn't
recognized
Raises:
ValueError: If architecture name is unknown.
"""
architecture = architecture.lower()
if architecture == 'inception_v3':
# pylint: disable=line-too-long
data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
bottleneck_tensor_name = 'pool_3/_reshape:0'
bottleneck_tensor_size = 2048
input_width = 299
input_height = 299
input_depth = 3
resized_input_tensor_name = 'Mul:0'
model_file_name = 'classify_image_graph_def.pb'
input_mean = 128
input_std = 128
elif architecture.startswith('mobilenet_'):
parts = architecture.split('_')
if len(parts) != 3 and len(parts) != 4:
tf.logging.error("Couldn't understand architecture name '%s'",
architecture)
return None
version_string = parts[1]
if (version_string != '1.0' and version_string != '0.75' and
version_string != '0.50' and version_string != '0.25'):
tf.logging.error(
""""The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',
but found '%s' for architecture '%s'""",
version_string, architecture)
return None
size_string = parts[2]
if (size_string != '224' and size_string != '192' and
size_string != '160' and size_string != '128'):
tf.logging.error(
"""The Mobilenet input size should be '224', '192', '160', or '128',
but found '%s' for architecture '%s'""",
size_string, architecture)
return None
if len(parts) == 3:
is_quantized = False
else:
if parts[3] != 'quantized':
tf.logging.error(
"Couldn't understand architecture suffix '%s' for '%s'", parts[3],
architecture)
return None
is_quantized = True
data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'
data_url += version_string + '_' + size_string + '_frozen.tgz'
bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
bottleneck_tensor_size = 1001
input_width = int(size_string)
input_height = int(size_string)
input_depth = 3
resized_input_tensor_name = 'input:0'
if is_quantized:
model_base_name = 'quantized_graph.pb'
else:
model_base_name = 'frozen_graph.pb'
model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
model_file_name = os.path.join(model_dir_name, model_base_name)
input_mean = 127.5
input_std = 127.5
else:
tf.logging.error("Couldn't understand architecture name '%s'", architecture)
raise ValueError('Unknown architecture', architecture)
return {
'data_url': data_url,
'bottleneck_tensor_name': bottleneck_tensor_name,
'bottleneck_tensor_size': bottleneck_tensor_size,
'input_width': input_width,
'input_height': input_height,
'input_depth': input_depth,
'resized_input_tensor_name': resized_input_tensor_name,
'model_file_name': model_file_name,
'input_mean': input_mean,
'input_std': input_std,
}
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
input_std):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
input_width: Desired width of the image fed into the recognizer graph.
input_height: Desired height of the image fed into the recognizer graph.
input_depth: Desired channels of the image fed into the recognizer graph.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
offset_image = tf.subtract(resized_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
return jpeg_data, mul_image
def run(flags):
global FLAGS
FLAGS = flags
main(None)
def train(architecture, image_dir, output_dir, bottleneck_dir, model_dir, learning_rate, training_steps, use_hyperdrive):
class Flags:
pass
flags = Flags()
Flags.image_dir = image_dir
Flags.output_graph = os.path.join(output_dir, 'output_graph.pb')
Flags.intermediate_output_graphs_dir = os.path.join(output_dir, 'intermediate_graph/')
Flags.intermediate_store_frequency = 0
Flags.output_labels = os.path.join(output_dir, 'output_labels.txt')
Flags.summaries_dir = os.path.join(output_dir, 'retrain_logs')
Flags.how_many_training_steps = training_steps
Flags.learning_rate = learning_rate
Flags.testing_percentage = 10
Flags.validation_percentage = 10
Flags.eval_step_interval = 10
Flags.train_batch_size = 100
Flags.test_batch_size = -1
Flags.validation_batch_size = 100
Flags.print_misclassified_test_images = False
Flags.model_dir = model_dir
Flags.bottleneck_dir = bottleneck_dir
Flags.final_tensor_name = 'final_result'
Flags.flip_left_right = False
Flags.random_crop = False
Flags.random_scale = 0
Flags.random_brightness = 0
Flags.architecture = architecture
Flags.hyperdrive = use_hyperdrive
run(flags)
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare necessary directories that can be used during training
prepare_file_system()
# Gather information about the model architecture we'll be using.
model_info = create_model_info(FLAGS.architecture)
if not model_info:
tf.logging.error('Did not recognize architecture flag')
return -1
# Set up the pre-trained graph.
maybe_download_and_extract(model_info['data_url'])
graph, bottleneck_tensor, resized_image_tensor = (
create_model_graph(model_info))
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
with tf.Session(graph=graph) as sess:
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
model_info['input_width'], model_info['input_height'],
model_info['input_depth'], model_info['input_mean'],
model_info['input_std'])
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, model_info['input_width'],
model_info['input_height'], model_info['input_depth'],
model_info['input_mean'], model_info['input_std'])
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.architecture)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(
len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,
model_info['bottleneck_tensor_size'])
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
hyperdrive_log('validation_accuracy', validation_accuracy)
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(sess, graph, intermediate_file_name)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
hyperdrive_log('final_accuracy', test_accuracy)
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
tf.logging.info('%70s %s' %
(test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
save_graph_to_file(sess, graph, FLAGS.output_graph)
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many steps to store intermediate graph. If "0" then will not
store.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--architecture',
type=str,
default='inception_v3',
help="""\
Which model architecture to use. 'inception_v3' is the most accurate, but
also the slowest. For faster or smaller models, choose a MobileNet with the
form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
less accurate, but smaller and faster network that's 920 KB on disk and
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
parser.add_argument(
'--hyperdrive',
type=bool,
default=False,
help="""\
Whether to log validation accuracy to HyperDrive logs.
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
%%writefile scripts/oxford_dataset_helpers.py
import shutil, tarfile, os, re
from os.path import basename, isfile
from urllib.parse import urlparse
from urllib.request import urlopen
from pathlib import Path
# Fetch a file from uri, unzip and untar it into its own directory.
def fetch_and_untar(uri):
# Parse the uri to extract the local filename
parsed_uri = urlparse(uri)
local_filename = basename(parsed_uri.path)
# If file is not already on disk, retrieve from uri
if not isfile(local_filename):
with urlopen(uri) as response:
with open(local_filename, 'bw+') as f:
shutil.copyfileobj(response, f)
# Expand the archive
with tarfile.open(local_filename) as tar:
tar.extractall()
# The script below will rearrange the files so that all of the photos for a specific breed of dog
# or cat will be stored in its own directory, where the name of the directory is the name of the
# pet's breed.
def move_images_into_labelled_directories(image_dir):
images_path = Path(image_dir)
extract_breed_from_filename = re.compile(r'([^/]+)_\d+\.jpg$')
for filename in os.listdir(images_path):
match = extract_breed_from_filename.match(filename)
if match is not None:
breed = match.group(1)
if not os.path.exists(images_path / breed):
os.makedirs(images_path / breed)
src_path = images_path / filename
dest_path = images_path / breed / filename
shutil.move(src_path, dest_path)
```
## Initialize the Azure ML Resources required by this demo
In this section of the notebook, we will initialize all of the Azure ML resources that are required by the demo:
- Create a Workspace defined by the ```AML.workspace_name``` config
- Create an Experiment defined by the ```AML.experiment_name``` config
Create the Azure ML Workspace using the name from our configuration
```
from scripts.config import AML
from azureml.core import Workspace
ws = Workspace.create(name=AML.workspace_name,
subscription_id=AML.subscription_id,
resource_group=AML.resource_group,
exist_ok=True)
ws.name
```
Create the Azure ML Experiment using the name from our configuration
```
from azureml.core import Experiment
experiment = Experiment(ws, AML.experiment_name)
experiment
```
## Download and transform the data in the Oxford IIIT Pets dataset
The [Oxford IIIT Pets](http://www.robots.ox.ac.uk/~vgg/data/pets/) dataset contains photos of 37 breeds of dogs and cats, with approximately 200 photos of each one. The dataset is ~800MB in size. All of the photos are stored in a single directory, with the breed of the dog or cat stored in the filename of the photo.
There are two files, one that contains the images, and another that contains the annotations of the image. This will create two local directories, ```images``` and ```annotations```, that contain the contents of the unzipped and untarred files.
The code in this section only needs to be run **once** for local training in a compute kernel.
```
from scripts.oxford_dataset_helpers import fetch_and_untar, move_images_into_labelled_directories
fetch_and_untar('http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz')
move_images_into_labelled_directories(AML.images_dir)
```
## Using transfer learning, train a mobilenet model against the training dataset
Perform the first training run. This writes out the intermediate bottleneck files to the ```models/bottleneck``` directory; they will later be uploaded to the Datastore that we use to optimize the model with Hyperdrive.
```
%%time
import scripts.retrain as rt
rt.train(architecture='mobilenet_0.50_224',
image_dir='images',
output_dir='outputs',
bottleneck_dir='bottleneck',
model_dir='model',
learning_rate=0.00008,
training_steps=500,
use_hyperdrive=False)
```
## Create an Azure ML Datastore
We will set the previously created Azure Blob container as the default Datastore for this workspace.
```
from azureml.core import Datastore
datastore = Datastore.register_azure_blob_container(workspace=ws,
datastore_name=AML.datastore_name,
container_name=AML.container_name,
account_name=AML.storage_account_name,
account_key=AML.storage_account_key)
datastore.upload(src_dir='./images', target_path='images', overwrite=True, show_progress=True)
datastore.upload(src_dir='./bottleneck', target_path='bottleneck', overwrite=True, show_progress=True)
datastore.upload(src_dir='./model', target_path='model', overwrite=True, show_progress=True)
ws.set_default_datastore(name=AML.datastore_name)
```
## AML Compute cluster creation
The cluster should be named ```nc6cluster``` per the configuration files. The cell below looks for an existing compute target with that name and, if none is found, provisions a new AML Compute cluster through the provisioning APIs (the same thing you could do manually in the portal).
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
try:
compute_target = ComputeTarget(workspace=ws, name=AMLConfig.compute_name)
print('Found existing compute target.')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
max_nodes=4)
# create the cluster
compute_target = ComputeTarget.create(ws, AMLConfig.compute_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
## Setup complete
At this point, everything for a one-time setup of this workspace in your subscription should be complete. **If you switch containers, however, you may need to re-run the part of the script that writes ```retrain.py``` out to disk.**
| github_jupyter |
# 12- InputOutput
```
from scipy import *
```
## File handling
### Interacting with files
```
myfile = open('measurement.dat','r')
print(myfile.read())
myfile.close() # closes the file object
with open('measurement.dat','r') as myfile:
print(myfile.read())
name='someotherfile.dat'
myfile = open(name,'w')
myfile.write('some data')
a = 1/0
myfile.write('other data')
myfile.close()
myfile.closed
with open(name,'w') as myfile:
myfile.write('some data')
a = 1/0
myfile.write('other data')
myfile.closed
```
### Files are iterable
```
with open('tem.dat','r') as myfile:
for line in myfile:
data = line.split(';')
print('time {} sec temperature {} C'.format(data[0],data[1]))
with open('tem.dat','r') as myfile:
data=list(myfile)
print(data)
```
## NumPy methods
```
x = range(100) # 100 integers
savetxt('test.txt',x,delimiter=',') # use comma instead of space
savetxt('test.txt',x,fmt='%d') # integer format instead of float with e
filename = 'test.txt'
data = loadtxt(filename)
data
```
## Pickling
```
import pickle
with open('file.dat','wb') as myfile:
a = random.rand(20,20)
b = 'hello world'
pickle.dump(a,myfile) # first call: first object
pickle.dump(b,myfile) # second call: second object
with open('file.dat','rb') as myfile:
numbers = pickle.load(myfile) # restores the array
text = pickle.load(myfile) # restores the string
print(numbers[-1,1], text)
a = [1,2,3,4]
pickle.dumps(a) # returns '(lp0\nI1\naI2\naI3\naI4\na.'
b = {'a':1,'b':2}
pickle.dumps(b) # returns "(dp0\nS'a'\np1\nI1\nsS'b'\np2\nI2\ns."
```
## Shelves
```
from contextlib import closing
import shelve as sv
# opens a data file (creates it before if necessary)
with closing(sv.open('datafile')) as data:
A = array([[1,2,3],[4,5,6]])
data['my_matrix'] = A
from contextlib import closing
import shelve as sv
with closing(sv.open('datafile')) as data: # opens a data file
A = data['my_matrix']
A
```
## Reading and writing Matlab data files
```
import scipy.io
data = scipy.io.loadmat('datafile.mat')
print(data)
data ={}
data['x'] = x = array([1,2,3])
data['y'] = 2*x
scipy.io.savemat('datafile.mat',data)
```
## Reading and writing images
```
import scipy.misc as sm
# read image to array
im = sm.imread("test.jpg")
print(im.shape) # (128, 128, 3)
print(im.dtype) # uint8
# resize image
im_small = sm.imresize(im, (64,64))
print(im_small.shape) # (64, 64, 3)
# write result to new image file
sm.imsave("test_small.jpg", im_small)
import scipy.misc
```
| github_jupyter |
<a href="https://colab.research.google.com/github/arashash/deep_exercises/blob/main/Ch2_Linear-Algebra/Ch2_Exam2_extra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import matplotlib.pyplot as plt
import ipywidgets as widgets
import numpy as np
import torch
```
# Chapter 2 - Linear Algebra
## 2.7 Eigendecomposition
### Q1 [30 points, S]
Using [Gershgorin Circle Theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem), give the bounds of the eigenvalues of matrix $A$,
$$
\begin{split}\mathbf{A} = \begin{bmatrix}
1.0 & 0.1 & 0.1 & 0.1 \\
0.1 & 3.0 & 0.2 & 0.3 \\
0.1 & 0.2 & 5.0 & 0.5 \\
0.1 & 0.3 & 0.5 & 9.0
\end{bmatrix}\end{split}
$$
Also explain, for any matrix $A$, under what condition the bounds are tight and can give good approximations for the eigenvalues?
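For reference, a minimal sketch (one possible approach, not part of the original exercise text) of computing the Gershgorin disc centres and radii for this matrix with NumPy:
```
import numpy as np

A = np.array([[1.0, 0.1, 0.1, 0.1],
              [0.1, 3.0, 0.2, 0.3],
              [0.1, 0.2, 5.0, 0.5],
              [0.1, 0.3, 0.5, 9.0]])

centres = np.diag(A)                             # disc centres a_ii
radii = np.abs(A).sum(axis=1) - np.abs(centres)  # radii R_i = sum over j != i of |a_ij|
# A is symmetric, so its eigenvalues are real and each lies in the union of the
# intervals [centre - radius, centre + radius]
for c, r in zip(centres, radii):
    print('[{:.1f}, {:.1f}]'.format(c - r, c + r))
```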
## Q2 [20 M, 20 H]
Consider a deep (large N) feedforward network without activations, consisting of the same square matrix with dimension $k\times k$ repeated at every layer,
$$
\mathbf{v}_{out} = \mathbf{A}\cdot \mathbf{A}\cdots \mathbf{A} \mathbf{v}_{in} = \mathbf{A}^N \mathbf{v}_{in}
$$
Using the widget below, determine how the matrix dimension and the standard deviation of the random Gaussian initialization affect whether the output blows up.
```
#@markdown Execute this cell to simulate the question for different dimensions
@widgets.interact
def plot_iterated_mapping(k=(0, 100, 1), std=(0, 1, 0.01)):
A = std*torch.randn(k, k, dtype=torch.float64)
v_in = torch.randn(k, 1, dtype=torch.float64)
# Calculate the sequence of norms after repeatedly applying `A`
norm_list = [torch.norm(v_in).item()]
for i in range(1, 100):
v_in = A @ v_in
norm_list.append(torch.norm(v_in).item())
plt.ylabel('Value')
plt.xlabel('Iteration')
plt.plot(torch.arange(0, 100), norm_list)
```
Now, using the same setup, write a function that estimates the average largest eigenvalue of A given the STD of the initialization and the dimension $k$. Then, using your function with a fixed STD, try many $k$ values and plot the estimated average largest eigenvalue vs $\sqrt{k}$. Then, using the Jupyter interactive widget, plot it for different STD values. What relation do you observe between these three variables?
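One possible sketch of such an estimator (a hypothetical helper shown only as a starting point; it averages the spectral radius over several random draws):
```
import numpy as np

def estimate_avg_largest_eigval(std, k, n_trials=20):
    # average the largest eigenvalue magnitude of std * randn(k, k) over n_trials draws
    largest = []
    for _ in range(n_trials):
        A = std * np.random.randn(k, k)
        largest.append(np.abs(np.linalg.eigvals(A)).max())
    return float(np.mean(largest))
```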
## 2.8 Singular Value Decomposition
## Q3 [20 M, 10 H]
Programmatically download [this](https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png) picture, convert it to a $512\times 512$ grayscale image with uint8 datatype, load it into a 2-D *NumPy* array, and display it properly using *plt.imshow*. Then calculate the SVD of the image, keep the top k singular values, set the rest to zero, and display the image using the interactive Jupyter widget for different possible values of k. At which value of k do you start to see a significant drop in image quality? How many bytes does it take to store the original array? How many bytes does it take to store the decomposed arrays when using only the top k singular values? Calculate the compression ratio for your selected ideal $k$ value. Save the array as JPEG and check the file size. What is the approximate JPEG compression ratio?
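A minimal sketch of the truncated-SVD reconstruction step (assuming `img` is already the $512\times 512$ uint8 grayscale array described above):
```
import numpy as np

def topk_reconstruction(img, k):
    # keep only the k largest singular values and rebuild the image
    U, S, Vt = np.linalg.svd(img.astype(np.float64), full_matrices=False)
    approx = U[:, :k] @ np.diag(S[:k]) @ Vt[:k, :]
    return np.clip(approx, 0, 255).astype(np.uint8)

# storing the truncated factors costs k * (512 + 1 + 512) values,
# versus 512 * 512 values for the original array
```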
| github_jupyter |
# An RNN model for temperature data
This time we will be working with real data: daily (Tmin, Tmax) temperature series from 36 weather stations spanning 50 years. Note that a pretty good predictor already exists for temperatures: the average of the temperatures on the same day of the year in the N previous years. It is not clear whether RNNs can do better, but we will see how far they can go.
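As a rough illustration of that baseline (a sketch only, assuming a plain NumPy array of daily values rather than this notebook's data loader):
```
import numpy as np

def climatology_baseline(daily_temps, n_years):
    # daily_temps: hypothetical array of shape [n_years * 365, 2] holding (Tmin, Tmax)
    # prediction for a given day of the year = average of that same day over n_years
    years = daily_temps[:n_years * 365].reshape(n_years, 365, 2)
    return years.mean(axis=0)  # shape [365, 2]
```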
<div class="alert alert-block alert-info">
Things to do:<br/>
<ol start="0">
<li>Run the notebook as it is. Look at the data visualisations. Then look at the predictions at the end. Not very good...
<li>First play with the data to find good values for RESAMPLE_BY and SEQLEN in hyperparameters ([Assignment #1](#assignment1)).
<li>Now implement the RNN model in the model function ([Assignment #2](#assignment2)).
<li>Temperatures are noisy, let's try something new: predicting N data points ahead instead of only 1 ahead ([Assignment #3](#assignment3)).
<li>Now we will adjust more traditional hyperparameters and add regularisations. ([Assignment #4](#assignment4))
<li>
Look at the save-restore code. The model is saved at the end of the [training loop](#train) and restored when running [validation](#valid). Also see how the restored model is used for [inference](#inference).
<br/><br/>
You are ready to run in the cloud on all 1666 weather stations. Use [this bash notebook](../run-on-cloud-ml-engine.ipynb) to convert your code to a regular Python file and invoke the Google Cloud ML Engine command line.
When the training is finished on ML Engine, change one line in [validation](#valid) to load the SAVEDMODEL from its cloud bucket and display.
</div>
```
import math
import sys
import time
import numpy as np
sys.path.insert(0, '../temperatures/utils/') #so python can find the utils_ modules
import utils_batching
import utils_args
import tensorflow as tf
from tensorflow.python.lib.io import file_io as gfile
print("Tensorflow version: " + tf.__version__)
from matplotlib import pyplot as plt
import utils_prettystyle
import utils_display
```
## Download Data
```
%bash
DOWNLOAD_DIR=../temperatures/data
mkdir $DOWNLOAD_DIR
gsutil -m cp gs://cloud-training-demos/courses/machine_learning/deepdive/09_sequence/temperatures/* $DOWNLOAD_DIR
```
<a name="hyperparameters"></a>
<a name="assignment1"></a>
## Hyperparameters
<div class="alert alert-block alert-info">
***Assignment #1*** Temperatures have a periodicity of 365 days. We would need to unroll the RNN over 365 steps (=SEQLEN) to capture that. That is way too much. We will have to work with averages over a handful of days instead of daily temperatures. Bump the unrolling length to SEQLEN=128 and then try averaging over 3 to 5 days (RESAMPLE_BY=3, 4, 5). Look at the data visualisations in [Resampling](#resampling) and [Training sequences](#trainseq). The training sequences should capture a recognizable part of the yearly oscillation.
***In the end, use these values: SEQLEN=128, RESAMPLE_BY=5.***
</div>
```
NB_EPOCHS = 5 # number of times the model sees all the data during training
N_FORWARD = 8 # train the network to predict N in advance (traditionally 1)
RESAMPLE_BY = 1 # averaging period in days (training on daily data is too much)
RNN_CELLSIZE = 80 # size of the RNN cells
N_LAYERS = 1 # number of stacked RNN cells (needed for tensor shapes but code must be changed manually)
SEQLEN = 128 # unrolled sequence length
BATCHSIZE = 64 # mini-batch size
DROPOUT_PKEEP = 1.0 # dropout: probability of neurons being kept (NOT dropped). Should be between 0.5 and 1.
ACTIVATION = tf.nn.tanh # Activation function for GRU cells (tf.nn.relu or tf.nn.tanh)
JOB_DIR = "temperature_checkpoints"
DATA_DIR = "../temperatures/data"
# potentially override some settings from command-line arguments
if __name__ == '__main__':
JOB_DIR, DATA_DIR = utils_args.read_args1(JOB_DIR, DATA_DIR)
ALL_FILEPATTERN = DATA_DIR + "/*.csv" # pattern matches all 1666 files
EVAL_FILEPATTERN = DATA_DIR + "/USC000*2.csv" # pattern matches 8 files
# pattern USW*.csv -> 298 files, pattern USW*0.csv -> 28 files
print('Reading data from "{}".\nWriting checkpoints to "{}".'.format(DATA_DIR, JOB_DIR))
```
## Temperature data
This is what our temperature dataset looks like: sequences of daily (Tmin, Tmax) temperatures from 1960 to 2010. They have been cleaned up and any missing values have been filled by interpolation. Interpolated regions of the dataset are marked in red on the graph.
```
all_filenames = gfile.get_matching_files(ALL_FILEPATTERN)
eval_filenames = gfile.get_matching_files(EVAL_FILEPATTERN)
train_filenames = list(set(all_filenames) - set(eval_filenames))
# By default, this utility function loads all the files and places data
# from them as-is in an array, one file per line. Later, we will use it
# to shape the dataset as needed for training.
ite = utils_batching.rnn_multistation_sampling_temperature_sequencer(eval_filenames)
evtemps, _, evdates, _, _ = next(ite) # gets everything
print('Pattern "{}" matches {} files'.format(ALL_FILEPATTERN, len(all_filenames)))
print('Pattern "{}" matches {} files'.format(EVAL_FILEPATTERN, len(eval_filenames)))
print("Evaluation files: {}".format(len(eval_filenames)))
print("Training files: {}".format(len(train_filenames)))
print("Initial shape of the evaluation dataset: " + str(evtemps.shape))
print("{} files, {} data points per file, {} values per data point"
" (Tmin, Tmax, is_interpolated) ".format(evtemps.shape[0], evtemps.shape[1],evtemps.shape[2]))
# You can adjust the visualisation range and dataset here.
# Interpolated regions of the dataset are marked in red.
WEATHER_STATION = 0 # 0 to 7 in default eval dataset
START_DATE = 0 # 0 = Jan 2nd 1950
END_DATE = 18262 # 18262 = Dec 31st 2009
visu_temperatures = evtemps[WEATHER_STATION,START_DATE:END_DATE]
visu_dates = evdates[START_DATE:END_DATE]
utils_display.picture_this_4(visu_temperatures, visu_dates)
```
<a name="resampling"></a>
## Resampling
Our RNN would need to be unrolled across 365 steps to capture the yearly temperature cycles. That's a bit too much. We will resample the temperatures and work with 5-day averages, for example. This is what resampled (Tmin, Tmax) temperatures look like.
```
# This time we ask the utility function to average temperatures over 5-day periods (RESAMPLE_BY=5)
ite = utils_batching.rnn_multistation_sampling_temperature_sequencer(eval_filenames, RESAMPLE_BY, tminmax=True)
evaltemps, _, evaldates, _, _ = next(ite)
# display five years worth of data
WEATHER_STATION = 0 # 0 to 7 in default eval dataset
START_DATE = 0 # 0 = Jan 2nd 1950
END_DATE = 365*5//RESAMPLE_BY # 5 years
visu_temperatures = evaltemps[WEATHER_STATION, START_DATE:END_DATE]
visu_dates = evaldates[START_DATE:END_DATE]
plt.fill_between(visu_dates, visu_temperatures[:,0], visu_temperatures[:,1])
plt.show()
```
<a name="trainseq"></a>
## Visualize training sequences
This is what the neural network will see during training.
```
# The function rnn_multistation_sampling_temperature_sequencer puts one weather station per line in
# a batch and continues with data from the same station in corresponding lines in the next batch.
# Features and labels are returned with shapes [BATCHSIZE, SEQLEN, 2]. The last dimension of size 2
# contains (Tmin, Tmax).
ite = utils_batching.rnn_multistation_sampling_temperature_sequencer(eval_filenames,
RESAMPLE_BY,
BATCHSIZE,
SEQLEN,
N_FORWARD,
nb_epochs=1,
tminmax=True)
# load 6 training sequences (each one contains data for all weather stations)
visu_data = [next(ite) for _ in range(6)]
# Check that consecutive training sequences from the same weather station are indeed consecutive
WEATHER_STATION = 4
utils_display.picture_this_5(visu_data, WEATHER_STATION)
```
<a name="assignment2"></a><a name="assignment3"></a>
## The model definition
<div class="alert alert-block alert-info">
***Assignment #2*** Implement the RNN model. You can copy-paste it from the previous exercise but you will have to make one modification: we are now predicting vectors of 2 values (Tmin, Tmax) instead of single values. Train then evaluate to see if you are getting better results.
</div>
<div class="alert alert-block alert-info">
***Assignment #3*** Temperatures are noisy. If we ask the model to predict the next data point, noise might drown the trend and the model will not train. The trend should be clearer if we ask the model to look further ahead. You can use the [hyperparameter](#hyperparameters) N_FORWARD to shift the target sequences by more than 1 (a small shifting sketch follows this note). Try values between 4 and 16 and see how [training sequences](#trainseq) look.<br/>
<br/>
If the model predicts N_FORWARD in advance, you will also need it to output N_FORWARD predicted values instead of 1. Please check that the output of your model is indeed `Yout = Yr[:,-N_FORWARD:,:]`. The inference part has already been adjusted to generate the sequence by blocks of N_FORWARD points. You can have a [look at it](#inference).<br/>
<br/>
Train and evaluate to see if you are getting better results. ***In the end, use this value: N_FORWARD=8***
</div>
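As a minimal illustration of the target shift in Assignment #3 (a sketch only; the real batching is handled by the `utils_batching` sequencer):
```
import numpy as np

N_FORWARD, SEQLEN = 8, 128
series = np.random.rand(SEQLEN + N_FORWARD, 2)  # fake (Tmin, Tmax) series
X = series[:SEQLEN]                             # inputs:  t .. t+SEQLEN-1
Y = series[N_FORWARD:SEQLEN + N_FORWARD]        # targets: shifted N_FORWARD steps ahead
```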
<a name="assignment4"></a>
<div class="alert alert-block alert-info">
***Assignment #4*** Try adjusting the following parameters:<ol><ol>
<li> Use a stacked RNN cell with 2 layers in the model:<br/>
```
cells = [tf.nn.rnn_cell.GRUCell(RNN_CELLSIZE) for _ in range(N_LAYERS)]
cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=False)
```
<br/>Do not forget to set N_LAYERS=2 in [hyperparameters](#hyperparameters)
</li>
<li>Increase RNN_CELLSIZE -> 128 to allow the cells to model more complex behaviors.</li>
<li>Regularisation: add a decaying learning rate. Replace learning_rate=0.01 with:<br/>
```
learning_rate = 0.001 + tf.train.exponential_decay(0.01, step, 1000, 0.5) # 0.001+0.01*0.5^(step/1000)
``` </li>
<li>Regularisation: add dropout between cell layers.<br/>
```
cells = [tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob = dropout_pkeep) for cell in cells]
```
<br/>
Check that you have a good value for DROPOUT_PKEEP in [hyperparameters](#hyperparameters). 0.7 should do. Also check that dropout is deactivated i.e. dropout_pkeep=1.0 during [inference](#inference).
</li>
</ol></ol>
Play with these options until you get a good fit for at least 1.5 years.
</div>

<div style="text-align: right; font-family: monospace">
X shape [BATCHSIZE, SEQLEN, 2]<br/>
Y shape [BATCHSIZE, SEQLEN, 2]<br/>
H shape [BATCHSIZE, RNN_CELLSIZE*NLAYERS]
</div>
When executed, this function instantiates the Tensorflow graph for our model.
```
def model_rnn_fn(features, Hin, labels, step, dropout_pkeep):
print('features: {}'.format(features))
X = features # shape [BATCHSIZE, SEQLEN, 2], 2 for (Tmin, Tmax)
batchsize = tf.shape(X)[0] # allow for variable batch size
seqlen = tf.shape(X)[1] # allow for variable sequence length
# --- RNN model: a GRU cell unrolled over the sequence ---
cell = tf.nn.rnn_cell.GRUCell(RNN_CELLSIZE)
Hr, H = tf.nn.dynamic_rnn(cell,X,initial_state=Hin)
# --- Regression layer applied at every time step: we are predicting two values (Tmin, Tmax) ---
Yn = tf.reshape(Hr, [batchsize*seqlen, RNN_CELLSIZE])
Yr = tf.layers.dense(Yn, 2) # Yr [BATCHSIZE*SEQLEN, 2] predicting vectors of 2 elements
Yr = tf.reshape(Yr, [batchsize, seqlen, 2]) # Yr [BATCHSIZE, SEQLEN, 2]
# --- end of model ---
Yout = Yr[:,-N_FORWARD:,:] # Last N_FORWARD outputs. Yout [BATCHSIZE, N_FORWARD, 2]
loss = tf.losses.mean_squared_error(Yr, labels) # labels[BATCHSIZE, SEQLEN, 2]
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
return Yout, H, loss, train_op, Yr
```
## Instantiate the model
```
tf.reset_default_graph() # restart model graph from scratch
# placeholder for inputs
Hin = tf.placeholder(tf.float32, [None, RNN_CELLSIZE * N_LAYERS])
features = tf.placeholder(tf.float32, [None, None, 2]) # [BATCHSIZE, SEQLEN, 2]
labels = tf.placeholder(tf.float32, [None, None, 2]) # [BATCHSIZE, SEQLEN, 2]
step = tf.placeholder(tf.int32)
dropout_pkeep = tf.placeholder(tf.float32)
# instantiate the model
Yout, H, loss, train_op, Yr = model_rnn_fn(features, Hin, labels, step, dropout_pkeep)
```
## Initialize Tensorflow session
This resets all neuron weights and biases to initial random values
```
# variable initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run([init])
saver = tf.train.Saver(max_to_keep=1)
```
<a name="train"></a>
## The training loop
You can re-execute this cell to continue training. <br/>
<br/>
Training data must be batched correctly, one weather station per line, continued on the same line across batches. This way, output states computed from one batch are the correct input states for the next batch. The provided utility function `rnn_multistation_sampling_temperature_sequencer` does the right thing.

```
losses = []
indices = []
last_epoch = 99999
last_fileid = 99999
for i, (next_features, next_labels, dates, epoch, fileid) in enumerate(
utils_batching.rnn_multistation_sampling_temperature_sequencer(train_filenames,
RESAMPLE_BY,
BATCHSIZE,
SEQLEN,
N_FORWARD,
NB_EPOCHS, tminmax=True)):
# reinitialize state between epochs or when starting on data from a new weather station
if epoch != last_epoch or fileid != last_fileid:
batchsize = next_features.shape[0]
H_ = np.zeros([batchsize, RNN_CELLSIZE * N_LAYERS])
print("State reset")
#train
feed = {Hin: H_, features: next_features, labels: next_labels, step: i, dropout_pkeep: DROPOUT_PKEEP}
Yout_, H_, loss_, _, Yr_ = sess.run([Yout, H, loss, train_op, Yr], feed_dict=feed)
# print progress
if i%20 == 0:
print("{}: epoch {} loss = {} ({} weather stations this epoch)".format(i, epoch, np.mean(loss_), fileid+1))
sys.stdout.flush()
if i%10 == 0:
losses.append(np.mean(loss_))
indices.append(i)
# This visualisation can be helpful to see how the model "locks" on the shape of the curve
# if i%100 == 0:
# plt.figure(figsize=(10,2))
# plt.fill_between(dates, next_features[0,:,0], next_features[0,:,1]).set_alpha(0.2)
# plt.fill_between(dates, next_labels[0,:,0], next_labels[0,:,1])
# plt.fill_between(dates, Yr_[0,:,0], Yr_[0,:,1]).set_alpha(0.8)
# plt.show()
last_epoch = epoch
last_fileid = fileid
# save the trained model
SAVEDMODEL = JOB_DIR + "/ckpt" + str(int(time.time()))
tf.saved_model.simple_save(sess, SAVEDMODEL,
inputs={"features":features, "Hin":Hin, "dropout_pkeep":dropout_pkeep},
outputs={"Yout":Yout, "H":H})
plt.ylim(ymax=np.amax(losses[1:])) # ignore first value for scaling
plt.plot(indices, losses)
plt.show()
```
<a name="inference"></a>
## Inference
This is a generative model: run a trained RNN cell in a loop. This time, with N_FORWARD>1, we generate the sequence by blocks of N_FORWARD data points instead of point by point. The RNN is unrolled across N_FORWARD steps, takes in the last N_FORWARD data points and predicts the next N_FORWARD data points, and so on in a loop. State must be passed around correctly.
```
def prediction_run(predict_fn, prime_data, run_length):
H = np.zeros([1, RNN_CELLSIZE * N_LAYERS]) # zero state initially
Yout = np.zeros([1, N_FORWARD, 2])
data_len = prime_data.shape[0]-N_FORWARD
# prime the state from data
if data_len > 0:
Yin = np.array(prime_data[:-N_FORWARD])
Yin = np.reshape(Yin, [1, data_len, 2]) # reshape as one sequence of pairs (Tmin, Tmax)
r = predict_fn({'features': Yin, 'Hin':H, 'dropout_pkeep':1.0}) # no dropout during inference
Yout = r["Yout"]
H = r["H"]
# initially, put real data on the inputs, not predictions
Yout = np.expand_dims(prime_data[-N_FORWARD:], axis=0)
# Yout shape [1, N_FORWARD, 2]: batch of a single sequence of length N_FORWARD of (Tmin, Tmax) data points
# run prediction
# To generate a sequence, run a trained cell in a loop passing as input and input state
# respectively the output and output state from the previous iteration.
results = []
for i in range(run_length//N_FORWARD+1):
r = predict_fn({'features': Yout, 'Hin':H, 'dropout_pkeep':1.0}) # no dropout during inference
Yout = r["Yout"]
H = r["H"]
results.append(Yout[0]) # shape [N_FORWARD, 2]
return np.concatenate(results, axis=0)[:run_length]
```
<a name="valid"></a>
## Validation
```
QYEAR = 365//(RESAMPLE_BY*4)
YEAR = 365//(RESAMPLE_BY)
# Try starting predictions from January / March / July (resp. OFFSET = YEAR or YEAR+QYEAR or YEAR+2*QYEAR)
# Some start dates are more challenging for the model than others.
OFFSET = 4*YEAR+1*QYEAR
PRIMELEN=7*YEAR
RUNLEN=4*YEAR
RMSELEN=3*365//(RESAMPLE_BY*2) # accuracy of predictions 1.5 years in advance
# Restore the model from the last checkpoint saved previously.
# Alternative checkpoints:
# Once you have trained on all 1666 weather stations on Google Cloud ML Engine, you can load the checkpoint from there.
# SAVEDMODEL = "gs://{BUCKET}/sinejobs/sines_XXXXXX_XXXXXX/ckptXXXXXXXX"
# A sample checkpoint is provided with the lab. You can try loading it for comparison.
# You will have to use the following parameters and re-run the entire notebook:
# N_FORWARD = 8, RESAMPLE_BY = 5, RNN_CELLSIZE = 128, N_LAYERS = 2
# SAVEDMODEL = "temperatures_best_checkpoint"
predict_fn = tf.contrib.predictor.from_saved_model(SAVEDMODEL)
for evaldata in evaltemps:
prime_data = evaldata[OFFSET:OFFSET+PRIMELEN]
results = prediction_run(predict_fn, prime_data, RUNLEN)
utils_display.picture_this_6(evaldata, evaldates, prime_data, results, PRIMELEN, RUNLEN, OFFSET, RMSELEN)
rmses = []
bad_ones = 0
for offset in [YEAR, YEAR+QYEAR, YEAR+2*QYEAR]:
for evaldata in evaltemps:
prime_data = evaldata[offset:offset+PRIMELEN]
results = prediction_run(predict_fn, prime_data, RUNLEN)
rmse = math.sqrt(np.mean((evaldata[offset+PRIMELEN:offset+PRIMELEN+RMSELEN] - results[:RMSELEN])**2))
rmses.append(rmse)
if rmse>7: bad_ones += 1
print("RMSE on {} predictions (shaded area): {}".format(RMSELEN, rmse))
print("Average RMSE on {} weather stations: {} ({} really bad ones, i.e. >7.0)".format(len(evaltemps), np.mean(rmses), bad_ones))
sys.stdout.flush()
```
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| github_jupyter |
<a href="https://colab.research.google.com/github/sokrypton/ColabFold/blob/main/beta/AlphaFold2_batch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#AlphaFold2 w/ MMseqs2 Batch
An easy-to-use version of AlphaFold 2 [(Jumper et al. 2021, Nature)](https://www.nature.com/articles/s41586-021-03819-2), a protein structure prediction pipeline, with an API hosted at the Södinglab based on the MMseqs2 server [(Mirdita et al. 2019, Bioinformatics)](https://academic.oup.com/bioinformatics/article/35/16/2856/5280135) for multiple sequence alignment creation.
**Usage**
input_dir: directory with only fasta files stored in Google Drive
result_dir: results will be written to the result directory in Google Drive
<strong>For detailed instructions, see <a href="#Instructions">bottom</a> of notebook!</strong>
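As a rough sketch of the expected layout (hypothetical filenames, shown only to illustrate what ```input_dir``` should contain):
```
# input_dir should contain only FASTA files, e.g.
#   /content/drive/MyDrive/input_fasta/protein_a.fasta
#   /content/drive/MyDrive/input_fasta/protein_b.fasta
import glob, os
input_dir = '/content/drive/MyDrive/input_fasta'  # assumed default from the form below
fasta_files = sorted(glob.glob(os.path.join(input_dir, '*.fasta')))
print(len(fasta_files), 'sequences queued')
```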
```
#@title Mount google drive
from google.colab import drive
drive.mount('/content/drive')
#@title Input protein sequence, then hit `Runtime` -> `Run all`
from google.colab import files
import os
import os.path
import re
import hashlib
def add_hash(x,y):
return x+"_"+hashlib.sha1(y.encode()).hexdigest()[:5]
input_dir = '/content/drive/MyDrive/input_fasta' #@param {type:"string"}
result_dir = '/content/drive/MyDrive/result' #@param {type:"string"}
# number of models to use
#@markdown ---
#@markdown ### Advanced settings
msa_mode = "MMseqs2 (UniRef+Environmental)" #@param ["MMseqs2 (UniRef+Environmental)", "MMseqs2 (UniRef only)","single_sequence","custom"]
num_models = 5 #@param [1,2,3,4,5] {type:"raw"}
use_msa = True if msa_mode.startswith("MMseqs2") else False
use_env = True if msa_mode == "MMseqs2 (UniRef+Environmental)" else False
use_custom_msa = False
use_amber = False #@param {type:"boolean"}
use_templates = False
use_turbo = True
use_ptm = True
max_recycles = 3
tol = 0
is_training = False
num_samples = 1
num_ensemble = 1
show_images = False
save_tmp_pdb = True
rank_by = "pLDDT"
save_pae_json = False
homooligomer = "1"
homooligomer = re.sub("[:/]+",":",homooligomer)
if len(homooligomer) == 0: homooligomer = "1"
homooligomer = re.sub("[^0-9:]", "", homooligomer)
homooligomers = [int(h) for h in homooligomer.split(":")]
max_msa = "512:1024"
max_msa_clusters, max_extra_msa = [int(x) for x in max_msa.split(":")]
#@markdown Don't forget to hit `Runtime` -> `Run all` after updating form
with open(f"run.log", "w") as text_file:
text_file.write("num_models=%s\n" % num_models)
text_file.write("use_amber=%s\n" % use_amber)
text_file.write("use_msa=%s\n" % use_msa)
text_file.write("msa_mode=%s\n" % msa_mode)
text_file.write("use_templates=%s\n" % use_templates)
text_file.write("homooligomer=%s\n" % homooligomer)
#@title Install software
#@markdown Please execute this cell by pressing the _Play_ button
#@markdown on the left.
use_amber_relax = use_amber
import os
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU')
import jax
if jax.local_devices()[0].platform == 'tpu':
raise RuntimeError('Colab TPU runtime not supported. Change it to GPU via Runtime -> Change Runtime Type -> Hardware accelerator -> GPU.')
elif jax.local_devices()[0].platform == 'cpu':
raise RuntimeError('Colab CPU runtime not supported. Change it to GPU via Runtime -> Change Runtime Type -> Hardware accelerator -> GPU.')
from IPython.utils import io
import subprocess
import tqdm.notebook
GIT_REPO = 'https://github.com/deepmind/alphafold'
SOURCE_URL = 'https://storage.googleapis.com/alphafold/alphafold_params_2021-07-14.tar'
PARAMS_DIR = './alphafold/data/params'
PARAMS_PATH = os.path.join(PARAMS_DIR, os.path.basename(SOURCE_URL))
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
# if not already installed
try:
total = 100 if use_amber_relax else 55
with tqdm.notebook.tqdm(total=total, bar_format=TQDM_BAR_FORMAT) as pbar:
with io.capture_output() as captured:
if not os.path.isdir("alphafold"):
%shell rm -rf alphafold
%shell git clone {GIT_REPO} alphafold
%shell (cd alphafold; git checkout 1e216f93f06aa04aa699562f504db1d02c3b704c --quiet)
# colabfold patches
%shell mkdir --parents tmp
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/colabfold.py
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/pairmsa.py
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/protein.patch -P tmp/
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/config.patch -P tmp/
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/model.patch -P tmp/
%shell wget -qnc https://raw.githubusercontent.com/sokrypton/ColabFold/main/beta/modules.patch -P tmp/
# hhsuite + reformat.pl
%shell curl -fsSL https://github.com/soedinglab/hh-suite/releases/download/v3.3.0/hhsuite-3.3.0-SSE2-Linux.tar.gz | tar xz -C tmp/
# Apply multi-chain patch from Lim Heo @huhlim
%shell patch -u alphafold/alphafold/common/protein.py -i tmp/protein.patch
# Apply patch to dynamically control number of recycles (idea from Ryan Kibler)
%shell patch -u alphafold/alphafold/model/model.py -i tmp/model.patch
%shell patch -u alphafold/alphafold/model/modules.py -i tmp/modules.patch
%shell patch -u alphafold/alphafold/model/config.py -i tmp/config.patch
pbar.update(4)
%shell pip3 install ./alphafold
pbar.update(5)
# speedup from kaczmarj
%shell mkdir --parents "{PARAMS_DIR}"
%shell curl -fsSL "{SOURCE_URL}" | tar x -C "{PARAMS_DIR}"
pbar.update(14+27)
#######################################################################
%shell sudo apt install --quiet --yes hmmer
pbar.update(3)
# Install py3dmol.
%shell pip install py3dmol
pbar.update(1)
# Create a ramdisk to store a database chunk to make Jackhmmer run fast.
%shell sudo mkdir -m 777 --parents /tmp/ramdisk
%shell sudo mount -t tmpfs -o size=9G ramdisk /tmp/ramdisk
pbar.update(1)
else:
pbar.update(55)
if use_amber_relax:
if not os.path.isfile("stereo_chemical_props.txt"):
# Install OpenMM and pdbfixer.
%shell rm -rf /opt/conda
%shell wget -q -P /tmp \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \
&& rm /tmp/Miniconda3-latest-Linux-x86_64.sh
pbar.update(4)
PATH=%env PATH
%env PATH=/opt/conda/bin:{PATH}
%shell conda update -qy conda \
&& conda install -qy -c conda-forge \
python=3.7 \
openmm=7.5.1 \
pdbfixer
pbar.update(40)
%shell wget -q -P /content \
https://git.scicore.unibas.ch/schwede/openstructure/-/raw/7102c63615b64735c4941278d92b554ec94415f8/modules/mol/alg/src/stereo_chemical_props.txt
pbar.update(1)
%shell mkdir -p /content/alphafold/common
%shell cp -f /content/stereo_chemical_props.txt /content/alphafold/common
# Apply OpenMM patch.
%shell pushd /opt/conda/lib/python3.7/site-packages/ && \
patch -p0 < /content/alphafold/docker/openmm.patch && \
popd
else:
pbar.update(45)
except subprocess.CalledProcessError:
print(captured)
raise
########################################################################################
# --- Python imports ---
import colabfold as cf
import pairmsa
import sys
import pickle
if use_amber_relax:
sys.path.append('/opt/conda/lib/python3.7/site-packages')
if "/content/tmp/bin" not in os.environ['PATH']:
os.environ['PATH'] += ":/content/tmp/bin:/content/tmp/scripts"
from urllib import request
from concurrent import futures
from google.colab import files
import json
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import py3Dmol
from alphafold.model import model
from alphafold.model import config
from alphafold.model import data
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.data.tools import jackhmmer
from alphafold.common import protein
if use_amber_relax:
from alphafold.relax import relax
from alphafold.relax import utils
#@title Gather input features, predict structure
def _placeholder_template_feats(num_templates_, num_res_):
return {
'template_aatype': np.zeros([num_templates_, num_res_, 22], np.float32),
'template_all_atom_masks': np.zeros([num_templates_, num_res_, 37], np.float32),
'template_all_atom_positions': np.zeros([num_templates_, num_res_, 37, 3], np.float32),
'template_domain_names': np.zeros([num_templates_], np.float32),
'template_sum_probs': np.zeros([num_templates_], np.float32),
}
def parse_results(prediction_result, processed_feature_dict):
b_factors = prediction_result['plddt'][:,None] * prediction_result['structure_module']['final_atom_mask']
out = {"unrelaxed_protein": protein.from_prediction(processed_feature_dict, prediction_result, b_factors=b_factors),
"plddt": prediction_result['plddt'],
"pLDDT": prediction_result['plddt'].mean(),
"dists": prediction_result["distogram"]["bin_edges"][prediction_result["distogram"]["logits"].argmax(-1)],
"adj": jax.nn.softmax(prediction_result["distogram"]["logits"])[:,:,prediction_result["distogram"]["bin_edges"] < 8].sum(-1)}
if "ptm" in prediction_result:
out.update({"pae": prediction_result['predicted_aligned_error'],
"pTMscore": prediction_result['ptm']})
return out
model_names = ['model_1', 'model_2', 'model_3', 'model_4', 'model_5'][:num_models]
total = len(model_names) * num_samples
if use_amber_relax:
if relax_all: total += total
else: total += 1
### run
for filename in os.listdir(input_dir):
jobname=os.path.splitext(filename)[0]
filepath = input_dir+"/"+filename
print("Running: "+filepath)
with open(filepath) as f:
input_fasta_str = f.read()
(seqs, header) = pipeline.parsers.parse_fasta(input_fasta_str)
seq = seqs[0]
if os.path.isfile(result_dir+"/"+jobname+".result.zip"):
continue
# prediction directory
output_dir = 'prediction_' + cf.get_hash(seq)[:5]
os.makedirs(output_dir, exist_ok=True)
# delete existing files in working directory
for f in os.listdir(output_dir):
os.remove(os.path.join(output_dir, f))
prefix = cf.get_hash("".join(seq))
prefix = os.path.join('tmp',prefix)
print(f"running mmseqs2")
a3m_lines = cf.run_mmseqs2(seq, prefix, filter=True)
# write a3m to output folder
with open(f"{output_dir}/msa.a3m","w") as out_a3m:
out_a3m.write(a3m_lines)
msa, deletion_matrice = parsers.parse_a3m(a3m_lines)
msas, deletion_matrices = [],[]
msas.append(msa)
deletion_matrices.append(deletion_matrice)
#############################
# homooligomerize
#############################
lengths = [len(seq) for seq in seqs]
msas_mod, deletion_matrices_mod = cf.homooligomerize_heterooligomer(msas, deletion_matrices,
lengths, homooligomers)
num_res = len(seq)
feature_dict = {}
feature_dict.update(pipeline.make_sequence_features(seq, 'test', num_res))
feature_dict.update(pipeline.make_msa_features(msas_mod, deletion_matrices=deletion_matrices_mod))
if not use_turbo:
feature_dict.update(_placeholder_template_feats(0, num_res))
################################
# set chain breaks
################################
Ls = []
for sub_seq,h in zip(seq.split(":"),homooligomers):
Ls += [len(s) for s in sub_seq.split("/")] * h
Ls_plot = sum([[len(seq)]*h for seq,h in zip(seqs,homooligomers)],[])
feature_dict['residue_index'] = cf.chain_break(feature_dict['residue_index'], Ls)
from string import ascii_uppercase
with tqdm.notebook.tqdm(total=total, bar_format=TQDM_BAR_FORMAT) as pbar:
#######################################################################
# precompile model and recompile only if length changes
#######################################################################
if use_turbo:
name = "model_5_ptm" if use_ptm else "model_5"
N = len(feature_dict["msa"])
L = len(feature_dict["residue_index"])
compiled = (N, L, use_ptm, max_recycles, tol, num_ensemble, max_msa, is_training)
if "COMPILED" in dir():
if COMPILED != compiled: recompile = True
else: recompile = True
if recompile:
cf.clear_mem("gpu")
cfg = config.model_config(name)
cfg.data.common.max_extra_msa = min(N,max_extra_msa)
cfg.data.eval.max_msa_clusters = min(N,max_msa_clusters)
cfg.data.common.num_recycle = max_recycles
cfg.model.num_recycle = max_recycles
cfg.model.recycle_tol = tol
cfg.data.eval.num_ensemble = num_ensemble
params = data.get_model_haiku_params(name,'./alphafold/data')
model_runner = model.RunModel(cfg, params, is_training=is_training)
COMPILED = compiled
recompile = False
else:
cf.clear_mem("gpu")
recompile = True
# cleanup
if "outs" in dir(): del outs
outs = {}
cf.clear_mem("cpu")
#######################################################################
for num, model_name in enumerate(model_names): # for each model
name = model_name+"_ptm" if use_ptm else model_name
# setup model and/or params
params = data.get_model_haiku_params(name, './alphafold/data')
if use_turbo:
for k in model_runner.params.keys():
model_runner.params[k] = params[k]
else:
cfg = config.model_config(name)
cfg.data.common.num_recycle = cfg.model.num_recycle = max_recycles
cfg.model.recycle_tol = tol
cfg.data.eval.num_ensemble = num_ensemble
model_runner = model.RunModel(cfg, params, is_training=is_training)
for seed in range(num_samples): # for each seed
# predict
key = f"{name}_seed_{seed}"
pbar.set_description(f'Running {key}')
processed_feature_dict = model_runner.process_features(feature_dict, random_seed=seed)
prediction_result, (r, t) = cf.to(model_runner.predict(processed_feature_dict, random_seed=seed),"cpu")
outs[key] = parse_results(prediction_result, processed_feature_dict)
# report
pbar.update(n=1)
line = f"{key} recycles:{r} tol:{t:.2f} pLDDT:{outs[key]['pLDDT']:.2f}"
if use_ptm: line += f" pTMscore:{outs[key]['pTMscore']:.2f}"
print(line)
if show_images:
fig = cf.plot_protein(outs[key]["unrelaxed_protein"], Ls=Ls_plot, dpi=100)
plt.show()
if save_tmp_pdb:
tmp_pdb_path = os.path.join(output_dir,f'unranked_{key}_unrelaxed.pdb')
pdb_lines = protein.to_pdb(outs[key]["unrelaxed_protein"])
with open(tmp_pdb_path, 'w') as f: f.write(pdb_lines)
# cleanup
del processed_feature_dict, prediction_result
if use_turbo:
del params
else:
del params, model_runner, cfg
cf.clear_mem("gpu")
# delete old files
for f in os.listdir(output_dir):
if "rank" in f:
os.remove(os.path.join(output_dir, f))
# Find the best model according to the mean pLDDT.
model_rank = list(outs.keys())
model_rank = [model_rank[i] for i in np.argsort([outs[x][rank_by] for x in model_rank])[::-1]]
# Write out the prediction
for n,key in enumerate(model_rank):
prefix = f"rank_{n+1}_{key}"
pred_output_path = os.path.join(output_dir,f'{prefix}_unrelaxed.pdb')
fig = cf.plot_protein(outs[key]["unrelaxed_protein"], Ls=Ls_plot, dpi=200)
plt.savefig(os.path.join(output_dir,f'{prefix}.png'), bbox_inches = 'tight')
plt.close(fig)
pdb_lines = protein.to_pdb(outs[key]["unrelaxed_protein"])
with open(pred_output_path, 'w') as f:
f.write(pdb_lines)
if use_amber_relax:
pbar.set_description(f'AMBER relaxation')
if relax_all or n == 0:
amber_relaxer = relax.AmberRelaxation(
max_iterations=0,
tolerance=2.39,
stiffness=10.0,
exclude_residues=[],
max_outer_iterations=20)
relaxed_pdb_lines, _, _ = amber_relaxer.process(prot=outs[key]["unrelaxed_protein"])
pred_output_path = os.path.join(output_dir,f'{prefix}_relaxed.pdb')
with open(pred_output_path, 'w') as f:
f.write(relaxed_pdb_lines)
pbar.update(n=1)
############################################################
print(f"model rank based on {rank_by}")
for n,key in enumerate(model_rank):
print(f"rank_{n+1}_{key} {rank_by}:{outs[key][rank_by]:.2f}")
if use_ptm and save_pae_json:
pae = outs[key]["pae"]
max_pae = pae.max()
# Save pLDDT and predicted aligned error (if it exists)
pae_output_path = os.path.join(output_dir,f'rank_{n+1}_{key}_pae.json')
# Save predicted aligned error in the same format as the AF EMBL DB
rounded_errors = np.round(np.asarray(pae), decimals=1)
indices = np.indices((len(rounded_errors), len(rounded_errors))) + 1
indices_1 = indices[0].flatten().tolist()
indices_2 = indices[1].flatten().tolist()
pae_data = json.dumps([{
'residue1': indices_1,
'residue2': indices_2,
'distance': rounded_errors.flatten().tolist(),
'max_predicted_aligned_error': max_pae.item()
}],
indent=None,
separators=(',', ':'))
with open(pae_output_path, 'w') as f:
f.write(pae_data)
dpi = 100
if use_ptm:
print("predicted alignment error")
cf.plot_paes([outs[k]["pae"] for k in model_rank], Ls=Ls_plot, dpi=dpi)
plt.savefig(os.path.join(output_dir,f'predicted_alignment_error.png'), bbox_inches = 'tight', dpi=np.maximum(200,dpi))
print("predicted contacts")
cf.plot_adjs([outs[k]["adj"] for k in model_rank], Ls=Ls_plot, dpi=dpi)
plt.savefig(os.path.join(output_dir,f'predicted_contacts.png'), bbox_inches = 'tight', dpi=np.maximum(200,dpi))
print("predicted distogram")
cf.plot_dists([outs[k]["dists"] for k in model_rank], Ls=Ls_plot, dpi=dpi)
plt.savefig(os.path.join(output_dir,f'predicted_distogram.png'), bbox_inches = 'tight', dpi=np.maximum(200,dpi))
print("predicted LDDT")
cf.plot_plddts([outs[k]["plddt"] for k in model_rank], Ls=Ls_plot, dpi=dpi)
plt.savefig(os.path.join(output_dir,f'predicted_LDDT.png'), bbox_inches = 'tight', dpi=np.maximum(200,dpi))
%shell zip -FSrj $result_dir"/"$jobname".result.zip" "run.log" $output_dir"/"*
```
# Instructions <a name="Instructions"></a>
**Quick start**
1. Change the runtime type to GPU at "Runtime" -> "Change runtime type" (improves speed).
2. Paste your protein sequence in the input field below.
3. Press "Runtime" -> "Run all".
4. The pipeline consists of 10 steps. The currently running step is indicated by a circle with a stop sign next to it.
**Result zip file contents**
1. PDB formatted structures sorted by avg. pLDDT (relaxed and unrelaxed).
2. Plots of the model quality.
3. Plots of the MSA coverage.
4. Parameter log file.
5. A3M formatted input MSA.
6. BibTeX file with citations for all used tools and databases.
At the end of the job a download modal box will pop up with a `jobname.result.zip` file. Additionally, if the `save_to_google_drive` option was selected, the `jobname.result.zip` will be uploaded to your Google Drive.
**Using a custom MSA as input**
To predict the structure with a custom MSA (A3M formatted): (1) change `msa_mode` to "custom", (2) wait for an upload box to appear at the end of the "Input Protein ..." box and upload your A3M. The first fasta entry of the A3M must be the query sequence without gaps.
To generate good input MSAs the HHblits server can be used here: https://toolkit.tuebingen.mpg.de/tools/hhblits
After submitting your query, click "Query Template MSA" -> "Download Full A3M". Download the a3m file and upload it to the notebook.
**Troubleshooting**
* Try to restart the session "Runtime" -> "Factory reset runtime".
* Check your input sequence.
**Known issues**
* Colab assigns different types of GPUs with varying amounts of memory. Some might not have enough memory to predict the structure.
* Your browser can block the pop-up for downloading the result file. You can choose the `save_to_google_drive` option to upload to Google Drive instead, or manually download the result file: click on the little folder icon on the left, navigate to the file `jobname.result.zip`, right-click and select "Download" (see [screenshot](https://pbs.twimg.com/media/E6wRW2lWUAEOuoe?format=jpg&name=small)).
**Limitations**
* MSAs: MMseqs2 is very precise and sensitive but might find fewer hits compared to HHblits/HMMer searched against BFD or Mgnify.
* Computing resources: Our MMseqs2 API can probably handle ~20k requests per day.
* For best results, we recommend using the full pipeline: https://github.com/deepmind/alphafold
**Description of the plots**
* **Number of sequences per position** - We want to see at least 30 sequences per position; for best performance, ideally 100 or more.
* **Predicted lDDT per position** - model confidence (out of 100) at each position. The higher the better.
* **Predicted Alignment Error** - For homooligomers, this can be a useful metric to assess how confident the model is about the interface. The lower the better.
**Bugs**
- If you encounter any bugs, please report the issue to https://github.com/sokrypton/ColabFold/issues
**Acknowledgments**
- We would like to thank the AlphaFold team for developing an excellent model and open sourcing the software.
- A colab by Sergey Ovchinnikov ([@sokrypton](https://twitter.com/sokrypton)), Milot Mirdita ([@milot_mirdita](https://twitter.com/milot_mirdita)) and Martin Steinegger ([@thesteinegger](https://twitter.com/thesteinegger)).
- Minkyung Baek ([@minkbaek](https://twitter.com/minkbaek)) and Yoshitaka Moriwaki ([@Ag_smith](https://twitter.com/Ag_smith)) for protein-complex prediction proof-of-concept in AlphaFold2.
- Also, credit to [David Koes](https://github.com/dkoes) for his awesome [py3Dmol](https://3dmol.csb.pitt.edu/) plugin, without whom these notebooks would be quite boring!
- For related notebooks see: [ColabFold](https://github.com/sokrypton/ColabFold)
| github_jupyter |
```
dat <- read.csv("full_cohort_data.csv")
```
\doublespacing
# Chapter Goals
In this subchapter, the reader will learn the fundamentals of logistic regression, and how to present and interpret such an analysis.
# Introduction
\doublespacing
In subchapter 5b we covered a very useful methodology for modeling quantitative or continuous outcomes. Of course, health outcomes come in many different data types. In fact, the health outcomes we often care about most -- cured/not cured, alive/dead -- are discrete binary outcomes. It would be ideal if we could extend the same general framework used for continuous outcomes to these binary outcomes. Logistic regression allows us to incorporate much of what we learned in the previous subchapter and apply the same principles to binary outcomes.
When dealing with binary data, we would like to be able to model the probability of a type of outcome given one or more covariates. One might ask, why not just use linear regression? There are several reasons why this is generally a bad idea. Probabilities need to lie between zero and one, and there is nothing in linear regression to constrain the estimated probabilities to this interval. This would mean you could have an estimated probability of 2, or even a negative probability! This is one unattractive property of such a method (there are others), and although it is sometimes used, the availability of good software such as `R` allows us to perform better analyses easily and efficiently. Before introducing such software, we should introduce the analysis of small contingency tables.
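As a purely hypothetical illustration (the numbers below are invented only to show the problem, not estimated from the data), a straight-line model for a probability can easily produce values outside $[0,1]$:
\centering
$\hat{p} = 0.10 + 0.03 \times \text{age}$, which gives $\hat{p} = 2.5$ when age $= 80$.
\raggedright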
# 2x2 Tables
\doublespacing
Contingency tables are the best way to start to think about binary data. A contingency table cross-tabulates the outcome across two or more levels of a covariate. Let's begin by creating a new variable (`age.cat`) which dichotomizes `age` into two age categories: $\le55$ and $>55$. Note, because we are making age a discrete variable, we also change the data type to a factor. This is similar to what we did for the `gender_num` variable when discussing linear regression in the previous subchapter. We can get a breakdown of the new variable using the `table` function.
\singlespacing
```
dat$age.cat <- as.factor(ifelse(dat$age<=55, "<=55",">55"))
table(dat$age.cat)
```
\doublespacing
We would like to see how 28 day mortality is distributed among the age categories. We can do so by constructing a contingency table, or in this case what is commonly referred to as a 2x2 table.
\singlespacing
```
table(dat$age.cat,dat$day_28_flg)
```
\doublespacing
From the above table, you can see that 40 patients in the young group ($\le55$) died within 28 days, while 243 in the older group died. These correspond to $P(\text{die} | \text{age}\le55) = 0.043$ or 4.3\% and $P(\text{die} | \text{age}>55) = 0.284$ or 28.4\%, where the "|" can be interpreted as "given" or "for those who have." This difference is quite marked, and we know that age is an important factor in mortality, so this is not surprising.
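For clarity, these proportions come directly from the row totals of the table above (the survivor counts 883 and 610 are the ones used again in the cross-product shortcut below):
\centering
$P(\text{die} \mid \text{age}\le55) = \frac{40}{40+883} \approx 0.043$ and $P(\text{die} \mid \text{age}>55) = \frac{243}{243+610} \approx 0.285$.
\raggedright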
The odds of an event happening is a positive number and can be calculated from the probability of an event, $p$, by the following formula
\centering
$\text{Odds} = \frac{p}{1-p}$.
\raggedright
An event with odds of zero never happens, and an event with very large odds (>100) is very likely to happen. Here, the odds of dying within 28 days in the young group is 0.043/(1-0.043)=0.045, and in the older group is 0.284/(1-0.284)=0.40. It is convenient to represent these two figures as a ratio, and the choice of what goes in the numerator and the denominator is somewhat arbitrary. In this case, we will put the older group's odds in the numerator and the younger group's in the denominator; in general, it is important to make clear which group is in the numerator and which is in the denominator. The resulting *odds ratio* is 0.40/0.045 = 8.79, which indicates a very strong association between age and death, and means that the odds of dying in the older group is nearly 9-fold higher than in the younger group. There is a convenient shortcut for the odds ratio calculation: make an X on the 2x2 table, multiply top left by bottom right, and divide by the product of bottom left and top right. In this case $\frac{883 \times 243}{610 \times 40}= 8.79$.
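To make the cross-product shortcut concrete, here is the same calculation written out with the cell counts of the table above (younger group in the top row, survivors in the first column):
\centering
$\text{OR} = \frac{\text{(young survived)} \times \text{(old died)}}{\text{(old survived)} \times \text{(young died)}} = \frac{883 \times 243}{610 \times 40} \approx 8.79$.
\raggedright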
<!-- [^ORnote]: There's a convenient shortcut for doing odds ratio calculation by making an X on a 2x2 table and multiplying top left by bottom right, then dividing it by the product of bottom left and top right. In this case $\frac{883 \times 243}{610 \times 40}= 8.79$. -->
Now let us look at a slightly different case -- when the covariate takes on more than two values. Such a variable is the `service_unit`. Let's see how the deaths are distributed among the different units:
\singlespacing
```
deathbyservice <- table(dat$service_unit,dat$day_28_flg)
deathbyservice
```
\doublespacing
We can get the proportion of deaths within each service unit by applying the `prop.table` function to our cross-tabulated table.
\singlespacing
```
dbys.proptable <- prop.table(deathbyservice,1)
dbys.proptable
```
\doublespacing
It appears as though the `FICU` may have a lower rate of death than either the `MICU` or `SICU`. To compute the odds ratios, first compute the odds:
\singlespacing
```
dbys.proptable[,"1"]/dbys.proptable[,"0"]
```
\doublespacing
and then we need to pick which of `FICU`, `MICU` or `SICU` will serve as the reference or baseline group. This is the group which the other two groups will be compared to. Again the choice is arbitrary, but should be dictated by the study objective. If this were a clinical trial with two drug arms and a placebo arm, it would be foolish to use one of the treatments as the reference group, particularly if you wanted to compare the efficacy of the treatments. In this particular case, there is no clear reference group, but since the FICU is so much smaller than the other two units, we will use it as the reference group. Computing the odds ratio for MICU and SICU we get 4.13 and 3.63, respectively. These are also very strong associations, meaning that the odds of dying in the SICU and MICU are around 4 times higher than in the FICU, but relatively similar.
Contingency tables and 2x2 tables in particular are the building blocks of working with binary data, and it's often a good way to begin looking at the data.
# Introducing Logistic Regression
While contingency tables are a fundamental way of looking at binary data, they are somewhat limited. What happens when the covariate of interest is continuous? We could of course create categories from the covariate by establishing cut points, but we may still miss some important aspect of the relationship between the covariate and the outcome by not choosing the right cut points. Also, what happens when we know that a nuisance covariate is related to both the outcome and the covariate of interest? This type of nuisance variable is called a confounder and occurs frequently in observational data; although there are ways of accounting for confounding in contingency tables, they become more difficult to use when more than one confounder is present.
Logistic regression is a way of addressing both of these issues, among many others. If you recall, using linear regression is problematic because it is prone to estimating probabilities outside of the [0,1] range. Logistic regression has no such problem per se, because it uses a link function known as the logit function, which maps probabilities in the interval $(0,1)$ onto the entire real line $(-\infty,\infty)$. This is important for many practical and technical reasons. The logit of $p$ and how it is related to the covariates is defined as
\centering
$logit(p_x) = log(Odds_x) = log(\frac{p_x}{1-p_x}) = \beta_0 + \beta_1 \times x$.
\raggedright
It is worth pointing out that log here, and in most places in statistics, refers to the natural logarithm, sometimes denoted $ln$.
The first covariate we were considering, `age.cat` was also a binary variable, where it takes on values 1 when the `age`$>55$ and 0 when `age`$\le55$. So plugging these values in, first for the young group $(x=0)$:
\centering
$logit(p_{x=0}) = log(Odds_{x=0}) = log(\frac{p_{x=0}}{1-p_{x=0}}) = \beta_0 + \beta_1 \times 0 = \beta_0$,
\raggedright
and then for the older group $(x=1)$:
\centering
$logit(p_{x=1}) = log(Odds_{x=1}) = log(\frac{p_{x=1}}{1-p_{x=1}}) = \beta_0 + \beta_1 \times 1 = \beta_0 + \beta_1$.
\raggedright
If we subtract the two cases, $logit(p_{x=1}) - logit(p_{x=0}) = log(Odds_{x=1}) - log(Odds_{x=0})$, we notice that this quantity is equal to $\beta_1$. If you recall the properties of logarithms, the difference of two logs is the log of their ratio, so $log(Odds_{x=1}) - log(Odds_{x=0}) = log(Odds_{x=1}/Odds_{x=0})$, which may be looking familiar. This is the log of the ratio of the odds, or the *log odds ratio*, in the $x=1$ group relative to the $x=0$ group. Hence, we can estimate odds ratios using logistic regression by exponentiating the coefficients of the model (the intercept notwithstanding, which we will get to in a moment).
Let's fit this model, and see how this works using a real example. We fit logistic regression very similarly to how we fit linear regression models, with a few exceptions. First, we will use a new function called `glm`, which is a very powerful function in `R` that allows one to fit a class of models known as generalized linear models or GLMs [@mccullagh1989generalized]. The `glm` function works in much the same way the `lm` function does. We need to specify a formula of the form: `outcome ~ covariates`, specify what dataset to use (in our case the `dat` data frame), and then specify the family. For logistic regression, `family='binomial'` will be our choice. You can run the `summary` function, just like you did for `lm`, and it produces output very similar to what `lm` did.
\singlespacing
```
age.glm <- glm(day_28_flg ~ age.cat,data=dat,family="binomial")
summary(age.glm)
```
\doublespacing
As you can see, we get a coefficients table that is similar to the `lm` table we used earlier. Instead of a `t value`, we get a `z value`, but this can be interpreted similarly. The rightmost column is a p-value, for testing the null hypothesis $\beta=0$. If you recall, the non-intercept coefficients are log-odds ratios, so testing if they are zero is equivalent to testing if the odds ratios are one. If an odds ratio is one the odds are equal in the numerator group and denominator group, indicating the probabilities of the outcome are equal in each group. So, assessing if the coefficients are zero will be an important aspect of doing this type of analysis.
Looking more closely at the coefficients, the intercept is `r round(age.glm$coef[1],2)` and the `age.cat` coefficient is `r round(age.glm$coef[2],2)`. The coefficient for `age.cat` is the log odds ratio for the 2x2 table we previously did the analysis on. When we exponentiate `r round(age.glm$coef[2],2)`, we get `exp(` `r round(age.glm$coef[2],2)` `)` = `r round(exp(age.glm$coef[2]),2)`. This corresponds with the estimate using the 2x2 table. For completeness, let's look at the other coefficient, the intercept. If you recall, $log(Odds_{x=0}) = \beta_0$, so $\beta_0$ is the log odds of the outcome in the younger group. Exponentiating again, `exp(` `r round(age.glm$coef[1],2)` `)` = `r round(exp(age.glm$coef[1]),3)`, and this corresponds with the previous analysis we did. Similarly, $log(Odds_{x=1}) = \beta_0 + \beta_1$, and the estimated odds of 28 day death in the older group is `exp(` `r round(age.glm$coef[1],2)` ` + ` `r round(age.glm$coef[2],2)` `)` = `r round(exp(sum(age.glm$coef[1:2])),2)`, as was found above. Converting estimated odds into a probability can be done directly using the `plogis` function, but we will cover a more powerful and easier way of doing this later on in the section.
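For reference, the conversion that `plogis` performs is just the inverse of the logit: given a log odds (the linear predictor), it returns the corresponding probability,
\centering
$p = \frac{e^{\beta_0 + \beta_1 x}}{1 + e^{\beta_0 + \beta_1 x}} = \frac{1}{1 + e^{-(\beta_0 + \beta_1 x)}}$.
\raggedright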
## Beyond a Single Binary Covariate
While the above analysis is useful for illustration, it does not readily demonstrate anything we could not do with our 2x2 table example above. Logistic regression allows us to extend the basic idea to at least two very relevant areas. The first is the case where we have more than one covariate of interest. Perhaps we have a confounder we are concerned about and want to adjust for, or maybe there are two covariates of interest. Secondly, it allows us to use covariates as continuous quantities, instead of discretizing them into categories. For example, instead of dividing age up into exhaustive strata (as we did very simply by just dividing the patients into two groups, $\le55$ and $>55$), we could instead use age as a continuous covariate.
First, having more than one covariate is simple. For example, if we wanted to add `service_unit` to our previous model, we could just add it as we did when using the `lm` function for linear regression. Here we specify `day_28_flg ~ age.cat + service_unit` and run the `summary` function.
\singlespacing
```
ageunit.glm <- glm(day_28_flg ~ age.cat + service_unit,data=dat,family="binomial")
summary(ageunit.glm)$coef
```
\doublespacing
A coefficient table is produced, and now we have four estimated coefficients. The same two, `(Intercept)` and `age.cat` which were estimated in the unadjusted model, but also we have `service_unitMICU` and `service_unitSICU` which correspond to the log odds ratios for the MICU and SICU relative to the FICU. Taking the exponential of these will result in an odds ratio for each variable, adjusted for the other variables in the model. In this case the adjusted odds ratios for Age>55, MICU and SICU are `r round(exp(ageunit.glm$coef[2]),2) `, `r round(exp(ageunit.glm$coef[3]),2) `, and `r round(exp(ageunit.glm$coef[4]),2) `, respectively. We would conclude that there is an almost 9-fold increase in the odds of 28 day mortality for those in the $>55$ year age group relative to the younger $\le55$ group while holding service unit constant. This adjustment becomes important in many scenarios where groups of patients may be more or less likely to receive treatment, but also more or less likely to have better outcomes, where one effect is confounded by possibly many others. Such is almost always the case with observational data, and this is why logistic regression is such a powerful data analysis tool in this setting.
Another case we would like to be able to deal with is when we have a continuous covariate we would like to include in the model. One can always break the continuous covariate into mutually exclusive categories by selecting break or cut points, but selecting the number and location of these points can be arbitrary, and in many cases unnecessary or inefficient. Recall that in logistic regression we are fitting a model:
\centering
$logit(p_x) = log(Odds_x) = log(\frac{p_x}{1-p_x}) = \beta_0 + \beta_1 \times x$,
\raggedright
but now assume $x$ is continuous. Imagine a hypothetical scenario where you know $\beta_0$ and $\beta_1$ and have a group of 50 year olds, and a group of 51 year olds. The difference in the log Odds between the two groups is:
\centering
$log(Odds_{51}) -log(Odds_{50}) = (\beta_0 + \beta_1 \times 51) - (\beta_0 + \beta_1 \times 50) = \beta_1(51-50) = \beta_1$.
\raggedright
Hence, the odds ratio for 51 year olds versus 50 year olds is $\exp{(\beta_1)}$. This is actually true for any group of patients which are 1 year apart, and this gives a useful way to interpret and use these estimated coefficients for continuous covariates. Let's work with an example. Again fitting the 28 day mortality outcome as a function of age, but treating age as it was originally recorded in the dataset, a continuous variable called `age`.
\singlespacing
```
agects.glm <- glm(day_28_flg ~ age,data=dat,family="binomial")
summary(agects.glm)$coef
```
\doublespacing
We see the estimated coefficient is `r round(agects.glm$coef[2],2)` and still very statistically significant. Exponentiating the log odds ratio for age, we get an estimated odds ratio of `r round(exp(agects.glm$coef[2]),2)`, which is per 1 year increase in age. What if the age difference of interest is ten years instead of one year? There are at least two ways of doing this. One is to replace `age` with `I(age/10)`, which uses a new covariate equal to `age` divided by ten. The second is to take the `agects.glm` estimated log odds ratio and multiply by ten prior to exponentiating. They will yield equivalent estimates of `r round(exp(agects.glm$coef[2]*10),2)`, but the estimate is now per 10 year increase in age. This is useful when the estimated odds ratios (or log odds ratios) are close to one (or zero). When this is done, one unit of the covariate is 10 years, so the generic interpretation of the coefficients remains the same, but the units (per 10 years instead of per 1 year) change.
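Written out, the two approaches agree because the log odds ratio scales linearly with the size of the age difference:
\centering
$log(Odds_{x+10}) - log(Odds_{x}) = 10\beta_1$, so $\text{OR}_{\text{per 10 years}} = \exp(10\beta_1) = \left(\exp(\beta_1)\right)^{10}$.
\raggedright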
This of course assumes that the form of our equation relating the log odds of the outcome to the covariate is correct. In cases where the odds of the outcome decrease and then increase as a function of the covariate, it is possible to estimate a relatively small effect for the linear covariate even when the outcome is strongly affected by the covariate, just not in the way the model is specified. Assessing the linearity of the log odds of the outcome against some discretized form of the covariate can be done graphically. For instance, we can break age into 5 groups and estimate the log odds of 28 day mortality in each group. Plotting these quantities in Figure 1, we can see that in this particular case age is indeed strongly related to the odds of the outcome. Further, expressing age linearly appears to be a good approximation. If, on the other hand, 28 day mortality had more of a "U"-shaped relationship with the covariate, we might falsely conclude that no relationship exists when the relationship may in fact be rather strong. Such may be the case when looking at the log odds of mortality by the first temperature (`temp_1st`) in Figure 1 (right).
<!-- [^mortagenote]: Not likely the case for ICU mortality and age, but possible for other types of covariates. -->
```
library(Hmisc); library(grid); library(gridExtra)
postscript("FigC1.eps")
#tmp <- prop.table(table(cut2(dat$age,g=5), dat$day_28_flg),1)
tmp.glm <- glm(day_28_flg ~ cut2(age,g=5),data=dat,family="binomial")
tmp <- tmp.glm$coef
tmp <- tmp[1] + c(0,tmp[2:5])
names(tmp) <- levels(cut2(dat$age,g=5))
library(ggplot2)
se <- sqrt(diag(summary(tmp.glm)$cov.unscaled) + c(0,diag(summary(tmp.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp.glm)$cov.unscaled[1,2:5]))
limits <- aes(ymax = tmp + se, ymin=tmp - se)
plotage <- qplot(names(tmp),tmp) + xlab("Age Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain"))
tmp2.glm <- glm(day_28_flg ~ cut2(temp_1st,g=5),data=dat,family="binomial")
tmp2 <- tmp2.glm$coef
tmp2 <- tmp2[1] + c(0,tmp2[2:5])
names(tmp2) <- levels(cut2(dat$temp_1st,g=5))
library(ggplot2)
se <- sqrt(diag(summary(tmp2.glm)$cov.unscaled) + c(0,diag(summary(tmp2.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp2.glm)$cov.unscaled[1,2:5]))
limits <- aes(ymax = tmp2 + se, ymin=tmp2 - se)
plottemp <- qplot(names(tmp2),tmp2) + xlab("Temperature Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain"))
grid.arrange(plotage, plottemp, nrow=1, ncol=2)
dev.off()
```
```{r echo=FALSE,message=FALSE,warning=FALSE,fig.cap="Plot of log-odds of mortality for each of the five age and temperature groups. Error bars represent 95% confidence intervals for the log odds"}
tmp.glm <- glm(day_28_flg ~ cut2(age,g=5),data=dat,family="binomial")
tmp <- tmp.glm$coef
tmp <- tmp[1] + c(0,tmp[2:5])
names(tmp) <- levels(cut2(dat$age,g=5))
library(ggplot2)
se <- sqrt(diag(summary(tmp.glm)$cov.unscaled) + c(0,diag(summary(tmp.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp.glm)$cov.unscaled[1,2:5]))
limits <- aes(ymax = tmp + se, ymin=tmp - se)
plotage <- qplot(names(tmp),tmp) + xlab("Age Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain"))
tmp2.glm <- glm(day_28_flg ~ cut2(temp_1st,g=5),data=dat,family="binomial")
tmp2 <- tmp2.glm$coef
tmp2 <- tmp2[1] + c(0,tmp2[2:5])
names(tmp2) <- levels(cut2(dat$temp_1st,g=5))
library(ggplot2)
se <- sqrt(diag(summary(tmp2.glm)$cov.unscaled) + c(0,diag(summary(tmp2.glm)$cov.unscaled)[-1]) + 2*c(0,summary(tmp2.glm)$cov.unscaled[1,2:5]))
limits <- aes(ymax = tmp2 + se, ymin=tmp2 - se)
plottemp <- qplot(names(tmp2),tmp2) + xlab("Temperature Group") + ylab("Log Odds of 28 Day Mortality") + geom_errorbar(limits, width=0.12) + theme(axis.text.x = element_text(colour="grey20",size=6,angle=0,hjust=.5,vjust=.5,face="plain"))
grid.arrange(plotage, plottemp, nrow=1, ncol=2)
```
# Hypothesis Testing and Model Selection
Just as in the case for linear regression, there is a way to test hypotheses for logistic regression. It follows much of the same framework, with the null hypothesis being $\beta=0$. If you recall, this is the log odds ratio, and testing if it is zero is equivalent to a test for the odds ratio being equal to one. Particularly when dealing with a single categorical covariate, there are techniques taught in introductory statistics courses which can be applied here (see `?fisher.test` and `?chisq.test`). In this chapter, we focus on how to conduct such a test in `R`.
<!-- [^chisqfisher]: See `?fisher.test` and `?chisq.test` for more details on how to do a Fisher's exact test or Chi-Squared test, respectively, in `R`. -->
As was the case when using `lm`, we first fit the two competing models, a larger (alternative model), and a smaller (null model). Provided that the models are nested, we can again use the `anova` function, passing the smaller model, then the larger model. Here our larger model is the one which contained `service_unit` and `age.cat`, and the smaller only contains `age.cat`, so they are nested. We are then testing if the log odds ratios for the two coefficients associated with `service_unit` are zero. Let's call these coefficients $\beta_{MICU}$ and $\beta_{SICU}$. To test if $\beta_{MICU}$ and $\beta_{SICU} = 0$, we can use the `anova` function, where this time we will specify the type of test, in this case set the `test` parameter to `"Chisq"`.
\singlespacing
```
anova(age.glm,ageunit.glm,test="Chisq")
```
\doublespacing
Here the output of the `anova` function when applied to `glm` objects looks similar to the output generated when used on `lm` objects. A couple of good practices to get in the habit of: first, make sure the two competing models are correctly specified -- here we are testing `~ age.cat` versus `~ age.cat + service_unit`. Next, the difference between the residual degrees of freedom (`Resid. Df`) in the two models tells us how many more parameters the larger model has when compared to the smaller model. Here we see `1774 - 1772 = 2`, which means that two more coefficients are estimated in the larger model than in the smaller one, which corresponds with the output from the `summary` table above. Next, looking at the p-value (`Pr(>Chi)`), we see that a test of $\beta_{MICU}$ and $\beta_{SICU} = 0$ has a p-value of around 0.08. At the typical 0.05 significance level, we would not reject the null, and would use the simpler model without the service unit. In logistic regression, this is a common way of testing whether a categorical covariate should be retained in the model, as it can be difficult to assess using the `z value` in the `summary` table, particularly when one level is very statistically significant and another is not.
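For readers who want the underlying mechanics: with `test="Chisq"`, the `anova` call is performing a likelihood ratio (deviance difference) test. Writing $\ell$ for the maximized log-likelihood of each model, the statistic and its reference distribution under the null are
\centering
$\chi^2 = 2\left(\ell_{\text{larger}} - \ell_{\text{smaller}}\right) \sim \chi^2_{df}$, with $df = 2$ here (the number of extra parameters).
\raggedright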
# Confidence Intervals
Generating confidence intervals for either the log odds ratios or the odds ratios is relatively straightforward. To get the log odds ratios and their respective confidence intervals for the `ageunit.glm` model, which includes both age and service unit:
\singlespacing
```
ageunit.glm$coef
confint(ageunit.glm)
```
\doublespacing
Here the coefficient estimates and confidence intervals are presented in much the same way as for a linear regression. In logistic regression, it is often convenient to exponentiate these quantities to put them on a more interpretable scale.
\singlespacing
```
exp(ageunit.glm$coef[-1])
exp(confint(ageunit.glm)[-1,])
```
\doublespacing
Similar to linear regression, we will look at whether the confidence intervals for the log odds ratios include zero. This is equivalent to seeing whether the intervals for the odds ratios include 1. Since odds ratios are more directly interpretable, it is often more convenient to report them instead of the coefficients on the log odds ratio scale.
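The reason exponentiating the interval endpoints is valid: the exponential function is increasing, so an interval for the log odds ratio maps directly to an interval for the odds ratio with the same coverage,
\centering
$P(L \le \beta \le U) = 0.95 \;\Rightarrow\; P(e^{L} \le e^{\beta} \le e^{U}) = 0.95$.
\raggedright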
# Prediction
Once you have decided on your final model, you may want to generate predictions from it. Such a task may occur when doing a propensity score analysis (Chapter 3.9) or creating tools for clinical decision support. In the logistic regression setting, this involves estimating the probability of the outcome given the characteristics (covariates) of a patient. This quantity is often denoted $P(outcome | X)$. This is relatively easy to accomplish in `R` using the `predict` function. One must pass a dataset with all the variables contained in the model. Let's assume that we decided to include the `service_unit` in our final model, and want to generate predictions based on a new set of patients. Let's first create a new data frame called `newdat` using the `expand.grid` function, which computes all combinations of the values of the variables passed to it.
\singlespacing
```
newdat <- expand.grid(age.cat=c("<=55",">55"),service_unit=c("FICU","MICU","SICU"))
newdat$pred <- predict(ageunit.glm,newdata=newdat,type="response")
newdat
```
\doublespacing
We followed this by adding a `pred` column to our new data frame using the `predict` function. The `predict` function for logistic regression works similarly to when we used it for linear regression, but this time we also specify `type="response"`, which ensures the quantities computed are what we need, $P(outcome | X)$. Outputting this new object shows our predicted probability of 28 day mortality for six hypothetical patients: two in each of the service units, one in the younger group and one in the older group. We see that our lowest prediction is for the youngest patients in the FICU, while the patients with the highest risk of 28 day mortality are the older group in the MICU, though their predicted probability is not all that much higher than for the same age patients in the SICU.
To do predictions on a different dataset, just replace the `newdata` argument with the other dataset. We could, for instance, pass `newdata=dat` and receive predictions for the dataset we built the model on. As was the case with linear regression, evaluating the predictive performance of our model on data used to build the model will generally be too optimistic as to how well it would perform *in the real world*. How to get a better sense of the accuracy of such models is covered in Chapter 3.2.
# Presenting and Interpreting Logistic Regression Analysis
In general, presenting the results from a logistic regression model follows quite closely what was done in the linear regression setting. Results should always be put in context, including what variables were considered and which variables were in the final model. Reporting the results should always include some form of the coefficient estimate, a measure of uncertainty and likely a p-value. In medical and epidemiological journals, coefficients are usually exponentiated so that they are no longer on the log scale, and reported as odds ratios. Frequently, multivariable analyses (analyses with more than one covariate) are distinguished from univariate analyses (one covariate) by denoting the estimated odds ratios as adjusted odds ratios (AOR).
For the `age.glm` model, an example of what could be reported is:
> Mortality at 28 days was much higher in the older ($>55$ years) group than the younger group ($\le55$ years), with rates of 28.5% and 4.3%, respectively (OR=8.79, 95% CI: 6.27-12.64, p<0.001).
For when treating age as a continuous covariate in the `agects.glm` model we could report:
> Mortality at 28 days was associated with older age (OR=1.07 per year increase, 95% CI: 1.06-1.08, p<0.001).
And for the case with more than one covariate, (`ageunit.glm`) an example of what could be reported:
> Older age ($>55$ vs $\le55$ years) was independently associated with 28 day mortality (AOR=8.68, 95% CI: 6.18-12.49, p<0.001) after adjusting for service unit.
# Caveats and Conclusions
As was the case with linear regression, logistic regression is an extremely powerful tool for the analysis of health data. Although the study outcomes in each approach are different, the framework and way of thinking about the problem have similarities. Likewise, many of the problems encountered in linear regression are also of concern in logistic regression. Outliers, missing data, collinearity and dependent/correlated outcomes are all problems for logistic regression as well, and can be dealt with in a similar fashion. Modelling assumptions are a concern as well, and we briefly touched on this when discussing whether it was appropriate to use age as a continuous covariate in our models. Although continuous covariates are frequently modeled in this way, it is important to check whether the relationship between the log odds of the outcome and the covariate is indeed linear. In cases where the data have been divided into too many subgroups (or the study is simply too small), you may encounter a level of a discrete variable where none (or very few) of one of the outcomes occurred -- for example, if we had an additional `service_unit` with 50 patients, all of whom lived. In such a case, the estimated odds ratios and subsequent confidence intervals or hypothesis tests may not be appropriate to use, and collapsing the discrete covariate into fewer categories will often help return the analysis to a manageable form. For our hypothetical new service unit, creating a new group combining it with the FICU would be a possible solution. Sometimes a covariate is so strongly related to the outcome that this is no longer possible, and the only solution may be to report this finding and remove these patients from the analysis.
Overall, logistic regression is a very valuable tool for modelling binary and categorical data. Although we did not cover this latter case, a similar framework is available for discrete outcomes which are ordered or have more than two categories (see `?multinom` in the `nnet` package in `R` for details about multinomial logistic regression). This and other topics, such as assessing model fit and using logistic regression in more complicated study designs, are discussed in [@hosmer2004applied].
<!-- [^multinomial]: This is in general called multinomial logistic regression, which can be modeled using the `multinom` function in the `nnet` package. -->
# References
| github_jupyter |
<a href="https://cognitiveclass.ai"><img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width = 400> </a>
<h1 align = "center"> Spark Fundamentals I - Introduction to Spark</h1>
<h2 align = "center"> Getting Started</h2>
<br align = "left">
**Related free online courses:**
Related courses can be found in the following learning paths:
- [Spark Fundamentals path](http://cocl.us/Spark_Fundamentals_Path)
- [Big Data Fundamentals path](http://cocl.us/Big_Data_Fundamentals_Path)
<img src="http://spark.apache.org/images/spark-logo.png" height=100>
## Spark is built around speed and ease of use. In these labs you will see for yourself how easy it is to get started using Spark.
Spark’s primary abstraction is a distributed collection of items called a Resilient Distributed Dataset, or RDD. In a subsequent lab exercise, you will learn more about the details of RDDs. RDDs have actions, which return values, and transformations, which return pointers to new RDDs.
This set of labs uses Skills Network (SN) Labs to provide an interactive environment to develop applications and analyze data. It is available in either Scala or Python shells. Scala runs on the Java VM and is thus a good way to use existing Java libraries. In this lab exercise, we will set up our environment in preparation for the later labs.
After completing this set of hands-on labs, you should be able to:
1. Perform basic RDD actions and transformations
2. Use caching to speed up repeated operations
### Using this notebook
This is an interactive environment where you can show your code through cells, and documentation through markdown.
Look at the top right corner. Do you see "Python 3"? This indicates that you are running Python in this notebook.
**To run a cell:** Shift + Enter
### Try creating a new cell below.
**To create a new cell:** In the menu, go to _"Insert" > "Insert Cell Below"_. Or, click outside of a cell, and press "a" (insert cell above) or "b" (insert cell below).
# Lab Setup
Run the following cells to get the lab data.
```
# download the data from the IBM server
# this may take ~30 seconds depending on your internet speed
!wget --quiet https://cocl.us/BD0211EN_Data
print("Data Downloaded!")
```
Let's unzip the data that we just downloaded into a directory dedicated for this course. Let's choose the directory **/resources/jupyterlab/labs/BD0211EN/**.
```
# this may take ~30 seconds depending on your internet speed
!unzip -q -o -d /resources/jupyterlab/labs/BD0211EN/ BD0211EN_Data
print("Data Extracted!")
```
The data is in a folder called **LabData**. Let's list all the files in the data that we just downloaded and extracted.
```
# list the extracted files
!ls -1 /resources/jupyterlab/labs/BD0211EN/LabData
```
You should have:
* followers.txt
* notebook.log
* nyctaxi100.csv
* nyctaxi.csv
* nyctaxisub.csv
* nycweather.csv
* pom.xml
* README.md
* taxistreams.py
* users.txt
### Starting with Spark
Let's first import the tools that we need to use Spark in this SN Labs.
```
!pip install findspark
!pip install pyspark
import findspark
import pyspark
findspark.init()
sc = pyspark.SparkContext.getOrCreate()
```
The notebooks provide code assist. For example, type in "sc." followed by the Tab key to get the list of options associated with the spark context:
```
sc.
```
To run a command as code, simply select the cell you want to run and either:
* Click the play button in the toolbar above
* Press "_Shift+Enter_"
Let's run a basic command and check the version of Spark running:
```
sc.version
```
Add in the path to the *README.md* file in **LabData**.
```
readme = sc.textFile("/resources/jupyterlab/labs/BD0211EN/LabData/README.md")
```
Let’s perform some RDD actions on this text file. Count the number of items in the RDD using this command:
```
readme.count()
```
You should see that this RDD action returned a value of 103.
Let’s run another action. Run this command to find the first item in the RDD:
```
readme.first()
```
Now let’s try a transformation. Use the filter transformation to return a new RDD with a subset of the items in the file. Type in this command:
```
linesWithSpark = readme.filter(lambda line: "Spark" in line)
```
You can even chain together transformations and actions. To find out how many lines contain the word “Spark”, type in:
```
linesWithSpark = readme.filter(lambda line: "Spark" in line)
readme.filter(lambda line: "Spark" in line).count()
```
# More on RDD Operations
This section builds upon the previous section. In this section, you will see that RDD can be used for more complex computations. You will find the line from that "README.md" file with the most words in it.
Run the following cell.
```
readme.map(lambda line: len(line.split())).reduce(lambda a, b: a if (a > b) else b)
```
There are two parts to this. The first maps a line to an integer value, the number of words in that line. In the second part reduce is called to find the line with the most words in it. The arguments to map and reduce are Python anonymous functions (lambdas), but you can use any top level Python functions. In the next step, you’ll define a max function to illustrate this feature.
Define the max function. You will need to type this in:
```
def max(a, b):
if a > b:
return a
else:
return b
```
Now run the following with the max function:
```
readme.map(lambda line: len(line.split())).reduce(max)
```
Spark has a MapReduce data flow pattern. We can use this to do a word count on the readme file.
```
wordCounts = readme.flatMap(lambda line: line.split()).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
```
Here we combined the flatMap, map, and the reduceByKey functions to do a word count of each word in the readme file.
To collect the word counts, use the _collect_ action.
#### It should be noted that the collect function brings all of the data into the driver node. For a small dataset this is acceptable, but for a large dataset it can cause an Out Of Memory error. It is recommended to use collect() for testing only. The safer approach is to use the take() function, e.g. `print(wordCounts.take(n))`.
```
wordCounts.collect()
```
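As a small sketch of the safer pattern mentioned above (the `10` below is just an arbitrary number of elements to bring back to the driver):
```
# preview a handful of (word, count) pairs instead of collecting the entire RDD
print(wordCounts.take(10))
```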
### <span style="color: red">YOUR TURN:</span>
#### In the cell below, determine what is the most frequent word in the README, and how many times was it used?
```
# WRITE YOUR CODE BELOW
```
Double-click __here__ for the solution.
<!-- The correct answer is:
wordCounts.reduce(lambda a, b: a if (a[1] > b[1]) else b)
-->
## Using Spark caching
In this short section, you’ll see how Spark caching can be used to pull data sets into a cluster-wide in-memory cache. This is very useful for accessing repeated data, such as querying a small “hot” dataset or when running an iterative algorithm. Both Python and Scala use the same commands.
As a simple example, let’s mark our linesWithSpark dataset to be cached and then invoke the first count operation to tell Spark to cache it. Remember that cache(), like transformations, is lazy and does not get processed until some action like count() is called. Once you run the second count() operation, you should notice a small increase in speed.
```
print(linesWithSpark.count())
from timeit import Timer
def count():
return linesWithSpark.count()
t = Timer(lambda: count())
print(t.timeit(number=50))
linesWithSpark.cache()
print(t.timeit(number=50))
```
It may seem silly to cache such a small file, but for larger data sets across tens or hundreds of nodes, this would still work. The second linesWithSpark.count() action runs against the cache and would perform significantly better for large datasets.
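If you want to check that the RDD is cached, or free the memory once you are done, the small sketch below uses standard RDD methods (it is not part of the original lab):
```
# inspect the storage level that cache() assigned, then release the cached blocks
print(linesWithSpark.getStorageLevel())
linesWithSpark.unpersist()
```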
<div class="alert alert-success alertsuccess" style="margin-top: 20px">
<strong>Tip</strong>: Enjoyed using Jupyter notebooks with Spark? Get yourself a free
<a href="http://cocl.us/DSX_on_Cloud">IBM Cloud</a> account where you can use Data Science Experience notebooks
and have <em>two</em> Spark executors for free!
</div>
### Summary
Having completed this exercise, you should now be able to log in to your environment and use the Spark shell to run simple actions and transformations for Scala and/or Python. You understand that Spark caching can be used to cache large datasets and subsequent operations on it will utilize the data in the cache rather than re-fetching it from HDFS.
This notebook is part of the free course on **cognitiveclass.ai** called *Spark Fundamentals I*. If you accessed this notebook outside the course, you can take this free self-paced course online by going to: http://cocl.us/Spark_Fundamentals_I
### About the Authors:
Hi! It's Alex Aklson, one of the authors of this notebook. I hope you found this lab educational! There is much more to learn about Spark but you are well on your way. Feel free to connect with me if you have any questions.
<hr>
| github_jupyter |
# One variable plotting using python's [matplotlib](https://matplotlib.org/)
<p>referenced from Josh Peters' implementation in R</p>
## [1] Import all necessary files
* use ipython magic `%matplotlib inline` to show figures beneath each cell
* `SHOW = True` will show details on the dataframes and figures below. Set it to False to hide these outputs
* `%qtconsole` opens a nice GUI which lets you dabble in python code outside of the jupyter notebook
```
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib import pylab as plt
from matplotlib import colors
%matplotlib inline
import os
SHOW = True
# open qtconsole for debugging
OPEN_QT = True
if OPEN_QT:
%qtconsole
OPEN_QT = False
```
## [2] Read and peruse the data as a dataframe
```
# read in data
filename = "nature25479_f2_formatted.csv"
data_df = pd.read_csv(filename, sep='\t')
# get column names, because there are weird escape characters in the csv
Species, Individual, Peak_Power = data_df.columns
# show data
if SHOW:
print("\n---dataframe column names---")
print(Species + ", " + Individual + ", " + Peak_Power)
print("\n---head of dataframe---")
print(data_df.head())
print("\n---info of dataframe---")
print(data_df.info())
# copy df
data_original = data_df.copy()
# drop the individual's column, not used
data_df = data_original.drop(Individual, axis=1)
# group by species on peak power column
data_group = data_df.groupby(by=Species)
# apply method on group, such as average or std
data_count = data_group.count()
data_mean = data_group.mean()
data_std = data_group.std()
data_sem = data_std/data_count
# show data
if SHOW:
print("\n---number of data points per species---")
print(data_count)
print("\n---mean per species---")
print(data_mean)
print("\n---standard deviation per species---")
print(data_std)
print("\n---standard error from the mean per species---")
print(data_sem)
```
## [3] Set up the figure parameters using [default variables](https://matplotlib.org/users/dflt_style_changes.html)
defaults are obtained using matplotlib's rcParams dictionary
```
# figure size
rcParams['figure.figsize'] = (2,4) # figsize height x width
# text sizes (follow publication guidelines)
SMALL_SIZE = 6
MEDIUM_SIZE = 8
BIGGER_SIZE = 10
rcParams['font.size'] = SMALL_SIZE # controls default text sizes
rcParams['axes.titlesize'] = SMALL_SIZE # fontsize of the axes title
rcParams['axes.labelsize'] = MEDIUM_SIZE # fontsize of the x and y labels
rcParams['xtick.labelsize'] = SMALL_SIZE # fontsize of the tick labels
rcParams['ytick.labelsize'] = SMALL_SIZE # fontsize of the tick labels
rcParams['legend.fontsize'] = SMALL_SIZE # legend fontsize
rcParams['figure.titlesize'] = BIGGER_SIZE # fontsize of the figure title
# convert data to array (rather than dataframe) and input to violin plot
# this may not be the most optimal way, please add in your suggestions
unique_animals = data_df[Species].unique()
data_list = []
for animal in unique_animals:
# grab data associated per species (aka animal), and only the peak power column (aka data)
animal_data = data_df[data_df[Species] == animal][Peak_Power]
animal_data = animal_data.values
data_list.append(animal_data)
```
## [4] Iteratively improve on a violin plot
```
# plot basic skeleton
fig, ax = plt.subplots(1,1);
ax.violinplot(data_list)
# make them horizontal
# note, that we want to rotate them from left to right
data_list = data_list[::-1]
ax.clear()
ax.violinplot(data_list, vert=False)
fig
# remove and add relevant pieces
ax.clear()
parts = ax.violinplot(data_list,
showmeans=True,
showmedians=False,
showextrema=False,
vert=False)
if SHOW:
print("\n---parameters of the violin plot that you can change---")
print(parts.keys())
fig
# change colors of violins
# colors
cmap = np.array(["#B9DC3D", "#78CE5C", "#3B568A", "#45397F"])
cmap_rep = np.array(["#4473B0", "#C15436", "#6E348C", "#E4AC43"])
# opacity
alpha = 1
for color, patch in zip(cmap, parts['bodies']):
# annoying aspect of color changing, changing alpha changes both face and edge color
# convert color to rgb and manually insert alpha channel
# note that the data type we're dealing with is tuple (immutable)
rgb = colors.hex2color(color)
rgba = [rgb[0], rgb[1], rgb[2], alpha]
# convert edge and face color individually
patch.set_facecolor(rgba)
patch.set_edgecolor('k')
# patch.set_alpha(0.25)
# change line color
line_collection = parts['cmeans']
line_collection.set_color(cmap)
fig
# place points
# matplotlib's dogma is not to tinker with the data; consequently, there is no built-in jitter command,
# so we will make one of our own
def jitter(N, index, amplitude=0.25, method='gaussian'):
"""
@N : number of data points to create new indexes for
@index : numerical value. Index to plot, or equivalently the mean of the gaussian distribution
@amplitude : noise power
@method : gaussian or random
returns: 1D array of list with gaussian noise
"""
new_index = index * np.ones(N)
if method == 'gaussian':
return new_index + (np.random.normal(size=N) * amplitude)
elif method == "random":
return new_index + (np.random.uniform(-1, 1, N) * amplitude)
else:
raise Exception("invalid method. Please choose between gaussian or random")
# add in the data using scatter plot
s=12
markers = ['o' ,'s', 'D', '^']
# iterate through each data and assign the appropriate attributes
for index, data in enumerate(data_list):
# add jitter to index
N = len(data)
amplitude=0.05
new_index = jitter(N, index+1, amplitude=amplitude) # index starts at 1 for violin plot
# plot scatter
# don't forget we switched the x and y cood with vert=False
ax.scatter(data, new_index,
c=cmap[index],
edgecolor=cmap[index],
marker=markers[index],
linewidths=0.5,
s=s,
alpha=0.5,
zorder=0)
fig
# annotations
# you'll notice that many things are hardcoded or manually input; all options are user-specific
# add y ticks
ax.set_yticks([1, 2, 3, 4])
ax.set_yticklabels(unique_animals[::-1])
# add x label
# adding some latex magic with r'$ $'
ax.set_xlabel(r'Peak Power (W kg$^{\mathregular{-1}}$)', weight='bold')
# add y label
ax.set_ylabel('Species', weight='bold', rotation=0, labelpad=30)
# remove spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# remove ticks
# tick_params is a powerful wrapper for controlling many aspects of ticks and tick labels
ax.tick_params(top='off', right='off')
# adjusting x limits
ax.set_xlim([0, 225])
ax.locator_params(axis='x', nbins=5)
# just because I don't like zeros, substitute the first 0'th index with empty string
xticks = ax.get_xticklabels()
xticks = [""] + xticks[1:]
ax.set_xticklabels(xticks)
fig
# save figure
SAVE = True
if SAVE:
figure_name = "figure"
fig.savefig("{}.eps".format(figure_name), transparent=True, format='eps')
fig.savefig("{}.png".format(figure_name), transparent=True, format='png')
fig.savefig("{}.pdf".format(figure_name), transparent=True, format='pdf')
fig.savefig("{}.svg".format(figure_name), transparent=True, format='svg')
```
| github_jupyter |
# Pre-process test data
This notebook takes you through the steps of how to preprocess a high S/N and low S/N test set
* required packages: numpy, h5py, vos
* required data files: apStar_combined_main.h5 and training_data.h5
```
import numpy as np
import h5py
import os
import vos
datadir=""
```
**If you have not downloaded apStar_combined_main.h5, uncomment the code below to copy the file**
Note: this file requires 10.3 GB. It is necessary to download this file to run this particular notebook, although this notebook can be skipped by downloading the files created here separately. See $1\_Download\_Data.ipynb$ for instructions on how to do so.
```
'''
def starnet_download_file(filename):
vclient = vos.Client()
vclient.copy('vos:starnet/public/'+filename, datadir+filename)
print(filename+' downloaded')
starnet_download_file('apStar_combined_main.h5')
'''
filename = datadir + 'apStar_combined_main.h5'
F = h5py.File(filename,'r')
print('Dataset keys in file: \n')
print(list(F.keys()))
```
**Load the data into memory**
To test StarNet we need the spectra, error spectra, combined S/N, and labels. However, to compare against labels of the highest validity we must first make some eliminations from the test set, so we start by loading the APOGEE IDs, the S/N of the combined spectra, $T_{\mathrm{eff}}$, $\log(g)$, [Fe/H], $V_{scatter}$, STARFLAGs, and ASPCAPFLAGs. Once the stars for the test sets have been collected, we gather the spectra and error spectra and save the two test sets to an h5 file.
```
ap_id = F['IDs'][:,0]
combined_snr = F['stacked_snr'][:]
starflag = F['star_flag']
aspcapflag = F['aspcap_flag']
teff = F['TEFF'][:]
logg = F['LOGG'][:]
fe_h = F['FE_H'][:]
vscatter = F['VSCATTER']
print('Obtained data for '+str(len(list(set(list(ap_id)))))+' stars.')
```
**Collect label normalization data**
Create a file that contains the mean and standard deviation for $T_{\mathrm{eff}}$, $\log(g)$, and $[Fe/H]$ in order to normalize labels during training and testing. Ignore values equal to -9999.
```
mean = np.array([np.mean(teff[teff!=-9999.]),np.mean(logg[logg!=-9999.]),np.mean(fe_h[fe_h!=-9999.])])
std = np.array([np.std(teff[teff!=-9999.]),np.std(logg[logg!=-9999.]),np.std(fe_h[fe_h!=-9999.])])
mean_and_std = np.row_stack((mean,std))
np.save(datadir+'mean_and_std', mean_and_std)
print('mean_and_std.npy saved')
```
**Separate out a dataset with good labels**.
- STARFLAGs = 0
- ASPCAPFLAGs = 0
- 4000K < $T_{\mathrm{eff}}$ < 5500K
- -3.0 < [Fe/H]
- $\log(g)$ $\neq$ -9999. (value defined by ASPCAP when no ASPCAP labels are given)
- $V_{scatter}$ < 1.0 km/s
```
teff_min = 4000.
teff_max = 5500.
vscatter_max = 1.
fe_h_min = -3.
indices, cols = np.where((aspcapflag[:]==0.)&(starflag[:]==0.)&(vscatter[:]<vscatter_max)&(fe_h[:]>fe_h_min)&(teff[:]>teff_min)&(teff[:]<teff_max)&(logg[:]!=-9999.).reshape(len(ap_id),1))
ap_id = ap_id[indices]
teff = teff[indices]
logg = logg[indices]
fe_h = fe_h[indices]
combined_snr = combined_snr[indices]
print(str(len(list(set(list(ap_id)))))+' stars remain.')
```
**Load test set APOGEE IDs**
Load the previously created file that contains the training data. We do not want to include any of the APOGEE IDs used in the training set in our test set. This file was created in 2_Preprocessing_of_Training_Data.ipynb.
```
savename = 'training_data.h5'
with h5py.File(datadir + savename, "r") as f:
train_ap_id = f['Ap_ID'][:]
```
**Separate data for High S/N test set**
```
indices_test = [i for i, item in enumerate(ap_id) if item not in train_ap_id]
test_ap_id = ap_id[indices_test]
test_teff = teff[indices_test]
test_logg = logg[indices_test]
test_fe_h = fe_h[indices_test]
test_combined_snr = combined_snr[indices_test]
indices_test_set = indices[indices_test] # These indices will be used to index through the spectra
print('Test set includes '+str(len(test_ap_id))+' combined spectra')
```
**Now collect spectra and error spectra. Then normalize each spectrum and save the data**
**Steps taken to normalize spectra:**
1. separate into three chips
2. divide by median value in each chip
3. recombine each spectrum into a vector of 7214 flux values
4. Error spectra must also be normalized with the same median values for use in the error propagation
```
# Define edges of detectors
blue_chip_begin = 322
blue_chip_end = 3242
green_chip_begin = 3648
green_chip_end = 6048
red_chip_begin = 6412
red_chip_end = 8306
savename = 'test_data.h5'
with h5py.File(datadir + savename, "w") as f:
# Create datasets for your test data file
spectra_ds = f.create_dataset('spectrum', (1,7214), maxshape=(None,7214), dtype="f", chunks=(1,7214))
error_spectra_ds = f.create_dataset('error_spectrum', (1,7214), maxshape=(None,7214), dtype="f", chunks=(1,7214))
teff_ds = f.create_dataset('TEFF', test_teff.shape, dtype="f")
logg_ds = f.create_dataset('LOGG', test_logg.shape, dtype="f")
fe_h_ds = f.create_dataset('FE_H', test_fe_h.shape, dtype="f")
combined_snr_ds = f.create_dataset('combined_snr', test_combined_snr.shape, dtype="f")
ap_id_ds = f.create_dataset('Ap_ID', test_ap_id.shape, dtype="S18")
# Save data to data file
teff_ds[:] = test_teff
logg_ds[:] = test_logg
fe_h_ds[:] = test_fe_h
combined_snr_ds[:] = test_combined_snr
ap_id_ds[:] = test_ap_id.tolist()
# Collect spectra
first_entry=True
for i in indices_test_set:
spectrum = F['spectrum'][i:i+1]
spectrum[np.isnan(spectrum)]=0.
err_spectrum = F['error_spectrum'][i:i+1]
# NORMALIZE SPECTRUM
# Separate spectra into chips
blue_sp = spectrum[0:1,blue_chip_begin:blue_chip_end]
green_sp = spectrum[0:1,green_chip_begin:green_chip_end]
red_sp = spectrum[0:1,red_chip_begin:red_chip_end]
blue_sp_med = np.median(blue_sp, axis=1)
green_sp_med = np.median(green_sp, axis=1)
red_sp_med = np.median(red_sp, axis=1)
# Normalize spectra by chips
blue_sp = (blue_sp.T/blue_sp_med).T
green_sp = (green_sp.T/green_sp_med).T
red_sp = (red_sp.T/red_sp_med).T
# Recombine spectra
spectrum = np.column_stack((blue_sp,green_sp,red_sp))
# Normalize error spectrum using the same method
# Separate error spectra into chips
blue_sp = err_spectrum[0:1,blue_chip_begin:blue_chip_end]
green_sp = err_spectrum[0:1,green_chip_begin:green_chip_end]
red_sp = err_spectrum[0:1,red_chip_begin:red_chip_end]
# Normalize error spectra by chips
blue_sp = (blue_sp.T/blue_sp_med).T
green_sp = (green_sp.T/green_sp_med).T
red_sp = (red_sp.T/red_sp_med).T
# Recombine error spectra
err_spectrum = np.column_stack((blue_sp,green_sp,red_sp))
if first_entry:
spectra_ds[0] = spectrum
error_spectra_ds[0] = err_spectrum
first_entry=False
else:
spectra_ds.resize(spectra_ds.shape[0]+1, axis=0)
error_spectra_ds.resize(error_spectra_ds.shape[0]+1, axis=0)
spectra_ds[-1] = spectrum
error_spectra_ds[-1] = err_spectrum
print(savename+' has been saved as the test set to be used in 5_Test_Model.ipynb')
```
| github_jupyter |
### Dataset Information
This dataset contains information on default payments, demographic factors, credit data, history of payment, and bill statements of credit card clients in Taiwan from April 2005 to September 2005.
ATTRIBUTES DESCRIPTION-
There are 25 variables:
* ID: ID of each client
* LIMIT_BAL: Amount of given credit in NT dollars (includes individual and family/supplementary credit)
* SEX: Gender (1=male, 2=female)
* EDUCATION: (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown)
* MARRIAGE: Marital status (1=married, 2=single, 3=others)
* AGE:Age in years
* PAY_0: Repayment status in September, 2005 (-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above)
* PAY_2: Repayment status in August, 2005 (scale same as above)
* PAY_3: Repayment status in July, 2005 (scale same as above)
* PAY_4: Repayment status in June, 2005 (scale same as above)
* PAY_5: Repayment status in May, 2005 (scale same as above)
* PAY_6: Repayment status in April, 2005 (scale same as above)
* BILL_AMT1: Amount of bill statement in September, 2005 (NT dollar)
* BILL_AMT2: Amount of bill statement in August, 2005 (NT dollar)
* BILL_AMT3: Amount of bill statement in July, 2005 (NT dollar)
* BILL_AMT4: Amount of bill statement in June, 2005 (NT dollar)
* BILL_AMT5: Amount of bill statement in May, 2005 (NT dollar)
* BILL_AMT6: Amount of bill statement in April, 2005 (NT dollar)
* PAY_AMT1: Amount of previous payment in September, 2005 (NT dollar)
* PAY_AMT2: Amount of previous payment in August, 2005 (NT dollar)
* PAY_AMT3: Amount of previous payment in July, 2005 (NT dollar)
* PAY_AMT4: Amount of previous payment in June, 2005 (NT dollar)
* PAY_AMT5: Amount of previous payment in May, 2005 (NT dollar)
* PAY_AMT6: Amount of previous payment in April, 2005 (NT dollar)
* default.payment.next.month: Default payment (1=yes, 0=no)
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import re
# Set working directory
os.chdir("/Users/Dippies/CODE PACKT - EML/Chapter 6")
```
### Read the data
```
df_creditcarddata = pd.read_csv("UCI_Credit_Card.csv")
df_creditcarddata.head() # see if it is loaded properly
df_creditcarddata.tail()
df_creditcarddata.shape #check dimensions of loaded data
df_creditcarddata.dtypes #check data types
df_creditcarddata.columns.values # check column names
df_creditcarddata.describe()
# average value for amount of credit card limit = 167484
# 75% of the clients are single
# 35yrs is the average age
df_creditcarddata = df_creditcarddata.drop("ID", axis= 1) #ID not needed
```
### EXPLORE DEFAULTING
```
sns.factorplot('default.payment.next.month', data=df_creditcarddata, \
kind='count', hue='default.payment.next.month', \
aspect=1.00)
```
### Checking data imbalance
```
print((df_creditcarddata["default.payment.next.month"].value_counts())*100/len(df_creditcarddata["default.payment.next.month"]))
# It says that only about 22% of credit card contracts will default
# Since the data does not have a large imbalance, we will verify this at a later stage
df_creditcarddata.apply(lambda x: len(x.unique()))
print((df_creditcarddata== 0).sum())
# The 'EDUCATION' attribute has 14 zeros and 'MARRIAGE' has 54 zeros, meaning there are 14 clients
# with an 'unknown' education level and 54 whose marital status could not be identified
selected_columns = df_creditcarddata[['AGE','BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6', 'LIMIT_BAL']]
# the trailing semicolon avoids matplotlib's verbose output
selected_columns.hist(figsize=(16, 20), bins=50, xlabelsize=8, ylabelsize=8);
#default vs gender
df_creditcarddata.groupby(['default.payment.next.month','SEX']).mean()
# 1 - male
# 2 - female
pd.crosstab(df_creditcarddata.SEX, df_creditcarddata["default.payment.next.month"]).plot(kind='bar', stacked=False)
plt.title('Payment Defaults by Sex')
plt.xlabel('Sex')
plt.ylabel('# Payment Defaults')
#default vs education
print(pd.crosstab(df_creditcarddata.EDUCATION, df_creditcarddata["default.payment.next.month"]))
pd.crosstab(df_creditcarddata.EDUCATION, df_creditcarddata["default.payment.next.month"]).plot(kind='bar', stacked=False)
# kind='line' can also be used
plt.title('Payment Defaults by Education Level')
plt.xlabel('Education Level')
plt.ylabel('# Payment Defaults')
# Graduate school and university clients have a significantly higher default count in comparison to the others
#default vs marriage
df_creditcarddata.groupby(['default.payment.next.month','MARRIAGE']).mean()
# 1 - married
# 2 - single
# 3 - others
print(pd.crosstab(df_creditcarddata.MARRIAGE, df_creditcarddata["default.payment.next.month"]))
pd.crosstab(df_creditcarddata.MARRIAGE, df_creditcarddata["default.payment.next.month"]).plot(kind='bar', stacked=False)
# kind='line' can also be used
plt.title('Payment Defaults by Marriage')
plt.xlabel('Marriage')
plt.ylabel('# Payment Defaults')
df_creditcarddata['age_group'] = pd.cut(df_creditcarddata['AGE'], range(0, 100, 10), right=False)
df_creditcarddata.head()
#default vs age
pd.crosstab(df_creditcarddata.age_group, \
df_creditcarddata["default.payment.next.month"]).plot(kind='bar',stacked=False, grid=True)
plt.title('Count of Defaults by AGE')
plt.xlabel('AGE')
plt.ylabel('# of Default')
plt.legend(loc='upper left')
#default vs limit bal
fig_facetgrid = sns.FacetGrid(df_creditcarddata, hue='default.payment.next.month', aspect=4)
fig_facetgrid.map(sns.kdeplot, 'LIMIT_BAL', shade=True)
max_limit_bal = df_creditcarddata['LIMIT_BAL'].max()
fig_facetgrid.set(xlim=(0,max_limit_bal));
fig_facetgrid.set(ylim=(0.0,0.000007));
fig_facetgrid.set(title='Distribution of limit balance by default.payment')
fig_facetgrid.add_legend()
# the largest credit limit is apparently around 1,000,000.00
# a high number of defaulters can be seen with credit limit < 200000
```
### Outlier detection wrt. Amount of Credit limit
```
fig, axes = plt.subplots(nrows=5,ncols=1)
fig.set_size_inches(10, 20)
sns.boxplot(data=df_creditcarddata,y="LIMIT_BAL",orient="v",ax=axes[0])
sns.boxplot(data=df_creditcarddata,y="LIMIT_BAL",x="SEX",orient="v",ax=axes[1])
sns.boxplot(data=df_creditcarddata,y="LIMIT_BAL",x="EDUCATION",orient="v",ax=axes[2])
sns.boxplot(data=df_creditcarddata,y="LIMIT_BAL",x="MARRIAGE",orient="v",ax=axes[3])
sns.boxplot(data=df_creditcarddata,y="LIMIT_BAL",x="default.payment.next.month",orient="v",ax=axes[4])
axes[0].set(ylabel='LIMIT_BAL',title="credit balance distribution")
axes[1].set(xlabel='SEX', ylabel='LIMIT_BAL',title="Credit/Sex Distribution")
axes[2].set(xlabel='EDUCATION', ylabel='LIMIT_BAL',title="Credit/EDUCATION Distribution")
axes[3].set(xlabel='MARRIAGE', ylabel='LIMIT_BAL',title="Box Plot On Credit/MARRIAGE Distribution")
axes[4].set(xlabel='default.payment.next.month', ylabel='LIMIT_BAL',title="Box Plot On Credit/default payment Distribution")
sns.boxplot(x='MARRIAGE',hue='SEX', y='AGE',data=df_creditcarddata,palette="Set3")
sns.boxplot(x='EDUCATION',hue='MARRIAGE', y='AGE',data=df_creditcarddata,palette="cubehelix")
```
From the above plots it is clear that:
* the average credit card limit lies within 0 - 200000
* clients with an unknown or graduate-school education have a considerably higher percentage of the credit card limit
* 75% of the clients are single (see description) and aged between 20 and 30
* 35 years is the average age of all customers
* most customers whose marital status is not identified have a high-school education
### History of -
1. Past payment delays
2. Bill statement - credit/debit getting accrued
3. Payments performed in the previous month
```
payment_delay_cols = ['PAY_0','PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']
fig, ax = plt.subplots(1, 6, figsize=(16, 4), sharey=True)
for i,(column, ax) in enumerate(zip(payment_delay_cols, ax)):
sns.pointplot(x=column, y='default.payment.next.month', data= df_creditcarddata, ax=ax, color=sns.color_palette()[i])
plt.suptitle('Risk of default with payment delay across past periods');
#default.payment.next.month
```
The risk of default with respect to repayment status seems to show an increasing trend across the months.
```
pattern = re.compile("^PAY_[0-9]+$")
payment_delay = [ x for x in df_creditcarddata.columns if (pattern.match(x))]
df_creditcarddata[payment_delay].describe().round(2)
import re
pattern = re.compile("^BILL_AMT[0-9]+$")
bill_columns = [ x for x in df_creditcarddata.columns if (pattern.match(x))]
df_creditcarddata[bill_columns].describe().round(2)
pattern = re.compile("^PAY_AMT[0-9]+$")
pay_amount_columns = [ x for x in df_creditcarddata.columns if (pattern.match(x))]
df_creditcarddata[pay_amount_columns].describe().round(2)
print(df_creditcarddata[bill_columns].head())
print(df_creditcarddata[payment_delay].head())
print(df_creditcarddata[pay_amount_columns].head())
```
### CORRELATION WITH THE RESPONSE VARIABLE
```
plt.figure(figsize=(20,10))
sns.heatmap(df_creditcarddata.drop(columns = ['default.payment.next.month'], axis = 1).corr(), annot=True)
```
### Amt of Bill statement
Here we can observe that the correlation between the bill statement amounts decreases as the distance between months increases; the lowest correlations are between September and April.
### Repayment Status
Here as well, it can be seen that the correlation decreases across the months.
### Previous Payments
Similarly, there is no correlation between the amounts of previous payments.
```
#sorting features with higher correlation
pd.options.display.max_rows = None
corr = df_creditcarddata.corr()
c2 = corr.abs().unstack()
c2.sort_values(ascending = False)
```
### Assigning the labels to make interpretation easier
```
GenderMap = {2:'female', 1:'male'}
MarriageMap = {1:'married', 2:'single', 3:'other', 0: 'other'}
EducationMap = {1:'graduate school', 2:'university', 3:'high school', 4:'others', 5:'unknown', 6:'unknown', 0:'unknown'}
df_creditcarddata['SEX'] = df_creditcarddata.SEX.map(GenderMap)
df_creditcarddata['MARRIAGE'] = df_creditcarddata.MARRIAGE.map(MarriageMap)
df_creditcarddata['EDUCATION'] = df_creditcarddata.EDUCATION.map(EducationMap)
df_creditcarddata['PAY_0'] = df_creditcarddata['PAY_0'].astype(str)
df_creditcarddata['PAY_2'] = df_creditcarddata['PAY_2'].astype(str)
df_creditcarddata['PAY_3'] = df_creditcarddata['PAY_3'].astype(str)
df_creditcarddata['PAY_4'] = df_creditcarddata['PAY_4'].astype(str)
df_creditcarddata['PAY_5'] = df_creditcarddata['PAY_5'].astype(str)
df_creditcarddata['PAY_6'] = df_creditcarddata['PAY_6'].astype(str)
df_creditcarddata.dtypes
df_creditcarddata.head()
df_creditcarddata = df_creditcarddata.drop(columns = ['age_group'])
df_creditcarddata.head()
```
### Saving it to another csv file for modelling
```
df_creditcarddata.to_csv('new_credit_data.csv', index = False)
```
### DATA PREPARATION
```
df_creditcarddata.head()
df_creditcarddata.columns
```
### Split the target variables
```
predictor= df_creditcarddata.iloc[:, df_creditcarddata.columns != 'default.payment.next.month']
target= df_creditcarddata.iloc[:, df_creditcarddata.columns == 'default.payment.next.month']
# save all categorical columns in list
categorical_columns = [col for col in predictor.columns.values if predictor[col].dtype == 'object']
# dataframe with categorical features
df_categorical = predictor[categorical_columns]
# dataframe with numerical features
df_numeric = predictor.drop(categorical_columns, axis=1)
# Using pandas.get_dummies function to Convert categorical variable into dummy/indicator variables
dummy_code_cat_vars = pd.get_dummies(df_categorical,drop_first=True)
dummy_code_cat_vars.columns
df_numeric.columns
# using the concat function we merge the two dataframes for further analysis
df_predictor = pd.concat([df_numeric, dummy_code_cat_vars], axis=1)
pd.set_option('display.max_column',None)
df_predictor.head()
df_predictor.shape
```
### Splitting the dataset into train & test subset
```
#Let us now split the dataset into train & test
from sklearn.model_selection import train_test_split
X_train,X_test, y_train, y_test = train_test_split(df_predictor, target, test_size = 0.30, random_state=0)
print("x_train ",X_train.shape)
print("x_test ",X_test.shape)
print("y_train ",y_train.shape)
print("y_test ",y_test.shape)
```
### Feature Scaling
```
# Standarize features
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = pd.DataFrame(scaler.fit_transform(X_train))
X_test_scaled = pd.DataFrame(scaler.transform(X_test))
X_train_scaled.columns = X_train.columns.values
X_test_scaled.columns = X_test.columns.values
X_train_scaled.index = X_train.index.values
X_test_scaled.index = X_test.index.values
X_train = X_train_scaled
X_test = X_test_scaled
```
### Model Building
```
from sklearn.ensemble import RandomForestClassifier
#creating an instance of classifier
classifier = RandomForestClassifier(random_state = 0, n_estimators = 100,\
criterion = 'entropy', max_leaf_nodes= 20,oob_score = True, n_jobs = -1 )
# fit the model
model_RF = classifier.fit(X_train, y_train)
#################################################
# n_jobs = -1 means using all the processors
# n_estimators is the number of trees in the forest
# criterion measures the quality of a split
# other parameters can be used as per the requirement from -
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# train accuracy
acc_random_forest = round(classifier.score(X_train, y_train) * 100, 2)
print(round(acc_random_forest,2,), "%")
```
### ROC Curve & AUC
```
from sklearn import metrics
y_pred_proba = model_RF.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="AUC="+str(auc))
plt.legend(loc=4)
plt.show()
#predict the model
y_pred_RF = model_RF.predict(X_test)
#Evaluate
evaluation_scores = pd.Series({'Model': " Random Forest Classifier ",
'ROC Score' : metrics.roc_auc_score(y_test, y_pred_RF),
'Precision Score': metrics.precision_score(y_test, y_pred_RF),
'Recall Score': metrics.recall_score(y_test, y_pred_RF),
'Accuracy Score': metrics.accuracy_score(y_test, y_pred_RF),
'Kappa Score':metrics.cohen_kappa_score(y_test, y_pred_RF)})
print(evaluation_scores)
#Accuracy of Test set
acc_random_forest = round(classifier.score(X_test, y_test) * 100, 2)
print(round(acc_random_forest,2,), "%")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_RF))
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
cnf_matrix = confusion_matrix(y_test,y_pred_RF)
plot_confusion_matrix(cnf_matrix,classes=[0,1])
```
### Feature Importance
```
importances = classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in classifier.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
top_k = 10
new_indices = indices[:10] # this will get you top 10 features
# Print the feature ranking
print("Feature ranking:")
for f in range(top_k):
print("%d. feature %d (%f)" % (f + 1, new_indices[f], importances[new_indices[f]]))
feature_importances = pd.Series(classifier.feature_importances_, index=X_train.columns)
feature_importances.nlargest(10).plot(kind='barh') # top 10 features
```
| github_jupyter |
# Statistics: Skewness #6953
Skewness, in statistics, is the degree of asymmetry observed in a probability distribution. It is the distortion or asymmetry that deviates from the symmetrical bell curve. When the shape is moved right or left it is said to be skewed.
## Types of Skewness :
<b>Right skew or positive skewness</b>: When most of the outliers or noise lie on the right of the distribution, it is said to be right-skewed or positively skewed. The skewness value is greater than zero. In a right skew, mean > median > mode.
<b>Left skew or negative skewness</b>: When most of the outliers or noise lie on the left of the distribution, it is said to be left-skewed or negatively skewed. The skewness value is less than zero. In a left skew, mean < median < mode.
Besides positive and negative skew, distributions can also be said to have zero or undefined skew. A normal distribution (bell curve) exhibits <b>zero skewness</b>.
<img src = "https://www.conversion-uplift.co.uk/wp-content/uploads/2020/06/Skewness-photo.png"/>
## Advantages of Skewness:
1. Skewness is a measure that helps to understand the distribution of data points.
2. It helps to identify outliers and noise data points.
3. It also helps to roughly understand the frequencies of each data point.
4. It considers the extremes of the data set rather than focusing solely on the average.
5. Understanding the skewness of the dataset indicates whether deviations from the mean are going to be positive or negative.
## Disadvantages of Skewness :
1. Skewness is unpredictable.
2. The presence of outliers or noise also has an impact on accuracy and error.
## Measuring Skewness :
There are several ways to calculate the skewness of a data distribution, one of which is Pearson's first and second coefficients.
The Formulae for Pearson's Skewness Are:
$$
\begin{aligned}
Sk_1 &= \frac{\bar{X} - Mo}{s} \\
Sk_2 &= \frac{3(\bar{X} - Md)}{s}
\end{aligned}
$$
where $Sk_1$ and $Sk_2$ are Pearson's first and second coefficients of skewness, $s$ is the sample standard deviation, $\bar{X}$ is the mean value, $Mo$ is the modal (mode) value, and $Md$ is the median value.
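As a rough illustration (a minimal sketch; the sample data and the histogram-based mode estimate are made up purely for demonstration):
```
import numpy as np
from scipy import stats

np.random.seed(0)
x = np.random.exponential(scale=2.0, size=10000)   # made-up right-skewed sample

mean, median, std = x.mean(), np.median(x), x.std()
counts, edges = np.histogram(x, bins=50)            # crude mode estimate from a histogram
mode = 0.5 * (edges[np.argmax(counts)] + edges[np.argmax(counts) + 1])

sk1 = (mean - mode) / std                # Pearson's first coefficient
sk2 = 3 * (mean - median) / std          # Pearson's second coefficient
print(sk1)
print(sk2)
print(stats.skew(x))                     # all positive for a right-skewed sample
```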
## Conclusion:
Skewness is simply how much a data set deviates from a normal distribution. In statistics, it plays an important role when the data are not normally distributed. Extreme data points in the data set can cause the distribution to skew towards the left or the right.
| github_jupyter |
## Face and Facial Keypoint detection
After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input, so to detect any face you'll first have to do some pre-processing.
1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
3. Use your trained model to detect facial keypoints on the image.
---
In the next python cell we load in required libraries for this section of the project.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
#### Select an image
Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
```
import cv2
# load in color image for face detection
image = cv2.imread('images/obamas.jpg')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
```
## Detect all faces in an image
Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
An example of face detection on a variety of images is shown below.
<img src='images/haar_cascade_ex.png' width=80% height=80%/>
```
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
# draw a rectangle around each detected face
# you may also need to change the width of the rectangle drawn depending on image resolution
cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
```
## Loading in a trained model
Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
First, load your best model by its filename.
```
import torch
from models import Net
net = Net()
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
net.load_state_dict(torch.load('saved_models/keypoints_model_ilias_2.pt'))
## print out your net and prepare it for testing (uncomment the line below)
net.eval()
```
## Keypoint detection
Now, we'll loop over each detected face in an image (again!), only this time you'll transform those faces into Tensors that your CNN can accept as input images.
### TODO: Transform each detected face into an input Tensor
You'll need to perform the following steps for each detected face:
1. Convert the face from RGB to grayscale
2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
4. Reshape the numpy image into a torch image.
**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.
You may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.
### TODO: Detect and display the predicted keypoints
After each face has been appropriately converted into an input Tensor for your network, you can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints` (a sketch is given after the example image below). You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
<img src='images/michelle_detected.png' width=30% height=30%/>
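For reference, a minimal sketch of such a `show_keypoints` helper might look as follows (the name and signature are only suggestions, and it assumes the keypoints have already been un-normalized into image coordinates):
```
import matplotlib.pyplot as plt

def show_keypoints(face, keypoints):
    """Display a grayscale face image with its (un-normalized) keypoints overlaid."""
    plt.imshow(face, cmap='gray')
    plt.scatter(keypoints[:, 0], keypoints[:, 1], s=20, marker='.', c='m')
    plt.show()
```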
```
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
# Select the region of interest that is the face in the image
roi = image_copy[y:y+h, x:x+w]
## TODO: Convert the face region from RGB to grayscale
roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
roi = roi/255.0
## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
roi = cv2.resize(roi, (224, 224))
roi_copy = np.copy(roi)
## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
roi = torch.Tensor(np.reshape(roi, (1, roi.shape[0], roi.shape[1])))
roi = torch.unsqueeze(roi, 0)
## TODO: Make facial keypoint predictions using your loaded, trained network
with torch.no_grad():
output_points = net(roi)
output_points = output_points.view(output_points.size()[0], 68, -1)[0].data.numpy()
output_points = output_points * 90.0 + 90.0
#output_points = output_points * (roi.shape[1]/224, roi.shape[0]/218)
## TODO: Display each detected face and the corresponding keypoints
plt.imshow(roi_copy, cmap='gray')
plt.scatter(output_points[:, 0], output_points[:, 1], s=20, marker='.', c='m')
plt.show()
```
| github_jupyter |
# Interactive Exercise Sheet
## Summary of the mathematics covered in the exercise
### [Matrix norms](https://de.wikipedia.org/wiki/Matrixnorm)
#### Spectral norm
It is defined as the induced vector norm
$$\| A \| := \max_{x \neq 0}\frac{\| Ax \|_2}{\| x \|_2} = \max_{\| x \|_2 = 1} \| Ax \|_2.$$
It can, however, also be expressed via the maximal singular value,
$$\| A \|_2 = \sqrt{\mu_\max},$$
where $\mu_\max$ denotes the largest eigenvalue of $A^*A$.
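A quick numerical check of this identity (a minimal sketch; the test matrix is arbitrary):
```
import numpy as np

A = np.array([[2.0, 1.0], [0.0, 3.0]])        # arbitrary test matrix

# spectral norm as the largest singular value ...
print(np.linalg.svd(A, compute_uv=False)[0])
# ... agrees with the induced 2-norm computed by NumPy
print(np.linalg.norm(A, 2))
```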
#### Hilbert-Schmidt norm
We define the Hilbert-Schmidt norm of a matrix $A \in K^{n \times n}$ as
$$ |A| = \left( \frac{1}{n}\sum_{i = 0}^{n-1}\sum_{j = 0}^{n-1} |a_{i,j}|^2 \right)^{1/2}.$$
It holds that
1. $|A| = \left( \frac{1}{n}\mbox{trace}(A^*A) \right)^{1/2}$
1. $|A| = \left( \frac{1}{n}\sum_{k=0}^{n-1}\lambda_k\right)^{1/2}$, where $\lambda_k$ are the eigenvalues of $A^*A$
1. $|A| \leq \|A\|$
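A small sketch checking the first and third properties numerically (the test matrix is arbitrary):
```
import numpy as np

np.random.seed(1)
n = 4
A = np.random.randn(n, n)                                  # arbitrary test matrix

hs = np.sqrt((np.abs(A)**2).sum() / n)                     # definition of |A|
hs_trace = np.sqrt(np.trace(A.conj().T.dot(A)).real / n)   # property 1
spec = np.linalg.norm(A, 2)                                # strong norm ||A||

print(abs(hs - hs_trace) < 1e-12)                          # property 1 holds numerically
print(hs <= spec)                                          # property 3: |A| <= ||A||
```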
### Rayleigh quotient
Let $H$ be a Hermitian matrix; the associated Rayleigh quotient is defined as
$$R_H(x) = \frac{x^*Hx}{x^*\cdot x}.$$
The Rayleigh quotient is particularly useful for eigenvalue estimates.
The largest and smallest eigenvalues $\lambda_M,\lambda_m$ can be estimated via
$$ \lambda_m = \min_x R_H(x) \; \mbox{and} \; \lambda_M = \max_x R_H(x).$$
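A minimal numerical illustration (the Hermitian test matrix is arbitrary):
```
import numpy as np

H = np.array([[2.0, 1.0], [1.0, 3.0]])   # symmetric, hence Hermitian

def rayleigh(H, x):
    return x.conj().dot(H.dot(x)) / x.conj().dot(x)

np.random.seed(2)
samples = [rayleigh(H, np.random.randn(2)) for _ in range(10000)]
print(min(samples))                       # >= smallest eigenvalue
print(max(samples))                       # <= largest eigenvalue
print(np.linalg.eigvalsh(H))              # exact eigenvalues for comparison
```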
### Functions of the Wiener class
Let $\{t_k\}_{-\infty}^{\infty}$ be an absolutely summable sequence, i.e. $\sum_{k=-\infty}^{\infty}|t_k| < \infty$.
Furthermore, let $f(\lambda) = \lim_{n \to \infty} \sum_{k=-n}^{n}t_k e^{-ik\lambda}$. Then $f(\lambda)$ is continuous and Riemann integrable.
### [Circulant matrices](https://de.wikipedia.org/wiki/Zyklische_Matrix)
All circulant matrices $C \in K^{n \times n}$
$$C:=\begin{pmatrix}
c_0 &c_{1} &c_{2} &\ldots &c_{n-1}\\
c_{n-1} &c_0 &c_{1} &\ldots &c_{n-2}\\
c_{n-2} &c_{n-1} &c_0 &\ldots &c_{n-3}\\
&\ddots &\ddots &\ddots\\
c_{1} &c_{2} &c_{3} &\ldots &c_0\end{pmatrix}. $$
share the same eigenvectors
$$v^{(m)} = \frac{1}{\sqrt{n}} \begin{pmatrix}1 \\ e^{-2\pi i m/n} \\ \vdots \\ e^{-2\pi i m(n-1)/n} \end{pmatrix} .$$
and the same formula can be used to compute the eigenvalues of all circulant matrices:
$$ \lambda_m = \sum_{k=0}^{n-1}c_k e^{-2\pi i m k /n}. $$
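This can be checked quickly with `scipy.linalg.circulant` (a minimal sketch; the entries of `c` are arbitrary, and the spectrum is the same whether `c` is taken as the first row or the first column):
```
import numpy as np
from scipy.linalg import circulant

c = np.array([4.0, 1.0, 0.0, 0.0, 1.0])   # arbitrary (here symmetric) c_0, ..., c_{n-1}
n = len(c)
C = circulant(c)

eig_formula = np.array([np.sum(c * np.exp(-2j * np.pi * m * np.arange(n) / n)) for m in range(n)])
eig_direct = np.linalg.eigvals(C)
print(np.sort(eig_formula.real))
print(np.sort(eig_direct.real))            # same spectrum
```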
Another way to compute the eigenvalues is to use the generating function
$$f(\lambda) = \sum_{k=-s}^{s} t_k \exp(-i\lambda)^k.$$
For example, for the circulant matrices
$$C_{n}(f)=\begin{pmatrix}
t_0 & \ldots & t_{-s}& \mathbf{0}_m &t_{s} &\ldots & t_{1} \\
\vdots & & & \ddots& &\ddots & \vdots \\
t_{s} & & & & & & t_{s} \\
\mathbf{0}_m^T &\ddots & & t_0 & & \ddots& \mathbf{0}_m^T \\
t_{-s} & & & & & & t_{-s} \\
\vdots & \ddots & & \ddots& & & \vdots \\
t_{-1} & \ldots & t_{-s}& \mathbf{0}_m & t_s & \ldots & t_0
\end{pmatrix} \in K^{n \times n}, \mbox{with}\; n > 2s+1.$$
the eigenvalues of $C_n(f)$ are $\psi_{n,j} = f(\frac{2 \pi j}{n})$.
This immediately yields bounds for the eigenvalues, and if $f$ is real-valued and $F$ is additionally continuous, then
$$ \lim_{n \to \infty} \frac{1}{n} \sum_{j=0}^{n-1} F(\psi_{n,j}) = \frac{1}{2\pi}\int_{0}^{2\pi} F(f(\lambda)) \mathrm{d}\lambda .$$
### [Toeplitz matrices](https://de.wikipedia.org/wiki/Toeplitz-Matrix)
For every Toeplitz matrix
$$T_{n+1}= \begin{pmatrix}
t_0 & \ldots & t_{-n} \\
\vdots & \ddots & \vdots \\
t_{n} & \ldots & t_{0}
\end{pmatrix} $$
there is a corresponding generating function
$$f(\lambda) = \sum_{k=-n}^{n} t_k \exp(-i\lambda)^k.$$
For this function we then have
$$ T_n(f) = \{\frac{1}{2 \pi} \int_0^{2\pi} f(\lambda) e^{-i(k-j)\lambda} \mathrm{d}\lambda ; k,j = 0,1,\ldots,n-1\}.$$
This $f$ can be used to estimate the norm and the eigenvalues of $T_n$:
$$ \| T_n(f) \|\leq 2 M_{|f|}, \quad \mbox{where}\quad M_{|f|} = \max_{\lambda \in K} |f(\lambda)|.$$
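As a quick sanity check (a sketch using `scipy.linalg.toeplitz`; the tridiagonal Toeplitz matrix generated by $f(\lambda)=2-2\cos\lambda$ is used, so $M_{|f|}=4$):
```
import numpy as np
from scipy.linalg import toeplitz

n = 50
col = np.zeros(n)
col[0], col[1] = 2.0, -1.0        # t_0 = 2, t_{+-1} = -1, all other t_k = 0
T = toeplitz(col)                  # symmetric tridiagonal Toeplitz matrix T_n(f)

M_abs_f = 4.0                      # max of |f(lambda)| = |2 - 2 cos(lambda)|
print(np.linalg.norm(T, 2))        # close to, but below, 4
print(2 * M_abs_f)                 # the bound from above holds
```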
These matrices can be artificially enlarged without changing their fundamental properties.
In addition, asymptotic properties even emerge. Let, for example,
$$T_{n}(f)=\begin{pmatrix}
t_0 & \ldots & t_{-s}& & & & \\
\vdots & & & \ddots & & & \\
t_{s} & & & & & & \\
&\ddots & & t_0 & & \ddots& \\
& & & & & & t_{-s} \\
& & & \ddots & & & \vdots \\
& & & & t_s & \ldots & t_0
\end{pmatrix} \in K^{n \times n}, \mbox{with}\; n > 2s+1.$$
then
$$ \lim_{n \to \infty} \frac{1}{n} \sum_{j=0}^{n-1} F(\tau_{n,j}) = \frac{1}{2\pi}\int_{0}^{2\pi} F(f(\lambda)) \mathrm{d}\lambda.$$
### Asymptotically equivalent sequences of matrices
Let $\{A_n\}$ and $\{B_n\}$ be sequences of $n\times n$ matrices which are
bounded with respect to the strong norm:
$$ \|A_n\|,\|B_n\| \leq M < \infty, \quad n=1,2,\ldots $$
and converge with respect to the weak norm:
$$\lim_{n \to \infty} |A_n -B_n| = 0.$$
We call such sequences *asymptotically equivalent* and write $A_n \sim B_n$.
For $\{A_n\}$, $\{B_n\}$ and $\{C_n\}$,
with eigenvalues $\{\alpha_{n,i}\}$, $\{\beta_{n,i}\}$ and $\{\zeta_{n,i}\}$ respectively,
the following relations hold.
1. If $A_n \sim B_n$, then $\lim_{n \to \infty} |A_n| = \lim_{n \to \infty} |B_n| $
1. If $A_n \sim B_n$ and $B_n \sim C_n$, then $A_n \sim C_n$
1. If $A_nB_n \sim C_n$ and $\|A_n^{-1}\|\leq K < \infty$, then $B_n \sim A_n^{-1}C_n$
1. If $A_n \sim B_n$, then there exist $-\infty < m,M < \infty$ such that $m\leq \alpha_{n,k}, \beta_{n,k}\leq M \; \forall n\geq 1 \;\mbox{and}\; k\geq 0$
1. If $A_n \sim B_n$, then $ \lim_{n \to \infty} \frac{1}{n} \sum_{k=0}^{n-1} (\alpha_{n,k}^s - \beta_{n,k}^s) = 0$
## Theory in action
### System matrices
Use `pyMG` to set up the system matrix for the Helmholtz problem in 1D for given parameters $n$ and $\sigma$.
```
import sys
sys.path.append("/home/moser/MG_2016/pyMG/")
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import pymg
from project.helmholtz1d import Helmholtz1D
from project.helmholtz1d_periodic import Helmholtz1D_Periodic
from project.gauss_seidel import GaussSeidel
from project.weighted_jacobi import WeightedJacobi
from project.pfasst.plot_tools import eigvalue_plot_list, matrix_plot, matrix_row_plot
from project.pfasst.transfer_tools import to_dense
from project.pfasst.matrix_method_tools import matrix_power
def system_matrix_hh1d(n,sig):
hh1d = Helmholtz1D(n, sig)
return hh1d.A
def system_matrix_hh1d_periodic(n,sig):
hh1d = Helmholtz1D_Periodic(n, sig)
return hh1d.A
def spec_rad(A):
return np.max(np.abs(sp.linalg.eigvals(to_dense(A))))
```
Using `matrix_plot`, plot the system matrices for $\sigma = 0$ and $n=10$.
```
matrix_plot(to_dense(system_matrix_hh1d(10,0)))
matrix_plot(to_dense(system_matrix_hh1d_periodic(10,0)))
def plot_3_eigvalueplots(A_p,A_z,A_m):
eig_p.append(sp.linalg.eigvals(to_dense(A_p)))
eig_z.append(sp.linalg.eigvals(to_dense(A_z)))
eig_m.append(sp.linalg.eigvals(to_dense(A_m)))
real_part_p = np.real(eig_p[-1])
img_part_p = np.imag(eig_p[-1])
real_part_z = np.real(eig_z[-1])
img_part_z = np.imag(eig_z[-1])
real_part_m = np.real(eig_m[-1])
img_part_m = np.imag(eig_m[-1])
fig1, (ax1, ax2, ax3) = plt.subplots(ncols=3,figsize=(15,3))
ax1.plot(real_part_p,img_part_p,'ro')
ax1.set_xlabel("real part")
ax1.set_ylabel("img part")
ax1.set_title('eigenvalues')
ax2.plot(real_part_z,img_part_z,'bo')
ax2.set_xlabel("real part")
ax2.set_ylabel("img part")
ax2.set_title('eigenvalues')
ax3.plot(real_part_m,img_part_m,'go')
ax3.set_xlabel("real part")
ax3.set_ylabel("img part")
ax3.set_title('eigenvalues')
fig1.tight_layout()
plt.show()
def plot_2_eigvalueplots(A_p,A_z):
eig_p.append(sp.linalg.eigvals(to_dense(A_p)))
eig_z.append(sp.linalg.eigvals(to_dense(A_z)))
real_part_p = np.real(eig_p[-1])
img_part_p = np.imag(eig_p[-1])
real_part_z = np.real(eig_z[-1])
img_part_z = np.imag(eig_z[-1])
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,3))
ax1.plot(real_part_p,img_part_p,'ro')
ax1.set_xlabel("real part")
ax1.set_ylabel("img part")
ax1.set_title('eigenvalues')
ax2.plot(real_part_z,img_part_z,'bo')
ax2.set_xlabel("real part")
ax2.set_ylabel("img part")
ax2.set_title('eigenvalues')
fig1.tight_layout()
plt.show()
```
**Task:** Using `plot_3_eigvalueplots`, plot the eigenvalues of the system matrix for $n \in [5,10,20]$ and $\sigma = 100$, $\sigma = -100$, and $\sigma = 0$.
```
eig_p=[]
eig_m=[]
eig_z=[]
for n in [5,10,20]:
A_p = system_matrix_hh1d(n,100.0)
A_z = system_matrix_hh1d(n,0.0)
A_m = system_matrix_hh1d(n,-100.0)
plot_3_eigvalueplots(A_p, A_z, A_m)
```
**Question**: How do the spectra of the different system matrices differ?
### Iteration matrices of the smoother
Far more interesting, however, are the spectra of the iteration matrices of a smoother.
```
def iteration_matrix_wjac(n, sigma, periodic=True):
if periodic:
A = system_matrix_hh1d_periodic(n,sigma)
else:
A = system_matrix_hh1d(n,sigma)
wjac = WeightedJacobi(A, 2.0/3.0)
P_inv = wjac.Pinv
return np.eye(n) - P_inv.dot(A)
matrix_plot(iteration_matrix_wjac(10,0))
n=10
for sigma in [100,0,-100]:
plot_2_eigvalueplots(iteration_matrix_wjac(n, sigma,periodic=True),iteration_matrix_wjac(n, sigma,periodic=False))
sigma_range = np.linspace(-100,100,100)
sr_wjac_periodic = map(lambda sig : spec_rad(iteration_matrix_wjac(n, sig,periodic=True)), sigma_range)
sr_wjac = map(lambda sig : spec_rad(iteration_matrix_wjac(n, sig,periodic=False)), sigma_range)
# fix the axes
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,4))
ax1.plot(sigma_range, sr_wjac_periodic,'k-')
ax1.set_xlabel('$\sigma$')
ax1.set_ylabel("spectral radius")
ax1.set_title('periodic')
ax2.plot(sigma_range, sr_wjac,'k-')
ax2.set_xlabel('$\sigma$')
ax2.set_ylabel("spectral radius")
ax2.set_title('non-periodic')
fig1.tight_layout()
plt.show()
```
**Question**: How do the spectra for the periodic problem compare to those of the problems with Dirichlet boundary conditions?
Write a function to compute the Gauss-Seidel iteration matrix as a function of $\sigma$ and $n$, and find out how the spectral radius behaves for different $\sigma$ in the periodic as well as the non-periodic case.
```
def iteration_matrix_gs(n, sigma, periodic=True):
if periodic:
A = system_matrix_hh1d_periodic(n,sigma)
else:
A = system_matrix_hh1d(n,sigma)
gs = GaussSeidel(A)
P_inv = gs.Pinv
return np.eye(n) - P_inv.dot(A)
matrix_plot(iteration_matrix_gs(10,0,True))
n=10
for sigma in [100,0,-100]:
plot_2_eigvalueplots(iteration_matrix_gs(n, sigma,periodic=True),iteration_matrix_gs(n, sigma,periodic=False))
sr_gs_periodic = map(lambda sig : spec_rad(iteration_matrix_gs(n, sig,periodic=True)), sigma_range)
sr_gs = map(lambda sig : spec_rad(iteration_matrix_gs(n, sig,periodic=False)), sigma_range)
# fix the axes
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,4))
ax1.plot(sigma_range, sr_gs_periodic,'k-')
ax1.set_xlabel('$\sigma$')
ax1.set_ylabel("spectral radius")
ax1.set_title('periodic')
ax2.plot(sigma_range, sr_gs,'k-')
ax2.set_xlabel('$\sigma$')
ax2.set_ylabel("spectral radius")
ax2.set_title('non-periodic')
fig1.tight_layout()
plt.show()
```
**Question:** What do you see, and what about the difference between the two?
```
plt.semilogy(sigma_range, np.asarray(sr_gs_periodic)-np.asarray(sr_gs),'k-')
```
### Only the high frequencies
```
def transformation_matrix_fourier_basis(N):
psi = np.zeros((N,N),dtype=np.complex128)
for i in range(N):
for j in range(N):
psi[i,j] = np.exp(2*np.pi*1.0j*j*i/N)
return psi/np.sqrt(N)
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
wjac_trafo = np.dot(PSI_trafo_inv, np.dot(iteration_matrix_wjac(n,0),PSI_trafo))
gs_trafo = np.dot(PSI_trafo_inv, np.dot(iteration_matrix_gs(n,0),PSI_trafo))
matrix_plot(np.real(wjac_trafo))
matrix_plot(np.real(gs_trafo))
```
**Question:** What happened here?
The eigenvalues associated with the high frequencies are now extracted by picking out the appropriate diagonal entries after the transformation, provided the matrix is circulant. We wrap
all of this up in a function.
```
def plot_fourier_transformed(A):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
matrix_row_plot([A,np.abs(A_traf)])
def get_high_theta_eigvals(A, plot=False):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
if plot:
matrix_plot(np.abs(A_traf))
eigvals = np.asarray(map(lambda k : A_traf[k,k],range(n)))
return eigvals[np.floor(n/4):np.ceil(3.0*n/4)]
def get_low_theta_eigvals(A, plot=False):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
if plot:
matrix_plot(np.abs(A_traf))
eigvals = np.asarray(map(lambda k : A_traf[k,k],range(n)))
return np.hstack([eigvals[:np.floor(n/4)],eigvals[np.ceil(3.0*n/4):]])
high_eigvals_periodic = np.abs(get_high_theta_eigvals(iteration_matrix_gs(20,0,True),True))
high_eigvals_dirichlet = np.abs(get_high_theta_eigvals(iteration_matrix_gs(20,0,False),True))
print high_eigvals_periodic
high_eigvals_dirichlet - high_eigvals_periodic
```
**Task**: Look at the difference (periodic vs. non-periodic) between the eigenvalues of the Gauss-Seidel iteration matrix that can be associated with the high frequencies, for $n\in \{5,10,50\}$ and $\sigma \in \{-100,0,100\}$. What do you notice?
```
do_plot = False
for n in [5,10,50]:
for sig in [-100,0,100]:
h_eigs_periodic = np.abs(get_high_theta_eigvals(iteration_matrix_gs(n,sig,True),do_plot))
h_eigs_dirichlet = np.abs(get_high_theta_eigvals(iteration_matrix_gs(n,sig,False),do_plot))
print "Sigma :\t\t",sig
print "n:\t\t",n
# print "\n",h_eigs_periodic-h_eigs_dirichlet,"\n"
print "sum:\t\t", np.sum(np.abs(h_eigs_periodic-h_eigs_dirichlet))
print "max:\t\t", np.max(np.abs(h_eigs_periodic-h_eigs_dirichlet)),"\n"
```
### Two-grid iteration matrix
```
from project.linear_transfer import LinearTransfer
from project.linear_transfer_periodic import LinearTransferPeriodic
```
In the following we will use the `pymg` framework to set up the two-grid iteration matrix for a simple multigrid method.
We begin with the coarse-grid correction.
```
def coarse_grid_correction(n,nc, sigma):
A_fine = to_dense(system_matrix_hh1d(n,sigma))
A_coarse = to_dense(system_matrix_hh1d(nc,sigma))
A_coarse_inv = sp.linalg.inv(A_coarse)
lin_trans = LinearTransfer(n, nc)
prolong = to_dense(lin_trans.I_2htoh)
restrict = to_dense(lin_trans.I_hto2h)
return np.eye(n)- np.dot(prolong.dot(A_coarse_inv.dot(restrict)), A_fine)
```
**Pretty-pictures task:** Use `matrix_plot` to plot the coarse-grid correction iteration matrices and their Fourier transforms for $n=31$, $n_c=15$ and various $\sigma\in[-1000,1000]$.
```
plot_fourier_transformed(coarse_grid_correction(31,15,-1000))
plot_fourier_transformed(coarse_grid_correction(31,15,0))
plot_fourier_transformed(coarse_grid_correction(31,15,1000))
plot_3_eigvalueplots(coarse_grid_correction(31,15,-1000),coarse_grid_correction(31,15,0),coarse_grid_correction(31,15,100))
```
**Task:** Write the coarse-grid correction for the periodic problem and plot again for various $\sigma$.
**Question:** What exactly happens at $\sigma = 0$ and in its vicinity?
```
def coarse_grid_correction_periodic(n,nc, sigma):
A_fine = to_dense(system_matrix_hh1d_periodic(n,sigma))
A_coarse = to_dense(system_matrix_hh1d_periodic(nc,sigma))
A_coarse_inv = sp.linalg.inv(A_coarse)
lin_trans = LinearTransferPeriodic(n, nc)
prolong = to_dense(lin_trans.I_2htoh)
restrict = to_dense(lin_trans.I_hto2h)
return np.eye(n)- np.dot(prolong.dot(A_coarse_inv.dot(restrict)), A_fine)
matrix_plot(coarse_grid_correction_periodic(31,15,-1000))
matrix_plot(coarse_grid_correction_periodic(31,15,-0.00))
matrix_plot(coarse_grid_correction_periodic(31,15,1000))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,-1000))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,-0.00))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,1000))
prolong = to_dense(LinearTransferPeriodic(16, 8).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(16, 8).I_hto2h)
matrix_plot(to_dense(LinearTransferPeriodic(16, 8).I_2htoh))
matrix_plot(to_dense(LinearTransferPeriodic(16, 8).I_hto2h))
matrix_plot(prolong.dot(restrict))
def mat_of_interest(n):
prolong = to_dense(LinearTransferPeriodic(n, n/2).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(n, n/2).I_hto2h)
return prolong.dot(restrict)
prolong = to_dense(LinearTransferPeriodic(15, 7).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(15, 7).I_hto2h)
matrix_plot(to_dense(LinearTransferPeriodic(15, 7).I_2htoh))
matrix_plot(to_dense(LinearTransferPeriodic(15, 7).I_hto2h))
matrix_plot(prolong.dot(restrict))
def two_grid_it_matrix(n,nc, sigma, nu1=3,nu2=3,typ='wjac'):
cg = coarse_grid_correction(n,nc,sigma)
if typ is 'wjac':
smoother = iteration_matrix_wjac(n,sigma, periodic=False)
if typ is 'gs':
smoother = iteration_matrix_gs(n,sigma, periodic=False)
pre_sm = matrix_power(smoother, nu1)
post_sm = matrix_power(smoother, nu2)
return pre_sm.dot(cg.dot(post_sm))
```
**Pretty-pictures task:** Use `matrix_plot` to plot the two-grid matrices and their Fourier transforms for $n=31$, $n_c=15$ and various $\sigma\in[-1000,1000]$.
```
plot_fourier_transformed(two_grid_it_matrix(15,7,-100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,0,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,-100,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix(15,7,0,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix(15,7,100,typ='gs'))
eig_p=[]
eig_m=[]
eig_z=[]
for n,nc in zip([7,15,31],[3,7,15]):
A_p = two_grid_it_matrix(n,nc,100.0)
A_z = two_grid_it_matrix(n,nc,0.0)
A_m = two_grid_it_matrix(n,nc,-100.0)
plot_3_eigvalueplots(A_p, A_z, A_m)
sr_2grid_var_sigma = map(lambda sig : spec_rad(two_grid_it_matrix(15,7,sig)), sigma_range)
plt.semilogy(sigma_range, sr_2grid_var_sigma,'k-')
plt.title('$n_f = 15, n_c = 7$')
plt.xlabel('$\sigma$')
plt.ylabel("spectral radius")
nf_range = list(map(lambda k: 2**k - 1, range(3, 10)))
nc_range = list(map(lambda k: 2**k - 1, range(2, 9)))
sr_2grid_m1000 = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix(nf, nc, -1000)), nf_range, nc_range))
sr_2grid_0 = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix(nf, nc, 0)), nf_range, nc_range))
sr_2grid_p1000 = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix(nf, nc, 1000)), nf_range, nc_range))
plt.semilogy(nf_range, sr_2grid_m1000, 'k-', nf_range, sr_2grid_0, 'k--', nf_range, sr_2grid_p1000, 'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend((r"$\sigma = -1000$", r"$\sigma = 0$", r"$\sigma = 1000$"), loc='upper right', shadow=True)
```
**Task:** Write a function `two_grid_it_matrix_periodic` for the periodic case, and plot the spectral radius over $\sigma$ as well as the spectral radius over $n$ for 3 different values of $\sigma$.
```
def two_grid_it_matrix_periodic(n, nc, sigma, nu1=3, nu2=3, typ='wjac'):
    cg = coarse_grid_correction_periodic(n, nc, sigma)
    if typ == 'wjac':
        smoother = iteration_matrix_wjac(n, sigma, periodic=True)
    if typ == 'gs':
        smoother = iteration_matrix_gs(n, sigma, periodic=True)
    pre_sm = matrix_power(smoother, nu1)
    post_sm = matrix_power(smoother, nu2)
    return pre_sm.dot(cg.dot(post_sm))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,0.01,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-100,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-0.01,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,100,typ='gs'))
sr_2grid_var_sigma_periodic = list(map(lambda sig: spec_rad(two_grid_it_matrix_periodic(16, 8, sig)), sigma_range))
plt.plot(sigma_range, np.asarray(sr_2grid_var_sigma) - np.asarray(sr_2grid_var_sigma_periodic), 'k-')
plt.title('Difference between the periodic and the non-periodic case')
plt.xlabel(r'$\sigma$')
plt.ylabel("spectral radius")
nf_range = list(map(lambda k: 2**k, range(3, 10)))
nc_range = list(map(lambda k: 2**k, range(2, 9)))
sr_2grid_m1000_p = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix_periodic(nf, nc, -1000)), nf_range, nc_range))
sr_2grid_0_p = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix_periodic(nf, nc, 0.01)), nf_range, nc_range))
sr_2grid_p1000_p = list(map(lambda nf, nc: spec_rad(two_grid_it_matrix_periodic(nf, nc, 1000)), nf_range, nc_range))
plt.semilogy(nf_range, sr_2grid_m1000_p, 'k-', nf_range, sr_2grid_0_p, 'k--', nf_range, sr_2grid_p1000_p, 'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend((r"$\sigma = -1000$", r"$\sigma = 0$", r"$\sigma = 1000$"), loc='upper right', shadow=True)
```
**Task:** Plot the difference between the periodic and the non-periodic case.
```
plt.semilogy(nf_range, np.abs(np.asarray(sr_2grid_m1000_p) - np.asarray(sr_2grid_m1000)),'k-',
nf_range, np.abs(np.asarray(sr_2grid_0_p) - np.asarray(sr_2grid_0)),'k--',
nf_range, np.abs(np.asarray(sr_2grid_p1000_p) - np.asarray(sr_2grid_p1000)),'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend((r"$\sigma = -1000$", r"$\sigma = 0$", r"$\sigma = 1000$"), loc='upper right', shadow=True)
```
### Asymptotic equivalence between the periodic and the non-periodic case
We see that, at first glance, the spectral radii agree well. We now want to investigate empirically whether the matrix classes of the periodic and the non-periodic case might be asymptotically equivalent to each other.
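Recall the standard notion being tested here (as in Gray's review of Toeplitz and circulant matrices): two sequences of $n\times n$ matrices $\{A_n\}$ and $\{B_n\}$ are *asymptotically equivalent* if their operator norms are uniformly bounded, $\|A_n\|_2,\|B_n\|_2\le M<\infty$, and the normalized Hilbert-Schmidt norm of their difference vanishes,
$$
\lim_{n\to\infty}\frac{1}{\sqrt{n}}\,\|A_n-B_n\|_F=0 .
$$
This last quantity is exactly what the function `hs_norm` below computes.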
**Task:**
Write a function `hs_norm` that computes the Hilbert-Schmidt norm.
```
def hs_norm(A):
n = A.shape[0]
return sp.linalg.norm(A,'fro')/np.sqrt(n)
```
**Task:**
Check empirically whether the
1. _system matrix classes_
1. _smoothing iteration matrix classes_
1. _coarse-grid correction matrix classes_
1. _two-grid iteration matrix classes_
are asymptotically equivalent for $\sigma = \{ -1000, 0.001, 1000 \}$.
__System matrices:__
```
n_range = np.arange(10,100)
hs_sysmat_m1000 = list(map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,-1000)) - to_dense(system_matrix_hh1d_periodic(n,-1000))), n_range))
hs_sysmat_0 = list(map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,0.001)) - to_dense(system_matrix_hh1d_periodic(n,0.001))), n_range))
hs_sysmat_p1000 = list(map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,1000)) - to_dense(system_matrix_hh1d_periodic(n,1000))), n_range))
plt.plot(n_range, hs_sysmat_m1000)
plt.plot(n_range, hs_sysmat_0)
plt.plot(n_range, hs_sysmat_p1000)
```
__Smoothing:__
_Jacobi_
```
n_range = 2**np.arange(1,11)
hs_wjac_m1000 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,-1000)) - to_dense(iteration_matrix_wjac(n,-1000,False))), n_range))
hs_wjac_0 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,0)) - to_dense(iteration_matrix_wjac(n,0,False))), n_range))
hs_wjac_p1000 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,1000)) - to_dense(iteration_matrix_wjac(n,1000,False))), n_range))
plt.plot(n_range, hs_wjac_m1000)
plt.plot(n_range, hs_wjac_0)
plt.plot(n_range, hs_wjac_p1000)
```
_Gauss-Seidel_
```
n_range = 2**np.arange(1,11)
hs_gs_m1000 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,-1000)) - to_dense(iteration_matrix_gs(n,-1000,False))), n_range))
hs_gs_0 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,0)) - to_dense(iteration_matrix_gs(n,0,False))), n_range))
hs_gs_p1000 = list(map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,1000)) - to_dense(iteration_matrix_gs(n,1000,False))), n_range))
plt.plot(n_range, hs_gs_m1000)
plt.plot(n_range, hs_gs_0)
plt.plot(n_range, hs_gs_p1000)
```
__Coarse-grid correction__
Here we once again run into the problem that the degrees of freedom are distributed differently in the periodic and the non-periodic case. For the comparison, the matrix is padded with one additional row and column of zeros.
```
def einmal_einpacken(A):
return np.r_[[np.zeros(A.shape[0]+1)],np.c_[np.zeros(A.shape[0]),A]]
n_f_range = 2**np.arange(3,10)
n_c_range = 2**np.arange(2,9)
hs_cgc_m1000 = list(map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,-1000)) - coarse_grid_correction_periodic(nf,nc,-1000)), n_f_range, n_c_range))
hs_cgc_0 = list(map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,0)) - coarse_grid_correction_periodic(nf,nc,0.001)), n_f_range, n_c_range))
hs_cgc_p1000 = list(map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,1000)) - coarse_grid_correction_periodic(nf,nc,1000)), n_f_range, n_c_range))
plt.semilogy(n_f_range, hs_cgc_m1000)
plt.semilogy(n_f_range, hs_cgc_0)
plt.semilogy(n_f_range, hs_cgc_p1000)
# plt.semilogy(n_f_range, 1/np.sqrt(n_f_range))
```
__Two-grid__
```
n_f_range = 2**np.arange(3,12)
n_c_range = 2**np.arange(2,11)
hs_2grid_m1000 = list(map(lambda nf,nc: hs_norm(
    einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,-1000)) - two_grid_it_matrix_periodic(nf,nc,-1000))
    , n_f_range, n_c_range))
hs_2grid_0 = list(map(lambda nf,nc: hs_norm(
    einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,0.001)) - two_grid_it_matrix_periodic(nf,nc,0.001))
    , n_f_range, n_c_range))
hs_2grid_p1000 = list(map(lambda nf,nc: hs_norm(
    einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,1000)) - two_grid_it_matrix_periodic(nf,nc,1000))
    , n_f_range, n_c_range))
plt.semilogy(n_f_range, hs_2grid_m1000)
plt.semilogy(n_f_range, hs_2grid_0)
plt.semilogy(n_f_range, hs_2grid_p1000)
plt.semilogy(n_f_range, 1/np.sqrt(n_f_range)*30)
```
<h1> Using Functions </h1>
<h3>Introduction to Using Functions</h3><br>
  In Python, a function is a named block of code that performs a specific task.
* One example of this is finding the square root of a number. Python has a function in its standard library named sqrt. We will discuss built-in functions in more detail in upcoming lectures.
<b>What is the use of functions?</b><br>
  Let's understand it by a simple program :<br>
* Suppose we have a program that performs the four basic arithmetic operations as the user wishes. If we write it without functions, the code becomes very difficult to debug and its readability suffers.
* Now let us try to solve the same problem with the help of a function.
```
def operation(num1,num2,operator):
if operator == '+':
print(num1,operator,num2,"=",num1 + num2)
elif operator == '-':
print(num1,operator,num2,"=",num1 - num2)
elif operator == '*':
print(num1,operator,num2,"=",num1 * num2)
elif operator == '/':
print(num1,operator,num2,"=",num1 / num2)
else:
print("Invalid operator")
operation(3,5,'+')
operation(8,5,'*')
operation(7,3,'-')
```
* Whenever we need to do some basic arithmetic operations on two numbers we can call this function.
<hr style="border:2px solid gray"> </hr>
<h3>How to write Functions</h3><br>
&emsp;We now know the importance of functions in a program. Now let's study how to write functions.<br>
<b>In Python, a function is written using the keyword `def` followed by the function name.</b>
* The general form is:
&emsp;def function_name(parameter1, parameter2, ...):
&emsp;&emsp;function body (with an optional return statement)
* <b>function_name</b>: Every function has a name that identifies the code to be executed. Function names follow the
same rules as variable names; a function name is another example of an identifier.
* <b>Parameters</b>: A function can be called with a certain number of parameters, and each parameter must be of the correct type. Some functions, like print and range, permit clients to pass a variable number of arguments, but most functions, like sqrt, expect an exact number. If a client attempts to
call a function with too many or too few arguments, the interpreter will issue an error message and refuse to run the program.
&emsp;In the above program the function name is <b>operation</b> and the parameters are <b>num1, num2, operator</b>.<br>
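* For example (a small illustrative snippet using the operation function defined above), calling it with too few arguments raises a TypeError at run time:
```
operation(3,5,'+')        # OK: exactly three arguments
try:
    operation(3,5)        # the third argument is missing
except TypeError as err:
    print(err)            # operation() missing 1 required positional argument: 'operator'
```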
<hr style="border:2px solid gray"> </hr>
<h3>How to call Functions</h3><br>
&emsp;We can call a function by its name followed by parentheses, passing the arguments inside the parentheses (if any are specified in the function header).
* In the above program, the function is called by its name and passed 3 arguments (because it accepts 3 parameters). If we call the function as operation(4,5,'+') then 4 will be stored in num1, 5 in num2 and '+' in operator. <b>This type of function call is called call by value</b>.
<hr style="border:2px solid gray"> </hr>
<h3>Return type</h3><br>
&emsp;Some functions return a value to the client. To return a value to the client we use the <b>return</b> keyword.
```
def func1():
return "This function return a string"
val = func1()
print(val)
```
In this program the function returns a string to the client, and the client accepts the return value and stores it in the variable val.
* The return statement is used inside functions.<br>
* It is usually placed at the end of the function.<br>
* A single return statement can return more than one value.<br>
```
def func2():
return "This number is ",12
val,num = func2()
print(val,num)
```
<hr style="border:2px solid gray"> </hr>
<h3>Importing other modules</h3><br>
&emsp;We can import other modules into a Python program in the following ways:<br>
* <b>from math import sqrt,pow :</b><br>
This will import only the necessary functions into the program. Here it imports only the sqrt and pow functions from the math module. We can call the function directly as sqrt(), passing a numeric argument.
* <b> from math import * </b>:<br>
This will import all the functions from the math module into the program. This kind of import is not preferred. We can still call the function directly as sqrt(), passing a numeric argument.<br>
* <b>import math :</b><br>
This does not bring the module's functions into the program's namespace directly. We call them through the module name, e.g. math.sqrt(5).
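* A small sketch contrasting the first and the third style (both compute the same values):
```
from math import sqrt,pow    # style 1: import specific names
print(sqrt(25))              # 5.0, called directly
print(pow(2,4))              # 16.0

import math                  # style 3: import the module itself
print(math.sqrt(5))          # called through the module name
```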
<hr style="border:2px solid gray"> </hr>
<h3> Standard Mathematical Functions</h3><br>
<b>sqrt</b> => Computes the square root of a number: sqrt(x) =√x.<br>
<b>exp</b> => Computes e raised a power: exp(x) = $e^{x}$.<br>
<b>log</b> => Computes the natural logarithm of a number: log(x) = $\ln x$.<br>
<b>log10</b> => Computes the common logarithm of a number: log10(x) = $\log_{10} x$.<br>
<b>cos</b> => Computes the cosine of a value specified in radians: cos(x) = cos x. Other trigonometric functions include sine, tangent, arc cosine, arc sine, arc tangent, hyperbolic cosine, hyperbolic sine, and hyperbolic tangent.<br>
<b>pow</b> => Raises one number to a power of another: pow(x,y) = ${x}^y$.<br>
<b>degrees</b> => Converts a value in radians to degrees: degrees(x) = (180 / π) * x.<br>
<b>radians</b> => Converts a value in degrees to radians: radians(x) = (π / 180) * x.<br>
<b>fabs</b> => Computes the absolute value of a number: fabs(x) = |x|.<br>
```
import math
result = math.sqrt(144)
print("Square root of 12 is ",result)
result = math.pow(2,4)
print("2^4 is ",result)
radian = math.radians(30)
result = math.sin(radian)
print("sin 30 degree is ",result)
```
<hr style="border:2px solid gray"> </hr>
<h3> Time functions </h3><br>
The time package contains a number of functions that relate to time. We will consider two: process_time and
sleep.
In earlier versions of Python there was a clock function, but in Python 3.8 it was removed after being deprecated in Python 3.3. We will use time.process_time() in place of clock.
```
from time import process_time
sum = 0;
start_time = process_time()
for i in range(1,100000001):
sum += i
elapsed = process_time() - start_time
print("sum : ",sum," time : ",elapsed)
```
* The sleep() function suspends execution for a particular time.
```
from time import sleep
for i in range(1,10):
print(i)
sleep(2) # suspend execution for 2 second
```
<hr style="border:2px solid gray"> </hr>
<h3> Random functions </h3><br>
There may be situations where we need to generate random values in a program. To achieve this we can use the random module.
All algorithmic random number generators actually produce pseudorandom numbers, not true random numbers. A pseudorandom number generator has a particular period, based on the nature of the algorithm used.
<b><u>Random Module functions</u></b>
<b>random</b> => Returns a pseudorandom floating-point number x in the range 0 ≤ x < 1.<br>
<b>randrange</b> => Returns a pseudorandom integer value within a specified range.<br>
<b>seed</b> => Sets the random number seed.<br>
```
from random import randrange,seed
seed(26)
for i in range(0,50):
print(randrange(1,1000),end=" ") # Range 1...1000
print() # print new line
```
Since we seeded the random number generator, the values will not change even if we re-run the above code, unless we change the seed value.
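To see this, reseed with the same value and draw again; the sequence repeats exactly (a small illustration):
```
from random import randrange,seed
seed(26)
first = [randrange(1,1000) for i in range(5)]
seed(26)                     # same seed again
second = [randrange(1,1000) for i in range(5)]
print(first == second)       # True: identical sequences
```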
# Random numbers and simulation
You will learn how to use a random number generator with a seed and produce simulation results (**numpy.random**, **scipy.stats**), and calculate the expected value of a random variable through Monte Carlo integration. You will learn how to save your results for later use (**pickle**). Finally, you will learn how to make your figures interactive (**ipywidgets**).
**Links:**
* [numpy.random](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html)
* [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html)
* [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html)
* datacamp on [pickle](https://www.datacamp.com/community/tutorials/pickle-python-tutorial)
**Imports:** We now import all the modules, we need for this notebook. Importing everything in the beginning makes it more clear what modules the notebook relies on.
```
import math
import pickle
import numpy as np
from scipy.stats import norm # normal distribution
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import ipywidgets as widgets
```
# Exchange economy with many consumers
Consider an **exchange economy** with
1. 2 goods, $(x_1,x_2)$
2. $N$ consumers indexed by $j \in \{1,2,\dots,N\}$
3. Preferences are Cobb-Douglas with uniformly *heterogenous* coefficients
$$
\begin{aligned}
u^{j}(x_{1},x_{2}) & = x_{1}^{\alpha_{j}}x_{2}^{1-\alpha_{j}}\\
& \,\,\,\alpha_{j}\sim\mathcal{U}(\underline{\mu},\overline{\mu})\\
& \,\,\,0<\underline{\mu}<\overline{\mu}<1
\end{aligned}
$$
4. Endowments are *homogenous* and given by
$$
\boldsymbol{e}^{j}=(e_{1}^{j},e_{2}^{j})=(k,1),\,k>0
$$
where good 2 is *numeraire*.
The implied **demand functions** are:
$$
\begin{aligned}
x_{1}^{\star j}(p_{1},p_{2},e^{j})&=\alpha_{j}\frac{I}{p_{1}}=\alpha_{j}\frac{kp_{1}+p_{2}}{p_{1}} \\
x_{2}^{\star j}(p_{1},p_{2},e^{j})&=(1-\alpha_{j})\frac{I}{p_{2}}=(1-\alpha_{j})\frac{kp_{1}+p_{2}}{p_{2}}
\end{aligned}
$$
The **equilibrium** for a random draw of $\alpha = \{\alpha_1,\alpha_2,\dots,\alpha_N\}$ is a set of **prices** $p_1$ and $p_2$ satisfying:
$$
\begin{aligned}
x_1(p_1,p_2) = \sum_{j=1}^N x_{1}^{\star j}(p_{1},p_{2},e^{j}) &= \sum_{j=1}^N e_1^j = Nk \\
x_2(p_1,p_2) = \sum_{j=1}^N x_{2}^{\star j}(p_{1},p_{2},e^{j}) &= \sum_{j=1}^N e_2^j = N
\end{aligned}
$$
**Problem:** Solve for this equilibrium.
But this requires a **distribution** of agents. Which means randomness.
We need a random number generator (RNG)!
# Random numbers
The two main approaches to generating random numbers are:
1. **Physical observations** of random processes (radioactive decay, atmospheric noise, roulette wheels, etc.)
2. **Algorithms** creating **pseudo-random numbers**. Numbers that are *determined* by the algo but **appear** random.
**Pseudo-random numbers** satisfy properties that make them as good as random for practical purposes. It should be impossible (for all practical purposes) to calculate, or otherwise guess, from any given subsequence, any previous or future values in the sequence.
**More information:** See this [video](https://www.youtube.com/watch?v=C82JyCmtKWg&app=desktop#fauxfullscreen) by Infinite Series.
## Numpy
Numpy provides various functions for drawing random numbers. We can, for example, draw random integers between 0 and 10000:
```
X = np.random.randint(0,10000,size=5)
print(X)
```
**Problem:** How can we reproduce our results the next time we open Python?
**Solution:** Use a **seed**. This will fix the algorithm to give the same sequence of numbers each time.
Choose the seed, and reset the random number generator:
```
print('set seed to 2000 and create numbers:')
np.random.seed(2000)
print(np.random.uniform(size=5))
print('\nreset algorithm by stating the same seed again:')
np.random.seed(2000)
print(np.random.uniform(size=5))
```
> **Note:** The first and third draws above are exactly the same.
We can also **save and load the state** of the random number generator.
```
# a. save state
state = np.random.get_state()
print('generate numbers from current state:')
print(np.random.uniform(size=5))
# b. draw some random number
print('\nchange state by generating some more numbers:')
print(np.random.uniform(size=5))
# c. reset state
np.random.set_state(state)
# d. draw the same random numbers again
print('\ngenerate numbers from past state by reloading state:')
print(np.random.uniform(size=5))
```
> **Important**: You should *only set the seed once* per program. Repeatedly re-seeding can break the statistical properties of the draws.
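A small illustration of the problem (illustrative only): re-seeding before every draw makes every "random" number identical.
```
draws = []
for i in range(3):
    np.random.seed(2000)      # re-seeding inside the loop resets the generator
    draws.append(np.random.uniform())
print(draws)                  # three identical numbers
```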
## Different distributions
Draw random numbers from various distributions: normal, beta, uniform.
```
X = np.random.normal(loc=0,scale=1,size=10**6)
Y = np.random.beta(a=5,b=2,size=10**6)
Z = np.random.uniform(low=-2,high=2,size=10**6)
```
Create a **user-defined** probability distribution with `np.random.choice`
```
# a. Support of distribution
vec = np.array([-2.5,-2.0,-1.5,-1.0,-0.5,0,0.5,1.0,1.5,2])
# b. Define probabilities
prob = np.exp(np.linspace(-1,1,vec.size))**1.1254 # all positive numbers
prob /= np.sum(prob) # make probabilities sum to one
# c. Get draws from distribution
K = np.random.choice(vec,size=10**6,p=prob)
```
Plot the various distributions:
```
fig = plt.figure(dpi=100)
ax = fig.add_subplot(1,1,1)
ax.hist(X,bins=100,density=True,alpha=0.5,label='normal') # alpha < 1 = transparent
ax.hist(Y,bins=100,density=True,alpha=0.5,label='beta')
ax.hist(Z,bins=100,density=True,alpha=0.5,label='uniform')
ax.hist(K,bins=100,density=True,alpha=0.5,label='choice')
ax.set_xlim([-3,3])
ax.legend(loc='upper left'); # note: the ; stops output from being printed
```
**Task:** Follow this [link](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html). Choose a distribution and add it to the figure above.
## Analytical results
Sometimes, you want to compare a distribution of numbers to a **known** distribution.
For instance, how close are our draws to a normal distribution?
In this case, the `scipy.stats.norm` module comes in handy.
It operates differently from numpy. It creates an **object** to generate numbers from rather than just making an array at once.
```
from scipy.stats import norm
# a. create analytical distribution
loc_guess = 0.25
scale_guess = 0.75
# Alternative: estimate the guesses
# loc_guess, scale_guess = norm.fit(X)
# Object to generate random numbers based on parameters
F = norm(loc=loc_guess,scale=scale_guess)
rnd = F.rvs(5) # example: create 5 random draws from the distribution F
print(f'F pdf at 0.0: {F.pdf(0.0): 1.3f} \nF cdf at 0.0: {F.cdf(0.0): 1.3f}') # the object F has several useful functions available
# b. vector of x values
x_low = F.ppf(0.001) # x value where cdf is 0.001
x_high = F.ppf(0.999) # x value where cdf is 0.999
x_support = np.linspace(x_low,x_high,100)
x_guess = F.pdf(x_support) # call F to evaluate density at x_support
# c. compare X from before with new distribution
fig = plt.figure(dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(x_support,x_guess,lw=2,label='estimated')
ax.hist(X,bins=100,density=True,histtype='stepfilled'); # plot X using a histogram
# Scipy is built on Numpy. So the seed set by Numpy is carried over to Scipy.
np.random.seed(1)
print(F.rvs(5))
np.random.seed(1)
print(F.rvs(5))
```
**Task:** Make the pdf fit the histogram closely.
# Demand
**Warm-up**: Choose parameters and define demand functions.
```
# a. parameters
N = 1000 # number of agents
k = 2 # relative endowment of good 1
mu_low = 0.1 # lower bound on alpha
mu_high = 0.9 # upper bound on alpha
# b. demand functions
def demand_good_1_func(alpha,p1,p2,k):
I = k*p1+p2
return alpha*I/p1
def demand_good_2_func(alpha,p1,p2,k):
I = k*p1+p2
return (1-alpha)*I/p2
```
**Quiz:** take a quick [quiz](https://forms.office.com/Pages/ResponsePage.aspx?id=kX-So6HNlkaviYyfHO_6kckJrnVYqJlJgGf8Jm3FvY9UMFpSRTIzUlJKMkdFQlpIN1VZUE9EVTBaMSQlQCN0PWcu) regarding the demand functions.
$$
x_1(p_1,p_2) = \sum_{j=1}^N x_{1}^{\star j}(p_{1},p_{2},e^{j}) = \sum_{j=1}^N \alpha_{j}\frac{kp_{1}+p_{2}}{p_{1}}
$$
Find demand distribution and total demand:
```
def find_demand_good_1(alphas,p1,p2,k):
distr = demand_good_1_func(alphas,p1,p2,k) # Notice we are passing in arrays of alphas together with scalars! It works because of numpy broadcasting.
total = distr.sum()
return distr,total
```
Calculate for various prices:
```
# a. draw alphas
alphas = np.random.uniform(low=mu_low, high=mu_high, size=N)
# b. a set of hypothetical prices
p1_vec = [0.5,1,2,5]
p2 = 1
# c. calculate demand given
dists = np.empty((len(p1_vec),N))
totals = np.empty(len(p1_vec))
for i,p1 in enumerate(p1_vec):
dist,total = find_demand_good_1(alphas,p1,p2,k)
dists[i,:] = dist
totals[i] = total
```
Plot the resulting demand given prices
```
fig = plt.figure(figsize=(10,4))
ax_left = fig.add_subplot(1,2,1)
ax_left.set_title('Distributions of demand')
for i,p1 in enumerate(p1_vec):
ax_left.hist(dists[i],density=True,alpha=0.5,label=f'$p_1 = {p1}$')
ax_left.legend(loc='upper right')
ax_right = fig.add_subplot(1,2,2)
ax_right.set_title('Total demand given $p_1$')
ax_right.grid(True)
ax_right.plot(p1_vec,totals)
ax_right.set_xlabel('$p_1$');
```
# Interactive figures
Create a function constructing a figure:
```
# This function tells the widget how the plot should look at a specific parametrization
def interactive_figure(alphas,p1,p2,k):
# a. calculations
dist,_total = find_demand_good_1(alphas,p1,p2,k)
# b. figure
fig = plt.figure(dpi=100)
ax = fig.add_subplot(1,1,1)
ax.hist(dist,density=True)
ax.set_xlim([0,4]) # fixed x range
ax.set_ylim([0,0.8]) # fixed y range
```
**Case 1:** Make it interactive with a **slider**
```
# Write out which arguments to interactive_figure you want to be changing or staying fixed
widgets.interact(interactive_figure,
alphas=widgets.fixed(alphas),
p1=widgets.FloatSlider(description="$p_1$", min=0.1, max=5, step=0.05, value=2),
p2=widgets.fixed(p2),
k=widgets.fixed(k)
);
```
**Case 2:** Make it interactive with a **textbox**:
```
widgets.interact(interactive_figure,
alphas=widgets.fixed(alphas),
p1=widgets.FloatText(description="$p_1$", value=2),
p2=widgets.fixed(p2),
k=widgets.fixed(k)
);
```
**Case 3:** Make it interactive with a **dropdown menu**
```
widgets.interact(interactive_figure,
alphas=widgets.fixed(alphas),
p1=widgets.Dropdown(description="$p_1$", options=[0.5,1,1.5,2.0,2.5,3], value=2),
p2=widgets.fixed(p2),
k=widgets.fixed(k)
);
```
**Task:** Add a slider for \\(k\\) to the interactive figure below.
```
# change this code
widgets.interact(interactive_figure,
alphas=widgets.fixed(alphas),
p1=widgets.FloatSlider(description="$p_1$", min=0.1, max=5, step=0.05, value=2),
p2=widgets.fixed(p2),
k=widgets.fixed(k)
);
```
# Equilibrium
The equilibrium conditions were that **excess demand**, $Z$, is = 0 for both goods:
$$
\begin{aligned}
\sum_{j=1}^N x_{1}^{\star j}(p_{1},p_{2},e^{j}) &= Nk \Leftrightarrow Z_1 \equiv \sum_{j=1}^N x_{1}^{\star j}(p_{1},p_{2},e^{j}) - Nk = 0 \\
\sum_{j=1}^N x_{2}^{\star j}(p_{1},p_{2},e^{j}) &= N \Leftrightarrow Z_2 \equiv \sum_{j=1}^N x_{2}^{\star j}(p_{1},p_{2},e^{j}) - N = 0
\end{aligned}
$$
**Idea:** Solve just the first equation. Then the second is satisfied due to Walras's law.
**Algorithm:**
First choose a tolerance $\epsilon > 0$ and an adjustment factor $\kappa>0$, and a guess on $p_1 > 0$.
Then find the equilibrium price by following iterative algorithm:
1. Calculate total excess demand of good 1: $Z_1 = \sum_{j=1}^N x_{1}^{\star j}(p_{1},p_{2},e^{j}) - Nk$
2. If $|Z_1| < \epsilon $ stop
3. If $|Z_1| \geq \epsilon $ set $p_1 = p_1 + \kappa \cdot \frac{Z_1}{N}$
4. Return to step 1
**Intuition**
If excess demand is **positive** and far from 0, then **increase** the price.
If excess demand is **negative** and far from 0, then **decrease** the price.
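A minimal sketch of this loop is given below. The actual implementation used later lives in `market_eq.py`; the names `excess_demand_good_1_func` and `find_equilibrium` match the ones imported from it, but the bodies here are only an illustration built on the demand functions defined above.
```
def excess_demand_good_1_func(alphas,p1,p2,k):
    # total demand for good 1 minus the total endowment N*k
    total_demand = np.sum(demand_good_1_func(alphas,p1,p2,k))
    return total_demand - alphas.size*k

def find_equilibrium(alphas,p1_guess,p2,k,kappa=0.1,eps=1e-8,maxiter=50_000):
    p1 = p1_guess
    for _ in range(maxiter):
        Z1 = excess_demand_good_1_func(alphas,p1,p2,k)  # step 1: excess demand
        if np.abs(Z1) < eps:                            # step 2: converged
            break
        p1 += kappa*Z1/alphas.size                      # step 3: adjust the price
    return p1
```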
```
# Use the functions in module market_eq
%load_ext autoreload
%autoreload 2
from market_eq import *
```
Finding the equilibrium price.
See `market_eq.py` for implementation.
```
p1_guess = 1.4
p2 = 1
kappa = 0.1
eps = 1e-8
p1 = find_equilibrium(alphas,p1_guess,p2,k,kappa=kappa,eps=eps)
```
**Check:** Ensure that excess demand of both goods are (almost) zero.
```
Z1 = excess_demand_good_1_func(alphas,p1,p2,k)
Z2 = excess_demand_good_2_func(alphas,p1,p2,k)
print(Z1,Z2)
assert np.abs(Z1) < eps
assert np.abs(Z2) < eps
```
**Quiz:** take a quick quiz on the algorithm [here](https://forms.office.com/Pages/ResponsePage.aspx?id=kX-So6HNlkaviYyfHO_6kckJrnVYqJlJgGf8Jm3FvY9UMjRVRkEwQTRGVVJPVzRDS0dIV1VJWjhJVyQlQCN0PWcu)
# Numerical integration by Monte Carlo
Numerical integration is the task of computing
$$
\mathbb{E}[\mathbf{g}(x)] \text{ where } x \sim F,
$$
where $F$ is a known probability distribution and $\mathbf{g}(\cdot)$ is a function. In terms of a integral this can also be writen as
$$
\mathbb{E}[\mathbf{g}(x)] = \int_{x\in\mathcal{X}} \mathbf{g}(x) dF(x) = \int_{x\in\mathcal{X}} \mathbf{g}(x) f(x) dx
$$
where $f$ is the PDF for the CDF $F$ and $\mathcal{X}$ is the domain of $x$.
In an economic model, $\mathbf{g}$ might represent the way the state of an agent or the economy develops stochastically.
Relying on the law of large numbers we **approximate** the true integral with a finite sample:
$$
\mathbb{E}[\mathbf{g}(x)] \approx \frac{1}{N}\sum_{i=1}^{N} \mathbf{g}(x_i)
$$
where $x_i$ is drawn from $F$ using a random number generator. This is also called **numerical integration by Monte Carlo**.
For an easy implementation, we use
$$
\mathbf{g}(x) = (x-2)^2
$$
**Monte Carlo function:**
```
def g(x):
return (x-2)**2
def MC(N,g,F):
X = F.rvs(size=N) # rvs = draw N random values from F
return np.mean(g(X)) # apply g to X
```
**Example** with a normal distribution:
```
N = 1000
mu = 0.1
sigma = 0.5
F = norm(loc=mu,scale=sigma)
mc_integral = MC(N,g,F)
print('E[g(x)] =', mc_integral)
```
Function for drawing \\( K \\) Monte Carlo samples:
```
def MC_sample(N,g,F,K):
results = np.empty(K)
for i in range(K):
results[i] = MC(N,g,F)
return results
```
The variance across Monte Carlo samples falls with larger $N$:
```
K = 1000
for N in [10**2,10**3,10**4,10**5]:
results = MC_sample(N,g,F,K)
print(f'N = {N:8d}: {results.mean():.6f} (std: {results.std():.4f})')
```
In principle, you can always use Monte Carlo. But if $\mathbf{g}$ had been a very computationally costly function, Monte Carlo would be infeasible. But there are other ways, see Appendix.
# Load and save variables
## Pickle
A good allround method for loading and saving is to use **pickle**. Here is how to save:
```
# a. variables
my_dict = {'a':1,'b':2}
my_vec = np.array([1,2,3])
my_tupple = (1,4,2)
# b. put them in a dictionary
my_data = {}
my_data['my_dict'] = my_dict
my_data['my_vec'] = my_vec
my_data['my_tupple'] = my_tupple
# c. save the dictionary in a file
with open(f'data.p', 'wb') as f: # wb = write binary
pickle.dump(my_data, f)
```
Delete the variables:
```
del my_dict
del my_vec
del my_tupple
```
Load the data again:
```
# a. try
try:
print(my_tupple)
except:
    print('my_tupple does not exist')
# b. load
with open(f'data.p', 'rb') as f: # rb = read binary
data = pickle.load(f)
my_dict = data['my_dict']
my_vec = data['my_vec']
my_tupple = data['my_tupple']
# c. try again
print(my_vec)
print(my_tupple)
```
## Saving with numpy
When only saving/loading **numpy arrays**, an alternative is to use ``np.savez`` (or ``np.savez_compressed``). This is typically faster than pickle.
Here is how to save some data:
```
my_data = {}
my_data['A'] = np.array([1,2,3])
my_data['B'] = np.zeros((5,8))
my_data['C'] = np.ones((7,3,8))
np.savez(f'data.npz', **my_data)
# '**' unpacks the dictionary
```
Here is how to load the data again:
```
# a. delete
del my_data
# a. load all
my_data = {}
with np.load(f'data.npz') as data_obj:
for key in data_obj.files:
my_data[key] = data_obj[key]
print(my_data['A'])
# b. load single array
X = np.load(f'data.npz')['A']
print(X)
```
## Create a class to generate random numbers
```
class dice_cup:
def __init__(self,ndice):
self.ndice = ndice
def roll(self):
self.dice = np.random.randint(1,7,size=self.ndice)
print(self.dice)
def shuffle(self):
np.random.shuffle(self.dice)
print(self.dice)
def roll_and_sum(self):
pass
my_dice_cup = dice_cup(4)
my_dice_cup.roll()
my_dice_cup.shuffle()
my_dice_cup.roll_and_sum()
```
**Task:** Add a method ``roll_and_sum()`` to the class above, which rolls the dice and prints their sum. Compare the value of your roll to your neighbor's.
*(You can delete the pass statement when you start coding. It is there to tell Python that roll_and_sum() has a body, since Python cannot handle a completely empty function.)*
# Summary
**This lecture:** We have talked about:
1. numpy.random: Drawing (pseudo-)random numbers (seed, state, distributions)
2. scipy.stats: Using analytical random distributions (ppf, pdf, cdf, rvs)
3. ipywidgets: Making interactive figures
4. pickle and np.savez: Saving and loading data
The method you learned for finding the equilibrium can be used in a lot of models. For example, a simple method can be applied with multiple goods.
**Your work:** Before solving Problem Set 2 read through this notebook and play around with the code.
**Next lecture:** Workflow and debugging. Go through these guides beforehand:
1. [Installing Python and VSCode](https://numeconcopenhagen.netlify.com//guides/python-setup)
2. [Running Python in JupyterLab](https://numeconcopenhagen.netlify.com//guides/jupyterlab)
3. [Running Python in VSCode](https://numeconcopenhagen.netlify.com//guides/vscode-basics)
You must have installed **git** and have a **GitHub account!** (step 2 in [Installing Python and VSCode](https://numeconcopenhagen.netlify.com//guides/python-setup)).
**Finally:** You can begin to think about who you want to work together with for the group assignments. We will talk more about the inaugural project next time.
## Advanced: Middle-square method for generating random numbers
Proposed by **John von Neumann**:
1. Start with a $N$ digit number
2. Square the number
3. Pad the number with leading zeros making it a $2N$ digit number
4. Extract the middle $N$ digits (*your random number*)
5. Return to step 1 to generate one more
> **Pro:** Simple and easy to implement. Conceptually somewhat similar to more advanced methods (e.g. *Mersenne-Twister* used by *numpy*).
>
> **Con:** Cycles can be no longer than $8^N$ periods. Many repeating cycles are very short. Internal state is directly observable.
>
> **Conclusion:** Can not be used in practice.
**Code:** An implementation in Python for $N = 4$ digit random integers:
```
def rng(number,max_iter=100):
already_seen = [] # list of seen numbers
i = 0
while number not in already_seen and i < max_iter:
already_seen.append(number)
squared = number**2
padded = str(squared).zfill(8) # add leading zeros
number = int(padded[2:6]) # extract middle 4 numbers
print(f"square = {squared:8d}, padded = {padded} -> {number:4d}")
i += 1
```
A reasonable cycle:
```
rng(4653)
```
A short cycle:
```
rng(540)
```
No cycle at all:
```
rng(3792)
```
## Advanced: Gauss-Hermite quadrature
**Problem:** Numerical integration by Monte Carlo is **slow**.
**Solution:** Use smarter integration formulas on the form
$$
\mathbb{E}[g(x)] \approx \sum_{i=1}^{n} w_ig(x_i)
$$
where $(x_i,w_i),\ i \in \{1,2,\dots,n\}$, are called **quadrature nodes and weights** and are provided by some theoretical formula depending on the distribution of $x$.
**Example I, Normal:** If $x \sim \mathcal{N}(\mu,\sigma)$ then we can use [Gauss-Hermite quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature) as implemented below.
```
def gauss_hermite(n):
""" gauss-hermite nodes
Args:
n (int): number of points
Returns:
x (numpy.ndarray): nodes of length n
w (numpy.ndarray): weights of length n
"""
# a. calculations
i = np.arange(1,n)
a = np.sqrt(i/2)
CM = np.diag(a,1) + np.diag(a,-1)
L,V = np.linalg.eig(CM)
I = L.argsort()
V = V[:,I].T
# b. nodes and weights
x = L[I]
w = np.sqrt(math.pi)*V[:,0]**2
return x,w
def normal_gauss_hermite(sigma, n=7, mu=None, exp=False):
""" normal gauss-hermite nodes
Args:
sigma (double): standard deviation
n (int): number of points
mu (double,optinal): mean
exp (bool,optinal): take exp and correct mean (if not specified)
Returns:
x (numpy.ndarray): nodes of length n
w (numpy.ndarray): weights of length n
"""
if sigma == 0.0 or n == 1:
x = np.ones(n)
if mu is not None:
x += mu
w = np.ones(n)
return x,w
# a. GaussHermite
x,w = gauss_hermite(n)
x *= np.sqrt(2)*sigma
# b. log-normality
if exp:
if mu is None:
x = np.exp(x - 0.5*sigma**2)
else:
x = np.exp(x + mu)
else:
if mu is None:
x = x
else:
x = x + mu
w /= np.sqrt(math.pi)
return x,w
```
**Results:** Because the function is "nice", very few quadrature points are actually needed (*not generally true*).
```
for n in [1,2,3,5,7,9,11]:
x,w = normal_gauss_hermite(mu=mu,sigma=sigma,n=n)
result = np.sum(w*g(x))
print(f'n = {n:3d}: {result:.10f}')
```
**Example II, log-normal ([more info](https://en.wikipedia.org/wiki/Log-normal_distribution)):**
1. Let $\log x \sim \mathcal{N}(\mu,\sigma)$.
2. Gauss-Hermite quadrature nodes and weights can be used with the option `exp=True`.
3. To ensure $\mathbb{E}[x] = 1$ then $\mu = -0.5\sigma^2$.
```
z = np.random.normal(size=1_000_000,scale=sigma)
print('mean(x) when mu = 0')
x,w = normal_gauss_hermite(mu=0,sigma=sigma,n=7,exp=True)
print(f'MC: {np.mean(np.exp(z)):.4f}')
print(f'Gauss-Hermite: {np.sum(x*w):.4f}')
print('')
print('mean(x), mu = -0.5*sigma^2')
x,w = normal_gauss_hermite(sigma=sigma,n=7,exp=True)
print(f'MC: {np.mean(np.exp(z - 0.5*sigma**2)):.4f}')
print(f'Gauss-Hermite: {np.sum(x*w):.4f}')
```
# 2021-07-18 Mollow-triplets cleanup units
There was lots of confusion in the `mollow-triplets-plots` notebook about factors of $\Gamma$, so let's try and fix that.
I abandoned this after I figured out how to rescue the results of the previous notebook, but I'll leave it here, since the removal of gamma and L as explicit function arguments may be a useful thing to be reminded of in the future.
```
from functools import partial
import pathlib
import pdb
import pickle
import numpy as np
from scipy.optimize import minimize
from scipy.fftpack import fft, rfft, fftfreq, rfftfreq, fftshift
from scipy.integrate import quad
from scipy.special import factorial
import matplotlib.pyplot as plt
import seaborn as sns
import pysme.integrate as integ
import pysme.hierarchy as hier
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
# define Qubit operators
sx = np.array([[0, 1], [1, 0]], dtype=np.complex)
sy = np.array([[0, -1.j], [1.j, 0]], dtype=np.complex)
sz = np.array([[1, 0], [0, -1]], dtype=np.complex)
Id = np.eye(2, dtype=np.complex)
sp = (sx + 1.j * sy) / 2
sm = (sx - 1.j * sy) / 2
zero = np.zeros((2, 2), dtype=np.complex)
plt.style.use('../paper.mplstyle')
```
A coherent drive adds a Hamiltonian term.
\begin{align}
d\rho
&=
dt[\beta^*L-\beta L^\dagger,\rho]+dt\mathcal{D}[L]\rho
\\
&=
-idt[i\beta^*L-i\beta L^\dagger,\rho]+dt\mathcal{D}[L]\rho
\\
H_{\text{eff}}
&=
i\beta^*L-i\beta L^\dagger
\\
&=
\sqrt{\Gamma}\big[\mathrm{Im}(\beta)\sigma_x+\mathrm{Re}(\beta)\sigma_y\big]
\\
&=
\frac{\Omega}{2}\hat{\mathbf{n}}\cdot\boldsymbol{\sigma}
\\
\Omega
&=
2\sqrt{\Gamma}|\beta|
\end{align}
Carmichael writes the Rabi frequency as $\Omega_\text{Carmichael}=\Omega/2$, so $\Omega_\text{Carmichael}=\sqrt{\Gamma}|\beta|$.
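As a quick numerical sanity check of this relation (using the values $\Gamma=0.5$ and $\beta=8i$ that appear further down in this notebook; purely illustrative):
```
Gamma_check = 0.5                                        # atomic linewidth
beta_check = 8.j                                         # coherent drive amplitude
Omega_check = 2*np.sqrt(Gamma_check)*np.abs(beta_check)  # full Rabi frequency, ~11.31
print(Omega_check, Omega_check/2)                        # Carmichael's convention is the half
```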
```
def rect(x, a, b):
return np.where(x < a, 0, np.where(x < b, 1, 0))
def xi_rect(t, a, b):
return rect(t, a, b)/np.sqrt(b - a)
def rho_from_ket(ket):
return np.outer(ket, ket.conj())
def vac_rho(n):
ket = np.zeros(n + 1, dtype=np.complex)
ket[0] = 1
return rho_from_ket(ket)
def make_squeezed_state_vec(r, mu, N, normalized=True):
r'''Make a truncated squeezed-state vector.
The squeezed-state vector is :math:`S(r,\mu)|0\rangle`. The truncated
vector is renormalized by default.
Parameters
----------
N: positive integer
The dimension of the truncated Hilbert space, basis {0, ..., N-1}
r: real number
Squeezing amplitude
mu: real number
Squeezing phase
normalized: boolean
Whether or not the truncated vector is renormalized
Returns
-------
numpy.array
Squeezed-state vector in the truncated Hilbert space, represented in the
number basis
'''
ket = np.zeros(N, dtype=np.complex)
for n in range(N//2):
ket[2*n] = (1 / np.sqrt(np.cosh(r))) * ((-0.5 * np.exp(2.j * mu) * np.tanh(r))**n /
factorial(n)) * np.sqrt(factorial(2 * n))
return ket / np.linalg.norm(ket) if normalized else ket
def sqz_rho(r, mu, n):
return rho_from_ket(make_squeezed_state_vec(r, mu, n + 1))
def Heff_fn(beta, L):
return 1.j*np.conj(beta)*L - 1.j*beta*L.conj().T
def calc_hier_state_evo(xi_fn, L, r, mu, beta, m_max, rho_ss, t, t0=0, timesteps=2**10,
solve_ivp_kwargs=None):
Id_field = np.eye(m_max + 1, dtype=np.complex)
factory = hier.HierarchyIntegratorFactory(2, m_max)
integrator = factory.make_uncond_integrator(xi_fn, Id, L, Heff_fn(beta, L), r, mu)
times = np.linspace(t0, t, timesteps)
soln_t = integrator.integrate(rho_ss, times)
return soln_t
def calc_hier_auto_corr(xi_fn, L, r, mu, beta, m_max, taus, rho_ss, t, t0=0, timesteps=2**10,
solve_ivp_kwargs=None):
sp_ss = np.trace(sp @ rho_ss)
sm_ss = np.trace(sm @ rho_ss)
Id_field = np.eye(m_max + 1, dtype=np.complex)
factory = hier.HierarchyIntegratorFactory(2, m_max)
integrator = factory.make_uncond_integrator(xi_fn, Id, L, Heff_fn(beta, L), r, mu)
times = np.linspace(t0, t, timesteps)
soln_t = integrator.integrate(rho_ss, times)
sp_ss_t = soln_t.get_expectations(sp, vac_rho(m_max), idx_slice=np.s_[-1], hermitian=False)
rho_ss_t = soln_t.get_hierarchy_density_matrices(np.s_[-1])
L_t_t = rho_ss_t @ np.kron(sp, Id_field)
L_t_taus = integrator.integrate_hier_init_cond(L_t_t, taus + t,
solve_ivp_kwargs=solve_ivp_kwargs)
Expt_t_taus = L_t_taus.get_expectations(sm, vac_rho(m_max), hermitian=False)
soln_t_taus = integrator.integrate_hier_init_cond(rho_ss_t, taus + t,
solve_ivp_kwargs=solve_ivp_kwargs)
sm_ss_t_taus = soln_t_taus.get_expectations(sm, vac_rho(m_max), hermitian=False)
# Subtract off a bunch of stuff that gets rid of the delta
return (Expt_t_taus - sp_ss_t * sm_ss - sp_ss * sm_ss_t_taus + sp_ss * sm_ss,
sp_ss, sm_ss, sp_ss_t, sm_ss_t_taus)
def calc_white_auto_corr(L, beta, r, mu, times_ss, taus, solve_ivp_kwargs=None):
N = np.sinh(r)**2
M_sq = -np.exp(2.j * mu) * np.sinh(r) * np.cosh(r)
integrator = integ.UncondGaussIntegrator(L, M_sq, N, Heff_fn(beta, L))
soln_ss = integrator.integrate(Id/2, times_ss)
#pdb.set_trace()
rho_ss = soln_ss.get_density_matrices(np.s_[-1])
sp_ss = np.trace(sp @ rho_ss)
sm_ss = np.trace(sm @ rho_ss)
#pdb.set_trace()
L_0_taus = integrator.integrate_non_herm(rho_ss @ sp, taus, solve_ivp_kwargs=solve_ivp_kwargs)
#pdb.set_trace()
Expt_t_taus = L_0_taus.get_expectations(sm, hermitian=False)
#pdb.set_trace()
return rho_ss, Expt_t_taus - sp_ss * sm_ss
def calc_quasi_markoff_degen_PA_auto_corr(
gamma, Omega, omega_A, omega_L, gamma_c, eps, phi_L, phi_s, times_ss, taus, solve_ivp_kwargs=None):
N_A, N_Om, M_A, M_Om, Delta_AL, F_A, G_A = get_degen_PA_params(
Omega, omega_A, omega_L, gamma_c, eps, phi_L, phi_s)
integrator = integ.QuasiMarkoff2LvlIntegrator(
gamma, N_A, N_Om, M_A, M_Om, Delta_AL, Omega, phi_L, F_A, G_A)
soln_ss = integrator.integrate(Id/2, times_ss)
rho_ss = soln_ss.get_density_matrices(np.s_[-1])
sp_ss = np.trace(sp @ rho_ss)
sm_ss = np.trace(sm @ rho_ss)
L_0_taus = integrator.integrate_non_herm(rho_ss @ sp, taus, solve_ivp_kwargs=solve_ivp_kwargs)
Expt_t_taus = L_0_taus.get_expectations(sm, hermitian=False)
return rho_ss, Expt_t_taus - sp_ss * sm_ss
def calc_hier_auto_corr_fock(xi_fn, L, r, mu, beta, m_max, taus, rho_ss, t, t0=0, timesteps=2**10,
solve_ivp_kwargs=None):
"""Get the auto-correlation function for the Fock hierarchy
Parameters
----------
xi_fn:
The wavepacket-envelope function
L:
The Lindblad operator (contains the factor sqrt(gamma), gamma being the atomic linewidth)
r:
The logarithm of the magnitude of the squeezing amplitude
mu:
The phase of the squeezing
beta:
The amplitude of the coherent drive
m_max:
The maximum level of the hierarchy used
taus:
The time offsets for the two-time correlations
rho_ss:
The steady-state density matrix under the coherent drive
t:
The reference time for the two-time correlations
t0:
The initial time to start evolving the coherent-drive steady-state density matrix
(should be when the wavepacket starts)
timesteps:
The number of timesteps to use for the density-matrix evolution
"""
sp_ss = np.trace(sp @ rho_ss)
sm_ss = np.trace(sm @ rho_ss)
Id_field = np.eye(m_max + 1, dtype=np.complex)
factory = hier.HierarchyIntegratorFactory(2, m_max)
integrator = factory.make_uncond_integrator(xi_fn, Id, L, Heff_fn(beta, L), 0, 0)
times = np.linspace(t0, t, timesteps)
soln_t = integrator.integrate(rho_ss, times)
sp_ss_t = soln_t.get_expectations(sp, sqz_rho(r, mu, m_max), idx_slice=np.s_[-1], hermitian=False)
rho_ss_t = soln_t.get_hierarchy_density_matrices(np.s_[-1])
L_t_t = rho_ss_t @ np.kron(sp, Id_field)
L_t_taus = integrator.integrate_hier_init_cond(L_t_t, taus + t,
solve_ivp_kwargs=solve_ivp_kwargs)
Expt_t_taus = L_t_taus.get_expectations(sm, sqz_rho(r, mu, m_max), hermitian=False)
soln_t_taus = integrator.integrate_hier_init_cond(rho_ss_t, taus + t,
solve_ivp_kwargs=solve_ivp_kwargs)
sm_ss_t_taus = soln_t_taus.get_expectations(sm, sqz_rho(r, mu, m_max), hermitian=False)
# Subtract off a bunch of stuff that gets rid of the delta
return (Expt_t_taus - sp_ss_t * sm_ss - sp_ss * sm_ss_t_taus + sp_ss * sm_ss,
sp_ss, sm_ss, sp_ss_t, sm_ss_t_taus)
def plot_emission_spectrum(auto_corr, d_tau, label, rabi_freq, plot_kwargs=None, ax=None, figsize=(10,3)):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
return_fig = True
else:
return_fig = False
if plot_kwargs is None:
plot_kwargs = dict()
fourier = fft(auto_corr.real)
freqs = fftfreq(n=auto_corr.size, d=d_tau)
ax.plot(fftshift(freqs), np.abs(fftshift(fourier)), label=label, **plot_kwargs)
ax.legend()
ax.set_xticks([-rabi_freq / np.pi, 0, rabi_freq / np.pi])
ax.set_xlim(-1.5 * rabi_freq / np.pi, 1.5 * rabi_freq / np.pi)
ax.set_xticklabels([r'$\omega_c-\Omega$', r'$\omega_c$', r'$\omega_c+\Omega$'])
ax.set_yticks([])
ax.set_ylim(0, None)
ax.set_ylabel(r'$S(\omega)$')
ax.set_xlabel(r'$\omega$')
return (fig, ax) if return_fig else None
rho0 = (Id - sz) / 2
gamma = 0.5
S = Id
L = np.sqrt(gamma) * sm
H = zero
beta = 8.j
T = 2**8
taus = np.linspace(0, T, 2**13)
times_ss = np.linspace(0, 32, 2**12)
rho_ss_coh, delta_Expt_t_taus_coh = calc_white_auto_corr(
L=L,
beta=beta,
r=0.,
mu=0.,
times_ss=times_ss,
taus=taus,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9},
)
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
d_tau=np.diff(taus)[0],
label='No squeezing',
rabi_freq=np.sqrt(gamma) * np.abs(beta),
)
plt.tight_layout()
plt.show()
def gen_save_load_data(data_gen_method, fname, overwrite=False):
'''Get the data returned by the generating method, running the method only if the data isn't already available.
If the given filename exists, load and return the data from that file. Otherwise generate the data using the
supplied method and save and return it.
Useful for notebooks you imagine running multiple times, but where some of the data is expensive to generate
and you want to save it to disk to be reloaded for future sessions.
'''
try:
with open(fname, 'xb' if not overwrite else 'wb') as f:
data = data_gen_method()
pickle.dump(data, f)
except FileExistsError:
print('Data already exist.')
with open(fname, 'rb') as f:
data = pickle.load(f)
return data
def gen_Expt_t_taus_square_wavepacket_freq_offset_fock(L, r, mu, beta, gamma, m_max, taus, rho_ss_coh, t_final, wavepacket_length, freq_offset=0., solve_ivp_kwargs=None):
xi_fn = lambda t: xi_rect(t, 0, wavepacket_length) * np.exp(-1.j * freq_offset * t)
    # gamma is not passed on: the hierarchy integrator only needs L, which already includes sqrt(gamma)
    return calc_hier_auto_corr_fock(xi_fn,
                                    L, r, mu, beta, m_max,
                                    taus, rho_ss_coh, t=t_final,
                                    solve_ivp_kwargs=solve_ivp_kwargs)
mollow_triplet_plot_dir = pathlib.Path('mollow-triplit-plot')
if not mollow_triplet_plot_dir.exists():
mollow_triplet_plot_dir.mkdir()
%%time
r = np.log(2)
mu = 0
m_max = 12
t_final = 0.5
Expt_t_taus_wavepacket_fock = gen_save_load_data(
partial(
gen_Expt_t_taus_wavepacket_fock,
L=L,
r=r,
mu=mu,
beta=beta,
gamma=gamma,
m_max=m_max,
taus=taus,
rho_ss_coh=rho_ss_coh,
t_final=t_final,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9}
),
mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}.pickle'),
overwrite=False,
)
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
d_tau=np.diff(taus)[0],
label='No squeezing',
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus_wavepacket_fock[0],
d_tau=np.diff(taus)[0],
label='Wavepacket',
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
)
plt.tight_layout()
```
The squeezing used in my dissertation (as extracted from my `2018-06-12-2` notebook, which generated the data I plotted for my dissertation in my `2018-06-13` notebook) is $r=\ln4$
```python
Expt_t_taus['Resonant'] = {'data': Expt_t_taus_wavepacket_on_res[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': 0,
'xi freq': 0,
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```python
Expt_t_taus['Resonant Phased'] = {'data': Expt_t_taus_wavepacket_on_res_phase[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': np.pi / 2,
'xi freq': 0,
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```python
Expt_t_taus['Sideband'] = {'data': Expt_t_taus_wavepacket[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': 0,
'xi freq': 2 * np.abs(8.j),
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```python
Expt_t_taus['Sideband Phased'] = {'data': Expt_t_taus_wavepacket_phase[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': np.pi / 2,
'xi freq': 2 * np.abs(8.j),
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```
def gen_save_load_data_with_args(fname, data_gen_method, method_args=None, method_kwargs=None, overwrite=False):
'''Get the data returned by the generating method, running the method only if the data isn't already available.
If the given filename exists, load and return the data from that file. Otherwise generate the data using the
supplied method and save and return it.
Useful for notebooks you imagine running multiple times, but where some of the data is expensive to generate
and you want to save it to disk to be reloaded for future sessions.
'''
if method_args is None:
method_args = []
if method_kwargs is None:
        method_kwargs = dict()
try:
with open(fname, 'xb' if not overwrite else 'wb') as f:
data = data_gen_method(*method_args, **method_kwargs)
pickle.dump(data, f)
except FileExistsError:
print('Data already exist.')
with open(fname, 'rb') as f:
data = pickle.load(f)
return data
Expt_t_taus = dict()
```
```python
Expt_t_taus['Resonant'] = {'data': Expt_t_taus_wavepacket_on_res[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': 0,
'xi freq': 0,
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```
%%time
r = np.log(4)
mu = 0
freq_offset = 0.
m_max = 20
t_final = 0.5
wavepacket_length = 4 * gamma
Expt_t_taus['Resonant'] = gen_save_load_data_with_args(
fname=mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}.pickle'),
data_gen_method=gen_Expt_t_taus_square_wavepacket_freq_offset_fock,
method_kwargs=dict(
L=L,
r=r,
mu=mu,
beta=beta,
gamma=gamma,
m_max=m_max,
taus=taus,
rho_ss_coh=rho_ss_coh,
t_final=t_final,
freq_offset=freq_offset,
wavepacket_length=wavepacket_length,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9},
),
overwrite=False,
)
mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}.pickle')
```
```python
Expt_t_taus['Resonant Phased'] = {'data': Expt_t_taus_wavepacket_on_res_phase[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': np.pi / 2,
'xi freq': 0,
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```
%%time
gamma = 0.5
beta = 8.j
r = np.log(4)
mu = np.pi / 2
freq_offset = 0.
m_max = 20
t_final = 0.5
taus = np.linspace(0, 2**8, 2**13)
wavepacket_length = 4 * gamma
Expt_t_taus['Resonant Phased'] = gen_save_load_data_with_args(
fname=mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}.pickle'),
data_gen_method=gen_Expt_t_taus_square_wavepacket_freq_offset_fock,
method_kwargs=dict(
L=L,
r=r,
mu=mu,
beta=beta,
gamma=gamma,
m_max=m_max,
taus=taus,
rho_ss_coh=rho_ss_coh,
t_final=t_final,
freq_offset=freq_offset,
wavepacket_length=wavepacket_length,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9},
),
overwrite=False,
)
```
```python
Expt_t_taus['Sideband'] = {'data': Expt_t_taus_wavepacket[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': 0,
'xi freq': 2 * np.abs(8.j),
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```
%%time
gamma = 0.5
beta = 8.j
r = np.log(4)
mu = 0.
freq_offset = 2 * np.abs(beta)
m_max = 20
t_final = 0.5
taus = np.linspace(0, 2**8, 2**13)
wavepacket_length = 4 * gamma
Expt_t_taus['Sideband'] = gen_save_load_data_with_args(
fname=mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}_freq_offset_{freq_offset:.2f}.pickle'),
data_gen_method=gen_Expt_t_taus_square_wavepacket_freq_offset_fock,
method_kwargs=dict(
L=L,
r=r,
mu=mu,
beta=beta,
gamma=gamma,
m_max=m_max,
taus=taus,
rho_ss_coh=rho_ss_coh,
t_final=t_final,
freq_offset=freq_offset,
wavepacket_length=wavepacket_length,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9},
),
overwrite=False,
)
```
```python
Expt_t_taus['Sideband Phased'] = {'data': Expt_t_taus_wavepacket_phase[0],
'gamma': 0.5,
'beta': 8.j,
'r': np.log(4),
'mu': np.pi / 2,
'xi freq': 2 * np.abs(8.j),
'm_max': 12,
't': 0.5,
'T': 2**8,
'N taus': 2**13}
```
```
%%time
gamma = 0.5
beta = 8.j
r = np.log(4)
mu = np.pi / 2
freq_offset = 2 * np.abs(beta)
m_max = 20
t_final = 0.5
taus = np.linspace(0, 2**8, 2**13)
wavepacket_length = 4 * gamma
Expt_t_taus['Sideband Phased'] = gen_save_load_data_with_args(
fname=mollow_triplet_plot_dir.joinpath(f'er_{np.exp(r):.2f}_mu_{mu / np.pi:.2f}pi_mmax_{m_max}_tfinal_{t_final:.2f}_freq_offset_{freq_offset:.2f}.pickle'),
data_gen_method=gen_Expt_t_taus_square_wavepacket_freq_offset_fock,
method_kwargs=dict(
L=L,
r=r,
mu=mu,
beta=beta,
gamma=gamma,
m_max=m_max,
taus=taus,
rho_ss_coh=rho_ss_coh,
t_final=t_final,
freq_offset=freq_offset,
wavepacket_length=wavepacket_length,
solve_ivp_kwargs={'rtol': 1e-6, 'atol': 1e-9},
),
overwrite=False,
)
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
label='No squeezing',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
plot_kwargs=dict(
color='k',
linestyle='--',
),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus['Resonant'][0],
label='Wavepacket Resonant',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
),
)
plt.tight_layout()
plt.show()
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
label='No squeezing',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
plot_kwargs=dict(
color='k',
linestyle='--',
),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus['Resonant Phased'][0],
label='Wavepacket Resonant Phased',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
),
)
plt.tight_layout()
plt.show()
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
label='No squeezing',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
plot_kwargs=dict(
color='k',
linestyle='--',
),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus['Sideband'][0],
label='Wavepacket Sideband',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
),
)
plt.tight_layout()
plt.show()
fig, ax = plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
label='No squeezing',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
plot_kwargs=dict(
color='k',
linestyle='--',
linewidth=1,
),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus['Sideband Phased'][0],
label='Wavepacket Sideband Phased',
d_tau=np.diff(taus)[0],
    rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
),
)
plt.tight_layout()
plt.savefig('sideband-phased.png', dpi=150)
plt.show()
SINGLE_COL_WIDTH = 3.4039 #INCHES
DOUBLE_COL_WIDTH = 7.0569 #INCHES
SMALL_SIZE = 6.5
MEDIUM_SIZE = 8.5
BIGGER_SIZE = 10.5
plt.rc('text', usetex=False)
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('lines',linewidth=0.75)
#plt.rcParams['ytick.major.pad']='0.5'
fig, axs = plt.subplots(nrows=4, ncols=1, sharex='col', figsize=(SINGLE_COL_WIDTH, SINGLE_COL_WIDTH))
for ax, expt_t_taus_key in zip(axs, ['Resonant', 'Resonant Phased', 'Sideband', 'Sideband Phased']):
plot_emission_spectrum(
auto_corr=delta_Expt_t_taus_coh,
label='No squeezing',
d_tau=np.diff(taus)[0],
        rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
linestyle='--',
),
)
plot_emission_spectrum(
auto_corr=Expt_t_taus[expt_t_taus_key][0],
label=expt_t_taus_key,
d_tau=np.diff(taus)[0],
        rabi_freq=np.sqrt(gamma) * np.abs(beta),
ax=ax,
plot_kwargs=dict(
color='k',
),
)
for ax in axs[:-1]:
ax.set_xlabel('')
plt.tight_layout()
plt.savefig(mollow_triplet_plot_dir.joinpath('reproduced-thesis-plots.pdf'))
plt.show()
```
```
%matplotlib inline
```
Saving and Loading Models
=========================
**Author:** `Matthew Inkawhich <https://github.com/MatthewInkawhich>`_
This document provides solutions to a variety of use cases regarding the
saving and loading of PyTorch models. Feel free to read the whole
document, or just skip to the code you need for a desired use case.
When it comes to saving and loading models, there are three core
functions to be familiar with:
1) `torch.save <https://pytorch.org/docs/stable/torch.html?highlight=save#torch.save>`__:
Saves a serialized object to disk. This function uses Python’s
`pickle <https://docs.python.org/3/library/pickle.html>`__ utility
for serialization. Models, tensors, and dictionaries of all kinds of
objects can be saved using this function.
2) `torch.load <https://pytorch.org/docs/stable/torch.html?highlight=torch%20load#torch.load>`__:
Uses `pickle <https://docs.python.org/3/library/pickle.html>`__\ ’s
unpickling facilities to deserialize pickled object files to memory.
This function also facilitates the device to load the data into (see
`Saving & Loading Model Across
Devices <#saving-loading-model-across-devices>`__).
3) `torch.nn.Module.load_state_dict <https://pytorch.org/docs/stable/nn.html?highlight=load_state_dict#torch.nn.Module.load_state_dict>`__:
Loads a model’s parameter dictionary using a deserialized
*state_dict*. For more information on *state_dict*, see `What is a
state_dict? <#what-is-a-state-dict>`__.
**Contents:**
- `What is a state_dict? <#what-is-a-state-dict>`__
- `Saving & Loading Model for
Inference <#saving-loading-model-for-inference>`__
- `Saving & Loading a General
Checkpoint <#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training>`__
- `Saving Multiple Models in One
File <#saving-multiple-models-in-one-file>`__
- `Warmstarting Model Using Parameters from a Different
Model <#warmstarting-model-using-parameters-from-a-different-model>`__
- `Saving & Loading Model Across
Devices <#saving-loading-model-across-devices>`__
What is a ``state_dict``?
-------------------------
In PyTorch, the learnable parameters (i.e. weights and biases) of a
``torch.nn.Module`` model are contained in the model’s *parameters*
(accessed with ``model.parameters()``). A *state_dict* is simply a
Python dictionary object that maps each layer to its parameter tensor.
Note that only layers with learnable parameters (convolutional layers,
linear layers, etc.) have entries in the model’s *state_dict*. Optimizer
objects (``torch.optim``) also have a *state_dict*, which contains
information about the optimizer’s state, as well as the hyperparameters
used.
Because *state_dict* objects are Python dictionaries, they can be easily
saved, updated, altered, and restored, adding a great deal of modularity
to PyTorch models and optimizers.
Example:
^^^^^^^^
Let’s take a look at the *state_dict* from the simple model used in the
`Training a
classifier <https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py>`__
tutorial.
.. code:: python
# Imports assumed by this snippet
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Define model
class TheModelClass(nn.Module):
    def __init__(self):
        super(TheModelClass, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Initialize model
model = TheModelClass()

# Initialize optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Print model's state_dict
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())

# Print optimizer's state_dict
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
**Output:**
::
Model's state_dict:
conv1.weight torch.Size([6, 3, 5, 5])
conv1.bias torch.Size([6])
conv2.weight torch.Size([16, 6, 5, 5])
conv2.bias torch.Size([16])
fc1.weight torch.Size([120, 400])
fc1.bias torch.Size([120])
fc2.weight torch.Size([84, 120])
fc2.bias torch.Size([84])
fc3.weight torch.Size([10, 84])
fc3.bias torch.Size([10])
Optimizer's state_dict:
state {}
param_groups [{'lr': 0.001, 'momentum': 0.9, 'dampening': 0, 'weight_decay': 0, 'nesterov': False, 'params': [4675713712, 4675713784, 4675714000, 4675714072, 4675714216, 4675714288, 4675714432, 4675714504, 4675714648, 4675714720]}]
Saving & Loading Model for Inference
------------------------------------
Save/Load ``state_dict`` (Recommended)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model.state_dict(), PATH)
**Load:**
.. code:: python
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH))
model.eval()
When saving a model for inference, it is only necessary to save the
trained model’s learned parameters. Saving the model’s *state_dict* with
the ``torch.save()`` function will give you the most flexibility for
restoring the model later, which is why it is the recommended method for
saving models.
A common PyTorch convention is to save models using either a ``.pt`` or
``.pth`` file extension.
Remember that you must call ``model.eval()`` to set dropout and batch
normalization layers to evaluation mode before running inference.
Failing to do this will yield inconsistent inference results.
.. Note ::
Notice that the ``load_state_dict()`` function takes a dictionary
object, NOT a path to a saved object. This means that you must
deserialize the saved *state_dict* before you pass it to the
``load_state_dict()`` function. For example, you CANNOT load using
``model.load_state_dict(PATH)``.
Save/Load Entire Model
^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model, PATH)
**Load:**
.. code:: python
# Model class must be defined somewhere
model = torch.load(PATH)
model.eval()
This save/load process uses the most intuitive syntax and involves the
least amount of code. Saving a model in this way will save the entire
module using Python’s
`pickle <https://docs.python.org/3/library/pickle.html>`__ module. The
disadvantage of this approach is that the serialized data is bound to
the specific classes and the exact directory structure used when the
model is saved. The reason for this is because pickle does not save the
model class itself. Rather, it saves a path to the file containing the
class, which is used during load time. Because of this, your code can
break in various ways when used in other projects or after refactors.
A common PyTorch convention is to save models using either a ``.pt`` or
``.pth`` file extension.
Remember that you must call ``model.eval()`` to set dropout and batch
normalization layers to evaluation mode before running inference.
Failing to do this will yield inconsistent inference results.
Saving & Loading a General Checkpoint for Inference and/or Resuming Training
----------------------------------------------------------------------------
Save:
^^^^^
.. code:: python
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
...
}, PATH)
Load:
^^^^^
.. code:: python
model = TheModelClass(*args, **kwargs)
optimizer = TheOptimizerClass(*args, **kwargs)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.eval()
# - or -
model.train()
When saving a general checkpoint, to be used for either inference or
resuming training, you must save more than just the model’s
*state_dict*. It is important to also save the optimizer’s *state_dict*,
as this contains buffers and parameters that are updated as the model
trains. Other items that you may want to save are the epoch you left off
on, the latest recorded training loss, external ``torch.nn.Embedding``
layers, etc.
To save multiple components, organize them in a dictionary and use
``torch.save()`` to serialize the dictionary. A common PyTorch
convention is to save these checkpoints using the ``.tar`` file
extension.
To load the items, first initialize the model and optimizer, then load
the dictionary locally using ``torch.load()``. From here, you can easily
access the saved items by simply querying the dictionary as you would
expect.
Remember that you must call ``model.eval()`` to set dropout and batch
normalization layers to evaluation mode before running inference.
Failing to do this will yield inconsistent inference results. If you
wish to resume training, call ``model.train()`` to ensure these layers
are in training mode.
Saving Multiple Models in One File
----------------------------------
Save:
^^^^^
.. code:: python
torch.save({
'modelA_state_dict': modelA.state_dict(),
'modelB_state_dict': modelB.state_dict(),
'optimizerA_state_dict': optimizerA.state_dict(),
'optimizerB_state_dict': optimizerB.state_dict(),
...
}, PATH)
Load:
^^^^^
.. code:: python
modelA = TheModelAClass(*args, **kwargs)
modelB = TheModelBClass(*args, **kwargs)
optimizerA = TheOptimizerAClass(*args, **kwargs)
optimizerB = TheOptimizerBClass(*args, **kwargs)
checkpoint = torch.load(PATH)
modelA.load_state_dict(checkpoint['modelA_state_dict'])
modelB.load_state_dict(checkpoint['modelB_state_dict'])
optimizerA.load_state_dict(checkpoint['optimizerA_state_dict'])
optimizerB.load_state_dict(checkpoint['optimizerB_state_dict'])
modelA.eval()
modelB.eval()
# - or -
modelA.train()
modelB.train()
When saving a model comprised of multiple ``torch.nn.Modules``, such as
a GAN, a sequence-to-sequence model, or an ensemble of models, you
follow the same approach as when you are saving a general checkpoint. In
other words, save a dictionary of each model’s *state_dict* and
corresponding optimizer. As mentioned before, you can save any other
items that may aid you in resuming training by simply appending them to
the dictionary.
A common PyTorch convention is to save these checkpoints using the
``.tar`` file extension.
To load the models, first initialize the models and optimizers, then
load the dictionary locally using ``torch.load()``. From here, you can
easily access the saved items by simply querying the dictionary as you
would expect.
Remember that you must call ``model.eval()`` to set dropout and batch
normalization layers to evaluation mode before running inference.
Failing to do this will yield inconsistent inference results. If you
wish to resume training, call ``model.train()`` to set these layers to
training mode.
Warmstarting Model Using Parameters from a Different Model
----------------------------------------------------------
Save:
^^^^^
.. code:: python
torch.save(modelA.state_dict(), PATH)
Load:
^^^^^
.. code:: python
modelB = TheModelBClass(*args, **kwargs)
modelB.load_state_dict(torch.load(PATH), strict=False)
Partially loading a model or loading a partial model are common
scenarios when transfer learning or training a new complex model.
Leveraging trained parameters, even if only a few are usable, will help
to warmstart the training process and hopefully help your model converge
much faster than training from scratch.
Whether you are loading from a partial *state_dict*, which is missing
some keys, or loading a *state_dict* with more keys than the model that
you are loading into, you can set the ``strict`` argument to **False**
in the ``load_state_dict()`` function to ignore non-matching keys.
If you want to load parameters from one layer to another, but some keys
do not match, simply change the name of the parameter keys in the
*state_dict* that you are loading to match the keys in the model that
you are loading into.
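As a hedged illustration (the layer prefixes ``features.`` and ``backbone.`` below are
hypothetical, not taken from this document), such a rename can be done with an ordinary
dictionary comprehension before calling ``load_state_dict()``:

.. code:: python

    # Hypothetical example: rename mismatched keys so they match the target model
    pretrained_dict = torch.load(PATH)
    renamed_dict = {k.replace('features.', 'backbone.'): v
                    for k, v in pretrained_dict.items()}
    modelB.load_state_dict(renamed_dict, strict=False)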
Saving & Loading Model Across Devices
-------------------------------------
Save on GPU, Load on CPU
^^^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model.state_dict(), PATH)
**Load:**
.. code:: python
device = torch.device('cpu')
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH, map_location=device))
When loading a model on a CPU that was trained with a GPU, pass
``torch.device('cpu')`` to the ``map_location`` argument in the
``torch.load()`` function. In this case, the storages underlying the
tensors are dynamically remapped to the CPU device using the
``map_location`` argument.
Save on GPU, Load on GPU
^^^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model.state_dict(), PATH)
**Load:**
.. code:: python
device = torch.device("cuda")
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH))
model.to(device)
# Make sure to call input = input.to(device) on any input tensors that you feed to the model
When loading a model on a GPU that was trained and saved on GPU, simply
convert the initialized ``model`` to a CUDA optimized model using
``model.to(torch.device('cuda'))``. Also, be sure to use the
``.to(torch.device('cuda'))`` function on all model inputs to prepare
the data for the model. Note that calling ``my_tensor.to(device)``
returns a new copy of ``my_tensor`` on GPU. It does NOT overwrite
``my_tensor``. Therefore, remember to manually overwrite tensors:
``my_tensor = my_tensor.to(torch.device('cuda'))``.
Save on CPU, Load on GPU
^^^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model.state_dict(), PATH)
**Load:**
.. code:: python
device = torch.device("cuda")
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH, map_location="cuda:0")) # Choose whatever GPU device number you want
model.to(device)
# Make sure to call input = input.to(device) on any input tensors that you feed to the model
When loading a model on a GPU that was trained and saved on CPU, set the
``map_location`` argument in the ``torch.load()`` function to
*cuda:device_id*. This loads the model to a given GPU device. Next, be
sure to call ``model.to(torch.device('cuda'))`` to convert the model’s
parameter tensors to CUDA tensors. Finally, be sure to use the
``.to(torch.device('cuda'))`` function on all model inputs to prepare
the data for the CUDA optimized model. Note that calling
``my_tensor.to(device)`` returns a new copy of ``my_tensor`` on GPU. It
does NOT overwrite ``my_tensor``. Therefore, remember to manually
overwrite tensors: ``my_tensor = my_tensor.to(torch.device('cuda'))``.
Saving ``torch.nn.DataParallel`` Models
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Save:**
.. code:: python
torch.save(model.module.state_dict(), PATH)
**Load:**
.. code:: python
# Load to whatever device you want
``torch.nn.DataParallel`` is a model wrapper that enables parallel GPU
utilization. To save a ``DataParallel`` model generically, save the
``model.module.state_dict()``. This way, you have the flexibility to
load the model any way you want to any device you want.
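As a hedged sketch of the load step left open above (reusing the ``TheModelClass``,
``*args, **kwargs``, and ``PATH`` placeholders from the rest of this document), a
state_dict saved from ``model.module`` loads directly into a plain, unwrapped model:

.. code:: python

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = TheModelClass(*args, **kwargs)
    # Keys were saved from model.module, so they carry no "module." prefix
    model.load_state_dict(torch.load(PATH, map_location=device))
    model.to(device)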
| github_jupyter |
## _*Using Qiskit Aqua for exact cover problems*_
In mathematics, given a collection $S$ of subsets of a set $X$, an exact cover is a subcollection $S_{ec} \subseteq S$ such that each element in $X$ is contained in exactly one subset of $S_{ec}$.
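As a small hedged illustration (this toy instance is made up and is not the one loaded from `sample.exactcover` below): with $X = \{1,2,3,4\}$ and $S = \{\{1,2\},\{3,4\},\{2,3\},\{4\}\}$, the subcollection $\{\{1,2\},\{3,4\}\}$ is an exact cover, whereas $\{\{1,2\},\{2,3\}\}$ is not, since 2 is covered twice and 4 is not covered. The same check in a few lines of Python:
```
X = {1, 2, 3, 4}
S = [{1, 2}, {3, 4}, {2, 3}, {4}]
candidate = [S[0], S[1]]                     # {{1,2},{3,4}}
covered = [x for s in candidate for x in s]
# an exact cover covers every element of X exactly once
print(sorted(covered) == sorted(X))          # True
```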
We will go through two examples to show:
1. How to run the optimization
2. How to run the optimization with the VQE.
#### The problem and the brute-force method.
First, let us take a look at the list of subsets.
```
import numpy as np
import json
from qiskit import BasicAer
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.optimization.applications.ising import exact_cover
from qiskit.optimization.applications.ising.common import sample_most_likely
input_file = 'sample.exactcover'
with open(input_file) as f:
list_of_subsets = json.load(f)
print(list_of_subsets)
```
Then we apply the brute-force method. Basically, we exhaustively try all the binary assignments. In each binary assignment, the entry of a subset is either 0 (meaning the subset is not in the cover) or 1 (meaning the subset is in the cover). We print the binary assignment that satisfies the definition of the exact cover.
```
def brute_force():
    # brute-force way: try every possible assignment!
    has_sol = False

    def bitfield(n, L):
        # np.binary_repr already returns the digits without a "0b" prefix
        result = np.binary_repr(n, L)
        return [int(digit) for digit in result]

    L = len(list_of_subsets)
    max_assignments = 2**L
    for i in range(max_assignments):
        cur = bitfield(i, L)
        cur_v = exact_cover.check_solution_satisfiability(cur, list_of_subsets)
        if cur_v:
            has_sol = True
            break
    return has_sol, cur

has_sol, cur = brute_force()
if has_sol:
    print("Solution is", cur)
else:
    print("No solution is found")
qubit_op, offset = exact_cover.get_operator(list_of_subsets)
```
#### Part I: Run the optimization
```
algo = NumPyMinimumEigensolver(qubit_op, aux_operators=[])
result = algo.run()
x = sample_most_likely(result.eigenstate)
ising_sol = exact_cover.get_solution(x)
np.testing.assert_array_equal(ising_sol, [0, 1, 1, 0])
if exact_cover.check_solution_satisfiability(ising_sol, list_of_subsets):
    print("Solution is", ising_sol)
else:
    print("No solution is found")
```
#### Part II: Run the optimization with the VQE
```
from qiskit.aqua import aqua_globals
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.components.optimizers import COBYLA
from qiskit.circuit.library import TwoLocal
aqua_globals.random_seed = 10598
optimizer = COBYLA()
var_form = TwoLocal(qubit_op.num_qubits, 'ry', 'cz', reps=5, entanglement='linear')
vqe = VQE(qubit_op, var_form, optimizer)
backend = BasicAer.get_backend('statevector_simulator')
result = vqe.run(backend)
x = sample_most_likely(result.eigenstate)
ising_sol = exact_cover.get_solution(x)
if exact_cover.check_solution_satisfiability(ising_sol, list_of_subsets):
    print("Solution is", ising_sol)
else:
    print("No solution is found")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/kalz2q/mycolabnotebooks/blob/master/matplotlibtutorials.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Notes
Hands-on study of matplotlib based on the matplotlib tutorials:
https://matplotlib.org/stable/tutorials/index.html
# Enabling Japanese text
```
# As of 2021-03-25, the following install is needed before importing japanize_matplotlib
%%capture
!pip install japanize_matplotlib
import matplotlib.pyplot as plt
import japanize_matplotlib
plt.text(0.5, 0.5, 'matplotlibで\n日本語が\n使える!!!!'
, fontsize=40
, horizontalalignment='center'
, verticalalignment='center')
plt.show()
```
# Environment: styles
A style can be selected with matplotlib.style.use.
```
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('ggplot') # specify the style name here
data = np.random.randn(50)
plt.plot(data) # can plot even without explicit x, y pairs
plt.show()
# print(plt.style.available) gives the list of available styles
# Each style changes a different set of settings, so a plot is affected by styles applied earlier; stacking styles may not be very practical
# It would be nice to know exactly what each style sets; never mind for now
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('dark_background')
plt.style.use('fivethirtyeight')
plt.figure(figsize=(4,4),facecolor='pink')
plt.style.use('Solarize_Light2')
plt.style.use('classic')
plt.style.use('seaborn')
plt.plot(np.sin(np.linspace(0, 2 * np.pi)), 'r-o')
plt.show()
```
Format strings such as 'r-o' can be given directly inside plt.plot.
Finer control is available through plt.rcParams.
A style is presumably just a named bundle of rcParams settings.
Explicit arguments such as plt.figure(figsize=(4,4), facecolor='pink')
take precedence over rcParams, so as long as you work through plt.plot you may not need to learn rcParams in much detail; it may be a somewhat legacy mechanism.
When plotting with sympy, however, only the rcParams environment seems to be used, so it is worth knowing about.
```
import matplotlib.pyplot as plt
import numpy as np
data = np.random.randn(50)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.linestyle'] = '--'
plt.plot(data)
plt.show()
# from sympy import *
# x =symbols('x')
# plot(x**2,(x,-0.5,0.5))
```
To change the colors that are chosen automatically for plots, the docs say to modify axes.prop_cycle, but the program below raised an error (a corrected sketch follows after the code block).
TODO: look into color in more detail.
TODO: look into cycler in more detail.
```
# import matplotlib as mpl
# mpl.rcParams['axes.prop_cycle'] = cycler(color=['r', 'g', 'b', 'y']
# plt.plot(data) # first color is red
# plt.show()
```
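A hedged guess at why the commented-out code above failed: `cycler` was never imported and the call is missing a closing parenthesis. A corrected sketch:
```
from cycler import cycler
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

mpl.rcParams['axes.prop_cycle'] = cycler(color=['r', 'g', 'b', 'y'])
data = np.random.randn(50)
plt.plot(data)  # the first line now uses red, the first color of the cycle
plt.show()
```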
Besides `style.use` and `rcParams`, plot styling can also be set in bulk with keyword arguments via `rc`.
```
import matplotlib.pyplot as plt
import numpy as np
data = np.random.randn(50)
# plt.rcParams['lines.linewidth'] = 8
plt.rc('lines', linewidth=4, linestyle='-.')
plt.plot(data)
import matplotlib.pyplot as plt
import numpy as np
data = np.random.randn(50)
plt.rcdefaults() # resets to the Matplotlib defaults, not the Colab defaults
plt.rcParams['figure.figsize']=[4.2,2.8] # takes effect
# plt.figure(figsize=(4,4),facecolor='pink') # takes effect and takes precedence
plt.rcParams['figure.facecolor']='yellow' # takes effect, but with low priority
plt.rcParams['lines.color']='black' # has no effect here
# plt.plot(data, 'k--') # takes effect
plt.plot(data)
plt.show()
# How to inspect the contents of rcParams
import matplotlib.pyplot as plt
# print(plt.rcParams) # lines.color: C0, figure.figsize: [6.4, 4.8]; C0 presumably means entry 0 of the color cycle
```
`matplotlib.rcdefaults` restores the Matplotlib defaults.
The Colab defaults can be checked by restarting the runtime => compare the difference => there was almost no difference.
Validation of rcParams settings is said to be described in `matplotlib.rcsetup`,
e.g. >>> c = cycler(color=['red', 'green', 'blue']); the details are not entirely clear.
Matplotlib's configuration is said to live in a file called `matplotlibrc` (see the sketch after the next code block for how to locate it).
```
import matplotlib.pyplot as plt
# print(plt.rcParams)
```
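A minimal sketch for locating the configuration mentioned above (both functions are part of matplotlib's public API):
```
import matplotlib
print(matplotlib.matplotlib_fname())  # path of the matplotlibrc file currently in use
print(matplotlib.get_configdir())     # per-user configuration directory
```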
# Usage Guide
matplotlib draws data onto axes (ax) that sit inside an area called a figure.
The simplest way to create a figure that has an axes is
fig, ax = plt.subplots()
as shown in the example below.
```
import matplotlib.pyplot as plt
fig, ax = plt.subplots() # create a figure with a single axes
ax.plot([1, 2, 3, 4], [1, 4, 2, 3]) # plot onto the axes
```
Other plotting libraries, including MATLAB, do not require you to create the axes explicitly.
In matplotlib too, you can skip creating an axes and draw onto the current axes as follows.
```
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [1, 4, 2, 3]) # Matplotlib plot.
import matplotlib
# help(matplotlib.figure.Figure)
# dir(matplotlib.figure.Figure)
dir(matplotlib.axes.Axes)
# help(matplotlib.axes.Axes)
```
Plotting works on `numpy.array` data, so you may need to convert as follows.
For a pandas.DataFrame:
a = pandas.DataFrame(np.random.rand(4, 5), columns = list('abcde'))
a_asarray = a.values
For a numpy.matrix:
b = np.matrix([[1, 2], [3, 4]])
b_asarray = np.asarray(b)
```
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2, 100) # 100 evenly spaced points between 0 and 2
fig, ax = plt.subplots() # create the figure and the axes
ax.plot(x, x, label='linear') # plot
ax.plot(x, x**2, label='quadratic') # graph of y = x^2
ax.plot(x, x**3, label='cubic') # graph of y = x^3
ax.set_xlabel('x label') # x-axis label
ax.set_ylabel('y label') # y-axis label
ax.set_title("Simple Plot") # set the plot title
ax.legend() # add a legend
```
or (pyplot-style)
```
# Example without explicitly creating fig and ax
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2, 100)
plt.plot(x, x, label='linear') # plot onto the implicit current axes
plt.plot(x, x**2, label='quadratic')
plt.plot(x, x**3, label='cubic')
plt.xlabel('x label')
plt.ylabel('y label')
plt.title("Simple Plot")
plt.legend()
plt.show()
```
The benefit of the so-called OO style, where you create fig and ax and call methods on those objects, shows up when the same kind of plot has to be drawn repeatedly for different data sets.
An example of how to write a function for that case follows.
```
def my_plotter(ax, data1, data2, param_dict):
    """
    A helper function to make a graph

    Parameters
    ----------
    ax : Axes
        The axes to draw to
    data1 : array
        The x data
    data2 : array
        The y data
    param_dict : dict
        Dictionary of kwargs to pass to ax.plot

    Returns
    -------
    out : list
        list of artists added
    """
    out = ax.plot(data1, data2, **param_dict)
    return out
```
This function is used as follows.
```
import matplotlib.pyplot as plt
import numpy as np
data1, data2, data3, data4 = np.random.randn(4, 100)
fig, ax = plt.subplots(1, 1)
my_plotter(ax, data1, data2, {'marker': 'x'})
```
The same function can be reused with different data to draw two plots.
```
fig, (ax1, ax2) = plt.subplots(1, 2)
my_plotter(ax1, data1, data2, {'marker': 'x'})
my_plotter(ax2, data3, data4, {'marker': 'o'})
```
Memo: numpy.random
function | meaning
--- | ---
rand() | generate one random float in [0.0, 1.0)
rand(3) | generate three random floats in [0.0, 1.0)
rand(2,3) | generate a 2x3 matrix of random floats in [0.0, 1.0)
(b-a) * np.random.rand() + a | random float in [a, b) (at least a, less than b)
randn() | standard normal distribution (mean 0, standard deviation 1)
randn(10) | 10 samples from the standard normal distribution
randn(10,10) | 10x10 matrix of standard normal samples
normal(50,10) | normal distribution with mean 50, standard deviation 10
binomial(n=100, p=0.5) | binomial distribution
poisson(lam=10) | Poisson distribution with λ=10
beta(a=3, b=5) | beta distribution with a=3, b=5
randint(100) | one random integer from 0 to 99
randint(30,70) | one random integer from 30 to 69
randint(0,100,20) | 20 random integers from 0 to 99
randint(0,100,(5,5)) | 5x5 matrix of random integers from 0 to 99
random_integers(100) | one random integer from 1 to 100
random_integers(30,70) | one random integer from 30 to 70
random.choice(city) | randomly pick one element
random.choice(city,10) | randomly pick 10 elements (with replacement)
random.choice(city,5,replace=False) | randomly pick 5 elements (without replacement)
random.choice(city, p=weight) | pick one element with the given probabilities
seed(100) | any number will do
numpy.random.random_sample((2,3)) | 2x3 matrix of random floats in [0.0, 1.0)
 | np.random.random, np.random.ranf,
 | and np.random.sample are all the same
numpy.random.gamma(5,1,10) | shape parameter, scale parameter, size
numpy.random.chisquare() | chi-squared distribution: degrees of freedom df, size
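A minimal sketch exercising a few of the entries above (the `city` array is a made-up example):
```
import numpy as np

np.random.seed(100)                   # any number will do
print(np.random.rand(2, 3))           # 2x3 matrix of floats in [0.0, 1.0)
print(np.random.randn(10))            # 10 standard normal samples
print(np.random.randint(0, 100, 20))  # 20 integers from 0 to 99
city = np.array(['Tokyo', 'Osaka', 'Nagoya'])
print(np.random.choice(city, 5))      # 5 picks with replacement
```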
# Backends
What is a backend?
matplotlib is used in many situations, not only for inline plotting as in Colab (Jupyter). The part of matplotlib that handles these different output targets is called the backend.
For example, there are use cases such as:
- using matplotlib interactively from a Python shell, with a plot window popping up as commands are typed
- embedding matplotlib in a graphical user interface such as wxpython or pygtk to build an application
- generating PostScript images from numerical simulations in a batch script
- running a web application server that serves plots dynamically
There are two types of backends:
1. user interface backends, for use with pygtk, wxpython, tkinter, qt4, and macosx; these are also called interactive backends
2. hardcopy backends, for producing image files such as PNG, SVG, PDF, and PS; these are also called non-interactive backends
The backend can be selected in three ways:
1. with the `backend` parameter in the `matplotlibrc` file
2. with the environment variable `MPLBACKEND`
3. with the function `matplotlib.use` (a minimal sketch of this option follows below)
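A minimal sketch of option 3; 'Agg' is just one example of a non-interactive (hardcopy) backend:
```
import matplotlib
matplotlib.use('Agg')            # select the backend before importing pyplot
import matplotlib.pyplot as plt

plt.plot([1, 2, 3])
plt.savefig('backend-demo.png')  # a hardcopy backend writes files instead of opening windows
print(matplotlib.get_backend())  # name of the backend in use
```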
# pyplot
pyplot is the collection of functions used to draw figures with matplotlib.
The coordinate system a figure is drawn on is called ax or axes; here ax and axes are matplotlib.pyplot terminology rather than strict mathematical terms. It is these axes that pyplot operates on.
```
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
plt.show()
```
Why does the plot above have an x-axis running 0-3 and a y-axis running 1-4?
When a single list is given to plot with nothing else specified, pyplot treats it as the y values and generates the matching x values automatically. The auto-generated x list is zero-based, so here it becomes [0, 1, 2, 3].
Giving plot two lists looks like this.
```
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [1, 4, 9, 16])
```
pyplot.plot handles not only lists but also numpy arrays; lists are converted to numpy arrays internally.
The next example shows various format strings in plot.
```
import numpy as np
# evenly sampled time at 200ms intervals
t = np.arange(0., 5., 0.2)
# red dashes, blue squares and green triangles
plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
plt.show()
```
For numpy.recarray and pandas.DataFrame objects, variables can be accessed by string names.
For such objects, the `data` keyword lets you plot the variables referred to by those names.
```
import matplotlib.pyplot as plt
import numpy as np
data = {'a': np.arange(50),
'c': np.random.randint(0, 50, 50),
'd': np.random.randn(50)}
data['b'] = data['a'] + 10 * np.random.randn(50)
data['d'] = np.abs(data['d']) * 100
plt.scatter('a', 'b', c='c', s='d', data=data)
plt.xlabel('entry a')
plt.ylabel('entry b')
plt.show()
```
Categorical variables
You can plot directly with categorical variables.
```
names = ['group_a', 'group_b', 'group_c']
values = [1, 10, 100]
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.subplot(132)
plt.scatter(names, values)
plt.subplot(133)
plt.plot(names, values)
plt.suptitle('Categorical Plotting')
plt.show()
# Controlling line properties
# Using keyword args
x = np.arange(10) # [0,1,2,3,4,5,6,7,8,9] <= 10 values
y = np.random.rand(10)*10 # random floats in [0, 10)
plt.plot(x, y, linewidth=10.0)
line, = plt.plot(x, y, '-')
line.set_antialiased(False) # turn off antialiasing
x1 = np.random.rand(10)
x2 = np.random.rand(10)
y1 = np.random.rand(10)
y2 = np.random.rand(10)
# lines = plt.plot(x1, y1, x2, y2)
line1 = plt.plot(x1, y1)
line2 = plt.plot(x2, y2)
# use keyword args
plt.setp(line1, color='k', linewidth=10.0)
# or MATLAB style string value pairs
plt.setp(line2, 'color', 'g', 'linewidth', 10.0)
plt.show()
# the list of Line2D properties can also be obtained by calling plt.setp with a line as the argument
import matplotlib.pyplot as plt
import numpy as np
line = plt.plot(np.random.rand(7),np.random.rand(7))
plt.setp(line, linewidth=15, color='red')
# plt.setp(line)
# Working with multiple figures and axes
# plt.gca returns the current axes (a matplotlib.axes.Axes instance)
# plt.gcf returns the current figure (a matplotlib.figure.Figure instance)
def f(t):
    return np.exp(-t) * np.cos(2*np.pi*t)
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
plt.figure()
plt.subplot(211)
plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
plt.show()
```
plt.figure can be omitted.
When there is only one axes, subplot(111) can also be omitted.
plt.subplot takes a three-digit number: number of rows, number of columns, and plot number, in that order.
subplot(211) is the same as subplot(2, 1, 1).
To place axes at an arbitrary position rather than on a grid, use plt.axes([left, bottom, width, height]); the numbers are fractional values between 0 and 1.
See
https://matplotlib.org/3.3.3/gallery/subplots_axes_and_figures/axes_demo.html
https://matplotlib.org/3.3.3/gallery/subplots_axes_and_figures/subplot_demo
```
# Example from the site above
import numpy as np
import matplotlib.pyplot as plt
# Data for plotting
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
# Create two subplots sharing y axis
fig, (ax1, ax2) = plt.subplots(2, sharey=True)
ax1.plot(x1, y1, 'ko-')
ax1.set(title='A tale of 2 subplots', ylabel='Damped oscillation')
ax2.plot(x2, y2, 'r.-')
ax2.set(xlabel='time (s)', ylabel='Undamped')
plt.show()
# Example from the site above
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(19680801) # Fixing random state for reproducibility.
# create some data to use for the plot
dt = 0.001
t = np.arange(0.0, 10.0, dt)
r = np.exp(-t[:1000] / 0.05) # impulse response
x = np.random.randn(len(t))
s = np.convolve(x, r)[:len(x)] * dt # colored noise
fig, main_ax = plt.subplots()
main_ax.plot(t, s)
main_ax.set_xlim(0, 1)
main_ax.set_ylim(1.1 * np.min(s), 2 * np.max(s))
main_ax.set_xlabel('time (s)')
main_ax.set_ylabel('current (nA)')
main_ax.set_title('Gaussian colored noise')
# this is an inset axes over the main axes
right_inset_ax = fig.add_axes([.65, .6, .2, .2], facecolor='k')
right_inset_ax.hist(s, 400, density=True)
right_inset_ax.set(title='Probability', xticks=[], yticks=[])
# this is another inset axes over the main axes
left_inset_ax = fig.add_axes([.2, .6, .2, .2], facecolor='k')
left_inset_ax.plot(t[:len(r)], r)
left_inset_ax.set(title='Impulse response', xlim=(0, .2), xticks=[], yticks=[])
plt.show()
```
Multiple figures can be created by calling plt.figure repeatedly with increasing figure numbers.
```
# As of 2021-03-29 the example below emits a warning; in the future only the OO-style way of writing this will be supported
import matplotlib.pyplot as plt
plt.figure(1) # the first figure
plt.subplot(211) # the first subplot in the first figure
plt.plot([1, 2, 3])
plt.subplot(212) # the second subplot in the first figure
plt.plot([4, 5, 6])
plt.figure(2) # a second figure
plt.plot([4, 5, 6]) # creates a subplot(111) by default
plt.figure(1) # figure 1 current; subplot(212) still current
plt.subplot(211) # make subplot(211) in figure1 current
plt.title('Easy as 1, 2, 3') # subplot 211 title
plt.show()
```
About plt.clf, plt.cla, and plt.close:
a figure or axes can be cleared with plt.clf and plt.cla respectively,
and plt.close releases the memory used by a figure.
A minimal sketch follows below.
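```
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3])
plt.cla()        # clear the current axes only
ax.plot([3, 2, 1])
plt.clf()        # clear the whole current figure
plt.close(fig)   # close the figure and release its memory
```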
Working with text
plt.text can add text at an arbitrary location.
plt.xlabel, plt.ylabel, and plt.title add text at their designated locations.
For more detailed examples, see
https://matplotlib.org/stable/tutorials/text/text_intro.html
```
import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot()
fig.subplots_adjust(top=0.85)
# Set titles for the figure and the subplot respectively
fig.suptitle('bold figure suptitle', fontsize=14, fontweight='bold')
ax.set_title('axes title')
ax.set_xlabel('xlabel')
ax.set_ylabel('ylabel')
# Set both x- and y-axis limits to [0, 10] instead of default [0, 1]
ax.axis([0, 10, 0, 10])
ax.text(3, 8, 'boxed italics text in data coords', style='italic',
bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
ax.text(2, 6, r'an equation: $E=mc^2$', fontsize=15)
ax.text(3, 2, 'unicode: Institut für Festkörperphysik')
ax.text(0.95, 0.01, 'colored text in axes coords',
verticalalignment='bottom', horizontalalignment='right',
transform=ax.transAxes,
color='green', fontsize=15)
ax.plot([2], [1], 'o')
ax.annotate('annotate', xy=(2, 1), xytext=(3, 4),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.show()
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
# the histogram of the data
n, bins, patches = plt.hist(x, 50, density=1, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
```
All plt.text functions return a matplotlib.text.Text instance. Properties can be customized either by passing keyword arguments to the text functions or by using plt.setp.
```
import matplotlib.pyplot as plt
fig,ax=plt.subplots(figsize=(6,3))
t = plt.xlabel('my data', fontsize=14, color='red')
plt.setp(t, color='blue')
plt.show()
```
# Using mathematical expressions in text
In the next example, the `r` before the string stands for `raw` and must not be omitted, because inside a raw string Python does not process escape sequences.
```
import matplotlib.pyplot as plt
fig,ax=plt.subplots(figsize=(6,3))
ax.set_title(r'$\sigma_i=15$', color='red')
plt.text(0.5, 0.5, 'No Japanese\nIn Matplotlib!'
, fontsize=40
, horizontalalignment='center'
, verticalalignment='center')
plt.show()
plt.text?
```
# Annotating text
With the plt.text function above, text can be placed at any position you like.
The plt.annotate method provides helper functionality that makes annotations easier.
Both the location being annotated, given by the argument xy, and the location of the text, given by xytext,
are specified as (x, y) tuples.
```
import matplotlib.pyplot as plt
import numpy as np
ax = plt.subplot(111)
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = plt.plot(t, s, lw=2)
plt.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
plt.ylim(-2, 2)
plt.show()
```
In the example above, xy is the coordinate of the arrow tip and xytext is the coordinate of the text.
For further examples, see
https://matplotlib.org/stable/gallery/text_labels_and_annotations/annotation_demo.html
# Logarithmic and other nonlinear axes
Switching to a logarithmic scale is as simple as calling plt.xscale('log').
An example follows.
```
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
# make up some data in the open interval (0, 1)
y = np.random.normal(loc=0.5, scale=0.4, size=1000)
y = y[(y > 0) & (y < 1)]
y.sort()
x = np.arange(len(y))
# plot with various axes scales
plt.figure()
# linear
plt.subplot(221)
plt.plot(x, y)
plt.yscale('linear')
plt.title('linear')
plt.grid(True)
# log
plt.subplot(222)
plt.plot(x, y)
plt.yscale('log')
plt.title('log')
plt.grid(True)
# symmetric log
plt.subplot(223)
plt.plot(x, y - y.mean())
plt.yscale('symlog', linthresh=0.01) # emits a warning that linthresh is deprecated
plt.title('symlog')
plt.grid(True)
# logit
plt.subplot(224)
plt.plot(x, y)
plt.yscale('logit')
plt.title('logit')
plt.grid(True)
# Adjust the subplot layout, because the logit one may take more space
# than usual, due to y-tick labels like "1 - 10^{-3}"
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
wspace=0.35)
plt.show()
```
You can also add scales that you define yourself; see adding-new-scales for details. A minimal sketch using the built-in 'function' scale follows below.
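A hedged sketch of a lightweight alternative to writing a full Scale class: the 'function' scale, which takes a forward/inverse transform pair (here a square-root display scale):
```
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(1, 100, 200)
fig, ax = plt.subplots()
ax.plot(x, x)
# the y axis is displayed through the forward transform np.sqrt,
# and np.square is its inverse
ax.set_yscale('function', functions=(np.sqrt, np.square))
plt.show()
```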
# Sample plots
Line plots
Handling several plots at once
Images
Color handling for 2D plots
Histograms
Free-form curves (path patch)
3D plots
Streamline plots (streamplot)
Ellipses
Bar charts
Pie charts
Tables
Scatter plots
GUI widgets such as sliders and radio buttons
Filled curves and polygons
Date handling
Logarithmic plots
Polar coordinates
Legends
Mathematical text (internal engine or external programs)
Output to external toolkits: Qt, GTK, Tk, or wxWidgets, EEG viewer pbrain
XKCD-style sketch plots
Combining several techniques (subplot example)
```
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(19680801)
data = np.random.randn(2, 100)
fig, axs = plt.subplots(2, 2, figsize=(5, 5))
axs[0, 0].hist(data[0])
axs[1, 0].scatter(data[0], data[1])
axs[0, 1].plot(data[0], data[1])
axs[1, 1].hist2d(data[0], data[1])
plt.show()
import matplotlib.pyplot as plt
import numpy as np
data = np.random.randn(2, 3)
print(type(data))
print(data)
```
# Image handling
The tutorial examples cannot be run in the Colab environment, so this chapter is skipped.
Headings only.
Importing the image-handling module
```
import matplotlib.image as mpimg
```
Importing image data into a numpy array
matplotlib uses the Pillow library to load image data.
```
img = mpimg.imread('../../doc/_static/stinkbug.png')
print(img)
```
Plotting a numpy array as an image
```
imgplot = plt.imshow(img)
```
Applying pseudocolor schemes to an image plot
```
# lum_img = img[:, :, 0]
# This is array slicing. You can read more in the `Numpy tutorial
# <https://docs.scipy.org/doc/numpy/user/quickstart.html>`_.
# plt.imshow(lum_img)
# plt.imshow(lum_img, cmap="hot")
# imgplot = plt.imshow(lum_img)
# imgplot.set_cmap('nipy_spectral')
# Display a colorbar
# imgplot = plt.imshow(lum_img)
# plt.colorbar()
# Histogram of the range of the color values
# plt.hist(lum_img.ravel(), bins=256, range=(0.0, 1.0), fc='k', ec='k')
# Zoom in around the peak
# imgplot = plt.imshow(lum_img, clim=(0.0, 0.7))
# How to set clim via the returned object
# fig = plt.figure()
# ax = fig.add_subplot(1, 2, 1)
# imgplot = plt.imshow(lum_img)
# ax.set_title('Before')
# plt.colorbar(ticks=[0.1, 0.3, 0.5, 0.7], orientation='horizontal')
# ax = fig.add_subplot(1, 2, 2)
# imgplot = plt.imshow(lum_img)
# imgplot.set_clim(0.0, 0.7)
# ax.set_title('After')
# plt.colorbar(ticks=[0.1, 0.3, 0.5, 0.7], orientation='horizontal')
# Array interpolation schemes
# Shrinking an image loses information; enlarging one requires interpolation.
# Use the Pillow library to load and resize the image
# from PIL import Image
# img = Image.open('../../doc/_static/stinkbug.png')
# img.thumbnail((64, 64), Image.ANTIALIAS) # resizes image in-place
# imgplot = plt.imshow(img)
```
The code above uses the default interpolation scheme, bilinear.
The next code specifies "nearest", which performs no interpolation.
```
# imgplot = plt.imshow(img, interpolation="nearest")
```
The next code uses "bicubic".
"bicubic" is often used for enlarging photos, because a slightly blurred result is usually preferred to visible pixels.
```
# imgplot = plt.imshow(img, interpolation="bicubic")
```
# The lifecycle of a plot
Next, we walk through drawing, refining, and saving a plot, and point out some useful best practices along the way.
This chapter is based on
<http://pbpython.com/effective-matplotlib.html>`_
by Chris Moffitt.
## Our data
We'll use the data from the post from which this tutorial was derived.
It contains sales information for a number of companies.
```
# The data used in the samples below is the data from the original site
# It is a dictionary of sales figures keyed by company name
import numpy as np
import matplotlib.pyplot as plt
data = {'Barton LLC': 109438.50,
'Frami, Hills and Schmidt': 103569.59,
'Fritsch, Russel and Anderson': 112214.71,
'Jerde-Hilpert': 112591.43,
'Keeling LLC': 100934.30,
'Koepp Ltd': 103660.54,
'Kulas Inc': 137351.96,
'Trantow-Barrows': 123381.38,
'White-Trantow': 135841.99,
'Will LLC': 104437.60}
group_data = list(data.values())
group_names = list(data.keys())
group_mean = np.mean(group_data)
print(type(data))
```
# (bookmark: currently here)
## Getting started
This data is naturally visualized as a barplot, with one bar per
group. To do this with the object-oriented approach, we first generate
an instance of :class:`figure.Figure` and
:class:`axes.Axes`. The Figure is like a canvas, and the Axes
is a part of that canvas on which we will make a particular visualization.
<div class="alert alert-info"><h4>Note</h4><p>Figures can have multiple axes on them. For information on how to do this,
see the :doc:`Tight Layout tutorial
</tutorials/intermediate/tight_layout_guide>`.</p></div>
```
fig, ax = plt.subplots()
```
Now that we have an Axes instance, we can plot on top of it.
```
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
```
## Controlling the style
There are many styles available in Matplotlib in order to let you tailor
your visualization to your needs. To see a list of styles, we can use
:mod:`.style`.
```
print(plt.style.available)
```
You can activate a style with the following:
```
plt.style.use('fivethirtyeight')
```
Now let's remake the above plot to see how it looks:
```
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
```
The style controls many things, such as color, linewidths, backgrounds,
etc.
## Customizing the plot
Now we've got a plot with the general look that we want, so let's fine-tune
it so that it's ready for print. First let's rotate the labels on the x-axis
so that they show up more clearly. We can gain access to these labels
with the :meth:`axes.Axes.get_xticklabels` method:
```
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
```
If we'd like to set the property of many items at once, it's useful to use
the :func:`pyplot.setp` function. This will take a list (or many lists) of
Matplotlib objects, and attempt to set some style element of each one.
```
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
```
It looks like this cut off some of the labels on the bottom. We can
tell Matplotlib to automatically make room for elements in the figures
that we create. To do this we set the ``autolayout`` value of our
rcParams. For more information on controlling the style, layout, and
other features of plots with rcParams, see
:doc:`/tutorials/introductory/customizing`.
```
plt.rcParams.update({'figure.autolayout': True})
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
```
Next, we add labels to the plot. To do this with the OO interface,
we can use the :meth:`.Artist.set` method to set properties of this
Axes object.
```
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(xlim=[-10000, 140000], xlabel='Total Revenue', ylabel='Company',
title='Company Revenue')
```
We can also adjust the size of this plot using the :func:`pyplot.subplots`
function. We can do this with the ``figsize`` kwarg.
<div class="alert alert-info"><h4>Note</h4><p>While indexing in NumPy follows the form (row, column), the figsize
kwarg follows the form (width, height). This follows conventions in
visualization, which unfortunately are different from those of linear
algebra.</p></div>
```
fig, ax = plt.subplots(figsize=(8, 4))
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(xlim=[-10000, 140000], xlabel='Total Revenue', ylabel='Company',
title='Company Revenue')
```
For labels, we can specify custom formatting guidelines in the form of
functions. Below we define a function that takes an integer as input, and
returns a string as an output. When used with `.Axis.set_major_formatter` or
`.Axis.set_minor_formatter`, they will automatically create and use a
:class:`ticker.FuncFormatter` class.
For this function, the ``x`` argument is the original tick label and ``pos``
is the tick position. We will only use ``x`` here but both arguments are
needed.
```
def currency(x, pos):
    """The two args are the value and tick position"""
    if x >= 1e6:
        s = '${:1.1f}M'.format(x*1e-6)
    else:
        s = '${:1.0f}K'.format(x*1e-3)
    return s
```
We can then apply this function to the labels on our plot. To do this,
we use the ``xaxis`` attribute of our axes. This lets you perform
actions on a specific axis on our plot.
```
fig, ax = plt.subplots(figsize=(6, 8))
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(xlim=[-10000, 140000], xlabel='Total Revenue', ylabel='Company',
title='Company Revenue')
ax.xaxis.set_major_formatter(currency)
```
## Combining multiple visualizations
It is possible to draw multiple plot elements on the same instance of
:class:`axes.Axes`. To do this we simply need to call another one of
the plot methods on that axes object.
```
fig, ax = plt.subplots(figsize=(8, 8))
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
# Add a vertical line, here we set the style in the function call
ax.axvline(group_mean, ls='--', color='r')
# Annotate new companies
for group in [3, 5, 8]:
    ax.text(145000, group, "New Company", fontsize=10,
            verticalalignment="center")
# Now we move our title up since it's getting a little cramped
ax.title.set(y=1.05)
ax.set(xlim=[-10000, 140000], xlabel='Total Revenue', ylabel='Company',
title='Company Revenue')
ax.xaxis.set_major_formatter(currency)
ax.set_xticks([0, 25e3, 50e3, 75e3, 100e3, 125e3])
fig.subplots_adjust(right=.1)
plt.show()
```
## Saving our plot
Now that we're happy with the outcome of our plot, we want to save it to
disk. There are many file formats we can save to in Matplotlib. To see
a list of available options, use:
```
print(fig.canvas.get_supported_filetypes())
```
We can then use the :meth:`figure.Figure.savefig` in order to save the figure
to disk. Note that there are several useful flags we show below:
* ``transparent=True`` makes the background of the saved figure transparent
if the format supports it.
* ``dpi=80`` controls the resolution (dots per square inch) of the output.
* ``bbox_inches="tight"`` fits the bounds of the figure to our plot.
```
# Uncomment this line to save the figure.
# fig.savefig('sales.png', transparent=False, dpi=80, bbox_inches="tight")
```
# (bookmark: currently here)
| github_jupyter |
# Reproduction and analysis of Phil's figures
This notebook reproduces and slightly extends the analysis of Phil's paper.
```
from __future__ import print_function
import pprint
import subprocess
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from network import BCPNN, NetworkManager, BCPNNFast, Protocol
from data_transformer import build_ortogonal_patterns
from plotting_functions import plot_state_variables_vs_time, plot_network_activity, plot_network_activity_angle
from plotting_functions import plot_adaptation_dynamics, plot_weight_matrix, plot_winning_pattern, plot_sequence
%matplotlib inline
matplotlib.rcParams.update({'font.size': 22})
np.set_printoptions(suppress=True, precision=2)
sns.set(font_scale=2.0)
```
#### Git machinery
```
run_old_version = False
if run_old_version:
    hash_when_file_was_written = '8694da3b241624adc178ea3c52e82f7356c7e1ee'
    hash_at_the_moment = subprocess.check_output(["git", 'rev-parse', 'HEAD']).strip()
    print('Actual hash', hash_at_the_moment)
    print('Hash of the commit used to run the simulation', hash_when_file_was_written)
    subprocess.call(['git', 'checkout', hash_when_file_was_written])
```
## Plot 4
First we get the general parameters and build the network
```
# Patterns parameters
hypercolumns = 4
minicolumns = 15
n_patterns = 10 # Number of patterns
# Manager properties
dt = 0.001
T_recalling = 10.0
values_to_save = ['o']
# Protocol
training_time = 0.1
inter_sequence_interval = 1.0
inter_pulse_interval = 0.0
epochs = 20
```
Now we build the Network, Manager and Training Protocol
```
# Build patterns
patterns_dic = build_ortogonal_patterns(hypercolumns, minicolumns)
patterns = list(patterns_dic.values())
patterns = patterns[:n_patterns]
# Build the network
nn = BCPNNFast(hypercolumns, minicolumns, epsilon=0.5*1e-3)
# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
# Build the protocol for training
protocol = Protocol()
protocol.simple_protocol(patterns, training_time=training_time, inter_pulse_interval=inter_pulse_interval,
inter_sequence_interval=inter_sequence_interval, epochs=epochs)
```
We train the network and process the data
```
# Train
epoch_history = manager.run_network_protocol(protocol=protocol, verbose=False, values_to_save_epoch=['w'])
w_history = epoch_history['w']
from_pattern = 0
w_epoch = [w_t[:, from_pattern].reshape(nn.hypercolumns, nn.minicolumns) for w_t in w_history]
w_epoch = [np.mean(w, axis=0) for w in w_epoch]
w_epoch = np.array(w_epoch)
```
#### Plot the data
```
cmap_string = 'nipy_spectral'
cmap_string = 'hsv'
cmap_string = 'Paired'
cmap = matplotlib.cm.get_cmap(cmap_string)
norm = matplotlib.colors.Normalize(vmin=0, vmax=minicolumns)
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
for index, w in enumerate(w_epoch.T):
    ax.plot(w, '*-', color=cmap(norm(index)), markersize=12, label=str(index))
ax.axhline(y=0, color='gray', linestyle='--')
ax.set_xlim([-1, epochs + 2])
ax.set_xlabel('Epoch')
ax.set_ylabel('Connectivity weight')
ax.set_title('AMPA connectivity after training going from 0 to different attractors')
ax.legend();
from_pattern = 4
w_final = w_history[-1][:, from_pattern].reshape((hypercolumns, minicolumns)).mean(axis=0)
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(w_final, '*-', markersize=12)
ax.axhline(y=0, color='grey', linestyle='--')
ax.set_xlabel('Attractor')
ax.set_ylabel('Weight')
ax.set_title('Final connections emanating from attractor ' + str(from_pattern))
plt.show()
plot_weight_matrix(nn)
```
#### Git recovery
```
if run_old_version:
    subprocess.call(['git', 'checkout', 'master'])
```
| github_jupyter |
# Lecture 11: Introduction to Multiobjective Optimization
## What means multiobjective?
* Consider several criteria simultaneously
* Criteria are conflicting (e.g. usually good quality is not cheap) $\Rightarrow$ all the criteria can not be optimized simultaneously
* Need for considering compromises between the criteria
* Compromise can be better than optimal solution in practice (e.g., optimize only costs/profit)
* http://en.wikipedia.org/wiki/Multiobjective_optimization
# Multiobjective optimization process

# Example 1: Continuous casting of steel
* Optimal control of the secondary cooling of continuous casting of steel
* Long history of research in the Dept. of Mathematical Information Technology, Univ. of Jyväskylä
* modelling (1988)
* single objective optimization (1988-1994)
* multiobjective optimization (1994-1998)

* Liquid steel enters (tundish)
* Initial cooling by a water cooled mold $\Rightarrow$ thin crust
* Movement supported by rollers
* Secondary cooling by water sprays
* Rest of the cooling by radiation
## Modelling
* Measuring temperature in casting difficult $\Rightarrow$ numerical temperature distribution
* Process modelled as a multiphase heat equation (PDEs, solid & liquid phase) $\Rightarrow$ temperature distribution
* Numerical model by using the finite element method (FEM)
* Dynamic process
## Single-objective optimization
* Secondary cooling significant: intensity of sprays (easy to control) affects significantly to the solidification of steel
* Goal: minimize the amount of defects in steel
* Quality depends on e.g. the temperature distribution at the surface of steel
* too slow cooling → too long liquid part
* too fast cooling → defects appear
* Objective function: keep the surface temperature as close to a given profile as possible
* Constraints e.g. for the change of temperature and for the temperature in critical spots
## Single-objective optimization results
* Analysis of single objective optimization problem:
* constraints tight (based on metallurgy)
* no feasible solutions
* which constraints to relax?
* $\Rightarrow$ Convert constraints into objective functions (5 in total)
* Enables simultaneous relaxation of different constraints
* Gives information on satisfaction of different constraints and their interdependences
## Multiobjective optimization results
**Found a solution which violated only one of the constraints**
$\Rightarrow$ satisfactory to the application expert
## Example 2: Data-Based Forest Management with Uncertainties and Multiple Objectives
* A recent study by Profs Annika Kangas (Forest research institute, Joensuu) and Kaisa Miettinen, PhD Kyle Eyvindson (University of Helsinki) and myself
* Main research question: How to use multiobjective optimization to handle in forest management simultaneously
* conflicting objectives income and biodiversity, and
* uncertainties.
### Main problems
* **Harvesting implies incomes from forests**, but on the other hand, it **diminishes the recreational and esthetical values of the forest**, and it may have **adverse effects on the natural values of the area**, for instance, the biodiversity within the area and the viability of wildlife populations living in the area.
* On the other hand, forest management planning **involves lots of uncertainty**. It is **not possible to measure all trees** within a forest area, so there is **uncertainty concerning the current state** of the forests. **Measuring biodiversity is prohibitively expensive**, so using **proxy variables** (biodiversity indices) is the only possibility.
* All forest decisions **concern the future** (typically the next 5-20 years), so that the **state of the forest stands and biodiversity and the consequences of the treatment options needs to be predicted** using statistical models. As we do not know the exact consequences of the decisions, the **decisions involve uncertainty which the decision makers may wish to manage.**
## Our approach
* Model the decision problem as a six-objective optimization problem
* Model uncertainty using scenario based-approach
* Different scenarios represent possible future states
* Handle the uncertainty using a well known risk measure called "value-at-risk" with different risk levels
* Objectives to be maximized
1. Expected minimum (over all time periods) income in all the scenarios.
2. Expected minimum (over all time periods) biodiversity in all the scenarios
3. Value of the biodiversity at the given risk
4. Value of the income at the given risk
5. Probability of the income being greater than the value-at-risk. The risk level for income is, thus, one minus this probability.
6. Probability of the biodiversity being greater than the value-at-risk. The risk level for biodiversity is, thus, one minus this probability.
## Multiobjective optimization results
* The decision maker was able to study interdependencies between income and biodiversity, gains and losses in income and biodiversity at different risk levels and risk levels associated to these
* The results will be submitted to the KDD2016 conference for review by the end of this week
* I will *hopefully* present the results at the KDD2016 conference in August in San Francisco, USA
## Mathematical formulation of multiobjective optimization problems
Multiobjective optimization problems are often formulated as
$$
\begin{align}
\min \quad &\{f_1(x),\ldots,f_k(x)\}\\
\text{s.t.} \quad & g_j(x) \geq 0\text{ for all }j=1,\ldots,J\\
& h_k(x) = 0\text{ for all }k=1,\ldots,K\\
&a_i\leq x_i\leq b_i\text{ for all } i=1,\ldots,n\\
&x\in \mathbb R^n,
\end{align}
$$
where $$f_1,\ldots,f_k:\{x\in\mathbb R^n: g_j(x) \geq 0 \text{ for all }j=1,\ldots,J \text{ and } h_k(x) = 0\text{ for all }k=1,\ldots,K\}\mapsto\mathbb R$$ are the objective functions.
## Basic concepts
Basic concepts of solution, feasible solution, equality and inequality constraints are the same.
However, the concept of optimality gets replaced by the concept of **Pareto optimality**. A feasible solution $x^1$ is Pareto optimal to the multiobjective optimization problem, if there does not exist a feasible solution $x^2$ such that
$$
\left\{
\begin{align}
&f_i(x^2)\leq f_i(x^1)\text{ for all }i\in \{1,\ldots,k\}\\
&f_j(x^2)<f_j(x^1)\text{ for some }j\in \{1,\ldots,k\}.\\
\end{align}
\right.
$$

There are now two spaces connected to the problem: the space $\mathbb R^n$ is called the decision space and $\mathbb R^k$ is called the objective space.
**Usually there exist multiple Pareto optimal solutions to a problem. However, the most preferred one needs to be selected for implementation. Thus, a decision maker is needed.**
In addition to Pareto optimality, two more concepts are important, which are called the ideal and the nadir vector. Mathematically the ideal vector $z^{ideal}$ can be defined as having
$$
z^{ideal}_i = \begin{align}
\min \quad &f_i(x)\\
\text{s.t.} \quad &x\text{ is feasible}
\end{align}
$$
for all $i=1,\ldots,k$. The nadir vector $z^{nadir}$ on the other hand has
$$
z^{nadir}_i =
\begin{align}
\max \quad &f_i(x)\\
\text{s.t.} \quad &x\text{ is Pareto optimal},
\end{align}
$$
for all $i=1,\ldots,k$.

## Example
Consider multiobjective optimization problem
$$
\min \{x^2+y,1-x\}\\
\text{s.t. }x\in[0,1], y\geq0.
$$
#### Pareto optimal solutions
Now, the set of Pareto optimal solutions is
$$
\{(x,0):x\in[0,1]\}.
$$
Let's show that $(x',0)$ is Pareto optimal for all $x'\in[0,1]$. Let's assume $(x,y)$ with $x\in[0,1]$ and $y\geq0$ such that
$$
\left\{
\begin{align}
x^2+y\leq x'^2,\text{ and}\\
1-x\leq 1-x'.
\end{align}
\right.
$$
and
$$
\left\{
\begin{align}
x^2+y< x'^2 \text{ or}\\
1-x< 1-x'.
\end{align}
\right.
$$
Second inequality in the first system of inequalities gives $x\geq x'$. This yields from the first inequality in that same system of inequalities
$$
y\leq x'^2-x^2\leq 0.
$$
Thus, $y=0$. This means that $x=x'$ using again the first inequality.
This means that the solution cannot satisfy the second system of strict inequalities.
Let's assume a solution $(x,y)$, where $x\in[0,1]$ and $y>0$, and show that it is not Pareto optimal:
By choosing the solution $(x,0)$, we have
$$
\left\{
\begin{align}
x^2<x^2+y ,\text{ and}\\
1-x\leq 1-x.
\end{align}
\right.
$$
Thus, the solution cannot be Pareto optimal.
#### Ideal
Now
$$
\begin{align}
\min x^2+y\\
\text{s.t. }x\in[0,1],\ y\geq0
\end{align}
= 0
$$
and
$$
\begin{align}
\min 1-x\\
\text{s.t. }x\in[0,1],\ y\geq0
\end{align}
= 0.
$$
Thus, the ideal is
$$
z^{ideal} = (0,0)^T
$$
#### Nadir
Now,
$$
\begin{align}
\max x^2+y\\
\text{s.t. }x\in[0,1],\ y=0
\end{align}
= 1
$$
and
$$
\begin{align}
\max 1-x\\
\text{s.t. }x\in[0,1],\ y=0
\end{align}
= 1.
$$
Thus,
$$
z^{nadir}=(1,1)^T.
$$
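As a hedged numerical cross-check of this example (a brute-force grid evaluation added here, not part of the original lecture notes):
```
import numpy as np

# sample the feasible region: x in [0,1], y >= 0 (y truncated to [0,3] for the grid)
X, Y = np.meshgrid(np.linspace(0, 1, 51), np.linspace(0, 3, 61))
F = np.stack([(X**2 + Y).ravel(), (1 - X).ravel()], axis=1)

# ideal vector: component-wise minimum over the feasible set
print("ideal  ~", F.min(axis=0))       # close to (0, 0)

def dominated(f, points):
    # f is dominated if some point is at least as good in every objective
    # and strictly better in at least one
    return np.any(np.all(points <= f, axis=1) & np.any(points < f, axis=1))

pareto = np.array([f for f in F if not dominated(f, F)])
print("nadir  ~", pareto.max(axis=0))  # close to (1, 1)
```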
| github_jupyter |
# House Building with worker skills
This tutorial includes everything you need to set up decision optimization engines, build constraint programming models.
Table of contents:
- [Describe the business problem](#Describe-the-business-problem)
* [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
* [Use decision optimization](#Use-decision-optimization)
* [Step 1: Set up the engines](#Step-1:-Set-up-the-prescriptive-engine)
- [Step 2: Model the Data](#Step-2:-Model-the-data)
- [Step 3: Set up the prescriptive model](#Step-3:-Set-up-the-prescriptive-model)
* [Define the decision variables](#Define-the-decision-variables)
* [Express the business constraints](#Express-the-business-constraints)
* [Express the objective](#Express-the-objective)
* [Solve with Decision Optimization solve service](#Solve-with-Decision-Optimization-solve-service)
* [Step 4: Investigate the solution and run an example analysis](#Step-4:-Investigate-the-solution-and-then-run-an-example-analysis)
* [Summary](#Summary)
****
### Describe the business problem
* This is a problem of building five houses in different locations; the masonry, roofing, painting, etc. must be scheduled. Some tasks must necessarily take place before others and these requirements are expressed through precedence constraints.
* There are three workers, and each worker has a given skill level for each task. Each task requires one worker; the worker assigned must have a non-null skill level for the task. A worker can be assigned to only one task at a time.
* Each house has a deadline.
* The objective is to maximize the skill levels of the workers assigned to the tasks.
*****
## How decision optimization can help
* Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes.
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
* Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
<br/>
+ For example:
+ Automate complex decisions and trade-offs to better manage limited resources.
+ Take advantage of a future opportunity or mitigate a future risk.
+ Proactively update recommendations based on changing events.
+ Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
<h2>About Detailed Scheduling concepts</h2>
<p>
<ul>
<li> Scheduling consists of assigning start and completion times to a set of activities while satisfying different types of constraints (resource availability, precedence relationships, … ) and optimizing some criteria (minimizing tardiness, …)
<!-- <img src = "./house_building_utils/activity.png" > -->
<img src = "https://github.com/IBMDecisionOptimization/docplex-examples/blob/master/examples/cp/jupyter/house_building_utils/activity.PNG?raw=true " >
<li> Time is considered as a continuous dimension: domain of possible start/completion times for an activity is potentially very large
<li>Besides start and completion times of activities, other types of decision variables are often involved in real industrial scheduling problems (resource allocation, optional activities …)
</ul>
## Use decision optimization
### Step 1: Set up the prescriptive engine
For display of the solution, ensure that the latest version of matplotlib is available:
```
try:
    import matplotlib
    if matplotlib.__version__ < "1.4.3":
        !pip install --upgrade matplotlib
except:
    !pip install --user matplotlib
```
Now, import all required modeling functions that are provided by the Constraint Programming <i>docplex.cp</i> package.
```
from docplex.cp.model import CpoModel
from sys import stdout
from collections import namedtuple
```
### Step 2: Model the data
The planning data contains the number of houses and the maximum number of periods (<i>days</i>) in our schedule.
```
NB_HOUSES = 5
MAX_AMOUNT_OF_PERIODS = 318
HOUSES = range(1, NB_HOUSES + 1)
```
All tasks must start and end between 0 and the maximum number of periods.
```
period_domain = (0, MAX_AMOUNT_OF_PERIODS)
```
For each task type in the house building project, the following table shows the duration of the task in days along with the tasks that must be finished before the task can start. A worker can only work on one task at a time; each task, once started, may not be interrupted.
<p>
| *Task* | *Duration* | *Preceding tasks* |
|---|---|---|
| masonry | 35 | |
| carpentry | 15 | masonry |
| plumbing | 40 | masonry |
| ceiling | 15 | masonry |
| roofing | 5 | carpentry |
| painting | 10 | ceiling |
| windows | 5 | roofing |
| facade | 10 | roofing, plumbing |
| garden | 5 | roofing, plumbing |
| moving | 5 | windows, facade, garden, painting |
##### Tasks' durations
```
Task = (namedtuple("Task", ["name", "duration"]))
TASKS = {Task("masonry", 35),
Task("carpentry", 15),
Task("plumbing", 40),
Task("ceiling", 15),
Task("roofing", 5),
Task("painting", 10),
Task("windows", 5),
Task("facade", 10),
Task("garden", 5),
Task("moving", 5),
}
```
##### Task precedences
```
TaskPrecedence = (namedtuple("TaskPrecedence", ["beforeTask", "afterTask"]))
TASK_PRECEDENCES = {TaskPrecedence("masonry", "carpentry"),
TaskPrecedence("masonry", "plumbing"),
TaskPrecedence("masonry", "ceiling"),
TaskPrecedence("carpentry", "roofing"),
TaskPrecedence("ceiling", "painting"),
TaskPrecedence("roofing", "windows"),
TaskPrecedence("roofing", "facade"),
TaskPrecedence("plumbing", "facade"),
TaskPrecedence("roofing", "garden"),
TaskPrecedence("plumbing", "garden"),
TaskPrecedence("windows", "moving"),
TaskPrecedence("facade", "moving"),
TaskPrecedence("garden", "moving"),
TaskPrecedence("painting", "moving"),
}
```
There are three workers with varying skill levels in regard to the ten tasks. If a worker has a skill level of zero for a task, he may not be assigned to the task.
<p>
| *Task* | *Joe* | *Jack* | *Jim* |
|---|---|---|---|
|masonry |9 | 5 | 0|
|carpentry |7 | 0 | 5|
|plumbing |0 | 7 | 0|
|ceiling |5 | 8 | 0|
|roofing |6 | 7 | 0|
|painting |0 | 9 | 6|
|windows |8 | 0 | 5|
|façade |5 | 5 | 0|
|garden |5 | 5 | 9|
|moving |6 | 0 | 8|
##### Worker names
```
WORKERS = {"Joe", "Jack", "Jim"}
```
##### Worker names and levels for each of their skills
```
Skill = (namedtuple("Skill", ["worker", "task", "level"]))
SKILLS = {Skill("Joe", "masonry", 9),
Skill("Joe", "carpentry", 7),
Skill("Joe", "ceiling", 5),
Skill("Joe", "roofing", 6),
Skill("Joe", "windows", 8),
Skill("Joe", "facade", 5),
Skill("Joe", "garden", 5),
Skill("Joe", "moving", 6),
Skill("Jack", "masonry", 5),
Skill("Jack", "plumbing", 7),
Skill("Jack", "ceiling", 8),
Skill("Jack", "roofing", 7),
Skill("Jack", "painting", 9),
Skill("Jack", "facade", 5),
Skill("Jack", "garden", 5),
Skill("Jim", "carpentry", 5),
Skill("Jim", "painting", 6),
Skill("Jim", "windows", 5),
Skill("Jim", "garden", 9),
Skill("Jim", "moving", 8)
}
```
##### Utility functions
find_tasks: returns the task it refers to in the TASKS vector
```
def find_tasks(name):
    return next(t for t in TASKS if t.name == name)
```
find_skills: returns the skill it refers to in the SKILLS vector
```
def find_skills(worker, task):
    return next(s for s in SKILLS if (s.worker == worker) and (s.task == task))
```
find_max_level_skill: returns the "skill" tuple whose level is the maximum for a given task
```
def find_max_level_skill(task):
    st = [s for s in SKILLS if s.task == task]
    return next(sk for sk in st if sk.level == max([s.level for s in st]))
```
### Step 3: Set up the prescriptive model
<h3>Create the model container</h3>
<p>
The model is represented by a Python object that is filled with the different model elements (variables, constraints, objective function, etc.). The first step is therefore to create such an object:
```
mdl = CpoModel(name="HouseBuilding")
```
#### Define the decision variables
<h5><i><font color=blue>Concept: interval variable</font></i></h5>
<p>
<ul>
<li> What for?<br>
<blockquote> Modeling an interval of time during which a particular property holds <br>
(an activity executes, a resource is idle, a tank must be non-empty, …)</blockquote>
<li> Example:<br>
<blockquote><code><font color=green>interval_var(start=(0,1000), end=(0,1000), size=(10,20))</font></code>
</blockquote>
<!-- <img src = "./house_building_utils/intervalVar.png" > -->
<img src = "https://github.com/IBMDecisionOptimization/docplex-examples/blob/master/examples/cp/jupyter/house_building_utils/intervalVar.PNG?raw=true" >
<li>Properties:
<ul>
<li>The **value** of an interval variable is an integer interval [start,end)
<li>**Domain** of possible values: [0,10), [1,11), [2,12),...[990,1000), [0,11),[1,12),...
<li>Domain of interval variables is represented **compactly** in CP Optimizer (a few bounds: smin, smax, emin, emax, szmin, szmax)
</ul>
</ul>
For each house, an interval variable is created for each task.<br>
This interval must start and end inside period_domain, and its duration is set to the value stated in the TASKS definition.
```
tasks = {}  # dict of interval variable for each house and task
for house in HOUSES:
    for task in TASKS:
        tasks[(house, task)] = mdl.interval_var(start=period_domain,
                                                end=period_domain,
                                                size=task.duration,
                                                name="house {} task {}".format(house, task))
```
<h5><i><font color=blue>Concept: optional interval variable</font></i></h5>
<p>
<ul>
<li>Interval variables can be defined as being **optional** that is, it is part of the decisions of the problem to decide whether the interval will be **present** or **absent** in the solution<br><br>
<li> What for?<br>
<blockquote> Modeling optional activities, alternative execution modes for activities, and … most of the discrete decisions in a schedule</blockquote>
<li> Example:<br>
<blockquote><code><font color=green>interval_var(</font><font color=red>optional=True</font><font color=green>, start=(0,1000), end=(0,1000), size=(10,20))</font></code>
</blockquote>
<li>Properties:
<ul>
<li>An optional interval variable has an additional possible value in its domain (absence value)
<li>**Optionality** is a powerful property that you must learn to leverage in your models
</ul>
</ul>
For each house, an __optional__ interval variable is created for each skill.<br>
Skill being a tuple (worker, task, level), this means that for each house, an __optional__ interval variable is created for each worker-task pair such that the skill level of this worker for this task is > 0.<p>
The "**set_optional()**" specifier allows a choice between different variables, thus between different house-skill pairs.
This means that the engine decides if the interval will be present or absent in the solution.
```
wtasks = {}  # dict of interval variable for each house and skill
for house in HOUSES:
    for skill in SKILLS:
        iv = mdl.interval_var(name='H' + str(house) + '-' + skill.task + '(' + skill.worker + ')')
        iv.set_optional()
        wtasks[(house, skill)] = iv
```
#### Express the business constraints
<h5>Temporal constraints</h5>
<h5><i><font color=blue>Concept: precedence constraint</font></i></h5>
<p>
<ul>
<li> What for?<br>
<ul>
<li>Modeling temporal constraints between interval variables
<li>Modeling constant or variable minimal delays
</ul>
<li>Properties
<blockquote>The semantics of the constraints handles optionality (as for all constraints in CP Optimizer).<br>
Example of endBeforeStart:<br>
<code><font color=green>end_before_start(a,b,z)</font></code><br>
present(a) <font color=red>AND</font> present(b) ⇒ end(a)+z ⩽ start(b)
</blockquote>
</ul>
The tasks in the model have precedence constraints that are added to the model.
```
for h in HOUSES:
    for p in TASK_PRECEDENCES:
        mdl.add(mdl.end_before_start(tasks[(h, find_tasks(p.beforeTask))], tasks[(h, find_tasks(p.afterTask))]))
```
<h5>Alternative workers</h5>
<h5><i><font color=blue>Concept: alternative constraint</font></i></h5>
<p>
<ul>
<li> What for?<br>
<ul>
<li>Modeling alternative resource/modes/recipes
<li>In general modeling a discrete selection in the schedule
</ul>
<li> Example:<br>
<blockquote><code><font color=green>alternative(a,[b1,...,bn])</font></code>
</blockquote>
<!-- <img src = "./house_building_utils/alternative.png" > -->
<img src = "https://github.com/IBMDecisionOptimization/docplex-examples/blob/master/examples/cp/jupyter/house_building_utils/alternative.PNG?raw=true" >
<li>Remark: Master interval variable **a** can of course be optional
</ul>
To constrain the solution so that exactly one of the interval variables wtasks associated with a given task of a given house is to be present in the solution, an "**alternative**" constraint is used.
```
for h in HOUSES:
    for t in TASKS:
        mdl.add(mdl.alternative(tasks[(h, t)], [wtasks[(h, s)] for s in SKILLS if (s.task == t.name)], 1))
```
<h5>No overlap constraint</h5>
<h5><i><font color=blue>Concept: No-overlap constraint</font></i></h5>
<p>
<ul>
<li> Constraint noOverlap schedules a group of interval variables in such a way that they do not overlap in time.
<li> Absent interval variables are ignored.
<li>It is possible to constrain minimum delays between intervals using a transition matrix.
<li>It is possible to constrain the first and last intervals in the sequence, as well as the next or preceding interval.
</ul>
<!-- <img src = "./house_building_utils/noOverlap.png" > -->
<img src = "https://github.com/IBMDecisionOptimization/docplex-examples/blob/master/examples/cp/jupyter/house_building_utils/noOverlap.PNG?raw=true" >
To add the constraints that a given worker can be assigned only one task at a given moment in time, a **noOverlap** constraint is used.
```
for w in WORKERS:
    mdl.add(mdl.no_overlap([wtasks[(h, s)] for h in HOUSES for s in SKILLS if s.worker == w]))
```
#### Express the objective
The presence of each optional interval variable in the solution must be accounted for in the objective. Thus, for each of these possible tasks, the objective is incremented by the product of the skill level and the expression representing the presence of the interval variable in the solution.<p>
The objective of this problem is to maximize the skill level used for all the tasks.
```
obj = mdl.sum([s.level * mdl.presence_of(wtasks[(h, s)]) for s in SKILLS for h in HOUSES])
mdl.add(mdl.maximize(obj))
```
#### Solve the model
The model is now completely defined. It is time to solve it!
```
# Solve the model
print("\nSolving model....")
msol = mdl.solve(TimeLimit=10)
```
### Step 4: Investigate the solution and then run an example analysis
```
print("Solve status: " + msol.get_solve_status())
if msol.is_solution():
stdout.write("Solve time: " + str(msol.get_solve_time()) + "\n")
# Sort tasks in increasing begin order
ltasks = []
for hs in HOUSES:
for tsk in TASKS:
(beg, end, dur) = msol[tasks[(hs, tsk)]]
ltasks.append((hs, tsk, beg, end, dur))
ltasks = sorted(ltasks, key = lambda x : x[2])
# Print solution
print("\nList of tasks in increasing start order:")
for tsk in ltasks:
print("From " + str(tsk[2]) + " to " + str(tsk[3]) + ", " + tsk[1].name + " in house " + str(tsk[0]))
else:
stdout.write("No solution found\n")
```
#### Import graphical tools
*You can set __POP\_UP\_GRAPHIC=True__ if you prefer a pop up graphic window instead of an inline one.*
```
POP_UP_GRAPHIC=False
import docplex.cp.utils_visu as visu
import matplotlib.pyplot as plt
if not POP_UP_GRAPHIC:
    %matplotlib inline
    # Change the plot size
    from pylab import rcParams
    rcParams['figure.figsize'] = 15, 3
```
#### Draw solution
#### Useful functions
To facilitate the display of task names, keep only the first n characters.
```
def compact_name(name, n): return name[:n]

if msol and visu.is_visu_enabled():
    workers_colors = {}
    workers_colors["Joe"] = 'lightblue'
    workers_colors["Jack"] = 'violet'
    workers_colors["Jim"] = 'lightgreen'
    visu.timeline('Solution per houses', 0, MAX_AMOUNT_OF_PERIODS)
    for h in HOUSES:
        visu.sequence(name="house " + str(h))
        for s in SKILLS:
            wt = msol.get_var_solution(wtasks[(h, s)])
            if wt.is_present():
                color = workers_colors[s.worker]
                wtname = compact_name(s.task, 2)
                visu.interval(wt, color, wtname)
    visu.show()
```
The purpose of this function is to compact the names of the different tasks to make the graphical display more readable.
For example "H3-garden" becomes "G3"
```
def compact_house_task(name):
    loc, task = name[1:].split('-', 1)
    return task[0].upper() + loc
```
A green color is used in the display when the task is using the most skilled worker. A red color is used in the display when the task does not use the most skilled worker.
```
if msol and visu.is_visu_enabled():
    visu.timeline('Solution per workers', 0, MAX_AMOUNT_OF_PERIODS)
    for w in WORKERS:
        visu.sequence(name=w)
        for h in HOUSES:
            for s in SKILLS:
                if s.worker == w:
                    wt = msol.get_var_solution(wtasks[(h, s)])
                    if wt.is_present():
                        ml = find_max_level_skill(s.task).level
                        if s.level == ml:
                            color = 'lightgreen'
                        else:
                            color = 'salmon'
                        wtname = compact_house_task(wt.get_name())
                        visu.interval(wt, color, wtname)
    visu.show()
```
<h4>Going further with Constraint Programming</h4>
The latest installable package is available on PyPI here: <a href="https://pypi.python.org/pypi/docplex" target="_blank" rel="noopener noreferrer">https://pypi.python.org/pypi/docplex</a>
A complete set of modeling examples can be downloaded here: <a href="https://github.com/IBMDecisionOptimization/docplex-examples" target="_blank" rel="noopener noreferrer">https://github.com/IBMDecisionOptimization/docplex-examples</a>
## Summary
You have learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to build and solve a Constraint Programming model.
#### References
* <a href="https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html" target="_blank" rel="noopener noreferrer">Decision Optimization CPLEX Modeling for Python documentation</a>
* <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/getting-started/welcome-main.html" target="_blank" rel="noopener noreferrer">Watson Studio documentation</a>
<hr>
Copyright © 2017-2021. This notebook and its source code are released under the terms of the MIT License.
<div style="background:#F5F7FA; height:110px; padding: 2em; font-size:14px;">
<span style="font-size:18px;color:#152935;">Love this notebook? </span>
<span style="font-size:15px;color:#152935;float:right;margin-right:40px;">Don't have an account yet?</span><br>
<span style="color:#5A6872;">Share it with your colleagues and help them discover the power of Watson Studio!</span>
<span style="border: 1px solid #3d70b2;padding:8px;float:right;margin-right:40px; color:#3d70b2;"><a href="https://ibm.co/wsnotebooks" target="_blank" style="color: #3d70b2;text-decoration: none;">Sign Up</a></span><br>
</div>
# Amazon Comprehend Sentiment Analysis
Amazon Comprehend can be used to perform sentiment analysis. You can accurately analyze customer interactions, including social media posts, reviews, and customer interaction transcripts, to improve your products and services.
You can use Amazon Comprehend to determine the sentiment of a document. You can determine if the sentiment is positive, negative, neutral, or mixed. For example, you can use sentiment analysis to determine the sentiments of comments on a blog posting to determine if your readers liked the post.
Determine sentiment operations can be performed using any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
You can use any of the following operations to detect the sentiment of a document or a set of documents:
* DetectSentiment
* BatchDetectSentiment
* StartSentimentDetectionJob
The operations return the most likely sentiment for the text as well as the scores for each of the sentiments. The score represents the likelihood that the sentiment was correctly detected. In the example below, it is 95 percent likely that the text has a Positive sentiment, and there is a less than 1 percent likelihood that the text has a Negative sentiment. You can use the SentimentScore to determine whether the accuracy of the detection meets the needs of your application.
The DetectSentiment operation returns an object that contains the detected sentiment and a SentimentScore object. The BatchDetectSentiment operation returns a list of sentiments and SentimentScore objects, one for each document in the batch. The StartSentimentDetectionJob operation starts an asynchronous job that produces a file containing a list of sentiments and SentimentScore objects, one for each document in the job.
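For instance, here is a minimal sketch of using the returned Sentiment label and its SentimentScore to gate downstream logic on detection confidence; the client setup, threshold, and sample text below are illustrative assumptions (the lab's actual setup follows in the next sections):
```
import boto3

comprehend = boto3.client('comprehend')  # region taken from the default session; adjust as needed

resp = comprehend.detect_sentiment(Text="I love this product", LanguageCode='en')
sentiment = resp['Sentiment']                                # e.g. 'POSITIVE'
confidence = resp['SentimentScore'][sentiment.capitalize()]  # score of the winning label

# Only act on the prediction when the detector is sufficiently confident
if confidence >= 0.90:
    print(f"{sentiment} (score {confidence:.2f})")
else:
    print("Low-confidence prediction, route for manual review")
```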
This lab includes step-by-step instructions for performing sentiment analysis using Amazon Comprehend.
## Setup
Let's start by specifying:
* AWS region.
* The IAM role arn used to give access to Comprehend API and S3 bucket.
* The S3 bucket that you want to use for training and model data.
```
import os
import boto3
import re
import json
import sagemaker
from sagemaker import get_execution_role
region = boto3.Session().region_name
role = get_execution_role()
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/sentiment-analysis"
bucketuri="s3://"+bucket+"/"+prefix
print(bucketuri)
# customize to your bucket where you have stored the data
```
## Data
Let's start by uploading the dataset to the sample data S3 bucket. The sample dataset contains Amazon reviews taken from the larger dataset "Amazon reviews - Full", which was published with the article "Character-level Convolutional Networks for Text Classification" (Xiang Zhang et al., 2015).
Now let's read this into a pandas data frame and take a look.
```
# Download the data set
!wget https://docs.aws.amazon.com/comprehend/latest/dg/samples/tutorial-reviews-data.zip
!apt-get install unzip -y
!unzip -o tutorial-reviews-data.zip
import numpy as np # For matrix operations and numerical processing
import pandas as pd
# data = pd.read_csv('./amazon-reviews.csv')
data = pd.read_csv('./amazon-reviews.csv', header=None, names=['Review'])
pd.set_option('display.max_rows', 20)# Keep the output on one page
data
```
## Use detect_sentiment API for real time usecase
First, we will be using detect_sentiment API. The DetectSentiment operation returns an object that contains the detected sentiment and a SentimentScore object.
Let's check a plain text example to begin.
Steps:
* Use boto3 to initialize the comprehend client
* Define the sample text
* Call the detect_sentiment API and pass in the text as the input parameter.
```
import boto3
import json
comprehend = boto3.client(service_name='comprehend', region_name=region)
text = "It is raining today in Seattle"
print('Calling DetectSentiment')
print(json.dumps(comprehend.detect_sentiment(Text=text, LanguageCode='en'), sort_keys=True, indent=4))
print('End of DetectSentiment\n')
```
Now let's use the detect_sentiment API on our sample dataset and check the response.
Note: we are only testing with 5 reviews and will check the output.
```
for index, row in data.iloc[:5].iterrows():
    print(row[0])
    print("\n")
    print(json.dumps(comprehend.detect_sentiment(Text=row[0], LanguageCode='en'), sort_keys=True, indent=4))
```
## Use batch_detect_sentiment API
To send batches of up to 25 documents, you can use the Amazon Comprehend batch operations. Calling a batch operation is identical to calling the single document APIs for each document in the request. Using the batch APIs can result in better performance for your applications.
```
# We will prepare a list of the first 25 review documents so we can use them with the batch function
rows, columns = data.shape
list_text = []  # list of documents to send in one batch call
for index in range(25):  # iterate over the dataframe rows
    list_text.append(data.iat[index, 0])
response = comprehend.batch_detect_sentiment(
    TextList=list_text,
    LanguageCode='en'
)
print(response)
```
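Each batch call accepts at most 25 documents, so larger datasets can be processed by splitting the document list into chunks of 25 and calling the API once per chunk. Below is a minimal sketch reusing the `data` frame and `comprehend` client defined above:
```
# Score the whole review column, 25 documents per batch call
BATCH_SIZE = 25
all_results = []

reviews = data['Review'].astype(str).tolist()
for start in range(0, len(reviews), BATCH_SIZE):
    chunk = reviews[start:start + BATCH_SIZE]
    resp = comprehend.batch_detect_sentiment(TextList=chunk, LanguageCode='en')
    # ResultList indices are relative to the chunk; shift them back to dataframe rows.
    for item in resp['ResultList']:
        all_results.append({'row': start + item['Index'], 'sentiment': item['Sentiment']})
    # Any documents the service rejects (e.g. over the size limit) appear in resp['ErrorList'].

print(len(all_results), "documents scored")
```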
## Asynchronous Batch Processing using StartSentimentDetectionJob
To analyze large documents and large collections of documents, use one of the Amazon Comprehend asynchronous operations. There is an asynchronous version of each of the Amazon Comprehend operations and an additional set of operations for topic modeling.
To analyze a collection of documents, you typically perform the following steps:
* Store the documents in an Amazon S3 bucket.
* Start one or more jobs to analyze the documents.
* Monitor the progress of an analysis job.
* Retrieve the results of the analysis from an S3 bucket when the job is complete.
The following sections describe using the Amazon Comprehend API to run asynchronous operations.
We would be using the following API:
StartSentimentDetectionJob — Start a job to detect the emotional sentiment in each document in the collection.
```
s3 = boto3.resource('s3')
s3.Bucket(bucket).upload_file("amazon-reviews.csv", "sagemaker/sentiment-analysis/amazon-reviews.csv")
import uuid
job_uuid = uuid.uuid1()
job_name = f"sentimentanalysis-job-{job_uuid}"
inputs3uri= bucketuri+"/amazon-reviews.csv"
asyncresponse = comprehend.start_sentiment_detection_job(
    InputDataConfig={
        'S3Uri': inputs3uri,
        'InputFormat': 'ONE_DOC_PER_LINE'
    },
    OutputDataConfig={
        'S3Uri': bucketuri,
    },
    DataAccessRoleArn=role,
    JobName=job_name,
    LanguageCode='en',
)
events_job_id = asyncresponse['JobId']
job = comprehend.describe_sentiment_detection_job(JobId=events_job_id)
print(job)
from time import sleep
# Get current job status
job = comprehend.describe_sentiment_detection_job(JobId=events_job_id)
print(job)
# Loop until job is completed
waited = 0
timeout_minutes = 10
while job['SentimentDetectionJobProperties']['JobStatus'] != 'COMPLETED':
    sleep(60)
    waited += 60
    assert waited//60 < timeout_minutes, "Job timed out after %d seconds." % waited
    job = comprehend.describe_sentiment_detection_job(JobId=events_job_id)
```
The job takes roughly 6-8 minutes to complete, and you can download the output from the output location you specified in the job parameters. You can also open Comprehend in your console and check the job details there. The asynchronous method is very useful when you have many documents and want to process them as an asynchronous batch.
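Once the job status is COMPLETED, the results can also be pulled back from S3 programmatically. The following is a minimal sketch, assuming the job writes Comprehend's usual `output.tar.gz` archive of JSON lines; the S3 location is read from the job description rather than hard-coded, and the local file names are arbitrary choices:
```
import tarfile
import json

# The describe call exposes the S3 location of the compressed results
output_s3_uri = job['SentimentDetectionJobProperties']['OutputDataConfig']['S3Uri']
output_bucket, output_key = output_s3_uri.replace("s3://", "").split("/", 1)

s3.Bucket(output_bucket).download_file(output_key, "sentiment-output.tar.gz")

# The archive normally contains a single 'output' file with one JSON document per input line
with tarfile.open("sentiment-output.tar.gz") as tar:
    tar.extractall("sentiment-output")

with open("sentiment-output/output", "r") as f:
    results = [json.loads(line) for line in f if line.strip()]

print(results[0])
```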
```
%matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')
```
Applying Textures {#ref_texture_example}
=================
Plot a mesh with an image projected onto it as a texture.
```
import pyvista as pv
from pyvista import examples
import numpy as np
from matplotlib.cm import get_cmap
```
Texture mapping is easily implemented using PyVista. Many of the
geometric objects come preloaded with texture coordinates, so quickly
creating a surface and displaying an image is simply:
```
# load a sample texture
tex = examples.download_masonry_texture()
# create a surface to host this texture
surf = pv.Cylinder()
surf.plot(texture=tex)
```
But what if your dataset doesn\'t have texture coordinates? Then you can
harness the
`pyvista.DataSetFilters.texture_map_to_plane`{.interpreted-text
role="func"} filter to properly map an image to a dataset\'s surface.
For example, let\'s map that same image of bricks to a curvy surface:
```
# create a structured surface
x = np.arange(-10, 10, 0.25)
y = np.arange(-10, 10, 0.25)
x, y = np.meshgrid(x, y)
r = np.sqrt(x ** 2 + y ** 2)
z = np.sin(r)
curvsurf = pv.StructuredGrid(x, y, z)
# Map the curved surface to a plane - use best fitting plane
curvsurf.texture_map_to_plane(inplace=True)
curvsurf.plot(texture=tex)
```
Display scalar data along with a texture by ensuring the
`interpolate_before_map` setting is `False` and specifying both the
`texture` and `scalars` arguments.
```
elevated = curvsurf.elevation()
elevated.plot(scalars='Elevation',
cmap='terrain',
texture=tex,
interpolate_before_map=False)
```
Note that this process can be completed with any image texture!
```
# use the puppy image
tex = examples.download_puppy_texture()
curvsurf.plot(texture=tex)
```
Textures from Files
===================
What about loading your own texture from an image? This is often most
easily done using the `pyvista.read_texture`{.interpreted-text
role="func"} function - simply pass an image file\'s path, and this
function will handle making a `vtkTexture` for you to use.
```
image_file = examples.mapfile
tex = pv.read_texture(image_file)
curvsurf.plot(texture=tex)
```
NumPy Arrays as Textures
========================
Want to use a programmatically built image?
`pyvista.UniformGrid`{.interpreted-text role="class"} objects can be
converted to textures using `pyvista.image_to_texture`{.interpreted-text
role="func"} and 3D NumPy (X by Y by RGB) arrays can be converted to
textures using `pyvista.numpy_to_texture`{.interpreted-text
role="func"}.
```
# create an image using numpy,
xx, yy = np.meshgrid(np.linspace(-200, 200, 20), np.linspace(-200, 200, 20))
A, b = 500, 100
zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))
# Creating a custom RGB image
cmap = get_cmap("nipy_spectral")
norm = lambda x: (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))
hue = norm(zz.ravel())
colors = (cmap(hue)[:, 0:3] * 255.0).astype(np.uint8)
image = colors.reshape((xx.shape[0], xx.shape[1], 3), order="F")
# Convert 3D numpy array to texture
tex = pv.numpy_to_texture(image)
# Render it!
curvsurf.plot(texture=tex)
```
Create a GIF Movie with updating textures
=========================================
Generate a moving gif from an active plotter with updating textures.
```
# Create a plotter object
plotter = pv.Plotter(notebook=False, off_screen=True)
# Open a gif
plotter.open_gif("texture.gif")
pts = curvsurf.points.copy()
# Update Z and write a frame for each updated position
nframe = 15
for phase in np.linspace(0, 2 * np.pi, nframe + 1)[:nframe]:
    # create an image using numpy
    z = np.sin(r + phase)
    pts[:, -1] = z.ravel()
    # Creating a custom RGB image
    zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))
    hue = norm(zz.ravel()) * 0.5 * (1.0 + np.sin(phase))
    colors = (cmap(hue)[:, 0:3] * 255.0).astype(np.uint8)
    image = colors.reshape((xx.shape[0], xx.shape[1], 3), order="F")
    # Convert 3D numpy array to texture
    tex = pv.numpy_to_texture(image)
    plotter.add_mesh(curvsurf, smooth_shading=True, texture=tex)
    plotter.update_coordinates(pts, render=False)
    # must update normals when smooth shading is enabled
    plotter.mesh.compute_normals(cell_normals=False, inplace=True)
    plotter.render()
    plotter.write_frame()
    plotter.clear()
# Closes and finalizes movie
plotter.close()
```
Textures with Transparency
==========================
Textures can also specify per-pixel opacity values. The image must
contain a 4th channel specifying the opacity value from 0
\[transparent\] to 255 \[fully visible\]. To enable this feature just
pass the opacity array as the 4th channel of the image as a 3
dimensional matrix with shape \[nrows, ncols, 4\]
`pyvista.numpy_to_texture`{.interpreted-text role="func"}.
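For instance, a programmatically built alpha channel can be appended to the RGB image created earlier; the fade pattern below is an arbitrary choice, and this sketch reuses the `xx`, `yy`, `image`, and `norm` objects from the previous cells:
```
# Opacity channel that increases with distance from the center (transparent middle)
alpha = (norm(np.sqrt(xx ** 2 + yy ** 2)) * 255.0).astype(np.uint8)
rgba_image = np.dstack((image, alpha))  # shape (nrows, ncols, 4)

tex_rgba = pv.numpy_to_texture(rgba_image)
curvsurf.plot(texture=tex_rgba)
```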
Here we can download an image that has an alpha channel:
```
rgba = examples.download_rgba_texture()
rgba.n_components
# Render it!
curvsurf.plot(texture=rgba, show_grid=True)
```
Repeating Textures
==================
What if you have a single texture that you\'d like to repeat across a
mesh? Simply define the texture coordinates for all nodes explicitly.
Here we create the texture coordinates to fill up the grid with several
mappings of a single texture. In order to do this we must define texture
coordinates outside of the typical `(0, 1)` range:
```
axial_num_puppies = 4
xc = np.linspace(0, axial_num_puppies, curvsurf.dimensions[0])
yc = np.linspace(0, axial_num_puppies, curvsurf.dimensions[1])
xxc, yyc = np.meshgrid(xc, yc)
puppy_coords = np.c_[yyc.ravel(), xxc.ravel()]
```
By defining texture coordinates that range `(0, 4)` on each axis, we
will produce 4 repetitions of the same texture on this mesh.
Then we must associate those texture coordinates with the mesh through
the `pyvista.DataSet.active_t_coords`{.interpreted-text role="attr"}
property.
```
curvsurf.active_t_coords = puppy_coords
```
Now display all the puppies!
```
# use the puppy image
tex = examples.download_puppy_texture()
curvsurf.plot(texture=tex, cpos="xy")
```
Spherical Texture Coordinates
=============================
We have a built-in convenience method for mapping textures to spherical
coordinate systems much like the planar mapping demoed above.
```
mesh = pv.Sphere()
tex = examples.download_masonry_texture()
mesh.texture_map_to_sphere(inplace=True)
mesh.plot(texture=tex)
```
The helper method above does not always produce the desired texture
coordinates, so sometimes it must be done manually. Here is a great,
user contributed example from [this support
issue](https://github.com/pyvista/pyvista-support/issues/257)
Manually create the texture coordinates for a globe map. First, we
create the mesh that will be used as the globe. Note the
[start\_theta]{.title-ref} for a slight overlapping.
```
sphere = pv.Sphere(radius=1,
theta_resolution=120,
phi_resolution=120,
start_theta=270.001,
end_theta=270)
# Initialize the texture coordinates array
sphere.active_t_coords = np.zeros((sphere.points.shape[0], 2))
# Populate by manually calculating
for i in range(sphere.points.shape[0]):
    sphere.active_t_coords[i] = [
        0.5 + np.arctan2(-sphere.points[i, 0], sphere.points[i, 1])/(2 * np.pi),
        0.5 + np.arcsin(sphere.points[i, 2])/np.pi
    ]
# And let's display it with a world map
tex = examples.load_globe_texture()
sphere.plot(texture=tex)
```
# Player 0 is the deep player with rank-based experience replay
# Player 1 is the deep player with random experience replay
```
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
with open('Drank_lastepisode_heat.pickle', 'rb') as f:
    last_heat = pickle.load(f)
with open('Drank_heat_unique0.pickle', 'rb') as f:
    heat_uniq0 = pickle.load(f)
with open('Drank_heat_freq0.pickle', 'rb') as f:
    heat_freq0 = pickle.load(f)
with open('Drank_heat_unique1.pickle', 'rb') as f:
    heat_uniq1 = pickle.load(f)
with open('Drank_heat_freq1.pickle', 'rb') as f:
    heat_freq1 = pickle.load(f)
num_episodes = len(heat_freq0)
actions_space = np.arange(1.43, 2.0, 0.04)
num_actions = actions_space.size
num_sub = 500
np.unique(last_heat[:, 0, :, :], return_counts=True)
np.unique(last_heat[:, 0, :, :], return_counts=True)[1]/num_sub/num_actions**2
np.unique(last_heat[:, 1, :, :], return_counts=True)
np.unique(last_heat[:, 1, :, :], return_counts=True)[1]/num_sub/num_actions**2
np.sum(last_heat[:, 0, 0, 0] - last_heat[:, 1, 0, 0] < 0)/num_sub
np.sum(last_heat[:, 0, 0, 0] - last_heat[:, 1, 0, 0] == 0)/num_sub
np.sum(last_heat[:, 0, 0, 0] - last_heat[:, 1, 0, 0] > 0)/num_sub
full_freq0 = np.zeros((num_episodes, num_actions))
for i in range(num_episodes):
    full_freq0[i, heat_uniq0[i].astype(int)] = heat_freq0[i]
np.argmax(np.sum(full_freq0, axis=0))
max_price = np.zeros(num_episodes)
max_freq = np.zeros(num_episodes)
bottom9_freq = np.zeros(num_episodes)
bottom3_freq = np.zeros(num_episodes)
for i in range(num_episodes):
    max_price[i] = np.max(heat_uniq0[i])
    max_freq[i] = np.argmax(full_freq0[i, :])
    bottom9_freq[i] = np.sum(full_freq0[i, :9])
    bottom3_freq[i] = np.sum(full_freq0[i, :3])
plt.figure(figsize=(8, 6))
ax = sns.heatmap(last_heat[-10, 0, :, :], cbar=False, annot=True)
plt.xlabel('Player 1')
plt.ylabel('Player 0')
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_ticks([0, 2, 4, 6, 8, 10, 12, 14])
fig = ax.get_figure()
# fig.savefig('htrg_heat0.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1)
fig, ax = plt.subplots(figsize=(8, 6), dpi=120)
ax.plot(bottom9_freq/num_sub/num_actions**2, color='tab:blue', label=r'Price $\leq$ 1.75')
ax.plot(bottom3_freq/num_sub/num_actions**2, color='tab:orange', label ='Price $\leq$ 1.51')
# ax.plot(full_freq0[:, 10]/num_sub/num_actions**2, color='tab:red', label ='Price = 1.83')
ax.set_ylabel('Percent')
ax.set_xlabel('Episodes')
ax.legend(loc='best')
ax.grid(True)
# plt.savefig('.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1)
plt.show()
fig, ax = plt.subplots(figsize=(14, 5), dpi=120)
ax.plot(1.43 + 0.04*max_price, color='tab:blue', label='Highest')
ax.plot(1.43 + 0.04*max_freq, color='tab:orange', label ='Most frequent')
ax.set_ylabel('Price')
ax.yaxis.set_ticks(np.arange(1.43, 2.0, 0.04))
ax.set_xlabel('Episodes')
ax.legend(loc='best')
ax.grid(True)
# plt.savefig('.eps', format='eps', dpi=1000, bbox_inches='tight', pad_inches=0.1)
plt.show()
full_freq1 = np.zeros((num_episodes, num_actions))
for i in range(num_episodes):
    full_freq1[i, heat_uniq1[i].astype(int)] = heat_freq1[i]
max_price1 = np.zeros(num_episodes)
max_freq1 = np.zeros(num_episodes)
bottom9_freq1 = np.zeros(num_episodes)
bottom3_freq1 = np.zeros(num_episodes)
for i in range(num_episodes):
    max_price1[i] = np.max(heat_uniq1[i])
    max_freq1[i] = np.argmax(full_freq1[i, :])
    bottom9_freq1[i] = np.sum(full_freq1[i, :9])
    bottom3_freq1[i] = np.sum(full_freq1[i, :3])
fig, ax = plt.subplots(figsize=(18, 6), dpi=120)
ax.plot(bottom9_freq1/num_sub/num_actions**2, color='tab:blue', label=r'Price $\leq$ 1.75')
ax.plot(bottom3_freq1/num_sub/num_actions**2, color='tab:orange', label =r'Price $\leq$ 1.51')
ax.set_ylabel('Percent')
ax.set_xlabel('Episodes')
ax.legend(loc='best')
ax.grid(True)
# plt.savefig('.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1)
plt.show()
fig, ax = plt.subplots(figsize=(18, 6), dpi=120)
ax.plot(1.43 + 0.04*max_price1, color='tab:blue', label='Highest')
ax.plot(1.43 + 0.04*max_freq1, color='tab:orange', label ='Most frequent')
ax.set_ylabel('Price')
ax.yaxis.set_ticks(np.arange(1.43, 2.0, 0.04))
ax.set_xlabel('Episodes')
ax.legend(loc='best')
ax.grid(True)
# plt.savefig('.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1)
plt.show()
N = 2000 - 30
ind = np.arange(N+1, N+31)
width = 0.5
# plt.style.use('default')
cm = plt.get_cmap('tab20')
plt.rcParams["axes.prop_cycle"] = plt.cycler('color', [cm(1.*i/num_actions) for i in range(num_actions)])
p = []
fig, ax = plt.subplots(figsize=(8,6), dpi=120)
for k in range(num_actions):
    p.append(plt.bar(ind, full_freq0[N:N+30, k]/112500, width, bottom = np.sum(full_freq0[N:N+30, :k], axis=1)/112500))
# plt.legend((p[0][0], p[1][0], p[2][0], p[3][0], p[4][0], p[5][0], p[6][0], p[7][0],
# p[8][0], p[9][0], p[10][0], p[11][0], p[12][0], p[13][0], p[14][0]),
# ('1.43', '1.47', '1.51', '1.55', '1.59', '1.63', '1.67', '1.71',
# '1.75', '1.79', '1.83', '1.87', '1.91', '1.95', '1.99'), bbox_to_anchor=(1.0, 1.0))
plt.xticks(ind)
plt.xticks(rotation=70)
ax.set_xlabel('Episodes')
ax.set_ylabel('Percent')
plt.savefig('htrg_drank0.eps', format='eps', dpi=1000, bbox_inches='tight', pad_inches=0.1)
plt.show()
```
# Updating features in a feature layer
As content publishers, you may be required to keep certain web layers up to date. As new data arrives, you may have to append new features, update existing features etc. There are a couple of different options to accomplish this:
- Method 1: editing individual features as updated datasets are available
- Method 2: overwriting feature layers altogether with updated datasets
Depending on the number of features that are updated and your workflow requirements, you may adopt either or both kinds of update mechanisms.
In this sample, we explore the first method:
**Method 1**
- [Updating feature layer by editing individual features](#Updating-feature-layer-by-editing-individual-features)
- [Publish the cities feature layer using the initial dataset](#Publish-the-cities-feature-layer-using-the-initial-dataset)
- [Apply updates from the second spreadsheet](#Apply-updates-from-the-second-spreadsheet)
- [Identifying existing features that need to be updated](#Identifying-existing-features-that-need-to-be-updated)
- [Perform updates to the existing features](#Perform-updates-to-the-existing-features)
- [Identifying new features that need to be added](#Identifying-new-features-that-need-to-be-added)
- [Adding new features](#Adding-new-features)
- [Apply edits from third spreadsheet](#Apply-edits-from-third-spreadsheet)
- [Inspecting existing fields of the feature layer](#Inspecting-existing-fields-of-the-feature-layer)
- [Preparing additional columns to add to the feature layer](#Preparing-additional-columns-to-add-to-the-feature-layer)
- [Adding additional columns to the feature layer](#Adding-additional-fields-to-the-feature-layer)
- [Adding attribute values to the new columns](#Adding-attribute-values-to-the-new-columns)
For **Method 2**, refer to the sample titled [Overwriting feature layers](https://developers.arcgis.com/python/sample-notebooks/overwriting-feature-layers)
**Note**: To run this sample, you need the ``pandas`` library in your conda environment. If you don't have the library, install it by running the following command from cmd.exe or your shell
```
conda install pandas
```
```
# Connect to the GIS
from arcgis.gis import GIS
from arcgis import features
import pandas as pd
#Access the portal using "amazing_arcgis_123" as password for the given Username.
gis = GIS("https://pythonapi.playground.esri.com/portal", "arcgis_python")
```
## Updating feature layer by editing individual features
Let us consider a scenario where we need to update a feature layer containing the capital cities of the US. We have 3 csv datasets simulating an update workflow as described below:
1. capitals_1.csv -- contains the initial, incomplete dataset
2. capitals_2.csv -- contains additional points and updates to existing points, building on top of capitals_1.csv
3. capitals_annex.csv -- an alternate table containing additional attribute information
Our goal is to update the feature layer with each of these datasets doing the necessary edit operations.
### Publish the cities feature layer using the initial dataset
```
# read the initial csv
csv1 = 'data/updating_gis_content/capitals_1.csv'
cities_df_1 = pd.read_csv(csv1)
cities_df_1.head()
# print the number of records in this csv
cities_df_1.shape
```
As you can see, this dataset only contains 19 rows or 19 capital cities. It is not the complete dataset.
Let's add this `csv` as a portal item. Adding the item creates a CSV item and uploads the original file to the portal, establishing a link between the item and the original file name. Therefore, we need a unique name for the file to guarantee it does not collide with any file of the same name that may have been uploaded by the same user. We'll use standard library modules to copy the file and give it a new name so we can add it to the portal
```
import os
import datetime as dt
import shutil
# assign variables to locations on the file system
cwd = os.path.abspath(os.getcwd())
data_pth = os.path.join(cwd, r'data/updating_gis_content/')
# create a unique timestamp string to append to the file name
now_ts = str(int(dt.datetime.now().timestamp()))
# copy the file, appending the unique string and assign it to a variable
my_csv = shutil.copyfile(os.path.abspath(csv1),
os.path.join(data_pth, 'capitals_1_' + now_ts + '.csv'))
my_csv
# add the initial csv file and publish that as a web layer
item_prop = {'title':'USA Capitals spreadsheet ' + now_ts}
csv_item = gis.content.add(item_properties=item_prop, data=my_csv)
csv_item
```
This spreadsheet has coordinates in the `latitude` and `longitude` columns, which will be used for geometries during publishing.
```
# publish the csv item into a feature layer
cities_item = csv_item.publish()
cities_item
# update the item metadata
item_prop = {'title':'USA Capitals'}
cities_item.update(item_properties = item_prop, thumbnail='data/updating_gis_content/capital_cities.png')
cities_item
```
### Apply updates from the second spreadsheet
The next set of updates has arrived and is stored in `capitals_2.csv`. We are told it contains corrections for the original set in addition to new features. We need to figure out which rows have changed, apply the `update` operation on those, and then apply the `add` operation to the new rows.
To start with, let us read the second csv file. Note, in this sample, data is stored in csv. In reality, it could be from your enterprise database or any other data source.
```
# read the second csv set
csv2 = 'data/updating_gis_content/capitals_2.csv'
cities_df_2 = pd.read_csv(csv2)
cities_df_2.head()
# get the dimensions of this csv
cities_df_2.shape
```
#### Identifying existing features that need to be updated
To identify features that need to be updated, let us read the attribute table of the published feature layer and compare that against the second csv. To read the attribute table, we perform a `query()` on the feature layer which returns us an `arcgis.feature.FeatureSet` object. Refer to the guide pages on [accessing features from feature layers](https://developers.arcgis.com/python/guide/working-with-feature-layers-and-features/) to learn more about this.
Note, at this point, we could work with the `cities_df_1` dataframe we created from the original csv file. However, in practice you may not always have the original dataset or your feature layer might have undergone edits after it was published. Hence, we query the feature layer directly.
```
cities_flayer = cities_item.layers[0]
cities_fset = cities_flayer.query() #querying without any conditions returns all the features
cities_fset.sdf.head()
```
The `city_id` column is common between both the datasets. Next, let us perform an `inner` join with the table from feature layer as left and updated csv as right. Inner joins will yield those rows that are present in both tables. Learn more about [inner joins here](https://www.w3schools.com/sql/sql_join_inner.asp).
```
overlap_rows = pd.merge(left = cities_fset.sdf, right = cities_df_2, how='inner',
on = 'city_id')
overlap_rows
```
Thus, of the 19 features in the original and the 36 features in the second csv, 4 features are common. Inspecting the table, we find certain columns are updated; for instance, Cheyenne has its coordinates corrected, Oklahoma City has its state abbreviation corrected, and similarly other cities have one of their attribute columns updated.
We could either update individual attribute values for these 4 features or update all attribute values with the latest csv. Below, we are performing the latter as it is simple and fast.
#### Perform updates to the existing features
```
features_for_update = [] #list containing corrected features
all_features = cities_fset.features
# inspect one of the features
all_features[0]
```
Note the X and Y geometry values are different from decimal degree coordinates present in Longitude and Latitude fields. To perform geometry edits, we need to project the coordinates to match that of the feature layer.
```
# get the spatial reference of the features since we need to update the geometry
cities_fset.spatial_reference
```
Below, we prepare updated geometries and attributes for each of the 4 features we determined above. We use the `arcgis.geometry` module to `project` the coordinates from geographic to projected coordinate system. The cell below prints the original `Feature` objects followed by the updated ones. If you look closely, you can find the differences.
```
from arcgis import geometry #use geometry module to project Long,Lat to X and Y
from copy import deepcopy
for city_id in overlap_rows['city_id']:
    # get the feature to be updated
    original_feature = [f for f in all_features if f.attributes['city_id'] == city_id][0]
    feature_to_be_updated = deepcopy(original_feature)
    print(str(original_feature))
    # get the matching row from csv
    matching_row = cities_df_2.where(cities_df_2.city_id == city_id).dropna()
    # get geometries in the destination coordinate system
    input_geometry = {'y': float(matching_row['latitude']),
                      'x': float(matching_row['longitude'])}
    output_geometry = geometry.project(geometries=[input_geometry],
                                       in_sr=4326,
                                       out_sr=cities_fset.spatial_reference['latestWkid'],
                                       gis=gis)
    # assign the updated values
    feature_to_be_updated.geometry = output_geometry[0]
    feature_to_be_updated.attributes['longitude'] = float(matching_row['longitude'])
    feature_to_be_updated.attributes['city_id'] = int(matching_row['city_id'])
    feature_to_be_updated.attributes['state'] = matching_row['state'].values[0]
    feature_to_be_updated.attributes['capital'] = matching_row['capital'].values[0]
    feature_to_be_updated.attributes['latitude'] = float(matching_row['latitude'])
    feature_to_be_updated.attributes['name'] = matching_row['name'].values[0]
    feature_to_be_updated.attributes['pop2000'] = int(matching_row['pop2000'])
    feature_to_be_updated.attributes['pop2007'] = int(matching_row['pop2007'])
    # add this to the list of features to be updated
    features_for_update.append(feature_to_be_updated)
    print(str(feature_to_be_updated))
    print("========================================================================")
```
We have constructed a list of features with updated values. We can use this list to perform updates on the feature layer.
```
features_for_update
```
To update the feature layer, call the `edit_features()` method of the `FeatureLayer` object and pass the list of features to the `updates` parameter:
```
cities_flayer.edit_features(updates= features_for_update)
```
We have successfully applied corrections to those features which existed in the feature layer from the initial dataset. Next let us proceed to adding new features present only in the second csv file.
#### Identifying new features that need to be added
```
#select those rows in the capitals_2.csv that do not overlap with those in capitals_1.csv
new_rows = cities_df_2[~cities_df_2['city_id'].isin(overlap_rows['city_id'])]
print(new_rows.shape)
new_rows.head()
```
Thus, of the total 36 rows in the second csv, we have determined that the other 32 rows are new and need to be appended as new features.
#### Adding new features
Next, let us compose another `list` of `Feature` objects similar to earlier, from the `new_rows` data frame.
```
features_to_be_added = []
# get a template feature object
template_feature = deepcopy(features_for_update[0])
# loop through each row and add to the list of features to be added
for row in new_rows.iterrows():
    new_feature = deepcopy(template_feature)
    print("Creating " + row[1]['name'])
    # get geometries in the destination coordinate system
    input_geometry = {'y': float(row[1]['latitude']),
                      'x': float(row[1]['longitude'])}
    output_geometry = geometry.project(geometries=[input_geometry],
                                       in_sr=4326,
                                       out_sr=cities_fset.spatial_reference['latestWkid'],
                                       gis=gis)
    # assign the updated values
    new_feature.geometry = output_geometry[0]
    new_feature.attributes['longitude'] = float(row[1]['longitude'])
    new_feature.attributes['city_id'] = int(row[1]['city_id'])
    new_feature.attributes['state'] = row[1]['state']
    new_feature.attributes['capital'] = row[1]['capital']
    new_feature.attributes['latitude'] = float(row[1]['latitude'])
    new_feature.attributes['name'] = row[1]['name']
    new_feature.attributes['pop2000'] = int(row[1]['pop2000'])
    new_feature.attributes['pop2007'] = int(row[1]['pop2007'])
    # add this to the list of features to be added
    features_to_be_added.append(new_feature)
# take a look at one of the features we created
features_to_be_added[0]
```
Thus, we have created a `list` of `Feature` objects with appropriate attributes and geometries. Next, to add these new features to the feature layer, call the `edit_features()` method of the `FeatureLayer` object and pass the list of `Feature` objects to the `adds` parameter:
```
cities_flayer.edit_features(adds = features_to_be_added)
```
Thus, we have successfully applied edits from second csv file. Next let us look at how we can apply edits from third csv file.
### Apply edits from third spreadsheet
The next set of updates have arrived and are stored in `capitals_annex.csv`. We are told it contains additional columns for each of the features that we want to add to the feature layer.
To start with, let us read the third csv file. Note in this sample, data is stored in csv. In reality, it could be from your enterprise database or any other data source.
```
# read the third csv set
csv3 = 'data/updating_gis_content/capitals_annex.csv'
cities_df_3 = pd.read_csv(csv3)
cities_df_3.head()
#find the number of rows in the third csv
cities_df_3.shape
```
The `capitals_annex.csv` does not add new features; instead, it adds additional attribute columns to existing features. It has 51 rows, which match the 19 + 32 rows from the first and second csv files. The columns `City_ID` and `NAME` are common to all 3 spreadsheets. Next let us take a look at how we can append this additional attribute information to our feature layer.
#### Inspecting existing fields of the feature layer
The `manager` property of the `FeatureLayer` object exposes a set of methods to read and update the properties and definition of feature layers.
```
#Get the existing list of fields on the cities feature layer
cities_fields = cities_flayer.manager.properties.fields
# Your feature layer may have multiple fields,
# instead of printing all, let us take a look at one of the fields:
cities_fields[1]
```
From above, we can see the representation of one of the fields. Let us loop through each of the fields and print the `name`, `alias`, `type` and `sqlType` properties
```
for field in cities_fields:
    print(f"{field.name:13}| {field.alias:13}| {field.type:25}| {field.sqlType}")
```
#### Preparing additional columns to add to the feature layer
Now that we have an idea of how the fields are defined, we can go ahead and append new fields to the layer's definition. Once we compose a list of new fields, by calling the `add_to_definition()` method we can push those changes to the feature layer. Once the feature layer's definition is updated with new fields, we can loop through each feature and add the appropriate attribute values.
To compose a list of new fields to be added, we start by making a copy of one of the fields as a template and start editing it. One convenient aspect of this example is that all new fields to be added, except one, are of the same data type: integer. With your data, this may not be the case; in such instances, you can add each field individually.
```
# get a template field
template_field = dict(deepcopy(cities_fields[1]))
template_field
```
Let us use pandas to get the list of fields that are **new** in spread sheet 3
```
# get the list of new fields to add from the third spreadsheet, that are not in spread sheets 1,2
new_field_names = list(cities_df_3.columns.difference(cities_df_1.columns))
new_field_names
```
Now loop though each new field name and create a field dictionary using the template we created earlier. Except the field titled `class` all other fields are of type `integer`.
```
fields_to_be_added = []
for new_field_name in new_field_names:
    current_field = deepcopy(template_field)
    if new_field_name.lower() == 'class':
        current_field['sqlType'] = 'sqlTypeVarchar'
        current_field['type'] = 'esriFieldTypeString'
        current_field['length'] = 8000
    current_field['name'] = new_field_name.lower()
    current_field['alias'] = new_field_name
    fields_to_be_added.append(current_field)
len(fields_to_be_added)
#inspect one of the fields
fields_to_be_added[3]
```
#### Adding additional fields to the feature layer
The list of new fields we composed can be pushed to the server by calling `add_to_definition()` method on the `manager` property.
```
cities_flayer.manager.add_to_definition({'fields':fields_to_be_added})
```
Thus, we have successfully added new fields to our feature layer. Let us verify the new columns show up:
```
new_cities_fields = cities_flayer.manager.properties.fields
len(new_cities_fields)
for field in new_cities_fields:
    print(f"{field.name:10}| {field.type}")
```
#### Adding attribute values to the new columns
Next we can loop through each row in the third csv and add the new attribute values for these newly created columns.
```
# Run a fresh query on the feature layer so it includes the new features from
# csv2 and new columns from csv3
cities_fset2 = cities_flayer.query()
cities_features2 = cities_fset2.features
```
Loop through each row in the third spreadsheet, find the corresponding feature by matching the `city_id` value and apply the attribute values for the new fields.
```
features_for_update = []
for city_id in cities_df_3['city_id']:
    # get the matching row from csv
    matching_row = cities_df_3.where(cities_df_3.city_id == city_id).dropna()
    print(str(city_id) + " Adding additional attributes for: " + matching_row['name'].values[0])
    # get the feature to be updated
    original_feature = [f for f in cities_features2 if f.attributes['city_id'] == city_id][0]
    feature_to_be_updated = deepcopy(original_feature)
    # assign the updated values
    feature_to_be_updated.attributes['class'] = matching_row['class'].values[0]
    feature_to_be_updated.attributes['white'] = int(matching_row['white'])
    feature_to_be_updated.attributes['black'] = int(matching_row['black'])
    feature_to_be_updated.attributes['ameri_es'] = int(matching_row['ameri_es'])
    feature_to_be_updated.attributes['asian'] = int(matching_row['asian'])
    feature_to_be_updated.attributes['hawn_pl'] = int(matching_row['hawn_pl'])
    feature_to_be_updated.attributes['hispanic'] = int(matching_row['hispanic'])
    feature_to_be_updated.attributes['males'] = int(matching_row['males'])
    feature_to_be_updated.attributes['females'] = int(matching_row['females'])
    # add this to the list of features to be updated
    features_for_update.append(feature_to_be_updated)
# inspect one of the features
features_for_update[-1]
# apply the edits to the feature layer
cities_flayer.edit_features(updates= features_for_update)
```
#### Verify the changes made so far
Let us run another query on the feature layer and visualize a few rows.
```
cities_fset3 = cities_flayer.query()
cities_fset3.sdf.head(5)
```
## Conclusion
In this sample, we observed an edit intensive method to keep feature layers updated. We published data from first spreadsheet as a feature layer. We then updated existing features from second spread sheet (used geometry module to project the coordinates in the process), and added new features. The third spreadsheet presented additional attribute columns which were added to the feature layer by editing its definition and then updating the features with this additional data.
This method is editing intensive and you may choose this when the number of features to edit is less or if you needed to selectively update certain features as updates come in.
An alternate method is to overwrite the feature layer altogether when you always have current information coming in. This method is explained in the sample [Overwriting feature layers](https://developers.arcgis.com/python/sample-notebooks/overwriting-feature-layers)
```
# Basic imports
import numpy as np
import matplotlib.pyplot as plt
import copy
from sklearn.preprocessing import StandardScaler
# Custom library for Censored GP
import GPy
"""
In order to use custom optimizers for the training of Censored Models it is important to have
installed the 'climin' package for optimization. Alternatively, you can consider using different
optimizers already packaged in GPy. However, the results in Section 4.2.1 were obtained through
the Adam optimizer (packaged in climin)
"""
try:
import climin
except:
!pip install climin
import climin
plt.style.use("ggplot")
```
As a working example let's replicate results for the Synthetic Dataset shown in Section 4.1.1:
### Toy data construction
_Let's define the experimental design for an effective observation/understanding of the censoring issue:_
1. _Given the underlying function $f(x) = 2 + 0.5\sin{(2x)} + \frac{x}{10}$ we sample 100 equally spaced noisy observations between $x=0$ and $x=10$._
2. _Generate censored versions of the observations defined in 1. The censoring will be characterized by (i) Number of censored points out of the 100 samples (ii) Censoring intensity_
3. _Fit both Gaussian and Censored GP to the data and observe model fit + record performances_
```
"""Data is generated as follows:
- Define latent function f(x) = 2 + 0.5*sin(2x) + x/10
- Generate observations y_obs from the latent function (assuming some small observation noise; the focus is on censoring)
- Select the points in the oscillation peaks.
- Apply manual censoring to those points, with a censoring intensity p_c sampled uniformly in [0.2, 0.3] for each selected point.
"""
np.random.seed(10)
# Define underlying function
x = np.linspace(0, 10, 100)
y_true = 0.5*np.sin(2*x) + 2 + x/10
# Generate noisy observations
y_obs = y_true + np.random.normal(loc=0, scale=0.1, size=x.shape[0])
y_cens = copy.deepcopy(y_obs)
# Mark the points on the oscillation peaks as censored and apply the p_c censoring
censoring = np.int32(0.5*np.sin(2*x) + 2 >= 2)
p_c = np.random.uniform(low=0.2, high=0.3, size=np.sum(censoring==1))
y_cens[censoring == 1] = y_obs[censoring == 1]*(1-p_c)
# Standardize the data
scaler = StandardScaler()
y_cens_sc = scaler.fit_transform(y_cens.reshape(-1,1))
y_true_sc = scaler.transform(y_true.reshape(-1,1))
plt.figure(figsize=(16,8))
plt.plot(x, y_true_sc, linestyle="--", label="True function", color='#348ABD')
plt.scatter(x[censoring==1].reshape(-1,1), y=y_cens_sc[censoring==1].reshape(-1,1), marker="x", label="Censored Observations", color='#348ABD')
plt.scatter(x[censoring==0].reshape(-1,1), y=y_cens_sc[censoring==0].reshape(-1,1), marker="o", label="Non-Censored Observations", color='#348ABD');
```
## Models
The training scheme can be summarized as follows:
1. Define the kernel
2. Build the model
3. Optimize
4. Generate predictions
For simplicity we will use an RBF kernel for both the Censored and Non-Censored GPs.
**1) Non Censored GP (NCGP)**
```
"""Define kernel"""
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
"""Build model.
GPRegression assumes Gaussian Likelihood (no censoring here).
"""
gp = GPy.models.GPRegression(X=x.reshape(-1,1), Y=y_cens_sc.reshape(-1,1), kernel=kernel)
"""Optimize
For the case of Gaussian Likelihood we will use BFGS as an optimization algorithm.
The optimization regards the kernel hyper-parameters as well as the likelihood noise.
"""
gp.optimize(optimizer="BFGS", max_iters=5000, messages=True)
"""Predict
Obtain posterior samples from the GP latent variable f. We then compute the mean and 95% Confidence Interval for our predictions.
"""
f_gaus_samples = gp.posterior_samples_f(X=x.reshape(-1,1), size=10000)
f_gaus_mean = np.mean(f_gaus_samples, axis=2)
f_gaus_025 = np.quantile(a=f_gaus_samples, q=0.025, axis=2).reshape(-1,)
f_gaus_975 = np.quantile(a=f_gaus_samples, q=0.975, axis=2).reshape(-1,)
# Plot predictions
plt.figure(figsize=(16,8))
plt.plot(x, f_gaus_mean, label="GP Prediction")
plt.fill_between(x, f_gaus_025, f_gaus_975, alpha=0.3)
plt.plot(x, y_true_sc, linestyle="--", label="True function")
plt.scatter(x[censoring==1].reshape(-1,1), y=y_cens_sc[censoring==1].reshape(-1,1), marker="x", label="Censored Observations", color='#348ABD')
plt.scatter(x[censoring==0].reshape(-1,1), y=y_cens_sc[censoring==0].reshape(-1,1), marker="o", label="Non-Censored Observations", color='#348ABD');
```
**2) Non Censored GP - Aware (NCGP-A)**
```
"""Define kernel"""
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
"""Build model.
GPRegression assumes Gaussian Likelihood (no censoring here).
"""
gp = GPy.models.GPRegression(X=x[censoring==0].reshape(-1,1), Y=y_cens_sc[censoring==0].reshape(-1,1), kernel=kernel)
"""Optimize
For the case of Gaussian Likelihood we will use BFGS as an optimization algorithm.
The optimization regards the kernel hyper-parameters as well as the likelihood noise.
"""
gp.optimize(optimizer="BFGS", max_iters=5000, messages=True)
"""Predict
Obtain posterior samples from the GP latent variable f. We then compute the mean and 95% Confidence Interval for our predictions.
"""
f_gaus_samples = gp.posterior_samples_f(X=x.reshape(-1,1), size=10000)
f_gaus_mean = np.mean(f_gaus_samples, axis=2)
f_gaus_025 = np.quantile(a=f_gaus_samples, q=0.025, axis=2).reshape(-1,)
f_gaus_975 = np.quantile(a=f_gaus_samples, q=0.975, axis=2).reshape(-1,)
# Plot predictions
plt.figure(figsize=(16,8))
plt.plot(x, f_gaus_mean, label="GP Prediction")
plt.fill_between(x, f_gaus_025, f_gaus_975, alpha=0.3)
# plt.plot(X, y_pred_main, label="True function", linestyle="--")
plt.plot(x, y_true_sc, linestyle="--", label="True function")
plt.scatter(x[censoring==1].reshape(-1,1), y=y_cens_sc[censoring==1].reshape(-1,1), marker="x", label="Censored Observations", color='#348ABD')
plt.scatter(x[censoring==0].reshape(-1,1), y=y_cens_sc[censoring==0].reshape(-1,1), marker="o", label="Non-Censored Observations", color='#348ABD');
```
**3) Censored GP (CGP)**
```
"""Define kernel"""
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
"""Build model.
GPCensoredRegression assumes the Censored Likelihood as defined in Section 3.
The GPCensoredRegression model requires two additional arguments compared to the GPRegression shown above:
- censoring: a binary vector indicating which observations in the input data are to be considered as censored (1 for censored, 0 for non censored)
- likelihood: since we are moving away from a Gaussian likelihood setting we need to use the custom 'CensoredGaussian Likelihood' (which also requires the 'censoring' vector)
"""
gp = GPy.models.GPCensoredRegression(X=x.reshape(-1,1), Y=y_cens_sc.reshape(-1,1), censoring=censoring, kernel=kernel, likelihood=GPy.likelihoods.CensoredGaussian(censoring=censoring, variance=0.1))
"""Optimize
For the case of Censored Likelihood we will use Adam as an optimization algorithm.
The optimization regards the kernel hyper-parameters as well as the likelihood noise.
"""
gp.optimize(optimizer="adam", max_iters=2500, messages=True)
"""Predict
Obtain posterior samples from the GP latent variable f. We then compute the mean and 95% Confidence Interval for our predictions.
"""
f_gaus_samples = gp.posterior_samples_f(X=x.reshape(-1,1), size=10000)
f_gaus_mean = np.mean(f_gaus_samples, axis=2)
f_gaus_025 = np.quantile(a=f_gaus_samples, q=0.025, axis=2).reshape(-1,)
f_gaus_975 = np.quantile(a=f_gaus_samples, q=0.975, axis=2).reshape(-1,)
# Plot predictions
plt.figure(figsize=(16,8))
plt.plot(x, f_gaus_mean, label="GP Prediction")
plt.fill_between(x, f_gaus_025, f_gaus_975, alpha=0.3)
# plt.plot(X, y_pred_main, label="True function", linestyle="--")
plt.plot(x, y_true_sc, linestyle="--", label="True function")
plt.scatter(x[censoring==1].reshape(-1,1), y=y_cens_sc[censoring==1].reshape(-1,1), marker="x", label="Censored Observations", color='#348ABD')
plt.scatter(x[censoring==0].reshape(-1,1), y=y_cens_sc[censoring==0].reshape(-1,1), marker="o", label="Non-Censored Observations", color='#348ABD');
```
| github_jupyter |
# Developing FEM in 1D
Copyright (C) 2020 Andreas Kloeckner
<details>
<summary>MIT License</summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</details>
```
import numpy as np
import numpy.linalg as la
import scipy as sp
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import scipy.sparse.linalg as sla
```
## Boundary Value Problem
$$
\begin{align*}
- u'' &= f(x)\\
u(0) = u(1) &= 0
\end{align*}
$$
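For reference (a step the notebook leaves implicit): multiplying by a test function $v$ with $v(0)=v(1)=0$ and integrating by parts gives the weak form that the element matrices below discretize,
$$
\int_0^1 u'(x)\, v'(x)\, dx = \int_0^1 f(x)\, v(x)\, dx.
$$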
```
if 1:
def f(x):
return 2+0*x
def uexact(x):
return x*(1-x)
elif 1:
wavenr = 5
def uexact(x):
return np.sin(wavenr * np.pi * x)
def f(x):
return (wavenr*np.pi)**2 * uexact(x)
else:
def f(x):
return 100*np.exp(-(x-0.5)**2 / 0.001)
uexact = None
```
## Grid Setup
`V` is a list of vertices. `E` is a list of elements (segments).
```
# number of points, crank me up
nx = 6
V = np.linspace(0,1,nx)
E = np.zeros((nx-1,2), dtype=int)
E[:,0] = np.arange(0,nx-1)
E[:,1] = np.arange(1,nx)
h = V[1] - V[0] # mesh spacing
if len(E) < 10:
print(E)
```
## COOrdinate Matrix Semantics
*Note:* What happened to the duplicated entry?
```
rows = [0,2,3,4,4]
cols = [4,1,2,4,4]
vals = [7,7,7,5,50]
sample_mat = sparse.coo_matrix((vals, (rows, cols))).toarray()
sample_mat
```
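A short answer to the question above, added for reference: SciPy keeps duplicate `(row, col)` pairs and sums them when the COO matrix is materialized, which is exactly the behavior element-by-element assembly relies on below. A quick check (not part of the original notebook):
```
assert sample_mat[4, 4] == 5 + 50  # the two (4, 4) entries are summed
```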
## Reference Matrix
Basis functions (on [0,1]):
$$
\begin{align*}
\phi_1(x) &= 1-x,\\
\phi_2(x) &= x,
\end{align*}
$$
For both degrees of freedom in the element, compute:
$$
\hat A_{i,j} = \int \phi_i'(x) \phi_j'(x) dx
$$
```
#clear
Aref = np.array([
[1, -1],
[-1, 1]
])
```
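A quick check of these entries, added for reference: on the reference element the basis derivatives are constant, $\phi_1'(x) = -1$ and $\phi_2'(x) = 1$, so
$$
\hat A_{1,1} = \hat A_{2,2} = \int_0^1 (\pm 1)^2\, dx = 1,
\qquad
\hat A_{1,2} = \hat A_{2,1} = \int_0^1 (-1)(1)\, dx = -1.
$$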
## Assembly Helper
```
class MatrixBuilder:
def __init__(self):
self.rows = []
self.cols = []
self.vals = []
def add(self, rows, cols, submat):
for i, ri in enumerate(rows):
for j, cj in enumerate(cols):
self.rows.append(ri)
self.cols.append(cj)
self.vals.append(submat[i, j])
def coo_matrix(self):
return sparse.coo_matrix((self.vals, (self.rows, self.cols)))
```
## Assembly of the Linear System
Assemble $A$:
```
#clear
a_builder = MatrixBuilder()
for va, vb in E:
a_builder.add(
[va, vb], [va, vb],
h * 1/h * 1/h * Aref)
A = a_builder.coo_matrix()
```
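The factor `h * 1/h * 1/h` above comes from mapping the reference element to a physical element of length $h$ (a short derivation added for reference): the change of variables contributes a factor $h$ from $dx$, and each physical basis derivative picks up a factor $1/h$, so the element matrix is
$$
A^{\mathrm{elem}}_{i,j} = \int_{x_a}^{x_a+h} \phi_i'(x)\,\phi_j'(x)\, dx
= h \cdot \frac{1}{h} \cdot \frac{1}{h}\, \hat A_{i,j}
= \frac{1}{h}\, \hat A_{i,j}.
$$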
For both degrees of freedom involved in each element, assemble the RHS vector:
$$
b_i=\int_E f(x) \phi_i(x) dx
$$
```
#clear
b = np.zeros(nx)
for va, vb in E:
b[va] += f(V[va]) * h/2
b[vb] += f(V[vb]) * h/2
```
Examine the matrix.
```
print(A.toarray()*h)
```
Notice anything?
```
#clear
if len(E) < 10:
u, s, vt = la.svd(A.toarray())
print(s)
print(vt[-1])
```
## Boundary Conditions
Add boundary conditions.
```
#clear
for i in range(A.nnz):
if A.row[i] in [0, nx-1]:
A.data[i] = 1 if A.row[i] == A.col[i] else 0
b[0] = 0
b[nx-1] = 0
A = A.tocsr()
```
Examine the matrix after applying BCs:
```
print(A.toarray()*h)
```
## Computing the Solution
Plot the RHS $f$.
```
if len(E) < 10:
plotmode = "o-"
else:
plotmode = "-"
plt.plot(V, f(V), plotmode)
```
Solve and plot the solution.
```
u = sla.spsolve(A, b)
plt.plot(V, u, plotmode)
if uexact is not None:
plt.plot(V, uexact(V), plotmode)
if uexact is not None:
u_ex_h = uexact(V)
print(la.norm(u - u_ex_h)/la.norm(u_ex_h))
```
| github_jupyter |
# HyperLearning AI - Introduction to Python
An introductory course to the Python 3 programming language, with a curriculum aligned to the Certified Associate in Python Programming (PCAP) examination syllabus (PCAP-31-02).<br/>
https://knowledgebase.hyperlearning.ai/courses/introduction-to-python
## 08. Classes and Objects Part 1
https://knowledgebase.hyperlearning.ai/en/courses/introduction-to-python/modules/8/classes-and-objects-part-1
In this module we will introduce the object oriented programming (OOP) paradigm - a means to model the world and our software applications as objects that interact with each other. Supported by hands-on examples in Python, we will explore the fundamental concepts in object oriented programming, including:
* **Classes** - classes, superclasses, subclasses, inheritance, and creating objects
* **Class Attributes** - class variables, instance variables, managing attributes and explicit constructor invocation
* **Class Methods** - defining and using class methods, the self parameter, the init method and the str method
* **Inheritance** - inheritance, overriding, single inheritance and multiple inheritance
* **Constructors** - writing and using constructors
* **Introspection** - dict, name, module and bases properties, and examining class structure
### 1. Programming Paradigms
#### 1.1. Imperative Programming
```
# Write a program using imperative programming to calculate the sum of a given list of numbers
sum = 0
my_numbers = [1, 2, 3, 4, 5]
for number in my_numbers:
sum += number
print(f'The sum of the numbers in {my_numbers} is: {sum}')
```
#### 1.2. Functional Programming
```
from functools import reduce
# Write a program using functional programming to calculate the sum of a given list of numbers
def add(x, y):
return x + y
sum = reduce(add, my_numbers)
print(sum)
# Write a program using functional programming and a lambda function to calculate the sum of a given list of numbers
sum = reduce(lambda x, y: x + y, my_numbers)
print(sum)
```
### 2. OOP Fundamentals
#### 2.2.1. Creating Objects
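The `Car` class used below lives in the course's `examples/formulapy/example.py` module, which is not reproduced in this notebook. To make the following cells easier to follow, here is a rough, hypothetical sketch of what such a class could look like; the real course module will differ in detail, and only the attribute names are taken from the constructor calls used below.
```
# Hypothetical sketch of the Car class from examples/formulapy/example.py (not the actual course file)
class Car:
    def __init__(self, number_doors, registration_number, make, model,
                 year_manufactured, maximum_speed, acceleration_rate, deceleration_rate):
        self.number_doors = number_doors
        self.registration_number = registration_number
        self.make = make
        self.model = model
        self.year_manufactured = year_manufactured
        self.maximum_speed = maximum_speed
        self.acceleration_rate = acceleration_rate
        self.deceleration_rate = deceleration_rate
        self.current_speed = 0

    def __str__(self):
        # String representation used by print()
        return f'{self.make} {self.model} ({self.year_manufactured}): {self.__dict__}'

    def accelerate(self):
        # Increase the current speed, capped at the maximum speed
        self.current_speed = min(self.current_speed + self.acceleration_rate, self.maximum_speed)
        return self.current_speed
```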
```
# Update sys.path so that it can find our example module
import sys
sys.path.append('examples/formulapy')
# Import our example module containing our Car class definition
from example import Car
# Try to create a new car object without the required arguments
my_car = Car()
# Create a new car object
mercedes_f1 = Car(number_doors = 0,
registration_number = 'MERC 123',
make = 'Mercedes',
model = 'AMG F1 W10 EQ Power+',
year_manufactured = 2019,
maximum_speed = 200,
acceleration_rate = 20,
deceleration_rate = 50)
# Print the type of object that this is i.e. the class that was used to instantiate this object
print(type(mercedes_f1))
# Print a string representation of the car object
print(mercedes_f1)
# Create another new car object
ferrari_f1 = Car(number_doors = 0,
registration_number = 'MON 888',
make = 'Ferrari',
model = 'SF1000',
year_manufactured = 2020,
maximum_speed = 200,
acceleration_rate = 15,
deceleration_rate = 60)
# Print a string representation of the car object
print(ferrari_f1)
```
#### 2.2.2. Accessing Attributes
```
# Access and display the maximum speed of the Mercedes car object
print(mercedes_f1.maximum_speed)
# Access and display the registration number of the Ferrari car object
print(ferrari_f1.registration_number)
```
#### 2.2.3. Modifying Attributes
```
# Modify the maximum speed of the Mercedes car object
mercedes_f1.maximum_speed = 220
print(mercedes_f1)
# Modify the registration number of the Ferrari car object
ferrari_f1.registration_number = 'SCUD 888'
print(ferrari_f1)
```
#### 2.2.4. Deleting Attributes
```
# Delete the number of doors attribute belonging to the Mercedes car object
print(mercedes_f1)
print(mercedes_f1.number_doors)
del mercedes_f1.number_doors
print(mercedes_f1)
print(mercedes_f1.number_doors)
```
#### 2.2.5. Deleting Objects
```
# Create a new car object
redbull_f1 = Car(number_doors = 0,
registration_number = 'RB 999',
make = 'Red Bull',
model = 'RB9',
year_manufactured = 2013,
maximum_speed = 210,
acceleration_rate = 18,
deceleration_rate = 60)
# Print a string representation of the car object
print(redbull_f1)
# Delete the previously created car object
del redbull_f1
# Try to print a string representation of the deleted car object
print(redbull_f1)
```
#### 2.2.6. Introspection
```
# Access an object's attribute references
print(mercedes_f1.__dict__)
# Access a class's name
print(Car.__name__)
# Access an object's class name (or type name) from which it was instantiated
print(mercedes_f1.__class__)
print(mercedes_f1.__class__.__name__)
# Access the name of the module that a class was defined in
print(Car.__module__)
print(mercedes_f1.__class__.__module__)
# Access a class's base classes
print(Car.__bases__)
print(mercedes_f1.__class__.__bases__)
```
#### 2.2.7. Adding Attributes
```
# Add a completely new attribute to one of our car objects that was not defined in the Car class definition
print(mercedes_f1)
setattr(mercedes_f1, 'height_mm', 950)
setattr(mercedes_f1, 'width_mm', 2000)
setattr(mercedes_f1, 'weight_kg', 743)
setattr(mercedes_f1, 'power_kw', 750)
print(mercedes_f1)
```
#### 2.2.8. Invoking Methods
```
# Invoke the Car.accelerate() method on an existing car object
ferrari_f1.accelerate()
```
#### 2.3. Inheritance
#### 2.3.3. Super Function
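As before, the `model` module under `examples/formulapy` is not included in this notebook. The heavily simplified, hypothetical sketch below only illustrates how `super()` can chain constructors in such a hierarchy; the real course module defines many more attributes and methods (e.g. `last_mot_date`, `number_wheels`, `avoid_collision()`).
```
# Hypothetical sketch of a Vehicle -> RoadVehicle -> Car chain (not the actual course file)
class Vehicle:
    def __init__(self, make, model, maximum_speed_mph, **kwargs):
        # Extra keyword arguments are accepted and ignored to keep the sketch short
        self.make = make
        self.model = model
        self.maximum_speed_mph = maximum_speed_mph
        self.current_speed_mph = 0

    def accelerate(self):
        # Defined once here and inherited by every subclass
        self.current_speed_mph = min(self.current_speed_mph + 10, self.maximum_speed_mph)
        return self.current_speed_mph

class RoadVehicle(Vehicle):
    def __init__(self, registration_number, **kwargs):
        super().__init__(**kwargs)   # delegate the shared attributes to Vehicle
        self.registration_number = registration_number
        self.last_mot_date = None

class Car(RoadVehicle):
    def __init__(self, number_doors=4, **kwargs):
        super().__init__(**kwargs)   # walks up the MRO: RoadVehicle, then Vehicle
        self.number_doors = number_doors
        self.number_wheels = 4
```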
```
# Update sys.path so that it can find our formulapy module
import sys
sys.path.append('examples/formulapy')
# Import our new vehicle domain data model module
from model import Aircraft, Car, RoadVehicle, Vehicle
# Create a new car object
mclaren_p1 = Car(number_engines = 1,
engine_horsepower_kw = 673,
chassis_height_mm = 1188,
chassis_width_mm = 1946,
chassis_depth_mm = 4588,
make = 'McLaren',
model = 'P1',
year_manufactured = 2013,
maximum_speed_mph = 217,
acceleration_rate_mps = 20,
deceleration_rate_mps = 50,
registration_number = 'MCL P1')
# Print a string representation of the car object
print(mclaren_p1)
# Access an attribute that is set in the Vehicle superclass
print(mclaren_p1.maximum_speed_mph)
# Access an attribute that is set in the RoadVehicle superclass
print(mclaren_p1.last_mot_date)
# Access an attribute that is set in the Car subclass
print(mclaren_p1.number_wheels)
# Invoke a method that is defined in the Vehicle superclass
print(mclaren_p1.accelerate())
# Invoke a method that is defined in the Car class that itself invokes methods defined in the Vehicle superclass
print(mclaren_p1.avoid_collision())
```
#### 2.3.4. Overriding Methods
```
# Create a new aircraft object
airbus_a380 = Aircraft(number_engines = 4,
engine_horsepower_kw = 670,
chassis_height_mm = 24100,
chassis_width_mm = 79800,
chassis_depth_mm = 72700,
make = 'Airbus',
model = 'A380',
year_manufactured = 2005,
maximum_speed_mph = 736,
acceleration_rate_mps = 30,
deceleration_rate_mps = 30,
minimum_speed_mph = 150)
# Print a string representation of the aircraft object
print(airbus_a380)
# Invoke a method that is defined in the Vehicle superclass
print(airbus_a380.accelerate())
# Invoke a method that is defined in the Aircraft class that overrides a method defined in the Vehicle superclass
print(airbus_a380.brake())
```
#### 2.3.6. Method Resolution Order
```
# Define a simple superclass
class ClassA:
var1 = 'A'
def __init__(self, a1, a2, a3):
self.attr1 = a1
self.attr2 = a2
self.attr3 = a3
def method1(self):
return self.attr1 + self.attr2
# Define a simple class derived from ClassA
class ClassB(ClassA):
var1 = 'B'
def __init__(self, b1, b2, b3):
super().__init__(b1, b2, b3)
self.attr4 = b2 * b3
def method1(self):
return self.attr1 * self.attr2
# Define another simple class derived from ClassA
class ClassC(ClassA):
def __init__(self, c1, c2, c3):
super().__init__(c1, c2, c3)
self.attr4 = c2 - c3
def method1(self):
return self.attr1 - self.attr2
# Define a class that is derived from both ClassB and ClassC
class ClassD(ClassB, ClassC):
pass
# Create and test an instance of ClassB
class_b_object = ClassB(1, 2, 3)
print(f'ClassB attr1: {class_b_object.attr1}')
print(f'ClassB attr2: {class_b_object.attr2}')
print(f'ClassB attr3: {class_b_object.attr3}')
print(f'ClassB attr4: {class_b_object.attr4}')
print(f'ClassB var1: {class_b_object.var1}')
print(f'ClassB method1(): {class_b_object.method1()}')
# Create and test an instance of ClassC
class_c_object = ClassC(1, 2, 3)
print(f'ClassC attr1: {class_c_object.attr1}')
print(f'ClassC attr2: {class_c_object.attr2}')
print(f'ClassC attr3: {class_c_object.attr3}')
print(f'ClassC attr4: {class_c_object.attr4}')
print(f'ClassC var1: {class_c_object.var1}')
print(f'ClassC method1(): {class_c_object.method1()}')
# Create and test an instance of ClassD
class_d_object = ClassD(1, 2, 3)
print(f'ClassD attr1: {class_d_object.attr1}')
print(f'ClassD attr2: {class_d_object.attr2}')
print(f'ClassD attr3: {class_d_object.attr3}')
print(f'ClassD attr4: {class_d_object.attr4}')
print(f'ClassD var1: {class_d_object.var1}')
print(f'ClassD method1(): {class_d_object.method1()}')
# Access the resolved MRO for each class
print(f'ClassA MRO:\n{ClassA.__mro__}\n')
print(f'ClassB MRO:\n{ClassB.__mro__}\n')
print(f'ClassC MRO:\n{ClassC.__mro__}\n')
print(f'ClassD MRO:\n{ClassD.__mro__}\n')
```
#### 2.3.7. Finding Subclasses
```
# List the names of all the subclasses directly derived from the Vehicle class
print([cls.__name__ for cls in Vehicle.__subclasses__()])
# List all the subclasses directly derived from the Vehicle class
print(Vehicle.__subclasses__())
# Create a function to list all direct and indirect subclasses of a given class
def find_all_subclasses(cls):
return set(cls.__subclasses__()).union(
[subclass for c in cls.__subclasses__() for subclass in find_all_subclasses(c)])
# List all the subclasses that are both directly and indirectly derived from the Vehicle class
print(find_all_subclasses(Vehicle))
```
#### 2.4. Constructors
#### 2.4.2. Non-Parameterized Constructor
```
# Define a class with a non-parameterized constructor
class ClassX:
def __init__(self):
print(f'Creating an instance of {type(self).__name__}...')
def sum(self, a, b):
return a + b
# Create a new instance of ClassX
class_x_object = ClassX()
# Invoke a method on the new object of type ClassX
print(class_x_object.sum(100, 38))
```
#### 2.4.3. Default Constructor
```
# Define a class without an explicit constructor
class ClassY:
def product(self, a, b):
return a * b
# Create a new instance of ClassY
class_y_object = ClassY()
# Invoke a method on the new object of type ClassY
print(class_y_object.product(13, 20))
```
#### 2.4.4. Explicit Invocation
```
# Define a class with a constructor that explicitly invokes the constructor of another class
class ClassZ:
def __init__(self):
ClassX.__init__(self)
def modulus(self, a, b):
return a % b
# Create a new instance of ClassZ
class_z_object = ClassZ()
# Invoke a method on the new object of type ClassZ
print(class_z_object.modulus(103, 4))
```
#### 2.5. Name Mangling
```
# Define a class that contains 'private' variables
class User:
def __init__(self, fname, lname, email, dob, postal_address):
self.fname = fname
self.lname = lname
self.email = email
self.__dob = dob
self.__postal_address = postal_address
def display_dob(self):
print(self.__dob)
def display_postal_address(self):
print(self.__postal_address)
# Create a new user
barack_obama_user = User('Barack', 'Obama', 'bobama@whitehouse.gov', '04/08/1961', '1600 Pennsylvania Avenue')
# Display the user's private dob using the display_dob() method
barack_obama_user.display_dob()
# Display the user's private dob by directly accessing the dob attribute using dot notation
print(barack_obama_user.dob)
# Display the user's private dob indirectly by accessing the mangled dob attribute
print(barack_obama_user._User__dob)
```
#### 2.6. Common Functions
```
# Create a new aircraft object
airbus_a380 = Aircraft(number_engines = 4,
engine_horsepower_kw = 670,
chassis_height_mm = 24100,
chassis_width_mm = 79800,
chassis_depth_mm = 72700,
make = 'Airbus',
model = 'A380',
year_manufactured = 2005,
maximum_speed_mph = 736,
acceleration_rate_mps = 30,
deceleration_rate_mps = 30,
minimum_speed_mph = 150)
# hasattr()
print( hasattr(airbus_a380, 'engine_horsepower_kw') )
print( hasattr(airbus_a380, 'last_mot_date') )
print()
# type()
print( type(airbus_a380) )
print( type([1, 2, 3]) )
print( type(('a', 'b', 'c')) )
print( type({1: 'a', 2: 'b', 3: 'c'}) )
print( type(Vehicle) )
print( type(str) )
print( type(int) )
print( type(str) )
print( type('Hello World!') )
print( type(38) )
print( type(0.5) )
print( type(True) )
print( type(False) )
print( type(47 & 55) )
print( type(None) )
print()
# issubclass()
print( issubclass(type(airbus_a380), Vehicle) )
print( issubclass(Aircraft, Vehicle) )
print( issubclass(Vehicle, Aircraft) )
print( issubclass(Vehicle, Vehicle) )
print( issubclass(Vehicle, object) )
print( issubclass(str, object) )
print( issubclass(list, object) )
print( issubclass(tuple, object) )
print( issubclass(dict, object) )
print( issubclass(type(ClassD(1, 2, 3)), ClassA) )
print( issubclass(type(None), ClassA) )
print( issubclass(type(None), object) )
print()
# isinstance()
print( isinstance(airbus_a380, Aircraft) )
print( isinstance(airbus_a380, Vehicle) )
print( isinstance(airbus_a380, object) )
print( isinstance(airbus_a380, Car) )
print( isinstance(None, object) )
```
| github_jupyter |
```
import glob
import time
import pandas as pd
import numpy as np
import json
pushd ../backend/vary_data/
df = pd.read_csv("print_macrobenchmark.csv")
trial = []
for nb_name in glob.glob("print/print_*_*_*_output.ipynb"):
snb = nb_name.split("_")
if len(snb)==5:
_, condition, nCols, _,_ = nb_name.split("_")
with open(nb_name) as json_file:
data = json.load(json_file)
for cell in data['cells']:
if (len(cell["source"])>0 and cell["source"][0]=="df"):
duration = cell["metadata"]["papermill"]["duration"]
#print(nb_name,nCols,condition, duration)
trial.append([condition, int(nCols),duration])
trial = pd.DataFrame(trial,columns=["condition","nCols","time"])
popd
trial.to_csv("plot_data/synthetic_ncols.csv")
import pandas as pd
import altair as alt
# trial.groupby(["condition","nCols"]).mean()
# df = pd.read_csv("print_macrobenchmark.csv")
overall = trial.groupby(["condition","nCols"]).sum().reset_index()
overall = overall[overall["time"]!=0]
alt.Chart(overall).mark_bar().encode(
x = "condition",
y = alt.Y("time",scale=alt.Scale(type='log'),title="log(total time)"),
column=alt.Column("nCols",type="ordinal"),
color="condition"
)
trial = trial.replace({"o1":"wflow","o1o2":"wflow+prune","o1o2o3":"all"})
overall = trial.groupby(["condition","nCols"]).sum().reset_index()
overall = overall[overall["time"]!=0]
chart = alt.Chart(overall).mark_line().encode(
x=alt.Column("nCols",type="quantitative",title="Number of Columns"),
y = alt.Y("time",scale=alt.Scale(type='log',domain=(0.5,3000)),title="log(single print df runtime) [s]"),
color=alt.X("condition",legend=alt.Legend(orient="top-left",padding=-10)),
tooltip=["time"]
)
chart.properties(width=200,height=200).interactive()
# trial.to_csv("effect_of_columns.csv",index=None)
exp = df[df["condition"]=="all"]
exp = exp.dropna()
y = exp["time"]
X = np.array(exp["nCols"])
from scipy.optimize import curve_fit
def func(x, a,b,c):
return a +b *x**c
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
from matplotlib.pylab import plt
plt.plot(exp["nCols"],exp["time"],'o')
plt.plot(exp["nCols"],y_pred,'.')
exp = df[df["condition"]=="wflow"]
y = exp["time"]
X = np.array(exp["nCols"])
from scipy.optimize import curve_fit
def func(x, a,b,c):
return a +b *x**c
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
plt.plot(exp["nCols"],exp["time"],'o')
plt.plot(exp["nCols"],y_pred,'.')
exp = df[df["condition"]=="wflow+prune"]
# exp = exp.dropna()
# exp = exp[exp["nCols"]>15]
y = exp["time"]
X = np.array(exp["nCols"])
from scipy.optimize import curve_fit
def func(x, a,b,c):
return a +b *x**c
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
plt.plot(exp["nCols"],exp["time"],'o')
plt.plot(exp["nCols"],y_pred,'.')
```
| github_jupyter |
# Analyzing Military Spending and UFO Reports
### A Data Analysis Project (Work in Progress)
## Author
### Adam C. Sanders
## Introduction
It is a fact that UFOs (i.e., unidentified flying objects) have become a topic of interest for many people. At first glance, this interest seems to have grown over the years. There are now numerous television shows, movies, and video games that focus on UFOs. UFOs have also been discussed and debated on news outlets and social media sites.
There are several interesting questions we can explore. Of course, there are epistemic questions regarding the justification and rationality of belief in UFOs. If an agent, S, at time t1, believes they have seen a UFO, then is S's belief at t1 rational? This is a contextual question that depends on S's justification at t1. There are also questions regarding the nature of UFOs. For instance, are instances of UFO sightings (1) cases of mistaken identity or (2) cases involving extraterrestrial spacecraft? As for (1), these cases can break down into several other categories. One might, for example, mistake the occurrence of some natural phenomenon for a UFO (e.g., a unique weather pattern). In other cases, one might mistake exotic aircraft for UFOs (e.g., experimental military aircraft of terrestrial origins).
For the purpose of this project, I will not focus on matters of epistemic justification for a given agent's belief in UFOs. I take it for granted that a belief or degree of belief is justified insofar as the target proposition is sufficiently justified. I will also avoid focusing on the accuracy of UFO sightings. Instead, I will investigate whether there are interesting relationships that hold between U. S. military spending and the frequency of UFO reports between 2004 and 2013. One might think that when there is an increase in military spending, there is an increase in the development and application of new military technology. This new technology would also include the development of new aircraft. So, it might be the case that an increase in new military technology could lead to an increase in unexplained sightings that can subsequently be reported as UFOs. This is the issue that I will address.
## Questions
I will investigate several questions:
#### (1) Has there been an increase in UFO reports in the United States between 2004 and 2013?
#### (2) Has there been an increase in military spending in the United States between 2004 and 2013?
#### (3) Is there any correlation between the frequency of UFO reports and military spending?
## Hypothesis
#### (1) There is a positive correlation between military spending and UFO reports in the United States. As U. S. military spending increases, the frequency of UFO reports increases.
## Methodology and Tools
#### Languages and Libraries
I will use Python to analyze and visualize information from several datasets. The specific libraries I will use include Pandas, Matplotlib, Numpy, and Seaborn.
#### Data
I will pull information from several datasets on Kaggle. These datasets cover military spending by country, world happiness scores, and UFO reports. The acknowledgement section provides further information.
#### Analysis
I will perform some basic data visualization and exploratory analysis on two select datasets. I will also test for correlation between select variables.
## Acknowledgements
#### Military Spending of Countries dataset
STOCKHOLM INTERNATIONAL PEACE RESEARCH INSTITUTE (SIPRI) and
The World Bank Database.
Accessed from Kaggle https://www.kaggle.com/nitinsss/military-expenditure-of-countries-19602019
#### UFO Sightings
National UFO Reporting Center (NUFORC)
https://github.com/planetsig/ufo-reports.
Accessed from Kaggle https://www.kaggle.com/NUFORC/ufo-sightings
## Caveats
For my purpose, I take several propositions for granted. First, it is assumed that there has been an increase in interest in UFOs. This assumption is based on the apparent increase in the number of television shows and movies that have focused on UFO activities. Second, it is assumed that the number of UFO reports referenced in the data does not represent the total number of UFO sightings. Many UFO sightings are never reported, and therefore the total number of UFO sightings is expected to be higher than the number of UFO reports. In addition to these assumptions, it is important to keep in mind that this project is being continuously developed over time and is not in a finished state.
# Table of Contents
### Part 1: Importing Libraries and Data
### Part 2: Exploring Military Spending in the U. S.
### Part 3: Preliminary Analysis of U. S. Spending
### Part 4: Exploring UFO Reports in the U. S.
### Part 5: Preliminary Analysis of UFO Reports
### Part 6: The Relationship Between U. S. Military Spending and UFO Reports
### Part 7: Discussion
# Part 1: Importing Libraries and Data
```
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# import military spending dataset
military = pd.read_csv("C:/Users/Sande/Desktop/Datasets/MilitaryExpenditure.csv")
# data check of military spending by displaying the first 5 rows
military.head()
# import UFO dataset
ufodf = pd.read_csv("C:/Users/Sande/Desktop/Datasets/UFOSightings.csv")
# data check of UFO reports by displaying first 5 rows
ufodf.head()
```
# Part 2: Exploring Military Spending in the U. S.
In this section I will slice the military spending data to obtain a dataframe that consists of only U. S. military spending data from 2004 to 2013. I will then perform exploratory data analysis.
```
# Create a subset of data that includes only U. S. spending
us_military = military[military["Name"] == "United States"]
us_military.head()
# Summary statistics of U. S. military spending from 1960 to 2018
us_military.describe()
# Create a dataframe that contains data of U. S. military spending between 2004 and 2013
us_military_df = us_military[["2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013"]]
print(us_military_df)
# Transpose the dates to rows.
us_military_df = us_military_df.T
us_military_df
# Remove the old country ID and attach a new column name
us_military_df.rename(columns = {249: "US Spending"}, inplace = True)
us_military_df
```
# Part 3: Preliminary Analysis of U. S. Spending
### Sum, Average, Min, Max, and Visualizing the Data
```
# The sum of U. S. military spending between 2004 and 2013
military_total = us_military_df["US Spending"].sum()
print(military_total)
# The average amount of military spending
military_avg = military_total / 10
print(military_avg)
# The minimum amount of U. S. spending
us_minimum = us_military_df["US Spending"].min()
print(us_minimum)
# The maximum amount of U. S. spending
us_maximum = us_military_df["US Spending"].max()
print(us_maximum)
# Visualizing U. S. military spending between 2004 and 2013
spending_list = us_military_df["US Spending"]
years = [2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013]
plt.plot(years, spending_list)
plt.xlabel("Years")
plt.ylabel("U. S. Military Spending")
plt.title("U. S. Military Spending Between 2004 and 2013")
plt.show()
```
# Part 4: Exploring UFO Reports in the U. S.
In this part I will create a dataframe of the number of UFO reports between 2004 and 2013.
```
# Prepare the datetime column.
ufodf["datetime"] = pd.to_datetime(ufodf.datetime)
# Creating a dataframe of UFO reports in 2004, and then checking the data by displaying the first 5 rows.
ufo2004 = ufodf.loc[(ufodf.datetime >= "1/1/2004") & (ufodf.datetime <= "12/31/2004"), :]
ufo2004.head()
# Determine the number of unique UFO reports.
ufo2004.count()
# Given the information above, we can use the datetime column to determine the number of unique UFO reports for 2004.
# I will then assign the number of UFO reports to a variable, and then display the variable.
# Note that I will use this technique for all other years.
report2004 = ufo2004.datetime.count()
print(report2004)
# Next I'll set up a dataframe of UFO reports in 2005. I'll then check the data.
ufo2005 = ufodf.loc[(ufodf.datetime >= "1/1/2005") & (ufodf.datetime <= "12/31/2005"), :]
ufo2005.head(3)
# I'll assign the unique number of UFO reports in 2005 to a variable.
report2005 = ufo2005.datetime.count()
print(report2005)
# Create a dataframe of UFO reports in 2006.
ufo2006 = ufodf.loc[(ufodf.datetime >= "1/1/2006") & (ufodf.datetime <= "12/31/2006"), :]
ufo2006.head(3)
# Assign the unique number of UFO reports in 2006 to a variable.
report2006 = ufo2006.datetime.count()
print(report2006)
# Create a dataframe of UFO reports in 2007
ufo2007 = ufodf.loc[(ufodf.datetime >= "1/1/2007") & (ufodf.datetime <= "12/31/2007"), :]
ufo2007.head(3)
# Assign the unique number of UFO reports in 2007 to a variable.
report2007 = ufo2007.datetime.count()
print(report2007)
# Create a dataframe of UFO reports in 2008
ufo2008 = ufodf.loc[(ufodf.datetime >= "1/1/2008") & (ufodf.datetime <= "12/31/2008"), :]
ufo2008.head(3)
# Assign the unique number of UFO reports in 2008 to a variable.
report2008 = ufo2008.datetime.count()
print(report2008)
# Create a dataframe of UFO reports in 2009
ufo2009 = ufodf.loc[(ufodf.datetime >= "1/1/2009") & (ufodf.datetime <= "12/31/2009"), :]
ufo2009.head(3)
# Assign the unique number of UFO reports in 2009 to a variable
report2009 = ufo2009.datetime.count()
print(report2009)
# Create a dataframe of UFO reports in 2010
ufo2010 = ufodf.loc[(ufodf.datetime >= "1/1/2010") & (ufodf.datetime <= "12/31/2010"), :]
ufo2010.head(3)
# Assign the unique number of UFO reports in 2010 to a variable
report2010 = ufo2010.datetime.count()
print(report2010)
# Create a dataframe of UFO reports in 2011
ufo2011 = ufodf.loc[(ufodf.datetime >= "1/1/2011") & (ufodf.datetime <= "12/31/2011"), :]
ufo2011.head(3)
# Assign the unique number of UFO reports in 2011 to a variable.
report2011 = ufo2011.datetime.count()
print(report2011)
# Create a dataframe of UFO reports in 2012.
ufo2012 = ufodf.loc[(ufodf.datetime >= "1/1/2012") & (ufodf.datetime <= "12/31/2012"), :]
ufo2012.head(3)
# Assigning the unique number of UFO reports in 2012 to a variable.
report2012 = ufo2012.datetime.count()
print(report2012)
# Create a dataframe of UFO reports in 2013
ufo2013 = ufodf.loc[(ufodf.datetime >= "1/1/2013") & (ufodf.datetime <= "12/31/2013"), :]
ufo2013.head(3)
# Assign the Unique number of UFO reports in 2013 to a variable.
report2013 = ufo2013.datetime.count()
print(report2013)
```
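As an aside (a compact alternative added here, not part of the original workflow), the same per-year counts can be obtained in one pass with pandas' datetime accessor and `value_counts()`:
```
# Count UFO reports per year between 2004 and 2013 in a single step
yearly_counts = (
    ufodf.loc[ufodf["datetime"].dt.year.between(2004, 2013), "datetime"]
    .dt.year
    .value_counts()
    .sort_index()
)
print(yearly_counts)
```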
# Part 5: Preliminary Analysis of UFO Reports
### Sum, Average, Min, Max, and Visualizing the Data
```
# Total UFO reports from 2004 to 2013
total = (report2004 + report2005 + report2006 + report2007 + report2008 + report2009 + report2010 + report2011 + report2012 + report2013)
print(total)
# Average number of UFO reports.
average = (report2004 + report2005 + report2006 + report2007 + report2008 + report2009 + report2010 + report2011 + report2012 + report2013)/10
print(average)
# Determine the minimum number of UFO reports in a given year.
report_list = [report2004, report2005, report2006, report2007, report2008, report2009, report2010, report2011, report2012, report2013]
minimum = min(report_list)
print(minimum)
# Determine the maximum number of UFO reports in a given year.
maximum = max(report_list)
print(maximum)
# Visualizing the frequency of UFO reports between 2004 and 2013.
report_list = [report2004, report2005, report2006, report2007, report2008, report2009, report2010, report2011, report2012, report2013]
years = [2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013]
plt.plot(years, report_list)
plt.xlabel("Years")
plt.ylabel("Number of UFO Reports")
plt.title("UFO Reports Between 2004 and 2013")
plt.show()
```
# Part 6: The Relationship Between U. S. Military Spending and UFO Reports
In this section we will (1) determine if there is a correlation between U. S. military spending and the frequency of UFO reports, and (2) create some visualizations of our data.
```
# Create a new dataframe with "Years", "U. S. Spending", and "UFO Reports" as column labels. The years will be rows.
# Note that the variables used have already been defined above
df = pd.DataFrame({"Years": years, "U. S. Spending": spending_list, "UFO Reports": report_list})
df
# Perform Pearsons correlation
corr = df["U. S. Spending"].corr(df["UFO Reports"])
corr
# Visualizing the correlation with Seaborn
sns.heatmap(df[["U. S. Spending", "UFO Reports"]].corr(), annot = True, vmin = -1, vmax = 1, center = 0)
# Visualizing data with pairplots
sns.pairplot(df)
```
# Part 7: Discussion
In this project I looked at the amount of U. S. military spending that occurred between 2004 and 2013. I also looked at the number and frequency of UFO reports between 2004 and 2013. It was shown that military spending increased from $465,000,000,000 in 2004 to $640,000,000,000 in 2013. The peak of military spending was $711,000,000,000 in 2011. We also saw an increase in the number of reported UFO sightings between 2004 and 2013. There were 4,711 reports in 2004, and this number rose to 7,608 in 2013. The peak of UFO reports was 7,946 in 2012. Further analysis showed that there was a fairly moderate positive correlation between U. S. military spending and the frequency of UFO reports. While this does not indicate that military spending and UFO reports are causally related, it does show an interesting relationship that can be further explored and analyzed in more detail.
| github_jupyter |
# Is the Income-to-Loan Ratio a Major Factor in a High Delinquency Market?
We will answer this question by using a machine learning algorithm to predict whether there is a connection between the income-to-loan ratio and loan delinquency.
```
#first we will import our inline matplotlib
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import psycopg2
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import tensorflow as tf
import numpy as np
import datetime as dt
import time
# Import checkpoint dependencies
import os
from tensorflow.keras.callbacks import ModelCheckpoint
```
# We are now going to connect our PostgreSQL DB to our Jupyter notebook using SQLAlchemy
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from config import POSTGRES_ADDRESS
from config import POSTGRES_PORT
from config import POSTGRES_USERNAME
from config import POSTGRES_PASSWORD
from config import POSTGRES_DBNAME
```
### Let's create our engine to link PostgreSQL to our Jupyter notebook. This will allow us to access the dataset we have uploaded.
```
postgres_str = ('postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'.format(username=POSTGRES_USERNAME,
password=POSTGRES_PASSWORD,
ipaddress=POSTGRES_ADDRESS,
port=POSTGRES_PORT,
dbname=POSTGRES_DBNAME))
# Create the connection
engine = create_engine(postgres_str)
merged_db= pd.read_sql_query('''SELECT * FROM merged;''', engine)
merged_db
```
### Description Of The Source Of Data
Fannie Mae provides loan performance data on a portion of its single-family mortgage loans to promote better understanding of the credit performance of Fannie Mae mortgage loans.
Link: https://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html
```
# Import our input dataset
#mortgage_df = pd.read_csv('Final Project Data.csv')
mortgage_df = merged_db
mortgage_df.head()
len(mortgage_df.index)
mortgage_df.isnull().sum()
```
### Drop columns with low value to our model or with high NULL counts
```
# Drop the unwanted variables from the data
mortgage_input = mortgage_df.drop(columns = ['loan_identifier',
'maturity_date',
'monthly_reporting_period',
'current_interest_rate',
'loan_age',
'remaining_months_to_legal_maturity',
'adj_remaining_months_to_maturity',
'maturity_date',
'primary_mortgage_insurance_percent',
'current_loan_delinquency_status',
'modification_flag',
'seller_name',])
mortgage_input.head()
mortgage_input.isnull().sum()
```
### Remove all NaN values
```
# Drop NA rows
mortgage_input = mortgage_input.dropna()
len(mortgage_input.index)
mortgage_input.head(10)
```
### Now that we have cleaned our dataset, let's see what input we have for our ML model
```
# Generate our categorical variable list
mortgage_cat = mortgage_input.dtypes[mortgage_input.dtypes == "object"].index.tolist()
mortgage_cat
```
### Since we are focusing on potential foreclosure risk, we will be looking into all the states, all the loan sellers, the type of property, and other indicators
```
# Check the number of unique values in each column
mortgage_input[mortgage_cat].nunique()
```
### We will now use a one-hot encoder to turn object/string data into integer-based data to set up our ML model
```
# Create a OneHotEncoder instance
enc = OneHotEncoder(sparse=False)
# Fit and transform the OneHotEncoder using the categorical variable list
encode_df = pd.DataFrame(enc.fit_transform(mortgage_input[mortgage_cat]))
# Add the encoded variable names to the DataFrame
encode_df.columns = enc.get_feature_names(mortgage_cat)
encode_df.head()
```
### We will now merge the one-hot encoded data into our original dataset
```
# Merge one-hot encoded features and drop the originals
mortgage_merged = mortgage_input.merge(encode_df,left_index=True, right_index=True)
mortgage_merged = mortgage_merged.drop(mortgage_cat,1)
mortgage_merged.head()
mortgage_merged.tail()
#mortgage_merged = mortgage_merged.drop(columns = ['delinquency_False'])
mortgage_merged = mortgage_merged.dropna()
pd.set_option('display.max_rows', None)
mortgage_merged.dtypes
```
### Once we have confirmed that all our values are int64 or float64, we will create our X and y values
```
# Split our preprocessed data into our features and target arrays; we will set our target (y) to delinquency_True,
# and our df minus delinquency_True and delinquency_False will be our features (X)
y = mortgage_merged["delinquency_True"].values
X = mortgage_merged.drop(["delinquency_True", "delinquency_False"],1).values
# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# Create a StandardScaler instance
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
## Why a switch was made to Random Forest Classifier
In the second deliverable, linear regression was used; however, for this deliverable a choice was made to switch to a random forest.
After running our data through PyCaret, a toolkit that benchmarks upwards of 15 different machine learning models and determines which best fits the data, Random Forest yielded the best results. It makes sense, since the model is sequential in nature.
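For reference, the PyCaret comparison is only a few lines. The sketch below is a minimal illustration, assuming PyCaret is installed and using the `mortgage_merged` dataframe with the binary target `delinquency_True`; it is not the exact code that was run for the deliverable.
```
# Minimal PyCaret sketch (assumes: pip install pycaret)
from pycaret.classification import setup, compare_models

# Depending on the PyCaret version, setup() may prompt to confirm the inferred column types
clf = setup(data=mortgage_merged.drop(columns=["delinquency_False"]),
            target="delinquency_True",
            session_id=78)

# Cross-validates ~15 classifiers and returns the best-performing one
best_model = compare_models()
print(best_model)
```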
```
#% For deep forest neural network use this cell and all cells with % sign
from sklearn.metrics import accuracy_score
# Create a random forest classifier.
rf_model = RandomForestClassifier(n_estimators=128, random_state=78)
# Fitting the model
rf_model = rf_model.fit(X_train_scaled, y_train)
# Evaluate the model
y_pred = rf_model.predict(X_test_scaled)
print(f" Random forest predictive accuracy: {accuracy_score(y_test,y_pred):.3f}")
```
## We have added multiple layers to better train our model with varying node sizes.
### For testing the code, set the epoch count to <10
### For performance and evaluation, change the epoch count to >70
```
#% For deep forest neural network use this cell and all cells with % sign
# Define the model - deep neural net
number_input_features = len(X_train_scaled[0])
hidden_nodes_layer1 = 24
nn = tf.keras.models.Sequential()
# First hidden layer
nn.add(
tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation="tanh")
)
#add multiple hidden layers
#first i created a list of int to be used as nodes, add more digits to be used as i in our range value in the next line
units = [ 52, 31, 25, 16, 11, 17, 15, 5, 23, 5]
#then using a for loop i created multiple layers for our machine learning model
for i in range( 10 ):
nn.add( tf.keras.layers.Dense( units[i] , activation= "tanh" ) )
# Output layer
nn.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Compile the Sequential model together and customize metrics
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Define the checkpoint path and filenames
os.makedirs("checkpoints/",exist_ok=True)
checkpoint_path = "checkpoints/cp.{acc:.4f}-{epoch:02d}.hdf5"
# Create a callback that saves the model's weights every 5 epochs
cp_callback = ModelCheckpoint(
filepath=checkpoint_path,
monitor='accuracy',
verbose=1,
#save_best_only=True,
save_weights_only=False,
save_freq='epoch')
# Train the model (play around with the epoch number, curve starts to flatten after 70-80 epochs)
fit_model = nn.fit(X_train_scaled, y_train, epochs=80, callbacks=[cp_callback])
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
```
## Deep Forest Result
##### We received 77% accuracy with our model, which suggests that income (MSA) has a very tight relation to delinquency and is a major factor in mortgage loans going into a negative status.
## LET'S TAKE A LOOK AT HOW OUR MODEL PERFORMED
```
# Create a DataFrame containing training history
history_df = pd.DataFrame(fit_model.history, index=range(1,len(fit_model.history["loss"])+1))
# Plot the loss
history_df.plot(y="loss")
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# Plot the accuracy
history_df.plot(y="acc")
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
X_train_scaled
# Save the cleaned dataframe into a csv file.
merged_db.to_csv('Mortgage_merged_db.csv', index = False)
```
| github_jupyter |
# Dwave dataset
- Training an RBM to learn the distribution of the D-Wave output
# Tasks
1. Generate 10000 samples
2. Ensure the samples are generated from an equilibrated model. What metric do you use to prove equilibrium?
3. Report the mean of each Ising variable
4. Report the mean correlation of pairs of variables listed
#### Training
- We first train the RBM using PCD and track the log-likelihood of the training data to check for convergence.
- A visible node count of 2041 is quite demanding. With the time available for the cohort, the training is incomplete.
- I could not use a hidden node count of ~O(visible).
- I could not use sufficiently many CD-k steps ~O(visible).
- I could not run sufficiently many epochs.
- Convergence, i.e. equilibrium, is tracked during training using the log-likelihood.
```
from datetime import datetime
import pandas as pd
import torch
from helper import load_dataset
train_data, test_data = load_dataset('dataset_x1a46w3557od23s750k9.npz')
df = pd.read_csv('correlated_features.csv')
from RBM_helper import RBM
total_data_count, visible_nodes = train_data.shape
n_vis = visible_nodes
n_hin = int(n_vis / 10)
rbm = RBM(n_vis, n_hin)
train_data = torch.Tensor(train_data)
def train(rbm):
epochs = 100
# num_samples = visible_nodes * 10
k_train = 100
k_validate = n_vis + n_hin
print('Starting training on n_h = ', n_hin)
energies = []
for e in range(1, epochs+1):
# do one epoch of training
start = datetime.now()
rbm.train(train_data, k=k_train)
end = datetime.now()
print(f"Elapsed {end - start} for epoch {e}")
# now generate samples and calculate the energy
if e % 10 == 0:
print("\nEpoch: ", e)
# We check if log likelihood of training data converges
energy = rbm.effective_energy(train_data).item()
energies.append(energy)
print("Energy : ", energy)
return energies
likelihoods = train(rbm)
rbm.save_params('dwave_k100_e400')
df = pd.DataFrame(likelihoods)
df.to_csv('dwave_k100_e400_ll.csv')
```
## 1 Drawing samples
- Drawing from random state
```
import torch
from RBM_helper import RBM
from datetime import datetime
rbm = RBM.from_weights('params/dwave_k100_e400')
n_vis = rbm.n_vis
sample_count = 10000
k = 1000
initial_state = torch.rand((sample_count, n_vis)).cuda()
rbm = rbm.to(initial_state)
start = datetime.now()
samples = rbm.draw_samples(k, initial_state)
end = datetime.now()
print(f"Elapsed {end - start} for k = {k}")
rbm2 = RBM.from_weights('params/dwave_k100_e401-1000')
rbm2 = rbm2.to(initial_state)
start = datetime.now()
samples2 = rbm.draw_samples(k, initial_state)
end = datetime.now()
print(f"Elapsed {end - start} for k = {k}")
```
## 2 Check if training is converged
- Viewing the log-likelihood track, it seems the training is still ongoing.
- However, from a rough qualitative point of view, it seems almost sufficient, as shown in the following sections.
```
import pandas as pd
ll_df1 = pd.read_csv('training_logs/dwave_k100_e400_ll.csv', names=['Log Likelihood'], skiprows=1)
ll_df2 = pd.read_csv('training_logs/dwave_k100_e401-1000_ll.csv', names=['Log Likelihood'], skiprows=1)
ll_df2.index = list(range(400, 1000))
ll_df = pd.concat([ll_df1, ll_df2])
ll_df.index.name = 'Epoch'
ll_df.plot()
```
## 3 Calculating the mean of each spin
- While the log likelihood is still decreasing, another 600 epochs after the first 400 epochs does not change the mean spin population.
- This suggests the model has almost reached the equilibrium state.
```
import pandas as pd
samples_numpy = samples.cpu().numpy()
average = samples_numpy.mean(axis=1)
samples_numpy2 = samples2.cpu().numpy()
average2 = samples_numpy2.mean(axis=1)
df = pd.DataFrame(average, columns=['Spin mean(400 epochs)'])
ax = df.plot.hist(bins=80)
df2 = pd.DataFrame(average2, columns=['Spin mean(1000 epochs)'])
df2.plot.hist(bins=80, ax=ax, alpha=0.4)
ax.set_xlabel('Spin mean')
```
## 4 Calculate the correlation between each pair specified
- Spin correlations also do not change significantly if we train more.
```
correlated_df = pd.read_csv('correlated_features.csv', index_col=0)
correlated_df
samples_df = pd.DataFrame(samples_numpy)
correlations = pd.DataFrame(columns=['correlation(400 epochs)'])
for i in correlated_df.index:
correlations.loc[i] = samples_df[correlated_df['left'][i]].corr(samples_df[correlated_df['right'][i]])
samples_df2 = pd.DataFrame(samples_numpy2)
correlations2 = pd.DataFrame(columns=['correlation(1000 epochs)'])
for i in correlated_df.index:
correlations2.loc[i] = samples_df2[correlated_df['left'][i]].corr(samples_df2[correlated_df['right'][i]])
ax = correlations.plot.hist(bins=100)
correlations2.plot.hist(bins=100, ax=ax, alpha=0.4)
correlations.to_csv('correlations_e400.csv')
correlations2.to_csv('correlations_e401-100.csv')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/unicamp-dl/IA025_2022S1/blob/main/ex08/Gustavo_Arantes/Atividade_08_IA025A_Gustavo_Arantes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
nome = 'Gustavo da Silva Arantes'
print(f'My name is {nome}')
print('Inspired by the code of "Larissa Antonelli Santesso"')
```
# Exercise: Language Model with self-attention
This exercise is similar to the one from Lecture 7, but now we will train a neural network *with self-attention* to predict the next word of a text, given the previous words as input.
In the self-attention layer, don't forget to implement (a rough sketch of these pieces follows the list below):
- Position embeddings
- Linear projections (WQ, WK, WV, WO)
- Residual connections
- Feed-forward layer (2-layer MLP)
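For orientation only, the snippet below is a minimal sketch of those ingredients in PyTorch; it is not the exercise solution, the names and dimensions are illustrative, and a complete model still needs an output projection over the vocabulary.
```
# Minimal single-head causal self-attention block (illustrative sketch, not the solution)
import math
import torch
import torch.nn as nn

class SelfAttentionBlock(nn.Module):
    def __init__(self, vocab_size, context_size, dim):
        super().__init__()
        self.tok_emb = nn.Embedding(vocab_size, dim)      # token embeddings
        self.pos_emb = nn.Embedding(context_size, dim)    # position embeddings
        self.W_q = nn.Linear(dim, dim, bias=False)        # linear projections
        self.W_k = nn.Linear(dim, dim, bias=False)
        self.W_v = nn.Linear(dim, dim, bias=False)
        self.W_o = nn.Linear(dim, dim, bias=False)
        self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.ReLU(), nn.Linear(4 * dim, dim))

    def forward(self, input_ids):                         # input_ids: (batch, seq)
        seq_len = input_ids.shape[1]
        pos = torch.arange(seq_len, device=input_ids.device)
        x = self.tok_emb(input_ids) + self.pos_emb(pos)   # (batch, seq, dim)
        q, k, v = self.W_q(x), self.W_k(x), self.W_v(x)
        scores = q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1])
        causal_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool,
                                            device=input_ids.device), diagonal=1)
        scores = scores.masked_fill(causal_mask, float('-inf'))
        attn = torch.softmax(scores, dim=-1)
        x = x + self.W_o(attn @ v)                        # residual connection around attention
        x = x + self.ffn(x)                               # residual connection around the MLP
        return x
```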
The dataset used in this exercise (BrWaC) is reasonably large, and you will need to run your experiments on a GPU.
Some useful advice:
- **ATTENTION:** the dataset is quite large. Do not issue a command to print it.
- During debugging, make your dataset very small so that debugging is faster and does not require a GPU. Only turn on the GPU once your training loop is already working.
- Do not leave this exercise for the last minute. It is labor-intensive.
```
# we will use the transformers library to get access to the BERT tokenizer.
!pip install transformers
```
## Importing the packages
```
import collections
import itertools
import functools
import math
import random
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm_notebook
# Check which GPU we are using
!nvidia-smi
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print('Using {}'.format(device))
```
## Implementing MyDataset
```
from typing import List
def tokenize(text: str, tokenizer):
return tokenizer(text, return_tensors=None, add_special_tokens=False).input_ids
class MyDataset():
def __init__(self, texts: List[str], tokenizer, context_size: int):
self.examples = []
for text in tqdm_notebook(texts):
token_ids = tokenize(text=text, tokenizer=tokenizer)
if len(token_ids) < context_size + 1:
continue
# Compute n-grams:
for i in range(len(token_ids) - context_size):
input_ids = token_ids[i:i + context_size]
target_id = token_ids[i + context_size]
self.examples.append((input_ids, target_id))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
input_ids, target_id = self.examples[idx]
return torch.LongTensor(input_ids), target_id
```
## Testing whether the MyDataset implementation is correct
```
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
dummy_texts = ['Eu gosto de correr', 'Ela gosta muito de comer pizza']
dummy_dataset = MyDataset(texts=dummy_texts, tokenizer=tokenizer, context_size=3)
dummy_loader = DataLoader(dummy_dataset, batch_size=6, shuffle=False)
assert len(dummy_dataset) == 5
print('passou no assert de tamanho do dataset')
first_batch_input, first_batch_target = next(iter(dummy_loader))
correct_first_batch_input = torch.LongTensor(
[[ 3396, 10303, 125],
[ 1660, 5971, 785],
[ 5971, 785, 125],
[ 785, 125, 1847],
[ 125, 1847, 13779]])
correct_first_batch_target = torch.LongTensor([13239, 125, 1847, 13779, 15616])
assert torch.equal(first_batch_input, correct_first_batch_input)
print('Passou no assert de input')
assert torch.equal(first_batch_target, correct_first_batch_target)
print('Passou no assert de target')
```
# Loading the dataset
We will use a small sample of the [BrWaC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) dataset to train and evaluate our language model.
```
!wget -nc https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula7/sample_brwac.txt
# Load datasets
context_size = 9
valid_examples = 100
test_examples = 100
texts = open('sample_brwac.txt').readlines()
# print('Truncating for debugging purposes.')
# texts = texts[:500]
training_texts = texts[:-(valid_examples + test_examples)]
valid_texts = texts[-(valid_examples + test_examples):-test_examples]
test_texts = texts[-test_examples:]
training_dataset = MyDataset(texts=training_texts, tokenizer=tokenizer, context_size=context_size)
valid_dataset = MyDataset(texts=valid_texts, tokenizer=tokenizer, context_size=context_size)
test_dataset = MyDataset(texts=test_texts, tokenizer=tokenizer, context_size=context_size)
print(f'training examples: {len(training_dataset)}')
print(f'valid examples: {len(valid_dataset)}')
print(f'test examples: {len(test_dataset)}')
training_dataset[0]
from collections import OrderedDict
class LanguageModel(torch.nn.Module):
def __init__(self, vocab_size, context_size, embedding_dim):
"""
Implements the Self-attention, decoder-only."
Args:
vocab_size (int): Size of the input vocabulary.
context_size (int): Size of the sequence to consider as context for prediction.
embedding_dim (int): Dimension of the embedding layer for each word in the context.
"""
super(LanguageModel, self).__init__()
self.context_size = context_size
self.embedding_dim = embedding_dim
# Embedding of the words
self.embeddings_C = nn.Embedding(vocab_size, embedding_dim)
# Embedding of the words positions
self.embeddings_P = nn.Embedding(context_size, embedding_dim)
# linear projections
self.W_q = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.W_k = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.W_v = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.W_0 = nn.Linear(embedding_dim, embedding_dim, bias=False)
# Linear layer
hidden_size = 2*embedding_dim
self.linear_layers = nn.Sequential(
OrderedDict([
('dense1', nn.Linear(embedding_dim, hidden_size)),
('relu1', nn.ReLU()),
('dense2', nn.Linear(hidden_size, vocab_size, bias = False))
])
)
# Softmax
self.softmax = nn.Softmax(dim=-1)
def forward(self, inputs, debug=False):
"""
Args:
inputs is a LongTensor of shape (batch_size, context_size)
Returns:
logits of shape (batch_size, vocab_size)
"""
batch_size = inputs.shape[0]
pos = torch.arange(0, self.context_size).unsqueeze(0).repeat(batch_size,1).to(inputs.device)
# Arguments of the model #
## B: batch_size
## L: context_size
## D: embedding_dim
## V: vocab_size
# inputs shape: (B, L)
# pos shape: (B, L)
embeds = self.embeddings_C(inputs) # embeds shape: (B, L, D)
embeds_pos = self.embeddings_P(pos) # embeds pos: (B, L, D)
X = embeds + embeds_pos # X shape: (B, L, D)
# Q = X[-1] * Wq
X_1 = X[:,-1,:].unsqueeze(1) # X[:,-1,:] -> shape: (B, D)
Q = self.W_q(X_1) # Q shape: (B, 1, D)
# K = X * Wk
K = self.W_k(X) # K shape: (B, L, D)
# V = X * Wv
V = self.W_v(X) # V shape: (B, L, D)
# s = Q*K^T
# normalized scores: s = s/sqrt(D)
scores = torch.matmul(Q,torch.transpose(K,1,2))/math.sqrt(self.embedding_dim) # scores shape: (B, 1, L)
probs = self.softmax(scores) # probs shape: (B, 1, L)
E = torch.matmul(probs, V) # E shape: (B, 1, D)
# E = E*W0
E = self.W_0(E)
logits = self.linear_layers(E.view(batch_size,-1)) # logits shape: (B, Vocab_size)
if debug:
print(f"embeds shape: {embeds.shape}")
print(f"embeds_pos shape: {embeds_pos.shape}")
print(f"X shape: {X.shape}")
print(f"Q shape: {Q.shape}")
print(f"K shape: {K.shape}")
print(f"V shape: {V.shape}")
print(f"scores shape: {scores.shape}")
print(f"probs shape: {probs.shape}")
print(f"E shape: {E.shape}")
print(f"logits shape: {logits.shape}")
return logits
next(iter(DataLoader(training_dataset)))
```
## Personal analysis
```
# print('vocab_size:', tokenizer.vocab_size)
# print('context_size:', context_size)
# embedding_dim=64
# print('embedding_dim:', embedding_dim)
# sample_train, _ = next(iter(DataLoader(training_dataset, batch_size=5)))
# sample_train
# sample_train
# # para uma única linha
# token_ids = sample_train#[0]
# print('token_ids:', token_ids)
# # L = 9 = context_size
# L = context_size
# print('L:', L)
# print()
# # D = embedding_dim
# D = embedding_dim
# print('D:', D)
# # X.shape = L, D
# # embeddings
# embeddings = nn.Embedding(tokenizer.vocab_size, embedding_dim)
# print(embeddings)
# X = embeddings(token_ids)
# print('X:', X.shape)
# # Q = X[-1] - 1xD
# # Q = X[-1]
# # Q = Q[None, :]
# Q = X[:,-1,:]
# Q = Q[:,None,:]
# print('Q:', Q.shape)
# # K = V = X - LxD
# V = X
# K = V
# print('K:', K.shape)
# print('V:', V.shape)
# # scores - 1xL
# # Q * Kt --> 1x64 * 64x9 --> 1x9
# # scores = Q.mm(torch.t(K))
# scores = torch.matmul(Q, K.permute(0, 2, 1))
# print('scores:', scores.shape)
# # probs - 1xL
# # softmax = nn.Softmax(dim=1)
# softmax = nn.Softmax(dim=2)
# probs = softmax(scores)
# print('probs:', probs.shape)
# # E - 1XD
# # probs * V --> 1x9 * 9x64 --> 1x64 #<--- duvida
# E = torch.matmul(probs, V)
# print('E:', E.shape)
# # logist = linear(E) - 1,V --> 1xtotal de palavras <---- ## Dúvida V = X?
# linear = nn.Linear(embedding_dim, tokenizer.vocab_size) #<---
# logist = linear(E)
# print('linear:', logist.shape)
# # probs - 1xV
# probs = softmax(logist)
# print('probs:', probs.shape)
# # tokenin
# # token_id = probs.argmax()
# token_id = probs.argmax(dim=2)
# token_id
# # WQ, WK, WV, WO <-- shape=DxD
# w_q = torch.randn(embedding_dim, embedding_dim)
# w_k = torch.randn(embedding_dim, embedding_dim)
# w_v = torch.randn(embedding_dim, embedding_dim)
# w_o = torch.randn(embedding_dim, embedding_dim)
# print(w_q.shape)
# # para uma única linha
# token_ids = sample_train#[0]
# print('token_ids:', token_ids)
# # L = 9 = context_size
# L = context_size
# print('L:', L)
# # D = embedding_dim
# D = embedding_dim
# print('D:', D)
# # X.shape = L, D
# # embeddings
# embeddings = nn.Embedding(tokenizer.vocab_size, embedding_dim)
# print(embeddings)
# X = embeddings(token_ids)
# print('X:', X.shape)
# # Q = X[-1]*WQ - 1xD
# Q = X[:,-1,:]
# Q = Q[:,None,:]
# Q = torch.matmul(Q, w_q)
# print('Q:', Q.shape)
# # K = X*WK
# # V = X*WV
# V = torch.matmul(X, w_v)# - LxD
# K = torch.matmul(X, w_k)# - LxD
# print('K:', K.shape)
# print('V:', V.shape)
# # scores - 1xL
# # Q * Kt --> 1x64 * 64x9 --> 1x9
# scores = torch.matmul(Q, K.permute(0, 2, 1))
# print('scores:', scores.shape)
# # probs - 1xL
# softmax = nn.Softmax(dim=-1)
# probs = softmax(scores)
# print('probs:', probs.shape)
# # E - 1XD
# # probs * V --> 1x9 * 9x64 --> 1x64 #<--- duvida
# E = torch.matmul(probs, V)
# E = torch.matmul(E, w_o)
# print('E:', E.shape)
# # logist = linear(E) - 1,V --> 1xtotal de palavras <---- ## Dúvida V = X?
# linear = nn.Linear(embedding_dim, tokenizer.vocab_size) #<---
# logist = linear(E)
# print('linear:', logist.shape)
# # probs - 1xV
# probs = softmax(logist)
# print('probs:', probs.shape)
# # tokenin
# token_id = probs.argmax(dim=-1)
# token_id
```
# Test the model with an example
```
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
context_size=context_size,
embedding_dim=64,
).to(device)
sample_train, _ = next(iter(DataLoader(training_dataset)))
sample_train_gpu = sample_train.to(device)
model(sample_train_gpu).shape
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Number of model parameters: {num_params}')
```
## Perplexity assert
```
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
def perplexity(logits, target):
"""
Computes the perplexity.
Args:
logits: a FloatTensor of shape (batch_size, vocab_size)
target: a LongTensor of shape (batch_size,)
Returns:
A float corresponding to the perplexity
"""
loss = nn.functional.cross_entropy(logits, target, reduction='mean')
return torch.exp(loss)
n_examples = 1000
sample_train, target_token_ids = next(iter(DataLoader(training_dataset, batch_size=n_examples)))
sample_train_gpu = sample_train.to(device)
target_token_ids = target_token_ids.to(device)
logits = model(sample_train_gpu)
my_perplexity = perplexity(logits=logits, target=target_token_ids)
print(f'my perplexity: {int(my_perplexity)}')
print(f'correct initial perplexity: {tokenizer.vocab_size}')
assert math.isclose(my_perplexity, tokenizer.vocab_size, abs_tol=7000)
print('Passou o no assert da perplexidade')
```
## Training and Validation Loop
```
max_examples = 100_000_000
eval_every_steps = 10000
lr = 3e-4
embedding_dim = 128
batch_size = 128
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
context_size=context_size,
embedding_dim=embedding_dim,
).to(device)
train_loader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
validation_loader = DataLoader(valid_dataset, batch_size=batch_size)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
def train_step(input, target):
model.train()
model.zero_grad()
logits = model(input.to(device))
loss = nn.functional.cross_entropy(logits, target.to(device))
loss.backward()
optimizer.step()
return loss.item()
def validation_step(input, target):
model.eval()
logits = model(input)
loss = nn.functional.cross_entropy(logits, target)
return loss.item()
train_losses = []
n_examples = 0
step = 0
while n_examples < max_examples:
for input, target in train_loader:
loss = train_step(input.to(device), target.to(device))
train_losses.append(loss)
if step % eval_every_steps == 0:
train_ppl = np.exp(np.average(train_losses))
with torch.no_grad():
valid_ppl = np.exp(np.average([
validation_step(input.to(device), target.to(device))
for input, target in validation_loader]))
print(f'{step} steps; {n_examples} examples so far; train ppl: {train_ppl:.2f}, valid ppl: {valid_ppl:.2f}')
train_losses = []
n_examples += len(input) # Increment of batch size
step += 1
if n_examples >= max_examples:
break
```
## Final evaluation on the test dataset
Bonus: the model with the lowest perplexity on the test dataset will earn 0.5 extra points on the final grade.
```
test_loader = DataLoader(test_dataset, batch_size=64)
with torch.no_grad():
test_ppl = np.exp(np.average([
validation_step(input.to(device), target.to(device))
for input, target in test_loader
]))
print(f'test perplexity: {test_ppl}')
```
## Test your model with a sentence
Pick a model-generated sentence that you find interesting.
```
prompt = 'Eu gosto de comer pizza pois me faz'
max_output_tokens = 20
model.eval()
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
input_ids_truncated = input_ids[-context_size:] # Usamos apenas os últimos <context_size> tokens como entrada para o modelo.
logits = model(torch.LongTensor([input_ids_truncated]).to(device))
# Ao usarmos o argmax, a saída do modelo em cada passo é o token de maior probabilidade.
# Isso se chama decodificação gulosa (greedy decoding).
predicted_id = torch.argmax(logits).item()
input_ids += [predicted_id] # Concatenamos a entrada com o token escolhido nesse passo.
prompt = tokenizer.decode(input_ids)
print(prompt)
```
## Partial Codes
```
%matplotlib inline
# Basic packages
import numpy as np
import random
from collections import deque
import matplotlib.pyplot as plt
# Reinforcement learning environment package
import gym
# AI packages: TensorFlow and Keras
# For compatibility, we import the Keras bundled with TensorFlow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
def create_q_model(num_states, num_actions):
inputs = Input(shape=(num_states,))
layer = Dense(32, activation="relu")(inputs)
layer = Dense(16, activation="relu")(layer)
action = Dense(num_actions, activation="linear")(layer)
return Model(inputs=inputs, outputs=action)
model = create_q_model(4,2)
model.summary()
class World_00:
def __init__(self):
self.get_env_model()
def get_env_model(self):
self.env = gym.make('CartPole-v1')
self.num_states = self.env.observation_space.shape[0]
self.num_actions = self.env.action_space.n
self.model = create_q_model(self.num_states, self.num_actions)
# print(self.model.summary())
def train(self):
states = np.zeros((10,self.num_states), dtype=np.float32)
with tf.GradientTape() as tape:
predicts = self.model(states)
new_world = World_00()
new_world.train()
print('Simple training is completed!')
def env_test_model_memory(memory, env, model, n_episodes=1000,
flag_render=False):
for e in range(n_episodes):
done = False
score = 0
s = env.reset()
while not done:
s_array = np.array(s).reshape((1,-1))
Qsa = model.predict(s_array)[0]
a = np.argmax(Qsa)
next_s, r, done, _ = env.step(a)
if flag_render:
env.render()
score += r
memory.append([s,a,r,next_s,done])
print(f'Episode: {e:5d} --> Score: {score:3.1f}')
print('Notice that the max score is set to 500.0 in CartPole-v1')
def list_rotate(l):
return list(zip(*l))
class World_01(World_00):
def __init__(self):
World_00.__init__(self)
self.memory = deque(maxlen=2000)
self.N_batch = 64
self.t_model = create_q_model(self.num_states, self.num_actions)
self.discount_factor = 0.99
self.learning_rate = 0.001
self.optimizer = Adam(lr=self.learning_rate)
def trial(self, flag_render=False):
env_test_model_memory(self.memory, self.env,
self.model, n_episodes=10, flag_render=flag_render)
print(len(self.memory))
def train_memory(self):
if len(self.memory) >= self.N_batch:
memory_batch = random.sample(self.memory, self.N_batch)
s_l,a_l,r_l,next_s_l,done_l = [np.array(x) for x in list_rotate(memory_batch)]
model_w = self.model.trainable_variables
with tf.GradientTape() as tape:
Qsa_pred_l = self.model(s_l.astype(np.float32))
a_l_onehot = tf.one_hot(a_l, self.num_actions)
Qs_a_pred_l = tf.reduce_sum(a_l_onehot * Qsa_pred_l, axis=1)
Qsa_tpred_l = self.t_model(next_s_l.astype(np.float32))
Qsa_tpred_l = tf.stop_gradient(Qsa_tpred_l)
max_Q_next_s_a_l = np.amax(Qsa_tpred_l, axis=-1)
Qs_a_l = r_l + (1 - done_l) * self.discount_factor * max_Q_next_s_a_l
loss = tf.reduce_mean(tf.square(Qs_a_l - Qs_a_pred_l))
grads = tape.gradient(loss, model_w)
self.optimizer.apply_gradients(zip(grads, model_w))
new_world = World_01()
new_world.trial()
new_world.train_memory()
new_world.env.close()
print('Completed!')
class World_02(World_01):
def __init__(self):
World_01.__init__(self)
self.epsilon = 0.2
def update_t_model(self):
self.t_model.set_weights(self.model.get_weights())
def best_action(self, s):
if random.random() <= self.epsilon:
return random.randrange(self.num_actions)
else:
s_array = np.array(s).reshape((1,-1))
Qsa = self.model.predict(s_array)[0]
return np.argmax(Qsa)
def trials(self, n_episodes=100, flag_render=False):
memory = self.memory
env = self.env
model = self.model
score_l = []
for e in range(n_episodes):
done = False
score = 0
s = env.reset()
while not done:
a = self.best_action(s)
next_s, r, done, _ = env.step(a)
if flag_render:
env.render()
score += r
memory.append([s,a,r,next_s,done])
# self.train_memory()
s = next_s
self.train_memory()
self.update_t_model()
print(f'Episode: {e:5d} --> Score: {score:3.1f}')
score_l.append(score)
return score_l
new_world = World_02()
score_l = new_world.trials(n_episodes=50)
new_world.env.close()
np.save('score_l.npy', score_l)
```
## Full Codes
```
import pandas as pd
# Gain
rf_base_rmse={
'horizon':[1, 2, 5, 7, 10, 15, 20, 25, 30],
'window_3': [12.832877531423264, 18.509245767866105, 66.66476270997691, 83.84262755140071, 92.06286536108928, 168.46719412949506, 334.25515671794733, 502.67172086715937, 463.46864208011766],
'window_5': [15.283981387542099, 27.527940692854827, 77.64689401293822, 79.69990740349726, 124.98468698259285, 233.70026291610407, 345.5498888892039, 539.6175619994922, 309.51835410333],
'window_7': [25.269781370696137, 53.9848269882209, 66.23106860426788, 97.88427009297061, 164.65228849442065, 306.0274028956578, 419.15388232641794, 455.54630773279655, 472.29886564383855]}
rf_base_rmse = pd.DataFrame(rf_base_rmse, columns=['horizon','window_3', 'window_5', 'window_7'])
enet_base_rmse={
'horizon':[1, 2, 5, 7, 10, 15, 20, 25, 30],
'window_3':[7.119026980618628, 3.8831696752636162, 73.62452429394898, 87.43821701810703, 84.57453069938856, 157.06225866094815, 318.60841854362, 456.5736688874788, 431.08799412786016],
'window_5': [3.53112934829187, 4.143659561293134, 60.73407068885035, 47.879425228903074, 71.07919476491134, 156.1006941639173, 206.14792095133157, 342.3700050216712, 113.12423972086435],
'window_7': [38.535718049324714, 65.09698273335835, 45.6730621130107, 31.261431085806862, 93.09825316281831, 237.47204427559336, 231.5629240231176, 253.85028914970644, 250.97244314760655]}
enet_base_rmse =pd.DataFrame(enet_base_rmse, columns=['horizon','window_3', 'window_5', 'window_7'])
gp_base_rmse={
'horizon':[1, 2, 5, 7, 10, 15, 20, 25, 30],
'window_3':[16.665048358288686, 14.269135636688588, 43.70504466573084, 69.5523468437093, 116.73323339285753, 169.82334911449902, 346.4630104197274, 348.4969031736, 602.4986986545614],
'window_5': [10.996703519845596, 17.317895453616302, 72.78761151729987, 61.02060459539962, 155.4549275504271, 240.22194385007083, 345.2127623681954, 561.0491090251173, 232.36373606750794],
'window_7':[25.992429989356424, 53.007806757020035, 70.49274356763794, 79.56338966147041, 182.64117154430278, 309.8191826774273, 310.52939637435816, 582.6489290275531, 292.5354137327995]}
gp_base_rmse = pd.DataFrame(gp_base_rmse, columns=['horizon','window_3', 'window_5', 'window_7'])
xgbt_base_rmse = {
'horizon':[1, 2, 5, 7, 10, 15, 20, 25, 30],
'window_3': [17.578994985125746, 8.508985831467044, 46.47942367148835, 88.41591803112101, 64.55678874602995, 141.67290557191336, 307.0876938257302, 430.0776092536257, 377.31445666526224],
'window_5':[8.45939193278241, 30.04451089325721, 87.89571653105736, 104.69484116182116, 98.77221842391214, 210.82483578559481, 300.2608707274262, 473.131141659135, 345.28596475861536],
'window_7':[45.149905593290896, 61.20458277302646, 77.9999840182104, 76.77723711920021, 129.2100257613588, 296.3316202785695, 355.442505154934, 455.0923679423099, 538.4621315849354]}
xgbt_base_rmse = pd.DataFrame(xgbt_base_rmse, columns=['horizon','window_3', 'window_5', 'window_7'])
arimax_base_rmse = {
'horizon':[1, 2, 5, 7, 10, 15, 20, 25, 30],
'window_3': [17.578994985125746, 8.508985831467044, 46.47942367148835, 88.41591803112101, 64.55678874602995, 141.67290557191336, 307.0876938257302, 430.0776092536257, 377.31445666526224],
'window_5':[8.45939193278241, 30.04451089325721, 87.89571653105736, 104.69484116182116, 98.77221842391214, 210.82483578559481, 300.2608707274262, 473.131141659135, 345.28596475861536],
'window_7':[45.149905593290896, 61.20458277302646, 77.9999840182104, 76.77723711920021, 129.2100257613588, 296.3316202785695, 355.442505154934, 455.0923679423099, 538.4621315849354]}
arimax_base_rmse = pd.DataFrame(arimax_base_rmse, columns=['horizon','window_3', 'window_5', 'window_7'])
rf_base_rmse.set_index('horizon', drop=True, inplace=True)
enet_base_rmse.set_index('horizon', drop=True, inplace=True)
gp_base_rmse.set_index('horizon', drop=True, inplace=True)
xgbt_base_rmse.set_index('horizon', drop=True, inplace=True)
arimax_base_rmse.set_index('horizon', drop=True, inplace=True)
#models_w3_rmse = pd.concat([rf_base_rmse['window_3'], enet_base_rmse['window_3'], gp_base_rmse['window_3'], xgbt_base_rmse['window_3'] , arimax_base_rmse['window_3']], axis=1)
models_w3_rmse = pd.concat([rf_base_rmse['window_3'], enet_base_rmse['window_3']], axis=1)
models_w3_rmse.columns = ['rf_base_rmse', 'enet_base_rmse']
models_w3_rmse = models_w3_rmse.plot()
models_w3_rmse.figure.savefig('models_w3_rmse.jpg')
models_w5_rmse = pd.concat([rf_base_rmse['window_5'], enet_base_rmse['window_5']], axis=1)
models_w5_rmse.columns = ['rf_base_rmse', 'enet_base_rmse']
models_w5_rmse = models_w5_rmse.plot()
models_w5_rmse.figure.savefig('models_w5_rmse.jpg')
models_w7_rmse = pd.concat([rf_base_rmse['window_7'], enet_base_rmse['window_7']], axis=1)
models_w7_rmse.columns = ['rf_base_rmse', 'enet_base_rmse']
models_w7_rmse = models_w7_rmse.plot()
models_w7_rmse.figure.savefig('models_w7_rmse.jpg')
```
# Agrodem
A python tool for estimating water and electricity demand for irrigation.
**Original code:** [Konstantinos Pegios](https://github.com/kopegios) <br />
**Conceptualization & Methodological review :** [Alexandros Korkovelos](https://github.com/akorkovelos) & [Konstantinos Pegios](https://github.com/kopegios)<br />
**Updates, Modifications:** [Alexandros Korkovelos](https://github.com/akorkovelos), [Youssef Almulla](https://github.com/JZF07) & [Camilo Ramírez](https://github.com/camiloramirezgo) <br />
**Funding:** The World Bank (contract number: 7190531), [KTH](https://www.kth.se/en/itm/inst/energiteknik/forskning/desa/welcome-to-the-unit-of-energy-systems-analysis-kth-desa-1.197296)
## Import modules required
```
import pandas as pd
from datetime import datetime
import pyeto
import numpy as np
import ast
import math
import xlrd
from ast import literal_eval
from pandas import DataFrame
from scipy.interpolate import interp1d
import dateutil # dateutil provides powerful extensions to the standard datetime module
from dateutil import parser # parser reads a date given as a string and converts it to a datetime/timestamp; it handles most common formats
import os
from tkinter import filedialog, messagebox
import matplotlib.pyplot as plt
import folium
import branca.colormap as cm
import json
from IPython.display import display, Markdown, HTML
#from plotly.offline import iplot, init_notebook_mode
#init_notebook_mode()
#import plotly.graph_objs as go
# note that pyeto is available here https://github.com/woodcrafty/PyETo.git
from pyeto import fao
%matplotlib inline
# Patch the math module with NumPy equivalents so that pyeto functions also work on array inputs
math.exp = np.exp
math.pow = np.power
math.sqrt = np.sqrt
```
#### Define crop & scenario
```
# Crop name and scenario
cropname = "Maize"
scenarioname = "2017_10km"
```
## Part 1 - Biophysical characteristics
### Importing input dataset from GIS
The following biophysical characteristics are necessary inputs to the irrigation model. A more elaborate description of the preparation of such data is available in supporting [documentation](https://agrodem.readthedocs.io/en/latest/Overview.html). Code is also available in the ```GIS preparation``` sub-folder of this repo.
* country (name)
* state (name - admin 1 or 2)
* lat, lon (deg)
* crop (name - modelling crop)
* Fraction (%)
* harv_area (harvested area in ha)
* curr_yield (Current yield in kg/ha)
* max_yield (Maximum yield in kg/ha)
* gw_depth (Ground water depth in m)
* sw_dist (Distance to surface water in m)
* sw_depth (elevation of the nearest surface water source)
* elevation (in m)
* awsc (Water storage capacity of the soil in mm/m)
* sw_suit_idx (Surface irrigation suitability index: 1= suitable 9999= non suitable)
* prec_i (Average precipitation in mm/month; i=1-12)
* srad_i (Average solar irradiation per month in kJ m-2 day-1; i=1-12)
* wind_i (Average wind speed per month in m s-1; i=1-12)
* tavg_i, tmax_i, tmin_i(Average, Max, Min temperature per month in C; i=1-12)
<div class="alert alert-block alert-warning">
<b>Note:</b> The supporting file <b>Pilot_Moz_Maize_Sample_1km.csv</b> used below is a sample dataset of 1000 randomly selected locations around Mozambique.
</div>
```
# Import data
# Path and name of crop allocation file
path = r"C:\Users\alekor\Desktop\GithubFolder\agrodem\agrodem_sample_input_data" ## directory of input crop file
name_of_file = "Pilot_Moz_Maize_Sample_1km.csv"
# Import csv as pandas dataframe
df = pd.read_csv(path + "\\" + name_of_file)
df.head(5)
```
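As a quick sanity check (added here; it is not part of the original notebook, and the column names are taken from the input list above and from their use later in this notebook), one can verify that the key columns are present before continuing:
```
# Added sanity check: confirm a few of the columns used later in the model exist in the loaded dataframe.
required_cols = ['lat', 'lon', 'State', 'elevation', 'harv_area', 'awsc', 'gw_depth', 'sw_dist', 'sw_depth']
missing = [c for c in required_cols if c not in df.columns]
print('Missing columns:', missing)
```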
### Calculating
* Reference evapotranspiration (ETo)
* Crop coefficient (kc)
* Crop evapotranspiration (ETc)
* Effective rainfall (eff)
### Reference evapotranspiration (ETo)
ETo is estimated based on FAO's **Penman-Monteith** formula. The reference evapotranspiration, ETo, provides a standard to which:
* evapotranspiration at different periods of the year or in other regions can be compared
* evapotranspiration of other crops can be related
**Sources**
- [FAO Irrigation and Drainage Paper No. 56, Chapter 2](http://www.fao.org/3/X0490E/x0490e06.htm)
- [Andreas P. SAVVA Karen FRENKEN, "Irrigation manual", Volume 1, Module 4](http://www.fao.org/tempref/agl/AGLW/ESPIM/CD-ROM/documents/7I1_e.pdf)
- [Lincoln Zotarelli, Michael D. Dukes, Consuelo C. Romero, Kati W. Migliaccio, and Kelly T.
Morgan, "Step by Step Calculation of the Penman-Monteith
Evapotranspiration (FAO-56 Method)"](http://www.agraria.unirc.it/documentazione/materiale_didattico/1462_2016_412_24509.pdf)
- [Richard G. ALLEN et al., "Crop evapotranspiration - Guidelines for computing crop water
requirements - FAO Irrigation and drainage paper 56"](https://appgeodb.nancy.inra.fr/biljou/pdf/Allen_FAO1998.pdf)
--------------------------------------------------------------------------------------------------------------
```
# Defining function
def evap_i(lat,elev,wind,srad,tmin,tmax,tavg,month):
if month ==1:
J = 15
else:
J = 15 + (month-1)*30
latitude = pyeto.deg2rad(lat)
atmosphericVapourPressure = pyeto.avp_from_tmin(tmin)
saturationVapourPressure = pyeto.svp_from_t(tavg)
ird = pyeto.inv_rel_dist_earth_sun(J)
solarDeclination = pyeto.sol_dec(J)
sha = [pyeto.sunset_hour_angle(l, solarDeclination) for l in latitude]
extraterrestrialRad = [pyeto.et_rad(x, solarDeclination,y,ird) for x, y in zip(latitude,sha)]
clearSkyRad = pyeto.cs_rad(elev,extraterrestrialRad)
netInSolRadnet = pyeto.net_in_sol_rad(srad*0.001, albedo=0.23)
netOutSolRadnet = pyeto.net_out_lw_rad(tmin, tmax, srad*0.001, clearSkyRad, atmosphericVapourPressure)
netRadiation = pyeto.net_rad(netInSolRadnet,netOutSolRadnet)
tempKelvin = pyeto.celsius2kelvin(tavg)
windSpeed2m = wind
slopeSvp = pyeto.delta_svp(tavg)
atmPressure = pyeto.atm_pressure(elev)
psyConstant = pyeto.psy_const(atmPressure)
return pyeto.fao56_penman_monteith(netRadiation, tempKelvin, windSpeed2m, saturationVapourPressure, atmosphericVapourPressure, slopeSvp, psyConstant, shf=0.0)
#Initiate
for i in range(1,13):
df['ETo_{}'.format(i)]=0 ##To make sure that it is reset to zero
# calculate ETo for each row for each month
# range(1,13) and .format(i): to generate monthly calculation of ETo
for i in range(1,13):
df['ETo_{}'.format(i)] = evap_i(df['lat'],df['elevation'],df['wind_{}'.format(i)],df['srad_{}'.format(i)],df['tmin_{}'.format(i)],df['tmax_{}'.format(i)],df['tavg_{}'.format(i)],i)
```
### Define rainfall pattern (unimodal vs bimodal vs trimodal)
For this example we assume a unimodal pattern for the whole study area, meaning there is only one rainy season.
--------------------------------------------------------------------------------------------------------------
```
# See all states included in the input file
df.State.unique()
# Classify states per region (example on "Pilot_Input_Crop_Calendar.xlsx" )
# List of regions and their crop calendar are available "Pilot_Input_Crop_Calendar.xlsx"
list_of_counties_region_1 = ['Mueda', 'Mecula', 'Sanga', 'Lago', 'Mavago', 'Lichinga', 'Maua',
'Nipepe', 'Memba', "N'gauma", 'Majune', 'Mecuburi', 'Chifunde',
'Metarica', 'Lalaua', 'Muecate', 'Mecanhelas', 'Malema', 'Cuamba',
'Nacala_Velha', 'Maravia', 'Angonia', 'Monapo', 'Ribaue',
'Mossuril', 'Tsangano', 'Zumbu', 'Meconta', 'Nampula', 'Gurue',
'Alto_Molocue', 'Mongincual', 'Gile', 'Mogovolas', 'Moatize',
'Namarroi', 'Ile', 'Angoche', 'Lugela', 'Moma', 'Pebane',
'Mutarara', 'Morrumbala', 'Cahora_Bassa', 'Maganja_da_Cost',
'Tambara', 'Chemba', 'Namacurra', 'Maringue', 'Caia', 'Mopeia',
'Nicoadala', 'Barue', 'Macossa']
list_of_counties_region_2 = ['Cheringoma', 'Inhassunge',
'Chinde', 'Gorongosa', 'Manica', 'Muanza', 'Gondola', 'Nhamatanda',
'Sussundenga', 'Dondo', 'Buzi', 'Chibabava', 'Machanga',
'Mossurize', 'Govuro', 'Machaze']
list_of_counties_region_3 = ['Mabote', 'Inhassoro',
'Massangena', 'Vilanculos', 'Funhalouro', 'Morrumbene', 'Panda',
'Homoine', 'Jangamo', 'Inharrime', 'Magude', 'Manhica',
'Marracuene', 'Matutuine']
# Run this and the model will do the assignment
#df['Mode']=('region_1')
df['Mode'] = ["region_1" if x in list_of_counties_region_1 else
("region_2" if x in list_of_counties_region_2 else
("region_3" if x in list_of_counties_region_3 else "region_unknown")) for x in df['State']]
```
### Calculate kc based on the different growth stages
Note that the user must define the kc values for the different growth stages of a crop (for example, values of 0.8, 0.9, 1 and 0.8 for the four growth stages of cassava); the values used for maize in this notebook are set in the code further below. Source is available [here](http://www.fao.org/3/X0490E/x0490e0b.htm).
**Other sources**
- [Fatemeh Aghdasi, "Crop Water Requirement Assessment.."](https://webapps.itc.utwente.nl/librarywww/papers_2010/msc/wrem/aghdasi.pdf), 2010
- [FAO Irrigation potential in Africa Chapter 3"](http://www.fao.org/3/S2022E/s2022e07.htm)
```
# Import sample crop calendar and its file name
calendar_path = r"C:\Users\alekor\Desktop\GithubFolder\agrodem\agrodem_sample_input_data"
name_of_file = "Pilot_Input_Crop_Calendar_Maize.xlsx"
# Define kc function and its attributes
def kc(plantation,Li,Ld,Lm,Le,kci,kcd,kcm,kce,isodate):
"""
Each crop goes through four growing stages: initial - development - mid-season and end-season (check FAO-56 chapter 6 for more details)
Inputs:
Plantation = plantation datetime
Li = length of the initial stage (in days)
Ld = length of the development stage (in days)
Lm = length of the mid-season stage (in days)
Le = length of the end-season stage (in days)
kci = crop coefficient 'kc' at the initial stage. In this stage the ckc value is constant and equal to kci
kcm = crop coefficient 'kc' at the mid-season stage. In this stage the ckc value is constant and equal to kcm
kce = crop coefficient 'kc' at the end-season stage. In this stege the ckc value varies linearly between kce and kcm (check equation 66 - page 132, FAO56).
isodate = current date (optional)
Outputs:
* ckc : current crop coefficient, which is constant in the initial and mid-season stages and varies linearly in the development (increasing) and end-season (declining) stages.
Some Examples:
Kc(plantation="2014-01-01",Li=25,Ld=25,Lm=30,Le=20,Kci=0.15,Kcm=1.19,Kce=0.35,isodate="2014-01-20")
>>> 0.15
Kc(plantation="2014-01-01",Li=25,Ld=25,Lm=30,Le=20,Kci=0.15,Kcm=1.19,Kce=0.35,isodate="2014-02-10")
>>> 0.774
Kc(plantation="2014-01-01",Li=25,Ld=25,Lm=30,Le=20,Kci=0.15,Kcm=1.19,Kce=0.35,isodate="2014-03-12")
>>> 1.19
Kc(plantation="2014-01-01",Li=25,Ld=25,Lm=30,Le=20,Kci=0.15,Kcm=1.19,Kce=0.35,isodate="2014-04-06")
>>> 0.559
"""
#step 1:
plantation = pd.to_datetime(plantation, format='%d/%m') #converting the plantation input info to data time
isodate = pd.to_datetime(isodate , format='%d/%m') #converting the current date input info to data time
test = ((isodate-plantation).days)%365 #The difference in days between the current day and the plantation day.
# Setting the plantation date and the current date (this is not used)
Jc = test
Jp = 0
J = (Jc - Jp)%365 # %365 means the remaing days of the year
#Step 2: Calculating the day of the year when each crop stage ends placing the date in the number of days year betweem 0 (1/jan) and 365 (31/Jan)
JLi = Jp + Li #end of initial stage = plantation date + lenght of initial stage
JLd = JLi + Ld #end of development stage = end of initial stage + length of development stage
JLm = JLd + Lm #end of mid-season stage = end of development stage + length of mid-season stage
JLe = JLm + Le #end of end-season stage = end of mid-season stage + length of end-season stage
#step 3: calculating ckc based on the end of each stage date
if Jc > Jp and Jc < JLe: #if the current date is greater than the plantation date and it is greater than the end of end-season stage
if J <= JLi:
ckc = kci #if the current date is before the end of initial stage then ckc = kci the coefficient of the initial stege
elif Jc > JLi and Jc <=JLd: #if the current date is betweeen the end of the intial stege and the end of the development stage, then ckc is computed based on equation 66 (page 132.FAO56)
ckc = kci + ((Jc-JLi)/Ld * (kcm-kci))
elif Jc > JLd and Jc <= JLm:
ckc = kcm
elif Jc > JLm and Jc <= JLe:
ckc = kcm + ((Jc-JLm)/Le * (kce-kcm))
else:
ckc = 0
return ckc
```
**Running the function**
```
# Define kc factors for the crop; k_1: sowing period, k_2: growing first, k_3: growing second, k_4: harvesting
# Example for Maize
k_1 = 0.8
k_2 = 0.8
k_3 = 1.2
k_4 = 0.9
# Import csv as pandas dataframe
mode = pd.read_excel(calendar_path + "\\" + name_of_file)
#Note: The code here is adjusted to avoid the end of year issue. In other cases, the init1 and init2 are one stage init:
#pay attention to all changes, you may need to change this if the crop calendar change
#Planting season: Initial Stage (plant = init)
init_start = pd.to_datetime(mode['init_start'], format='%d/%m') #defining the plant start date from excel and setting the correct month and days sequence to read.
init_end = pd.to_datetime(mode['init_end'], format='%d/%m')
mode['init_start_month'] = init_start.dt.month
mode['init_end_month'] = init_end.dt.month
mode['init_days'] = abs(init_end - init_start).dt.days #Calculating the length of the planting season
Li = abs(init_end - init_start).dt.days
#growing 1: Development Stage (grow = dev)
dev_start = pd.to_datetime(mode['dev_start'], format='%d/%m')
dev_end = pd.to_datetime(mode['dev_end'], format='%d/%m')
mode['dev_start_month'] = dev_start.dt.month
mode['dev_end_month'] = dev_end.dt.month
mode['dev_days'] = abs(dev_end - dev_start).dt.days
Ld = abs(dev_end - dev_start).dt.days
#growing 2: Mid stage ( add : mid)
mid_start = pd.to_datetime(mode['mid_start'], format='%d/%m')
mid_end = pd.to_datetime(mode['mid_end'], format='%d/%m')
mode['mid_start_month'] = mid_start.dt.month
mode['mid_end_month'] = mid_end.dt.month
mode['mid_days'] = abs(mid_end - mid_start).dt.days
Lm = abs(mid_end - mid_start).dt.days
#Harvesting: Late stage (harv = late)
late_start = pd.to_datetime(mode['late_start'], format='%d/%m') #defining the plant start date from excil and setting the correct month and days sequence to read.
late_end = pd.to_datetime(mode['late_end'], format='%d/%m')
mode['late_start_month'] = late_start.dt.month
mode['late_end_month'] = late_end.dt.month
mode['late_days'] = abs(late_end - late_start).dt.days #Calculating the length of the planting season
Le = abs(late_end - late_start).dt.days
for i in range(1,13):
mode['kc_{}'.format(i)]=0
for index,row in mode.iterrows():
for i in range(0,12):
init_start = pd.to_datetime(mode['init_start'].iloc[index], format='%d/%m') #read the plant start date from excel.
day_start= (init_start.day+1-31)%31 #what does this represent??
if (init_start.day-1==30):
month_start = (init_start.month+1-12)%12 #next month
else:
month_start = (init_start.month-12)%12 #the current month
month_start = (month_start+i)%12
if (month_start==0):
month_start = 12
mode.loc[index,'kc_{}'.format(month_start)] = kc(mode['init_start'].iloc[index],mode['init_days'].iloc[index],mode['dev_days'].iloc[index],mode['mid_days'].iloc[index],mode['late_days'].iloc[index],k_1,k_2,k_3,k_4,'{}/{}'.format(day_start,month_start))
#print (kc)
# so far we worked with (df) dataframe which contains GIS outputs, then we created a (mode) dataframe.
# Here we merge them on into one new dataframe called (data) and we chose the merging to be on the 'Mode' column
data = pd.merge(df, mode, on='Mode')
```
### Calculating crop evapotransiration (ETc)
Note! This is also referred to as Crop Water Requirements (CWR)
**Sources**
See [here](https://www.sciencedirect.com/topics/agricultural-and-biological-sciences/crop-water-requirement) for definitions
```
# Estimate monthly crop evapotranspiration ETc
for i in range(1,13):
data['ETc_{}'.format(i)] = data['ETo_{}'.format(i)] * data['kc_{}'.format(i)]
```
### Sum precipitation
```
# Calculating the annual precipitation: which is the sum of precipitation values
data['precipitation_annual']=data.filter(like='prec_').sum(axis=1) #Filter is used to specify the column of interest
```
### Calculate effective rainfall for every row for each month
Effective rainfall is calculated with the [USDA‐SCS method](http://www.fao.org/3/x5560e/x5560e03.htm#TopOfPage), expressed through the empirical formula presented by [M. Ali, S. Mubarak](https://doi.org/10.9734/ARJA/2017/36812).
Note that usable soil water storage (d) is defined in mm. It depends on the soil water holding capacity and root depth, as shown [here](http://www.droughtmanagement.info/literature/BC_MA_Soil_Water_Storage_Capacity_2005.pdf). It is generally taken as 40 to 60 percent of the available soil-water capacity in the crop root zone, depending on the irrigation management practices used. In this analysis we use 50%.
**Sources:**
- USDA‐SCS Method, Chapter 2 "Irrigation Water Requirements", page 147, url:https://www.wcc.nrcs.usda.gov/ftpref/wntsc/waterMgt/irrigation/NEH15/ch2.pdf
- FAO, "Effective rainfall in irrigated agriculture", emperical methods, url: http://www.fao.org/3/x5560e/x5560e00.htm#Contents
- M. Ali, S. Mubarak, "Effective Rainfall Calculation Methods for Field Crops: An Overview, Analysis and New Formulation", url: https://doi.org/10.9734/ARJA/2017/36812
- FAO, "Irrigation Water Management: Irrigation Water Needs", url: http://www.fao.org/3/S2022E/s2022e00.htm#Contents
- S. Mohan, B. Simhadrirao, N. Arumugam, "Comparative study of effective rainfall estimation methods for lowland rice", url: https://link.springer.com/article/10.1007/BF00698810
- Balram Panigrahi, Megh R. Goyal, (Book), "Soil and Water Engineering: Principles and Applications of Modeling", page 265, url: https://books.google.se/books?id=wR9jDAAAQBAJ&pg=PA264&lpg=PA264&dq=criwar+semi+empirical+information+Pe&source=bl&ots=Bim8aJhvm3&sig=ACfU3U3xQK7FeZxIutU1-W962mydu3yesQ&hl=en&sa=X&ved=2ahUKEwjH0e3H-JriAhUxtIsKHftyB_UQ6AEwCnoECAcQAQ#v=onepage&q=criwar%20semi%20empirical%20information%20Pe&f=false
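Written out (this restates exactly what the `eff_rainfall` function in the next cell computes; precipitation and ETc are first converted from mm to inches with the 0.0393701 factor and the result is converted back to mm):

$$ P_{eff} = \frac{sf \cdot 0.70917\left(P_{in}^{0.82416} - 0.11556\right)\cdot 10^{\,0.2426\,ET_{c,in}}}{0.0393701}, \qquad P_{in} = 0.0393701\,P, \quad ET_{c,in} = 0.0393701\,ET_c $$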
```
# Effective Rooting Depth of Mature Crops (rd) in m; defined by user.
# Indicative values available here (http://www.droughtmanagement.info/literature/BC_MA_Soil_Water_Storage_Capacity_2005.pdf)
data["rd"] = 0.79 # m
# Water storage capacity of the soil (awsc) in mm/m; defined by user.
# Indicative values for different soils available here (http://www.droughtmanagement.info/literature/BC_MA_Soil_Water_Storage_Capacity_2005.pdf)
# Defining usable soil water storage (d) in inches
data["da"] = data["rd"] * data["awsc"] * 0.5 * 0.0393701
# correction factor; depends on da (see Balram Panigrani above)
data["sf"] = 0.531747 + 0.295164*data["da"] - 0.057697*(data["da"]**2) + 0.003804*(data["da"]**3)
#Define rainfall function
def eff_rainfall(sf, prec, etc):
return (sf*(0.70917*(((prec*0.0393701) ** 0.82416) - 0.11556))*(10**(0.2426*(etc*0.0393701))))/0.0393701
#Initiate
for i in range(1,13):
data['eff_{}'.format(i)]=0
for i in range(1,13):
data['eff_{}'.format(i)] = eff_rainfall(data["sf"], data['prec_{}'.format(i)],data['ETc_{}'.format(i)])
data.loc[data['eff_{}'.format(i)] < 0, 'eff_{}'.format(i)] = 0.0001
data.loc[(data['eff_{}'.format(i)] >= data['prec_{}'.format(i)]), 'eff_{}'.format(i)] = data['prec_{}'.format(i)]
data.loc[(data['eff_{}'.format(i)] >= data['ETc_{}'.format(i)]), 'eff_{}'.format(i)] = data['ETc_{}'.format(i)]
## Uncomment to save file up to this point
##Create a Pandas Excel writer using XlsxWriter as the engine.
#writer = pd.ExcelWriter('Pilot_Result_Part1.xlsx', engine='xlsxwriter')
#
## Convert the dataframe to an XlsxWriter Excel object.
#data.to_excel(writer, sheet_name='part_1')
#
## Close the Pandas Excel writer and output the Excel file.
#writer.save()
```
## Part 2. Calculating Irrigation requirements
* Net Irrigation requirements (IRn)
* Peak Crop Water Requirements (PCWR)
* Peak Water Demand (PWD)
* Seasonal Scheme Water Demand (SSWD)
### Net Irrigation requirements (IRn) (mm/month)
**Sources**
- [FAO paper 24, "Crop Water Requirements"](http://www.fao.org/3/s8376e/s8376e.pdf)
- [FAO, "Irrigation potential in Africa Chapter 5"](http://www.fao.org/3/W4347E/w4347e0c.htm#chapter%205:%20irrigation%20water%20requirements)
- [Andreas P. SAVVA Karen FRENKEN, "Irrigation manual", Volume 1, Module 4](http://www.fao.org/tempref/agl/AGLW/ESPIM/CD-ROM/documents/7I1_e.pdf)
--------------------------------------------------------------------------------------------------------------
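In equation form (matching the cell below, which assumes 30-day months), the net irrigation requirement for month $i$ is simply crop evapotranspiration minus effective rainfall:

$$ IR_{n,i} = 30\,\left(ET_{c,i} - P_{eff,i}\right) \quad \text{[mm/month]} $$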
```
for i in range (1,13):
data['IRn_{}'.format(i)]= data['ETc_{}'.format(i)]*30 - data['eff_{}'.format(i)]*30
```
### Peak Crop Water Requirements (PCWR)
```
# Converting IRn into (m3/ha per month)
for i in range (1,13):
data['IRn_{}'.format(i)] *= 10 # 0.001*10000
# Converting IRn into (m3/ha per day)
for i in range (1,13):
data['IRnd_{}'.format(i)] = data['IRn_{}'.format(i)] / 30
# Peak crop water requirement (PCWR) is estimated as 2*IRnd (source: FAO manual) (unit: m3/ha per day)
for i in range (1,13):
data['PCWR_{}'.format(i)] = data['IRnd_{}'.format(i)] * 2
# Converting PCWR into l/s/ha "Duty"
for i in range (1,13):
data['PCWR_{}'.format(i)] *= 0.012
```
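The 0.012 factor above converts m³/ha per day into l/s per ha ("duty"). A quick check of that conversion (added here, not part of the original notebook):
```
# Added check: 1 m3/ha per day = 1000 l / 86400 s ≈ 0.0116 l/s per ha;
# the 0.012 used above appears to be this value, slightly rounded up.
print(1000.0 / 86400)  # ≈ 0.0116
```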
### Peak Water Demand (PWD) in l/s || Seasonal Scheme Water Demand (SSWD) in m3
```
# In order to estimate PWD and SSWD we first need to compute the irrigated area used in that particular month.
for index,row in data.iterrows():
len_init = (len(range(row['init_start_month'],row['init_end_month']))+1)
# PWD = PCWR / Irrigation efficiency(IrrEff)
# IrrEff = Field Application Efficiency (aeff) * Distribution Efficiency (deff)*100
# deff = (Conveyance efficiency + field canal efficiency)
# deff: 0.95 (all scenarios)
# aeff: 0.6 (Surface Irr), 0.75 (Sprinkler Irr), 0.9 (Drip Irr)
pumping_hours_per_day=8 # Assumption
deff= 0.95 # Assumption
aeff= 0.75 # Assumption
count_p=0 # To adjust the count of months in the loop below
count_h=0 # To adjust the count of months in the loop below
init_count = np.zeros(len(data))
late_count = np.zeros(len(data))
for i in [1,2,3,4,5,6,7,8,9,10,11,12]:
init = [(i >= j) & (i <= k) for j, k in zip(data['init_start_month'],data['init_end_month'])]
data.loc[init,'harvested_{}'.format(i)] =(data['harv_area']/(len_init)*init_count)
data.loc[init,'PWD_{}'.format(i)]= (data['PCWR_{}'.format(i)] *(data['harvested_{}'.format(i)]*24))/(pumping_hours_per_day*aeff*deff)
data.loc[init,'SSWD_{}'.format(i)]= (data['IRn_{}'.format(i)]*(data['harvested_{}'.format(i)])/(aeff*deff))
dev = [(i >= j) & (i <= k) for j, k in zip(data['dev_start_month'],data['dev_end_month'])]
data.loc[dev,'harvested_{}'.format(i)]=data['harv_area']
data.loc[dev,'PWD_{}'.format(i)]=(data['PCWR_{}'.format(i)]*data['harv_area']*24)/(pumping_hours_per_day*aeff*deff)
data.loc[dev,'SSWD_{}'.format(i)]= (data['IRn_{}'.format(i)]*data['harv_area'])/(aeff*deff)
mid = [(i >= j) & (i <= k) for j, k in zip(data['mid_start_month'],data['mid_end_month'])]
data.loc[mid,'harvested_{}'.format(i)]=data['harv_area']
data.loc[mid,'PWD_{}'.format(i)]=(data['PCWR_{}'.format(i)]*data['harv_area']*24)/(pumping_hours_per_day*aeff*deff)
data.loc[mid,'SSWD_{}'.format(i)]= (data['IRn_{}'.format(i)]*data['harv_area'])/(aeff*deff)
late = [(i >= j) & (i <= k) for j, k in zip(data['late_start_month'],data['late_end_month'])]
late_count += late * 1
data.loc[late,'harvested_{}'.format(i)]=(data['harv_area']/([len(range(i,j+1)) for i,j in zip(data['late_start_month'],data['late_end_month'])])*late_count)
data.loc[late,'PWD_{}'.format(i)]= (data['PCWR_{}'.format(i)]*(data['harvested_{}'.format(i)]*24)/(pumping_hours_per_day*aeff*deff))
data.loc[late,'SSWD_{}'.format(i)]= (data['IRn_{}'.format(i)]*(data['harvested_{}'.format(i)])/(aeff*deff))
```
Export dataframe into a csv file (Uncomment to activate)
```
## Finally, print results of part 2
#
##Create a Pandas Excel writer using XlsxWriter as the engine.
#writer = pd.ExcelWriter('Pilot_Results.xlsx', engine='xlsxwriter')
#
## Convert the dataframe to an XlsxWriter Excel object.
#data.to_excel(writer, sheet_name='test_all')
#
#
## Close the Pandas Excel writer and output the Excel file.
#writer.save()
```
## Part 3. Estimating energy and power requirements
***Sources***
- [FAO, Chapter 4 - *Comparison of energy alternatives for small-scale irrigation"](http://www.fao.org/3/u2246e/u2246e05.htm#1%20technical%20calculations)
- [Andreas P. SAVVA Karen FRENKEN, "Irrigation manual", Volume 1, Module 4](http://www.fao.org/tempref/agl/AGLW/ESPIM/CD-ROM/documents/7I1_e.pdf)
### Total dynamic head (TDH) for ground and surface water sources
```
#data['sw_depth'] = np.random.randint(0,5, size=len(data)) ## refering to the suction lift im (m)
#data['sw_dist'] = np.random.randint(0,50, size=len(data)) ## refering to the distance to water source in (m)
#data['sw_suitability'] = np.random.choice([1,9999], size=len(data)) ## refering to the distance to water source in (m)
# Total dynamic head (TDH) in meters: Total static head (m) + Pressure head (m) + Friction head (m)
# Total static head for groundwater: ground water depth gw_depth
# Total static head for surface water: (elevation of water source - elevation of application) x distance to source
# Pressure head (m): 0 m (SU), 30 m (SP), 10 m (DR) , check if it is in 1992?
# Friction head (m): 1 m (SU), 20% of presure head (SP, DR) , check if it is in 1992?
pres_head_gw = 1
frict_head_gw = 0.2*pres_head_gw
pres_head_sw = 1
frict_head_sw = 0.2*pres_head_sw
def tdh_gw(row):
tdh=(row['gw_depth']) + pres_head_gw + frict_head_gw
return tdh
def tdh_sw(row):
tsh = row['sw_depth'] - row['elevation']
if tsh <= 0:
tdh=(abs(row['sw_depth'] - row['elevation'])) + pres_head_sw + (frict_head_sw*row["sw_dist"]*row["sw_suit"])
else:
tdh = pres_head_sw + (frict_head_sw*row["sw_dist"]*row["sw_suit"])
return tdh
data['tdh_gw'] = data.apply(tdh_gw , axis=1)
data['tdh_sw'] = data.apply(tdh_sw , axis=1)
```
### Estimating power (kW) and electricity (kWh) demand
```
#Setting the default value for these parameters
for i in range (1,13):
data['PD_E_gw_{}'.format(i)]=0 #PD_E_gw: Peak Demand (kw) using electric powered pump for ground water
data['PD_E_sw_{}'.format(i)]=0 #PD_E_sw: Peak Demand (kw) using electric powered pump for surface water
data['ED_E_gw_{}'.format(i)]=0 #ED_E_gw: Electricity Demand (kwh) using electric powered pump for ground water
data['ED_E_sw_{}'.format(i)]=0 #ED_E_sw: Electricity Demand (kwh) using electric powered pump for surface water
# Pumping plant efficiencty (%)= fuel efficiency (%) * "power unit eff (%)" * transmission eff (%) * pump eff (%) * 100%
# The Power Unit: can be diesel engine or electric engine. In the first we call it (diesel powered pump) and the second (electric powered pump)
# Diesel powered pump
#Worst case: 0.9*0.3*0.9*0.4 ~ 10 % (0.1)
#Best case: 1*0.4*1*0.8 = 32% (0.32)
# Electric powered pump
#Worst case: 0.9*0.75*0.9*0.4 ~ 25% (0.25)
#Best case: 1*0.85*1*0.8 ~ 70% (0.7)
pump_plant_eff=0.7
for i in range (1,13):
PWD = 'PWD_{}'.format(i)
SSWD = 'SSWD_{}'.format(i)
PD_E_gw = 'PD_E_gw_{}'.format(i)
ED_E_gw = 'ED_E_gw_{}'.format(i)
PD_E_sw = 'PD_E_sw_{}'.format(i)
ED_E_sw = 'ED_E_sw_{}'.format(i)
data[PD_E_gw]=(9.81*(data[PWD]/1000)*data['tdh_gw'])/pump_plant_eff
data[ED_E_gw]=(data[SSWD]*data['tdh_gw']*0.00272)/pump_plant_eff
data[PD_E_sw]=(9.81*(data[PWD]/1000)*data['tdh_sw'])/pump_plant_eff
data[ED_E_sw]=(data[SSWD]*data['tdh_sw']*0.00272)/pump_plant_eff
```
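A brief note on where the constants in the cell above come from (added for clarity; it is not part of the original notebook):
```
# Added check: hydraulic power in kW is rho*g*Q*H/1000 = 9.81 * Q[m3/s] * H[m],
# which is why PWD (in l/s) is divided by 1000 above. For energy, lifting 1 m3 of water
# by 1 m of head takes rho*g*V*H = 1000*9.81 J; dividing by 3.6e6 J/kWh gives the 0.00272 factor.
print(1000 * 9.81 / 3.6e6)  # ≈ 0.002725 kWh per (m3 · m)
```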
### Decision between ground or surface water irrigation
Selection based on peak power demand (kW) for the two options. First, we identify the max value between the two sub-categories and then we select the option that provides the minimum peak power.
```
data['PD_E_gw_max']=data.filter(like='PD_E_gw_').max(axis=1)
data['PD_E_sw_max']=data.filter(like='PD_E_sw_').max(axis=1)
data['PD_E'] = np.minimum.reduce(data[['PD_E_gw_max', 'PD_E_sw_max']].values, axis=1)
```
### Estimate annual electricity demand (kWh/year)
**Sources**
- [Andreas P. SAVVA Karen FRENKEN, "Irrigation manual", Volume 1, Module 5](http://www.fao.org/tempref/agl/AGLW/ESPIM/CD-ROM/documents/7I1_e.pdf)
```
# Estimate the gross annual irrigation requirements per location in (m3)
data['gross_an_irrig_req']=data.filter(like='SSWD_').sum(axis=1)
# Identify the peak water demand (PWD) and convert it from l/s to m3/h
data['PWD_max']=data.filter(like='PWD_').max(axis=1)
data['PWD_max'] *= 3.6
# Estimate annual electricity demand per location assuming motor efficiency
motor_eff = 0.88
data['Annual_elec_demand'] = (data['gross_an_irrig_req']/data['PWD_max'])*(data['PD_E']/motor_eff)
```
## Part 4. Summaries, Visualization & Export of results
```
# This filters the dataframe, only keeping the locations for which electricity demand was identified
demandf = data[data['Annual_elec_demand'].notnull() & (data["Annual_elec_demand"] != 0)]
# Estimating summaries
a = demandf["gross_an_irrig_req"].count()
b = demandf["gross_an_irrig_req"].sum()
c = demandf["Annual_elec_demand"].sum()
d = demandf["harv_area"].sum()
e = data["harv_area"].sum()
f = d/e
sums = [{"Scenario":scenarioname, "Total harvested area":e,'Total locations irrigated':a, 'Total irrigated area': d, "% of area for irrigation":f,'Total water needs': b,'Total electricity demand':c, }]
pd.options.display.float_format = '{:.2f}'.format
summary_table = pd.DataFrame(sums, index = [cropname])
display(Markdown('### Summary \n These are the summarized results'))
summary_table
```
### Create an interactive map with results
Note! If the analysis is conducted for thousands of locations, this step can be computationally intensive.
```
# Vizualize result on an interactive map exported as html
#Define limits for map rendering
x_ave = demandf["lon"].mean()
y_ave = demandf["lat"].mean()
elecdem = demandf["Annual_elec_demand"].median()
# Create the map using folium module
map_dem = folium.Map(location=[y_ave,x_ave], zoom_start=6, control_scale=True)
# Definition of a function that returns different color names based on the electricity demand category
# Colors are in Hexa-code e.g. #RRGGBB
def colorvalue(x):
if x <= 0.5:
return "#ADFF2F"
elif x >= 0.5 and x < 2:
return "#32CD32"
elif x >= 2 and x < 10:
return "#228B22"
elif x >= 10 and x < 100:
return "#008000"
elif x >= 100 and x < 500:
return "#006400"
else:
return "#000000"
# Then we create a marker for each location;
# We pass coordinates, demand value and size as attributes to appear on the rendered map
for index, row in demandf.iterrows():
el_demand = row["Annual_elec_demand"]
area = row["harv_area"]
color_code = colorvalue(el_demand)
#radius_size = radius_sizing(area)
#print (color_code)
#print (radius_size)
folium.CircleMarker([row["lat"], row["lon"]],
radius=2,
color=color_code,
popup="Demand: {:.2} kWh, Area: {:.2} ha".format(row["Annual_elec_demand"], row["harv_area"]),
fill = True,
fill_opacity=0,
).add_to(map_dem)
# We define the limits of the legend and fix its printout format
# We use branca module to create a colormap legend and then add legend to the map
min_dem = demandf["Annual_elec_demand"].min()
max_dem = demandf["Annual_elec_demand"].max()
min_dem = float("{0:.2f}".format(min_dem))
max_dem = float("{0:.2f}".format(max_dem))
legend = cm.LinearColormap(['#ADFF2F','#32CD32','#228B22','#008000','#006400','#000000'],
index=None, vmin=min_dem, vmax=max_dem)
legend.add_to(map_dem)
# Create a new directory where the map(s) can be saved
try:
os.makedirs('maps')
except FileExistsError:
pass
map_dem_output = 'maps/map_{}_{}_{}.html'.format("Moz", cropname, scenarioname)
map_dem.save(map_dem_output)
# Finally add the link that leads to the final map output
display(Markdown('<a href="{}" target="_blank">Click here to render the map of electricity demand</a>'.format(map_dem_output)))
```
### Exporting results
<div class="alert alert-block alert-warning">
<b>Note:</b> In this example the filtered results are saved in the sub-folder <b>agrodem_sample_output_data</b> located in the repo.
</div>
```
# Exports dataframe to csv in a defined path
#messagebox.showinfo('OnSSET', 'Browse to the folder where you want to save the outputs')
#path = filedialog.askdirectory()
agrodem_output_path = r"C:\Users\alekor\Desktop\GithubFolder\agrodem\agrodem_sample_output_data"
name = "Sample_Moz_Maize_2017_1km_Results"
demandf.to_csv(os.path.join(agrodem_output_path,"{c}.csv".format(c=name)))
```
Export full dataframe into a csv file (Uncomment to activate)
```
## This part prints full results
#
#path = 'Sample_results/'
#data.to_csv(os.path.join(path,"{c}.csv".format(c="Pilot_Moz_Maize_Sample_1km_Results")))
```
# Head Pose Estimator (Gluon)
Tatsuya J. Arai @araitats
02/28/2018
## Introduction
This notebook describes the construction of a convolutional neural network based head pose estimator.
## Overview
When a face photo is presented in front of you, your eyes can immediately recognize which direction the person in the photo is looking (e.g. facing the camera straight on or looking somewhere else). This direction is defined as the head pose. Once a convolutional neural network model has been trained with thousands of such face photos and their corresponding head-pose labels, it can estimate the head pose for new batches of face images. In this notebook, the head pose is categorized into nine classes (i.e. the combinations of looking down, straight, and up (tilt angles) and looking right, middle, and left (pan angles)).
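For reference, here is an added illustration (not part of the original notebook) of the class numbering that appears later in this notebook, where the grid class is `pan * n_grid + tilt` with `n_grid = 3`:
```
# Added illustration: 3 tilt classes x 3 pan classes -> 9 head-pose (grid) classes, numbered 0-8.
n_grid = 3
for pan_cls in range(n_grid):
    print([pan_cls * n_grid + tilt_cls for tilt_cls in range(n_grid)])
```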
## Modules
```
import os
import sys
import numpy as np
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
import mxnet as mx
## Python version
print(sys.version)
```
2.7.12 (default, Nov 20 2017, 18:23:56) [GCC 5.4.0 20160609]
```
print(mx.__version__)
```
1.1.0
## Helper function
### Color shift in HSV space
``shiftHSV`` randomly shifts the hue, saturation and value of an input image (the shift is applied with probability ``drop_p``).
```
def shiftHSV(im, h_shift_lim=(-180, 180),
s_shift_lim=(-255, 255),
v_shift_lim=(-255, 255), drop_p=0.5):
if np.random.random() < drop_p:
im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(im)
h_shift = np.random.uniform(h_shift_lim[0], h_shift_lim[1])
h = cv2.add(h, h_shift)
s_shift = np.random.uniform(s_shift_lim[0], s_shift_lim[1])
s = cv2.add(s, s_shift)
v_shift = np.random.uniform(v_shift_lim[0], v_shift_lim[1])
v = cv2.add(v, v_shift)
im = cv2.merge((h, s, v))
im = cv2.cvtColor(im, cv2.COLOR_HSV2BGR)
im = np.uint8(im)
im = np.float32(im)
return im
```
### Obtain a pre-trained ResNet Model from model zoo
```
from mxnet.gluon.model_zoo.vision import resnet50_v1
pretrained_net = resnet50_v1(pretrained=True, prefix = 'headpose_')
print(pretrained_net)
```
## Load preprocessed datasets
### Dataset
Original Data: http://www-prima.inrialpes.fr/perso/Gourier/Faces/HPDatabase.html
> N. Gourier, D. Hall, J. L. Crowley,
> Estimating Face Orientation from Robust Detection of Salient Facial Features,
> *Proceedings of Pointing 2004, ICPR, International Workshop on Visual Observation of Deictic Gestures*, Cambridge, UK
You have to preprocess the dataset using ``preprocessingDataset_py2.py``. This may take some time.
> python2 preprocessingDataset_py2.py --num-data-aug 15 --aspect-ratio 1
Preprocessed Data: (6.7 GB (Aspect Ratio, 1:1) or 5.0 GB (Aspect Ratio, 16:9))
```
import pickle
trn_im, test_im, trn_output, test_output = pickle.load(open( "HeadPoseData_trn_test_x15_py2.pkl", "rb" ))
print(trn_im.shape, test_im.shape)
print(trn_output.shape, test_output.shape)
```
## Classification of Head Pose
### Make mirror images (data augmentation)
Head-pose images were mirrored left–right (flipped along the width axis) and the signs of the corresponding head-pose pan angles were changed accordingly. This mirroring effectively doubles the size of the training data.
```
trn_im_mirror = trn_im[:,:,:,::-1]
trn_output_mirror = np.zeros(trn_output.shape)
# Tilt
trn_output_mirror[:,0] = trn_output[:,0]
# Pan
trn_output_mirror[:,1] = trn_output[:,1] * -1
im_idx = 200
print(trn_output[im_idx,:], trn_output_mirror[im_idx,:])
trn_im = np.concatenate((trn_im, trn_im_mirror), axis = 0)
trn_output = np.concatenate((trn_output, trn_output_mirror), axis = 0)
print(trn_im.shape, trn_output.shape)
```
### From (normalized) angles to angle classes (Tilts and Pans)
xxx_output[:,0] and xxx_output[:,1] contain normalized angle data (from -90 degrees to 90 degrees -> from -1.0 to 1.0) for the tilt and pan directions, respectively. This step converts each normalized angle into one of three angle classes per direction.
```
n_grid = 3
angles_thrshld = [np.arcsin(float(a) * 2 / n_grid - 1)/np.pi * 180 / 90 for a in range(1,n_grid)]
print("Threshold Angles " + (', ').join(["{:.2f}".format(a * 90) for a in angles_thrshld]))
```
Head pose is classified into 9 categories (the combinations of 3 tilt and 3 pan classes). A head pose within +/- 19.5 degrees in both tilt and pan angles is labeled as the center position (i.e. Grid Class 4, Tilt Class 1 and Pan Class 1). The threshold angles of +/- 19.5 degrees split the sine range (i.e. the interval between sin(-90 degrees) and sin(90 degrees)) into three equal parts.
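As a quick numeric sanity check (added, not in the original notebook), the +/- 19.5 degree figure follows directly from the arcsine of +/- 1/3:
```
# Added check: splitting the sine range [-1, 1] into three equal parts gives
# breakpoints at -1/3 and 1/3, i.e. roughly +/- 19.47 degrees.
import numpy as np
print(np.degrees(np.arcsin([-1.0 / 3, 1.0 / 3])))  # ~ [-19.47, 19.47]
```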
```
def angles2Cat(angles_thrshld, angl_input):
# angl_input: Normalized angle -90 - 90 -> -1.0 - 1.0
angles_cat_temp = angles_thrshld + [angl_input]
return np.argmin(np.multiply(sorted(angles_cat_temp)-angl_input,sorted(angles_cat_temp)-angl_input))
### Dataset ###
trn_tilt_cls = []
trn_pan_cls = []
for i0 in range(trn_output.shape[0]):
trn_tilt_cls += [angles2Cat(angles_thrshld, trn_output[i0,0])]
trn_pan_cls += [angles2Cat(angles_thrshld, trn_output[i0,1])]
test_tilt_cls = []
test_pan_cls = []
for i0 in range(test_output.shape[0]):
test_tilt_cls += [angles2Cat(angles_thrshld, test_output[i0,0])]
test_pan_cls += [angles2Cat(angles_thrshld, test_output[i0,1])]
np_trn_tilt_cls = np.asarray(trn_tilt_cls)
np_test_tilt_cls = np.asarray(test_tilt_cls)
np_trn_pan_cls = np.asarray(trn_pan_cls)
np_test_pan_cls = np.asarray(test_pan_cls)
```
### From angles classes to 9 head pose classes
```
np_trn_grid_cls = np_trn_pan_cls * n_grid + np_trn_tilt_cls
np_test_grid_cls = np_test_pan_cls * n_grid + np_test_tilt_cls
print(np_trn_grid_cls.shape, np_test_grid_cls.shape)
```
### Shift colors in the training data (additional data augmentation)
The color shift procedure simulates changes in lighting conditions, skin tone, and background.
```
for i0 in range(trn_im.shape[0]):
im_temp = trn_im[i0,:,:,:]
im_temp = np.transpose(im_temp, (1,2,0)) * 255 #transposing and restoring the color
im_temp = shiftHSV(im_temp,
h_shift_lim=(-0.1, 0.1),
s_shift_lim=(-0.1, 0.1),
v_shift_lim=(-0.1, 0.1))
im_temp = np.transpose(im_temp, (2,0,1)) / 255 #transposing and restoring the color
trn_im[i0,:,:,:] = im_temp
im_idx = 390
n_panels = 4
for i in range(n_panels):
i0 = i + im_idx
plt.subplot(1,n_panels,i+1)
im_temp = trn_im[i0,:,:,:]
im_temp = np.transpose(im_temp, (1,2,0))
## BGR -> RGB
plt.imshow(im_temp[:,:,[2,1,0]])
plt.axis('off')
```
## Train the model
### Modify the ResNet 50 model from model zoo
The number of output classes is modified to 9 to match our data.
```
net = resnet50_v1(classes=9, prefix='headpose_')
net.collect_params().initialize()
net.features = pretrained_net.features
from mxnet import init
#net.output.initialize(init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)) # MXNet 1.1.0
net.initialize(init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)) # MXNet 0.12.1
print(net)
```
### Training Helper Functions
```
from mxnet import autograd
from mxnet import nd
# Accuracy Evaluation
def eval_acc(data_iter, net, ctx):
acc = mx.metric.Accuracy()
for i, (data, label) in enumerate(data_iter):
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
output = net(data)
pred = nd.argmax(output, axis=1)
acc.update(preds=pred, labels=label)
return acc.get()[1]
# Training Loop
def train_util(net, train_iter, validation_iter, loss_fn, trainer, ctx, epochs, batch_size):
metric = mx.metric.create(['acc'])
lst_val_acc = []
lst_trn_acc = []
best_accuracy = 0
for epoch in range(epochs):
for i, (data, label) in enumerate(train_iter):
# ensure context
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
with autograd.record():
output = net(data)
loss = loss_fn(output, label)
loss.backward()
trainer.step(data.shape[0])
train_acc = eval_acc(train_iter, net, ctx)
validation_acc = eval_acc(validation_iter, net, ctx)
lst_trn_acc += [train_acc]
lst_val_acc += [validation_acc]
### Save checkpoint weights
#
# We save the model in the symbolic format (i.e. XXX.json and XXX.params)
#
# Export .json and .params files
# chkpt-XX-symbol.json does not come with softmax layer at the end.
net.export('chkpt-{}'.format(epoch))
# Overwrite .json with the one with softmax
net_with_softmax = net(mx.sym.var('data'))
net_with_softmax = mx.sym.SoftmaxOutput(data=net_with_softmax, name="softmax")
net_with_softmax.save('chkpt-{}-symbol.json'.format(epoch))
print("Epoch %s | training_acc %s | val_acc %s " % (epoch, train_acc, validation_acc))
if validation_acc > best_accuracy:
# A network with the best validation accuracy is returned.
net_best = net
net_with_softmax_best = net_with_softmax
best_accuracy = validation_acc
return lst_trn_acc, lst_val_acc, net_best
```
### Fine-tune the model
We run the training.
```
def train(net, ctx, batch_size=64, epochs=10, learning_rate=0.0005):
# Making Gluon iterators
train_iter = mx.gluon.data.DataLoader(mx.gluon.data.ArrayDataset((trn_im.astype(np.float32)-0.5) *2, np_trn_grid_cls),
batch_size=batch_size, shuffle=True, last_batch='discard')
test_iter = mx.gluon.data.DataLoader(mx.gluon.data.ArrayDataset((test_im.astype(np.float32)-0.5) *2 , np_test_grid_cls),
batch_size=batch_size, shuffle=True, last_batch='discard')
net.collect_params().reset_ctx(ctx)
net.hybridize()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate})
return train_util(net, train_iter, test_iter, loss, trainer, ctx, epochs, batch_size)
from mxnet import gluon
import mxnet as mx
ctx = mx.gpu()
lst_trn_acc, lst_val_acc, net_gluon = train(net, ctx, batch_size=64, epochs=5, learning_rate=0.0005)
```
### Save the model in the serial (Gluon) format
We can also save the model in the serial (Gluon) format.
```
'''
Save net (Gluon model) in the serial format -> Pass the weights to another serialized model.
'''
net_gluon.save_params('net_gluon.params')
net_gluon2 = resnet50_v1(classes=9, prefix='headpose_')
net_gluon2.load_params('net_gluon.params', ctx=mx.cpu())
```
### Learning Curve
The plot shows the changes in validation accuracy during training.
```
plt.plot(lst_val_acc)
```
## Take a peek at validation results
Let's load a model and run inference on the validation data.
### Make iterators for the modular model artifact from NumPy arrays
```
batch_size = 300
trn_iter_grid = mx.io.NDArrayIter((trn_im.astype(np.float32) -0.5) * 2, np_trn_grid_cls, batch_size, shuffle=True)
test_iter_grid = mx.io.NDArrayIter((test_im.astype(np.float32)-0.5) * 2, np_test_grid_cls, batch_size)
```
### Load modular model
```
def load_model(s_fname, p_fname):
"""
Load model checkpoint from file.
:return: (arg_params, aux_params)
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
"""
symbol = mx.symbol.load(s_fname)
save_dict = mx.nd.load(p_fname)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return symbol, arg_params, aux_params
model_symbol = 'chkpt-3-symbol.json'
model_params = 'chkpt-3-0000.params'
sym, arg_params, aux_params = load_model(model_symbol, model_params)
ctx = [mx.gpu(i) for i in range(4)] # USE GPU to look up validation results
net2 = mx.mod.Module(symbol=sym,context=ctx)
shape = {"data": (batch_size, trn_im.shape[1], trn_im.shape[2], trn_im.shape[3])}
mx.viz.plot_network(sym, shape=shape)
### You may notice that Gluon-ResNet-50 does not have a batch norm layer right after the input layer.
net2.bind(data_shapes=[trn_iter_grid.provide_data[0]], label_shapes=[trn_iter_grid.provide_label[0]])
net2.set_params(arg_params, aux_params)
```
### Infer validation data iterator
```
preds = net2.predict(test_iter_grid).asnumpy()
print(preds.shape)
### Head Pose Prediction (9 classes)
pred_cls = []
for idx in range(preds.shape[0]):
pred_cls += [int(preds[idx].argmax())]
### Tilt Prediction (3 classes)
pred_tilt = pred_cls % max(np_trn_tilt_cls + 1)
### Pan Prediction (3 classes)
pred_pan = pred_cls // max(np_trn_tilt_cls + 1)
```
## Confusion matrix
A confusion matrix is a useful way to visualize validation accuracy.
```
from sklearn.metrics import confusion_matrix
lst_angl_lbl = ['<< -19.5 dgrs', '-19.5 to 19.5 dgrs', '>> 19.5 dgrs']
lst_angl_lbl_tilt = ['<< -19.5 dgrs (Down)', '-19.5 to 19.5 dgrs', '>> 19.5 dgrs (Up)']
lst_angl_lbl_pan = ['<< -19.5 dgrs (Your Right)', '-19.5 to 19.5 dgrs', '>> 19.5 dgrs (Your Left)']
cm_grid = confusion_matrix(y_true=np_test_grid_cls, # True class for test-set.
y_pred=pred_cls) # Predicted class.
cm_tilt = confusion_matrix(y_true=np_test_tilt_cls, # True class for test-set.
y_pred=pred_tilt) # Predicted class.
cm_pan = confusion_matrix(y_true=np_test_pan_cls, # True class for test-set.
y_pred=pred_pan) # Predicted class.
```
### Confusion matrix helper function
```
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.3f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
```
## Head pose prediction in the 9 classes
### Confusion matrix
```
# Compute confusion matrix Head Pose
cnf_matrix = cm_grid
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=["Grid Class" + str(i) for i in range(max(np_trn_grid_cls + 1))],
title='Confusion matrix, without normalization')
plt.rc('figure', figsize=(10.0, 5.0))
```
### Normalized confusion matrix
```
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=["Grid Class" + str(i) for i in range(9)], normalize=True,
title='Normalized confusion matrix')
plt.rc('figure', figsize=(10.0, 5.0))
```
## Head pose prediction in Tilt (Pitch)
```
# Compute confusion matrix Tilt (Pitch)
cnf_matrix = cm_tilt
np.set_printoptions(precision=3)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=lst_angl_lbl,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=lst_angl_lbl, normalize=True,
title='Normalized confusion matrix')
plt.rc('figure', figsize=(5.0, 4.0))
```
## Head pose prediction in Pan (Yaw)
```
# Compute confusion matrix Pan (Yaw)
cnf_matrix = cm_pan
np.set_printoptions(precision=3)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=lst_angl_lbl,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=lst_angl_lbl, normalize=True,
title='Normalized confusion matrix')
plt.rc('figure', figsize=(5.0, 4.0))
```
## Prediction accuracy
```
i_test_count = 0
for idx in range(np_test_tilt_cls.shape[0]):
if abs(np_test_tilt_cls[idx] - pred_tilt[idx]) + abs(np_test_pan_cls[idx] - pred_pan[idx]) >= 1:
i_test_count += 1
print("The number of false predictions: {}".format(i_test_count))
print("Validation Accuracy: {:.3f}".format(1 - float(i_test_count)/np_test_tilt_cls.shape[0]))
```
## Inference
### Predict a head pose of an arbitrary image
```
dshape = [('data', (1,trn_im.shape[1], trn_im.shape[2], trn_im.shape[3]))]
ctx = mx.cpu() # USE CPU to predict...
net2 = mx.mod.Module(symbol=sym,context=ctx)
net2.bind(for_training=False, data_shapes=dshape)
net2.set_params(arg_params, aux_params)
```
### Load, crop, and resize a head image
```
###
im = cv2.imread("testIMs/" + "IMG_1247.JPG")
im_true = im.copy()
if trn_im.shape[2] == trn_im.shape[3]:
### Aspect Ratio 1:1
crop_uly = 62
crop_height = 360
crop_ulx = 100
crop_width = 360
im = im[crop_uly:crop_uly + crop_height, crop_ulx:crop_ulx + crop_width]
im_crop = im.copy()
plt.imshow(im_crop[:,:,::-1])
plt.show()
print(im.shape)
im = cv2.resize(im, (trn_im.shape[3], trn_im.shape[2]))
plt.imshow(im[:,:,::-1])
plt.show()
print(im.shape)
else:
### Aspect Ratio 16:9
crop_uly = 62
crop_height = 360
crop_ulx = 0
crop_width = 640
im = im[crop_uly:crop_uly + crop_height, crop_ulx:crop_ulx + crop_width]
im_crop = im.copy()
plt.imshow(im_crop[:,:,::-1])
plt.show()
print(im.shape)
im = cv2.resize(im, (trn_im.shape[3], trn_im.shape[2]))
plt.imshow(im[:,:,::-1])
plt.show()
print(im.shape)
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im = (im[np.newaxis, :] / 255.0 - 0.5) * 2
print(np.min(im), np.max(im))
print(im.shape)
```
### Predict a head pose
```
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
net2.forward(Batch([mx.nd.array(im)]))
prob = net2.get_outputs()[0].asnumpy()
pred = prob.argmax()
print(prob)
print("Predicted Class: {}, {:.1f}%".format(pred, prob[0][pred] * 100))
```
### Display a head pose
```
n_grid_cls = 9
n_tilt_cls = 3
### Tilt Prediction
pred_tilt_pic = pred % n_tilt_cls
### Pan Prediction
pred_pan_pic = pred // n_tilt_cls
extent = 0, im_true.shape[1]-1, im_true.shape[0]-1, 0
Panel_Pred = np.zeros((n_tilt_cls, n_tilt_cls))
Panel_Pred[pred_tilt_pic, pred_pan_pic] = 1
Panel_Pred = np.fliplr(Panel_Pred)
Panel_Pred = np.flipud(Panel_Pred)
plt.imshow(im_true[:,:,[2,1,0]], extent=extent)
plt.imshow(Panel_Pred, cmap=plt.cm.Blues, alpha=.2, interpolation='nearest', extent=extent)
plt.axis('off')
arrw_mg = 100
arrw_x_rad = 1 * (prob[0][0] + prob[0][1] + prob[0][2] - prob[0][6] -prob[0][7] - prob[0][8]) * 90 * np.pi / 180.
arrw_y_rad = 1 * (prob[0][0] + prob[0][3] + prob[0][6] - prob[0][2] -prob[0][5] - prob[0][8]) * 90 * np.pi / 180.
plt.arrow(im_true.shape[1]//2, im_true.shape[0]//2,
np.sin(arrw_x_rad) * arrw_mg, np.sin(arrw_y_rad) * arrw_mg,
head_width=10, head_length=10, fc='b', ec='b')
plt.show()
```
## End
| github_jupyter |
<center>
<h4>Universidad Nacional de Córdoba - Diplomatura en Ciencia de Datos, Aprendizaje Automático y sus Aplicaciones</h4>
<h3> Análisis y Visualización de Datos </h3>
</center>
# Probability Distributions
In this notebook we will see how to compute various probabilities and produce different kinds of plots.
```
import io
import pandas
import seaborn
print(seaborn.__version__)
seaborn.set_style("darkgrid")
seaborn.set_context(context='talk', font_scale=1)
```
Note: the first version of this notebook was executed with an artificially generated dataset.
```
df = pandas.read_csv('../data/dataset-ayvd2020.csv')
df.columns = ['timestamp', 'gender', 'age', 'zodiac_sign', 'profession',
'junior_programmer_salary', 'senior_programmer_salary']
df
```
## Probability
Before talking about conditional probability between two variables, we need a method to compute the probability of each event separately. In the lectures we saw that if each of our events is independent and identically distributed, that is, $P(\{a_i\})=1/k$, then the probability of a set $A \subset \Omega$ is the proportion of $A$.
$$P(\{a_i\})=1/k \implies P(A)=|A|/|\Omega|=|A|/k$$
Let's then compute the probability of the event *gender=Mujer*. Our $\Omega$ is the set of all responses in the dataset, each $a_i$ is a variable representing one response, and the set $A$ contains the responses whose gender column has the value "Mujer".
```
p_mujer = len(df[df.gender == 'Mujer']) / len(df)
p_mujer
```
### Plotting frequency and probability
We can visually compare the frequency of different subsets of the data very easily. Along the way we meet our first type of plot: the *bar plot*.
We choose this type of plot because it lets us represent numerical quantities, in this case frequency or probability, for different categorical values, for example gender.
This plot is so useful that Seaborn, the visualization library we will use, provides a method that counts the different possible values for us.
```
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 6))
seaborn.countplot(x=df.gender, color='steelblue')
plt.show()
```
To compute the probability instead of the frequency of all the possible responses for each column, a simpler way is the pandas *value_counts* operation.
```
data = df['gender'].value_counts(normalize=True).reset_index()
data
plt.figure(figsize=(8, 6))
seaborn.barplot(data=data, x='index', y='gender', color='steelblue')
plt.xlabel("Gender") # Rename the labels on the side of the axis
plt.ylabel("Probability")
plt.show()
```
## Conditional probability
Now we can move on to conditional probability. We define it as
$$P(A|B) = \frac{P(A \cap B)}{P(B)}$$
This is equivalent to:
$$P(A|B) = \frac{|A \cap B|}{|B|}$$
## Exercise 1
Compute the probability that a response has *gender=Mujer* given that we know it has *zodiac_sign=Aries*.
```
count_mujer_and_aries = len(df[(df.gender == 'Mujer') & (df.zodiac_sign == 'Aries')])
p_aries = len(df[df.zodiac_sign == 'Aries'])
p_mujer_cond_aries = count_mujer_and_aries/p_aries
msg = 'P(A=mujer|B=aries) = {}'
print(msg.format(p_mujer_cond_aries))
```
## Independence
Now, to know whether two sets are independent, we have to check whether $P(A|B) = P(A)$ or $P(A\cap B) = P(A)*P(B)$.
## Exercise 2
Determine whether the sets of responses with *gender=Mujer* and *zodiac_sign=Aries* are independent or not, using both methods.
```
msg = 'P(A=mujer|B=aries) = {} and P(A) = {}'
p_mujer = len(df[df.gender == 'Mujer']) / len(df)
print(msg.format(p_mujer_cond_aries, p_mujer))
```
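Below is a short sketch of the second method (added as an illustration, not part of the original notebook): compare $P(A \cap B)$ against $P(A) \cdot P(B)$.
```
# Second method (illustrative sketch): compare P(A and B) with P(A) * P(B).
p_mujer_and_aries = len(df[(df.gender == 'Mujer') & (df.zodiac_sign == 'Aries')]) / len(df)
p_mujer_marginal = len(df[df.gender == 'Mujer']) / len(df)
p_aries_marginal = len(df[df.zodiac_sign == 'Aries']) / len(df)
print('P(A and B) = {:.4f}'.format(p_mujer_and_aries))
print('P(A) * P(B) = {:.4f}'.format(p_mujer_marginal * p_aries_marginal))
```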
Based on this dataset, we cannot say that they are independent.
| github_jupyter |
```
import time
import sys, os
import pickle
import numpy as np
import scvelo as scv
import scanpy
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from veloproj import *
from veloproj.model import leastsq_pt
from IPython import display
from matplotlib import pyplot as plt
import anndata
parser = get_parser()
args = parser.parse_args(args=['--lr', '1e-5',
'--n-epochs', '20000',
'--g-rep-dim', '100',
'--k-dim', '100',
'--model-name', 'baseline_tmp.cpt',
'--exp-name', 'baseline_dentategyrus',
'--device', 'cuda:0',
'--gumbsoft_tau', '5',
'--nb_g_src', "X",
])
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
device = torch.device(args.device if args.device.startswith('cuda') and torch.cuda.is_available() else "cpu")
cluster_edges = [("OPC", "OL")]
k_cluster = "clusters"
EXP_NAME = args.exp_name
exp_metrics = {}
def main_AE(args, model, lr=args.lr, weight_decay=args.weight_decay, save_name="tmp.cpt"):
optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
lr = args.lr
i, losses = 0, [sys.maxsize]
min_loss = losses[-1]
model_saved = False
model.train()
while i < args.n_epochs:
i += 1
loss = train_step_AE([tensor_s, tensor_u], model, optimizer, xyids=[0, 1], device=device)
losses.append(loss)
if i % args.log_interval == 0:
if losses[-1] < min_loss:
min_loss = losses[-1]
torch.save(model.state_dict(), os.path.join(args.output, save_name))
model_saved = True
else:
if model_saved:
model.load_state_dict(torch.load(os.path.join(args.output, save_name)))
model = model.to(device)
lr *= args.lr_decay
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print("Train Epoch: {:2d}/{:2d} \tLoss: {:.6f}"
.format(i, args.n_epochs, losses[-1]))
plt.plot(losses[1:])
if losses[-1] < min_loss:
torch.save(model.state_dict(), os.path.join(args.output, save_name))
return model
adata = scv.datasets.dentategyrus()
scv.utils.show_proportions(adata)
scv.pp.filter_and_normalize(adata, min_shared_counts=30, n_top_genes=2000)
scv.pp.moments(adata, n_pcs=30, n_neighbors=30)
print(adata)
scv.tl.velocity(adata, vkey='stc_velocity', mode="stochastic")
scv.tl.velocity_graph(adata, vkey='stc_velocity')
scv.tl.velocity_confidence(adata, vkey='stc_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="stc_velocity", basis='X_umap', color=k_cluster,
title='ScVelo Stochastic Mode')
exp_metrics["stc_mode"] = evaluate(adata, cluster_edges, k_cluster, "stc_velocity")
spliced = adata.layers['Ms']
unspliced = adata.layers['Mu']
tensor_s, tensor_u = torch.FloatTensor(spliced).to(device), torch.FloatTensor(unspliced).to(device)
tensor_x = torch.FloatTensor(adata.X.toarray()).to(device)
tensor_v = torch.FloatTensor(adata.layers['stc_velocity']).to(device)
def expBaseAE(adata, exp_metrics):
n_cells, n_genes = adata.X.shape
in_dim = n_genes
z_dim = args.z_dim
h_dim = args.h_dim
model = get_baseline_AE(in_dim, z_dim, h_dim).to(device)
model = main_AE(args, model, save_name=f"baseAE_{args.model_name}")
model.eval()
with torch.no_grad():
x = model.encoder(tensor_x)
s = model.encoder(tensor_s)
u = model.encoder(tensor_u)
v = estimate_ld_velocity(s, u, device=device).cpu().numpy()
x = x.cpu().numpy()
s = s.cpu().numpy()
u = u.cpu().numpy()
adata = new_adata(adata, x, s, u, v, g_basis=args.nb_g_src)
scv.tl.velocity_graph(adata, vkey='new_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="new_velocity", basis='X_umap', color=['clusters', 'age(days)'],
title="Baseline AutoEncoder",
)
scv.tl.velocity_confidence(adata, vkey='new_velocity')
exp_metrics['Baseline AutoEncoder'] = evaluate(adata, cluster_edges, k_cluster, "new_velocity")
expBaseAE(adata, exp_metrics)
def expAblationCohAgg(adata, exp_metrics):
n_cells, n_genes = adata.X.shape
in_dim = n_genes
z_dim = args.z_dim
h_dim = args.h_dim
g_basis = args.nb_g_src
model = get_ablation_CohAgg(
adata,
in_dim,
z_dim,
h_dim,
g_basis,
device)
model = main_AE(args, model, save_name=f"CohAgg_{args.model_name}")
model.eval()
with torch.no_grad():
x = model.encoder(tensor_x)
s = model.encoder(tensor_s)
u = model.encoder(tensor_u)
v = estimate_ld_velocity(s, u, device=device).cpu().numpy()
x = x.cpu().numpy()
s = s.cpu().numpy()
u = u.cpu().numpy()
adata = new_adata(adata, x, s, u, v, g_basis=args.nb_g_src)
scv.tl.velocity_graph(adata, vkey='new_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="new_velocity", basis='X_umap', color=['clusters', 'age(days)'],
title="Ablation with GCN Only",
)
scv.tl.velocity_confidence(adata, vkey='new_velocity')
exp_metrics['Ablation GCN Only'] = evaluate(adata, cluster_edges, k_cluster, "new_velocity")
expAblationCohAgg(adata, exp_metrics)
def expAblationAttComb(adata, exp_metrics):
from sklearn.decomposition import PCA
n_cells, n_genes = adata.X.shape
z_dim = args.z_dim
g_rep_dim = args.g_rep_dim
h_dim = args.h_dim
k_dim = args.k_dim
gb_tau = args.gumbsoft_tau
G_embeddings = PCA(n_components=g_rep_dim).fit_transform(adata.X.T.toarray())
model = get_ablation_attcomb(
z_dim,
n_genes,
n_cells,
h_dim,
k_dim,
G_embeddings,
g_rep_dim,
gb_tau,
device)
model = main_AE(args, model, save_name=f"AttComb_{args.model_name}")
model.eval()
with torch.no_grad():
x = model.encoder(tensor_x)
s = model.encoder(tensor_s)
u = model.encoder(tensor_u)
v = estimate_ld_velocity(s, u, device=device).cpu().numpy()
x = x.cpu().numpy()
s = s.cpu().numpy()
u = u.cpu().numpy()
adata = new_adata(adata, x, s, u, v, g_basis=args.nb_g_src)
scv.tl.velocity_graph(adata, vkey='new_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="new_velocity", basis='X_umap', color=['clusters', 'age(days)'],
title="Ablation with Attentive Combination Only",
)
scv.tl.velocity_confidence(adata, vkey='new_velocity')
exp_metrics['Ablation AttComb Only'] = evaluate(adata, cluster_edges, k_cluster, "new_velocity")
expAblationAttComb(adata, exp_metrics)
def expPCA(adata, exp_metrics):
from sklearn.decomposition import PCA
pca = PCA(n_components=args.z_dim)
x, s, u, v = sklearn_decompose(pca,
tensor_x.detach().cpu().numpy(),
tensor_s.detach().cpu().numpy(),
tensor_u.detach().cpu().numpy(),
tensor_v.detach().cpu().numpy()
)
adata = new_adata(adata, x, s, u, v, g_basis=args.nb_g_src)
scv.tl.velocity_graph(adata, vkey='new_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="new_velocity", basis='X_umap', color=['clusters', 'age(days)'],
title="Principle Component Analysis",
)
scv.tl.velocity_confidence(adata, vkey='new_velocity')
exp_metrics['Baseline PCA'] = evaluate(adata, cluster_edges, k_cluster, "new_velocity")
expPCA(adata, exp_metrics)
def expFA(adata, exp_metrics):
from sklearn.decomposition import FactorAnalysis
method = FactorAnalysis(n_components=args.z_dim)
x, s, u, v = sklearn_decompose(method,
tensor_x.detach().cpu().numpy(),
tensor_s.detach().cpu().numpy(),
tensor_u.detach().cpu().numpy(),
tensor_v.detach().cpu().numpy()
)
adata = new_adata(adata, x, s, u, v, g_basis=args.nb_g_src)
scv.tl.velocity_graph(adata, vkey='new_velocity')
scv.pl.velocity_embedding_stream(adata, vkey="new_velocity", basis='X_umap', color=['clusters', 'age(days)'],
title="Factor Analysis",
)
scv.tl.velocity_confidence(adata, vkey='new_velocity')
exp_metrics['Baseline FA'] = evaluate(adata, cluster_edges, k_cluster, "new_velocity")
expFA(adata, exp_metrics)
with open("{}.pkl".format(EXP_NAME), 'wb') as out_file:
pickle.dump(exp_metrics, out_file)
```
| github_jupyter |
# Data Exploration
This lab is *optional*. It demonstrates advanced Pandas usage and in-depth data analysis.
---
Learning objectives:
1. Learn useful patterns for exploring data before modeling
2. Gain an understanding of the dataset and identify any data issues.
The goal of this notebook is to explore our base tables before we begin feature engineering and modeling. We will explore the price history of stocks in the S&P 500.
* Price history : Price history of stocks
* S&P 500 : A list of all companies and symbols for companies in the S&P 500
For our analysis, let's limit the price history to the year 2000 and later. In general, the further back historical data goes, the lower its predictive power tends to be.
```
import os
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
import numpy as np
import pandas as pd
import seaborn as sns
from google.cloud import bigquery
from IPython import get_ipython
from IPython.core.magic import register_cell_magic
from matplotlib import pyplot as plt
bq = bigquery.Client(project=PROJECT)
# Allow you to easily have Python variables in SQL query.
@register_cell_magic("with_globals")
def with_globals(line, cell):
contents = cell.format(**globals())
if "print" in line:
print(contents)
get_ipython().run_cell(contents)
```
## Preparing the dataset
Let's create the dataset in BigQuery in our project and import the stock data by running the following cells:
```
!bq mk stock_src
%%bash
TABLE=price_history
SCHEMA=symbol:STRING,Date:DATE,Open:FLOAT,Close:FLOAT
test -f $TABLE.csv || unzip ../stock_src/$TABLE.csv.zip
gsutil -m cp $TABLE.csv gs://$BUCKET/stock_src/$TABLE.csv
bq load --source_format=CSV --skip_leading_rows=1 \
stock_src.$TABLE gs://$BUCKET/stock_src/$TABLE.csv $SCHEMA
%%bash
TABLE=eps
SCHEMA=date:DATE,company:STRING,symbol:STRING,surprise:STRING,reported_EPS:FLOAT,consensus_EPS:FLOAT
test -f $TABLE.csv || unzip ../stock_src/$TABLE.csv.zip
gsutil -m cp $TABLE.csv gs://$BUCKET/stock_src/$TABLE.csv
bq load --source_format=CSV --skip_leading_rows=1 \
stock_src.$TABLE gs://$BUCKET/stock_src/$TABLE.csv $SCHEMA
%%bash
TABLE=snp500
SCHEMA=company:STRING,symbol:STRING,industry:STRING
test -f $TABLE.csv || unzip ../stock_src/$TABLE.csv.zip
gsutil -m cp $TABLE.csv gs://$BUCKET/stock_src/$TABLE.csv
bq load --source_format=CSV --skip_leading_rows=1 \
stock_src.$TABLE gs://$BUCKET/stock_src/$TABLE.csv $SCHEMA
```
Let's look at the tables and columns we have for analysis. Please query the `INFORMATION_SCHEMA`.
**Learning objective 1.**
```
%%with_globals
%%bigquery --project {PROJECT}
SELECT table_name, column_name, data_type
FROM `stock_src.INFORMATION_SCHEMA.COLUMNS`
ORDER BY table_name, ordinal_position
```
## Price History
**TODO**: Visualize stock symbols from the dataset.
```
%%with_globals
%%bigquery --project {PROJECT}
SELECT *
FROM `stock_src.price_history`
LIMIT 10
def query_stock(symbol):
return bq.query(
"""
# TODO: query a specific stock
""".format(
symbol
)
).to_dataframe()
df_stock = query_stock("GOOG")
df_stock.Date = pd.to_datetime(df_stock.Date)
ax = df_stock.plot(x="Date", y="Close", title="price")
# Add smoothed plot.
df_stock["Close_smoothed"] = df_stock.Close.rolling(100, center=True).mean()
df_stock.plot(x="Date", y="Close_smoothed", ax=ax);
```
**TODO 2**: Compare individual stocks to the S&P 500.
```
SP500_SYMBOL = "gspc"
df_sp = query_stock(SP500_SYMBOL)
# TODO: visualize S&P 500 price
```
Let's see how the price of stocks changes over time on a yearly basis. Using the `LAG` function we can compute the change in stock price year-over-year.
Let's compute the average close difference for each year. This could, of course, be done in Pandas. Oftentimes it's useful to use some combination of BigQuery and Pandas for exploratory analysis. In general, it's most effective to let BigQuery do the heavy-duty processing and then use Pandas for smaller data and visualization.
**Learning objective 1, 2**
```
%%with_globals
%%bigquery df --project {PROJECT}
WITH
with_year AS
(
SELECT symbol,
EXTRACT(YEAR FROM date) AS year,
close
FROM `stock_src.price_history`
WHERE symbol in (SELECT symbol FROM `stock_src.snp500`)
),
year_aggregated AS
(
SELECT year, symbol, AVG(close) as avg_close
FROM with_year
WHERE year >= 2000
GROUP BY year, symbol
)
SELECT year, symbol, avg_close as close,
(LAG(
--# TODO: compute a year lag on avg_close
))
AS next_yr_close
FROM year_aggregated
ORDER BY symbol, year
```
```
Compute the year-over-year percentage increase.
```
df.dropna(inplace=True)
df["percent_increase"] = (df.next_yr_close - df.close) / df.close
```
Let's visualize the yearly percentage increase for a few randomly chosen stocks.
```
def get_random_stocks(n=5):
random_stocks = df.symbol.sample(n=n, random_state=3)
rand = df.merge(random_stocks)
return rand[["year", "symbol", "percent_increase"]]
rand = get_random_stocks()
for symbol, _df in rand.groupby("symbol"):
plt.figure()
sns.barplot(x="year", y="percent_increase", data=_df)
plt.title(symbol)
```
There have been some major fluctuations in individual stocks. For example, there were major drops during the early 2000s for tech companies.
```
df.sort_values("percent_increase").head()
stock_symbol = "YHOO"
%%with_globals
%%bigquery df --project {PROJECT}
SELECT date, close
FROM `stock_src.price_history`
WHERE symbol='{stock_symbol}'
ORDER BY date
ax = df.plot(x="date", y="close")
```
**Stock splits** can also impact our data - causing a stock price to rapidly drop. In practice, we would need to clean all of our stock data to account for this. This would be a major effort! Fortunately, in the case of [IBM](https://www.fool.com/investing/2017/01/06/ibm-stock-split-will-2017-finally-be-the-year-shar.aspx), for example, all stock splits occurred before the year 2000.
**Learning objective 2**
**TODO**: Query the IBM stock history and to visualize how the stock splits affect our data. A stock split occurs when there is a sudden drop in price.
```
stock_symbol = "IBM"
%%with_globals
%%bigquery df --project {PROJECT}
SELECT date, close
FROM `stock_src.price_history`
WHERE symbol='{stock_symbol}'
ORDER BY date
# TODO: can you visualize when the major stock splits occurred?
```
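One possible approach (a sketch, not the official lab solution; the -30% threshold is an assumption): flag days with a large single-day percentage drop and mark them on the price plot.
```
# Illustrative sketch: large one-day drops are candidate split dates.
df["pct_change"] = df.close.pct_change()
candidate_splits = df[df.pct_change < -0.3]  # assumption: a >30% one-day drop
ax = df.plot(x="date", y="close")
for d in candidate_splits.date:
    ax.axvline(d, color="red", alpha=0.5)
print(candidate_splits[["date", "close", "pct_change"]])
```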
## S&P companies list
```
%%with_globals
%%bigquery df --project {PROJECT}
SELECT *
FROM `stock_src.snp500`
df.industry.value_counts().plot(kind="barh");
```
We can join the price histories table with the S&P 500 table to compare industries:
**Learning objective 1,2**
```
%%with_globals
%%bigquery df --project {PROJECT}
WITH sp_prices AS
(
SELECT a.*, b.industry
FROM `stock_src.price_history` a
JOIN `stock_src.snp500` b
USING (symbol)
WHERE date >= "2000-01-01"
)
SELECT Date, industry, AVG(close) as close
FROM sp_prices
GROUP BY Date, industry
ORDER BY industry, Date
df.head()
```
Using pandas we can "unstack" our table so that each industry has its own column. This will be useful for plotting.
```
# Pandas `unstack` to make each industry a column. Useful for plotting.
df_ind = df.set_index(["industry", "Date"]).unstack(0).dropna()
df_ind.columns = [c[1] for c in df_ind.columns]
df_ind.head()
ax = df_ind.plot(figsize=(16, 8))
# Move legend down.
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2)
```
Let's scale each industry using min/max scaling. This puts all of the stocks on the same scale; otherwise it can be hard to see the changes in stocks over time across industries.
**Learning objective 1**
```
def min_max_scale(df):
    return (df - df.min()) / (df.max() - df.min())
scaled = min_max_scale(df_ind)
ax = scaled.plot(figsize=(16, 8))
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2);
```
We can also create a smoothed version of the plot above using a [rolling mean](https://en.wikipedia.org/wiki/Moving_average). This is a useful transformation to make when visualizing time-series data.
```
SMOOTHING_WINDOW = 30 # Days.
rolling = scaled.copy()
for col in scaled.columns:
rolling[col] = scaled[col].rolling(SMOOTHING_WINDOW).mean()
ax = rolling.plot(figsize=(16, 8))
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2);
```
Information technology had a large crash during the early 2000s and again in 2008/2009; along with all other stocks. After 2008, some industries were a bit slower to recover than other industries.
BONUS: In the next lab, we will want to predict the price of the stock in the future. What are some features that we can use to predict future price? Try visualizing some of these features.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
```
from IPython.display import Image
Image('../../Python_probability_statistics_machine_learning_2E.png',width=200)
```
It is sometimes very difficult to unequivocally attribute outcomes to causal
factors. For example, did your experiment generate the outcome you were hoping
for or not? Maybe something did happen, but the effect is not pronounced
enough
to separate it from inescapable measurement errors or other
factors in the
ambient environment? Hypothesis testing is a powerful
statistical method to
address these questions. Let's begin by again
considering our coin-tossing
experiment with unknown parameter $p$. Recall
that the individual coin-flips
are Bernoulli distributed. The first step is
to establish separate hypotheses.
First, $H_0$ is the so-called null
hypothesis. In our case this can be
$$
H_0 \colon \theta < \frac{1}{2}
$$
and the alternative hypothesis is then
$$
H_1 \colon \theta \geq \frac{1}{2}
$$
With this set up, the question now boils down to figuring out which
hypothesis
the data is most consistent with. To choose between these, we need
a
statistical test that is a function, $G$, of the sample set
$\mathbf{X}_n=\left\{ X_i \right\}_n $ into the real line, where $X_i$ is the
heads or tails outcome ($X_i \in \lbrace 0,1 \rbrace$). In other words, we
compute $G(\mathbf{X}_n)$ and check if it exceeds a threshold $c$. If not, then
we declare $H_0$ (otherwise, declare $H_1$). Notationally, this is the
following:
$$
\begin{align*}
G(\mathbf{X}_n) < c & \Rightarrow H_0 \\
G(\mathbf{X}_n) \geq c & \Rightarrow H_1
\end{align*}
$$
In summary, we have the observed data $\mathbf{X}_n$ and a function
$G$ that
maps that data onto the real line. Then, using the
constant $c$ as a threshold,
the inequality effectively divides the real line
into two parts, one
corresponding to each of the hypotheses.
Whatever this test $G$ is, it will
make mistakes of two types --- false
negatives and false positives. The false
positives arise from the case where we
declare $H_0$ when the test says we
should declare $H_1$. This is
summarized in the Table [1](#tbl:decision).
<!-- Equation labels as ordinary links -->
<div id="tbl:decision"></div>
| | Declare $H_0$ | Declare $H_1$ |
|---|---|---|
| $H_0$ True | Correct | False positive (Type I error) |
| $H_1$ True | False negative (Type II error) | Correct (true-detect) |

Table 1: Truth table for hypothesis testing.
For this example, here are the false positives (aka false alarms):
$$
P_{FA} = \mathbb{P}\left( G(\mathbf{X}_n) > c \mid \theta \leq \frac{1}{2}
\right)
$$
Or, equivalently,
$$
P_{FA} = \mathbb{P}\left( G(\mathbf{X}_n) > c \mid H_0 \right)
$$
Likewise, the other error is a false negative, which we can write
analogously
as
$$
P_{FN} = \mathbb{P}\left( G(\mathbf{X}_n) < c \vert H_1\right)
$$
By choosing some acceptable values for either of these errors,
we can solve for
the other one. The practice is usually to pick a value of
$P_{FA}$ and then
find the corresponding value of $P_{FN}$. Note that it is
traditional in
engineering to speak about *detection probability*, which is
defined as
$$
P_{D} = 1- P_{FN} = \mathbb{P}\left( G(\mathbf{X}_n) > c \mid H_1\right)
$$
In other words, this is the probability of declaring $H_1$ when the
test
exceeds the threshold. This is otherwise known as the *probability of a
true
detection* or *true-detect*.
## Back to the Coin Flipping Example
In our
previous maximum likelihood discussion, we wanted to derive an
estimator for the
*value* of the probability of heads for the coin
flipping experiment. For
hypothesis testing, we want to ask a softer
question: is the probability of heads
greater or less than $\frac{1}{2}$? As we
just established, this leads to
the two hypotheses:
$$
H_0 \colon \theta < \frac{1}{2}
$$
versus,
$$
H_1 \colon \theta > \frac{1}{2}
$$
Let's assume we have five observations. Now we need the $G$ function
and a
threshold $c$ to help pick between the two hypotheses. Let's count the
number of
heads observed in five observations as our
criterion. Thus, we have
$$
G(\mathbf{X}_5) := \sum_{i=1}^5 X_i
$$
and, suppose further that we pick $H_1$ only if exactly five out of
five
observations are heads. We'll call this the *all-heads* test.
Now, because all
of the $X_i$ are random variables, so is $G$ and we must
find the corresponding
probability mass function for $G$. Assuming the
individual coin tosses are
independent, the probability of five heads is $\theta^5$.
This means that the
probability of rejecting the $H_0$ hypothesis (and choosing
$H_1$, because there
are only two choices here) based on the unknown underlying
probability is
$\theta^5$. In the parlance, this is known as the *power function*,
denoted by $\beta$, as in
$$
\beta(\theta) = \theta^5
$$
Let's get a quick plot of this in [Figure](#fig:Hypothesis_testing_001).
```
%matplotlib inline
from matplotlib.pylab import subplots
import numpy as np
fig,ax=subplots()
fig.set_size_inches((6,3))
xi = np.linspace(0,1,50)
_=ax.plot(xi, (xi)**5,'-k',label='all heads')
_=ax.set_xlabel(r'$\theta$',fontsize=22)
_=ax.plot(0.5,(0.5)**5,'ko')
fig.tight_layout()
fig.savefig('fig-statistics/Hypothesis_Testing_001.png')
```
<!-- dom:FIGURE: [fig-statistics/Hypothesis_Testing_001.png, width=500
frac=0.85] Power function for the all-heads test. The dark circle indicates the
value of the function indicating $\alpha$. <div
id="fig:Hypothesis_testing_001"></div> -->
<!-- begin figure -->
<div
id="fig:Hypothesis_testing_001"></div>
<p>Power function for the all-heads
test. The dark circle indicates the value of the function indicating
$\alpha$.</p>
<img src="fig-statistics/Hypothesis_Testing_001.png" width=500>
<!-- end figure -->
Now, we have the following false alarm probability,
$$
P_{FA} = \mathbb{P}( G(\mathbf{X}_n)= 5 \vert H_0) =\mathbb{P}( \theta^5
\vert H_0)
$$
Notice that this is a function of $\theta$, which means there are
many false
alarm probability values that correspond to this test. To be on the
conservative
side, we'll pick the supremum (i.e., maximum) of this function,
which is known
as the *size* of the test, traditionally denoted by $\alpha$,
$$
\alpha = \sup_{\theta \in \Theta_0} \beta(\theta)
$$
with domain $\Theta_0 = \lbrace \theta < 1/2 \rbrace$ which in our case is
$$
\alpha = \sup_{\theta < \frac{1}{2}} \theta^5 = \left(\frac{1}{2}\right)^5 =
0.03125
$$
Likewise, for the detection probability,
$$
\mathbb{P}_{D}(\theta) = \mathbb{P}( \theta^5 \vert H_1)
$$
which is again a function of the parameter $\theta$. The problem with
this test
is that the $P_{D}$ is pretty low for most of the domain of
$\theta$. For
instance, values in the nineties for $P_{D}$
only happen when $\theta > 0.98$.
In other words, if the coin produces
heads 98 times out of 100, then we can
detect $H_1$ reliably. Ideally, we want
a test that is zero for the domain
corresponding to $H_0$ (i.e., $\Theta_0$) and
equal to one otherwise.
Unfortunately, even if we increase the length of the
observed sequence, we
cannot escape this effect with this test. You can try
plotting $\theta^n$ for
larger and larger values of $n$ to see this.
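Following that suggestion, here is a short added sketch that plots $\theta^n$ for a few values of $n$; the curve stays near zero for most of the domain no matter how large $n$ becomes.
```
# Added sketch: the all-heads power function theta**n for increasing n.
from matplotlib.pylab import subplots
import numpy as np
fig, ax = subplots()
xi = np.linspace(0, 1, 50)
for n in [5, 10, 20, 50]:
    _ = ax.plot(xi, xi**n, label='n={}'.format(n))
_ = ax.set_xlabel(r'$\theta$', fontsize=22)
_ = ax.legend(loc=0)
```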
### Majority Vote Test
Due to the
problems with the detection probability in the all-heads test, maybe
we can
think of another test that will have the performance we want? Suppose we
reject
$H_0$ if the majority of the observations are heads. Then, using the
same
reasoning as above, we have
$$
\beta(\theta) = \sum_{k=3}^5 \binom{5}{k} \theta^k(1-\theta)^{5-k}
$$
[Figure](#fig:Hypothesis_testing_002) shows the power function
for both the
majority vote and the all-heads tests.
```
fig,ax=subplots()
fig.set_size_inches((6,3))
from sympy.abc import theta,k # get some variable symbols
import sympy as S
xi = np.linspace(0,1,50)
expr=S.Sum(S.binomial(5,k)*theta**(k)*(1-theta)**(5-k),(k,3,5)).doit()
_=ax.plot(xi, (xi)**5,'-k',label='all heads')
_=ax.plot(xi, S.lambdify(theta,expr)(xi),'--k',label='majority vote')
_=ax.plot(0.5, (0.5)**5,'ko')
_=ax.plot(0.5, S.lambdify(theta,expr)(0.5),'ko')
_=ax.set_xlabel(r'$\theta$',fontsize=22)
_=ax.legend(loc=0)
fig.tight_layout()
fig.savefig('fig-statistics/Hypothesis_Testing_002.png')
```
<!-- dom:FIGURE: [fig-statistics/Hypothesis_Testing_002.png, width=500
frac=0.85] Compares the power function for the all-heads test with that of the
majority-vote test. <div id="fig:Hypothesis_testing_002"></div> -->
<!-- begin
figure -->
<div id="fig:Hypothesis_testing_002"></div>
<p>Compares the power
function for the all-heads test with that of the majority-vote test.</p>
<img
src="fig-statistics/Hypothesis_Testing_002.png" width=500>
<!-- end figure -->
In this case, the new test has *size*
$$
\alpha = \sup_{\theta < \frac{1}{2}} \theta^{5} + 5 \theta^{4} \left(- \theta
+ 1\right) + 10 \theta^{3} \left(- \theta + 1\right)^{2} = \frac{1}{2}
$$
As before, we only get upwards of 90% detection
probability when the
underlying parameter $\theta > 0.75$.
Let's see what happens when we consider
more than five samples. For
example, let's suppose that we have $n=100$ samples
and we want to
vary the threshold for the majority vote test. For example, let's
have
a new test where we declare $H_1$ when $k=60$ out of the 100 trials
turns
out to be heads. What is the $\beta$ function in this case?
$$
\beta(\theta) = \sum_{k=60}^{100} \binom{100}{k} \theta^k(1-\theta)^{100-k}
$$
This is too complicated to write by hand, but the statistics module
in Sympy
has all the tools we need to compute this.
```
from sympy.stats import P, Binomial
theta = S.symbols('theta',real=True)
X = Binomial('x',100,theta)
beta_function = P(X>60)
print (beta_function.subs(theta,0.5)) # alpha
print (beta_function.subs(theta,0.70))
```
These results are much better than before because the $\beta$
function is much
steeper. If we declare $H_1$ when we observe more than 60 heads out of 100
trials,
then we wrongly declare $H_1$ approximately 1.8% of the
time. Otherwise, if it
happens that the true value for $p>0.7$, we will
conclude correctly
approximately 97% of the time. A quick simulation can sanity
check these results
as shown below:
```
from scipy import stats
rv=stats.bernoulli(0.5) # true p = 0.5
# number of false alarms ~ 0.018
print (sum(rv.rvs((1000,100)).sum(axis=1)>60)/1000.)
```
The above code is pretty dense so let's unpack it. In the first line, we use
the `scipy.stats` module to define the
Bernoulli random variable for the coin
flip. Then, we use the `rvs` method of
the variable to generate 1000 trials of
the experiment where each trial
consists of 100 coin flips. This generates a
$1000 \times 100$ matrix where the
rows are the individual trials and the
columns are the outcomes of each
respective set of 100 coin flips. The
`sum(axis=1)` part computes the sum across the
columns. Because the values of
the embedded matrix are only `1` or `0` this
gives us the count of flips that
are heads per row. The next `>60` part
computes the boolean 1000-long vector of
values that are bigger than 60. The
final `sum` adds these up. Again, because
the entries in the array are `True`
or `False` the `sum` computes the count of
times the number of heads has
exceeded 60 per 100 coin flips in each of 1000
trials. Then, dividing this
number by 1000 gives a quick approximation of false
alarm probability we
computed above for this case where the true value of
$p=0.5$.
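As a complementary added check, the same simulation with the true value $p=0.7$ approximates the detection probability computed from the $\beta$ function above:
```
# Added sketch: estimate the detection probability when the true p = 0.7.
from scipy import stats
rv = stats.bernoulli(0.7)
# Fraction of 1000 trials (100 flips each) with more than 60 heads;
# compare with beta_function.subs(theta, 0.70) computed earlier.
print(sum(rv.rvs((1000, 100)).sum(axis=1) > 60) / 1000.)
```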
## Receiver Operating Characteristic
Because the majority vote test
is a binary test, we can compute the *Receiver
Operating Characteristic* (ROC)
which is the graph of the $(P_{FA},
P_D)$. The term comes from radar systems but
is a very general method for
consolidating all of these issues into a single
graph. Let's consider a typical
signal processing example with two hypotheses.
In $H_0$, there is noise but no
signal present at the receiver,
$$
H_0 \colon X = \epsilon
$$
where $\epsilon \sim \mathcal{N}(0,\sigma^2)$ represents additive
noise. In the
alternative hypothesis, there is a deterministic signal at the receiver,
$$
H_1 \colon X = \mu + \epsilon
$$
Again, the problem is to choose between these two hypotheses. For
$H_0$, we
have $X \sim \mathcal{N}(0,\sigma^2)$ and for $H_1$, we have $ X \sim
\mathcal{N}(\mu,\sigma^2)$. Recall that we only observe values for $x$ and
must
pick either $H_0$ or $H_1$ from these observations. Thus, we need a
threshold,
$c$, to compare $x$ against in order to distinguish the two
hypotheses.
[Figure](#fig:Hypothesis_testing_003) shows the probability density
functions
under each of the hypotheses. The dark vertical line is the threshold
$c$. The
gray shaded area is the probability of detection, $P_D$ and the shaded
area is
the probability of false alarm, $P_{FA}$. The test evaluates every
observation
of $x$ and concludes $H_0$ if $x<c$ and $H_1$ otherwise.
<!-- dom:FIGURE: [fig-
statistics/Hypothesis_Testing_003.png, width=500 frac=0.85] The two density
functions for the $H_0$ and $H_1$ hypotheses. The shaded gray area is the
detection probability and the shaded dark gray area is the probability of false
alarm. The vertical line is the decision threshold. <div
id="fig:Hypothesis_testing_003"></div> -->
<!-- begin figure -->
<div
id="fig:Hypothesis_testing_003"></div>
<p>The two density functions for the
$H_0$ and $H_1$ hypotheses. The shaded gray area is the detection probability
and the shaded dark gray area is the probability of false alarm. The vertical
line is the decision threshold.</p>
<img src="fig-
statistics/Hypothesis_Testing_003.png" width=500>
<!-- end figure -->
**Programming Tip.**
The shading shown in [Figure](#fig:Hypothesis_testing_003)
comes from
Matplotlib's `fill_between` function. This function has a `where`
keyword
argument to specify which part of the plot to apply shading with
specified
`color` keyword argument. Note there is also a `fill_betweenx`
function that
fills horizontally. The `text` function can place formatted
text
anywhere in the plot and can utilize basic \LaTeX{} formatting.
As we slide
the threshold left and right along the horizontal axis, we naturally change the
corresponding areas under
each of the curves shown in
[Figure](#fig:Hypothesis_testing_003) and thereby
change the values of $P_D$ and
$P_{FA}$. The contour that emerges from sweeping
the threshold this way is the
ROC as shown in [Figure](#fig:Hypothesis_testing_004). This figure also shows
the diagonal line which
corresponds to making decisions based on the flip of a
fair coin. Any
meaningful test must do better than coin flipping so the more the
ROC bows up
to the top left corner of the graph, the better. Sometimes ROCs are
quantified
into a single number called the *area under the curve* (AUC), which
varies from
0.5 to 1.0 as shown. In our example, what separates the two
probability density
functions is the value of $\mu$. In a real situation, this
would be determined
by signal processing methods that include many complicated
trade-offs. The key
idea is that whatever those trade-offs are, the test itself
boils down to the
separation between these two density functions --- good tests
separate the two
density functions and bad tests do not. Indeed, when there is
no separation, we
arrive at the diagonal-line coin-flipping situation we just
discussed.
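Here is a small added sketch (assuming the same unit-variance Gaussian setup with $\mu=1$) that sweeps the threshold numerically, traces out the ROC, and approximates the AUC:
```
# Added sketch: sweep the threshold c for H0: N(0,1) versus H1: N(1,1),
# trace the ROC, and approximate the AUC with the trapezoidal rule.
from matplotlib.pylab import subplots
from scipy.stats import norm
import numpy as np
mu = 1.0
c = np.linspace(-4, 6, 200)
pfa = norm.sf(c, loc=0, scale=1)   # P(X > c | H0)
pd = norm.sf(c, loc=mu, scale=1)   # P(X > c | H1)
print('AUC ~ {:.3f}'.format(np.trapz(pd[::-1], pfa[::-1])))
fig, ax = subplots()
_ = ax.plot(pfa, pd, '-k')
_ = ax.plot([0, 1], [0, 1], '--k')  # coin-flipping diagonal
_ = ax.set_xlabel(r'$P_{FA}$')
_ = ax.set_ylabel(r'$P_D$')
```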
What values for $P_D$ and $P_{FA}$ are considered *acceptable*
depends on the
application. For example, suppose you are testing for a fatal
disease. It could
be that you are willing to accept a relatively high $P_{FA}$
value if that
corresponds to a good $P_D$ because the test is relatively cheap
to administer
compared to the alternative of missing a detection. On the other
hand,
maybe a false alarm triggers an expensive response, so that minimizing
these alarms is more important than potentially missing a detection. These
trade-offs can only be determined by the application and design factors.
<!--
dom:FIGURE: [fig-statistics/Hypothesis_Testing_004.png, width=500 frac=0.65] The
Receiver Operating Characteristic (ROC) corresponding to
[Figure](#fig:Hypothesis_testing_003). <div
id="fig:Hypothesis_testing_004"></div> -->
<!-- begin figure -->
<div
id="fig:Hypothesis_testing_004"></div>
<p>The Receiver Operating Characteristic
(ROC) corresponding to [Figure](#fig:Hypothesis_testing_003).</p>
<img src="fig-
statistics/Hypothesis_Testing_004.png" width=500>
<!-- end figure -->
## P-Values
There are a lot of moving parts in hypothesis testing. What we need
is
a way to consolidate the findings. The idea is that we want to find
the minimum
level at which the test rejects $H_0$. Thus, the p-value
is the probability,
under $H_0$, that the test-statistic is at least
as extreme as what was actually
observed. Informally, this means
that smaller values imply that $H_0$ should be
rejected, although
this doesn't mean that large values imply that $H_0$ should
be
retained. This is because a large p-value can arise from either $H_0$
being
true or the test having low statistical power.
If $H_0$ is true, the p-value is
uniformly distributed in the interval $(0,1)$.
If $H_1$ is true, the
distribution of the p-value will concentrate closer to
zero. For continuous
distributions, this can be proven rigorously and implies
that if we reject $H_0$
when the corresponding p-value is less than $\alpha$,
then the probability of a
false alarm is $\alpha$. Perhaps it helps to
formalize this a bit before
computing it. Suppose $\tau(X)$ is a test
statistic that rejects $H_0$ as it
gets bigger. Then, for each sample $x$,
corresponding to the data we actually
have on-hand, we define
$$
p(x) = \sup_{\theta \in \Theta_0} \mathbb{P}_{\theta}(\tau(X) > \tau(x))
$$
This equation states that the supremum (i.e., maximum)
probability that the
test statistic, $\tau(X)$, exceeds the value for
the test statistic on this
particular data ($\tau(x)$) over the
domain $\Theta_0$ is defined as the
p-value. Thus, this embodies a
worst-case scenario over all values of $\theta$.
Here's one way to think about this. Suppose you rejected $H_0$, and someone
says
that you just got *lucky* and somehow just drew data that happened to
correspond
to a rejection of $H_0$. What p-values provide is a way to address
this by
capturing the odds of just a favorable data-draw. Thus, suppose that
your
p-value is 0.05. Then, what you are showing is that the odds of just
drawing
that data sample, given $H_0$ is in force, is just 5%. This means that
there's a
5% chance that you somehow lucked out and got a favorable draw of
data.
Let's
make this concrete with an example. Given the majority-vote rule above,
suppose
we actually do observe three of five heads. Given the $H_0$, the
probability of
observing this event is the following:
$$
p(x) =\sup_{\theta \in \Theta_0} \sum_{k=3}^5\binom{5}{k}
\theta^k(1-\theta)^{5-k} = \frac{1}{2}
$$
For the all-heads test, the corresponding computation is the following:
$$
p(x) =\sup_{\theta \in \Theta_0} \theta^5 = \frac{1}{2^5} = 0.03125
$$
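Both numbers can be checked quickly with Scipy (added sketch):
```
# Added check: both p-values follow from the binomial distribution at theta = 1/2.
from scipy.stats import binom
print(binom.sf(2, 5, 0.5))   # P(at least 3 heads in 5 flips) = 0.5
print(binom.pmf(5, 5, 0.5))  # P(all 5 heads) = 0.03125
```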
From just looking at these p-values, you might get the feeling that the second
test is better, but we still have the same detection probability issues we
discussed above; so, p-values help in summarizing some aspects of our
hypothesis
testing, but they do *not* summarize all the salient aspects of the
*entire*
situation.
## Test Statistics
As we have seen, it is difficult to derive good
test statistics for hypothesis
testing without a systematic process. The
Neyman-Pearson Test is derived from
fixing a false-alarm value ($\alpha$) and
then maximizing the detection
probability. This results in the Neyman-Pearson
Test,
$$
L(\mathbf{x}) = \frac{f_{X|H_1}(\mathbf{x})}{f_{X|H_0}(\mathbf{x})}
\underset{H_0}{\overset{H_1}{\gtrless}} \gamma
$$
where $L$ is the likelihood ratio and where the threshold
$\gamma$ is chosen
such that
$$
\int_{x:L(\mathbf{x})>\gamma} f_{X|H_0}(\mathbf{x}) d\mathbf{x}=\alpha
$$
The Neyman-Pearson Test is one of a family of tests that use
the likelihood
ratio.
**Example.** Suppose we have a receiver and we want to distinguish
whether just noise ($H_0$) or signal plus noise ($H_1$) is received.
For the
noise-only case, we have $x\sim \mathcal{N}(0,1)$ and for the
signal plus
noise case we have $x\sim \mathcal{N}(1,1)$. In other
words, the mean of the
distribution shifts in the presence of the
signal. This is a very common problem
in signal processing and
communications. The Neyman-Pearson Test then boils down
to the
following,
$$
L(x)= e^{-\frac{1}{2}+x}\underset{H_0}{\overset{H_1}{\gtrless}}\gamma
$$
Now we have to find the threshold $\gamma$ that solves the
maximization problem
that characterizes the Neyman-Pearson Test. Taking
the natural logarithm and
re-arranging gives,
$$
x\underset{H_0}{\overset{H_1}{\gtrless}} \frac{1}{2}+\log\gamma
$$
The next step is find $\gamma$ corresponding to the desired
$\alpha$ by
computing it from the following,
$$
\int_{1/2+\log\gamma}^{\infty} f_{X|H_0}(x)dx = \alpha
$$
For example, taking $\alpha=1/100$, gives
$\gamma\approx 6.21$. To summarize
the test in this case, we have,
$$
x\underset{H_0}{\overset{H_1}{\gtrless}} 2.32
$$
Thus, if we measure $X$ and see that its value
exceeds the threshold above, we
declare $H_1$ and otherwise
declare $H_0$. The following code shows how to
solve
this example using Sympy and Scipy. First, we
set up the likelihood ratio,
```
import sympy as S
from sympy import stats
s = stats.Normal('s',1,1) # signal+noise
n = stats.Normal('n',0,1) # noise
x = S.symbols('x',real=True)
L = stats.density(s)(x)/stats.density(n)(x)
```
Next, to find the $\gamma$ value,
```
g = S.symbols('g',positive=True) # define gamma
v=S.integrate(stats.density(n)(x),
(x,S.Rational(1,2)+S.log(g),S.oo))
```
**Programming Tip.**
Providing additional information regarding the Sympy
variable by using the
keyword argument `positive=True` helps the internal
simplification algorithms
work faster and better. This is especially useful when
dealing with complicated
integrals that involve special functions. Furthermore,
note that we used the
`Rational` function to define the `1/2` fraction, which is
another way of
providing hints to Sympy. Otherwise, it's possible that the
floating-point
representation of the fraction could disguise the simple
fraction and
thereby miss internal simplification opportunities.
We want to
solve for `g` in the above expression. Sympy has some
built-in numerical solvers
as in the following,
```
print (S.nsolve(v-0.01,3.0)) # approx 6.21
```
Note that in this situation it is better to use the numerical
solvers because
Sympy `solve` may grind along for a long time to
resolve this.
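As a cross-check (our addition, using the same $\alpha=1/100$), Scipy's normal distribution gives the threshold on $x$ directly, and we can invert $x = 1/2 + \log\gamma$ to recover $\gamma$,
```
from scipy import stats
import numpy as np

alpha = 0.01
x_threshold = stats.norm(0,1).isf(alpha) # approx 2.326
gamma = np.exp(x_threshold - 0.5)        # invert x = 1/2 + log(gamma); approx 6.21
print(x_threshold, gamma)
```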
### Generalized
Likelihood Ratio Test
The likelihood ratio test can be generalized using the
following statistic,
$$
\Lambda(\mathbf{x})= \frac{\sup_{\theta\in\Theta_0}
L(\theta)}{\sup_{\theta\in\Theta}
L(\theta)}=\frac{L(\hat{\theta}_0)}{L(\hat{\theta})}
$$
where $\hat{\theta}_0$ maximizes $L(\theta)$ subject to
$\theta\in\Theta_0$ and
$\hat{\theta}$ is the maximum likelihood estimator.
The intuition behind this
generalization of the Likelihood Ratio Test is that
the denominator is the usual
maximum likelihood estimator and the numerator is
the maximum likelihood
estimator, but over a restricted domain ($\Theta_0$).
This means that the ratio
is always less than unity because the maximum
likelihood estimator over the
entire space will always be at least as maximal
as that over the more restricted
space. When this $\Lambda$ ratio gets small
enough, it means that the maximum
likelihood estimator over the entire domain
($\Theta$) is larger which means
that it is safe to reject the null hypothesis
$H_0$. The tricky part is that
the statistical distribution of $\Lambda$ is
usually eye-wateringly difficult.
Fortunately, Wilks Theorem says that with
sufficiently large $n$, the
distribution of $-2\log\Lambda$ is approximately
chi-square with $r-r_0$ degrees
of freedom, where $r$ is the number of free
parameters for $\Theta$ and $r_0$ is
the number of free parameters in
$\Theta_0$. With this result, if we want an
approximate test at level
$\alpha$, we can reject $H_0$ when $-2\log\Lambda \ge
\chi^2_{r-r_0}(\alpha)$
where $\chi^2_{r-r_0}(\alpha)$ denotes the $1-\alpha$
quantile of the
$\chi^2_{r-r_0}$ chi-square distribution. However, the problem
with this
result is that there is no definite way of knowing how big $n$ should
be. The
advantage of this generalized likelihood ratio test is that it
can test
multiple hypotheses simultaneously, as illustrated
in the following example.
**Example.** Let's return to our coin-flipping example, except now we have
three
different coins. The likelihood function is then,
$$
L(p_1,p_2,p_3) =
\texttt{binom}(k_1;n_1,p_1)\texttt{binom}(k_2;n_2,p_2)\texttt{binom}(k_3;n_3,p_3)
$$
where $\texttt{binom}$ is the binomial distribution with
the given parameters.
For example,
$$
\texttt{binom}(k;n,p) = \binom{n}{k} p^k(1-p)^{n-k}
$$
The null hypothesis is that all three coins have the
same probability of
heads, $H_0:p=p_1=p_2=p_3$. The alternative hypothesis is
that at least one of
these probabilities is different. Let's consider the
numerator of the $\Lambda$
first, which will give us the maximum likelihood
estimator of $p$. Because the
null hypothesis is that all the $p$ values are
equal, we can just treat this as
one big binomial distribution with
$n=n_1+n_2+n_3$, where $k=k_1+k_2+k_3$ is the
total number of heads observed across
all of the coins. Thus, under the null hypothesis,
the distribution of $k$ is binomial
with parameters $n$ and $p$. Now, what is
the maximum likelihood estimator for
this distribution? We have worked this
problem before and have the following,
$$
\hat{p}_0= \frac{k}{n}
$$
In other words, the maximum likelihood estimator under the null
hypothesis is
the proportion of ones observed in the sequence of $n$ trials
total. Now, we
have to substitute this in for the likelihood under the null
hypothesis to
finish the numerator of $\Lambda$,
$$
L(\hat{p}_0,\hat{p}_0,\hat{p}_0) =
\texttt{binom}(k_1;n_1,\hat{p}_0)\texttt{binom}(k_2;n_2,\hat{p}_0)\texttt{binom}(k_3;n_3,\hat{p}_0)
$$
For the denominator of $\Lambda$, which represents the case of maximizing over
the entire space, the maximum likelihood estimator for each separate binomial
distribution is likewise,
$$
\hat{p}_i= \frac{k_i}{n_i}
$$
which makes the likelihood in the denominator the following,
$$
L(\hat{p}_1,\hat{p}_2,\hat{p}_3) =
\texttt{binom}(k_1;n_1,\hat{p}_1)\texttt{binom}(k_2;n_2,\hat{p}_2)\texttt{binom}(k_3;n_3,\hat{p}_3)
$$
for each of the $i\in \lbrace 1,2,3 \rbrace$ binomial distributions. The
$\Lambda$ statistic is then the following,
$$
\Lambda(k_1,k_2,k_3) =
\frac{L(\hat{p}_0,\hat{p}_0,\hat{p}_0)}{L(\hat{p}_1,\hat{p}_2,\hat{p}_3)}
$$
Wilks theorem states that $-2\log\Lambda$ is approximately chi-square
distributed. We can
compute this example with the statistics tools in Sympy and
Scipy.
```
from scipy.stats import binom, chi2
import numpy as np
# some sample parameters
p0,p1,p2 = 0.3,0.4,0.5
n0,n1,n2 = 50,180,200
brvs= [ binom(i,j) for i,j in zip((n0,n1,n2),(p0,p1,p2))]
def gen_sample(n=1):
    'generate samples from separate binomial distributions'
    if n==1:
        return [i.rvs() for i in brvs]
    else:
        return [gen_sample() for k in range(n)]
```
**Programming Tip.**
Note the recursion in the definition of the `gen_sample`
function where a
conditional clause of the function calls itself. This is a
quick way to reuse
code and generating vectorized output. Using `np.vectorize`
is another way, but
the code is simple enough in this case to use the
conditional clause. In
Python, it is generally bad for performance to have code
with nested recursion
because of how the stack frames are managed. However,
here we are only
recursing once so this is not an issue.
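For comparison, here is a minimal non-recursive sketch of the same sampler (our own construction, not from the text); it reuses the `brvs` list defined above and avoids recursion by letting the frozen distributions draw a whole batch at once through the `size` argument of `rvs`. The name `gen_sample_batch` is ours,
```
import numpy as np

def gen_sample_batch(n=1):
    'draw n samples from each of the binomial distributions in brvs'
    draws = np.column_stack([rv.rvs(size=n) for rv in brvs])
    return draws[0] if n == 1 else draws

k0, k1, k2 = gen_sample_batch()   # single draw, like gen_sample()
batch = gen_sample_batch(100)     # 100 draws, one per row, like gen_sample(100)
```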
Next, we compute
the logarithm of the numerator of the $\Lambda$
statistic,
```
np.random.seed(1234)
k0,k1,k2 = gen_sample()
print (k0,k1,k2)
pH0 = sum((k0,k1,k2))/sum((n0,n1,n2))
numer = np.sum([np.log(binom(ni,pH0).pmf(ki))
                for ni,ki in zip((n0,n1,n2),(k0,k1,k2))])
print (numer)
```
Note that we used the null hypothesis estimate for the $\hat{p}_0$.
Likewise,
for the logarithm of the denominator we have the following,
```
denom = np.sum([np.log(binom(ni,pi).pmf(ki))
                for ni,ki,pi in zip((n0,n1,n2),(k0,k1,k2),(p0,p1,p2))])
print (denom)
```
Now, we can compute the logarithm of the $\Lambda$ statistic as
follows and see
what the corresponding value is according to Wilks theorem,
```
chsq=chi2(2)
logLambda =-2*(numer-denom)
print (logLambda)
print (1- chsq.cdf(logLambda))
```
Because the value reported above is less than the 5% significance
level, we
reject the null hypothesis that all the coins have the same
probability of
heads. Note that there are two degrees of freedom because the
difference in the
number of parameters between the null hypothesis ($p$) and
the alternative
($p_1,p_2,p_3$) is two. We can build a quick Monte
Carlo simulation to check the
probability of detection for this example using
the following code, which is
just a combination of the last few code blocks,
```
c= chsq.isf(.05) # 5% significance level
out = []
for k0,k1,k2 in gen_sample(100):
    pH0 = sum((k0,k1,k2))/sum((n0,n1,n2))
    numer = np.sum([np.log(binom(ni,pH0).pmf(ki))
                    for ni,ki in zip((n0,n1,n2),(k0,k1,k2))])
    denom = np.sum([np.log(binom(ni,pi).pmf(ki))
                    for ni,ki,pi in zip((n0,n1,n2),(k0,k1,k2),(p0,p1,p2))])
    out.append(-2*(numer-denom)>c)
print (np.mean(out)) # estimated probability of detection
```
The above simulation shows the estimated probability of
detection for this set
of example parameters. This relatively low
probability of detection means that
while the test is unlikely (i.e.,
at the 5% significance level) to mistakenly
pick the null hypothesis,
it is likewise missing many of the $H_1$ cases (i.e.,
low probability
of detection). The trade-off between which is more important is
up to
the particular context of the problem. In some situations, we may
prefer
additional false alarms in exchange for missing fewer $H_1$
cases.
###
Permutation Test
<!-- p 475, Essential_Statistical_Inference_Boos.pdf -->
<!--
p. 35, Applied_adaptive_statistical_methods_OGorman.pdf -->
<!-- p. 80,
Introduction_to_Statistics_Through_Resampling_Methods_and_R_Good.pdf -->
<!-- p.
104, Statistical_inference_for_data_science_Caffo.pdf -->
<!-- p. 178, All of
statistics -->
The Permutation Test is a good way to test whether or not two sets of
samples come from the same distribution. For example, suppose that
$$
X_1, X_2, \ldots, X_m \sim F
$$
and also,
$$
Y_1, Y_2, \ldots, Y_n \sim G
$$
That is, the $X_i$ and $Y_i$ potentially come from different distributions. Suppose
we have
some test statistic, for example
$$
T(X_1,\ldots,X_m,Y_1,\ldots,Y_n) = \vert\overline{X}-\overline{Y}\vert
$$
Under the null hypothesis for which $F=G$, any of the
$(n+m)!$ permutations are
equally likely. Thus, suppose for
each of the $(n+m)!$ permutations, we have the
computed
statistic,
$$
\lbrace T_1,T_2,\ldots,T_{(n+m)!} \rbrace
$$
Then, under the null hypothesis, each of these values is equally
likely. The
distribution of $T$ under the null hypothesis is the *permutation
distribution*
that puts weight $1/(n+m)!$ on each $T$-value. Suppose $t_o$ is
the observed
value of the test statistic and assume that large $T$ rejects the
null
hypothesis, then the p-value for the permutation test is the following,
$$
P(T>t_o)= \frac{1}{(n+m)!} \sum_{j=1}^{(n+m)!} I(T_j>t_o)
$$
where $I()$ is the indicator function. For large $(n+m)!$, we can
sample
randomly from the set of all permutations to estimate this p-value.
**Example.** Let's return to our coin-flipping example from last time, but
now
we have only two coins. The hypothesis is that both coins
have the same
probability of heads. We can use the built-in
function in Numpy to compute the
random permutations.
```
x=binom(10,0.3).rvs(5) # p=0.3
y=binom(10,0.5).rvs(3) # p=0.5
z = np.hstack([x,y]) # combine into one array
t_o = abs(x.mean()-y.mean())
out = [] # output container
for k in range(1000):
    perm = np.random.permutation(z)
    T=abs(perm[:len(x)].mean()-perm[len(x):].mean())
    out.append((T>t_o))
print ('p-value = ', np.mean(out))
```
Note that the size of the total permutation space is
$8!=40320$ so we are taking
relatively few (i.e., 1000) random
permutations from this space.
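Because the permutation space here is small, we can also compute the exact p-value by enumerating all $8!$ arrangements rather than sampling. The following sketch (our addition) reuses `x`, `z`, and `t_o` from the code above; it is slower but removes the sampling noise,
```
from itertools import permutations
import numpy as np

count, total = 0, 0
for perm in permutations(z):          # all 8! = 40320 arrangements
    perm = np.array(perm)
    T = abs(perm[:len(x)].mean()-perm[len(x):].mean())
    count += (T > t_o)
    total += 1
print('exact p-value = ', count/total)
```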
### Wald Test
The Wald Test is an asymptotic test. Suppose we have $H_0:\theta=\theta_0$ and
otherwise $H_1:\theta\ne\theta_0$, the corresponding statistic is defined as
the
following,
$$
W=\frac{\hat{\theta}_n-\theta_0}{\texttt{se}}
$$
where $\hat{\theta}$ is the maximum likelihood estimator and
$\texttt{se}$ is
the standard error,
$$
\texttt{se} = \sqrt{\mathbb{V}(\hat{\theta}_n)}
$$
Under general conditions, $W\overset{d}{\to} \mathcal{N}(0,1)$.
Thus, an
asymptotic test at level $\alpha$ rejects when $\vert W\vert>
z_{\alpha/2}$ where
$z_{\alpha/2}$ corresponds to $\mathbb{P}(\vert
Z\vert>z_{\alpha/2})=\alpha$
with $Z \sim \mathcal{N}(0,1)$. For our favorite
coin-flipping example, if
$H_0:\theta=\theta_0$, then
$$
W = \frac{\hat{\theta}-\theta_0}{\sqrt{\hat{\theta}(1-\hat{\theta})/n}}
$$
We can simulate this using the following code at the usual
5% significance
level,
```
from scipy import stats
theta0 = 0.5 # H0
k=np.random.binomial(1000,0.3)
theta_hat = k/1000. # MLE
W = (theta_hat-theta0)/np.sqrt(theta_hat*(1-theta_hat)/1000)
c = stats.norm().isf(0.05/2) # z_{alpha/2}
print (abs(W)>c) # if true, reject H0
```
This rejects $H_0$ because the true $\theta=0.3$ and the null hypothesis
is
that $\theta=0.5$. Note that $n=1000$ in this case which puts us well inside
the
asymptotic range of the result. We can re-do this example to estimate
the
detection probability for this example as in the following code,
```
theta0 = 0.5 # H0
c = stats.norm().isf(0.05/2.) # z_{alpha/2}
out = []
for i in range(100):
    k=np.random.binomial(1000,0.3)
    theta_hat = k/1000. # MLE
    W = (theta_hat-theta0)/np.sqrt(theta_hat*(1-theta_hat)/1000.)
    out.append(abs(W)>c) # if true, reject H0
print (np.mean(out)) # detection probability
```
## Testing Multiple Hypotheses
Thus far, we have focused primarily on two
competing hypotheses. Now, we
consider multiple comparisons. The general
situation is the following. We test
the null hypothesis against a sequence of
$n$ competing hypotheses $H_k$. We
obtain p-values for each hypothesis so now
we have multiple p-values to
consider $\lbrace p_k \rbrace$. To boil this
sequence down to a single
criterion, we can make the following argument. Given
$n$ independent hypotheses
that are all untrue (i.e., the null hypothesis actually holds in each
case), the probability of getting at least one false alarm is the
following,
$$
P_{FA} = 1-(1-p_0)^n
$$
where $p_0$ is the individual p-value threshold (say, 0.05). The
problem here
is that $P_{FA}\rightarrow 1$ as $n\rightarrow\infty$. If we want
to make many
comparisons at once and control the overall false alarm rate, then the
overall p-value
should be computed under the assumption that none of the
competing hypotheses is
valid. The most common way to address this is with the
Bonferroni correction
which says that the individual significance level should
be reduced to $p/n$.
Obviously, this makes it much harder to declare
significance for any particular
hypothesis. The natural consequence of this
conservative restriction is to
reduce the statistical power of the experiment,
thus making it more likely the
true effects will be missed.
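To see how quickly the family-wise false-alarm probability grows with $n$, and how the Bonferroni threshold $p_0/n$ holds it near $p_0$, consider the following short computation (our addition),
```
p0 = 0.05
for n in [1, 5, 10, 50, 100]:
    p_fa = 1-(1-p0)**n          # uncorrected family-wise false-alarm probability
    p_fa_bonf = 1-(1-p0/n)**n   # with the Bonferroni threshold p0/n
    print(n, round(p_fa, 3), round(p_fa_bonf, 3))
```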
In 1995, Benjamini and Hochberg devised a simple
method that tells which
p-values are statistically significant. The procedure is
to sort the list of
p-values in ascending order, choose a false-discovery rate
(say, $q$), and then
find the largest p-value in the sorted list such that $p_k
\le k q/n$, where
$k$ is the p-value's position in the sorted list. Finally,
declare that $p_k$
value and all the others less than it statistically
significant. This procedure
guarantees that the proportion of false-positives is
less than $q$ (on
average). The Benjamini-Hochberg procedure (and its
derivatives) is fast and
effective and is widely used for testing hundreds of
primarily false hypotheses
when studying genetics or diseases. Additionally,
this
procedure provides better statistical power than the Bonferroni correction.
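The following is a minimal sketch of the Benjamini-Hochberg procedure just described (our own implementation, not from the text): sort the p-values, find the largest position $k$ with $p_k \le k q/n$, and declare that p-value and all smaller ones significant,
```
import numpy as np

def benjamini_hochberg(pvalues, q=0.05):
    'boolean mask of p-values declared significant at false-discovery rate q'
    pvalues = np.asarray(pvalues)
    n = len(pvalues)
    order = np.argsort(pvalues)              # ascending order
    thresholds = q*np.arange(1, n+1)/n       # k*q/n for k = 1..n
    below = np.nonzero(pvalues[order] <= thresholds)[0]
    significant = np.zeros(n, dtype=bool)
    if below.size:
        significant[order[:below[-1]+1]] = True
    return significant

print(benjamini_hochberg([0.001, 0.008, 0.039, 0.041, 0.09, 0.7]))
```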
<!-- TODO: Fisher transformation -->
<!-- TODO: Cohen's D test for effect size
-->
<!-- TODO: add log-linear transform -->
<!-- TODO: add Fisher transform -->
<!-- TODO: Log-Linear Models -->
## Fisher Exact Test
<!-- # #ifdef SINGLE -->
<!-- TITLE: Fisher Exact Test -->
<!-- AUTHOR: Jose Unpingco -->
<!-- DATE:
today -->
<!-- # #endif -->
<!-- References -->
<!-- ------------ -->
<!-- ..
[freeman-halton] Freeman, G. H., and John H. Halton. "Note on an exact -->
<!--
treatment of contingency, goodness of fit and other problems of -->
<!--
significance." Biometrika (1951): 141-149. -->
<!-- Equation labels as ordinary links -->
<div id="tab:contingencyTable"></div>
$$
\begin{table}[]
\centering
\caption{Example Contingency Table}
\label{tab:contingencyTable} \tag{2}
\begin{tabular}{lllll}
\cline{1-4}
\multicolumn{1}{|l|}{} & \multicolumn{1}{l|}{Infection} &
\multicolumn{1}{l|}{No infection} & \multicolumn{1}{l|}{Total} & \\ \cline{1-4}
\multicolumn{1}{|l|}{Male} & \multicolumn{1}{c|}{13} &
\multicolumn{1}{c|}{11} & \multicolumn{1}{c|}{24} & \\ \cline{1-4}
\multicolumn{1}{|l|}{Female} & \multicolumn{1}{c|}{12} &
\multicolumn{1}{c|}{1} & \multicolumn{1}{c|}{13} & \\ \cline{1-4}
\multicolumn{1}{|l|}{Total} & \multicolumn{1}{c|}{25} &
\multicolumn{1}{c|}{12} & \multicolumn{1}{c|}{37} & \\ \cline{1-4}
\end{tabular}
\end{table}
$$
Contingency tables represent the partitioning of a sample population of two
categories between two different classifications as shown in the following
Table
[2](#tab:contingencyTable). The question is whether or not the observed
table
corresponds to a random partition of the sample population, constrained
by the
marginal sums. Note that because this is a two-by-two table, a change in
any of
the table entries automatically affects all of the other terms because
of the
row and column sum constraints. This means that equivalent questions
like "Under
a random partition, what is the probability that a particular
table entry is at
least as large as a given value?" can be meaningfully posed.
The Fisher Exact Test addresses this question. The idea is to compute the
probability of a particular entry of the table, conditioned upon the marginal
row and column sums,
$$
\mathbb{P}(X_{i,j}\vert r_1,r_2,c_1,c_2)
$$
where $X_{i,j}$ is the $(i,j)$ table entry, $r_1$ represents the sum of
the first
row, $r_2$ represents the sum of the second row, $c_1$ represents the
sum of the
first column, and $c_2$ is the sum of the second column. This
probability is
given by the *hypergeometric distribution*. Recall that the
hypergeometric
distribution gives the probability of sampling (without
replacement) $k$ items
from a population of $N$ items consisting of exactly two
different kinds of
items,
$$
\mathbb{P}(X=k) = \frac{\binom{K}{k}\binom{N-K}{n-k}}{\binom{N}{n}}
$$
where $N$ is the population size, $K$ is the total number of possible
favorable
draws, $n$ is the number of draws, and $k$ is the number of observed
favorable
draws. With the corresponding identification of variables, the
hypergeometric
distribution gives the desired conditional probability: $K=r_1,
k=x, n= c_1,
N=r_1+r_2$.
In the example of Table [2](#tab:contingencyTable), we want the
probability of
$x=13$ male infections among $r_1=24$ males, given
$c_1=25$ infected persons in a
population that also includes $r_2=13$ females. The
`scipy.stats` module has the Fisher Exact Test implemented as shown below,
```
import scipy.stats
table = [[13,11],[12,1]]
odds_ratio, p_value=scipy.stats.fisher_exact(table)
print(p_value)
```
The default for `scipy.stats.fisher_exact` is the two-sided
test. The following
result is for the `less` option,
```
import scipy.stats
odds_ratio, p_value=scipy.stats.fisher_exact(table,alternative='less')
print(p_value)
```
This means that the p-value is computed by summing over the
probabilities of
the contingency tables whose upper-left entry is less than or equal to that of the
given table. To understand
what this means, we can use
the `scipy.stats.hypergeom` function to compute the
probabilities of
the tables where the number of infected males is less than or equal to
13.
```
hg = scipy.stats.hypergeom(37, 24, 25)
probs = [(hg.pmf(i)) for i in range(14)]
print (probs)
print(sum(probs))
```
This is the same as the prior p-value result we obtained from
`scipy.stats.fisher_exact`. Another option is `greater` which derives from the
following analogous summation,
```
odds_ratio, p_value=scipy.stats.fisher_exact(table,alternative='greater')
probs = [hg.pmf(i) for i in range(13,25)]
print(probs)
print(p_value)
print(sum(probs))
```
Finally, the two-sided version sums those individual
table probabilities
that are less than or equal to that of the given table,
```
_,p_value=scipy.stats.fisher_exact(table)
probs = [ hg.pmf(i) for i in range(25) ]
print(sum(i for i in probs if i<= hg.pmf(13)))
print(p_value)
```
Thus, for this particular contingency table, we
could reasonably conclude that
13 infected males in this total
population is statistically significant with a
p-value less than
five percent.
Performing this kind of analysis for tables
larger than `2x2` easily becomes
computationally challenging due to the nature
of the underlying combinatorics and
usually requires specialized
approximations.
In this section, we discussed the structure of statistical
hypothesis testing
and defined the various terms that are commonly used for
this process, along
with the illustrations of what they mean in our running
coin-flipping example.
From an engineering standpoint, hypothesis testing is not
as common as
confidence intervals and point estimates. On the other hand,
hypothesis testing
is very common in social and medical science, where one must
deal with
practical constraints that may limit the sample size or other aspects
of the
hypothesis testing rubric. In engineering, we can usually have much more
control over the samples and models we employ because they are typically
inanimate objects that can be measured repeatedly and consistently. This is
obviously not so with human studies, which generally have other ethical and
legal considerations.
| github_jupyter |
```
import pprint
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from json_shot_scraper import flatten_shot, flatten_goal, flatten_complete_pass, flatten_incomplete_pass, flatten_corner
from player_scraper import flatten_player, flatten_sub
from dataframe_cleaner import (pass_to_shot, corner_to_shot, transpose_coordinates, coord_to_yards,
shot_distance_angle, dummy_columns, drop_own_goals, goal_dummy)
from html_scraper import db
from mongo_to_db import game_to_cleaned_df, create_frame, create_master_df, create_master_player_min_df
pd.set_option('display.max_columns', 50)
games = db.games.find()
```
# Players DF
```
players_minutes_df = create_master_player_min_df(games)
players_minutes_df.head()
```
# Game Events DF
```
games = db.games.find()
shots_df = create_master_df(games)
shots_df.head()
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
possible_games = list(shots_df['game_id'].unique())
len(possible_games)
```
## manually creating train_test_split by game_id
start: 159 games --> removed 64 (64/80 were unique) games to train (train model)
test_set = 95 games left --> random sample (40 games) --> predict
```
# the indices 80 games to train random forest on
games_to_train_on = np.random.choice(159, 80)
games_to_sample = []
for i in games_to_train_on:
    games_to_sample.append(possible_games[i])
len(games_to_sample)
for game in games_to_sample:
    if game in possible_games:
        possible_games.remove(game)
```
### remove 60 games from possible games
```
len(possible_games)
#games to train rf on
len(games_to_sample)
rf_columns = ['player_id', 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt']
shots_to_train_on = shots_df[shots_df['game_id'].isin(np.array(games_to_sample))].copy()
len(shots_to_train_on['game_id'].unique())
shots_to_train_on.head()
train_data = shots_to_train_on[rf_columns]
train_y = shots_to_train_on['is_goal']
indices = shots_to_train_on.index.values
random_forest_model = RandomForestClassifier(n_estimators=300, max_depth=3, verbose=1)
random_forest_model.fit(train_data, train_y)
```
### sample test set by game_id
```
len(possible_games)
games_to_test_on = np.random.choice(95, 50)
games_to_predict = []
for i in games_to_test_on:
    games_to_predict.append(possible_games[i])
print(len(possible_games))
len(games_to_predict)
```
remove from that sample
```
for game in games_to_predict:
    if game in possible_games:
        possible_games.remove(game)
print(len(possible_games))
len(games_to_predict)
95 - 61
```
### 34 unique games removed from the possible_games
```
shots_to_predict = shots_df[shots_df['game_id'].isin(np.array(games_to_predict))].copy()
shots_to_predict.head()
len(shots_to_predict['game_id'].unique())
```
# Predicting the xG xA of 34 games
```
test_data = shots_to_predict[rf_columns]
test_y = shots_to_predict['is_goal']
indices1 = shots_to_predict.index.values
p_random_forest = random_forest_model.predict_proba(test_data)
df_xG = pd.DataFrame(test_data)
df_xG['is_goal'] = test_y
df_xG['xG'] = p_random_forest[:,1]
df_xG.head()
df_xG['xA'] = df_xG['assisted_shot'] * df_xG['xG']
df_xG.head()
unique_players = df_xG['player_id'].unique()
contributions = []
for player in unique_players:
    xgsum = df_xG[df_xG['player_id'] == player]['xG'].sum()
    xasum = df_xG[df_xG['player_id'] == player]['xA'].sum()
    goals = df_xG[df_xG['player_id'] == player]['is_goal'].sum()
    contributions.append([player, xgsum, xasum, goals])
by_xG = sorted(contributions, key=lambda x: x[1], reverse=True)
contribution_df = pd.DataFrame(by_xG, columns=['player_id', 'total_xG', 'total_xA', 'goals'])
contribution_df['combined xG + xA'] = (contribution_df['total_xG'] + contribution_df['total_xA']).round(2)
contribution_df.head()
```
### now want to find the total minutes from the games in 'games_to_predict' and find the xG in that many minutes
```
pred_min_df = players_minutes_df[players_minutes_df['game_id'].isin(np.array(games_to_predict))].copy()
pred_min_df.head()
players = pred_min_df['player_id'].unique()
player_minutes = []
for player in players:
    total_minutes = pred_min_df[pred_min_df['player_id'] == player]['minutes_played'].sum()
    name = pred_min_df[pred_min_df['player_id'] == player]['name'].iloc[0]
    player_minutes.append([player, total_minutes, name])
player_minutes_df = pd.DataFrame(player_minutes, columns=['player_id', 'total_minutes_played', 'player_name'])
player_minutes_df.head()
```
# Merge contribution_df with player_minutes_df
```
xg_min = pd.merge(contribution_df, player_minutes_df, on='player_id', how='outer')
xg_min.head()
columns = ['player_name', 'player_id', 'total_xG', 'total_xA', 'combined xG + xA', 'goals', 'total_minutes_played']
xg_final = xg_min[columns]
xg_final.head()
xg_final['xG+xA/90'] = xg_final['combined xG + xA'].copy() / (xg_final['total_minutes_played'] / 90)
xg_final[xg_final['xG+xA/90'] > 0.5]
xg_final[xg_final['xG+xA/90'] > 1]
xg_final[(xg_final['xG+xA/90'] > xg_final['goals']) & (xg_final['goals'] > 0)]
xg_final[xg_final['total_xG'] > xg_final['goals']]
```
# Manual train_test_split
```
from model_prep import manual_train_split, manual_test_split
shots_df.head()
hold_test, train = manual_train_split(shots_df)
```
### training data
```
shots_to_train_on = shots_df[shots_df['game_id'].isin(np.array(train))].copy()
shots_to_train_on.head()
train_data = shots_to_train_on[rf_columns]
train_y = shots_to_train_on['is_goal']
indices = shots_to_train_on.index.values
random_forest_model = RandomForestClassifier(n_estimators=300, max_depth=3, verbose=1)
random_forest_model.fit(train_data, train_y)
```
### testing data
```
holdout, test = manual_test_split(hold_test)
shots_to_predict = shots_df[shots_df['game_id'].isin(np.array(test))].copy()
shots_to_predict.head()
test_data = shots_to_predict[rf_columns]
test_y = shots_to_predict['is_goal']
indices1 = shots_to_predict.index.values
```
### model prediction
```
p_random_forest = random_forest_model.predict_proba(test_data)
df_xG = pd.DataFrame(test_data)
df_xG['is_goal'] = test_y
df_xG['xG'] = p_random_forest[:,1]
df_xG['xA'] = df_xG['assisted_shot'] * df_xG['xG']
df_xG.head()
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import theano, theano.tensor as T
import numpy as np
import theano_lstm
import random
```
## A Nonsensical Language Model using Theano LSTM
Today we will train a **nonsensical** language model !
We will first collect some language data, convert it to numbers, and then feed it to a recurrent neural network and ask it to predict upcoming words. When we are done we will have a machine that can generate sentences from our made-up language ad-infinitum !
### Collect Language Data
The first step here is to get some data. Since we are basing our language on nonsense, we need to generate good nonsense using a sampler.
Our sampler will take a probability table as input, e.g. a language where people are equally likely to say "a" or "b" would be written as follows:
nonsense = Sampler({"a": 0.5, "b": 0.5})
We get samples from this language like this:
word = nonsense()
We overloaded the `__call__` method and got this syntactic sugar.
```
## Fake dataset:
class Sampler:
    def __init__(self, prob_table):
        total_prob = 0.0
        if type(prob_table) is dict:
            for key, value in prob_table.items():
                total_prob += value
        elif type(prob_table) is list:
            prob_table_gen = {}
            for key in prob_table:
                prob_table_gen[key] = 1.0 / (float(len(prob_table)))
            total_prob = 1.0
            prob_table = prob_table_gen
        else:
            # ArgumentError is not a builtin exception; TypeError is the appropriate one here
            raise TypeError("__init__ takes either a dict or a list as its first argument")
        if total_prob <= 0.0:
            raise ValueError("Probability is not strictly positive.")
        self._keys = []
        self._probs = []
        for key in prob_table:
            self._keys.append(key)
            self._probs.append(prob_table[key] / total_prob)

    def __call__(self):
        sample = random.random()
        seen_prob = 0.0
        for key, prob in zip(self._keys, self._probs):
            if (seen_prob + prob) >= sample:
                return key
            else:
                seen_prob += prob
        return key
```
### Parts of Speech
Now that we have a `Sampler` we can create a couple different word groups that our language uses to distinguish between different probability distributions easily:
```
samplers = {
"punctuation": Sampler({".": 0.49, ",": 0.5, ";": 0.03, "?": 0.05, "!": 0.05}),
"stop": Sampler({"the": 10, "from": 5, "a": 9, "they": 3, "he": 3, "it" : 2.5, "she": 2.7, "in": 4.5}),
"noun": Sampler(["cat", "broom", "boat", "dog", "car", "wrangler", "mexico", "lantern", "book", "paper", "joke","calendar", "ship", "event"]),
"verb": Sampler(["ran", "stole", "carried", "could", "would", "do", "can", "carry", "catapult", "jump", "duck"]),
"adverb": Sampler(["rapidly", "calmly", "cooly", "in jest", "fantastically", "angrily", "dazily"])
}
```
### Simple Grammar
To create sentences from our language we create a simple recursion that goes as follows:
1. If the sentence we have ends with a full stop, a question mark, or an exclamation point then end at once!
2. Else our sentence should have:
* A stop word
* A noun
* An adverb (with prob 0.3), or 2 adverbs (with prob 0.3*0.3=0.09)
* A verb
* Another noun (with prob 0.2), or 2 more nouns connected by a dash (with prob 0.2*0.1=0.02)
3. If our sentence is now over 500 characters, add a full stop and end at once!
4. Else add some punctuation and go back to (1)
```
def generate_nonsense(word = ""):
    if word.endswith("."):
        return word
    else:
        if len(word) > 0:
            word += " "
        word += samplers["stop"]()
        word += " " + samplers["noun"]()
        if random.random() > 0.7:
            word += " " + samplers["adverb"]()
            if random.random() > 0.7:
                word += " " + samplers["adverb"]()
        word += " " + samplers["verb"]()
        if random.random() > 0.8:
            word += " " + samplers["noun"]()
            if random.random() > 0.9:
                word += "-" + samplers["noun"]()
        if len(word) > 500:
            word += "."
        else:
            word += " " + samplers["punctuation"]()
        return generate_nonsense(word)

def generate_dataset(total_size):
    sentences = []
    for i in range(total_size):
        sentences.append(generate_nonsense())
    return sentences

# generate dataset
lines = generate_dataset(100)
```
### Utilities
Now that we have our training corpus for our language model (optionally you could gather an actual corpus from the web :), we can now create our first utility, `Vocab`, that will hold the mapping from words to an index, and perfom the conversions from words to indices and vice-versa:
```
### Utilities:
class Vocab:
    __slots__ = ["word2index", "index2word", "unknown"]

    def __init__(self, index2word = None):
        self.word2index = {}
        self.index2word = []
        # add unknown word:
        self.add_words(["**UNKNOWN**"])
        self.unknown = 0
        if index2word is not None:
            self.add_words(index2word)

    def add_words(self, words):
        for word in words:
            if word not in self.word2index:
                self.word2index[word] = len(self.word2index)
                self.index2word.append(word)

    def __call__(self, line):
        """
        Convert from numerical representation to words
        and vice-versa.
        """
        if type(line) is np.ndarray:
            return " ".join([self.index2word[word] for word in line])
        if type(line) is list:
            if len(line) > 0:
                if isinstance(line[0], int):  # a list of indices converts back to words
                    return " ".join([self.index2word[word] for word in line])
            indices = np.zeros(len(line), dtype=np.int32)
        else:
            line = line.split(" ")
            indices = np.zeros(len(line), dtype=np.int32)
        for i, word in enumerate(line):
            indices[i] = self.word2index.get(word, self.unknown)
        return indices

    @property
    def size(self):
        return len(self.index2word)

    def __len__(self):
        return len(self.index2word)
```
### Create a Mapping from numbers to words
Now we can use the `Vocab` class to gather all the words and store an Index:
```
vocab = Vocab()
for line in lines:
vocab.add_words(line.split(" "))
```
To send our sentences in one big chunk to our neural network we transform each sentence into a row vector and place each of these rows into a bigger matrix that holds all these rows. Not all sentences have the same length, so we will pad those that are too short with 0s in `pad_into_matrix`:
```
def pad_into_matrix(rows, padding = 0):
    if len(rows) == 0:
        return np.array([0, 0], dtype=np.int32)
    lengths = list(map(len, rows))  # materialize: in Python 3 `map` is a one-shot iterator
    width = max(lengths)
    height = len(rows)
    mat = np.empty([height, width], dtype=rows[0].dtype)
    mat.fill(padding)
    for i, row in enumerate(rows):
        mat[i, 0:len(row)] = row
    return mat, lengths

# transform into big numerical matrix of sentences:
numerical_lines = []
for line in lines:
    numerical_lines.append(vocab(line))
numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
```
## Build a Recurrent Neural Network
Now the real work is upon us! Thank goodness we have our language data ready. We now create a recurrent neural network by connecting an Embedding $E$ for each word in our corpus, and stacking some special cells together to form a prediction function. Mathematically we want:
$$\mathrm{argmax_{E, \Phi}} {\bf P}(w_{k+1}| w_{k}, \dots, w_{0}; E, \Phi) = f(x, h)$$
with $f(\cdot, \cdot)$ the function our recurrent neural network performs at each timestep that takes as inputs:
* an observation $x$, and
* a previous state $h$,
and outputs a probability distribution $\hat{p}$ over the next word.
We have $x = E[ w_{k}]$ our observation at time $k$, and $h$ the internal state of our neural network, and $\Phi$ is the set of parameters used by our classifier, and recurrent neural network, and $E$ is the embedding for our words.
In practice we will obtain $E$ and $\Phi$ iteratively using gradient descent on the error our network is making in its prediction. To do this we define our error as the [Kullback-Leibler divergence](http://en.wikipedia.org/wiki/Kullback–Leibler_divergence) (a distance between probability distributions) between our estimate of $\hat{p} = {\bf P}(w_{k+1}| w_{k}, \dots, w_{0}; E, \Phi)$ and the actual value of ${\bf P}(w_{k+1}| w_{k}, \dots, w_{0})$ from the data (e.g. a probability distribution that is 1 for word $w_k$ and 0 elsewhere).
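To make the error measure concrete, here is a tiny numeric sketch (our addition, using plain NumPy rather than Theano): because the target distribution is one-hot at the word that actually occurred, the KL divergence reduces to the negative log-probability the model assigns to that word,
```
import numpy as np

p_hat = np.array([0.1, 0.6, 0.1, 0.1, 0.1])  # model's predicted distribution over 5 words
next_word = 1                                # index of the word that actually occurred
loss = -np.log(p_hat[next_word])             # per-word loss, approx 0.51
print(loss)
```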
#### Theano LSTM StackedCells function
To build this predictive model we make use of [theano_lstm](https://github.com/JonathanRaiman/theano_lstm), a Python module for building recurrent neural networks using Theano. The first step we take is to declare what kind of cells we want to use by declaring a celltype. There are many different celltypes we can use, but the most common these days (and incidentally most effective) are `RNN` and `LSTM`. For a more in-depth discussion of how these work I suggest checking out [Arxiv](http://arxiv.org/find/all/1/all:+lstm/0/1/0/all/0/1), or [Alex Graves' website](http://www.cs.toronto.edu/~graves/), or [Wikipedia](http://en.wikipedia.org/wiki/Long_short_term_memory). Here we use `celltype = LSTM`.
self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
Once we've declared what kind of cells we want to use, we can now choose to add an Embedding to map integers (indices) to vectors (and in our case map words to their indices, then indices to word vectors we wish to train). Intuitively this lets the network separate and recognize what it is "seeing" or "receiving" at each timestep. To add an Embedding we create `Embedding(vocabulary_size, size_of_embedding_vectors)` and insert it at the beginning of the `StackedCells`'s layers list (thereby telling `StackedCells` that this Embedding layer needs to be activated before the other ones):
# add an embedding
self.model.layers.insert(0, Embedding(vocab_size, input_size))
The final output of our network needs to be a probability distribution over the next words (but in different application areas this could be a sentiment classification, a decision, a topic, etc...) so we add another layer that maps the internal state of the LSTMs to a probability distribution over all the words in our language. To ensure that our prediction is indeed a probability distribution we "activate" our layer with a Softmax, meaning that we will exponentiate every value of the output, $q_i = e^{x_i}$, so that all values are positive, and then we will divide the output by its sum so that the output sums to 1:
$$p_i = \frac{q_i}{\sum_j q_j}\text{, and }\sum_i p_i = 1.$$
# add a classifier:
self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
For convenience we wrap this all in one class below.
#### Prediction
We have now defined our network. At each timestep we can produce a probability distribution for each input index:
def create_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
return new_states[1:]
...
Our inputs are an integer matrix Theano symbolic variable:
...
# in sequence forecasting scenario we take everything
# up to the before last step, and predict subsequent
# steps ergo, 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
....
Scan receives our recurrence relation `step` from above, and also needs to know what will be outputted at each step in `outputs_info`. We give `outputs_info` a set of variables corresponding to the hidden states of our StackedCells. Some of the layers have no hidden state, and thus we should simply pass a `None` to Theano, while others do require some initial state. In those cases we wrap their initial state inside a dictionary:
def has_hidden(layer):
"""
Whether a layer has a trainable
initial hidden state.
"""
return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
return T.repeat(T.shape_padleft(vector), n, axis=0)
def initial_state(layer, dimensions = None):
"""
Initalizes the recurrence relation with an initial hidden state
if needed, else replaces with a "None" to tell Theano that
the network **will** return something, but it does not need
to send it to the next step of the recurrence
"""
if dimensions is None:
return layer.initial_hidden_state if has_hidden(layer) else None
else:
return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
"""Optionally wrap tensor variable into a dict with taps=[-1]"""
state = initial_state(layer, dimensions)
if state is not None:
return dict(initial=state, taps=[-1])
else:
return None
Let's now create these inital states (note how we skip layer 1, the embeddings by doing `self.model.layers[1:]` in the iteration, this is because there is no point in passing these embeddings around in our recurrence because word vectors are only seen at the timestep they are received in this network):
# choose what gets outputted at each timestep:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
return result[-1].transpose((2,0,1))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
#### Error Function:
Our error function uses `theano_lstm`'s `masked_loss` method. This method allows us to define ranges over which a probability distribution should obey a particular target distribution. We control this method by setting start and end points for these ranges. In doing so we mask the areas where we do not care what the network predicted.
In our case our network predicts words we care about during the sentence, but when we pad our short sentences with 0s to fill our matrix, we do not care what the network does there, because this is happening outside the sentence we collected:
def create_cost_fun (self):
# create a cost function that
# takes each prediction at every timestep
# and guesses next timestep's value:
what_to_predict = self.input_mat[:, 1:]
# because some sentences are shorter, we
# place masks where the sentences end:
# (for how long is zero indexed, e.g. an example going from `[2,3)`)
# has this value set 0 (here we substract by 1):
for_how_long = self.for_how_long - 1
# all sentences start at T=0:
starting_when = T.zeros_like(self.for_how_long)
self.cost = masked_loss(self.predictions,
what_to_predict,
for_how_long,
starting_when).sum()
#### Training Function
We now have a cost function. To perform gradient descent we now need to tell Theano how each parameter must be updated at every training epoch. We `theano_lstm`'s `create_optimization_udpates` method to generate a dictionary of updates and to apply special gradient descent rules that accelerate and facilitate training (for instance scaling the gradients when they are too large or too little, and preventing gradients from becoming too big and making our model numerically unstable -- in this example we use [Adadelta](http://arxiv.org/abs/1212.5701):
def create_training_function(self):
updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta")
self.update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.cost,
updates=updates,
allow_input_downcast=True)
PS: our parameters are obtained by calling `self.model.params`:
@property
def params(self):
return self.model.params
### Final Code
```
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
def softmax(x):
    """
    Wrapper for softmax, helps with
    pickling, and removing one extra
    dimension that Theano adds during
    its exponential normalization.
    """
    return T.nnet.softmax(x.T)

def has_hidden(layer):
    """
    Whether a layer has a trainable
    initial hidden state.
    """
    return hasattr(layer, 'initial_hidden_state')

def matrixify(vector, n):
    return T.repeat(T.shape_padleft(vector), n, axis=0)

def initial_state(layer, dimensions = None):
    """
    Initalizes the recurrence relation with an initial hidden state
    if needed, else replaces with a "None" to tell Theano that
    the network **will** return something, but it does not need
    to send it to the next step of the recurrence
    """
    if dimensions is None:
        return layer.initial_hidden_state if has_hidden(layer) else None
    else:
        return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None

def initial_state_with_taps(layer, dimensions = None):
    """Optionally wrap tensor variable into a dict with taps=[-1]"""
    state = initial_state(layer, dimensions)
    if state is not None:
        return dict(initial=state, taps=[-1])
    else:
        return None

class Model:
    """
    Simple predictive model for forecasting words from
    sequence using LSTMs. Choose how many LSTMs to stack
    what size their memory should be, and how many
    words can be predicted.
    """
    def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
        # declare model
        self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
        # add an embedding
        self.model.layers.insert(0, Embedding(vocab_size, input_size))
        # add a classifier:
        self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
        # inputs are matrices of indices,
        # each row is a sentence, each column a timestep
        self._stop_word = theano.shared(np.int32(999999999), name="stop word")
        self.for_how_long = T.ivector()
        self.input_mat = T.imatrix()
        self.priming_word = T.iscalar()
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
        # create symbolic variables for prediction:
        self.predictions = self.create_prediction()
        # create symbolic variable for greedy search:
        self.greedy_predictions = self.create_prediction(greedy=True)
        # create gradient training functions:
        self.create_cost_fun()
        self.create_training_function()
        self.create_predict_function()

    def stop_on(self, idx):
        self._stop_word.set_value(idx)

    @property
    def params(self):
        return self.model.params

    def create_prediction(self, greedy=False):
        def step(idx, *states):
            # new hiddens are the states we need to pass to LSTMs
            # from past. Because the StackedCells also include
            # the embeddings, and those have no state, we pass
            # a "None" instead:
            new_hiddens = [None] + list(states)
            new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
            if greedy:
                new_idxes = new_states[-1]
                new_idx = new_idxes.argmax()
                # provide a stopping condition for greedy search:
                return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
            else:
                return new_states[1:]
        # in sequence forecasting scenario we take everything
        # up to the before last step, and predict subsequent
        # steps ergo, 0 ... n - 1, hence:
        inputs = self.input_mat[:, 0:-1]
        num_examples = inputs.shape[0]
        # pass this to Theano's recurrence relation function:
        # choose what gets outputted at each timestep:
        if greedy:
            outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
            result, _ = theano.scan(fn=step,
                                    n_steps=200,
                                    outputs_info=outputs_info)
        else:
            outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
            result, _ = theano.scan(fn=step,
                                    sequences=[inputs.T],
                                    outputs_info=outputs_info)
        if greedy:
            return result[0]
        # softmaxes are the last layer of our network,
        # and are at the end of our results list:
        return result[-1].transpose((2,0,1))
        # we reorder the predictions to be:
        # 1. what row / example
        # 2. what timestep
        # 3. softmax dimension

    def create_cost_fun (self):
        # create a cost function that
        # takes each prediction at every timestep
        # and guesses next timestep's value:
        what_to_predict = self.input_mat[:, 1:]
        # because some sentences are shorter, we
        # place masks where the sentences end:
        # (for how long is zero indexed, e.g. an example going from `[2,3)`)
        # has this value set 0 (here we substract by 1):
        for_how_long = self.for_how_long - 1
        # all sentences start at T=0:
        starting_when = T.zeros_like(self.for_how_long)
        self.cost = masked_loss(self.predictions,
                                what_to_predict,
                                for_how_long,
                                starting_when).sum()

    def create_predict_function(self):
        self.pred_fun = theano.function(
            inputs=[self.input_mat],
            outputs=self.predictions,
            allow_input_downcast=True
        )
        self.greedy_fun = theano.function(
            inputs=[self.priming_word],
            outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
            allow_input_downcast=True
        )

    def create_training_function(self):
        updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta")
        self.update_fun = theano.function(
            inputs=[self.input_mat, self.for_how_long],
            outputs=self.cost,
            updates=updates,
            allow_input_downcast=True)

    def __call__(self, x):
        return self.pred_fun(x)
```
### Construct model
We now declare the model and parametrize it to use an RNN, and make predictions in the range provided by our vocabulary. We also tell the greedy reconstruction search that it can consider a sentence as being over when the symbol corresponding to a period appears:
```
# construct model & theano functions:
model = Model(
input_size=10,
hidden_size=10,
vocab_size=len(vocab),
stack_size=1, # make this bigger, but makes compilation slow
celltype=RNN # use RNN or LSTM
)
model.stop_on(vocab.word2index["."])
```
### Train Model
We run 10,000 times through our data and every 500 epochs of training we output what the model considers to be a natural continuation to the sentence "the":
```
# train:
for i in range(10000):
    error = model.update_fun(numerical_lines, numerical_lengths)
    if i % 100 == 0:
        print("epoch %(epoch)d, error=%(error).2f" % ({"epoch": i, "error": error}))
    if i % 500 == 0:
        print(vocab(model.greedy_fun(vocab.word2index["the"])))
```
| github_jupyter |
# Slicing
Objects in scipp can be sliced in two ways. The general way to do this is by [positional indexing](#Positional-indexing) using indices as in numpy.
A second approach is to use [label-based indexing](#Label-based-indexing), which uses actual coordinate values for selection.
## Positional indexing
### Overview
Data in a [variable](../generated/classes/scipp.Variable.rst#scipp.Variable), [data array](../generated/classes/scipp.DataArray.rst#scipp.DataArray), or [dataset](../generated/classes/scipp.Dataset.rst#scipp.Dataset) can be indexed in a similar manner to NumPy and xarray.
The dimension to be sliced is specified using a dimension label and, in contrast to NumPy, positional dimension lookup is not available.
Positional indexing with an integer or an integer range is made via `__getitem__` and `__setitem__` with a dimension label as first argument.
This is available for variables, data arrays, and datasets.
In all cases a *view* is returned, i.e., just like when slicing a [numpy.ndarray](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray) no copy is performed.
### Variables
Consider the following variable:
```
import numpy as np
import scipp as sc
var = sc.array(
dims=['z', 'y', 'x'],
values=np.random.rand(2, 3, 4),
variances=np.random.rand(2, 3, 4))
sc.show(var)
```
As when slicing a `numpy.ndarray`, the dimension `'x'` is removed since no range is specified:
```
s = var['x', 1]
sc.show(s)
print(s.dims, s.shape)
```
When a range is specified, the dimension is kept, even if it has extent 1:
```
s = var['x', 1:3]
sc.show(s)
print(s.dims, s.shape)
s = var['x', 1:2]
sc.show(s)
print(s.dims, s.shape)
```
Slicing can be chained arbitrarily:
```
s = var['x', 1:4]['y', 2]['x', 1]
sc.show(s)
print(s.dims, s.shape)
```
The `copy()` method turns a view obtained from a slice into an independent object:
```
s = var['x', 1:2].copy()
s += 1000
var
```
### Data arrays
Slicing for data arrays works in the same way, but some additional rules apply.
Consider:
```
a = sc.DataArray(
data=sc.array(dims=['y', 'x'], values=np.random.rand(2, 3)),
coords={
'x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
'y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)},
masks={
'mask': sc.array(dims=['x'], values=[True, False, False])},
attrs={
'aux_x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
'aux_y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)})
sc.show(a)
a
```
As when slicing a variable, the sliced dimension is removed when slicing without range, and kept when slicing with range.
When slicing a data array the following additional rules apply:
- Meta data (coords, masks, attrs) that *do not depend on the slice dimension* are marked as *readonly*
- Slicing **without range**:
  - The *coordinates* for the sliced dimension are *removed* and inserted as *attributes* instead.
- Slicing **with a range**:
  - The *coordinates* for the sliced dimension are *kept*.
The rationale behind this mechanism is as follows.
Meta data is often of a lower dimensionality than data, such as in this example where coords, masks, and attrs are 1-D whereas data is 2-D.
Elements of meta data entries are thus shared by many data elements, and we must be careful to not apply operations to subsets of data while unintentionally modifying meta data for other unrelated data elements:
```
a['x', 0:1].coords['x'] *= 2 # ok, modifies only coord value "private" to this x-slice
try:
a['x', 0:1].coords['y'] *= 2 # not ok, would modify coord value "shared" by all x-slices
except sc.VariableError as e:
print(f'\'y\' is shared with other \'x\'-slices and should not be modified by the slice, so we get an error:\n{e}')
```
In practice, a much more dangerous issue this mechanism protects from is unintentional changes to masks.
Consider
```
val = a['x', 1]['y', 1].copy()
val
```
If we now assign this scalar `val` to a slice at `y=0`, using `=` we need to update the mask.
However, the mask in this example depends only on `x` so it also applies to the slice `y=1`.
If we would allow updating the mask, the following would *unmask data for all* `y`:
```
try:
a['y', 0] = val
except sc.DimensionError as e:
print(e)
```
Since we cannot update the mask in a consistent manner the entire operation fails.
Data is not modified.
The same mechanism is applied for binary arithmetic operations such as `+=` where the masks would be updated using a logical OR operation.
The purpose for turning coords into attrs when slicing *without* a range is to support useful operations such as:
```
a - a['x', 1] # compute difference compared to data at x=1
```
If `a['x', 0]` had an `x` coordinate this would fail due to a coord mismatch.
If coord checking is required, use a range-slice such as `a['x', 1:2]`. Compare the two cases shown in the following and make sure to inspect the `dims` and `shape` of all variables (data and coordinates) of the resulting slice views (note the tooltip shown when moving the mouse over the name also contains this information):
```
sc.show(a['y', 1:2]) # Range of length 1
a['y', 1:2]
sc.show(a['y', 1]) # No range
a['y', 1]
```
### Datasets
Slicing for datasets works just like for data arrays.
In addition to changing certain coords into attrs and marking certain meta data entries as read-only, slicing a dataset also marks lower-dimensional *data entries* readonly.
Consider a dataset `d`:
```
d = sc.Dataset(
data={
'a': sc.array(dims=['y', 'x'], values=np.random.rand(2, 3)),
'b': sc.array(dims=['x', 'y'], values=np.random.rand(3, 2)),
'c': sc.array(dims=['y'], values=np.random.rand(2)),
'0d-data': sc.scalar(1.0)},
coords={
'x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
'y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)})
sc.show(d)
```
and a slice of `d`:
```
sc.show(d['y', 0])
```
By marking lower-dimensional entries in the slice as read-only we prevent unintentional multiple modifications of the same scalar:
```
try:
d['y', 0] += 1 # would add 1 to `0d-data`
d['y', 1] += 2 # would add 2 to `0d-data`
except sc.VariableError as e:
print(e)
```
This is an important aspect and it is worthwhile to take some time and think through the mechanism.
Slicing a data item of a dataset should not bring any surprises.
Essentially this behaves like slicing a data array:
```
sc.show(d['a']['x', 1:2])
```
Slicing and item access can be done in arbitrary order with identical results:
```
d['x', 1:2]['a'] == d['a']['x', 1:2]
d['x', 1:2]['a'].coords['x'] == d.coords['x']['x', 1:2]
```
## Label-based indexing
### Overview
Data in a [dataset](../generated/classes/scipp.Dataset.rst#scipp.Dataset) or [data array](../generated/classes/scipp.DataArray.rst#scipp.DataArray) can be selected by the coordinate value.
This is similar to pandas [pandas.DataFrame.loc](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html).
Scipp leverages its ubiquitous support for physical units to provide label-based indexing in an intuitive manner, using the same syntax as [positional indexing](#Positional-indexing).
For example:
- `array['x', 0:3]` selects positionally, i.e., returns the first three element along `'x'`.
- `array['x', 1.2*sc.units.m:1.3*sc.units.m]` selects by label, i.e., returns the elements along `'x'` falling between `1.2 m` and `1.3 m`.
That is, label-based indexing is made via `__getitem__` and `__setitem__` with a dimension label as first argument and a scalar [variable](../generated/classes/scipp.Variable.rst#scipp.Variable) or a Python `slice()` as created by the colon operator `:` from two scalar variables.
In all cases a *view* is returned, i.e., just like when slicing a [numpy.ndarray](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray) no copy is performed.
Consider:
```
da = sc.DataArray(
data=sc.array(dims=['year','x'], values=np.random.random((3, 7))),
coords={
'x': sc.array(dims=['x'], values=np.linspace(0.1, 0.9, num=7), unit=sc.units.m),
'year': sc.array(dims=['year'], values=[2020,2023,2027])})
sc.show(da)
da
```
We can select a slice of `da` based on the `'year'` labels:
```
year = sc.scalar(2023)
da['year', year]
```
In this case `2023` is the second element of the coordinate so this is equivalent to positionally slicing `data['year', 1]` and [the usual rules](#Positional-indexing) regarding dropping dimensions and converting dimension coordinates to attributes apply:
```
assert sc.identical(da['year', year], da['year', 1])
```
<div class="alert alert-warning">
**Warning**
It is **essential** not to mix up plain integers and scalar scipp variables containing an integer.
As the above example shows, positional indexing yields a different slice than label-based indexing.
</div>
<div class="alert alert-info">
**Note**
Here, we created `year` using `sc.scalar`.
Alternatively, we could use `year = 2023 * sc.units.dimensionless`; this multiplication syntax is especially convenient for dimensionful coordinates such as `'x'`, see below.
</div>
For floating-point-valued coordinates selecting a single point would require an exact match, which is typically not feasible in practice.
Scipp does *not* perform fuzzy matching in this case; instead, an `IndexError` is raised:
```
x = 0.23 * sc.units.m  # No x coordinate value at this point. Equivalent to sc.scalar(0.23, unit=sc.units.m)
try:
da['x', x]
except IndexError as e:
print(str(e))
```
For such coordinates we may thus use an *interval* to select a *range* of values using the `:` operator:
```
x_left = 0.1 * sc.units.m
x_right = 0.4 * sc.units.m
da['x', x_left:x_right]
```
The selection includes the bounds on the "left" but excludes the bounds on the "right", i.e., we select the half-open interval $x \in [x_{\text{left}},x_{\text{right}})$, closed on the left and open on the right.
The half-open interval implies that we can select consecutive intervals without including any data point in both intervals:
```
x_mid = 0.2 * sc.units.m
sc.to_html(da['x', x_left:x_mid])
sc.to_html(da['x', x_mid:x_right])
```
Just like when slicing positionally, one of the bounds can be omitted to include everything from the start or everything up to the end:
```
da['x', :x_right]
```
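Likewise, omitting the right bound selects everything from the given label up to the end (a small sketch reusing `x_left` from above):
```
da['x', x_left:]
```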
Coordinates used for label-based indexing must be monotonically ordered.
While it is natural to think of slicing in terms of ascending coordinates, the slicing mechanism also works for descending coordinates.
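As a minimal sketch of the descending case (assuming the bounds are passed in the same descending order as the coordinate values; `da_desc` is constructed here purely for illustration):
```
da_desc = sc.DataArray(
    data=sc.array(dims=['x'], values=np.random.random(7)),
    coords={
        'x': sc.array(dims=['x'], values=np.linspace(0.9, 0.1, num=7), unit=sc.units.m)})
da_desc['x', 0.7 * sc.units.m:0.3 * sc.units.m]
```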
### Bin-edge coordinates
Bin-edge coordinates are handled slightly differently from standard coordinates in label-based indexing.
Consider:
```
da = sc.DataArray(
data = sc.array(dims=['x'], values=np.random.random(7)),
coords={
'x': sc.array(dims=['x'], values=np.linspace(1.0, 2.0, num=8), unit=sc.units.m)})
da
```
Here `'x'` is a bin-edge coordinate, i.e., its length exceeds the size of the corresponding array dimension by one.
Label-based slicing with a single coord value finds and returns the bin that contains the given coord value:
```
x = 1.5 * sc.units.m
da['x', x]
```
If an interval is provided when slicing with a bin-edge coordinate, the selection covers all bins that *contain* values falling into the right-open interval:
```
x_left = 1.3 * sc.units.m
x_right = 1.7 * sc.units.m
da['x', x_left:x_right]
```
### Limitations
Label-based indexing is *not* supported for:
- Multi-dimensional coordinates.
- Non-monotonic coordinates.
The first is a fundamental limitation, since a slice cannot be defined in such a case.
The latter will likely be supported in the future to some extent.
| github_jupyter |
```
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Reshape, Dense, Dropout, Flatten, LeakyReLU, Conv2D, MaxPooling2D, ZeroPadding2D, Conv2DTranspose, UpSampling2D, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import initializers
from privacygan import privacy_gan as pg
from privacygan.cifar import cifar_gan
import warnings
print(tf.__version__)
# Load CIFAR-10 data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_all = np.concatenate((X_train,X_test))
(n, d1, d2, d3) = X_all.shape
if d3 != 3:  # channels-first data; move the channel axis to the end
X_all = np.moveaxis(X_all, 1, 3)
X_all = np.float32(X_all)
X_all = (X_all / 255 - 0.5) * 2
X_all = np.clip(X_all, -1, 1)
# Generate training/test split (use a 10% subset for GAN training)
frac = 0.1
n = int(frac*len(X_all))
l = np.array(range(len(X_all)))
l = np.random.choice(l,len(l),replace = False)
X = X_all[l[:n]]
X_comp = X_all[l[n:]]
print(X.shape)
print(X_comp.shape)
#Specify models
generator = cifar_gan.CIFAR_Generator()
discriminator = cifar_gan.CIFAR_Discriminator()
generators = [cifar_gan.CIFAR_Generator(),cifar_gan.CIFAR_Generator()]
discriminators = [cifar_gan.CIFAR_Discriminator(),cifar_gan.CIFAR_Discriminator()]
pDisc = cifar_gan.CIFAR_DiscriminatorPrivate(OutSize = 2)
(generator, discriminator, dLosses, gLosses) = pg.SimpGAN(X, epochs = 1,
generator = generator,
discriminator = discriminator,
batchSize=256)
# Perform white-box (membership inference) attack using the discriminator
Acc = pg.WBattack(X,X_comp, discriminator)
#plot distribution of discriminator scores of training and test set
plt.hist(discriminator.predict(X)[:,0],color = 'r', alpha = 0.5, label = 'train', density = True, bins = 50)
plt.hist(discriminator.predict(X_comp)[:,0],color = 'b', alpha = 0.5, label = 'test', density = True, bins = 50)
plt.xlabel('Discriminator probability')
plt.ylabel('Normed frequency')
plt.title('GAN')
plt.legend()
noise = np.random.normal(0, 1, size=[X.shape[0], 100])
generatedImages = generator.predict(noise)
temp = generatedImages[:25].reshape(25, 32, 32, 3)
plt.figure(figsize=(5, 5))
for i in range(temp.shape[0]):
plt.subplot(5,5, i+1)
plt.imshow(temp[i], interpolation='nearest', cmap='gray_r')
plt.axis('off')
plt.tight_layout()
tf.keras.backend.clear_session()
optim = Adam(lr=0.0002, beta_1=0.5)
generator = cifar_gan.CIFAR_Generator(optim = Adam(lr=0.0002, beta_1=0.5))
discriminator = cifar_gan.CIFAR_Discriminator(optim = Adam(lr=0.0002, beta_1=0.5))
generators = [cifar_gan.CIFAR_Generator(optim = Adam(lr=0.0002, beta_1=0.5)),
cifar_gan.CIFAR_Generator(optim = Adam(lr=0.0002, beta_1=0.5))]
discriminators = [cifar_gan.CIFAR_Discriminator(optim = Adam(lr=0.0002, beta_1=0.5)),
cifar_gan.CIFAR_Discriminator(optim = Adam(lr=0.0002, beta_1=0.5))]
pDisc = cifar_gan.CIFAR_DiscriminatorPrivate(OutSize = 2,
optim = Adam(lr=0.0002, beta_1=0.5))
(generators, discriminators, _, dLosses, dpLosses, gLosses)= pg.privGAN(X, epochs = 1,
disc_epochs=1,
generators = generators,
discriminators = discriminators,
pDisc = pDisc,
optim = optim,
privacy_ratio=1.0,
batchSize=256)
# Perform white-box attack against the privGAN discriminators
pg.WBattack_priv(X,X_comp, discriminators)
noise = np.random.normal(0, 1, size=[X.shape[0], 100])
generatedImages = generators[0].predict(noise)
temp = generatedImages[:25].reshape(25, 32, 32, 3)
plt.figure(figsize=(5, 5))
for i in range(temp.shape[0]):
plt.subplot(5,5, i+1)
plt.imshow(temp[i], interpolation='nearest', cmap='gray_r')
plt.axis('off')
plt.tight_layout()
plt.hist(discriminators[0].predict(X)[:,0],color = 'r', alpha = 0.5, label = 'train', density = True, bins = 50)
plt.hist(discriminators[0].predict(X_comp)[:,0],color = 'b', alpha = 0.5, label = 'test', density = True, bins = 50)
plt.xlabel('Discriminator probability')
plt.ylabel('Normed frequency')
plt.title('privGAN (1.0)')
plt.legend()
pg.WBattack_priv(X,X_comp, discriminators)
```
| github_jupyter |
# Daily Chart Theme
***
### Color Palette:
1. "#dc0d12", Monza
2. "#dc0d7a", Razzmatazz
3. "#aa3594", Medium Red Violet
4. "#a20c4b", Jazzberry Jam
5. "#074a7e", Venice Blue
6. "#02a3cd" Abbey <br>
background #FFFAFA lightestpink<br>
**FINAL**: `["#dc0d7a", "#02a3cd", "#e4a100", "#dc0d12", "#074a7e", "#e46800", "#aa3594", "#a20c4b"]`
**DIVERGING**: `["#dc0d12", "#e9686b", "#fbe1e1", "#dff4f9", "#81d1e6", "#03a3cd"]`
tints <br>
#### HEX
1. #dc0d7a Razzmatazz
2. #e02b8b Cerise
3. #e54a9b Cerise
4. #e968ac Deep blush
5. #ed86bd Carissma
6. #f2a4cd Illusion
7. #f6c3de Azalea
8. #fbe1ee Cherub
##### Mexican pink:
#E4007c <br>
yellow : #e4a100 <br>
orangey: #e12f00 & #e46800 <br>
tints of mexican pink <br>
#### HEX
1. #e4007c
2. #e7208c
3. #eb409d
4. #ee60ad
5. #f180be
6. #f59fce
7. #f8bfde
8. #fcdfef
resources:
https://htmlcolorcodes.com/color-picker/ <br>
https://encycolorpedia.com/e4007c <br>
viz palette: http://projects.susielu.com/viz-palette?colors=[%22#dc0d12%22,%22#dc0d7a%22,%22#aa3594%22,%22#a20c4b%22,%22#074a7e%22,%22#02a3cd%22]&backgroundColor=%22#f7eef7%22&fontColor=%22#282828%22
https://www.colorhexa.com/
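To sanity-check the palettes above before wiring them into the theme, a quick swatch preview helps (a minimal sketch; `preview_palette` is a hypothetical helper, not part of the original notebook):
```
import altair as alt
import pandas as pd
def preview_palette(colors):
    # Draw one band per hex value, using the values themselves as fill colors
    df = pd.DataFrame({'color': colors, 'x': range(len(colors))})
    return alt.Chart(df).mark_rect().encode(
        x=alt.X('x:O', axis=None),
        color=alt.Color('color:N', scale=None),  # scale=None uses the hex strings directly
    )
preview_palette(["#dc0d12", "#e9686b", "#fbe1e1", "#dff4f9", "#81d1e6", "#03a3cd"])
```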
```
import altair as alt
def cimarron_theme():
markColor = "#282828"
axisColor = "#282828"
backgroundColor = "#F7EEF7"
font = "Helvetica"
labelfont = "Helvetica"
return {
"width": 600,
"height": 400,
"config": {
"arc": {
"fill": markColor,
},
"area": {
"fill": markColor,
},
"axisBand": {
"grid": False,
},
"axisBottom": {
"domain": False,
"domainColor": "black",
"domainWidth": 3,
"grid": True,
"gridColor": axisColor,
"gridWidth": 1,
"labelFontSize": 12,
"labelFont": labelfont,
"labelPadding": 4,
"tickColor": axisColor,
"tickSize": 10,
"titleFontSize": 14,
"titlePadding": 10,
"titleFont": font,
},
"axisLeft": {
"domainColor": axisColor,
"domainWidth": 1,
"gridColor": axisColor,
"gridWidth": 1,
"labelFontSize": 12,
"labelFont": labelfont,
"labelPadding": 4,
"tickColor": axisColor,
"tickSize": 10,
"ticks": True,
"titleFontSize": 14,
"titlePadding": 10,
"titleFont": font,
},
"axisRight": {
"domainColor": axisColor,
"domainWidth": 1,
"gridColor": axisColor,
"gridWidth": 1,
"labelFontSize": 12,
"labelFont": labelfont,
"labelPadding": 4,
"tickColor": axisColor,
"tickSize": 10,
"ticks": True,
"titleFontSize": 14,
"titlePadding": 10,
"titleFont": font,
},
"axisTop": {
"domain": False,
"domainColor": "black",
"domainWidth": 3,
"grid": True,
"gridColor": axisColor,
"gridWidth": 1,
"labelFontSize": 12,
"labelFont": labelfont,
"labelPadding": 4,
"tickColor": axisColor,
"tickSize": 10,
"titleFontSize": 14,
"titlePadding": 10,
"titleFont": font,
},
"background": backgroundColor,
"group": {
"fill": backgroundColor,
},
"legend": {
"labelFontSize": 11,
"labelFont": labelfont,
"padding": 1,
"symbolSize": 30,
"symbolType": "square",
"titleFontSize": 14,
"titlePadding": 10,
"titleFont": font,
},
"line": {
"stroke": markColor,
"strokewidth": 2,
},
"path": {
"stroke": markColor,
"strokeWidth": 0.5,
},
"point": {
"filled": True,
},
"rect": {
"fill": markColor,
},
"range": {
"category": [
"#dc0d7a",
"#02a3cd",
"#e4a100",
"#dc0d12",
"#074a7e",
"#e46800",
"#aa3594",
"#a20c4b"
],
"diverging": [
"#dc0d12",
"#e9686b",
"#fbe1e1",
"#dff4f9",
"#81d1e6",
"#03a3cd"
],
"heatmap": [
"#fcdfef",
"#f8bfde",
"#f59fce",
"#f180be",
"#ee60ad",
"#eb409d",
"#e7208c",
"#e4007c",
],
},
"symbol": {
"opacity": 1,
"shape": "circle",
"size": 40,
"strokeWidth": 1,
},
"style": {
"bar": {
"binSpacing": 2,
"fill": markColor,
"stroke": "null",
},
},
"title":{
"anchor": "start",
"fontSize": 24,
"fontWeight": 600,
"font": font,
"offset": 20,
},
},
}
alt.themes.register("cimarron", cimarron_theme)
alt.themes.enable("cimarron")
```
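Once the theme is registered and enabled, it applies to every new chart. A minimal usage sketch (the data frame here is invented purely for illustration):
```
import altair as alt
import pandas as pd
df = pd.DataFrame({'category': ['a', 'b', 'c', 'd'], 'value': [3, 7, 5, 2]})
alt.themes.enable("cimarron")  # already enabled above; repeated so this sketch is self-contained
alt.Chart(df).mark_bar().encode(
    x='category:N',
    y='value:Q',
    color='category:N',  # colors come from the theme's "category" range
)
```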
| github_jupyter |