code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 지연 속성에는 `__getattr__, __getattribute__, __setattr__`을 사용하자
#
# Python의 언어 후크를 이용하면 시스템들을 연계하는 범용 코드를 쉽게 만들 수 있다. 예를 들어 데이터베이스의 row를 파이썬 객체로 표현한다고 하자. 데이터베이스에는 스키마 세트가 있다. 그러므로 row에 대응하는 객체를 사용하는 코드는 데이터베이스 형태도 알아야 한다. 하지만 Python에서는 객체와 데이터베이스를 연결하는 코드에서 row의 스키마를 몰라도 된다. 코드를 범용으로 만들면 된다.
#
# 사용하기에 앞서 정의부터 해야 하는 일반 인스턴스 속성, `@property` 메서드, 디스크립터로는 이렇게 할 수 없다. Python은 `__getattr__`이라는 특별한 메서드로 이런 동적 동작을 가능하게 한다. 클래스에 `__getattr__` 메서드를 정의하면 객체의 인스턴스 딕셔너리에 속성을 찾을 수 없을 때마다 이 메서드가 호출된다.
class LazyDB(object):
    """Demonstrates lazy attribute creation via __getattr__.

    __getattr__ only runs when normal lookup fails, so each missing
    attribute is materialized once and cached in the instance dict.
    """

    def __init__(self):
        self.exists = 5

    def __getattr__(self, name):
        # Build the placeholder, cache it in __dict__, then return it;
        # subsequent reads hit the instance dict and skip this hook.
        placeholder = 'Value for %s' % name
        self.__dict__[name] = placeholder
        return placeholder
# 이제 존재하지 않는 속성인 foo에 접근해보자. 그러면 파이썬이 `__getattr__`메서드를 호출하게 되고 이어서 인스턴스 딕셔너리 `__dict__`를 변경하게 된다.
# Accessing the missing attribute 'foo' triggers __getattr__, which
# caches the value, so the instance dict gains a 'foo' entry afterwards.
data = LazyDB()
print('Before:', data.__dict__)
print('foo: ', data.foo)
print('After: ', data.__dict__)
# `__getattr__`이 호출되는 시점을 보여주기 위해 LazyDB에 로깅 추가
#
# 무한 루프를 피하려고 `super().__getattr__()`로 실제 property 값을 얻어오는 부분을 눈여겨보자.
# +
class LoggingLazyDB(LazyDB):
    """LazyDB variant that logs every __getattr__ invocation."""

    def __getattr__(self, name):
        # Log first, then delegate to LazyDB.__getattr__ via super() so
        # the lookup still resolves (and caches) exactly as before.
        print('Called __getattr__(%s)' % name)
        result = super().__getattr__(name)
        return result
# 'exists' is found in the instance dict, so __getattr__ never fires;
# 'foo' fires it once, after which the cached value satisfies lookups.
data = LoggingLazyDB()
print('exists:', data.exists)
print('foo: ', data.foo)
print('foo: ', data.foo)
print('After: ', data.__dict__)
# -
# exists 속성은 인스턴스 딕셔너리에 있으므로 `__getattr__`이 절대 호출되지 않는다. foo 속성은 원래는 인스턴스 딕셔너리에 없으므로 처음에는 `__getattr__`이 호출된다. 하지만 foo에 대응하는 `__getattr__`호출은 setattr을 호출하며, setattr은 인스턴스 dictionary에 foo를 저장한다. 따라서 foo에 두 번째로 접근할 때는 `__getattr__`이 호출되지 않는다.
#
# 이런 동작은 schemaless data에 지연 접근하는 경우에 특히 도움이 된다. `__getattr__`이 property loading이라는 어려운 작업을 한 번만 실행하면 다음 접근부터는 기존 결과를 가져온다.
#
# `__getattr__` 후크는 기존 속성에 빠르게 접근하려고 객체의 인스턴스 딕셔너리를 사용할 것이므로 이 작업에는 믿고 쓸 수 없다.
#
# Python에는 쓰임새를 고려한 `__getattribute__`라는 또 다른 후크가 있다. 이 특별한 메서드는 객체의 속성에 접근할 때마다 호출되며, 심지어 해당 속성이 속성 딕셔너리에 있을 때도 호출된다. 이런 동작 덕분에 속성에 접근할 때마다 전역 트랜잭션 상태를 확인하는 작업 등에 쓸 수 있다. 여기서는 `__getattribute__`가 호출될 때마다 로그를 남기려고 ValidatingDB를 정의한다.
# +
class ValidatingDB(object):
    """Logs every attribute access via __getattribute__ and lazily
    fills in attributes that do not exist yet."""

    def __init__(self):
        self.exists = 5

    def __getattribute__(self, name):
        # Runs on *every* access, even when the attribute already exists.
        print('Called __getattribute__(%s)' % name)
        try:
            found = super().__getattribute__(name)
        except AttributeError:
            # Missing attribute: synthesize a value and cache it.
            found = 'Value for %s' % name
            setattr(self, name, found)
        return found
# Both existing and missing attributes route through __getattribute__;
# the second 'foo' access still logs even though the value is cached.
data = ValidatingDB()
print('exists:', data.exists)
print('foo: ', data.foo)
print('foo: ', data.foo)
# -
# 동적으로 접근한 property가 존재하지 않아야 하는 경우에는 AttributeError를 일으켜서 `__getattr__, __getattribute__`에 속성이 없는 경우의 Python 표준 동작이 일어나게 한다.
# +
class MissingPropertyDB(object):
    """Raises AttributeError for the dynamic property 'bad_name',
    mirroring Python's standard missing-attribute behavior.

    Note: any other missing attribute resolves to None, as in the
    original (implicit return).
    """

    def __getattr__(self, name):
        if name != 'bad_name':
            return None
        raise AttributeError('%s is missing' % name)
# data.bad_name raises AttributeError; catch it so the demonstration
# does not abort the rest of the script (the original let it propagate).
data = MissingPropertyDB()
try:
    data.bad_name
except AttributeError as error:
    print(error)
# -
# Python 코드로 범용적인 기능을 구현할 때 종종 내장 함수 hasattr로 property가 있는지 확인하고 내장 함수 getattr로 property 값을 가져온다. 이 함수들도 `__getattr__`을 호출하기 전에 instance dictionary에서 속성 이름을 찾는다.
# hasattr checks the instance dict first, so __getattr__ runs only for
# the first lookup; the cached 'foo' satisfies the second hasattr call.
data = LoggingLazyDB()
print('Before: ', data.__dict__)
print('foo exists: ', hasattr(data, 'foo'))
print('After: ', data.__dict__)
print('foo exists: ', hasattr(data, 'foo'))
# 위 예제에서 `__getattr__`은 한 번만 호출된다. 이와 대조로, `__getattribute__`를 구현한 클래스인 경우 객체에 hasattr이나 getattr을 호출할 때마다 `__getattribute__`가 실행된다.
# __getattribute__ intercepts every access, so each hasattr call logs,
# even after the attribute has been cached.
data = ValidatingDB()
print('foo exists: ', hasattr(data, 'foo'))
print('foo exists: ', hasattr(data, 'foo'))
# 이제 파이썬 객체에 값을 할당할 때 지연 방식으로 데이터를 데이터베이스에 집어넣고 싶다고 해보자. 이 작업은 임의 속성 할당을 가로채는 `__setattr__`언어 후크로 할 수 있다. `__getattr__`과 `__getattribute__`로 속성을 추출하는 것과는 다르게 별도의 메서드 두 개가 필요하지 않다. `__setattr__` 메서드는 인스턴스 속성이 할당 받을 때마다 직접 혹은 내장 함수 setattr을 통해 호출된다.
class SavingDB(object):
    """Intercepts every attribute assignment via __setattr__."""

    def __setattr__(self, name, value):
        # Save some data to the DB log here
        # ...
        # Delegate the real write so the instance dict stays in sync.
        super().__setattr__(name, value)
class LoggingSavingDB(SavingDB):
    """SavingDB variant that logs each assignment before storing it."""

    def __setattr__(self, name, value):
        # Log first, then let SavingDB perform the actual assignment.
        message = 'Called __setattr__(%s, %r)' % (name, value)
        print(message)
        super().__setattr__(name, value)
# Every assignment (first and subsequent) routes through __setattr__.
data = LoggingSavingDB()
print('Before: ', data.__dict__)
data.foo = 5
print('After: ', data.__dict__)
data.foo = 7
print('Finally:', data.__dict__)
# `__getattribute__`와 `__setattr__`을 사용할 때 부딪히는 문제는 객체의 속성에 접근할 때마다 호출된다는 점이다.
class BrokenDcitionaryDB(object):
    """Broken example: __getattribute__ recurses forever.

    Reading self._data inside __getattribute__ re-invokes
    __getattribute__, so any attribute read ends in RecursionError.
    (Class name typo kept: it is referenced later in the file.)
    """

    def __init__(self, data):
        # Bug fix: the original discarded the ctor argument
        # (self._data = {}), so the backing dict was always empty.
        self._data = data

    def __getattribute__(self, name):
        print('Called __getattribute__(%s)' % name)
        # self._data re-enters this very method -> infinite recursion.
        return self._data[name]
# 그러려면 위와 같이 `__getattribute__` 메서드에서 `self._data`에 접근해야한다. 하지만 실제로 시도해보면 Python이 stack의 한계에 도달할 때까지 재귀 호출을 하게 되어 중단된다.
# Any attribute access recurses until the stack limit; catch the
# RecursionError so the script can continue past the demonstration
# (the original crashed here).
data = BrokenDcitionaryDB({'foo': 3})
try:
    data.foo
except RecursionError:
    print('Max recursion depth exceeded')
# 문제는 `__getattribute__`가 `self._data`에 접근하면 `__getattribute__`가 다시 실행되고, 다시 `self._data`에 접근한다는 점이다. 해결책은 인스턴스에서 `super().__getattribute__` 메서드로 인스턴스 속성 딕셔너리에서 값을 얻어 오는 것이다.
class DictionaryDB(object):
    """Exposes the keys of a backing dict as attributes.

    Uses super().__getattribute__ to fetch the backing dict itself,
    which avoids the infinite recursion a plain self._data access
    would cause inside __getattribute__.
    """

    def __init__(self, data):
        self._data = data

    def __getattribute__(self, name):
        backing = super().__getattribute__('_data')
        return backing[name]
# Bug fix: DictionaryDB.__init__ requires a backing dict; the original
# called DictionaryDB() with no argument, which raises TypeError.
data = DictionaryDB({'foo': 3})
print('foo: ', data.foo)
| Chap 4: Metaclass & Property/Chap 32 Use at latency property __getattr__, __getattribute__, __setattr__.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
import feather
from evaluator import Evaluator
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import preprocessing
from tqdm import tqdm_notebook as tqdm
# -
# Load the pre-built feather artifacts; the book-level frames are indexed
# by book_id so they can be looked up / joined per book.
training_ratings = feather.read_dataframe('./feather/training_ratings')
testing_ratings = feather.read_dataframe('./feather/testing_ratings')
book_profiles = feather.read_dataframe('./feather/book_profiles').set_index('book_id')
novelty_scores = feather.read_dataframe('./feather/novelty_scores').set_index('book_id')
books = feather.read_dataframe('./feather/books').set_index('book_id')
# +
# Square book-by-book cosine-similarity matrix over the content profiles.
book_sim = pd.DataFrame(
    data = cosine_similarity(book_profiles, book_profiles),
    index = book_profiles.index,
    columns = book_profiles.index
)
book_sim.head()
# -
# Evaluator computes top-k (k=10) metrics against the held-out ratings;
# presumably book_sim / novelty_scores feed its diversity and novelty
# measures -- confirm against evaluator.Evaluator.
evl = Evaluator(
    k = 10,
    training_ratings = training_ratings,
    testing_ratings = testing_ratings,
    book_sim = book_sim,
    novelty_scores = novelty_scores
)
# # Content-based recommender system
# For every book, keep its 50 most similar books; slot 0 is the book
# itself (similarity 1.0), hence the [1:51] slice.
top_sim_books = {}
book_ids = book_sim.index
for book_id in tqdm(book_ids):
    top_sim_books[book_id] = book_sim.loc[book_id].sort_values(ascending=False)[1:51]
# Per-user list of the books the user rated 5 stars.
list_of_5_ratings = training_ratings[training_ratings.rating==5].groupby('user_id')['book_id'].apply(list)
list_of_5_ratings.head()
class ContentBasedRecommender():
    """Item-item content-based recommender.

    For each user, aggregates the precomputed top-similar lists
    (`top_sim_books`) of the books the user rated 5 stars
    (`list_of_5_ratings`; both are module-level globals), ranks
    candidates by summed similarity, drops books the user already
    rated, and keeps the top 10.
    """

    name = "Content-based RS"

    def __init__(self):
        # Per-instance store: the original class-level `preds = {}` was a
        # mutable attribute shared by every instance.
        self.preds = {}

    def fit(self, training_ratings):
        """Precompute top-10 recommendations for every user in the set."""
        user_ids = training_ratings.user_id.unique().tolist()
        self.preds = {}
        for user_id in tqdm(user_ids):
            excluded_books = training_ratings[training_ratings.user_id==user_id].book_id.unique().tolist()
            # Series.append was removed in pandas 2.0 -- collect the
            # per-book similarity slices and concatenate them once.
            parts = [top_sim_books[book_id]
                     for book_id in list_of_5_ratings.get(user_id, [])]
            if parts:
                most_similar_books = pd.concat(parts)
            else:
                # User has no 5-star ratings (the original raised KeyError
                # here): nothing to recommend from.
                most_similar_books = pd.Series(dtype='float64')
            most_similar_books = np.array(most_similar_books.groupby(most_similar_books.index).sum().sort_values(ascending=False).index)
            recommendable = most_similar_books[~np.in1d(most_similar_books, excluded_books)]
            self.preds[user_id] = recommendable[:10]

    def recommendation_for_user(self, user_id):
        """Return the user's top-10 book ids, or [] for unknown users."""
        if user_id not in self.preds:
            return []
        return self.preds[user_id]

    def all_recommendation(self):
        """Return the whole {user_id: recommendations} mapping."""
        return self.preds
# Fit on the training ratings and report the evaluator's metrics.
ctb_rec = ContentBasedRecommender()
evl.evaluate(ctb_rec)
evl.print_result()
# # Inspect
# Number of training ratings per user, for the inspection below.
ratings_count_for_users = training_ratings.groupby('user_id').count()[['rating']].rename(columns={'rating': 'ratings_count'})
ratings_count_for_users.head(10)
# +
def get_inspection(model):
    # Build a per-user table: total ratings, number of 5-star ratings, and
    # the average precision of the model's recommendations for that user.
    # NOTE(review): relies on Evaluator internals (evl.testing_idx,
    # evl._average_precision) -- confirm these remain available.
    inspection = pd.DataFrame(columns=['ratings_count', '5_count', 'MAP'])
    preds = model.all_recommendation()
    for user_id in tqdm(preds.keys()):
        pred = preds[user_id]
        truth = evl.testing_idx[user_id]
        inspection.loc[user_id] = [ratings_count_for_users.loc[user_id, 'ratings_count'], len(list_of_5_ratings[user_id]), evl._average_precision(pred, truth)]
    return inspection
inspection = get_inspection(ctb_rec)
# -
inspection.head()
# MAP as a function of how many ratings each user has.
inspection.groupby('ratings_count').mean()['MAP'].plot()
# Bug fix: the column created in get_inspection is '5_count', not
# '5_counts' -- the original raised KeyError on this line.
inspection.groupby('5_count').mean().plot()
# # Alternative version
# +
def build_user_profiles(training_ratings, book_profiles):
    # Build one profile per user by summing the profiles of the books the
    # user rated 5 stars, then L2-normalize each row.
    # NOTE(review): a user without any 5-star rating would raise KeyError
    # at list_of_5_ratings.loc[user_id] -- confirm every user has one.
    user_profiles = pd.DataFrame(columns=book_profiles.columns)
    user_ids = training_ratings.user_id.unique()
    list_of_5_ratings = training_ratings[training_ratings.rating==5].groupby('user_id')['book_id'].apply(list)
    for user_id in tqdm(user_ids):
        user_profiles.loc[user_id] = book_profiles.loc[list_of_5_ratings.loc[user_id]].sum()
    return pd.DataFrame(
        data = preprocessing.normalize(user_profiles.values),
        columns = user_profiles.columns,
        index = user_profiles.index
    )
user_profiles = build_user_profiles(training_ratings, book_profiles)
# -
user_profiles.head()
class AltContentBasedRecommender():
    """Alternative content-based recommender.

    Ranks books for each user by the cosine similarity between the
    user's profile and every item profile, filters out books the user
    already rated, and keeps the top 10.
    """

    name = "Alt Content-based RS"

    def __init__(self, item_profiles):
        self.item_profiles = item_profiles
        # Per-instance store: the original class-level `preds = {}` was a
        # mutable attribute shared by every instance.
        self.preds = {}

    def fit(self, training_ratings):
        """Precompute top-10 recommendations for every user.

        NOTE(review): reads the module-level `user_profiles` built by
        build_user_profiles -- it is not derived from the arguments.
        """
        user_ids = training_ratings.user_id.unique().tolist()
        sim = pd.DataFrame(
            data=cosine_similarity(user_profiles, self.item_profiles),
            index=user_profiles.index,
            columns=self.item_profiles.index
        )
        self.preds = {}
        for user_id in tqdm(user_ids):
            book_ids = np.array(
                sim.loc[user_id].sort_values(ascending=False).index)
            excluded_books = training_ratings[training_ratings.user_id == user_id].book_id.unique(
            ).tolist()
            recommendable = book_ids[~np.in1d(book_ids, excluded_books)]
            self.preds[user_id] = recommendable[:10]

    def recommendation_for_user(self, user_id):
        """Return the user's top-10 book ids, or [] for unknown users."""
        if user_id not in self.preds:
            return []
        return self.preds[user_id]

    def all_recommendation(self):
        """Return the whole {user_id: recommendations} mapping."""
        return self.preds
# Evaluate the alternative model with the same evaluator.
alt_ctb_rec = AltContentBasedRecommender(book_profiles)
evl.evaluate(alt_ctb_rec)
evl.print_result()
# The results are similar. But the first version of Content-based RS is much simpler to build.
user_profiles.shape
# Persist the user profiles; feather requires a default integer index,
# hence the reset_index/rename round-trip.
user_profiles.reset_index().rename(columns={'index': 'user_id'}).to_feather('./feather/user_profiles')
| notebooks/7. Content-based Recommender System.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Manipulating Time-series
#
# Time-series are a key element when assessing solar resource data. In this section, we present several examples to learn how to deal with different formats in the data and few common tasks to prepare our time-series for later analysis, such as down and up-sampling data when we need different temporal resolution than that initially available or interpolating missing values in the data.
#
# The dataset used in the examples of this section is a customized dataset using solar radiation measurements from the Measurement and Instrumentation Data Center (MIDC) of the U.S. National Renewable Energy Laboratory (NREL). The station selected is located at the University of Nevada - Las Vegas (UNLV) and the data used are 1-minute GHI, DHI and DNI measurements for the year 2020 {cite}`stoffel_university_2006`.
#
# In this section, we cover:
# - [1 Time-series handling](#Time-series-handling)
# - [2 Down and up-sampling time-series data](#Down-and-up-sampling-time-series-data)
# - [3 Interpolating time-series data](#Interpolating-time-series-data)
# - [4 Visualizing time-series data](#visualizing-time-series-data)
#
# ***
# <div style="text-align: right"> <i>Content by <NAME></i> </div>
# ## 1 Time-series handling
# Datasets often come in different formats depending on the source. Those formats sometimes cannot be used straightaway to build a time-series and may require additional processing steps before building the time-series. For example: <br>
# - **What if date and time are in different columns?** <br>
# - **What if the year, month, day and time are in separate columns?** <br>
# - **How to define the timestamp format for a particular dataset?** <br>
# - **How to deal with timestamp issues, local vs. universal (UTC) time?**
#
# This subsection presents several examples to deal with different formats in which time-series data could come and shows how to build a time-series or *datetime series*, as known in Python, for later analysis. The processing steps to build time-series are based on [pandas library](https://pandas.pydata.org/).
#
# Let's get started!
# Importing the needed libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pvlib
# ### 1.1 Build our customized dataset
# In order to build the customized dataset for this section, we make use of the I/O tools of the Python library *pvlib* to retrieve the data from the UNLV station in the MIDC. Data from other stations from the MIDC can be also retrieved using this method by adapting the station ID in the query. The different station IDs are available in the [MIDC raw data page](https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist).
# +
# Pull 1-minute UNLV data for all of 2020 from the NREL MIDC API
# (network access required).
# Dictionary to rename certain variables from the raw data
var_map = {'Global Horiz [W/m^2]': 'ghi',
           'Direct Normal [W/m^2]':'dni',
           'Diffuse Horiz (calc) [W/m^2]':'dhi',
           'Year':'year'}
# Retrieving the raw data from the station
df_ref = pvlib.iotools.read_midc_raw_data_from_nrel('UNLV', # Station id
                                                    pd.Timestamp('20200101'), # Start date YYYYMMDD
                                                    pd.Timestamp('20201231'), # End date YYYYMMDD
                                                    variable_map=var_map) # Variable Map
# Let's have a look to the first 2 rows of the dataset
df_ref.head(2)
# Let's have a look to the last 2 rows of the dataset
df_ref.tail(2)
# The dataset is 1-minute resolution data with 21 variables related to meteorological and other relevant data: ambient temperature, wind speed, wind direction, global horizontal irradiance (GHI), direct normal irradiance (DNI), diffuse horizontal irradiance (DHI), zenith and azimuth angles, airmass, among other.
#
# For the examples in this section we will use GHI, DNI and DHI measurements and time-related data.
# +
# Slice desired variables out of the 21 variables provided in the raw data.
df_ref = df_ref[['ghi', 'dni', 'dhi', 'year']]
# Add multiple temporal data to the dataset
df_ref['month'] = df_ref.index.month
df_ref['day'] = df_ref.index.day
df_ref['hour'] = df_ref.index.hour
df_ref['minute'] = df_ref.index.minute
df_ref['date'] = df_ref.index.strftime('%Y-%m-%d')
df_ref['time'] = df_ref.index.strftime('%H:%M:%S')
df_ref['timestamp'] = df_ref.index.strftime('%Y-%m-%d %H:%M:%S%z')
# Epoch format: nanoseconds since 1970 -> whole seconds.
# Bug fix: the original divided by the float 1e9, which silently turned
# the epoch column into float64; integer division keeps it int64.
df_ref['epoch'] = df_ref.index.astype('int64') // 10**9
# Reset the Index of the DataFrame
df_ref = df_ref.reset_index(drop=True)
# Let's have a look to the resulting columns of the dataset
df_ref.columns
# -
# Let's visualize the first rows of the **customized reference dataframe:**
# First 3 rows in the dataframe
df_ref.head(3)
# Now that we have our customized reference dataset of 1-minute irradiance measurements for 2020 and temporal data, we can start building the timeseries in different ways.
#
# ### 1.2 Time-series when timestamps are available:
#
# When timestamps are available, the most straightforward way to build the DataFrame with a datetime index is to convert the column with the timestamp into datetime format and set it as index.
#
# Let's see how!
# A new dataframe copy of the reference dataset
df = df_ref.copy()
# Convert the timestamp string into datetime format
# (%z parses the embedded UTC offset, so the result is timezone-aware)
df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S%z')
# Set timestamp column as index
df = df.set_index(df['timestamp'])
# See the first 3 rows of the DataFrame with Datetime Index
df.head(3)
# The format of the timestamp is specified in the argument 'format' as a string and can be adapted to any case. The available options in Python can be checked in this [link](https://strftime.org/).
#
# Universal Time Coordinated (UTC) is usually the timestamp provided for many solar radiation data networks and platforms like the BSRN, PVGIS, etc. However, data can be also reported in local time like in our example. Timestamps can be converted to other timezones with the funcion *tz_convert*, which can be useful when dealing with data from different databases and locations worldwide:
# Add UTC timestamp from the local time (Pacific Summer Time)
# tz_convert works because the index is already timezone-aware.
df['timestamp_utc'] = df.index.tz_convert('UTC')
# See the first 3 rows
df.head(3)
# The valid timezone strings for other timezones can be found in this [link](https://pvlib-python.readthedocs.io/en/stable/timetimezones.html). When the timezone is not provided as part of the timestamp, the function *tz_localize* can be used to localize the values in a timezone-naive series. *tz_localize* will be used in the next example.
# ### 1.3 Time-series when date and time are available:
#
# When date and time are available in separate columns, a timestamp can be created in a new column and the new column can then be set as index and localized. Let's have a look how to do that:
#
# A new dataframe copy of the reference dataset
df = df_ref.copy()
# New column with the date and time
df['datetime'] = df['date'] + 'T' + df['time']
# Convert the new column into datetime format
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y-%m-%dT%H:%M:%S')
# Set the column 'datetime' as index and localize it to its timezone
df = df.set_index(df['datetime']).tz_localize('Etc/GMT+8')
# See the first 3 rows
df.head(3)
# ### 1.4 Time-series when the time data is split in multiple columns:
#
# If time-related data are split across multiple columns, a timestamp can be created in a new column similarly than in the previous case. Let's imagine our dataset would have the year, month, day, hour, and minute in separate columns. In that case, we could build our time-series as follows:
# A new dataframe copy of the reference dataset
df = df_ref.copy()
# Assemble the timestamp from the component columns. When to_datetime
# receives a DataFrame of year/month/day/... columns it assembles the
# datetimes itself; the 'format' argument does not apply to this mode
# (and newer pandas rejects the combination), so it is omitted here.
df['datetime'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']])
# Set the column 'datetime' as index
df = df.set_index(df['datetime'])
# Localize the datetime series
df.index = df.index.tz_localize('Etc/GMT+8')
# See the first 3 rows
df.head(3)
# ### 1.5 Time-series when the timestamp is given as epoch (Unix Time)
#
# If the dataset has epoch timestamps, note that the data will have UTC time. However, it can be converted to any timezone using the function *tz_convert*. If there are epoch timestamps, a datetime series can be formed as follows:
# A new dataframe copy of the reference dataset
df = df_ref.copy()
# Convert epoch timestamps to datetime format and localize
# (epoch/Unix time is always UTC, hence utc=True)
df['datetime'] = pd.to_datetime(df['epoch'], unit='s', utc=True)
# Set datetime as index and convert UTC time to local time
df = df.set_index(df['datetime']).tz_convert('Etc/GMT+8')
# See the results
df.head(3)
# We have seen how the same DataFrame with *datetimeindex* can be obtained in multiple ways depending on the format of time data provided.
#
# ***
#
# ## 2 Down and up-sampling time-series data
#
# When assessing solar resource, you may need a different time-resolution than your data for a particular part of the analysis. In those cases, it is possible to **down-sample and up-sample the data at different temporal resolutions** using two different methods within [pandas library](https://pandas.pydata.org/) called *resample* and *asfreq*. Depending on your needs, you will opt for one or the other. Regardless of the method, both of them require a DataFrame with *datetimeindex* either time-aware (localized) or time-naive (not localized).
#
# ### 2.1 Method 'asfreq' vs. 'resample'
# Let's first create a new DataFrame with only the columns with solar data and see the differences between both methods with examples.
# New DataFrame with 1-minute data and solar data
# (the DatetimeIndex is preserved, which resample/asfreq require)
df_1min = df[['ghi', 'dhi', 'dni']]
# See our new DataFrame
df_1min.head(3)
# Let's try to obtain a DataFrame down-sampled with the maximum monthly data with both methods and see the differences. With *asfreq*, it would be the following:
# asfreq picks the single instantaneous row at each month-end stamp;
# .max() then reduces over those sampled rows only.
df_1min.asfreq("1M").max()
# With *resample* the result would be:
# resample aggregates ALL rows falling inside each monthly bucket.
df_1min.resample("1M").max()
# It is obvious that the outputs are not the same and that is because the methods work differently. *asfreq* takes the value at the simultaneous stamps given by the frequency argument. See below:
# Instantaneous month-end samples (no aggregation performed).
df_1min.asfreq("1M")
# Then *.max()* has returned the maximum of each of the columns.
#
# In contrast, *resample* does return the maximum value within the period of time at the specified frequency. *resample* method requires a mathematical operation to perform in the resampled data (the maximum value in our case). Otherwise, it would return a *DatetimeIndexResampler* object without showing any data. See below:
# Without an aggregation, resample only returns a resampler object.
df_1min.resample("1M")
# The *resample* method accepts multiple **mathematical and statistical operations**. For example: maximum (max), minimum (min), arithmetic mean (mean), standard deviation (std), median (median), mode (mode), addition (sum), among others.
#
# Both methods allow for multiple **frequencies options**, the available frequency tags within Python can be found [here](https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags).
# ### 2.2 Down-sampling the data in a time-series
#
# Down-sampling permits turning more frequent values into less frequent. In the context of solar resource and considering our 1-minute resolution dataset, down-sampling can be used for:
# - Producing a timeseries of hourly/daily average irradiance.
# - Producing a timeseries of maximum daily irradiance.
# - Estimating the hourly/daily/monthly sums of irradiation.
# - And many more!
#
# Let's implement some of these listed examples!
#
# #### Producing hourly average irradiance from minutely observations
# Resampling to hourly mean values
# (mean() skips NaN minutes within each hour by default)
df_hourly = df_1min.resample("1H").mean()
# Showing the shape of the new DataFrame
df_hourly.shape # returns Rows, Columns
# There are 8760 hours in a year. Yet, we can have a look to the first few rows of the DataFrame:
df_hourly.head(12)
# A time-series with the maximum irradiance would be similar replacing *'mean()'* with *'max()'*.
#
# #### Producing time-series of monthly total GHI, DHI, DNI irradiation from minutely observations
# Resampling to monthly aggregated values
# 1-minute W/m^2 samples * (1/60) h per sample -> Wh/m^2, summed per month
monthly_energy = df_1min[['ghi', 'dhi', 'dni']].resample("1M").sum()*(1/60)
# See the results expressed in kWh·sqm
monthly_energy/1000
# It could be done in similar way for other resolutions (e.g. daily or annual irradiation).
# ### 2.3 Up-sampling the data in a time-series
#
# Up-sampling permits obtaining more frequent values from less frequent. For solar data, depending on the application up to sub-minutely data could be required and up-sampling is a technique that provides a manner to increase the temporal resolution to adapt it to our needs. For example, turning an hourly time-series into a half-hourly. Let's see an example using both *resample* and *asfreq*.
#
# #### Producing half-hourly irradiance series from hourly observations
# Using the DataFrame *df_hourly* created previously, it can be up-sample as follows:
# Using 'resample' method:
# (up-sampling creates new half-hour stamps filled with NaN)
df_hourly.resample("30Min").mean().head(10)
# Using 'asfreq' method:
df_hourly.asfreq("30Min").head(10)
# Contrary to the case of down-sampling, both *asfreq* and *resample* provide similar results when up-sampling. However, *asfreq* provides additional functionalities to treat the new timestamps without data, i.e. NaN values.
#
# By passing the argument *'method'* with the string *'backfill'* or *'bfill'* uses the next valid observation to fill the NaN value (back filling). If instead, the string *'pad'* or *'ffill'* is given, the method assigns the last valid observation forward to the next valid (forward filling).
#
# Let's see the same example adding this argument:
# Half-hourly up-sample with back filling function
# (each new NaN stamp takes the NEXT valid hourly observation)
df_hourly.asfreq("30Min", method='bfill').head(10)
# We see that the DataFrame now contains the next valid hourly value in the newly obtained half-hourly timestamps of the previous hour. It would take the previous valid hourly value if we used forward filling. For example:
# Half-hourly up-sample with forward-filling function
# (each new NaN stamp takes the PREVIOUS valid hourly observation)
df_hourly.asfreq("30Min", method='ffill').head(10)
# The forward filling option provides the same value for o'clock and half past timestamps within the same hour. In addition to these two ways to complete the NaN values, the method *asfreq* can replace the NaN values with a constant. See below:
# Half-hourly up-sample filling the new timestamps with a constant
df_hourly.asfreq("30Min", fill_value=0).head(10)
# The use of the methods *asfreq* or *resample* will depend on your dataset and the analysis you aim to undertake.
#
# ***
# ## 3 Interpolating time-series data
#
# When up-sampling the data series, it can happen that back-filling, forward-filling and constant replacement does not necessarily work for your analysis/application. An alternative approach is replacing the NaN values with an interpolated result. Interpolation in Pandas DataFrames with *DatetimeIndex* is done with the *interpolate* method.
#
# The mathematical interpolation method in *interpolate* is defined with the argument called *'method'*. Pandas permits several interpolation methods, such as 'linear', 'cubic', 'quadratic', 'spline', 'polynomial' and others. All the interpolation options can be found in the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.interpolate.html) of the *interpolate* method.
#
# Following the previous example, let's implement interpolation in the missing values of the half-hourly timestamps using 'linear', 'cubic' and 'polynomial' methods:
# Up-sample using the 'asfreq' method
# (the new half-hour stamps are NaN and will be interpolated below)
df_30min = df_hourly.asfreq("30Min")
# Interpolate missing values (NaN) with linear interpolation
df_linear = df_30min.interpolate(method='linear')
# See the results:
df_linear.head(10)
# Similarly, it can be implemented to other methods:
# Interpolate missing values (NaN) with cubic interpolation
# (non-linear methods like 'cubic' require scipy to be installed)
df_cubic = df_30min.interpolate(method='cubic')
# See the results:
df_cubic.head(10)
# With polynomial interpolation, the degree or order of the polynomial function needs to be defined as an argument:
# Interpolate missing values (NaN) with polynomial interpolation
# ('polynomial' also requires scipy; 'order' sets the polynomial degree)
df_polynomial = df_30min.interpolate(method='polynomial', order=5)
# See the results:
df_polynomial.head(10)
# The interpolation of NaN values when up-sampling time-series data can help overcome the issues of using back or forward filling, specially if you aim to up-sample at higher frequencies than the example shown (e.g. 1-hour to 15-minute resolution series). The mathematical methods available for interpolation within Pandas are diverse and cover beyond the most common interpolation functions.
# ## 4 Visualizing time-series data
#
# It is often useful to visualize the data to grasp insights and observe trends about the data. This section shows a few examples to visualize time-series data.
#
# ### 4.1 Plotting a time-series for a day of interest
#
# Below there is an example to visualize a single day of interest. With DataFrames using *DatetimeIndex* it is easy to select a particular day and Pandas interacts with Matplotlib.Pyplot library to plot straight-away.
# Plotting GHI for a given day in the time-series.
# Selecting rows with df[date_string] was deprecated and removed in
# pandas 2.0 -- use .loc for partial-string row indexing instead.
df_1min.loc['2020-06-01', 'ghi'].plot(label='GHI')
plt.ylabel('Irradiance [W/m$^2$]')
plt.xlabel('Local Time [HH:MM]')
plt.legend(loc='best')
plt.show() # Not needed in Jupyter Notebooks but usually required in other IDEs.
# We can visualize the effect of using average (*resample*) vs. instantaneous (*asfreq*) measurements when down-sampling our data.
# Plotting GHI for a given day in the time-series (.loc replaces the
# df[date_string] row-selection syntax removed in pandas 2.0).
df_1min.loc['2020-06-01', 'ghi'].plot(label='1-min data', alpha=0.4) # Reference data
df_1min.asfreq('30Min').loc['2020-06-01', 'ghi'].plot(label='30-min instant.') # Instantaneous 30-min values
df_1min.resample('30Min').mean().loc['2020-06-01', 'ghi'].plot(label='30-min average') # Average 30-min values
plt.title('Average vs. Actual GHI Measurements') # title of the figure
plt.ylabel('Irradiance [W/m$^2$]') # y-axis label
plt.xlabel('Local Time [HH:MM]') # x-axis label
plt.legend(loc='upper left') # insert legend
plt.show() # Not needed in Jupyter Notebook but usually required in other IDEs.
# ### 4.2 Plotting a time-series for a few consecutive days of interest
#
# Below there is an example to visualize a few consecutive days (e.g. 5 days) of interest. By using ['start date']:['end date'] it is possible to select time ranges easily with a DataFrame having a *DatetimeIndex*.
# +
# Variables to plot (renamed: 'vars' shadowed the built-in vars()).
plot_vars = ['ghi', 'dni', 'dhi']
# Create 3 subplots, with shared X and Y axis
fig, axs = plt.subplots(3, sharex=True, sharey=True, figsize=(9,6))
# Add title to the plot
fig.suptitle('Average Hourly Solar Radiation Observations', fontsize=14)
# Hoist the resampled frames out of the loop: each resample/aggregate
# pass over the 1-minute data is identical for all three subplots.
hourly_mean = df_1min.resample('1H').mean()
hourly_max = df_1min.resample('1H').max()
hourly_min = df_1min.resample('1H').min()
for i in range(3):
    axs[i].plot(hourly_mean.loc['2020-06-01':'2020-06-05', plot_vars[i]], label='Average') # Average hourly
    axs[i].plot(hourly_max.loc['2020-06-01':'2020-06-05', plot_vars[i]], label='Maximum') # Max. hourly
    axs[i].plot(hourly_min.loc['2020-06-01':'2020-06-05', plot_vars[i]], label='Minimum') # Min. hourly
    axs[i].set_title(plot_vars[i].upper()) # Title for each subplot
fig.subplots_adjust(hspace=0.3) # Adjust the white space between the subplots titles
fig.text(0.04, 0.5, 'Irradiance [W/m$^2$]', va='center', rotation='vertical', fontsize=12) # Common Y Axis
fig.text(0.51, 0.04, 'Local Time', ha='center', fontsize=12) # Common X Axis
plt.legend(loc='upper center', ncol=3) # Legend for the last subplot or 'axs[i].legend()' in the loop to a legend to each.
plt.show()
# -
# ### 4.3 Plotting a time-series for a few non-consecutive days of interest
#
# Below there is an example to visualize a few non-consecutive days of interest, which could be the case when we would like to observe several days scattered throughout the year a single plot. In order to do this, we need to select the day of interest from the DataFrame and then reset its *DatetimeIndex*. For example:
# List of days of interest
days = ['2020-01-01', '2020-03-01', '2020-06-01', '2020-09-01']
# Hourly means computed once, outside the loop (loop-invariant work).
df_hourly_mean = df_1min.resample('1H').mean()
# Iterate over the days and plot each of them
for day in days:
    # .loc replaces the df[day] row-selection removed in pandas 2.0
    df_day = df_hourly_mean.loc[day, 'ghi'].to_frame() # average hourly of GHI for current day
    df_day = df_day.reset_index(drop=True) # reset its Index to numeric (i.e. 0,1,2,3...)
    plt.plot(df_day, label=day) # plot the current day
plt.title('Average Hourly GHI Measurements for Days of Interest') # title of the figure
plt.xticks(np.arange(0, 25, step=3), np.arange(0, 25, step=3)) # set labels positions and names
plt.ylabel('Irradiance [W/m$^2$]') # y-axis label
plt.xlabel('Local Time') # x-axis label
plt.legend(loc='best') # insert legend
plt.show()
# ### 4.4 Daily insolation throughout the year
#
# With time-series data, the hourly/daily/monthly insolation (i.e. the sum of accumulated energy) can also be analysed throughout the year with time-series data. For example, below an example to visualize the daily insolation is shown:
# +
# Calculate the daily insolation expressed in kWh·sqm from GHI measurements
# (W/m^2-minute samples * 1/60 -> Wh/m^2, /1000 -> kWh/m^2 per day)
daily_energy = (df_1min['ghi'].resample("1D").sum()*(1/60))/1000 # selecting only GHI returns a Pandas Series
# Create time-series plot
daily_energy.plot(figsize=(9,6), legend=False) # plot timeseries
plt.title('Time-series of Daily Insolation') # add title
plt.ylabel('Energy [kWh/m$^2$]') # add Y-axis label
plt.xlabel('Time') # add X-axis label
plt.show()
# -
# Time-series data can also be visualized in other ways, for instance, as a heat map.
# +
# Prepare the data for heat map of hourly insolation
energy_array = pd.DataFrame() # empty DataFrame for the results
for i in range(1,13): # iterate over months
    # select the data in the month and eliminate the datetimeindex
    df_month = daily_energy[daily_energy.index.month==i].reset_index(drop=True)
    # daily_energy is a Series, so df_month is a Series: name it after the
    # month. Bug fix: the original assigned df_month.columns, which a
    # Series silently ignores (it has no columns).
    df_month.name = str(i)
    # Append results to the DataFrame
    energy_array = pd.concat([energy_array, df_month], axis=1)
# Transpose to have months in y-axis and days in x-axis
energy_array = energy_array.transpose()
# Rename the columns of the days
energy_array.columns = np.arange(1, 32)
# Plot heat map of daily insolation
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', # month labels
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
plt.figure(figsize=(10, 5))
ax = sns.heatmap(energy_array, cmap='CMRmap', linewidths=0.2, # plot heatmap with Seaborn (sns) library
                 xticklabels=2, annot=False,
                 cbar_kws={'label': 'Daily Energy [kWh/m$^2$]'})
ax.set_title('Heat Map of Daily Insolation') # add title
ax.set_yticklabels(months,rotation=0) # add the months as tick-labels for the y-axis
ax.set_xticklabels(ax.get_xticklabels(),rotation=0) # add the days as tick-labels for the x-axis
ax.set_xlabel('Day of the Month')
plt.show()
# -
# ***
#
# ## Section summary
#
# This section has shown how to build and work with a time-series in Python with multiple examples: <br>
# - We have seen how to prepare a DataFrame with *DatatimeIndex* to be used as a time-series when the timestamps are given in multiple formats in the temporal data and local/UTC time. <br>
# - Changes in the temporal resolution of the data can be applied by down and up-sampling the data and the differences between 2 available methods (*asfreq* and *resample*) have been shown with examples and different sampling frequencies. <br>
# - The interpolation of missing data in time-series can be used to up-sample the resolution of the data and examples with some methods have been shown. <br>
# - Finally, several ideas to visualize data have been presented. <br>
#
# Overall, the possibilities with time-series of solar resource are many. The most useful and suitable analysis and visualizations will be determined by the application and scope of the study.
#
# ***
#
# ## References<a id='references'></a>
# <NAME>.; <NAME>.; (2006). University of Nevada (UNLV):
# Las Vegas, Nevada (Data); NREL Report No. DA-5500-56509.
# http://dx.doi.org/10.5439/1052548
#
| assessingsolar/notebooks/manipulating_time_series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## ---------------------------------------------------------------CHAPTER 1
# +
# Import the required libraries.
import pandas as pd
import numpy as np
# Read the ratings dataset (network access required).
movie_rating_df = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/movie_rating_df.csv') # stores movie_rating_df.csv
# +
# Show the first 5 rows of movie_rating_df.
print(movie_rating_df.head())
# Show the data type of each column.
print(movie_rating_df.info())
# +
# Load actor_name.csv into the variable name_df.
name_df = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/actor_name.csv')
# Show the first 5 rows of name_df.
print(name_df.head())
# Show the data type of each column of name_df.
print(name_df.info())
# +
# Load the directors/writers dataset into director_writers.
director_writers = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/directors_writers.csv')
# Show the first 5 rows.
print(director_writers.head())
# Show the data type of each column.
print(director_writers.info())
# +
import pandas as pd
director_writers = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/directors_writers.csv')
# Turn the comma-separated director_name string of each row into a list.
director_writers['director_name'] = director_writers['director_name'].apply(lambda row: row.split(','))
# Same for writer_name.
director_writers['writer_name'] = director_writers['writer_name'].apply(lambda row: row.split(','))
# Show the first 5 rows.
print(director_writers.head())
# -
# ## -----------------------------------------------------------CHAPTER 3
# +
name_df = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/actor_name.csv')
# Only the nconst, primaryName and knownForTitles columns are needed.
name_df = name_df[['nconst','primaryName','knownForTitles']]
# Show the first 5 rows of name_df.
print(name_df.head())
name_df.info()
# +
# Check how many comma-separated titles each row contains.
print(name_df['knownForTitles'].apply(lambda x: len(x.split(','))).unique())
# Turn knownForTitles into a list per row (a column of lists).
name_df['knownForTitles'] = name_df['knownForTitles'].apply(lambda x: x.split(','))
# Print the first 5 rows.
print(name_df.head())
# -
name_df.head(10)
# +
import numpy as np

# "Unnest" the knownForTitles lists: one output row per (actor, title) pair.
df_uni = []  # bucket for the per-column exploded frames
for x in ['knownForTitles']:
    # Repeat each row index once per element of its knownForTitles list.
    idx = name_df.index.repeat(name_df['knownForTitles'].str.len())
    # Flatten the lists of every row into a single column.
    df1 = pd.DataFrame({
        x: np.concatenate(name_df[x].values)
    })
    # Re-attach the repeated index so the rows can be joined back later.
    df1.index = idx
    # Collect each exploded frame in the bucket.
    df_uni.append(df1)
# Combine the exploded columns into one frame.
df_concat = pd.concat(df_uni, axis=1)
# Left-join the remaining columns of the original frame.
# axis passed by keyword: the positional form was deprecated and removed in pandas 2.0.
unnested_df = df_concat.join(name_df.drop(['knownForTitles'], axis=1), how='left')
# Keep the original column order.
unnested_df = unnested_df[name_df.columns.tolist()]
print(unnested_df)
# -
# Same join as above (axis passed by keyword — the positional form was
# deprecated and removed in pandas 2.0).
x1 = df_concat.join(name_df.drop(['knownForTitles'], axis=1), how='left')
# x1
unnested_df[name_df.columns.tolist()]
# ## ------------------------------------------------------------CHAPTER 5
# +
unnested_drop = unnested_df.drop(['nconst'], axis=1)
# Bucket for the grouped series.
df_uni = []
for col in ['primaryName']:
    # Collect every primaryName of a title into one list.
    dfi = unnested_drop.groupby(['knownForTitles'])[col].apply(list)
    # Append the aggregated series.
    df_uni.append(dfi)
df_grouped = pd.concat(df_uni, axis=1).reset_index()
df_grouped.columns = ['knownForTitles','cast_name']
print(df_grouped)
# + run_control={"marked": true}
df_grouped
# -
# ## -----------------------------------------------------------CHAPTER 5
# +
movie_rating_df = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/movie_rating_df.csv')
# Join the movie table with the cast table.
base_df = pd.merge(df_grouped, movie_rating_df, left_on='knownForTitles', right_on='tconst', how='inner')
# Join base_df with the director/writer table.
base_df = pd.merge(base_df, director_writers, left_on='tconst', right_on='tconst', how='left')
print(base_df.head())
# -
df_grouped.head()        # cast list per title
movie_rating_df.head()   # ratings per title
director_writers.head()  # directors and writers per title
# +
# Drop the knownForTitles column.
base_drop = base_df.drop(['knownForTitles'], axis=1)
print(base_drop.info())
# Replace NULL values in the genres column with 'Unknown'.
base_drop['genres'] = base_drop['genres'].fillna('Unknown')
# Count the NULL values remaining in each column.
print(base_drop.isnull().sum())
# Replace NULLs in director_name and writer_name with 'unknown'.
# NOTE(review): lower-case 'unknown' here vs 'Unknown' for genres — confirm this is intended.
base_drop[['director_name','writer_name']] = base_drop[['director_name','writer_name']].fillna('unknown')
# genres holds multiple comma-separated values, so wrap each cell in a list.
base_drop['genres'] = base_drop['genres'].apply(lambda x: x.split(','))
# +
# Drop the tconst, isAdult, endYear and originalTitle columns.
base_drop2 = base_drop.drop(['tconst','isAdult','endYear','originalTitle'], axis=1)
base_drop2 = base_drop2[['primaryTitle','titleType','startYear','runtimeMinutes','genres','averageRating','numVotes','cast_name','director_name','writer_name']]
# Rename the columns to shorter names.
base_drop2.columns = ['title','type','start','duration','genres','rating','votes','cast_name','director_name','writer_name']
print(base_drop2.head())
# -
# ## ------------------------------------------------------------CHAPTER 6
# +
# Features used by the recommender: title, cast_name, genres, director_name, writer_name.
# .copy() gives feature_df its own data, so the sanitize step later in the
# notebook can assign columns without pandas' SettingWithCopyWarning firing
# on a slice of base_drop2.
feature_df = base_drop2[['title','cast_name','genres','director_name','writer_name']].copy()
# Show the first 5 rows.
print(feature_df.head())
# +
def sanitize(x):
    """Normalise a feature cell for token matching.

    Removes spaces and lower-cases every value, so e.g. 'Tom Hanks'
    becomes 'tomhanks'. Lists are cleaned element-wise; a plain string
    is cleaned and wrapped in a one-element list. Values without a
    .replace() method (e.g. NaN) are printed and yield None, matching
    the original best-effort behaviour.
    """
    try:
        if isinstance(x, list):
            # Cell holds a list: clean each element.
            return [i.replace(' ','').lower() for i in x]
        # Cell holds a single string: clean it and wrap in a list.
        return [x.replace(' ','').lower()]
    except AttributeError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated bugs.
        print(x)
#Kolom : cast_name, genres, writer_name, director_name
feature_cols = ['cast_name','genres','writer_name','director_name']
#Apply function sanitize
for col in feature_cols:
feature_df[col] = feature_df[col].apply(sanitize)
# +
#kolom yang digunakan : cast_name, genres, director_name, writer_name
def soup_feature(x):
    """Build the metadata "soup": every feature token of a row, space-separated."""
    columns = ('cast_name', 'genres', 'director_name', 'writer_name')
    return ' '.join(' '.join(x[column]) for column in columns)
# Combine the feature columns into a single 'soup' column.
feature_df['soup'] = feature_df.apply(soup_feature, axis=1)
# +
# Import CountVectorizer.
from sklearn.feature_extraction.text import CountVectorizer
# Vectorise the soup into token counts, dropping English stop words.
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(feature_df['soup'])
print(count)
print(count_matrix.shape)
# +
# Import cosine_similarity.
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise cosine similarity between all movie soups.
cosine_sim = cosine_similarity(count_matrix, count_matrix)
# Print the result.
print(cosine_sim)
# +
# Map each title to its positional index (duplicates dropped, first kept).
indices = pd.Series(feature_df.index, index=feature_df['title']).drop_duplicates()

def content_recommender(title):
    """Return the 10 movies most similar to `title` by cosine similarity of the soups."""
    # Positional index of the requested title.
    row = indices[title]
    # Pair every movie index with its similarity to the requested one.
    scores = list(enumerate(cosine_sim[row]))
    # Highest similarity first; entry 0 is the movie itself, so skip it.
    scores.sort(key=lambda pair: pair[1], reverse=True)
    top_ten = scores[1:11]
    # Pull the matching rows back out of the movie table.
    picks = [pair[0] for pair in top_ten]
    return base_df.iloc[picks]

# Try the recommender on one title.
print(content_recommender('The Lion King'))
# -
| python/Project/4_Project Machine Learning with Python Building Recommender System with Similarity Function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# Methods for a function can be ambiguous, consider the following example.
# Two methods that both match f(Int, Int): neither one is more specific.
f(x, y::Integer) = x - y
f(x::Integer, y) = x + y
f(2, 3)
# You can test a module for ambiguous functions using `detect_ambiguities` in the `Test` module.
using Test
detect_ambiguities(Main)
# This can be resolved by adding a third method.
# The new method is strictly more specific, so f(2, 3) now dispatches to it.
f(x::Integer, y::Integer) = x*y
f(2, 3)
detect_ambiguities(Main)
| source-code/semantics/ambiguous_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img alt="" src="images/tau-data-Indonesia.png" />
# <center><h2><strong><font color="blue">Pendahuluan Basis Data untuk Data Science</font></strong></h2></center>
# <center><h3><strong><font color="blue"><a href="https://tau-data.id/course/bdds/"> https://tau-data.id/course/bdds/ </a></font></strong></h3></center>
#
# <img alt="" src="images/cover.jpg" />
#
# <center><h2><strong><font color="blue">Pendahuluan Elasticsearch - 01</font></strong></h2></center>
# <center><h3><strong><font color="blue"><a href="https://tau-data.id/lesson/pendahuluan-elasticsearch/">https://tau-data.id/lesson/pendahuluan-elasticsearch/</a></font></strong></h3></center>
# <b><center><h3>(C) <NAME></h3></center>
# + [markdown] slideshow={"slide_type": "slide"}
# # Outline
#
# 1. Instalasi Java
# 2. Instalasi Elasticsearch
# 3. Configurasi Server Elasticsearch
# 4. Instalasi Head Plugin
# 5. Instalasi Python Modules
# 6. Index & Mapping di Elasticsearch
# 7. Importing Data
# 8. Basic queries
# + [markdown] slideshow={"slide_type": "slide"}
# # Instalasi Java
#
# 1. Unduh java **JDK 8** + Install
# 2. Atur **Path** ke JDK binary executable
# 3. **Test** instalasi
# 4. Beberapa catatan kecil instalasi Java.
# - **JAVA_HOME**
# - Linux **OpenJDK**
#
#
# -
# # Instalasi Elasticsearch
#
# 1. Unduh **Elasticsearch** + Extract ke root (optional)
# 2. **Rename** folder jika diinginkan (optional)
# 3. Install Elasticsearch sebagai service:
# - **C:\elasticsearch\bin\elasticsearch-service install**
# 4. Jalankan service: services.msc (di windows)
# - Rubah sebagai Automatic run di startup jika diinginkan
# - Tekan "Start"
# 5. Instalasi plugin **head** di browser Chrome
# # instalasi python Module untuk Elastic search
#
# * Buka terminal (command prompt) lalu jalankan perintah
# * **pip install --upgrade elasticsearch**
# * import module elasticsearch dengan cara
# **import elasticsearch as es**
# +
from elasticsearch import Elasticsearch as Es
# -
# Connection parameters for the local Elasticsearch server.
server, port, timeout = 'localhost', 9200, 30
# local host = 172.27.0.1
try:
    # NOTE(review): this host-dict form matches elasticsearch-py 7.x;
    # client 8.x expects a URL string — confirm the installed client version.
    conEs = Es( [ {'host':server,'port':port,'timeout':timeout} ] )
    print('Connected to ElasticSearch, koneksi = "conEs"')
except:
    # Deliberate best-effort: any connection failure is reported and the notebook continues.
    print('Error, tidak bisa terkoneksi ke ElasticSearch. Yakinkan server Es sudah berjalan dengan baik dan port serta ip server benar')
# Berikutnya kita akan membuat index (~tabel di RDBMS) di Elastic
# pertama-tama kita perlu parameter-parameter berikut
idx = "news20"
nShards = 1
nReplicas = 0
similarity = "BM25"
# https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html
# Pertama tetapkan setting index
dSetting = { "settings": {"index":{"number_of_shards":nShards,"number_of_replicas":nReplicas} } }
# Yes!, ElasticSearch support Bahasa Indonesia
# https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html
print('Done!')
# Use the settings above to create a new index in Elasticsearch.
#conEs.indices.delete(index=idx,ignore=[400, 404]) # Delete the index if it exists / start from scratch
conEs.indices.create(index=idx,body=dSetting,ignore=400) # Create the index (ignore 400 = already exists)
# Refresh the Head plugin to check that Elasticsearch has created the index.
# +
import pickle

try:
    # Prefer the cached local copy so the cell also works offline.
    with open('data/20_news_groups.pckl', 'rb') as f:
        data = pickle.load(f)
except (OSError, pickle.UnpicklingError, EOFError):
    # Cache miss (or unreadable cache): download a newsgroup subset and cache it.
    # This import was missing in the original cell, so the fallback raised
    # NameError instead of fetching the data.
    from sklearn.datasets import fetch_20newsgroups
    categories = ['sci.med', 'talk.politics.misc', 'rec.autos']  # feel free to modify
    data = fetch_20newsgroups(categories=categories, remove=('headers', 'footers', 'quotes'))
    # Cache locally so an internet connection is not needed every time.
    # Note: the "data" folder must already exist.
    with open('data/20_news_groups.pckl', 'wb') as f:
        pickle.dump(data, f)
# -
# Map each numeric label to its category name.
kategori = [data.target_names[label] for label in data.target]
# Copy the raw document texts into a list.
documents = [doc for doc in data.data]
print("Total dokumen = {}".format(len(documents)))
print(set(kategori), len(kategori))
print(documents[0])
# +
A = [1, 2, 3]
B = ['a', 'b', 'c']
# zip walks both lists in lockstep, yielding one pair per position.
for number, letter in zip(A, B):
    print(number, letter)
# -
# Index every document into Elasticsearch together with its category.
for doc, kat in zip(documents, kategori):
    D = { "document":doc, "kategori":kat }
    conEs.index(index="news20", body=D)
# Sampai sini sebenarnya kita sudah bisa melakukan pencarian, contoh:
qry = 'mazda mpv'
# Keep the whole response dict. The original cell chained ['hits']['hits']
# here, so `result` was a list — which made the later .keys() call fail —
# and `hasil_query` was referenced below without ever being assigned (NameError).
hasil_query = conEs.search(index=idx, body={"query": {"match": {'document':qry}}})
result = hasil_query['hits']['hits']  # list of matching documents
result[0]
# Inspect the response structure.
hasil_query.keys()
hasil_query['hits']['hits'][:3]
res = hasil_query['hits']
type(res)
res.keys()
# # End of Module
# + slideshow={"slide_type": "slide"}
import warnings; warnings.simplefilter('ignore')  # silence warnings for the embed below
from IPython.display import HTML
# Embed the closing YouTube video.
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/S_f2qV2_U00?rel=0&controls=0&showinfo=0" frameborder="0" allowfullscreen></iframe>')
# -
| 01- Pendahuluan ElasticSearch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Autoencoder with MNIST FCN
from __future__ import division, print_function
from keras.callbacks import ModelCheckpoint
from keras.layers import Input
from keras.layers.core import Dense, Lambda
from keras.models import Model
from keras.utils import np_utils
import matplotlib.pyplot as plt
import numpy as np
import os
# %matplotlib inline
# +
DATA_DIR = "../../data"
TRAIN_FILE = os.path.join(DATA_DIR, "mnist_train.csv")
TEST_FILE = os.path.join(DATA_DIR, "mnist_test.csv")
BATCH_SIZE = 128
NUM_FEATURES = 784
NUM_CLASSES = 10
NUM_EPOCHS = 10
BEST_MODEL = os.path.join(DATA_DIR, "keras-ae-mnist-fcn-best.h5")
FINAL_MODEL = os.path.join(DATA_DIR, "keras-ae-mnist-fcn-final.h5")
# -
# ## Prepare Data
# +
def parse_file(filename):
    """Parse an MNIST CSV file ("label,pix0,...,pix783" per line).

    Pixel values are scaled to [0, 1]; progress is printed every 10000
    lines. Returns (X, y): X of shape (n_lines, n_pixels), y of shape
    (n_lines,).
    """
    xdata, ydata = [], []
    i = 0
    # Text mode instead of the original "rb": under Python 3, reading bytes
    # made line.strip().split(",") raise TypeError. The with-block also
    # guarantees the file is closed even if a line fails to parse.
    with open(filename, "r") as fin:
        for line in fin:
            if i % 10000 == 0:
                print("{:s}: {:d} lines read".format(
                    os.path.basename(filename), i))
            cols = line.strip().split(",")
            ydata.append(int(cols[0]))
            xdata.append([float(x) / 255. for x in cols[1:]])
            i += 1
    print("{:s}: {:d} lines read".format(os.path.basename(filename), i))
    y = np.array(ydata)
    X = np.array(xdata)
    return X, y
Xtrain, ytrain = parse_file(TRAIN_FILE)
Xtest, ytest = parse_file(TEST_FILE)
# Sanity-check the array shapes.
print(Xtrain.shape, ytrain.shape, Xtest.shape, ytest.shape)
# -
# ## Define Network
# +
# Fully-connected autoencoder: 784 -> 256 -> 2 (bottleneck) -> 256 -> 784.
inputs = Input(batch_shape=(None, NUM_FEATURES))
encoded = Dense(256, activation="relu")(inputs)
hidden = Dense(2, activation="relu")(encoded)  # 2-D bottleneck, visualised later
decoded = Dense(256, activation="relu")(hidden)
outputs = Dense(NUM_FEATURES, activation="relu")(decoded)
model = Model(inputs=inputs, outputs=outputs)
# -
model.compile(optimizer="adam", loss="mean_squared_error")
# ## Train Network
# Train the autoencoder to reconstruct its input (targets == inputs).
history = model.fit(
    Xtrain,
    Xtrain,
    batch_size=BATCH_SIZE,
    epochs=NUM_EPOCHS,
    validation_data=(Xtest, Xtest),
)
# Learning curves: training vs. validation reconstruction loss.
epochs_axis = np.arange(NUM_EPOCHS)
plt.plot(epochs_axis, history.history["loss"], color="r", label="train")
plt.plot(epochs_axis, history.history["val_loss"], color="b", label="val")
plt.xlabel("epochs")
plt.ylabel("MSE loss")
plt.legend(loc="best")
# ## Visualize autoencoder outputs
# Compare the first 5 test digits (left column) with their reconstructions (right column).
Xviz = Xtest[0:5]
Xviz_ = model.predict(Xviz)
j = 1  # running subplot position in the 5x2 grid
for i in range(5):
    img_orig = Xviz[i].reshape(28, 28)
    img_ae = Xviz_[i].reshape(28, 28)
    # Original digit on the left.
    plt.subplot(5, 2, j)
    plt.imshow(img_orig)
    plt.xticks([])
    plt.yticks([])
    j += 1
    # Reconstruction on the right.
    plt.subplot(5, 2, j)
    plt.imshow(img_ae)
    plt.xticks([])
    plt.yticks([])
    j += 1
plt.show()
# ## Visualize encoder embeddings
# Scatter the 2-D bottleneck activations of the test set, one colour per digit.
encoder = Model(inputs=inputs, outputs=hidden)
ytest_ = encoder.predict(Xtest)
colors = ["r", "g", "b", "c", "m", "y", "orange", "brown", "gray", "k"]
for i in range(NUM_CLASSES):
    # Rows of the test set whose label equals i.
    yi_indices = np.where(ytest == i)[0]
    xi = ytest_[yi_indices][:, 0].tolist()
    yi = ytest_[yi_indices][:, 1].tolist()
    plt.scatter(xi, yi, color=colors[i], alpha=0.5, label=str(i))
plt.legend(loc="best")
plt.xlabel("Z1")
plt.ylabel("Z2")
plt.show()
| src/keras/03-mnist-autoencoder-fcn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: drlnd
# language: python
# name: drlnd
# ---
# # Report for Udacity Deep Reinforement Learning
# # Project 3: Collaboration and Competition
# ## Overview
#
# This report is for the Collaboration and Competition project at Deep Reinforcement Learning course by Udacity. The objective of the project is to train two agents control rackets to bounce a ball over a net as many times as possible.
# If an agent hits the ball over the net, it receives a reward of +0.1. If an agent lets a ball hit the ground or hits the ball out of bounds, it receives a reward of -0.01.
#
# The observation space consists of 8 variables corresponding to the position and velocity of the ball and racket. Two continuous actions are available, corresponding to movement toward (or away from) the net, and jumping.
# ## Learning Algorithm
#
# The same learning algorithm with [p2-ball-tracking](https://github.com/yshk-mrt/p2-ball-tracking) is applied.
#
# [A2C](https://openai.com/blog/baselines-acktr-a2c/) is used, which is a synchronous variant of [A3C](https://arxiv.org/abs/1602.01783). Pseudocode of A3C from the cited paper is as follows.
#
# <img src="files/content/a3c_suedocode.png" width="800"/>
# ### Model
#
# One neural network for policy $\pi(a|s;\theta')$ and value function $V(s;\theta'_v)$ is shared by all agents. The first and second hidden fully connected layers are shared by policy (actor) and value function (critic). In order to generate actions for continuous control, the network generates mean and standard deviation of Gaussian distribution, and actions are sampled from the distribution. Here is the corresponding code in [TrainAgent.ipynb](./TrainAgent.ipynb)
#
#
# ```python
# out1 = F.relu(self.fc1(state))
# out2 = F.relu(self.fc2(out1))
#
# # mean of the Gaussian distribution range in [-1, 1]
# mean = torch.tanh(self.fc_actor(out2))
# # V value
# value = self.fc_critic(out2)
#
# # Create distribution from mean and standard deviation
# # Use softplus function to make deviation always positive
# # SoftPlus is a smooth approximation to ReLU function
# dist = torch.distributions.Normal(mean, F.softplus(self.std))
#
# # Sample next action from the distribution.
# action = dist.sample()
# action = torch.clamp(action, min=-1.0, max=1.0)
# ```
# #### Model structure
#
# Model structure by torchviz library.
# <img src="files/content/model.png" width="800"/>
# #### Model parameters
# The following table shows model parameters. Linear-1 and Linear-2 is hidden layers, Linear-3 is for actions, and Linear-4 is for value function.
# ```
# ----------------------------------------------------------------
# Layer (type) Output Shape Param #
# ================================================================
# Linear-1 [-1, 1, 128] 3,200
# Linear-2 [-1, 1, 64] 8,256
# Linear-3 [-1, 1, 2] 130
# Linear-4 [-1, 1, 1] 65
# ================================================================
# Total params: 11,651
# ```
#
# ### Hyperparameters
# The following hyperparameters are used.
#
# | Parameter | Value | Description |
# |:--------------------------------|--------------:|:--------------------------------------------------------------|
# | N-Step | 5 | Update model every n-step |
# | Discount factor \[$ \gamma $\] | 0.99 | Discount factor used in Q-Learning |
# | Critic_loss_coef | 100.0 | Weight of critic loss |
# | Entropy_loss_coef | 0.0001 | Weight of entropy loss |
# | Learning rate \[$ \alpha $\] | 0.0005 | Learning rate for Adam |
# | Standard deviation | 0.0 | Initial standard deviation before SoftPlus is applied |
# | Max norm of the gradients | 5.0 | Max norm for gradient clipping |
#
# ## Results
#
# ### Plot of Rewards
# At episode 6921, A2C achieved score 0.5.
# At episode 19779, A2C achieved average score 0.5.
# Thus, the environment is considered solved.
# 
# ### Trained agents' behavior
#
# The following gif shows trained agent behaviors. This time, standard deviation for action generation is set to nearly zero, which means agents always take greedy actions. As the result, agents got an average score 1.91 over 100 episodes.
# 
# 
# ## Consideration
#
# 1. Entropy
#
# The following graph shows the average score and entropy. When the score improves, the entropy decreases. On the other hand, when the score drops, the entropy increases, which will contribute to recover the score again by searching broader action spaces. The weight of entropy is important to balance convergent and divergent mode.
#
# 
#
# 2. Weight of critic loss
#
# The following graph (a) shows actor and critic loss without weight of critic loss. Graph (b) shows absolute value of actor loss divided by critic loss. As you can see, the actor loss is 10 to 1000 times bigger than the critic loss, and the ratio decreases as the learning continues. If the weight can be adaptively changed to keep the ratio, learning could be more stable. (A quick experiment did not work.)
# In this report, 100.0 is used.
#
# | ![actor_critic]| ![actor_divided_by_critic]|
# |---|---|
# |(a)Actor loss and critic loss|(b)Actor loss divided by critic loss|
#
# [actor_critic]:files/content/actor_critic.png
# [actor_divided_by_critic]:files/content/actor_critic_log.png
#
# ## Ideas for Future Work
#
# For more stable results:
# * Add a small value to std to prevent log probability becomes infinity when std is zero.
# * Use gradient clipping -> Done
# * Use Trust Region Policy Optimization (TRPO)
# * Try RMSprop instead of Adam
# * Try off-policy methods
# * Try separated network for actor and critic
# * Try adaptive critic loss weight mention in the second consideration point
# ## References
# * [GitHub:ShangtongZhang/DeepRL](https://github.com/ShangtongZhang/DeepRL)
# * [GitHub:qiaochen/A2C](https://github.com/qiaochen/A2C)
# * [Let’s make an A3C: Implementation](https://jaromiru.com/2017/03/26/lets-make-an-a3c-implementation)
# * [Understanding Actor Critic Methods and A2C](https://towardsdatascience.com/understanding-actor-critic-methods-931b97b6df3f)
# * [ゼロから作る A3C](https://qiita.com/s05319ss/items/2fe9bfe562fea1707e79)
# ## Appendix
# Code for graph of this report.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def moving_average(data, points=100):
    """Return the `points`-wide trailing moving average of `data`.

    The series is left-padded with zeros so the output has the same length
    as the input (early entries are averaged against the zero padding).
    """
    kernel = np.ones(points) / points
    padding = np.zeros(points - 1)
    # mode must be 'valid' (the original said 'vaild', which only worked via
    # numpy's deprecated first-letter mode abbreviation).
    return np.convolve(np.hstack((padding, data)), kernel, mode='valid')
def plot(data_source):
    """Plot a moving-average curve for each training log.

    Only the actor/critic loss-ratio plot is active; the commented lines
    below select other columns (score, total/actor/critic loss, entropy).

    Parameters
    ----------
    data_source : list
        [Data label, data file path, line type] per entry.
    """
    plt.rcParams["figure.dpi"] = 100.0
    max_length = 30000  # cap on the number of episodes plotted
    for source in data_source:
        data = np.loadtxt(source[1], skiprows=1, delimiter=', \t', dtype='float')
        length = len(data[:,])
        if length > max_length:
            length = max_length
        data = data[:length]
        # Comment/Uncomment to select data type
        #plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(data[:,1]), label="score " + source[0], ls = source[2])
        #plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(data[:,2]), label="total_loss " + source[0], ls = source[2])
        #plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(data[:,3]), label="actor_loss " + source[0], ls = source[2])
        #plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(data[:,4]), label="critic_loss " + source[0], ls = source[2])
        #plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(data[:,5]), label="entropy " + source[0], ls = source[2])
        plt.plot(np.linspace(1, length, length, endpoint=True), moving_average(abs(data[:,3]/data[:,4])), label="actor/critic " + source[0], ls = source[2])
    plt.xlabel('Episode #')
    plt.legend()
    plt.xlim(0, length)
    #plt.ylim(0, 1000)
    plt.yscale("log")
    plt.grid()
    plt.show()
data_paths = []
data_paths.append(['tennis','results/final/result.txt', "-"])  # label, log file, line style
plot(data_paths)
| Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Softmax 回归的简洁实现
#
# + attributes={"classes": [], "id": "", "n": "1"}
import d2l
from mxnet import gluon, init, npx
from mxnet.gluon import nn
npx.set_np()  # switch MXNet to NumPy-style arrays
# Fashion-MNIST data iterators with batch size 256.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=256)
# + [markdown] slideshow={"slide_type": "slide"}
# 模型和初始化。
# + attributes={"classes": [], "id": "", "n": "3"}
# A single dense layer; the softmax is folded into the loss below.
net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
# + [markdown] slideshow={"slide_type": "slide"}
# 损失函数,优化算法,和训练。
# + attributes={"classes": [], "id": "", "n": "5"}
# Cross-entropy loss (with built-in softmax), SGD optimiser, 10 epochs.
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(
    net.collect_params(), 'sgd', {'learning_rate': 0.1}
)
d2l.train_ch3(net, train_iter, test_iter, loss, 10, trainer)
| notebooks-1/7-softmax-regression-gluon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "skip"}
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as renderable HTML."""
    # `with` closes the file even if read() fails; the original leaked the handle.
    with open("./styles/custom.css", "r") as styles_file:
        return HTML(styles_file.read())
css_styling()
# -
# + [markdown] slideshow={"slide_type": "slide"}
# ### BEFORE YOU DO ANYTHING...
# In the terminal:
# 1. Navigate to __inside__ your ILAS_Python repository.
# 2. __COMMIT__ any un-commited work on your personal computer.
# 3. __PULL__ any changes *you* have made using another computer.
# 4. __PULL__ textbook updates (including homework answers).
# + [markdown] slideshow={"slide_type": "slide"}
# # Data Structures
#
# # Lesson Goal
#
# Learn to manipulate multiple values and variables contained in data structures.
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Objectives
#
#
# - Express collections of mulitple variables using `lists`, `tuples` and `dictionaries`.
# - Use python functions to manipulate data structures.
# - Use control statements for iteration and comprehension.
# - Learn to select the right data structure for an application.
# + [markdown] slideshow={"slide_type": "slide"}
# Why we are studying this:
#
# To use Python to solve more complex engineering problems you are likely to encounter involving:
# - multi-variable values (e.g. vectors)
# - large data sets (e.g. experiment results)
# - using control flow to manipulate data sets using logic (e.g. sorting and categorising)
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# Lesson structure:
#
# Three data strcutures: `lists`, `tuples` and `dictionaries`.
#
# - Lists
# - General data structure manipulations: sorting, adding, removing...
# - Indexing and enumerating
#     - Iterating and comprehension
# - Tuple
# - Dictionaries
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# In the last seminar we learnt to generate a range of numbers for use in control flow of a program, using the function `range()`:
# -
# Print every number below 20 that is not divisible by 4.
for j in range(20):
    if j % 4 != 0:
        print(j, "is not a multiple of 4")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data Structures
#
# Often we want to manipulate data that is more meaningful than ranges of numbers.
#
# These collections of variables might include:
# - the results of an experiment
# - a list of names
# - the components of a vector
# - a telephone directory with names and associated numbers.
#
#
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Python has different __data structures__ that can be used to store and manipulate these values.
#
# Like variable types (`string`, `int`,`float`...) different data structures behave in different ways.
#
# Today we will learn to use `list`, `tuple` and dictionary (`dict`) data structures.
# + [markdown] slideshow={"slide_type": "subslide"}
#
# We will study the differences in how they behave so that you can learn to select the most suitable data structure for an application.
# + [markdown] slideshow={"slide_type": "subslide"}
# Example
#
# If we want to store the names of students in a laboratory group,
# rather than representing each students using an individual string variable, we could use a list of names.
#
#
# +
lab_group0 = ["Sarah", "John", "Joe", "Emily"]
lab_group1 = ["Roger", "Rachel", "Amer", "Caroline", "Colin"]
print(lab_group0)
print(lab_group1)
# + [markdown] slideshow={"slide_type": "subslide"}
# This is useful because we can perform operations on lists such as:
# - checking its length (number of students in a lab group)
# - sorting the names in the list into alphabetical order
# - making a list of lists (we call this a *nested list*):
#
# -
lab_groups = [lab_group0, lab_group1]  # a nested list: one element per lab group
# + [markdown] slideshow={"slide_type": "slide"}
# ## Lists
# + [markdown] slideshow={"slide_type": "slide"}
# A list is a sequence of data.
#
# We call each item in the sequence an *element*.
#
# A list is constructed using square brackets:
#
#
# -
a = [1, 2, 3]  # a list literal uses square brackets
# + [markdown] slideshow={"slide_type": "subslide"}
# A `range` can be converted to a list with the `list` function (casting).
# -
print(list(range(10)))  # cast the range to a list to see its elements
# + [markdown] slideshow={"slide_type": "subslide"}
# Reminder, when a range has one arguments:
# - the first value is 0.
# - the second value is the stopping value.
# - the stopping value is not included in the range
#
# + [markdown] slideshow={"slide_type": "subslide"}
# When a range has two arguments:
# - the first value is the starting value.
# - the second value is the stopping value.
# - the stopping value is not included in the range
# + slideshow={"slide_type": "-"}
print(list(range(10,20)))  # start at 10, stop before 20
# + [markdown] slideshow={"slide_type": "subslide"}
# You can optionally include a step:
# + slideshow={"slide_type": "-"}
print(list(range(10, 20, 2)))  # start, stop, step
# + [markdown] slideshow={"slide_type": "subslide"}
# A list can hold a mixture of types (`int`, `string`....).
# + slideshow={"slide_type": "-"}
a = [1, 2.0, "three"]
# + [markdown] slideshow={"slide_type": "subslide"}
# An empty list is created by
# -
my_list = []  # an empty list
# + [markdown] slideshow={"slide_type": "subslide"}
# A list of length 5 with repeated values can be created by
# -
my_list = ["Hello"]*5
print(my_list)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can check if an item is in a list using the function `in`:
#
# -
print("Hello" in my_list)
print("Goodbye" in my_list)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Indexing
#
# Lists store data in order.
#
# We can select a single element or multiple elements of a list using the __index__ of the element(s).
#
# You are familiar with this process; it is the same as selecting individual characters of a string:
# + slideshow={"slide_type": "subslide"}
word = "string"
letter = word[1]
print(letter)
# + slideshow={"slide_type": "subslide"}
lab_group0 = ["Sarah", "John", "Joe", "Emily"]
first_member = lab_group0[0]
print(first_member)
# + [markdown] slideshow={"slide_type": "subslide"}
# If we select multiple elements they are returned as a list:
# +
lab_group0 = ["Sarah", "John", "Joe", "Emily"]
first_members = lab_group0[0:2]
print(first_members)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can select the individual characters of a string using a second index.
#
# For example to select the first letter of the second group member's name:
# +
lab_group0 = ["Sarah", "John", "Joe", "Emily"]
letter = lab_group0[1][0]
print(letter)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Manipulating Lists
#
# There are many functions for manipulating lists.
#
# Many of these functions apply to other data structures.
#
#
#
#
#
#
#
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
#
# ### Finding the Length of a List
# <a id='Length'></a>
# We can find the length (number of items) of a list using the function `len()`, by including the name of the list in the brackets.
# + [markdown] slideshow={"slide_type": "subslide"}
# In the example below, we find the length of the list `lab_group0`.
# +
lab_group0 = ["Sara", "Mari", "Quang"]
size = len(lab_group0)
print("Lab group members:", lab_group0)
print("Size of lab group:", size)
print("Check the Python object type:", type(lab_group0))
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='SortLists'></a>
# ### Sorting Lists
#
# To sort the list we use the function `sorted()`.
#
# #### Sorting Numerically
#
# If the list contains numerical variables, the numbers are sorted in ascending order.
# +
numbers = [7, 1, 3.0]
print(numbers)
numbers = sorted(numbers)  # returns a new list in ascending order
print(numbers)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Note:__ We can sort a list with mixed numeric types (e.g. `float` and `int`).
#
# However, we cannot sort a list with types that cannot be sorted by the same ordering rule.
#
# (e.g. `numbers = sorted(["7", 1, 3.0])` causes an error.)
# +
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Sorting Alphabetically
#
# If the list contains strings of alphabet characters, the list is sorted by alphabetical order.
# +
lab_group0 = ["Sara", "Mari", "Quang"]
print(lab_group0)
lab_group0 = sorted(lab_group0)
print(lab_group0)
# + [markdown] slideshow={"slide_type": "subslide"}
# As with `len()` we include the name of the list we want to sort in the brackets.
# + [markdown] slideshow={"slide_type": "subslide"}
# `sort` is known as a 'method' of a `list`.
#
# If we suffix a list with `.sort()`, it performs an *in-place* sort.
# + slideshow={"slide_type": "-"}
lab_group0 = ["Sara", "Mari", "Quang"]
print(lab_group0)
#lab_group0 = sorted(lab_group0)
# .sort() sorts the list in place and returns None, unlike sorted().
lab_group0.sort()
print(lab_group0)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
#
# In the cell provided in your textbook create a list of __numeric__ or __string__ values.
#
# Sort the list using `sorted()` __or__ `.sort()`.
#
# Print the sorted list.
#
# Print the length of the list using `len()`.
# +
# Sorting a list
# + [markdown] slideshow={"slide_type": "slide"}
# ### Removing an Item from a List
#
# We can remove items from a list using the method `pop`.
#
# We place the index of the element we wish to remove in brackets.
# + slideshow={"slide_type": "subslide"}
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo"]
print(lab_group0)
# Remove the second student from the list: lab_group (remember indexing starts from 0 so 1 is the second element)
# pop() also RETURNS the removed item; here the return value is discarded.
lab_group0.pop(1)
print(lab_group0)
# + slideshow={"slide_type": "subslide"}
# By default, pop removes the last element
lab_group0.pop()
print(lab_group0)
# + slideshow={"slide_type": "subslide"}
# Pop can be used to assign the removed value to a variable name
group_member = lab_group0.pop(1)
print(lab_group0)
print(group_member)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Adding an Item to a List
#
# We can add items to a list using the method `insert`.
#
# We place the desired index of new element in brackets.
# -
# Add new student "Mark" to the list
# insert(i, x) places x at index i and shifts later items to the right.
lab_group0.insert(2, "Mark")
print(lab_group0)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can add items at the end of a list using the method `append`.
#
# We place the element we want to add to the end of the list in brackets.
# -
# Add new student "Lia" at the end of the list
# append() adds a single item after the current last element.
lab_group0.append("Lia")
print(lab_group0)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Changing a list entry.
# We can change the entry of a list using indexing.
# +
# Replace the element at index 3 by assigning to it.
lab_group0[3] = "Am"
print(lab_group0)
# Adding and removing items from a list.
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
#
# In the cell provided in your textbook.
#
# Remove "Sara" from the list.
#
# Print the new list.
#
# Add a new lab group member, Tom, to the list.
#
# Print the new list.
# +
# Starting list for the exercise above.
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo"]
print(lab_group0)
# Adding and removing items from a list.
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='NestedList'></a>
# ### Nested Data Structures: Lists of Lists
#
# A *nested list* is a list within a list.
#
# (Recall *nested loops* from Seminar 1; Control Flow).
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# To access a __single element__ we need as many indices as there are levels of nested list.
#
# This is more easily explained with an example:
# + [markdown] slideshow={"slide_type": "subslide"}
# `lab_groups` is a nested list containing the lists:
# - `lab_group0`
# - `lab_group1`
# - `lab_group2`
# + slideshow={"slide_type": "-"}
lab_group0 = ["Sara", "Mika", "Ryo", "Am"]
lab_group1 = ["Hemma", "Miri", "Quy", "Sajid"]
lab_group2 = ["Adam", "Yukari", "Farad", "Fumitoshi"]
# A nested list: each element of lab_groups is itself a list.
lab_groups = [lab_group0, lab_group1, lab_group2]
# + [markdown] slideshow={"slide_type": "subslide"}
# To select an element from `lab_group1` we:
# - first give the index of `lab_group1` in the list `lab_groups`
# - second give the index of the element within `lab_group1`
# + slideshow={"slide_type": "-"}
# The first index selects the inner list...
group = lab_groups[1]
print(group)
# ...the second index selects an element within that inner list.
name = lab_groups[1][2]
print(name)
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='IteratingLists'></a>
# ### Iterating Over Lists
#
# Looping over each item in a list is called *iterating*.
#
# To iterate over a list of the lab group we can use a `for` loop.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# In the following example, each iteration, variable `d` takes the value of the next item in the list:
# -
# d is bound to each item of the list literal in turn.
for d in [1, 2.0, "three"]:
    print("the value of d is:", d)
# + [markdown] slideshow={"slide_type": "subslide"}
# We could also express this as:
# +
data = [1, 2.0, "three"]
# The same loop, iterating over a named list.
for d in data:
    print("the value of d is:", d)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
#
#
# In the cell provided in your textbook *iterate* over the list `data = [1, 2.0, "three"]`.
#
# Each time the code loops:
# 1. print the value of data __cast as a string__ (Seminar 1 Data Types and Operators)
# 1. print the variable type<br>(to demonstrate that the variable has been cast. Note that otherwise the variable appears to remain unchanged).
# -
# Iterate over a list and cast each item as a string
# (exercise: print str(d) and type(str(d)) for each item d)
data = [1, 2.0, "three"]
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Indexing when Iterating over Lists
# Indexing can be useful when iterating over a list.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# For example, we can select a range of elements to iterate over:
# +
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo", "Nao", "Takashi"]
# The slice [2:5] covers indices 2, 3 and 4 only (stop index excluded).
for member in lab_group0[2:5]:
    print("name:", member)
# + [markdown] slideshow={"slide_type": "subslide"}
# A third value can used to choose a step size (similar to `range()`).
#
# For example, if we want to choose every other lab member we use step size, 2:
# +
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo", "Nao", "Takashi"]
# The slice [::2] takes every other member (step size 2).
for member in lab_group0[::2]:
    print("name:", member)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Note:__<br>
# Some data structures support *iterating* but do not support *indexing*.
#
# e.g. dictionaries, which we will learn about later.
#
# When possible, it is better to iterate over a list rather than use indexing.
# + [markdown] slideshow={"slide_type": "slide"}
# ### `enumerate()`
# The function `enumerate` can be used to return the index of each element.
# <br>This information is cast as a list to allow us to read it.
# + slideshow={"slide_type": "-"}
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo", "Nao", "Takashi"]
# enumerate() returns a lazy iterator of (index, value) pairs...
a = enumerate(lab_group0)
# ...cast it to a list to see its contents.
b = list(enumerate(lab_group0))
print(a)
print(b)
# + slideshow={"slide_type": "subslide"}
string = "string"
# enumerate also works on strings: (index, character) pairs.
a = list(enumerate(string))
print(a)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Iterating Over Multiple Lists Using `zip()`
# It can be very useful to iterate through more than one list within the same loop.
# + [markdown] slideshow={"slide_type": "subslide"}
# For example if we have a list of group members and a list of their scores for an assignment, we can print the score that corresponds to each lab member:
# +
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo", "Nao", "Takashi"]
# One score per member. zip() stops at the SHORTEST input, so the score
# list must be the same length as lab_group0 or trailing members are
# silently skipped (the original list was missing Takashi's score).
assignment1 = [72, 56, 65, 52, 71, 60, 68]
# Print each member next to their corresponding score.
for member, score in zip(lab_group0, assignment1):
    print(member, ": score =", score)
# + [markdown] slideshow={"slide_type": "subslide"}
# In this example
#
# `member` is the name given to the *current* value from the list `lab_group0`
#
# `score` is the names given to the *current* value from the list `assignment1`.
# + [markdown] slideshow={"slide_type": "subslide"}
#
# We can include any number of lists in `zip`.
# + [markdown] slideshow={"slide_type": "subslide"}
# For example it may be useful to print the combined score a lab member has achieved for all assigments this semester:
# +
lab_group0 = ["Sara", "Mari", "Quang", "Sam", "Ryo", "Nao", "Takashi"]
# Each list holds one score per member; the lengths must match
# lab_group0 because zip() stops at the shortest input (Takashi had
# no scores before, so he was silently dropped from the output).
assignment1 = [72, 56, 65, 52, 71, 60, 68]
assignment2 = [52, 61, 73, 55, 62, 55, 58]
assignment3 = [71, 71, 70, 66, 61, 71, 65]
# Print each member's combined score over the three assignments.
for member, score1, score2, score3 in zip(lab_group0,
                                          assignment1,
                                          assignment2,
                                          assignment3):
    print(member, ": score =", (score1 + score2 + score3))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Lists Example: Vectors
#
# __Vector:__ A quantity with magnitude and direction.
# + [markdown] slideshow={"slide_type": "subslide"}
# Position vectors (or displacement vectors) in 3D space can be expressed in terms of x,y, and z-directions.
#
# <img src="img/3d_position_vector.png" alt="Drawing" style="width: 300px;"/>
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# The position vector 𝒓 indicates the position of a point in 3D space.
#
# $$
# \mathbf{r} = x\mathbf{i} + y\mathbf{j} + z\mathbf{k}
# $$
#
# $\mathbf{i}$ is the displacement one unit in the x-direction<br>
# $\mathbf{j}$ is the displacement one unit in the y-direction<br>
# $\mathbf{k}$ is the displacement one unit in the z-direction
#
# + [markdown] slideshow={"slide_type": "subslide"}
# We can conveniently express $\mathbf{r}$ in matrix (or basis vector) form using the coefficients $x, y$ and $z$:
# $$
# \mathbf{r} = [x, y, z]
# $$
#
# __...which looks a lot like a Python list!__
#
# + [markdown] slideshow={"slide_type": "subslide"}
# You will encounter 3D vectors a lot in your engineering studies.
#
# They are used to describe many physical quantities, e.g. force.
# + [markdown] slideshow={"slide_type": "subslide"}
# <a id='DotProductLists'></a>
#
# ### Iterating with Zip
#
# #### Example: The dot product of two vectors:
#
# The __dot product__ is a really useful algebraic operation.
#
# It takes two equal-length *sequences of numbers* (often coordinate vectors) and returns a single number.
#
# + [markdown] slideshow={"slide_type": "slide"}
# __GEOMETRIC REPRESENTATION OF THE DOT PRODUCT__
#
# \begin{align}
# \mathbf{A} \cdot \mathbf{B} = |\mathbf{A}| |\mathbf{B}| cos(\theta)
# \end{align}
#
# Where:
# $|\mathbf{A}|$ and $|\mathbf{B}|$ are the *magnitudes* of $\mathbf{A}$ and $\mathbf{B}$
#
# $\theta$ is the angle between the two vectors.
# + [markdown] slideshow={"slide_type": "subslide"}
# __NOTATION:__ *Magnitude*
#
# <br>__3D vector__
# <br>$\mathbf{r} = [x, y, z]$
#
# magnitude of $\mathbf{r}$:
# <br>$|\mathbf{r}| = \sqrt{x^2 + y^2 + z^2}$
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# __2D vector__
# <br>$\mathbf{r} = [x, y]$
#
# magnitude of $\mathbf{r}$:
# <br>$|\mathbf{r}| = \sqrt{x^2 + y^2}$
# + [markdown] slideshow={"slide_type": "subslide"}
# __ALGEBRAIC REPRESENTATION OF THE DOT PRODUCT__
#
# The dot product of two $n$-length-vectors:
# <br> $ \mathbf{A} = [A_1, A_2, ... A_n]$
# <br> $ \mathbf{B} = [B_1, B_2, ... B_n]$
#
# \begin{align}
# \mathbf{A} \cdot \mathbf{B} = \sum_{i=1}^n A_i B_i
# \end{align}
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# So the dot product of two 3D vectors:
# <br> $ \mathbf{A} = [A_x, A_y, A_z]$
# <br> $ \mathbf{B} = [B_x, B_y, B_z]$
#
#
# \begin{align}
# \mathbf{A} \cdot \mathbf{B} &= \sum_{i=1}^n A_i B_i \\
# &= A_x B_x + A_y B_y + A_z B_z
# \end{align}
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# __Example:__
# <br> The dot product $\mathbf{A} \cdot \mathbf{B}$:
# <br> $ \mathbf{A} = [1, 3, −5]$
# <br> $ \mathbf{B} = [4, −2, −1]$
#
#
#
# \begin{align}
# {\displaystyle {\begin{aligned}\ [1,3,-5]\cdot [4,-2,-1]&=(1)(4)+(3)(-2)+(-5)(-1)\\& = 4 \qquad - 6 \qquad + 5 \\&=3\end{aligned}}}
# \end{align}
# + [markdown] slideshow={"slide_type": "subslide"}
# We can solve this very easily using a Python `for` loop.
#
# With each iteration of the loop we increase the value of `dot_product` (initial value = 0.0) by the product of `a` and `b`.
# + slideshow={"slide_type": "-"}
A = [1.0, 3.0, -5.0]
B = [4.0, -2.0, -1.0]
# Create a variable called dot_product with value, 0.0
dot_product = 0.0
# Accumulate the sum of element-wise products: A.B = sum of A_i * B_i.
for a, b in zip(A, B):
    dot_product += a * b
print(dot_product)
# + [markdown] slideshow={"slide_type": "subslide"}
# Example uses of the dot product:
#
# \begin{align}
# \mathbf{A} \cdot \mathbf{B} = |\mathbf{A}| |\mathbf{B}| cos(\theta)
# \end{align}
#
# - Find the angle between two vectors (from its cosine).
# - Check if the angle between two vectors is:
# - acute ($\mathbf{A} \cdot \mathbf{B}>0$)
# - obtuse ($\mathbf{A} \cdot \mathbf{B}<0$)
# - right angle ($\mathbf{A} \cdot \mathbf{B}==0$)
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# \begin{align}
# \mathbf{A} \cdot \mathbf{B} = |\mathbf{A}| |\mathbf{B}| cos(\theta)
# \end{align}
#
# - Find the magnitude of one vector in the direction of another.
# - Find physical quantities <br> e.g. the work, W, done pushing an object distance, d, with force, F:
#
# <img src="img/work_equation.jpg" alt="Drawing" style="width: 500px;"/>
#
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself:__
#
# $\mathbf{C} = [2, 4, 3.5]$
#
# $\mathbf{D} = [1, 2, -6]$
#
# In the cell below find the dot product:
# $\mathbf{C} \cdot \mathbf{D}$
#
# Is the angle between the vectors obtuse or acute or are the vectors perpendicular? <br>
# (Perpendicular if $\mathbf{A} \cdot \mathbf{B}==0$, acute if $\mathbf{A} \cdot \mathbf{B}>0$, or obtuse if $\mathbf{A} \cdot \mathbf{B}<0$).
#
# +
# The dot product of C and D
# + [markdown] slideshow={"slide_type": "subslide"}
# ## List comprehension
#
# A list comprehension is a really useful way to build lists from other lists.
#
# However, it can be difficult to interpret when reading.
#
# Look carefully at the following example.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# We have a list of numbers Y.
#
# `Y = [4, 6, 10, 11]`
#
# We want a new list of numbers, X where every element is the square of the element of the same index in Y.
# + slideshow={"slide_type": "subslide"}
Y = [4, 6, 10, 11]
# List comprehension: build X by squaring every element of Y.
X = [y**2 for y in Y]
print(Y)
print(X)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Interpreting the comprehension:
#
# "<font color='blue'>y**2</font> for <font color='green'>y</font> in <font color='red'>Y</font>"
#
# means:
# - <font color='red'>Loop through each value in the list Y.</font>
# - With each iteration:
# - <font color='green'>assign the value of the next element in Y to the variable y.</font>
# - <font color='blue'>square it to get the next element in X.</font>
# + [markdown] slideshow={"slide_type": "subslide"}
# This is equivalent to:
# +
Y = [4, 6, 10, 11]
#Create an empty list
X = []
# Append the square of each element of Y, one per iteration.
for y in Y:
    X.append(y**2)
print(Y)
print(X)
# + [markdown] slideshow={"slide_type": "subslide"}
# ...but the list comprehension is more succinct.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Comprehension: Operations on All Items in a List
#
# A list comprehension is useful if we want to perform an operation on all items of a list.
#
# Consider the exercise you completed earlier:
# -
# >*__Try it yourself__<br>
# >In the cell provided in your textbook *iterate* over the list `data = [1, 2.0, "three"]`.*<br>
# *Each time the code loops:*
# >1. *print the value of data __cast as a string__ *
# >1. *print the variable type*
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# You probably wrote something like...
# +
data = [1, 2.0, "three"]
for d in data:
    # Rebinding d does not change the list element it came from.
    d = str(d)
    print(type(d))
# + [markdown] slideshow={"slide_type": "subslide"}
# The original values in `data` remain unchanged:
# -
print(type(data[0]), type(data[1]), type(data[2]))
# + [markdown] slideshow={"slide_type": "subslide"}
# Using a list comprehension:
# +
# Rebuild data with every element cast to a string.
data = [str(d) for d in data]
print(data)
print(type(data[0]), type(data[1]), type(data[2]))
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Comprehensions with Conditionals
#
# Comprehensions can be written so that elements will only be added to a list if they satisfy a condition.
# + [markdown] slideshow={"slide_type": "subslide"}
# In the example below, elements remain in list, `items`, if their value is greater than 2.
# +
items = [1, 3, 5, 7, 3, 2, 1]
# Keep only the elements greater than 2.
items = [i for i in items if i > 2]
print(items)
# + [markdown] slideshow={"slide_type": "subslide"}
# In the example below, elements are squared and added to new list `squares` if their value is greater than 2.
# +
items = [1, 3, 5, 7, 3, 2, 1]
# Square each element greater than 2; the rest are skipped.
squares = [i**2 for i in items if i > 2]
print(items)
print(squares)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
#
# In the cell below, create a new list `V`, of the even numbers of list `U`, multiplied by 2.
# -
U = [2, 3, 6, 8, 9, 1, 2, 4]
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tuples
#
# In many ways, tuples are similar to lists.
#
# However, after creating a tuple:
# - you cannot __add__ or __remove__ elements from it without creating a new tuple (e.g. connecting two tuples).
# - you cannot change the value of a single tuple element e.g. by indexing.
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Tuples are therefore used for values that should not change after being created.
# <br>e.g. a vector of length three with fixed entries
#
# It is 'safer' in this case since it cannot be modified accidentally in a program.
#
# To create a tuple, use () parentheses.
#
# + [markdown] slideshow={"slide_type": "subslide"}
# __Example__
# <br>At Kyoto University, each professor is assigned an office.
#
# Philamore-sensei is given room 32:
# +
# A tuple: created with parentheses, immutable after creation.
room = ("Philamore", 32)
print("Room allocation:", room)
print("Length of entry:", len(room))
print(type(room))
# + [markdown] slideshow={"slide_type": "subslide"}
# <a id='IteratingTuples'></a>
# ### Iterating over Tuples
#
# We can *iterate* over tuples in the same way as with lists,
# -
# Iterate over tuple values
# (the same for-loop syntax used for lists)
for d in room:
    print(d)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Indexing
#
# We can index into a tuple:
# -
# Index into tuple values
# (indexing starts at 0, as with lists)
print(room[1])
print(room[0])
# + [markdown] slideshow={"slide_type": "subslide"}
# __Note__ Take care when creating a tuple of length 1...
# + [markdown] slideshow={"slide_type": "subslide"}
# In this example, we create a *list* of length 1:
# -
# Creating a list of length 1
# (square brackets always create a list)
a = [1]
print(a)
print(type(a))
print(len(a))
# + [markdown] slideshow={"slide_type": "subslide"}
# However, if we use the same process for a tuple:
# -
# (1) is just a parenthesised int, NOT a tuple of length 1.
a = (1)
print(a)
print(type(a))
#print(len(a))
# + [markdown] slideshow={"slide_type": "subslide"}
# To create a tuple of length 1, we use a comma:
# -
# The trailing comma is what makes this a tuple of length 1.
a = (1,)
print(a)
print(type(a))
print(len(a))
# + [markdown] slideshow={"slide_type": "subslide"}
# For example, Endo-sensei is a visiting professor who has not been assigned a room:
# -
# A length-1 tuple: Endo-sensei has no room number yet.
room = ("Endo",)
print("Room allocation:", room)
print("Length of entry:", len(room))
print(type(room))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Nested Data Structures: Lists of Tuples
# As part of a rooms database, we can create a list of tuples:
# +
# A list of tuples: one (name, room) tuple per professor.
# Endo-sensei's entry has length 1 because no room is assigned.
room_allocations = [("Endo",),
                    ("Philamore", 32),
                    ("Matsuno", 31),
                    ("Sawaragi", 28),
                    ("Okino", 28),
                    ("Kumegawa", 19)]
print(room_allocations)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself:__
#
# Refer to <a href='#NestedList'>Nested Lists</a> for how to index into *nested* data structures.
#
# In the cell below use indexing to print:
# - Matsuno-sensei's room number
# - Kumegawa-sensei's room number
# - The variable type of Kumegawa-sensei's room number
# + slideshow={"slide_type": "-"}
# Matsuno-sensei's room number
# Kumegawa-sensei's room number
# The Python variable type of Kumegawa-sensei's room number
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Sorting Tuples
# To make it easier to look up the office number of each professor, we can __sort__ the list of tuples into an office directory.
# + [markdown] slideshow={"slide_type": "subslide"}
# The ordering rule is determined by the __first element__ of each tuple:
#
# If the first element of each tuple is a __numeric__ type (`int`, `float`...)
# <br>the tuples are sorted by ascending __numerical__ order of the first element:
#
# If the first element of each tuple is a __`string`__ (as in this case),
# <br>the tuples are sorted by alphabetical __order__ of the first element.
# + [markdown] slideshow={"slide_type": "subslide"}
# A tuple is sorted using the same method to sort a list.
#
# Jump to <a href='#SortLists'>Sorting Lists</a>.
#
# __Try it yourself__
#
# In the cell provided below, sort the tuple, `room_allocations` by alphabetical order.
# +
# room_allocation sorted by alphabetical order
# + [markdown] slideshow={"slide_type": "subslide"}
# The office directory can be improved by excluding professors who do not have an office at Yoshida campus:
# -
for entry in room_allocations:
    # only professors with an office have an entry length > 1
    if len(entry) > 1:
        # f-string: {entry[0]} and {entry[1]} are filled in at run time.
        print(f"Name: {entry[0]}, Room: {entry[1]}")
# + [markdown] slideshow={"slide_type": "slide"}
# __Note:__ By prefixing the string you are printing with `f` you can interspace text with variables by placing the variable in { } braces. <br>This is known as a *formatted string* or *"f-string"*.
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='Dictionaries'></a>
# ## Dictionaries
#
# We used a list of tuples in the previous section to store room allocations.
#
# What if we wanted to use a program to find which room a particular professor has been allocated?
#
#
# -
# We would need to either:
# - iterate through the list and check each name.
#
# > For a very large list, this might not be very efficient.
#
# - use the index to select a specific entry of a list or tuple.
#
# > This works if we know the index to the entry of interest. For a very large list, this is unlikely.
# + [markdown] slideshow={"slide_type": "subslide"}
# A human looking at an office directory would identify individuals by name (or "keyword") rather than by a continuous set of integers.
#
# Using a Python __dictionary__ we can build a 'map' from names (*keys*) to room numbers (*values*).
# + [markdown] slideshow={"slide_type": "subslide"}
# A Python dictionary (`dict`) is declared using curly braces:
# +
# A dictionary: keys are professor names, values are room numbers.
room_allocations = {"Endo": None,
                    "Philamore": 32,
                    "Matsuno": 31,
                    "Sawaragi": 28,
                    "Okino": 28,
                    "Kumegawa": 19}
print(room_allocations)
print(type(room_allocations))
# + [markdown] slideshow={"slide_type": "subslide"}
# Each entry is separated by a comma.
#
# For each entry we have:
# - a 'key' (followed by a colon)
# - a 'value'.
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# __Note:__ For empty values (e.g. `Endo` in the example above) we use '`None`' for the value.
#
# `None` is a Python keyword for 'nothing' or 'empty'.
# + [markdown] slideshow={"slide_type": "subslide"}
#
# Now if we want to know which office belongs to Philamore-sensei, we can query the dictionary by key:
# -
# Look up a value by its key.
philamore_office = room_allocations["Philamore"]
print(philamore_office)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Iterating over Dictionaries
#
# We can __*iterate*__ over the keys in a dictionary as we iterated over the elements of a list or tuple:
#
# __Try it yourself:__
# <br>
# Refer back to:
# - <a href='#IteratingLists'>Iterating Over Lists</a>
# - <a href='#IteratingTuples'>Iterating Over Tuples</a>
# <br>to remind yourself how to *iterate* over a data structure.
#
# <br>
# Using __exactly the same method__, iterate over the entries in the dictionary `room allocations` using a `for` loop.
# <br>
# Each time the code loops, print the next dictionary entry.
# -
# iterate over the dictionary, room_allocations.
# print each entry
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice that this only prints the keys.
#
# We can access `keys` and `values` separately by:
# - creating two variable names before `in`
# - putting `items()` after the dictionary name
# + slideshow={"slide_type": "subslide"}
# .items() yields (key, value) pairs, unpacked into two loop variables.
for name, room_number in room_allocations.items():
    print(name, room_number)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__<br>
# Copy and paste the code from the previous cell.
# <br>
# Edit it so that it prints the room numbers only.
#
# Remember you can __"comment out"__ the existing code (instead of deleting it) so that you can refer to it later.
# e.g.
# ```python
# #print(name, room_number)
# ```
#
# +
# Iterate over the dictionary, room_allocations.
# Print each room number.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Look-up Keys
#
# A dictionary stores data differently from a list or tuple.
#
# Lists and tuples store entries as continuous pieces of memory, which is why we can access entries by index.
#
# Indexing cannot be used to access the entries of a dictionary. For example:
# ```python
# print(room_allocations[0])
# ```
# raises an error.
# + [markdown] slideshow={"slide_type": "subslide"}
# Dictionaries use a different type of storage which allows us to perform look-ups using a 'key'.
# + slideshow={"slide_type": "-"}
print(room_allocations["Philamore"])
# + [markdown] slideshow={"slide_type": "subslide"}
# <a id='DictionaryAdd'></a>
# ### Adding Entries to a Dictionary
#
# We use this same code to add new entries to an existing dictionary:
# +
print(room_allocations)
# Assigning to a new key adds an entry to the dictionary.
room_allocations["Fujiwara"] = 34
print(room_allocations)
# + [markdown] slideshow={"slide_type": "subslide"}
# <a id='DictionaryRemove'></a>
# ### Removing Entries from a Dictionary
#
# To remove an item from a dictionary we use the command `del`.
# +
print(room_allocations)
# del removes the key and its associated value from the dictionary.
del room_allocations["Fujiwara"]
print(room_allocations)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# <br>
# Okino-sensei is leaving Kyoto University.
#
# Her office will be re-allocated to a new member of staff, Ito-sensei.
#
# In the cell below, update the dictionary by deleting the entry for Okino-sensei and creating a new entry for Ito-sensei.
#
# Print the new dictionary.
# +
# Remove Okino-sensei (room 28) from the dictionary.
# Add a new entry for Ito-sensei (room 28)
# + [markdown] slideshow={"slide_type": "subslide"}
# So far we have used a string variable types for the dictionary keys.
#
# However, we can use almost any variable type as a key and we can mix types.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Sorting Dictionaries
#
# A dictionary is sorted using the same method used to sort a list.
#
# Jump to <a href='#SortLists'>Sorting Lists</a>.
# + slideshow={"slide_type": "subslide"}
print(room_allocations)
# By default, the entries are sorted by key
print(sorted(room_allocations))
# This is the same as:
print(sorted(room_allocations.keys()))
# So to sort by value:
# NOTE: this line raises a TypeError here, because the value None
# (Endo-sensei's missing room) cannot be compared with an int.
print(sorted(room_allocations.values()))
# + [markdown] slideshow={"slide_type": "subslide"}
# __Note:__ The value `None` cannot be sorted.
# + [markdown] slideshow={"slide_type": "subslide"}
# This is where a __comprehension__ can be useful.
#
# Let's produce a dictionary of the professors with offices only.
# +
# Keep only entries with a truthy value, i.e. drop the None room numbers.
occupied_rooms = {key: val for key, val in room_allocations.items() if val}
print(sorted(occupied_rooms.values()))
# -
#
#
# room_number: name for name, room_number in room_allocations.items()
#
# #### Interpreting the comprehension:
#
# "<font color='purple'>key: val</font> for <font color='green'>key</font>, <font color='blue'>val</font> in room_allocations.items() if <font color='red'>val</font>"
#
# means:
# <font color='red'>If the dictionary entry has a value (val)</font>, create an <font color='purple'>entry</font> in the new list using its:
# <br><font color='green'>key</font>
# <br><font color='blue'>value</font>
#
# + [markdown] slideshow={"slide_type": "subslide"}
# <a id='DictionaryRestructure'></a>
# ### Re-structuring to make a new Dictionary
# __Example__: We could 'invert' the room allocation dictionary to create a room-to-name map.
#
# Let's build a new dictionary (`room_map`) by looping through the old dictionary (`room_allocation`) using a `for` loop:
# + slideshow={"slide_type": "subslide"}
# Create empty dictionary
room_map = {}
# Build dictionary to map 'room number' -> name
for name, room_number in room_allocations.items():
    # Insert entry into new dictionary
    # NOTE: if two professors share a room number (e.g. 28 above),
    # the later entry overwrites the earlier one.
    room_map[room_number] = name
print(room_map)
# + [markdown] slideshow={"slide_type": "subslide"}
# A more efficient way is to use a comprehension to do this:
# -
# Invert the dictionary with a comprehension: room number -> name.
# (Fixed NameError: the dictionary defined above is room_allocations,
# not room_allocation.)
room_map = {room_number: name for name, room_number in room_allocations.items()}
print(room_map)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now consult the room-to-name map to find out if a particular room is occupied and by whom.
#
# Let's assume some rooms are unoccupied and therefore do not exist in this dictionary.
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# If we try to use a key that does not exist in the dictionary, e.g.
#
# > `occupant17 = room_map[17]`
#
# Python will give an error (raise an exception).
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# If we're not sure that a __key__ is present (that a room is occupied or unoccupied in this case), we can check using the operator '`in`'.
# <br>(We used this operator to check whether an entry exists in a __list__.)
# + slideshow={"slide_type": "-"}
# 'in' tests whether a KEY exists in the dictionary.
print(19 in room_map)
print(17 in room_map)
# -
# So we know that:
# - room 19 is occupied
# - room 17 is unoccupied
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# The same result can be achieved using:
# + slideshow={"slide_type": "-"}
# Testing against the keys view explicitly gives the same result.
print(19 in room_map.keys())
print(17 in room_map.keys())
# + [markdown] slideshow={"slide_type": "subslide"}
# When using `in`, take care to check for the __key__ (not the value)
# -
print('Kumegawa' in room_map)
# + [markdown] slideshow={"slide_type": "subslide"}
# If we instead need to check that a __value__ is present:
# -
print('Kumegawa' in room_map.values())
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Avoiding Errors
#
# It can be useful to check if a variable exists.
#
# If your program tries to use a variable that does not exist, errors can occur.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# An example check is shown below.
#
# After the `if` statement we can include code that is only to be executed if the room is occupied.
# +
rooms_to_check = [17, 19]
for room in rooms_to_check:
    # Guard the lookup: only index room_map when the key exists,
    # otherwise room_map[room] would raise a KeyError.
    if room in room_map:
        print(f"Room {room} is occupied by {room_map[room]}-sensei")
    else:
        print(f"Room {room} is unoccupied.")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Choosing a data structure
#
# An important task when developing a computer program is selecting the *appropriate* data structure for a task.
#
# Here are some examples of the suitability of the data types we have studied for some common computing tasks.
#
# + [markdown] slideshow={"slide_type": "subslide"}
# - __Dynamically changing individual elements of a data structure.__
#
# e.g. updating the occupant of a room number or adding a name to a list of group members.
#
# __Lists and dictionaries__ allow us to do this.<br>
# __Tuples__ do not.
# + [markdown] slideshow={"slide_type": "subslide"}
# - __Performing an operation on every item in a sequence.__
#
# e.g. checking every item in a data set against a particular condition (e.g. prime number, multiple of 5....etc), performing an algebraic operation on every item in a data set.
#
# __Lists and tuples__ make this simple as we can call each entry in turn using its index.
# <br>
# __Dictionaries__ this is less efficient as it requires more code.
# + [markdown] slideshow={"slide_type": "subslide"}
# - __Selecting a single item from a data structure without knowing its position in a sequence.__
#
# e.g. looking up the profile of a person using their name, avoiding looping through a large data set in order to identify a single entry.
#
# __Dictionaries__ allow us to select a single entry by an associated (unique) key variable.
# <br>
# __Lists and tuples__ make this difficult as to pick out a single value we must either i) know its position in an ordered sequence, ii) loop through every item until we find it.
# + [markdown] slideshow={"slide_type": "subslide"}
#
# - __Protecting individual items of a data sequence from being added, removed or changed within the program.__
#
# e.g. representing a vector of fixed length with fixed values, representing the coordinates of a fixed point.
#
# __Tuples__ allow us to do this.
# <br>
# __Lists and dictionaries__ do not.
# + [markdown] slideshow={"slide_type": "subslide"}
# - __Speed__
#
# For many numerical computations, efficiency is essential.
#
# More flexible data structures:
# - are generally less efficient computationally.
# - require more computer memory.
#
# We will study the difference in speed there can be between different data structures in a later seminar.
# + [markdown] slideshow={"slide_type": "subslide"}
# - __Storing items in a particular sequence (so that they can be addressed by index or in a particular order)__.
#
# e.g. representing the x, y, z coordinates of a 3D position vector, storing data collected from an experiment as a time series.
#
# __Lists and tuples__ allow us to do this.
# <br>
# __Dictionaries__ do not.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Review Exercises
# Here are a series of engineering problems for you to practise each of the new Python skills that you have learnt today.
# -
# ### Review Exercise: Data structures.
#
# __(A)__ In the cell below, what type of data structure is C?
#
# __(B)__ Write a line of code that checks whether 3 exists within the data structure.
#
# __(C)__ Write a line of code that checks whether 3.0 exists within the data structure.
#
# __(D)__ Write a line of code that checks whether "3" exists within the data structure.
#
# Example data structure used by parts (A)-(D) of this exercise.
C = (2, 3, 5, 6, 1, "hello")
# ### Review Exercise: Using Lists with `for` Loops.
#
# In the cell below:
#
# - Create a list with the names of the months.
# <br>
# - Create a second list with the number of days in each month (for a regular year).
# <br>
# - Create a `for` loop that prints:
#
# `The number of days in MONTH is XX days`
#
# where, `MONTH` is the name of the month and `XX` is the correct number of days in that month.
#
# Hint: Refer to <a href='#DotProductLists'>Indexing Example: The dot product of two vectors</a> for how to use two vectors in a loop.
#
# +
# A for loop to print the number of days in each month
# -
# ### Review Exercise: Indexing.
#
# __(A)__ In the cell below write a program that adds two vectors, $\mathbf{A}$ and $\mathbf{B}$, expressed as lists
# <br>
#
# $\mathbf{A} = [-2, 1, 3]$
#
# $\mathbf{B} = [6, 2, 2]$
#
# $ \mathbf{C} = [C_1,
# C_2, ...
# C_n] = \mathbf{A} + \mathbf{B} = [(A_1 + B_1),
# (A_2 + B_2), ...
# (A_n + B_n)]$
#
# __Hints:__
# - Refer to <a href='#DotProductLists'>Indexing Example: The dot product of two vectors</a> for how to use two vectors in a loop.
# - Start by creating an empty list, `C = []`.
# <br>Add an element to the list each time the code loops using the method `C.append()`
# <br><a href='#Append'>Jump to Adding an Item to a List</a>
#
# <br>
# __(B)__ To add two vectors, the number of elements in each vectors must be equal.
# <br>Use the function `len()` to print the length of $\mathbf{A}$ and the length of $\mathbf{B}$ before adding the two vectors.
# <br><a href='#Length'>Jump to Finding the Length of a List</a>
#
# <br>
# __(C)__ Use `if` and `else` statements (Seminar 2) to:
# - add the two vectors __only__ if the length of $\mathbf{A}$ and the length of $\mathbf{B}$ are equal.
# - otherwise print a message (e.g. "`unequal vector lengths!`")
#
# Hint: Use a logical operator (`==`, `<`, `>`....) to compare the lengths of $\mathbf{A}$ and $\mathbf{B}$. <br>Refer to __Logical Operators__ (Seminar 2).
#
# <br>
# __(D)__ Check your code works by using it to try and add two vectors with:
# <br>i) the same number of elements in each vector
# <br>ii) a different number of elements in each vector
# +
# Vector addition program with length check.
# -
# ### Review Exercise: `if` and `else` statements.
#
# Copy and paste the program you wrote earlier to <a href='#DotProductLists'>find the dot product of two vectors</a> into the cell below.
#
# Within the loop use `if`, `elif` and `else` to make the program print:
# - "`The angle between vectors is acute`" if the dot product is positive.
# - "`The angle between vectors is obtuse`" if the dot product is negative.
# - "`The vectors are perpendicular`" if the dot product is 0.
# +
# Determining angle types using the dot product.
# -
# ### Review Exercise: Dictionaries.
#
# <img src="img/newperiodictable.jpg" alt="Drawing" style="width: 500px;"/>
#
# __(A)__ Choose 5 elements from the periodic table.
# <br>
# In the cell below create a dictionary:
# <a href='#Dictionaries'>Jump to Dictionaries</a>
# - __keys:__ chemical symbol names
# - __values:__ atomic numbers
#
# e.g.
# ```python
# dictionary = {"C":6, "N":7, "O":8....}
#
# ```
#
#
# __(B)__ Remove one entry from the dictionary and print the updated version.
# <br><a href='#DictionaryRemove'>Jump to Removing Entries from a Dictionary</a>
#
# __(C)__ Add a new entry (chemical symbol and atomic number) to the dictionary and print the updated version.
# <br><a href='#DictionaryAdd'>Jump to Adding Entries to a Dictionary</a>
#
# __(D)__ Use a `for` loop to create a new dictionary:
# - __keys:__ atomic numbers
# - __values:__ chemical symbols
# using your original dictionary.
# <br> Hint: Refer to the earlier example of <a href='#DictionaryRestructure'>re-structuring to make a new dictionary.</a>
#
# __*Optional Extension*__
#
# __(E)__ Print a __list__ of the chemical symbols in your dictionary, sorted into alphabetical order.
# Hints:
# - Create an empty list
# - Use a for loop to add each chemical symbol to the list
# - Sort the list in alphabetical order
# +
# Dictionary of periodic table items.
# -
# ### Review Exercise: `while` loops (bisection)
#
# Bisection is an iterative method for approximating a root of a function $y = F(x)$
# <br>i.e. a value of $x$ for which the function $F(x)$ is equal to zero.
# <br>Therefore the roots are found where the line of the function F(x) __crosses__ the x axis (the red dot indicates the root of the function):
#
#
# <img src="img/bisection_method.png" alt="Drawing" style="width: 300px;"/>
#
#
# If we know such a __crossing point__ lies within the interval x = a and x = b we can repeatedly *bisect* this interval to narrow down the interval in which x = root must lie.
#
# Each iteration, x$_{mid} = \frac{a + b}{2}$ is determined and used to determine whether the crossing point is between x$_{mid}$ and a or x$_{mid}$ and b.
# <br>This is used to define a new, narrower interval in which we know the crossing point lies.
#
# x_mid = (a + b) / 2
#
# # If F(x) changes sign between F(x_mid) and F(a),
# # the root must lie between F(x_mid) and F(a)
#
# if F(x_mid) * F(a) < 0:
# b = x_mid
# x_mid = (a + b)/2
#
# # If F(x) changes sign between F(x_mid) and F(b),
# # the root must lie between F(x_mid) and F(b)
#
# else:
# a = x_mid
# x_mid = (a + b)/2
#
#
# <img src="img/bisection_method_simple.png" alt="Drawing" style="width: 300px;"/>
#
# In the example shown, the midpoint (x$_{mid}$) of a$_1$ and b$_1$ is b$_2$
# <br>F(a$_1$) $\times$ F(b$_2$) = negative
# <br>F(b$_1$) $\times$ F(b$_2$) = positive
#
# So the new increment is between a$_1$ and b$_2$.
#
# <br>
#
# By repeating this process, the value of F(x$_{mid}$) should become closer to zero with each iteration.
#
# The process is repeated until the *absolute* value |F(x$_{mid}$)| is sufficiently small (below a predetermined value (*tolerance*)).
#
# We then determine x$_{mid}$ is the root of the function.
#
# **Task:**
#
# $$
# F(x) = 4x^3 - 3x^2 - 25x - 6
# $$
#
# <img src="img/graph_polynomial.png" alt="Drawing" style="width: 300px;"/>
#
# The function has one root between x = 0 and x = -0.6.
#
# __(A)__ Use the bisection method to estimate the value of the root between x = 0 and x = -0.6.
# <br>Instructions:
# - Use a while loop to repeat the code above __while__ abs(F(x$_{mid}$)) > 1 $\times10^{-6}$.
# - Each time the code loops:
# - __Compute__ F(a), F(b) and F(x_mid) [Hint: Use appropriate variable names that don't contain () parentheses]
# - __Print__ F(x$_{mid}$) to check abs(F(x$_{mid}$)) $< 1 \times10^{-6}$. <br>Use the function `abs()` to compute the absolute value of a number, <br>https://docs.python.org/3/library/functions.html#abs <br> e.g. `y = abs(x)` assigns the absolute value of `x` to `y`.
# - __Bisect__ the increment using the code shown above
# - __After__ the loop print the final value of x$_{mid}$ using `print("root = ", x_mid) `. <br>This value is the estimate of the root.
#
# <a href='#WhileLoops'>Jump to While Loops'</a>
#
# __(B)__ The bisection method is only effective where F(a) and F(b) are of opposite sign.
# <br> i.e. where F(a) $\times$ F(b) $ < 0$
# <br>Add an if statement to your code so that the while loop is only run *if* the inputs a and b are of opposite sign.
# +
# Bisection while loop
# -
# __(C)__ In the previous example you stopped the while loop when the value of the function was sufficiently small (abs(F(x$_{mid}$)) $< 1 \times10^{-6}$) that we can consider the corresponding value of x to be a root of the function.
#
# This time we are going to edit your code so that the loop is stopped when it reaches a __maximum number of iterations__. <br>Copy and paste your code from the cell above in the cell below.
# <br>Replace your __while loop__ with a __for loop__ that runs the code in the loop 25 times then stops.
#
# __(D)__ Within the for loop, add a `break` statement.
# <br>The `break` statement should exit the for loop __if__ abs(F(x$_{mid}$)) $< 1 \times10^{-6}$
# <br>i.e. __if__ abs(F(x$_{mid}$)) $< 1 \times10^{-6}$ the loop will stop before the maximum number of iterations is reached.
# <br>Before the command `break`, print the value of x$_{mid}$ using `print("root =", x_mid) `. <br>This value is the estimate of the root.
#
# <a href='#Break'>Jump to break'</a>
# +
# Copy and paste your code from the cell above, here
# + [markdown] slideshow={"slide_type": "slide"}
# # Updating your git repository
#
# You have made several changes to your interactive textbook.
#
# > Save your work.
# > <br> `git add -A`
# > <br>`git commit -m "A short message describing changes"`
# > <br>`git push`
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
# - A data structure is used to assign a collection of values to a single collection name.
# - A Python list can store multiple items of data in sequentially numbered elements (numbering starts at zero)
# - Data stored in a list element can be referenced using the list name followed by an index number in [] square brackets.
# - The `len()` function returns the length of a specified list.
# - A Python tuple is a sequence whose values can not be individually changed, removed or added to (except by adding another tuple).
# - Data stored in a tuple element can be referenced using the tuple name followed by an index number in [] square brackets.
# - A Python dictionary is a list of key: value pairs of data in which each key must be unique.
# - Data stored in a dictionary element can be referenced using the dictionary name followed by its key in [] square brackets.
# + [markdown] slideshow={"slide_type": "slide"}
# # Homework
#
# 1. __COMPLETE__ any unfinished Review Exercises.<br>In particular, please complete: __Review Exercise: `while` loops (bisection)__.<br>You will need to refer to your answer in next week's Seminar.
# 1. __PUSH__ the changes you make at home to your online repository.
#
#
| 4_Data_Structures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="XXg0zy6FvhJ2" colab_type="text"
# ### LSTM Code Credited to: https://www.kaggle.com/taronzakaryan/stock-prediction-lstm-using-pytorch
# + id="bK0eqzkI_moO" colab_type="code" colab={}
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import torch
import torch.nn as nn
import math
from sklearn.metrics import mean_squared_error
from datetime import datetime
# + id="mCfVK4TZfwRt" colab_type="code" outputId="6045a344-9f97-4bc1-85e2-213eb537367a" colab={"base_uri": "https://localhost:8080/", "height": 281}
def convert_date(dt):
    """Re-format an ISO-8601 UTC timestamp ('...T...Z') as 'YYYY-MM-DD-HH'."""
    parsed = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%SZ')
    return parsed.strftime('%Y-%m-%d-%H')

def convert_date_2(dt):
    """Inverse of convert_date: 'YYYY-MM-DD-HH' back to the ISO-8601 'Z' form."""
    parsed = datetime.strptime(dt, '%Y-%m-%d-%H')
    return parsed.strftime('%Y-%m-%dT%H:%M:%SZ')

epoch_0 = datetime(1970, 1, 1, 0, 0, 0)

def convertDate(date):
    """Convert a date-like value to (float) seconds since the Unix epoch."""
    elapsed = pd.Timestamp(date) - epoch_0
    return elapsed / np.timedelta64(1, 's')
# Load the 2-hourly BTC dataset and convert every timestamp column to Unix
# seconds so they can serve as numeric features.
df = pd.read_csv("./BTC-2h-new.csv")
df['created_at'] = df['created_at'].apply(convertDate)
df['modified'] = df['modified'].apply(convertDate)
df['Date'] = df['Date'].apply(convertDate)
df = df.sort_values(by=['created_at'])
# Forward-fill gaps, then zero anything still missing (e.g. leading NaNs).
df = df.fillna(method='ffill')
df = df.fillna(0)
# Keep rows for a single symbol (integer code 3) — TODO confirm which asset
# this code encodes in this particular CSV.
df = df[df.Symbol == 3].reset_index()
df = df.drop_duplicates()
# Quick sanity plot of the target series (volume-weighted average price).
df[['VWAP']].plot()
plt.ylabel("Price")
plt.title("Currency")
plt.show()
# + id="iQSGrYWhWchF" colab_type="code" outputId="dcc118f4-67dc-4257-eace-fdd72dbca845" colab={"base_uri": "https://localhost:8080/", "height": 439}
df
# + id="IJhr7v1_rIWU" colab_type="code" outputId="1d9a4efb-d206-4ad7-8d7c-f032aaa3ef6d" colab={"base_uri": "https://localhost:8080/", "height": 153}
df.columns
# + id="Vk_8a3SeehUD" colab_type="code" outputId="61fff11f-853b-491e-af91-df9f58d42eff" colab={"base_uri": "https://localhost:8080/", "height": 439}
#df = df[['VWAP', 'positive_count', 'negative_count', 'null_count', 'Volume', 'volume_tweets', 'AdrActCnt', 'BlkCnt', 'BlkSizeMeanByte', 'CapMrktCurUSD', 'DiffMean','FeeTotUSD', 'HashRate', 'ROI1yr', 'ROI30d', 'SplyCur', 'TxCnt','TxTfrValAdjUSD', 'VtyDayRet180d', 'VtyDayRet30d', 'VtyDayRet60d']]
#df = df.drop(['index', 'created_at', 'Date', 'Symbol', 'modified'], axis=1)
#df = df[['VWAP']]
df
# + id="iK14BxMzh9bv" colab_type="code" colab={}
# Column position of the prediction target (VWAP); used later to slice the
# label column out of the scaled matrix.
index_vwap = list(df.columns).index('VWAP')
# + id="EthjddiYfTxL" colab_type="code" colab={}
df = df.fillna(method='ffill')
# Scale every column to [-1, 1]. Note: fit_transform returns a plain numpy
# array, so `df` is no longer a DataFrame after this line.
scaler = MinMaxScaler(feature_range=(-1, 1))
df = scaler.fit_transform(df)
# + id="qftuuRBNfiys" colab_type="code" outputId="5aea007e-e755-4241-c5e9-4bb062d1a84e" colab={"base_uri": "https://localhost:8080/", "height": 85}
def load_data(stock, look_back, target_index=None):
    """Build sliding windows over `stock` and split them into train/test sets.

    Each window spans `look_back` consecutive rows: the first look_back-1
    rows are the model input, and the target column of the final row is the
    label. The last ~20% of windows form the (chronological) test set.

    Parameters
    ----------
    stock : ndarray, shape (n_samples, n_features)
        Scaled feature matrix whose rows are consecutive time steps.
    look_back : int
        Window length (input length is look_back - 1).
    target_index : int, optional
        Column index of the prediction target. Defaults to the module-level
        `index_vwap` (position of the 'VWAP' column), preserving the
        original behaviour.

    Returns
    -------
    list : [x_train, y_train, x_test, y_test]
    """
    if target_index is None:
        target_index = index_vwap  # backward-compatible default
    data_raw = stock
    # One window per starting index.
    data = np.array([data_raw[i: i + look_back]
                     for i in range(len(data_raw) - look_back)])
    test_set_size = int(np.round(0.2 * data.shape[0]))
    train_set_size = data.shape[0] - test_set_size
    # Inputs: all but the last step; labels: target column of the last step.
    x_train = data[:train_set_size, :-1, :]
    y_train = data[:train_set_size, -1, target_index:target_index + 1]
    x_test = data[train_set_size:, :-1, :]
    y_test = data[train_set_size:, -1, target_index:target_index + 1]
    return [x_train, y_train, x_test, y_test]
# Build sliding-window train/test splits: each sample is look_back-1 steps
# of all features predicting the final step's VWAP.
look_back = 32 # how much to look back in each step
x_train, y_train, x_test, y_test = load_data(df, look_back)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# + id="jfwsJv0XfmlW" colab_type="code" outputId="c25f79b4-2ba8-4657-b59b-b4733a63a6af" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Interactive sanity check of the training-input shape.
x_train.shape
# + id="Fozx2Kshf-zu" colab_type="code" colab={}
# Convert the numpy splits to float32 torch tensors for training.
x_train = torch.from_numpy(x_train).type(torch.Tensor)
x_test = torch.from_numpy(x_test).type(torch.Tensor)
y_train = torch.from_numpy(y_train).type(torch.Tensor)
y_test = torch.from_numpy(y_test).type(torch.Tensor)
# + id="6XU3igKnj1-d" colab_type="code" outputId="48e1a557-7c28-4c9c-908d-aabe02e73bd2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Interactive check that inputs and labels line up on the batch dimension.
y_train.size(),x_train.size()
# + id="uQtAs2n9j3Yy" colab_type="code" colab={}
# Mini-batch plumbing. NOTE(review): train_loader/test_loader are built here
# but the training loop below runs full-batch on x_train directly, so these
# loaders (and batch_size) appear unused — confirm before removing.
n_steps = look_back - 1
batch_size = 100
num_epochs = 400
train = torch.utils.data.TensorDataset(x_train,y_train)
test = torch.utils.data.TensorDataset(x_test,y_test)
# shuffle=False keeps chronological order, which matters for time series.
train_loader = torch.utils.data.DataLoader(dataset=train,
                                           batch_size=batch_size,
                                           shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test,
                                          batch_size=batch_size,
                                          shuffle=False)
# + id="EWDqC1pZj7Dm" colab_type="code" outputId="ec04b67f-8308-4f90-8b13-d24286842d92" colab={"base_uri": "https://localhost:8080/", "height": 326}
# Model hyper-parameters: one input feature per dataframe column, a 25-unit
# hidden state, a 2-layer stacked LSTM, and a single regression output.
input_dim = x_train.shape[-1]
hidden_dim = 25
num_layers = 2
output_dim = 1
class LSTM(nn.Module):
    """Many-to-one LSTM regressor.

    Maps a (batch, seq_len, input_dim) sequence to a (batch, output_dim)
    prediction computed from the hidden state of the last time step.
    """

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        # batch_first=True -> inputs/outputs are (batch, seq, feature).
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Fresh zero-valued hidden/cell states each call (stateless between
        # batches). No gradient flows through the initial states, so plain
        # zeros replace the original requires_grad_() ... .detach() round
        # trip; matching x's device/dtype keeps this working on GPU/half.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim,
                         device=x.device, dtype=x.dtype)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim,
                         device=x.device, dtype=x.dtype)
        out, _ = self.lstm(x, (h0, c0))
        # Regress only on the final time step's hidden state.
        return self.fc(out[:, -1, :])
# Instantiate the model, loss and optimizer for full-batch training.
model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
# reduction='mean' replaces the long-deprecated size_average=True flag;
# behaviour is identical (average of the squared errors).
loss_fn = torch.nn.MSELoss(reduction='mean')
# NOTE(review): lr=0.1 is unusually large for Adam — confirm it converges.
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
print(model)
# Report the number of parameter tensors and each tensor's shape.
params = list(model.parameters())
print(len(params))
for p in params:
    print(p.size())
# + id="hmlaSGIYkM5L" colab_type="code" outputId="3056987e-5a85-4d7e-a60d-dfb90352a6b7" colab={"base_uri": "https://localhost:8080/", "height": 680}
# Full-batch training: forward pass, MSE loss, backprop and an Adam step on
# every epoch; hist records the loss curve for plotting.
hist = np.zeros(num_epochs)
seq_dim =look_back-1
for t in range(num_epochs):
    y_train_pred = model(x_train)
    loss = loss_fn(y_train_pred, y_train)
    # Log every 10th epoch (skipping epoch 0).
    if t % 10 == 0 and t !=0:
        print("Epoch ", t, "MSE: ", loss.item())
    hist[t] = loss.item()
    # Standard step: clear old gradients, backprop, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# + id="lNit-WL9kQWG" colab_type="code" outputId="f4aa2043-5655-4f1a-9144-fa305220de03" colab={"base_uri": "https://localhost:8080/", "height": 516}
# Visual check: training-set fit (data vs. predictions), then the loss curve.
plt.plot(y_train.detach().numpy(), label="Data")
plt.plot(y_train_pred.detach().numpy(), label="Preds")
plt.legend()
plt.show()
plt.plot(hist, label="Training loss")
plt.legend()
plt.show()
# + id="D7NB1VzLxHKl" colab_type="code" colab={}
# Build a single-column scaler that inverts only the VWAP dimension of the
# full MinMaxScaler, by copying that column's fitted min_/scale_ — lets us
# map 1-column predictions back to price units via inverse_transform.
scaler2 = MinMaxScaler()
scaler2.min_, scaler2.scale_ = scaler.min_[index_vwap], scaler.scale_[index_vwap]
# + id="dJ3phfjDka09" colab_type="code" outputId="fd28fcbd-1fe7-46fa-ff0a-2097a4420405" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Predict the held-out windows, undo the [-1, 1] scaling, and report RMSE
# in original price units for both splits.
y_test_pred = model(x_test)
# invert predictions
y_train_pred = scaler2.inverse_transform(y_train_pred.detach().numpy())
y_train = scaler2.inverse_transform(y_train.detach().numpy())
y_test_pred = scaler2.inverse_transform(y_test_pred.detach().numpy())
y_test = scaler2.inverse_transform(y_test.detach().numpy())
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# + id="GroT_3X_kghG" colab_type="code" outputId="e78dd1b4-cb8f-4fcd-e6ff-1bfb4177b3ab" colab={"base_uri": "https://localhost:8080/", "height": 483}
# Arrange train/test predictions into full-length arrays (NaN elsewhere) so
# each segment overlays at the right position on the original series.
trainPredictPlot = np.empty_like(df[:,index_vwap:index_vwap+1])
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(y_train_pred)+look_back, :] = y_train_pred
testPredictPlot = np.empty_like(df[:,index_vwap:index_vwap+1])
testPredictPlot[:, :] = np.nan
# NOTE(review): the -1 offsets here look like the classic look-back plotting
# off-by-one; confirm the test overlay aligns with the tail of the series.
testPredictPlot[len(y_train_pred)+look_back-1:len(df)-1, :] = y_test_pred
# Placeholder for an (currently disabled) regression-baseline overlay.
regPredictPLot = np.empty_like(df[:,index_vwap:index_vwap+1])
regPredictPLot[:, :] = np.nan
#regPredictPLot[len(df)-len(y_predict):, :] = y_predict
# plot baseline and predictions
plt.figure(figsize=(15,8))
plt.plot(scaler.inverse_transform(df)[:,index_vwap], label='Original')
plt.plot(trainPredictPlot, label='train_pred')
plt.plot(testPredictPlot, label='LSTM')
#plt.plot(regPredictPLot, label='Regression')
plt.legend()
plt.show()
# + id="Bp_deySCVBep" colab_type="code" colab={}
# NOTE(review): `import datetime` shadows the earlier
# `from datetime import datetime`; this cell consistently uses the module
# form (datetime.datetime), so it is self-contained.
import datetime
import numpy as np
epoch_0 = datetime.datetime(1970,1,1,0,0,0)
def convertDate(date):
    """Convert a date-like value to (float) seconds since the Unix epoch."""
    return (pd.Timestamp(date) - epoch_0) / (np.timedelta64(1,'s'))
# Prediction horizons (hours) and the cryptocurrencies studied.
K_LIST = [2, 5, 12, 24, 48]
SYMBOLS_LIST = ['BTC.X', 'BCH.X', 'LTC.X', 'ETH.X']
twoHdata = pd.read_csv("2h.csv")
fiveH_data = pd.read_csv("5h.csv")
twelveH_data = pd.read_csv("12h.csv")
twoFourH_data = pd.read_csv("24h.csv")
fourEight_data = pd.read_csv("48h.csv")
hour_df_list = [twoHdata, fiveH_data, twelveH_data, twoFourH_data, fourEight_data]
hour_symbol_df_list = []
# For each horizon: normalise the timestamp columns, fill gaps, then split
# the frame into one sub-frame per symbol. Assumes Symbol is integer-coded
# in SYMBOLS_LIST order — TODO confirm against the CSVs.
for k_index in range(len(K_LIST)):
    df = hour_df_list[k_index]
    df['created_at'] = df['created_at'].apply(convertDate)
    df['modified'] = df['modified'].apply(convertDate)
    df['Date'] = df['Date'].apply(convertDate)
    df = df.sort_values(by=['created_at'])
    df = df.fillna(method='ffill')
    df = df.fillna(0)
    hour_symbol_list = []
    for symbol_index in range(len(SYMBOLS_LIST)):
        df_symbol = df[df.Symbol == symbol_index].reset_index()
        hour_symbol_list.append(df_symbol)
    hour_symbol_df_list.append(hour_symbol_list)
# + id="dOvyubpn0trb" colab_type="code" outputId="e1da60e0-5163-4b7e-abf4-7c83797d2e20" colab={"base_uri": "https://localhost:8080/", "height": 607}
# Correlation heat map for the 2-hour BTC.X frame: shows how strongly each
# feature tracks every other (and the VWAP target in particular).
data = hour_symbol_df_list[0][0]
corr = data.corr()
fig = plt.figure(figsize=(15, 8))
axes = fig.add_subplot(111)
heat = axes.matshow(corr, cmap='coolwarm', vmin=-1, vmax=1)
fig.colorbar(heat)
# One tick per column, labelled with the column names on both axes.
positions = np.arange(0, len(data.columns), 1)
axes.set_xticks(positions)
plt.xticks(rotation=90)
axes.set_yticks(positions)
axes.set_xticklabels(data.columns)
axes.set_yticklabels(data.columns)
plt.show()
# + id="pg_4nFv20_72" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# Chronological 80/20 split (shuffle=False preserves time order) for every
# (horizon, symbol) pair; results are nested lists indexed [k][symbol].
X_train_hour_symbol = []
y_train_hour_symbol = []
X_test_hour_symbol = []
y_test_hour_symbol = []
for k_index in range(len(K_LIST)):
    X_train_hour_symbol_list = []
    y_train_hour_symbol_list = []
    X_test_hour_symbol_list = []
    y_test_hour_symbol_list = []
    for symbol_index in range(len(SYMBOLS_LIST)):
        df = hour_symbol_df_list[k_index][symbol_index]
        # Features: everything except the target column VWAP.
        X = df.drop('VWAP', axis = 1)
        y = df[['VWAP']]
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, shuffle = False)
        X_train_hour_symbol_list.append(X_train)
        y_train_hour_symbol_list.append(y_train)
        X_test_hour_symbol_list.append(X_test)
        y_test_hour_symbol_list.append(y_test)
    X_train_hour_symbol.append(X_train_hour_symbol_list)
    y_train_hour_symbol.append(y_train_hour_symbol_list)
    X_test_hour_symbol.append(X_test_hour_symbol_list)
    y_test_hour_symbol.append(y_test_hour_symbol_list)
# + id="WCv7PeQ_1k8B" colab_type="code" outputId="f4d4ad2b-d6e7-48fc-c3ce-ce060130e684" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Sanity check: shape of the 2h / BTC.X training features.
X_train_hour_symbol[0][0].shape
# + id="jJ6RjRPr1FtE" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
# Fit one ordinary-least-squares model per (horizon, symbol) pair, mirroring
# the nested-list layout of the train/test splits.
reg_model_hour_symbol = []
for k_index in range(len(K_LIST)):
    reg_model_hour_symbol_list = []
    for symbol_index in range(len(SYMBOLS_LIST)):
        regression_model = LinearRegression()
        regression_model.fit(X_train_hour_symbol[k_index][symbol_index], y_train_hour_symbol[k_index][symbol_index])
        # NOTE(review): this score() result is computed but discarded; the
        # evaluation cell below recomputes it anyway.
        regression_model.score(X_test_hour_symbol[k_index][symbol_index], y_test_hour_symbol[k_index][symbol_index])
        reg_model_hour_symbol_list.append(regression_model)
    reg_model_hour_symbol.append(reg_model_hour_symbol_list)
# + id="sJmfIg4m1Mvm" colab_type="code" outputId="53d916ee-1725-41ac-bb11-e167b19e83f7" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from sklearn.metrics import mean_squared_error
import math
# Evaluate every fitted regression model: R^2 and RMSE on its test split,
# then summary statistics of R^2 across all (horizon, symbol) pairs.
total_r_squared = []
total_r_squared_sum = 0
for k_index in range(len(K_LIST)):
    for symbol_index in range(len(SYMBOLS_LIST)):
        regression_model = reg_model_hour_symbol[k_index][symbol_index]
        X_test = X_test_hour_symbol[k_index][symbol_index]
        X_train = X_train_hour_symbol[k_index][symbol_index]
        y_test = y_test_hour_symbol[k_index][symbol_index]
        y_train = y_train_hour_symbol[k_index][symbol_index]
        rSquared = regression_model.score(X_test, y_test)
        y_predict = regression_model.predict(X_test)
        # Conventional (y_true, y_pred) argument order; MSE is symmetric,
        # so the value is unchanged.
        regression_model_mse = mean_squared_error(y_test, y_predict)
        RMSE = math.sqrt(regression_model_mse)
        total_r_squared.append(rSquared)
        total_r_squared_sum = total_r_squared_sum + rSquared
        print("R2 for", K_LIST[k_index], "h-", SYMBOLS_LIST[symbol_index], ":", rSquared)
        print("RMSE for", K_LIST[k_index], "h-", SYMBOLS_LIST[symbol_index], ":", RMSE, "\n")
print(total_r_squared_sum)
# Mean R^2: divide by the actual number of evaluated models instead of the
# previous hard-coded 20 (= len(K_LIST) * len(SYMBOLS_LIST)).
print(total_r_squared_sum / len(total_r_squared))
print(np.var(total_r_squared))
# + id="zlBvNapC1PpT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="60944bc9-e3de-4467-c482-0828a312cd9b"
# The following cells inspect the LAST model/feature frame left over from
# the loops above (regression_model and X are loop leftovers).
len(list(regression_model.coef_[0]))
# + id="w7128sZ8jgEX" colab_type="code" colab={}
# Map each feature name to its fitted coefficient.
coefs = {c:list(regression_model.coef_[0])[i] for i, c in enumerate(X.columns)}
# + id="M2OGEQS4j2zx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="0c997e92-d313-4f75-8f47-fc59caa1acd8"
# Rank features by coefficient value (most negative first).
sorted(coefs.items(), key=lambda x: x[1])
# + id="-1asdLkUkXem" colab_type="code" colab={}
| LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OPTIMADE Tutorial Exercises
#
# [](https://colab.research.google.com/github/Materials-Consortia/optimade-tutorial-exercises/blob/main/notebooks/exercises.ipynb)
# [](https://mybinder.org/v2/gh/Materials-Consortia/optimade-tutorial-exercises/HEAD?filepath=notebooks%2Fexercises.ipynb)
# [](https://github.com/Materials-Consortia/optimade-tutorial-exercises)
# ## Preface
#
# This repository hosts general tutorials on the OPTIMADE specification and particular database implementations of the API.
# These open-ended exercises were initially provided to accompany the following workshops:
# - NOMAD CoE [Tutorial 6: OPTIMADE](https://th.fhi-berlin.mpg.de/meetings/nomad-tutorials/index.php?n=Meeting.Tutorial6), 7-8 September 2021
# - ICTP-EAIFR [Training School: Working with Materials Databases and OPTIMADE](https://eaifr.ictp.it/about/news/ml-for-es-and-md/), November-December 2021.
#
# This document is hosted on [GitHub](https://github.com/Materials-Consortia/optimade-tutorial-exercises), and all feedback or suggestions for new exercises can be provided as an issue or pull request in that repository.
#
# If you would like to get involved with the OPTIMADE consortium, you can find some more details on the [OPTIMADE home page](https://optimade.org/#get-involved).
#
# ### Contributors
#
# - [<NAME>](https://ml-evs.science), *UCLouvain* (repository and general exercises)
# - [<NAME>](https://github.com/mkhorton), *LBNL* (`pymatgen` exercise)
# - [<NAME>](https://github.com/blokhin), *Tilde Informatics* (typos and bug fixes)
# - [<NAME>](https://github.com/ctoher), *Duke University* (AFLOW exercise)
# - [<NAME>](https://github.com/tachyontraveler), *Northwestern U.* (OQMD exercise)
# - [<NAME>](https://github.com/JPBergsma), *CECAM* (typos, testing and feedback)
#
# ## Introduction
# The OPTIMADE specification defines a web-based JSON API that is implemented by many [different materials databases](https://www.optimade.org/providers-dashboard) to allow users to query the underlying data with the same syntax and response format.
# There are several tools that can access these APIs, for example, any web browser, any programming language that can make HTTP requests, or common command-line tools such as `curl` or `wget`.
#
# There are also specialist tools, developed by members of the OPTIMADE community.
# You may have heard about three such tools in other tutorials and talks:
# 1. [The Materials Cloud web-based OPTIMADE client](https://dev-tools.materialscloud.org/optimadeclient/).
# 2. [The optimade.science web-based aggregator](https://optimade.science).
# 3. [`pymatgen`'s built-in OPTIMADE client](https://pymatgen.org/pymatgen.ext.optimade.html?highlight=optimade#module-pymatgen.ext.optimade).
#
# Each of these clients can send requests to multiple OPTIMADE providers *simultaneously*, based on programmatic [providers list](https://providers.optimade.org/).
# You can explore this list at the human-readable [providers dashboard](https://www.optimade.org/providers-dashboard/), where you can see the current OPTIMADE structure count exceeds 17 million!
#
# You may wish to familiarise yourselves with the OPTIMADE API by writing your own queries, scripts or code. Some possible options:
# - Craft (or copy) your own URL queries to a particular OPTIMADE implementation. Some web browsers (e.g., Firefox) will automatically format the JSON response for you (see Exercise 1).
# - Use command-line tools such as [`curl`](https://curl.se/) or [`wget`](https://www.gnu.org/software/wget/) to receive data in your terminal, or pipe it to a file. You could use the tool [`jq`](https://stedolan.github.io/jq/) to format the JSON response.
# - Make an appropriate HTTP request from your programming language of choice. For Python, you could use the standard library [urllib.request](https://docs.python.org/3/library/urllib.request.html) or the more ergonomic external library [requests](https://requests.readthedocs.io/en/master/). Some example code for Python is provided in Exercise 1 below. In Javascript, you can just use `fetch(...)` or a more advanced [optimade client](https://github.com/tilde-lab/optimade-client).
#
# If you are following these tutorials as part of a school or workshop, please do not hesitate to ask about how to get started with any of the above tools!
# ## Exercise 1
# This aim of this exercise is to familiarise yourself with the OPTIMADE JSON API.
# In the recent OPTIMADE paper [[1](#ref1)], we provided the number of results to a set of queries across all OPTIMADE implementations, obtained by applying the same filter to the structures endpoint of each database.
# The filters are:
# - Query for structures containing a group IV element: `elements HAS ANY "C", "Si", "Ge", "Sn", "Pb"`.
# - As above, but return only binary phases: `elements HAS ANY "C", "Si", "Ge", "Sn", "Pb" AND nelements=2`.
# - This time, exclude lead and return ternary phases: `elements HAS ANY "C", "Si", "Ge", "Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3`.
#
# - In your browser, try visiting the links in Table 1 of the OPTIMADE paper [[1](#ref1)] (clickable links in arXiv version [[2](#ref2)]), which is reproduced below.
# - Familiarise yourself with the standard JSON:API output fields (`data`, `meta` and `links`).
# - You will find the crystal structures returned for the query as a list under the `data` key, with the OPTIMADE-defined fields listed under the `attributes` of each list entry.
# - The `meta` field provides useful information about your query, e.g. `data_returned` shows how many results there are in total, not just in the current page of the response (you can check if the table still contains the correct number of entries, or if it is now out of date).
# - The `links` field provides links to the next or previous pages of your response, in case you requested more structures than the `page_limit` for that implementation.
# - Choose one particular entry to focus on: replace the `filter` URL parameter with `/<structure_id>` for the `id` of one particular structure (e.g. `https://example.org/optimade/v1/structures/<structure_id>`).
# - Explore other endpoints provided by each of these providers. If they serve "extra" fields (i.e. those containing the provider prefix), try to find out what these fields mean by querying the `/info/structures` endpoint.
# - Try performing the same queries with some of the tools listed above, or in scripts of your own design.
#
# <center>
# <table>
# <tr>
# <th>Provider</th>
# <th>N<sub>1</sub></th>
# <th>N<sub>2</sub></th>
# <th>N<sub>3</sub></th>
# </tr>
# <tr>
# <td><a href="http://www.aflow.org">AFLOW</a><span class="citation" data-cites="AFLOW_database aflow_fleet_chapter"></span> </td>
# <td><a href="http://aflow.org/API/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">700,192</a> </td>
# <td><a href="http://aflow.org/API/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">62,293</a></td>
# <td><a href="http://aflow.org/API/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">382,554</a></td>
# </tr>
# <tr>
# <td><a href="https://www.crystallography.net/cod">Crystallography Open Database</a> (COD)<span class="citation" data-cites="Grazulis_COD_2009 Grazulis_COD_2012"></span></td>
# <td><a href="https://www.crystallography.net/cod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">416,314</a> </td>
# <td><a href="https://www.crystallography.net/cod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">3,896</a> </td>
# <td><a href="https://www.crystallography.net/cod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">32,420</a></td>
# </tr>
# <tr>
# <td><a href="https://www.crystallography.net/tcod">Theoretical Crystallography Open Database</a> (TCOD)<span class="citation" data-cites="Merkys_TCOD_2017"></span> </td>
# <td><a href="https://www.crystallography.net/tcod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">2,631</a> </td>
# <td><a href="https://www.crystallography.net/tcod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">296</a> </td>
# <td><a href="https://www.crystallography.net/tcod/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">660</a></td>
# </tr>
# <tr>
# <td><a href="https://materialscloud.org">Materials Cloud</a><span class="citation" data-cites="AiiDA AiiDA2 MaterialsCloud"></span> </td>
# <td><a href="https://aiida.materialscloud.org/optimade-sample/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">886,518</a> </td>
# <td><a href="https://aiida.materialscloud.org/optimade-sample/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">801,382</a> </td>
# <td><a href="https://aiida.materialscloud.org/optimade-sample/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">103,075</a></td>
# </tr>
# <tr>
# <td><a href="http://materialsproject.org">Materials Project</a><span class="citation" data-cites="Materials_Project Jain_2011 Ong_pymatgen_2013 Mathew_Atomate_CMS_2017"></span> </td>
# <td><a href="https://optimade.materialsproject.org/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">27,309</a> </td>
# <td><a href="https://optimade.materialsproject.org/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">3,545</a> </td>
# <td><a href="https://optimade.materialsproject.org/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">10,501</a></td>
# </tr>
# <tr>
# <td><a href="https://nomad-lab.eu">Novel Materials Discovery Laboratory</a> (NOMAD)<span class="citation" data-cites="NOMAD_2017 NOMAD_2018"></span> </td>
# <td><a href="https://nomad-lab.eu/prod/rae/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">3,359,594</a> </td>
# <td><a href="https://nomad-lab.eu/prod/rae/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">532,123</a> </td>
# <td><a href="https://nomad-lab.eu/prod/rae/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">1,611,302</a></td>
# </tr>
# <tr>
# <td><a href="https://odbx.science">Open Database of Xtals</a> (odbx)<span class="citation" data-cites="odbx-matador"></span> </td>
# <td><a href="https://optimade.odbx.science/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">55</a> </td>
# <td><a href="https://optimade.odbx.science/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">54</a> </td>
# <td><a href="https://optimade.odbx.science/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">0</a></td>
# </tr>
# <tr>
# <td><a href="http://openmaterialsdb.se">Open Materials Database</a> (<em>omdb</em>)<span class="citation" data-cites="HTTKOMDB"></span> </td>
# <td><a href="http://optimade.openmaterialsdb.se/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">58,718</a> </td>
# <td><a href="http://optimade.openmaterialsdb.se/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">690</a> </td>
# <td><a href="http://optimade.openmaterialsdb.se/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">7,428</a></td>
# </tr>
# <tr>
# <td><a href="http://oqmd.org">Open Quantum Materials Database</a> (OQMD)<span class="citation" data-cites="OQMD"></span> </td>
# <td><a href="http://oqmd.org/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb"">153,113</a> </td>
# <td><a href="http://oqmd.org/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn","Pb" AND nelements=2">11,011</a> </td>
# <td><a href="http://oqmd.org/optimade/v1/structures?filter=elements HAS ANY "C","Si","Ge","Sn" AND NOT elements HAS "Pb" AND elements LENGTH 3">70,252</a></td>
# </tr>
# </table>
# </center>
#
#
# <span id="ref1">[1]</span> Andersen *et al.*, "OPTIMADE, an API for exchanging materials data", *Sci Data* **8**, 217 (2021) [10.1038/s41597-021-00974-z](https://doi.org/10.1038/s41597-021-00974-z).
#
# <span id="ref2">[2]</span> Andersen *et al.*, "OPTIMADE, an API for exchanging materials data" (2021) [arXiv:2103.02068](https://arxiv.org/abs/2103.02068).
# ## Exercise 2
#
# The filters from Exercise 1 screened for group IV containing compounds, further refining the query to exclude lead, and finally to include only ternary phases.
#
# - Choose a suitable database and modify the filters from Exercise 1 to search for binary [III]-[V] semiconductors.
# - A "suitable" database here is one that you think will have good coverage across this chemical space.
# - Using the `chemical_formula_anonymous` field, investigate the most common stoichiometric ratios between the constituent elements, e.g. 1:1, 2:1, etc.
# - You may need to follow pagination links (`links->next` in the response) to access all available data for your query, or you can try adding the `page_limit=100` URL parameter to request more structures per response.
# - Apply the same filter to another database and assess the similarity between the results, thinking carefully about how the different focuses of each database and different methods in their construction/curation could lead to biases in this outcome.
# - For example, an experimental database may have one crystal structure entry per experimental sample studied, in which case the most useful (or "fashionable") compositions will return many more entries, especially when compared to a database that curates crystal structures such that each ideal crystal has one canonical entry (e.g., a database of minerals).
# - Try to use the query you have constructed in the multi-provider clients (linked above), to query all OPTIMADE providers simultaneously.
# ## Exercise 3 (pymatgen)
# This interactive exercise will explore the use of the OPTIMADE client implemented in the `pymatgen` Python library. This exercise can be found in this repository under `./notebooks/demonstration-pymatgen.ipynb` or accessed online in [Google Colab](https://colab.research.google.com/github/Materials-Consortia/optimade-tutorial-exercises/blob/main/notebooks/demonstration-pymatgen-for-optimade-queries.ipynb) (or equivalent notebook runners, such as [Binder](https://mybinder.org/v2/gh/Materials-Consortia/optimade-tutorial-exercises/HEAD?filepath=notebooks%2Fdemonstration-pymatgen-for-optimade-queries.ipynb)).
#
# [](https://colab.research.google.com/github/Materials-Consortia/optimade-tutorial-exercises/blob/main/notebooks/demonstration-pymatgen-for-optimade-queries.ipynb)
# [](https://mybinder.org/v2/gh/Materials-Consortia/optimade-tutorial-exercises/HEAD?filepath=notebooks%2Fdemonstration-pymatgen-for-optimade-queries.ipynb)
# ## Exercise 4
#
# There are many useful properties that the OPTIMADE specification has not standardized.
# This is typically because the use of the property requires additional context, e.g., reporting a "band gap" without describing how it was calculated or measured, or properties that are only meaningful in the context of a database, e.g., relative energies that depend on other reference calculations.
# For this reason, the OPTIMADE specification allows implementations to serve their own fields with an appropriate "provider prefix" to the field name, and a description at the `/info/structures` endpoint.
#
# One computed property that is key to many high-throughput studies is the *chemical stability* ($\delta$) of a crystal structure, i.e. whether the structure is predicted to spontaneously decompose into a different phase (or phases).
# This is typically computed as the distance from the convex hull in composition-energy space, with a value of 0 (or <0, if the target structure was not used to compute the hull itself) indicating a stable structure.
#
# - Interrogate the `/info/structures` endpoints of the OPTIMADE implementations that serve DFT data (e.g., Materials Project, AFLOW, OQMD, etc.) and identify those that serve a field that could correspond to hull distance, or other stability metrics.
# - Construct a filter that allows you to screen a database for metastable materials (i.e., $0 < \delta < 25\text{ meV/atom}$) according to this metric.
# - Try to create a filter that can be applied to multiple databases simultaneously (e.g., apply `?filter=_databaseA_hull_distance < 25 OR _databaseB_stability < 25`). What happens when you run this filter against a database that does not contain the field?
# ## Exercise 5
# As a final general exercise, consider your own research problems and how you might use OPTIMADE.
# If you have any suggestions or feedback about how OPTIMADE can be made more useful for you, please start a discussion on the [OPTIMADE MatSci forum](https://matsci.org/c/optimade/29) or raise an issue at the appropriate [Materials-Consortia GitHub](https://github.com/Materials-Consortia/) repository.
#
# Some potential prompts:
#
# - What additional fields or entry types should OPTIMADE standardize to be most useful to you?
# - How could the existing tools be improved, or what new tools could be created to make OPTIMADE easier to use?
# - What features from other APIs/databases that you use could be adopted within OPTIMADE?
# ## Exercise 6 (AFLOW)
# The AFLOW database is primarily built by decorating crystallographic prototypes, and a list of the most common prototypes can be found in the [Library of Crystallographic Prototypes](https://aflow.org/prototype-encyclopedia/).
# The prototype labels can also be used to search the database for entries
# with relaxed structures matching a particular prototype, using the AFLOW
# keyword `aflow_prototype_label_relax`; a full list of AFLOW keywords can be
# found at AFLOW's `/info/structures` endpoint (http://aflow.org/API/optimade/v1.0/info/structures).
# Searches can be performed for prototype labels using OPTIMADE by prepending the `_aflow_` provider prefix to the keyword: `_aflow_aflow_prototype_label_relax`.
#
# - Use OPTIMADE to search AFLOW for NaCl in the rock salt structure (prototype label `AB_cF8_225_a_b`)
# - Use OPTIMADE to search AFLOW for lead-free halide cubic perovskites with a band gap greater than 3 eV: (cubic perovskite prototype label is `AB3C_cP5_221_a_c_b`)
# ## Exercise 7 (OQMD)
# This interactive exercise explores the OQMD's OPTIMADE API, and demonstrates how you can train
# machine learning models on OPTIMADE data. The notebook is available at `./notebooks/exercise7-oqmd-optimade-tutorial` and can also be accessed online with [Colab](https://colab.research.google.com/github/Materials-Consortia/optimade-tutorial-exercises/blob/main/notebooks/exercise7-oqmd-optimade-tutorial.ipynb) or [Binder](https://mybinder.org/v2/gh/Materials-Consortia/optimade-tutorial-exercises/HEAD?filepath=notebooks/exercise7-oqmd-optimade-tutorial.ipynb) (buttons below).
#
# [](https://colab.research.google.com/github/Materials-Consortia/optimade-tutorial-exercises/blob/main/notebooks/exercise7-oqmd-optimade-tutorial.ipynb)
# [](https://mybinder.org/v2/gh/Materials-Consortia/optimade-tutorial-exercises/HEAD?filepath=notebooks/exercise7-oqmd-optimade-tutorial.ipynb)
#
# ## Example Python code
#
# You may find the following Python code snippets useful in the above exercises. This document can be opened as a Jupyter notebook using the Colab or Binder buttons above, or by downloading the notebook from the GitHub repository.
# +
# Construct a query URL.
#
# You should be able to use any valid OPTIMADE implementation's
# database URL with any valid query
#
# Lets choose a random provider for now:
import random

# A few known OPTIMADE implementations; any base URL should accept any valid query.
some_optimade_base_urls = [
    "https://optimade.materialsproject.org",
    "http://crystallography.net/cod/optimade",
    "https://nomad-lab.eu/prod/rae/optimade/",
]
# Pick one implementation at random for this session.
database_url = random.choice(some_optimade_base_urls)

# OPTIMADE filter: structures containing at least one group-IV element.
query = 'elements HAS ANY "C", "Si", "Ge", "Sn", "Pb"'
# Request only the first 3 matching structures per page.
params = dict(filter=query, page_limit=3)
query_url = database_url + "/v1/structures"
# -
# Using the third-party requests library:
# !pip install requests
# Import the requests library and make the query
import requests
# HTTP GET against the chosen OPTIMADE endpoint; `params` is URL-encoded
# into the query string (filter + page_limit).
response = requests.get(query_url, params=params)
print(response)  # status line, e.g. <Response [200]> on success
# Decode the JSON body; per the code below it carries "data" and "meta" keys.
json_response = response.json()
# +
# Explore the first page of results
import pprint
print(json_response.keys())
# "data" holds the list of structure entries for this page.
structures = json_response["data"]
# "meta" holds query metadata such as data_returned (total match count).
meta = json_response["meta"]
print(f"Query {query_url} returned {meta['data_returned']} structures")
print("First structure:")
# Pretty-print the first entry so its nested attributes are readable.
pprint.pprint(structures[0])
# -
# Using pagination to loop multiple requests
# We want to add additional page_limit and page_offset parameters to the query
# Page through the results using page_limit/page_offset URL parameters.
offset = 0
page_limit = 10
while True:
    params = {
        "filter": query,
        "page_limit": page_limit,
        "page_offset": offset
    }
    response = requests.get(query_url, params=params).json()
    # Print the IDs in the current page of results
    for result in response["data"]:
        print(result["id"])
    offset += page_limit
    # meta->data_returned is the total number of entries matching the query,
    # so stop as soon as the next offset would run past the end.  Using <=
    # (rather than <) avoids issuing one final request for an empty page when
    # the total is an exact multiple of page_limit.
    if response["meta"]["data_returned"] <= offset:
        break
    # Safety cap for this example: never request more than ~100 entries.
    if offset > 100:
        break
| notebooks/exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dice black jack king of the hill controller implementation
# * Menší pozitivní rozdíl do 21 vyhrává (negativní rozdíl má nejvyšší možnou penalizaci)
# * Při stejném rozdílu vyhrává dealer
# * Dealer - nemůže hrát přes 17
# * Bettor - nemůže hrát přes 21
# * První hraje Bettor
#
# ## Třída reprezentující hráče
# +
import random
class Player():
    """A dice-blackjack player that can act as either bettor or dealer.

    All decision making is injected through four handler callables so that
    different strategies can be simulated with the same Player class:
    two stay/stop strategies (bettor and dealer), a bet-sizing strategy,
    and a call strategy.
    """

    def __init__(self, log, name,
                 bettor_strategy_handler,
                 dealer_strategy_handler,
                 bet_size_handler,
                 call_strategy_handler,
                 ):
        self.log = log
        self.name = name
        self.wallet = 1000        # starting bankroll
        self.total_value = 0      # running dice total for the current round
        self.bettor_strategy_handler = bettor_strategy_handler
        self.dealer_strategy_handler = dealer_strategy_handler
        self.bet_size_handler = bet_size_handler
        self.call_strategy_handler = call_strategy_handler

    def roll_dice(self):
        """Return one roll of a fair six-sided die."""
        return random.randint(1, 6)

    def get_penalty(self):
        """Distance below 21; busting past 21 is punished with a flat 100."""
        shortfall = 21 - self.total_value
        return shortfall if shortfall >= 0 else 100

    def new_round(self):
        """Reset the running dice total before a new round starts."""
        self.total_value = 0

    def bettor_play(self):
        """Roll while the bettor strategy says to continue; hard stop at 21+."""
        while True:
            self.total_value += self.roll_dice()
            keep_going = self.bettor_strategy_handler(self.total_value, self.log)
            if self.total_value >= 21 or not keep_going:
                break

    def dealer_play(self):
        """Roll while the dealer strategy says to continue; hard stop at 17+."""
        while True:
            self.total_value += self.roll_dice()
            keep_going = self.dealer_strategy_handler(self.total_value, self.log)
            if self.total_value >= 17 or not keep_going:
                break

    def bet(self):
        """Ask the bet-sizing strategy how much to wager this round."""
        return self.bet_size_handler(self.total_value, self.log)

    def call(self, bet):
        """Ask the calling strategy whether to call the given bet."""
        return self.call_strategy_handler(self.total_value, self.log, bet)
def dummy_bettor_handler(value, log):
    """Baseline bettor strategy: always keep rolling (the 21 cap stops it)."""
    return True
def dummy_dealer_handler(value, log):
    """Baseline dealer strategy: always keep rolling (the 17 cap stops it)."""
    return True
def dummy_bet_handler(value, log):
    """Baseline bet sizing: always wager a flat 5."""
    return 5
def dummy_check_handler(value, log, bet):
    """Baseline calling strategy: always call the bettor's bet."""
    return True
# -
# ## <NAME>
# +
def game_round(dealer, bettor, log):
    """Play one round between *dealer* and *bettor* and settle the wager.

    The bettor rolls first and places a bet; the dealer then rolls and
    decides whether to call.  If the dealer folds, the bettor collects a
    1-chip ante.  On a call, the smaller positive distance from 21 wins
    (busting costs a flat 100 penalty via get_penalty) and ties go to the
    dealer.  Each round appends one tuple to *log*:
    (bettor, dealer, winner, loser, bet_size, call_flag, bettor_value, dealer_value).
    """
    bettor.new_round()
    dealer.new_round()
    bettor.bettor_play()
    bet_size = bettor.bet()
    dealer.dealer_play()
    if dealer.call(bet_size):
        if bettor.get_penalty() < dealer.get_penalty():
            # Bettor is strictly closer to 21: bettor takes the pot.
            bettor.wallet += bet_size
            dealer.wallet -= bet_size
            log.append((bettor.name, dealer.name, bettor.name, dealer.name, bet_size, 1, bettor.total_value, dealer.total_value))
        else:
            # Equal penalties go to the dealer.
            bettor.wallet -= bet_size
            dealer.wallet += bet_size
            log.append((bettor.name, dealer.name, dealer.name, bettor.name, bet_size, 1, bettor.total_value, dealer.total_value))
    else:
        # Dealer folded: bettor collects the 1-chip ante.
        bettor.wallet += 1
        dealer.wallet -= 1
        log.append((bettor.name, dealer.name, bettor.name, dealer.name, bet_size, 0, bettor.total_value, dealer.total_value))
def simulation(players, n, log):
    """Run *n* rounds, alternating dealer/bettor roles between two players.

    Prints each player's final wallet and writes the full round log to
    data/output.csv (the directory is created if missing).
    """
    for k in range(n):
        # players[0] deals, players[1] bets; swap roles every round.
        game_round(players[0], players[1], log)
        players = players[::-1]
    for p in players:
        print("{}\t{}".format(p.name, p.wallet))
    output = "bettor_name,dealer_name,winner,loser,bet_size,call,bettor_value,dealer_value\n"
    output += "\n".join([",".join(map(str, line)) for line in log])
    # Create the output directory if needed; the original code raised
    # FileNotFoundError on a fresh checkout without a data/ folder.
    import os
    os.makedirs("data", exist_ok=True)
    with open("data/output.csv", "w") as f:
        f.write(output)
# Shared round log; simulation() appends one tuple per round and dumps it to CSV.
log = []
# Two identical baseline players using the dummy strategies.
p1 = Player(log, "A", dummy_bettor_handler, dummy_dealer_handler,
            dummy_bet_handler, dummy_check_handler)
p2 = Player(log, "B", dummy_bettor_handler, dummy_dealer_handler,
            dummy_bet_handler, dummy_check_handler)
# Run 100 rounds, alternating dealer/bettor roles each round.
simulation([p1, p2], 100, log)
# -
# ## <NAME>
# +
def better_bettor_handler(value, log):
    """Bettor keeps rolling only while the total is below 18."""
    return value < 18
def better_dealer_handler(value, log):
    """Dealer always keeps rolling (the 17 cap stops the loop)."""
    return True
def better_bet_handler(value, log):
    """Wager a uniformly random amount between 1 and 5 chips."""
    return random.randint(1, 5)
def better_check_handler(value, log, bet):
    """Call the bet unless this player has already busted past 21."""
    return not value > 21
# Fresh log for the head-to-head comparison run.
log = []
# Pit the smarter strategies against the dummy baseline for 100 rounds.
p1 = Player(log, "Better player", better_bettor_handler, better_dealer_handler,
            better_bet_handler, better_check_handler)
p2 = Player(log, "Dummy player", dummy_bettor_handler, dummy_dealer_handler,
            dummy_bet_handler, dummy_check_handler)
simulation([p1, p2], 100, log)
# -
# Display the raw round-by-round log in the notebook output.
log
| blackjack_koth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''ProgramData'': virtualenv)'
# name: python37364bitprogramdatavirtualenv99403c2e8abd4ba0909557516bfee9d9
# ---
# + [markdown] id="yA_geNUyQidh" colab_type="text"
# # Predicting the value of a house
# > Top 3% in a data science competition
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - author: <NAME>
# - image: images/housing.png
# - categories: []
# -
# Recently I’ve made my way into Kaggle. If it’s new for you, I highly recommend checking it out. Kaggle is a platform where organizations host data science competitions. They come up with a data science challenge, make the data available to Kaggle users, and many data scientist worldwide compete to get the highest score on the leaderboard. After a defined period the competition ends and the winner is awarded with a (monetary) price.
#
#
#
# Participants also share their code (kernels), and have discussions on the data. This makes it an excellent platform to learn. The competitions can be a bit intimidating, since it can have extremely large datasets (100GB upwards), the objectives can be challenging (imaging, audio, text, combinations) and figuring out how to submit is not always trivial. But of course you can start with simpler competitions such as the [classic Titanic example](https://www.kaggle.com/c/titanic).
#
#
#
# Since I’m quite familiar with tabular data I decided to give the housing competition a try. [The training data](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data) consists of many features describing about 1500 houses and their selling price. After numerous experiments I ended up with a top 3% score on the leaderboard before throwing in the proverbial towel. The rush of inching up the leaderboard made it a great experience!
#
#
#
# Afterwards I wrote about the [main insights, learnings and questions](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/discussion/170472)
#
# Next up: probably an imaging competition with Pytorch or fast.ai
| _notebooks/2020-07-26-housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dicom to Bids
from src.data import subjects
from src.dicom_to_bids import dicom_to_bids
# Convert every subject queued in `subjects.to_convert_to_BIDs` from DICOM
# to the BIDS layout (see src/dicom_to_bids for the implementation).
dicom_to_bids.dicom_to_bids(subjects.to_convert_to_BIDs)
| notebooks/01_dicom_to_bids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice Querying the Snowexsql Database
#
# (12 minutes)
#
# Learning Objectives:
# - First taste of the database!
# - Code snippets to extract and prep data.
# - Generate ideas for project pitches.
# +
# standard imports
import numpy as np
import matplotlib.pyplot as plt
import datetime
#database imports
from snowexsql.db import get_db
from snowexsql.data import PointData, LayerData, ImageData, SiteData
from snowexsql.conversions import query_to_geopandas
# +
# load the database
# Connection string for the shared SnowEx database (credentials redacted here).
db_name = 'snow:<EMAIL>/snowex'
# get_db returns a SQLAlchemy engine plus a session for issuing queries.
engine, session = get_db(db_name)
print('snowexsql database successfully loaded!')
# -
# ## Snow Pit data are contained in the following data tables:
#
# _PointData_ = pit ruler depths, SWE.
# _LayerData_ = density, temperature, stratigraphy, etc.
# _SiteData_ = siteID, airTemp, vegetation, visit time, weather, etc.
#
# ### Example 1: Let's find all the pits that overlap with an airborne sensor of interest!
#
# First, it would be helpful to know, which of the airborne sensors are part of the database, right?
# +
# Query the session using .surveyors() to generate a list
# NOTE(review): this first query object is never executed; the distinct query
# below rebuilds it from scratch, so `qry` here is effectively unused.
qry = session.query(ImageData.surveyors)
# Locate all that are distinct
airborne_sensors_list = session.query(ImageData.surveyors).distinct().all()
print('list of airborne sensors by "surveyor" name: \n', airborne_sensors_list)
# -
# #### 1a). Unsure of the flight date, but know which sensor you'd like to overlap with, here's how:
# +
# Airborne sensor from list above
sensor = 'UAVSAR team, JPL'
# Form on the Images table that returns Raster collection dates
qry = session.query(ImageData.date)
# Filter for UAVSAR data
qry = qry.filter(ImageData.surveyors == sensor)
# Grab the unique dates
qry = qry.distinct()
# Execute the query
dates = qry.all()
# Clean up the dates (.all() returns 1-tuples, so unpack element 0 of each)
dates = [d[0] for d in dates]
dlist = [str(d) for d in dates]
dlist = ", ".join(dlist)
print('%s flight dates are: %s' %(sensor, dlist))
# Find all the snow pits done on these days
qry = session.query(SiteData.geom, SiteData.site_id, SiteData.date)
qry = qry.filter(SiteData.date.in_(dates))
# Return a geopandas df
df = query_to_geopandas(qry, engine)
# View the returned pandas dataframe!
print(df.head())
# Close your session to avoid hanging transactions
session.close()
# -
# #### 1b). Want to select an exact flight date match? Here's how:
# +
# Pick a day from the list of dates
dt = dates[0]
# Find all the snow pits done on these days
# NOTE(review): the previous cell called session.close(); confirm the session
# is usable again here (SQLAlchemy sessions typically reopen on next use).
qry = session.query(SiteData.geom, SiteData.site_id, SiteData.date)
qry = qry.filter(SiteData.date == dt)
# Return a geopandas df
df_exact = query_to_geopandas(qry, engine)
print('%s pits overlap with %s on %s' %(len(df_exact), sensor, dt))
# View snow pits that align with first UAVSAR date
df_exact.head()
# -
# #### 1c). Want to select a range of dates near the flight date? Here's how:
# +
# Form a date range to query on either side of our chosen day (dt-1, dt, dt+1)
date_range = [dt + i * datetime.timedelta(days=1) for i in [-1, 0, 1]]
# Find all the snow pits done on these days
qry = session.query(SiteData.geom, SiteData.site_id, SiteData.date)
qry = qry.filter(SiteData.date.in_(date_range))
# Return a geopandas df
df_range = query_to_geopandas(qry, engine)
# Clean up dates (for print statement only)
dlist = [str(d) for d in date_range]
dlist = ", ".join(dlist)
print('%s pits overlap with %s on %s' %(len(df_range), sensor, dlist))
# View snow pits that are +/- 1 day of the first UAVSAR flight date
df_range.sample(10)
# -
# #### 1d). Have a known date that you wish to select data for, here's how:
# +
# Find all the data that was collected on 2-12-2020
dt = datetime.date(2020, 2, 12)
#--------------- Point Data -----------------------------------
# Grab all Point data instruments from our date
point_instruments = session.query(PointData.instrument).filter(PointData.date == dt).distinct().all()
point_type = session.query(PointData.type).filter(PointData.date == dt).distinct().all()
# Clean up point data (i.e. remove tuple)
point_instruments = [p[0] for p in point_instruments]
point_instruments = ", ".join(point_instruments)
point_type = [p[0] for p in point_type]
point_type = ", ".join(point_type)
print('Point data on %s are: %s, with the following list of parameters: %s' %(str(dt), point_instruments, point_type))
#--------------- Layer Data -----------------------------------
# Grab all Layer data instruments from our date
layer_instruments = session.query(LayerData.instrument).filter(LayerData.date == dt).distinct().all()
layer_type = session.query(LayerData.type).filter(LayerData.date == dt).distinct().all()
# Clean up layer data
# NOTE(review): only the layer query filters out None instruments here -
# presumably only LayerData can carry NULL instrument values; confirm whether
# the point/image stanzas above and below need the same guard.
layer_instruments = [l[0] for l in layer_instruments if l[0] is not None]
layer_instruments = ", ".join(layer_instruments)
layer_type = [l[0] for l in layer_type]
layer_type = ", ".join(layer_type)
print('\nLayer Data on %s are: %s, with the following list of parameters: %s' %(str(dt), layer_instruments, layer_type))
#--------------- Image Data -----------------------------------
# Grab all Image data instruments from our date
image_instruments = session.query(ImageData.instrument).filter(ImageData.date == dt).distinct().all()
image_type = session.query(ImageData.type).filter(ImageData.date == dt).distinct().all()
# Clean up image data (i.e. remove tuple)
image_instruments = [i[0] for i in image_instruments]
image_instruments = ", ".join(image_instruments)
image_type = [i[0] for i in image_type]
image_type = ", ".join(image_type)
print('\nImage Data on %s are: %s, with the following list of parameters: %s' %(str(dt), image_instruments, image_type))
# -
# ### Nice work, almost done here!
# ## Classify pit data based on the depth and vegetation matrix
# ### Example 2:
#
# #### 2a).Distinguish pits by vegetation coverage:
# - treeless (0% tree cover)
# - sparse (1-30% tree cover)
# - dense (31-100% tree cover)
#
# *vegetation classes assigned based on optical imagery: tree density map, Nov. 2010 WorldView-2 Imagery
def parse_veg_class(site_id):
    """Map a snow pit site_id to a vegetation class.

    The first character of the pit ID encodes vegetation coverage:
    1-3 -> 'treeless', 4-6 -> 'sparse', 7-9 -> 'dense'.
    Returns None when the leading character is not one of those digits
    (e.g. the outlier 'TS' site).
    """
    leading = site_id[0]
    if not leading.isnumeric():
        return None
    digit = int(leading)
    classes = {'treeless': (1, 2, 3), 'sparse': (4, 5, 6), 'dense': (7, 8, 9)}
    for label, digits in classes.items():
        if digit in digits:
            return label
    return None
# #### 2b). Distinguish pits by snow depth classes:
# - shallow (<90cm)
# - medium (90-122cm)
# - deep (>122cm)
#
# *depth classes assigned based on 2017 ASO snow depth lidar
def parse_depth_class(site_id):
    """Map a snow pit site_id to a depth class (per the 2017 ASO lidar).

    The first character of the pit ID encodes depth:
    1/4/7 -> 'shallow', 2/5/8 -> 'medium', 3/6/9 -> 'deep'.
    Returns None when the leading character is not one of those digits
    (e.g. the outlier 'TS' site).
    """
    leading = site_id[0]
    if not leading.isnumeric():
        return None
    digit = int(leading)
    classes = {'shallow': (1, 4, 7), 'medium': (2, 5, 8), 'deep': (3, 6, 9)}
    for label, digits in classes.items():
        if digit in digits:
            return label
    return None
# +
# Load the database
db_name = 'snow:<EMAIL>/snowex'
engine, session = get_db(db_name)
# Query for Layer Data
# NOTE(review): `result` is never used afterwards; it only lists the
# distinct layer types available.
result = session.query(LayerData.type).distinct().all()
# Filter for density data
qry = session.query(LayerData).filter(LayerData.type=='density')
# Form our dataframe from the query
df = query_to_geopandas(qry, engine)
df['value'] = df['value'].astype(float) #cast the value as a float (they are strings)
# Parse snow pit data by the veg/depth matrix
df['veg_class'] = [parse_veg_class(i) for i in df['site_id']] #run the parse_veg function for every site_id
df['depth_class'] = [parse_depth_class(i) for i in df['site_id']] #run the parse_depth function for every site_id
# Select columns of interest
col_list = ['site_id', 'date', 'type', 'latitude',
            'longitude', 'depth', 'value', 'veg_class', 'depth_class']
df = df[col_list]
# View a sample --> notice parsed veg_class and depth_class columns were added!
df.sample(5)
# +
# Group by site-id to count classes
gb = df.groupby(['site_id', 'veg_class', 'depth_class'])
# Count unique sites per vegetation class, then per depth class.
print(gb['site_id'].count().groupby('veg_class').count())
print('\n')
print(gb['site_id'].count().groupby('depth_class').count())
# -
# ### Plot
# boxplot for veg_class: density distribution per vegetation class
df.boxplot(column='value', by='veg_class', fontsize='large')
plt.ylabel('Density kg/m3')
plt.tight_layout()
# boxplot for depth_class: density distribution per depth class
df.boxplot(column='value', by='depth_class')
plt.ylabel('Density kg/m3')
plt.tight_layout()
# Great for debugging especially when trying different queries
session.rollback()
# Close your session to avoid hanging transactions
session.close()
| book/tutorials/core-datasets/03_practice-querying.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Test for the `InteractiveViewer`
#
# TODO test for changing predicate selector (toggle edge visibility)
# +
import json
import os
import random
import time
from pathlib import Path
import importnb
import ipywidgets as W
from rdflib import Graph
from requests_cache import CachedSession
from ipyradiant.query.framer import SPARQLQueryFramer
with importnb.Notebook():
try:
from .InteractiveViewer import iv, lw, simple_graph
except ImportError:
from InteractiveViewer import iv, lw, simple_graph
# -
# This notebook is used as part of the `ipyradiant` test suite, where `IPYRADIANT_TESTING`
# will be set, which will trigger the automated section below.
# Shortcut to the loader behind the load widget (presumably a file loader;
# confirm in the InteractiveViewer notebook).
pl = lw.loader
# The CI harness sets IPYRADIANT_TESTING; default "false" means interactive use.
IS_TESTING = json.loads(os.environ.get("IPYRADIANT_TESTING", "false"))
# Display the flag in the notebook output.
IS_TESTING
# Tests are implemented as buttons you click.
# +
# Per-key history of wall-clock timestamps, used to report deltas between calls.
timings = {}

def timestamp(key, msg):
    """Record a wall-clock timestamp under *key* and print *msg* with the
    whole-second delta since the previous call for the same key (0 on the
    first call)."""
    history = timings.setdefault(key, [])
    history.append(time.time())
    if len(history) > 1:
        delta = history[-1] - history[-2]
    else:
        delta = 0
    print(f"[{key}]", f"+{int(delta)}", msg)
# -
# TODO extend as needed
# Each button doubles as a test trigger; clicking (or .click()) runs _run_test.
tests = [W.Button(description="simple_graph")]
# +
N_CHECKS = 5 # number of times to run the test selection
def _run_test(btn):
    """Button callback exercising the InteractiveViewer (`iv`).

    Clears the viewer's graph, reassigns `simple_graph`, then for N_CHECKS
    iterations makes a random node-type selection and asserts the number of
    visible cytoscape nodes matches the count expected from each node's
    "rdf:type" data. Any failure is re-raised as `Exception("<name> failed")`
    after being logged via timestamp().
    """
    p = btn.description
    try:
        timestamp(p, "starting...")
        # Assigning an empty rdflib Graph resets the viewer.
        iv.graph = Graph()
        timestamp(p, "cleaned...")
        assert len(iv.viewer.cytoscape_widget.graph.nodes) == 0
        timestamp(p, f"assigning graph...")
        iv.graph = simple_graph
        assert len(iv.viewer.cytoscape_widget.graph.nodes) > 0
        timestamp(p, f"graph updated...")
        timestamp(p, f"verifying node visibility...")
        cyto_graph_nodes = iv.viewer.cytoscape_widget.graph.nodes
        n_nodes = len(cyto_graph_nodes)
        # Visibility is encoded via the "invisible" CSS class on each node.
        visible_nodes = [
            node for node in cyto_graph_nodes if "invisible" not in node.classes
        ]
        n_visible_nodes = len(visible_nodes)
        # Immediately after loading, every node should be visible.
        assert n_nodes == n_visible_nodes
        timestamp(p, f"interactive viewer initialized w/ graph successfully...")
        for ii in range(N_CHECKS):
            timestamp(p, f"making selections for iteration #{ii}...")
            # get some number of node type options from the list (at least one)
            # NOTE(review): if there is exactly one option, range(1, 1) is
            # empty and random.choice raises IndexError -- assumes the test
            # graph always yields >= 2 type options; confirm.
            node_type_selections = random.sample(
                iv.type_selector.options,
                k=random.choice(range(1, len(iv.type_selector.options))),
            )
            # Options are (label, value) pairs; keep only the values.
            node_type_selection_values = tuple([_[1] for _ in node_type_selections])
            # make the selections
            iv.type_selector.value = node_type_selection_values
            timestamp(
                p,
                f"calculating expected number of visible nodes for iteration #{ii}...",
            )
            # Count nodes whose rdf:type intersects the selected types;
            # "rdf:type" may hold a single value or a tuple of values.
            n_ex_visible = 0
            for node in cyto_graph_nodes:
                if type(node.data["rdf:type"]) is tuple:
                    types = set(node.data["rdf:type"])
                else:
                    types = set([node.data["rdf:type"]])
                if any(
                    [
                        visible_type in types
                        for visible_type in node_type_selection_values
                    ]
                ):
                    n_ex_visible += 1
            # calculating the number of visible nodes
            visible_nodes = [
                node for node in cyto_graph_nodes if "invisible" not in node.classes
            ]
            n_visible_nodes = len(visible_nodes)
            assert n_ex_visible == n_visible_nodes
            timestamp(p, f"expected matches visible for iteration #{ii}...")
        timestamp(p, "OK!")
    except Exception as err:
        timestamp(p, "ERROR")
        timestamp(p, err)
        # Chain the original failure so the traceback is preserved.
        raise Exception(f"{p} failed") from err
[d.on_click(_run_test) for d in tests]
# -
# ## Show the Test Application
W.VBox([iv, W.HBox([W.Label("Start Test"), *tests])])
# # Run Test Automatically
if IS_TESTING:
for test in tests:
test.click()
| examples/Test_InteractiveViewer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="aDBSK3kJXUDq"
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
# + id="YR1o941wZG7O"
df=pd.read_csv('/content/ELAVATION_STAT.csv')
# + id="vD7ig5zSXUDy" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="fe656dbe-a631-47e7-d326-c441365258ce"
print(df.columns)
df.head()
# + id="3UilOUn6XUDz" colab={"base_uri": "https://localhost:8080/"} outputId="2fef0bce-0a9b-48d6-9cf5-f0133d62c590"
# Check for Nan values
df.isnull().values.any()
df1 = df[df.isna().any(axis=1)]
print("Num NAN values: {}".format(df1.shape[0]))
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="5qSm5YywZ45t" outputId="23c0147d-fed2-4497-d725-894e7d13f609"
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
df['STAT_CAUSE_CODE'] = le.fit_transform(df.STAT_CAUSE_DESCR.values)
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="SlVhmtaJbK_y" outputId="1cc417ec-7ec3-40b4-afe2-94a999c594fc"
le.classes_
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="jErxar7JdOvJ" outputId="13b97b42-f743-4bc5-8a93-c1b46ff875e0"
import datetime as dt
df['MYDATE'] = pd.to_datetime(df['DISCOVERY_DATE'])
df['MYDATE']=df['MYDATE'].map(dt.datetime.toordinal)
df.head(5)
# + id="csToFytTXUDz" colab={"base_uri": "https://localhost:8080/"} outputId="f2e846ae-ef13-4bcf-9f47-13211f6edd11"
#Changing pandas dataframe to numpy array
Y = df['FIRE_SIZE'].values
X = df[['MYDATE', 'DISCOVERY_DOY','FIRE_YEAR', 'FOD_ID', 'STAT_CAUSE_CODE','lat', 'lon']]
print(Y.shape)
print(X.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 194} id="tZteBAJdGPSj" outputId="ed990c0e-795c-4b9b-9fab-1db51827747d"
X.head()
# + id="FUoEu9jPXUD0"
#Normalize the data
sc = StandardScaler()
X = sc.fit_transform(X)
# + colab={"base_uri": "https://localhost:8080/"} id="3fW3M284VNp6" outputId="db2da93b-bb3d-4ffd-cf5f-2d86e577dfb0"
X.shape
# + id="EYpggAU4XUD0" colab={"base_uri": "https://localhost:8080/"} outputId="7a71d6d1-0929-4880-a623-3035c73ba75e"
# Shuffles the data before split
# Train/Test split
X_train,X_test,y_train,y_test = train_test_split(X,Y,test_size = 0.5)
print("# Train: {} , #Test: {}".format(X_train.shape[0], X_test.shape[0]))
print("# inputs: {}".format(X_train.shape[1]))
n = X_train.shape[1]
# + id="TJ5brkT1XUD0" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="f238d844-4047-4285-a53b-e2658a1947c1"
#Distribution of fire size
plt.hist(Y, bins=np.logspace(-4,6, 50));
plt.ylabel('Number of Fires')
plt.xlabel('Fire Size')
plt.gca().set_yscale("log")
plt.gca().set_xscale("log")
plt.savefig("kaggle_fires_dist_log.png")
plt.show()
print("Mean fire size: {}".format(np.mean(Y)))
print("Median fire size: {}".format(np.median(Y)))
print("Var in fire size: {}".format(np.var(Y)))
# + id="NUPW7MWgXUD1" colab={"base_uri": "https://localhost:8080/"} outputId="bf48338f-7010-4602-9e4f-587de90d6d71"
# Neural network
model = Sequential()
model.add(Dense(32, input_dim=n, kernel_initializer='normal', activation='relu'))
model.add(Dense(64, input_dim=n , activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='linear'))
model.summary()
# + id="LNsC6_qfXUD1"
model.compile(loss='mse', optimizer='adam', metrics=['mse','mae','accuracy'])
# + id="SydRHo_JXUD2" colab={"base_uri": "https://localhost:8080/"} outputId="76b71e30-9844-4b19-8471-68cc0354a3de"
#Save partly trained model
#model.save('partly_trained.h5')
#Load partly trained model
from keras.models import load_model
#model = load_model('partly_trained.h5')
#Continue training
history = model.fit(X_train, y_train, epochs=10, batch_size=640, verbose = 2, validation_split = 0.1, shuffle=True)
# + colab={"base_uri": "https://localhost:8080/"} id="O4Y3wQZTJbNg" outputId="2e5f586d-fa8e-4654-afae-13494736b31b"
model.metrics_names
# + id="GMk88xPsXUD2" colab={"base_uri": "https://localhost:8080/"} outputId="de1b0166-87d0-453f-c42c-e1d4e14ed625"
#Save partly trained model
model.save('partly_trained.h5')
#Load partly trained model
from keras.models import load_model
model = load_model('partly_trained.h5')
loss, mse, mae, accuracy = model.evaluate(X_test, y_test)
print('MSE: %.2f, MAE: %.2f' % (mse,mae))
# + id="j5jh5p5AXUD2" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="374aeb60-8736-4f1e-d510-b30f8cfc28d2"
#Visualize some predictions
preds = model.predict(X_test, verbose=1)
plt.scatter(y_test, preds, s=0.3, marker='.', c='r')
plt.xlabel('True Size')
plt.ylabel('Predicted Size')
plt.plot([20,50],[20,50],c='k',linestyle='--')
plt.show()
# + id="56-AHvvTXUD3" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="fded0c0e-b45d-44a4-e8de-58005a6d3d5d"
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# + id="NEqxRlmtXUD3" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="d33589e0-9afc-4e9d-c666-c45a1398acf7"
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# + id="z7U73yhuXUD3" colab={"base_uri": "https://localhost:8080/"} outputId="9c49192c-b292-41ac-aaf7-6287bd2b0c22"
# SVM
from sklearn import svm
from sklearn.metrics import r2_score, mean_absolute_error
n = 70000
svm_reg = svm.SVR(kernel='rbf', degree=3, gamma='auto')
svm_reg.fit(X_train[0:n,:],y_train[0:n].reshape(-1))
y_pred = svm_reg.predict(X_test[0:n,:])
def mean_absolute_percentage_error(y_test, y_pred):
    """Mean absolute percentage error between *y_test* and *y_pred*.

    The denominator is offset by 10e-1 (= 1.0) so that zero-valued targets
    do not cause division by zero.
    """
    relative_errors = np.abs((y_test - y_pred) / (y_test + 10e-1))
    return 100 * np.mean(relative_errors)
print("MAE: {}".format(mean_absolute_error(y_test[0:n],y_pred)))
print("MAPE: {}".format(mean_absolute_percentage_error(y_test[0:n],y_pred)))
print("---------------------------------------")
# + id="S8ONQ2UuXUD4" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="cb1e8339-6c46-4acc-961d-91f4151a2f41"
#Visualize svm predictions
plt.scatter(y_test[0:n], y_pred, s=0.3, marker='.', c='r')
plt.xlabel('True Size')
plt.ylabel('Predicted Size')
plt.gca().set_xscale("log")
plt.gca().set_yscale("log")
plt.plot([.1,20,1000],[.1,20,1000],c='k',linestyle='--')
plt.savefig("kaggle_fires_svm_preds.png")
plt.show()
# + id="1Jgh1FT-XUD4" colab={"base_uri": "https://localhost:8080/"} outputId="a938d435-0294-47c0-80a6-a551f6a191ea"
# SKlearn MLP
from sklearn.neural_network import MLPRegressor
n = 10000
NN = MLPRegressor(hidden_layer_sizes=(800,800,800,800),max_iter=200 )
NN.fit(X_train[0:n,:],y_train[0:n].reshape(-1))
y_pred = NN.predict(X_test[0:n,:])
print("MAE: {}".format(mean_absolute_error(y_test[0:n],y_pred)))
print("r2: {}".format(r2_score(y_test[0:n],y_pred)))
print("---------------------------------------")
# + id="zMJvQSF6XUD4" colab={"base_uri": "https://localhost:8080/"} outputId="42996700-c86b-45bb-8590-86167c0fdd5e"
# K Nearest Neighbours
from sklearn.neighbors import KNeighborsRegressor
n = 10000
for nbs in [3,5,10]:
for wts in ['distance','uniform']:
K_nn = KNeighborsRegressor(n_neighbors=nbs,
weights=wts,
n_jobs=6)
K_nn.fit(X_train[0:n,:],y_train[0:n].reshape(-1))
y_pred = K_nn.predict(X_test[0:n,:])
print("nbs: {}, wts: {}".format(nbs, wts))
print("MAE: {}".format(mean_absolute_error(y_test[0:n],y_pred)))
print("r2: {}".format(r2_score(y_test[0:n],y_pred)))
print("---------------------------------------")
# + id="by4pzY7vXUD5" colab={"base_uri": "https://localhost:8080/"} outputId="3cd9a5f5-3c6c-4e69-87e6-680d41edb6fe"
# Decision Tree
from sklearn import tree
tree_clf = tree.DecisionTreeRegressor(criterion='mae')
tree_clf.fit(X_train[0:n,:],y_train[0:n].reshape(-1))
y_pred = tree_clf.predict(X_test[0:n,:])
print("MAE: {}".format(mean_absolute_error(y_test[0:n],y_pred)))
print("r2: {}".format(r2_score(y_test[0:n],y_pred)))
print("---------------------------------------")
# + id="T-4ZV6S_XUD5" colab={"base_uri": "https://localhost:8080/"} outputId="fdc00724-3b95-45c7-fb9c-4513f316769f"
# Stacked regressors
from mlxtend.regressor import StackingRegressor
n = 10000
estimators = [K_nn, NN, svm_reg]
regStack = StackingRegressor(regressors=estimators,
meta_regressor=NN)
regStack.fit(X_train[0:n,:],y_train[0:n].reshape(-1))
y_pred = regStack.predict(X_test[0:n,:])
print("MAE: {}".format(mean_absolute_error(y_test[0:n],y_pred)))
print("r2: {}".format(r2_score(y_test[0:n],y_pred)))
print("---------------------------------------")
# + id="JWgZLyCdXUD5" colab={"base_uri": "https://localhost:8080/"} outputId="3f75d31a-4208-4e3e-e87b-5e74b0a71326"
#Best Features - Univariate statistical selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_classif, mutual_info_classif
feature_names = ['MYDATE', 'DISCOVERY_DOY','FIRE_YEAR', 'FOD_ID', 'STAT_CAUSE_CODE','lat', 'lon']
bestfeatures = SelectKBest(score_func=f_classif, k=5)
fit = bestfeatures.fit(X,np.array(Y).astype(int))
dfscores = pd.DataFrame(fit.scores_)
df_specs = pd.Series(feature_names).astype(str)
featureScores = pd.concat([df_specs,dfscores],axis=1)
featureScores.columns = ['Feature','f_classif Score']
print(featureScores.nlargest(len(feature_names),'f_classif Score'))
# + id="EAF2dFe2XUD5" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="afa6768d-87fd-4157-f12c-c3afcbaa9d0e"
#Best Features - importance
# Feature importance gives you a score for each feature of your data; the higher the score, the
# more important or relevant the feature is to your output variable.
# Feature importance is an inbuilt class that comes with Tree Based Classifiers, we will be using
# Extra Tree Classifier for extracting the top 10 features for the dataset.
from sklearn.ensemble import ExtraTreesRegressor
import matplotlib.pyplot as plt
model = ExtraTreesRegressor()
model.fit(X_train,y_train)
feat_importances = pd.Series(model.feature_importances_, index=feature_names)
feat_importances.nlargest(10).plot(kind='barh')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="yHff40R3UW-B" outputId="2629077e-dc9d-4cab-ac90-baf9da936e45"
model.feature_importances_
# + id="HBNWXmI7XUD6" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="adee9348-8d2e-47bb-8256-144d506c65e5"
# Best features - correlation heatmap
print(df.columns)
import seaborn as sns
feature_names = ['MYDATE', 'DISCOVERY_DOY','FIRE_YEAR', 'FOD_ID', 'STAT_CAUSE_CODE','lat', 'lon']
data =df[feature_names]
corrmat = np.corrcoef(data.T)
plt.figure(figsize=(10,10))
g=sns.heatmap(corrmat,annot=True,cmap="coolwarm")
g.set_xticklabels(feature_names,rotation=30)
g.set_yticklabels(feature_names,rotation=30)
plt.show()
| Scripts/FOD_FDA_lon_lat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NLP Practice 1: computing text similarities using gensim and nltk
import nltk
import gensim
import sys, logging
# %matplotlib inline
import matplotlib.pyplot as plt
# all output goes to logs
sys.path.append('~/logs')
import cjzpy_load_logging
#cjzpy_load_logging.load_logging_json(default_level=logging.INFO)
cjzpy_load_logging.load_logging_json(default_level=logging.DEBUG)
logger = logging.getLogger(sys._getframe().f_code.co_name)
logger.debug('hello gensim')
logger.info('hello NLP practice 1')
def gensim4texts(logger, texts, query, nTopic):
    """Rank *texts* by LSI cosine similarity to *query*.

    Parameters
    ----------
    logger : logging.Logger
        Destination for the (very verbose) debug/info output.
    texts : list[list[str]]
        Tokenized documents.
    query : list[str]
        Tokenized query.
    nTopic : int
        Number of latent topics for the LSI (and LDA) models.

    Returns
    -------
    list[tuple[int, float]]
        (document index, similarity) pairs sorted by decreasing similarity.
    """
    logger.info('This is %s', sys._getframe().f_code.co_name)
    logger.debug('texts: %s', texts)
    # In BoW representation, each document is represented by one vector where each vector element represents a question-answer pair, in the style of: How many times does the word system appear in the document? The mapping between the questions and ids is called a dictionary
    dictionary = gensim.corpora.Dictionary(texts)
    logger.debug('dictionary.token2id: %s', dictionary.token2id)
    # To actually convert tokenized documents to vectors
    corpus = [dictionary.doc2bow(text) for text in texts]
    # gensim.corpora.MmCorpus.serialize('corpus_tmp.mm', corpus) # store to disk, for later use
    logger.debug('corpus: %s', corpus)
    # Weight the BoW corpus by TF-IDF before topic modelling.
    tfidf = gensim.models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    for doc in corpus_tfidf:
        logger.debug('corpus_tfidf.doc: %s', doc)
    logger.debug('tfidf.dfs: %s', tfidf.dfs)
    logger.debug('tfidf.idfs: %s', tfidf.idfs)
    # LSI trained on the TF-IDF corpus; its topics drive the ranking below.
    lsi = gensim.models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=nTopic)
    lsi.print_topics(2)
    corpus_lsi = lsi[corpus_tfidf]
    for doc in corpus_lsi:
        logger.debug('copus_lsi.doc: %s', doc)
    # NOTE(review): the LDA model only has its topics printed and takes no
    # part in the similarity ranking -- demonstration only.
    lda = gensim.models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=nTopic)
    lda.print_topics(2)
    # NOTE(review): the similarity index is built from lsi[corpus] (raw BoW
    # through LSI) although LSI was trained on the TF-IDF corpus; presumably
    # intentional, but confirm the weighting is meant to differ here.
    index = gensim.similarities.MatrixSimilarity(lsi[corpus])
    #query_bow = dictionary.doc2bow(query.lower().split())
    # *query* is expected to be pre-tokenized (list of words).
    query_bow = dictionary.doc2bow(query)
    logger.debug('query_bow: %s', query_bow)
    query_lsi = lsi[query_bow]
    logger.debug('query_lsi: %s', query_lsi)
    sims = index[query_lsi]
    logger.debug('sims: %s', list(enumerate(sims)))
    # Sort by descending similarity while keeping the document index.
    sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
    logger.info('sort_sims: %s', sort_sims)
    return sort_sims
documents = ["Shipment of gold damaged in a fire", "Delivery of silver arrived in a silver truck",
"Shipment of gold arrived in a truck"]
texts = [[word for word in document.lower().split()] for document in documents]
query = 'gold silver truck'
gensim4texts(logger, texts, query.lower().split(), 2)
def nltkPreProcess4texts(logger, texts, stemmer=None):
    """Preprocess raw documents with NLTK for downstream gensim modelling.

    Pipeline: tokenize + lowercase, drop English stopwords and common
    punctuation, stem, then remove stems that occur only once in the
    whole corpus.

    Parameters
    ----------
    logger : logging.Logger
        Destination for progress/debug output.
    texts : list[str]
        Raw documents, one string each.
    stemmer : str, optional
        'porter' selects nltk.PorterStemmer; any other value (including
        the default None) selects nltk.LancasterStemmer.

    Returns
    -------
    list[list[str]]
        One list of kept stems per input document.
    """
    logger.info('This is %s', sys._getframe().f_code.co_name)
    logger.info('texts=%s', texts[0])
    # lowercasing (computed only for the debug log below; the tokenization
    # step lowercases again independently)
    texts_lower = [[word for word in document.lower().split()] for document in texts]
    logger.debug('texts_lower[0]=%s', texts_lower[0])
    # tokenizing
    texts_tokenized = [[word.lower() for word in nltk.word_tokenize(document)] for document in texts]
    logger.debug('texts_tokenized[0]=%s', texts_tokenized[0])
    # filtering stopwords and punctuation
    english_stopwords = nltk.corpus.stopwords.words('english')
    english_stopwords.extend([',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%'])
    logger.debug('english_stopwords=%s, length=%d', english_stopwords,len(english_stopwords))
    texts_filtered_stopwords = [[word for word in document if not word in english_stopwords] for document in texts_tokenized]
    logger.debug('texts_filtered_stopwords[0]=%s', texts_filtered_stopwords[0])
    # stemming
    # BUGFIX: the original used `stemmer is 'porter'` -- identity comparison
    # with a string literal is interning-dependent and unreliable; use ==.
    if stemmer == 'porter':
        st = nltk.PorterStemmer()
    else:
        st = nltk.LancasterStemmer()
    texts_stemmed = [[st.stem(word) for word in document] for document in texts_filtered_stopwords]
    logger.debug('texts_stemmed[0]=%s', texts_stemmed[0])
    # eliminating stems with only one occurrence across the whole corpus
    all_stems = sum(texts_stemmed, [])
    stems_once = set(stem for stem in set(all_stems) if all_stems.count(stem) == 1)
    texts = [[stem for stem in text if stem not in stems_once] for text in texts_stemmed]
    logger.debug('texts_WithMoreThanOneCounts[0]=%s', texts[0])
    return texts
documents = ["你 认识 那个 和 主席 握手 的 Python 的哥 吗", "他 开 一辆 黑色 C++", "黑色 的士", "我 爱 Python 和 C++"]
texts = [[word for word in document.split()] for document in documents]
print(texts)
query = u'红色 的 Python'
gensim4texts(logger, texts, query.lower().split(), 2)
documents = ["运 送 黄金 的 车 在 火 中 受损", "一辆 银色 的 运 银 卡车 到达", "运送 黄金 的 卡车 抵达"]
texts = [[word for word in document.split()] for document in documents]
print(texts)
query = u'黄金 银 卡车'
gensim4texts(logger, texts, query.split(), 2)
# +
fn = '../cjzpyml/cjzpynlp/data/coursera_corpus'
courses = [line.strip() for line in open(fn)]
courses_name = [course.split('\t')[0] for course in courses]
logger.debug('courses_name[0:10]=%s', courses_name[0:10])
texts = nltkPreProcess4texts(logger, courses) #default stemmer is Lancaster
sims = gensim4texts(logger, texts, texts[210], 10)
for i, j in enumerate(sims[0:10]):
logger.info('(#,similar_course_name,sims)=(%d,%s,%f)', i, courses_name[j[0]], j[1])
texts = nltkPreProcess4texts(logger, courses, stemmer='porter')
sims = gensim4texts(logger, texts, texts[210], 10)
for i, j in enumerate(sims[0:10]):
logger.info('(#,similar_course_name,sims)=(%d,%s,%f)', i, courses_name[j[0]], j[1])
# +
fn = '../cjzpyml/cjzpynlp/data/coursera_corpus_c'
courses = [line.strip() for line in open(fn)]
courses_name = [course.split('\t')[0] for course in courses]
logger.debug('courses_name[0:10]=%s', courses_name[0:10])
texts = nltkPreProcess4texts(logger, courses) #default stemmer is Lancaster
sims = gensim4texts(logger, texts, texts[210], 10)
for i, j in enumerate(sims[0:10]):
logger.info('(#,similar_course_name,sims)=(%d,%s,%f)', i, courses_name[j[0]], j[1])
texts = nltkPreProcess4texts(logger, courses, stemmer='porter')
sims = gensim4texts(logger, texts, texts[210], 10)
for i, j in enumerate(sims[0:10]):
logger.info('(#,similar_course_name,sims)=(%d,%s,%f)', i, courses_name[j[0]], j[1])
# -
class MyCorpus(object):
    """Memory-friendly gensim corpus: streams one BoW vector per line of a
    text file instead of loading the whole corpus at once.
    """
    def __init__(self, logger, fn):
        # fn: path to a file with one whitespace-tokenized document per line
        self.fn = fn
        self.logger = logger
        # Minimal hand-picked English stopword list.
        self.stoplist = set('for a of the and to in'.split())
        self.logger.info('I am at class %s', self)
    def getDictionary1(self):
        """Build `self.dictionary1` from a hard-coded toy corpus.

        NOTE(review): `dictionary1` is never used by `__iter__` (which uses
        `self.dictionary`); this appears to be demonstration code only.
        """
        documents = ["Human machine interface for lab abc computer applications",
                     "A survey of user opinion of computer system response time",
                     "The EPS user interface management system",
                     "System and human system engineering testing of EPS",
                     "Relation of user perceived response time to error measurement",
                     "The generation of random binary unordered trees",
                     "The intersection graph of paths in trees",
                     "Graph minors IV Widths of trees and well quasi ordering",
                     "Graph minors A survey"]
        # remove common words and tokenize
        texts = [[word for word in document.lower().split() if word not in self.stoplist] for document in documents]
        # remove words that appear only once
        all_tokens = sum(texts, [])
        tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
        texts = [[word for word in text if word not in tokens_once] for text in texts]
        self.dictionary1 = gensim.corpora.Dictionary(texts)
        # self.dictionary1.save('dict_tmp.dict') # store the dictionary, for future reference
    def getDictionary(self):
        """Build `self.dictionary` by streaming `self.fn` line by line."""
        # collect statistics about all tokens
        self.dictionary = gensim.corpora.Dictionary(line.lower().strip().split() for line in open(self.fn))
        # remove stop words and words that appear only once
        stop_ids = [self.dictionary.token2id[stopword] for stopword in self.stoplist if stopword in self.dictionary.token2id]
        once_ids = [tokenid for tokenid, docfreq in self.dictionary.dfs.items() if docfreq == 1]
        self.dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once
        self.dictionary.compactify() # remove gaps in id sequence after words that were removed
    def __iter__(self):
        # NOTE(review): both dictionaries are rebuilt (and the file re-read)
        # every time iteration starts; cheap for small files but wasteful if
        # the corpus is iterated repeatedly.
        self.getDictionary1()
        self.getDictionary()
        for line in open(self.fn):
            # assume there's one document per line, tokens separated by whitespace
            yield self.dictionary.doc2bow(line.lower().strip().split())
fn = '../cjzpyml/cjzpynlp/data/mycorpus.txt'
corpus_memory_friendly = MyCorpus(logger, fn) # doesn't load the corpus into memory!
for vector in corpus_memory_friendly: # load one vector into memory at a time
logger.info('vector=%s', vector)
fn = '../cjzpyml/cjzpynlp/data/mycorpus1.txt'
corpus_memory_friendly = MyCorpus(logger, fn) # doesn't load the corpus into memory!
for vector in corpus_memory_friendly: # load one vector into memory at a time
logger.info('vector=%s', vector)
nltk.download()
| cjzpyjn_nlp1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import json
from ltr.client import ElasticClient
client = ElasticClient()
host = client.get_host()
# +
# Rebuild the index if necessary
from ltr import download
from ltr.index import rebuild
from ltr.helpers.movies import indexable_movies
corpus='http://es-learn-to-rank.labs.o19s.com/tmdb.json'
download([corpus], dest='data/');
movies=indexable_movies(movies='data/tmdb.json')
rebuild(client, index='tmdb', doc_src=movies)
# -
# ## Init Default Feature Store
# The feature store can be removed by sending a DELETE request to `_ltr` endpoint.
url = 'http://{}:9200/_ltr/'.format(host)
print(url)
requests.delete(url)
# To initialize the LTR plugin, issue a PUT request to the `_ltr` endpoint.
url = 'http://{}:9200/_ltr/'.format(host)
print(url)
requests.put(url)
# ## Create Feature Set
#
# A feature set can be created by issuing a PUT to `_ltr/featureset/[feature_name]`
# +
feature_set = {
"featureset": {
"features": [
{
"name": "title_bm25",
"params": [
"keywords"
],
"template": {
"match": {
"title": "{{keywords}}"
}
}
},
{
"name": "overview_bm25",
"params": [
"keywords"
],
"template": {
"match": {
"overview": "{{keywords}}"
}
}
}
]
},
"validation": {
"index": "tmdb",
"params": {
"keywords": "rambo"
}
}
}
url = 'http://{}:9200/_ltr/_featureset/my_feature_set'.format(host)
print(url)
requests.put(url, json=feature_set)
# -
# ## Log Some Judged Queries To Build Training Set
# If we have 4 judged documents: 7555,1370, 1369, and 1368 for keywords rambo:
#
# ```
# doc_id, relevant?, keywords
# 1368, 1, rambo
# 1369, 1, rambo
# 1370, 1, rambo
# 7555, 0, rambo
# ```
#
#
# We need to get feature value for each row.
#
# To do this, we utilize the logging extension to populate the judgment list with features for training.
# +
search_with_log = {
"query": {
"bool": {
"filter": [
{
"sltr": {
"_name": "logged_features",
"featureset": "my_feature_set",
"params": {
"keywords": "rambo"
}
}
},
{
"terms": {
"_id": [
"7555","1370", "1369", "1368"
]
}
}
]
}
},
"ext": {
"ltr_log": {
"log_specs": {
"name": "ltr_features",
"named_query": "logged_features"
}
}
}
}
url = 'http://{}:9200/tmdb/_search'.format(host)
print(url)
resp = requests.get(url, json=search_with_log).json()
print(json.dumps(resp['hits']['hits'][0], indent=2))
# -
# ## Training Set Now...
#
#
# ```
# doc_id, relevant?, keywords, title_bm25, overview_bm25
# 1368, 1, rambo, 0, 11.113943
# 1369, 1, rambo, 11.657, 10.08
# 1370, 1, rambo, 9.456, 13.265
# 7555, 0, rambo, 6.037, 11.114
# ```
#
#
# # Train a model
#
# We won't do this here, but if you like you can try out training a model using Ranklib
#
# ```
# # cd notebooks/elasticsearch/tmdb
# java -jar data/RankyMcRankFace.jar -train data/title_judgments.txt -save data/model.txt
#
# ```
# ## Uploading a Model
# Once features have been logged and training data has been generated, a model can be pushed into Elasticsearch. The following shows what a request to PUT a new model looks like.
# +
model = """## LambdaMART
## No. of trees = 10
## No. of leaves = 10
## No. of threshold candidates = 256
## Learning rate = 0.1
## Stop early = 100
<ensemble>
<tree id="1" weight="0.1">
<split>
<feature> 2 </feature>
<threshold> 10.664251 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<output> -1.8305741548538208 </output>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 9.502127 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 7.0849166 </threshold>
<split pos="left">
<output> 0.23645669221878052 </output>
</split>
<split pos="right">
<output> 1.7593677043914795 </output>
</split>
</split>
<split pos="right">
<output> 1.9719607830047607 </output>
</split>
</split>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<output> 1.3728954792022705 </output>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 8.602512 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<feature> 2 </feature>
<threshold> 13.815164 </threshold>
<split pos="left">
<output> 1.9401178359985352 </output>
</split>
<split pos="right">
<output> 1.99532949924469 </output>
</split>
</split>
<split pos="right">
<feature> 1 </feature>
<threshold> 11.085816 </threshold>
<split pos="left">
<output> 2.0 </output>
</split>
<split pos="right">
<output> 1.99308180809021 </output>
</split>
</split>
</split>
<split pos="right">
<output> 1.9870178699493408 </output>
</split>
</split>
</split>
</split>
</tree>
</ensemble>
"""
create_model = {
"model": {
"name": "my_model",
"model": {
"type": "model/ranklib",
"definition": model
}
}
}
url = 'http://{}:9200/_ltr/_featureset/my_feature_set/_createmodel'.format(host)
print(url)
requests.post(url, json=create_model).json()
# -
# ## Searching with a Model
# Now that a model has been uploaded to Elasticsearch we can use it to re-rank the results of a query.
# +
search = {
"query": {
"sltr": {
"params": {
"keywords": "rambo"
},
"model": "my_model"
}
}
}
url = 'http://{}:9200/tmdb/_search'.format(host)
resp = requests.get(url, json=search).json()
# -
print(url)
for hit in resp['hits']['hits']:
print(hit['_source']['title'])
| notebooks/elasticsearch/tmdb/raw-es-commands.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# # Projections.jl
#
# Projections.jl is a Julia package that collects commonly used projection operators. It is useful for optimization algorithms such as projected gradient descent.
#
# Given a point $y \in \mathbb{R}^n$ and a set $S \subset \mathbb{R}^n$, the projection of $y$ onto $S$ is defined as
# $$
# \text{proj}_S(y) = \arg \min_{x \in S} \|x - y\|_2^2.
# $$
# If $S$ is a closed convex set, then $\text{proj}_S(y)$ is unique.
# ## Installation
#
# This package requires Julia v0.7 or later. The package has not yet been registered and must be installed using the repository location. Start julia and use the ] key to switch to the package manager REPL
# ```julia
# (v1.0) pkg> add https://github.com/Hua-Zhou/Projections.jl.git
# ```
# Machine information for this tutorial
versioninfo()
# ## Basic usage
#
# This package provides two functions for each set:
# ```julia
# project!(s, v, y)
# ```
# overwrites `v` by projection of `y` to set `s` and
# ```julia
# project(s, y)
# ```
# simply returns projection of `y` to set `s`.
# ## Projection operators
using Projections
# ### Ball
#
# Projection of $y$ onto the closed ball $S = \{x \in \mathbb{R}^n: \|x - c\|_2 \le r\}$ is
# $$
# \text{proj}_S(y)_i = \begin{cases}
# c + r \frac{y - c}{\|y - c\|_2} & \|y - c\|_2 > r \\
# y & \|y - c\|_2 \le r
# \end{cases}.
# $$
# Computational cost is $O(n)$.
y = [-2.5, -1.5, -0.5, 0.0, 0.5, 1.5, 2.5]
# norm is less than 5
sqrt(sum(abs2, y))
# projection onto the unit ball
project(Ball(length(y)), y)
# projection onto ball with center 0 and radius 5
project(Ball(length(y), 5.), y)
# projection onto ball with center (1,...,1) and radius 1
project(Ball(ones(length(y)), 1.), y)
# ### Box
#
# Projection of $y$ onto a closed box $S = [a_1, b_1] \times \cdots \times [a_n, b_n]$ is
# $$
# \text{proj}_S(y)_i = \begin{cases}
# a_i & y_i < a_i \\
# y_i & y_i \in [a_i, b_i] \\
# b_i & y_i > b_i
# \end{cases}.
# $$
# Computational cost is $O(n)$.
y = [-2.5, -1.5, -0.5, 0.0, 0.5, 1.5, 2.5]
a = fill(-1.0, 7) # vector of all -1
b = fill( 1.0, 7) # vector of all 1
project(Box(a, b), y)
# ### Simplex
#
# There is no analytical solution for the projection of a point $y$ onto the simplex $S = \{x \in \mathbb{R}^n: x_i \ge 0, \sum_i x_i = r\}$. An $O(n \log n)$ algorithm given by [Duchi, Shalev-Shwartz, Singer, and Chandra](https://stanford.edu/~jduchi/projects/DuchiShSiCh08.pdf) is implemented.
y = [1., 1., 1.]
# canonical probability simplex
project(Simplex(), y)
project(Simplex(2), y)
# ## Contributing
#
# You are welcome to contribute to the Projections.jl package. To add a projection operator, e.g., projection to the affine set, to the package, follow these steps:
#
# 0. Fork the Projections.jl package.
# 0. Add `affine.jl` to the `src` folder that implements the type `Affine`, the function `project!(s::Affine, v, y)`, and the function `project(s, y)`.
# 0. Add a line `include("affine.jl")` to the `/src/Projections.jl` file.
# 0. Add test file `affine_test.jl` to the `test` folder.
# 0. Add a line `include("affine_test.jl")` to the `/test/runtests.jl` file.
# 0. Make sure that `Pkg.test("Projections")` successfully runs.
# 0. Add documentation for the new projection operator to the `Projection operators` section of Jupyter notebook `/docs/Projections.jl`.
# 0. Issue a pull request.
| docs/Projections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Algorithm implemented :
# Cluster based anomaly detection (K-mean)
# Repartition of data into categories then Gaussian/Elliptic Enveloppe on each categories separately
# Markov Chain
# Isolation Forest
# One class SVM
# RNN (comparison between prediction and reality)
# https://www.kaggle.com/victorambonati/unsupervised-anomaly-detection
# +
# #%matplotlib notebook
import pandas as pd
import numpy as np
import matplotlib
import seaborn
import matplotlib.dates as md
from matplotlib import pyplot as plt
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.covariance import EllipticEnvelope
#from pyemma import msm # not available on Kaggle Kernel
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
# +
def getDistanceByPoint(data, model):
    """Distance from every observation to the centroid of its assigned cluster.

    data  : DataFrame of (scaled) feature rows, as passed to KMeans.fit.
    model : fitted KMeans-like object exposing ``cluster_centers_`` and ``labels_``.

    Returns a float Series aligned with ``data``'s index; the largest values
    are the cluster-based anomaly candidates.
    """
    # BUG FIX: the original indexed cluster_centers_ with ``labels_[i] - 1``,
    # which maps label 0 to the *last* centroid, so every point was compared
    # against the wrong cluster center.  ``labels_`` already indexes
    # ``cluster_centers_`` directly.
    centers = model.cluster_centers_[model.labels_]
    # Vectorized row-wise Euclidean norm instead of a per-row Python loop.
    distances = np.linalg.norm(np.asarray(data) - centers, axis=1)
    return pd.Series(distances, index=data.index)
# train markov model to get transition matrix
def getTransitionMatrix(df):
    """Return the empirical (row-stochastic) transition matrix of a sequence.

    df : 1-d sequence (list / array / Series) of discrete states.

    NOTE(review): the original delegated to pyemma's
    ``msm.estimate_markov_model(df, 1)``, but that import is commented out at
    the top of this notebook, so the call raised NameError.  This replacement
    computes the plain maximum-likelihood (count-based) estimate with numpy
    only; unlike pyemma's default it does not enforce reversibility.
    """
    seq = np.asarray(df).ravel()
    states = np.unique(seq)                       # sorted unique states
    index = {s: i for i, s in enumerate(states)}  # state -> row/column index
    counts = np.zeros((len(states), len(states)))
    for src, dst in zip(seq[:-1], seq[1:]):       # count observed transitions
        counts[index[src], index[dst]] += 1
    row_sums = counts.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0                 # keep never-visited rows all-zero instead of 0/0
    return counts / row_sums
def markovAnomaly(df, windows_size, threshold):
    """Flag anomalies in a discrete-state series with a first-order Markov model.

    The first ``windows_size`` points are labelled 0 (not enough history);
    every later point is scored by ``anomalyElement`` from the probability of
    its trailing window under the estimated transition matrix, compared to
    ``threshold`` raised to the window length.
    """
    transition_matrix = getTransitionMatrix(df)
    real_threshold = threshold ** windows_size
    flags = []
    for pos in range(len(df)):
        if pos < windows_size:
            flags.append(0)
            continue
        window = df[pos - windows_size:pos].reset_index(drop=True)
        flags.append(anomalyElement(window, real_threshold, transition_matrix))
    return flags
# +
#df = pd.read_csv('../csv_data/m_data_fromNow.csv', usecols=['date', 'time', 'cpu'])
#df.head()
# +
#df['timestamp'] = df[df.columns[0:2]]. apply(
# lambda x: ' '.join(x.dropna().astype(str)),
# axis=1)
#df.head()
# +
## CSV : convert=> UTC -> [DATE time] cpu_value->[cpu]
df = pd.read_csv('../cto_k8s/m_data_10.11.1.80:9091.csv', usecols=['time', 'cpu_value'])
df.rename(columns={'cpu_value':'cpu'}, inplace=True)
df['timestamp'] = pd.to_datetime(df.time, unit='s')
df['cpu'] = df['cpu'].fillna(df['cpu'].mean())
# -
df = df[['timestamp', 'cpu']]
df.head()
print(df.info())
print(df['timestamp'].head())
print(df['cpu'].mean())
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.plot(x = 'timestamp', y='cpu')
df['hours'] = df['timestamp'].dt.hour
df['daylight'] = ((df['hours'] >= 8) & (df['hours'] <= 19)).astype(int)
df.head(40)
s = pd.date_range('2020-08-04', '2020-08-31').to_series()
s.dt.dayofweek
df['DayOfTheWeek'] = df['timestamp'].dt.dayofweek
df['WeekDay'] = (df['DayOfTheWeek'] < 5).astype(int)
outliers_fraction = 0.01
df['time_epoch'] = (df['timestamp'].astype(np.int64)/100000000000).astype(np.int64)
# +
df['categories'] = df['WeekDay']*2 + df['daylight']
a = df.loc[df['categories'] == 0, 'cpu']
b = df.loc[df['categories'] == 1, 'cpu']
c = df.loc[df['categories'] == 2, 'cpu']
d = df.loc[df['categories'] == 3, 'cpu']
# +
fig, ax = plt.subplots()
a_heights, a_bins = np.histogram(a)
b_heights, b_bins = np.histogram(b, bins=a_bins)
c_heights, c_bins = np.histogram(c, bins=a_bins)
d_heights, d_bins = np.histogram(d, bins=a_bins)
width = (a_bins[1] - a_bins[0])/6
ax.bar(a_bins[:-1], a_heights*100/a.count(), width=width, facecolor='blue', label='WeekEndNight')
ax.bar(b_bins[:-1]+width, (b_heights*100/b.count()), width=width, facecolor='green', label ='WeekEndLight')
ax.bar(c_bins[:-1]+width*2, (c_heights*100/c.count()), width=width, facecolor='red', label ='WeekDayNight')
ax.bar(d_bins[:-1]+width*3, (d_heights*100/d.count()), width=width, facecolor='black', label ='WeekDayLight')
plt.rcParams['figure.figsize'] = [10, 5]
plt.legend()
# -
# Take useful feature and standardize them
data = df[['cpu', 'hours', 'daylight', 'DayOfTheWeek', 'WeekDay']]
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data)
data = pd.DataFrame(np_scaled)
# reduce to the 2 most important features with PCA
pca = PCA(n_components=2)
data = pca.fit_transform(data)
# standardize these 2 new features
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data)
data = pd.DataFrame(np_scaled)
# calculate with different number of centroids to see the loss plot (elbow method)
n_cluster = range(1, 20)
kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster]
scores = [kmeans[i].score(data) for i in range(len(kmeans))]
fig, ax = plt.subplots()
ax.plot(n_cluster, scores)
# Not clear for me, I choose 15 centroids arbitrarily and add these data to the central dataframe
df['cluster'] = kmeans[14].predict(data)
df['principal_feature1'] = data[0]
df['principal_feature2'] = data[1]
df['cluster'].value_counts()
#plot the different clusters with the 2 main features
fig, ax = plt.subplots()
colors = {0:'red', 1:'blue', 2:'green', 3:'pink', 4:'black', 5:'orange', 6:'cyan', 7:'yellow', 8:'brown', 9:'purple', 10:'white', 11: 'grey', 12:'lightblue', 13:'lightgreen', 14: 'darkgrey'}
ax.scatter(df['principal_feature1'], df['principal_feature2'], c=df["cluster"].apply(lambda x: colors[x]))
plt.show()
# get the distance between each point and its nearest centroid. The biggest distances are considered as anomaly
distance = getDistanceByPoint(data, kmeans[14])
number_of_outliers = int(outliers_fraction*len(distance))
threshold = distance.nlargest(number_of_outliers).min()
# anomaly21 contain the anomaly result of method 2.1 Cluster (0:normal, 1:anomaly)
df['anomaly21'] = (distance >= threshold).astype(int)
fig, ax = plt.subplots()
color = {0:'blue', 1:'red'}
ax.scatter(df['principal_feature1'], df['principal_feature2'], c=df["anomaly21"].apply(lambda x: color[x]))
plt.show()
# +
fig, ax = plt.subplots()
a = df.loc[df['anomaly21'] == 1, ['time_epoch', 'cpu']] #anomaly
ax.plot(df['time_epoch'], df['cpu'], color = 'blue')
ax.scatter(a['time_epoch'], a['cpu'], color = 'red')
plt.show()
# +
a = df.loc[df['anomaly21'] == 0, 'cpu']
b = df.loc[df['anomaly21'] == 1, 'cpu']
fig, axs = plt.subplots()
axs.hist([a, b], bins=32, stacked=True, color=['blue', 'red'], label = ['normal', 'anomaly'])
plt.legend()
plt.show()
# -
# ## Categories + Gaussian
# creation of 4 different data sets based on the categories defined before
df_class0 = df.loc[df['categories'] == 0, 'cpu']
df_class1 = df.loc[df['categories'] == 1, 'cpu']
df_class2 = df.loc[df['categories'] == 2, 'cpu']
df_class3 = df.loc[df['categories'] == 3, 'cpu']
# plot the CPU distribution by category (the original kernel used a temperature series)
fig, axs = plt.subplots(2,2)
df_class0.hist(ax=axs[0,0],bins=32)
df_class1.hist(ax=axs[0,1],bins=32)
df_class2.hist(ax=axs[1,0],bins=32)
df_class3.hist(ax=axs[1,1],bins=32)
# +
envelope = EllipticEnvelope(contamination = outliers_fraction)
X_train = df_class0.values.reshape(-1,1)
envelope.fit(X_train)
df_class0 = pd.DataFrame(df_class0)
df_class0['deviation'] = envelope.decision_function(X_train)
df_class0['anomaly'] = envelope.predict(X_train)
envelope = EllipticEnvelope(contamination = outliers_fraction)
X_train = df_class1.values.reshape(-1,1)
envelope.fit(X_train)
df_class1 = pd.DataFrame(df_class1)
df_class1['deviation'] = envelope.decision_function(X_train)
df_class1['anomaly'] = envelope.predict(X_train)
envelope = EllipticEnvelope(contamination = outliers_fraction)
X_train = df_class2.values.reshape(-1,1)
envelope.fit(X_train)
df_class2 = pd.DataFrame(df_class2)
df_class2['deviation'] = envelope.decision_function(X_train)
df_class2['anomaly'] = envelope.predict(X_train)
envelope = EllipticEnvelope(contamination = outliers_fraction)
X_train = df_class3.values.reshape(-1,1)
envelope.fit(X_train)
df_class3 = pd.DataFrame(df_class3)
df_class3['deviation'] = envelope.decision_function(X_train)
df_class3['anomaly'] = envelope.predict(X_train)
# +
# plot the CPU distribution by category with anomalies highlighted
a0 = df_class0.loc[df_class0['anomaly'] == 1, 'cpu']
b0 = df_class0.loc[df_class0['anomaly'] == -1, 'cpu']
a1 = df_class1.loc[df_class1['anomaly'] == 1, 'cpu']
b1 = df_class1.loc[df_class1['anomaly'] == -1, 'cpu']
a2 = df_class2.loc[df_class2['anomaly'] == 1, 'cpu']
b2 = df_class2.loc[df_class2['anomaly'] == -1, 'cpu']
a3 = df_class3.loc[df_class3['anomaly'] == 1, 'cpu']
b3 = df_class3.loc[df_class3['anomaly'] == -1, 'cpu']
plt.rcParams['figure.figsize'] = [15, 8]
fig, axs = plt.subplots(2,2)
axs[0,0].hist([a0,b0], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
axs[0,1].hist([a1,b1], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
axs[1,0].hist([a2,b2], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
axs[1,1].hist([a3,b3], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
axs[0,0].set_title("WeekEndNight")
axs[0,1].set_title("WeekEndLight")
axs[1,0].set_title("WeekDayNight")
axs[1,1].set_title("WeekDayLight")
plt.legend()
plt.show()
# -
df_class = pd.concat([df_class0, df_class1, df_class2, df_class3])
df['anomaly22'] = df_class['anomaly']
df['anomaly22'] = np.array(df['anomaly22'] == -1).astype(float)
# +
fig, ax = plt.subplots()
a = df.loc[df['anomaly22'] == 1, ('time_epoch', 'cpu')] #anomaly
ax.plot(df['time_epoch'], df['cpu'], color='blue')
ax.scatter(a['time_epoch'], a['cpu'], color='red')
plt.show()
# +
a = df.loc[df['anomaly22'] == 0, 'cpu']
b = df.loc[df['anomaly22'] == 1, 'cpu']
fig, axs = plt.subplots()
axs.hist([a, b], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
plt.legend()
plt.show()
# -
# ## Markov chains
x1 = (df['cpu'] <= 18).astype(int)
x2 = ((df['cpu'] > 18) & (df['cpu'] <= 21)).astype(int)
x3 = ((df['cpu'] > 21) & (df['cpu'] <= 24)).astype(int)
x4 = ((df['cpu'] > 24) & (df['cpu'] <= 27)).astype(int)
x5 = (df['cpu'] > 27).astype(int)
df_mm = x1 + 2*x2 + 3*x3 + 4*x4 + 5*x5
print(df_mm)
# ## Isolation Forest
data = df[['cpu', 'hours', 'daylight', 'DayOfTheWeek', 'WeekDay']]
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data)
data = pd.DataFrame(np_scaled)
# train isolation forest
model = IsolationForest(contamination = outliers_fraction)
model.fit(data)
# add the data to the main
df['anomaly25'] = pd.Series(model.predict(data))
df['anomaly25'] = df['anomaly25'].map( {1: 0, -1: 1} )
print(df['anomaly25'].value_counts())
# +
# visualisation of anomaly throughout time (viz 1)
fig, ax = plt.subplots()
a = df.loc[df['anomaly25'] == 1, ['time_epoch', 'cpu']] #anomaly
ax.plot(df['time_epoch'], df['cpu'], color='blue')
ax.scatter(a['time_epoch'],a['cpu'], color='red')
plt.show()
# +
# visualisation of anomaly with temperature repartition (viz 2)
a = df.loc[df['anomaly25'] == 0, 'cpu']
b = df.loc[df['anomaly25'] == 1, 'cpu']
fig, axs = plt.subplots()
axs.hist([a,b], bins=32, stacked=True, color=['blue', 'red'], label = ['normal', 'anomaly'])
plt.legend()
plt.show()
# -
# ## One class SVM
# Take useful feature and standardize them
data = df[['cpu', 'hours', 'daylight', 'DayOfTheWeek', 'WeekDay']]
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data)
# train one class SVM
model = OneClassSVM(nu=0.95 * outliers_fraction) #nu=0.95 * outliers_fraction + 0.05
data = pd.DataFrame(np_scaled)
model.fit(data)
# add the data to the main
df['anomaly26'] = pd.Series(model.predict(data))
df['anomaly26'] = df['anomaly26'].map( {1: 0, -1: 1} )
print(df['anomaly26'].value_counts())
# +
# visualisation of anomaly throughout time (viz 1)
fig, ax = plt.subplots()
a = df.loc[df['anomaly26'] == 1, ['time_epoch', 'cpu']] #anomaly
ax.plot(df['time_epoch'], df['cpu'], color='blue')
ax.scatter(a['time_epoch'],a['cpu'], color='red')
plt.show()
# +
a = df.loc[df['anomaly26'] == 0, 'cpu']
b = df.loc[df['anomaly26'] == 1, 'cpu']
fig, axs = plt.subplots()
axs.hist([a,b], bins=32, stacked=True, color=['blue', 'red'], label=['normal', 'anomaly'])
plt.legend()
plt.show()
# -
# # RNN
# +
#select and standardize data
data_n = df[['cpu', 'hours', 'daylight', 'DayOfTheWeek', 'WeekDay']]
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data_n)
data_n = pd.DataFrame(np_scaled)
# important parameters and train/test size
prediction_time = 1
testdatasize = 1000
unroll_length = 50
testdatacut = testdatasize + unroll_length + 1
#total_row = data_n.index.stop
#prediction_time = 1
#testdatasize = int(total_row - (total_row * 0.95))
#unroll_length = int(total_row * 0.05)
#testdatacut = testdatasize + unroll_length + 1
x_train = data_n[0:-prediction_time-testdatacut].values
y_train = data_n[prediction_time:-testdatacut][0].values
# test data
x_test = data_n[0-testdatacut:-prediction_time].values
y_test = data_n[prediction_time-testdatacut: ][0].values
# +
def unroll(data, sequence_length=24):
    """Build overlapping windows of ``sequence_length`` consecutive rows.

    Returns an array with ``len(data) - sequence_length`` windows, each a
    contiguous slice of ``data`` (the sequence shape the LSTM expects).
    """
    windows = [data[start:start + sequence_length]
               for start in range(len(data) - sequence_length)]
    return np.asarray(windows)
# adapt the datasets for the sequence data shape
x_train = unroll(x_train,unroll_length)
x_test = unroll(x_test,unroll_length)
y_train = y_train[-x_train.shape[0]:]
y_test = y_test[-x_test.shape[0]:]
# see the shape
print("x_train", x_train.shape)
print("y_train", y_train.shape)
print("x_test", x_test.shape)
print("y_test", y_test.shape)
# -
# specific libraries for RNN
# keras is a high layer build on Tensorflow layer to stay in high level/easy implementation
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import time #helper libraries
from keras.models import model_from_json
import sys
# +
### CJB : 파라미터 다시 확인 후 조절할 것
# Build the model
model = Sequential()
model.add(LSTM(100, input_shape = (50, 5), return_sequences = True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.add(Activation('linear'))
start = time.time()
model.compile(loss='mean_squared_error', optimizer='rmsprop')
model.summary()
print('compilation time : {}'.format(time.time() - start))
# -
model.fit(x_train, y_train, batch_size=128, epochs=500, validation_split=0.1)
loaded_model = model
diff=[]
ratio=[]
p = loaded_model.predict(x_test)
# predictions = lstm.predict_sequences_multiple(loaded_model, x_test, 50, 50)
for u in range(len(y_test)):
pr = p[u][0]
ratio.append((y_test[u]/pr)-1)
diff.append(abs(y_test[u]- pr))
fig, axs = plt.subplots()
axs.plot(p,color='red', label='prediction')
axs.plot(y_test,color='blue', label='y_test')
plt.legend(loc='upper left')
plt.show()
| gantry-jupyterhub/time_series/unsupervised_anomaly_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Interactive blood-pressure screening questionnaire.
#
# Fixes from review:
#   * every terminal "no"/"yes" branch previously printed "Press enter to
#     exit" but then FELL THROUGH and kept asking the remaining questions;
#     each exit path now returns from main().
#   * typo "occassions" -> "occasions" in the user-facing prompts.
#   * the repeated ask/validate boilerplate is factored into _ask_yes_no().
# Wrapped in main() with a __main__ guard so it still runs identically as a
# script or notebook cell but no longer executes on import.


def _ask_yes_no(question):
    """Print `question`, read one answer, and return it.

    Unrecognized answers are reported (same wording as the original script)
    and returned unchanged, preserving the original fall-through behaviour.
    """
    print(question)
    answer = input()
    if answer not in ("yes", "no"):
        print("Sorry your answer is not recognized. Answer with yes or no in small caps")
    return answer


def main():
    print("Hey there, this is Dr. Sala and I would like to welcome you to this Hypertensive algorithm program ")
    print("Without further ado, ")
    name = input("what is your name? ")
    print("What a wonderful name " + name + ":-)")

    answer = _ask_yes_no("Now I am going to ask you a series of questions and I would like you to answer me sincerely. Is that ok?")
    if answer == "no":
        print("That is pretty unfortunate. Better be off. Thank you.")
        input("Press enter to exit")
        return
    if answer == "yes":
        print("That is good. We can now proceed.")

    answer = _ask_yes_no("Are you an adult who thinks he might be hypertensive?")
    if answer == "no":
        print("Thank you for using this app. Have a great day!")
        input("Press enter to exit")
        return
    if answer == "yes":
        print("You must have a lot of reason perhaps a family history? nonetheless we can now proceed")

    answer = _ask_yes_no("When you had your check up and on 2 separate occasions, is your blood pressure < 120/80?")
    if answer == "yes":
        print("You may Not have Hypertension. However, it is still best to consult your physician. Thank you")
        input("Press enter to exit")
        return
    if answer == "no":
        print("Is that so? We can now proceed with the algorithm")

    # NOTE(review): the 130/80 cut-off is from the 2017 ACC/AHA guideline, not
    # JNC 7 (JNC 7 stage 1 starts at 140/90).  The on-screen wording is kept
    # as written -- worth confirming with the author.
    answer = _ask_yes_no("When you had your check up and on 2 separate occasions, is your Blood Pressure higher than 130 / 80?")
    if answer == "no":
        print("Thank you for using this app. Have a great day")
        input("Press enter to exit")
        return
    if answer == "yes":
        print("Based on JNC 7 criteria, you may have Hypertension. We can proceed")

    answer = _ask_yes_no("When you had your check up and on 2 separate occasions, is your Blood Pressure between 130 - 139 / 80 - 89?")
    if answer == "yes":
        print("Based on JNC 7 criteria, you may have Hypertension Stage 1. However, it is still best to consult with your physician")
        input("Press enter to exit")
        return
    if answer == "no":
        print("Well, if it is higher then we may proceed")

    answer = _ask_yes_no("On 2 separate occasions, is your Blood Pressure >= 140/90?")
    if answer == "no":
        print("Thank you for your answer. Have a great day")
        input("Press enter to exit")
        return
    if answer == "yes":
        print("Based on JNC 7 criteria, you may have Hypertension Stage 2. However, it is still best to consult with your physician")

    print("I guess that's it. Thank you for using this app. Have a great day")
    input("Press enter to exit")


if __name__ == "__main__":
    main()
| Hypertension.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bayes Factors and Marginal Likelihood
# +
import arviz as az
import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from scipy.special import betaln
from scipy.stats import beta
print('Running on PyMC3 v{}'.format(pm.__version__))
# -
az.style.use('arviz-darkgrid')
# The "Bayesian way" to compare models is to compute the _marginal likelihood_ of each model $p(y \mid M_k)$, _i.e._ the probability of the observed data $y$ given the $M_k$ model. This quantity, the marginal likelihood, is just the normalizing constant of Bayes' theorem. We can see this if we write Bayes' theorem and make explicit the fact that all inferences are model-dependant.
#
# $$p (\theta \mid y, M_k ) = \frac{p(y \mid \theta, M_k) p(\theta \mid M_k)}{p( y \mid M_k)}$$
#
# where:
#
# * $y$ is the data
# * $\theta$ the parameters
# * $M_k$ one model out of K competing models
#
#
# Usually when doing inference we do not need to compute this normalizing constant, so in practice we often compute the posterior up to a constant factor, that is:
#
# $$p (\theta \mid y, M_k ) \propto p(y \mid \theta, M_k) p(\theta \mid M_k)$$
#
# However, for model comparison and model averaging the marginal likelihood is an important quantity. Although, it's not the only way to perform these tasks, you can read about model averaging and model selection using alternative methods [here](model_comparison.ipynb), [there](model_averaging.ipynb) and [elsewhere](GLM-model-selection.ipynb).
# ## Bayesian model selection
#
# If our main objective is to choose only one model, the _best_ one, from a set of models we can just choose the one with the largest $p(y \mid M_k)$. This is totally fine if **all models** are assumed to have the same _a priori_ probability. Otherwise, we have to take into account that not all models are equally likely _a priori_ and compute:
#
# $$p(M_k \mid y) \propto p(y \mid M_k) p(M_k)$$
#
# Sometimes the main objective is not to just keep a single model but instead to compare models to determine which ones are more likely and by how much. This can be achieved using Bayes factors:
#
# $$BF = \frac{p(y \mid M_0)}{p(y \mid M_1)}$$
#
# that is, the ratio between the marginal likelihood of two models. The larger the BF the _better_ the model in the numerator ($M_0$ in this example). To ease the interpretation of BFs some authors have proposed tables with levels of *support* or *strength*, just a way to put numbers into words.
#
# * 1-3: anecdotal
# * 3-10: moderate
# * 10-30: strong
# * 30-100: very strong
# * $>$ 100: extreme
#
# Notice that if you get numbers below 1 then the support is for the model in the denominator, tables for those cases are also available. Of course, you can also just take the inverse of the values in the above table or take the inverse of the BF value and you will be OK.
#
# It is very important to remember that these rules are just conventions, simple guides at best. Results should always be put into context of our problems and should be accompanied with enough details so others could evaluate by themselves if they agree with our conclusions. The evidence necessary to make a claim is not the same in particle physics, or a court, or to evacuate a town to prevent hundreds of deaths.
# ## Bayesian model averaging
#
# Instead of choosing one single model from a set of candidate models, model averaging is about getting one meta-model by averaging the candidate models. The Bayesian version of this weights each model by its marginal posterior probability.
#
# $$p(\theta \mid y) = \sum_{k=1}^K p(\theta \mid y, M_k) \; p(M_k \mid y)$$
#
# This is the optimal way to average models if the prior is _correct_ and the _correct_ model is one of the $M_k$ models in our set. Otherwise, _bayesian model averaging_ will asymptotically select the one single model in the set of compared models that is closest in [Kullback-Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence).
#
# Check this [example](model_averaging.ipynb) as an alternative way to perform model averaging.
# ## Some remarks
#
# Now we will briefly discuss some key facts about the _marginal likelihood_
#
# * The good
# * **Occam Razor included**: Models with more parameters have a larger penalization than models with fewer parameters. The intuitive reason is that the larger the number of parameters the more _spread_ the _prior_ with respect to the likelihood.
#
#
# * The bad
# * Computing the marginal likelihood is, generally, a hard task because it’s an integral of a highly variable function over a high dimensional parameter space. In general this integral needs to be solved numerically using more or less sophisticated methods.
#
# $$p(y \mid M_k) = \int_{\theta_k} p(y \mid \theta_k, M_k) \; p(\theta_k | M_k) \; d\theta_k$$
#
# * The ugly
# * The marginal likelihood depends **sensitively** on the specified prior for the parameters in each model $p(\theta_k \mid M_k)$.
#
# Notice that *the good* and *the ugly* are related. Using the marginal likelihood to compare models is a good idea because a penalization for complex models is already included (thus preventing us from overfitting) and, at the same time, a change in the prior will affect the computations of the marginal likelihood. At first this sounds a little bit silly; we already know that priors affect computations (otherwise we could simply avoid them), but the point here is the word **sensitively**. We are talking about changes in the prior that will keep inference of $\theta$ more or less the same, but could have a big impact in the value of the marginal likelihood.
# ## Computing Bayes factors
#
# The marginal likelihood is generally not available in closed-form except for some restricted models. For this reason many methods have been devised to compute the marginal likelihood and the derived Bayes factors, some of these methods are so simple and [naive](https://radfordneal.wordpress.com/2008/08/17/the-harmonic-mean-of-the-likelihood-worst-monte-carlo-method-ever/) that works very bad in practice. Most of the useful methods have been originally proposed in the field of Statistical Mechanics. This connection is explained because the marginal likelihood is analogous to a central quantity in statistical physics known as the _partition function_ which in turn is closely related to another very important quantity the _free-energy_. Many of the connections between Statistical Mechanics and Bayesian inference are summarized [here](https://arxiv.org/abs/1706.01428).
# ### Using a hierarchical model
#
# Computation of Bayes factors can be framed as a hierarchical model, where the high-level parameter is an index assigned to each model and sampled from a categorical distribution. In other words, we perform inference for two (or more) competing models at the same time and we use a discrete _dummy_ variable that _jumps_ between models. How much time we spend sampling each model is proportional to $p(M_k \mid y)$.
#
# Some common problems when computing Bayes factors this way is that if one model is better than the other, by definition, we will spend more time sampling from it than from the other model. And this could lead to inaccuracies because we will be undersampling the less likely model. Another problem is that the values of the parameters get updated even when the parameters are not used to fit that model. That is, when model 0 is chosen, parameters in model 1 are updated but since they are not used to explain the data, they only get restricted by the prior. If the prior is too vague, it is possible that when we choose model 1, the parameter values are too far away from the previous accepted values and hence the step is rejected. Therefore we end up having a problem with sampling.
#
# In case we find these problems, we can try to improve sampling by implementing two modifications to our model:
#
# * Ideally, we can get a better sampling of both models if they are visited equally, so we can adjust the prior for each model in such a way to favour the less favourable model and disfavour the most favourable one. This will not affect the computation of the Bayes factor because we have to include the priors in the computation.
#
# * Use pseudo priors, as suggested by Kruschke and others. The idea is simple: if the problem is that the parameters drift away unrestricted, when the model they belong to is not selected, then one solution is to try to restrict them artificially, but only when not used! You can find an example of using pseudo priors in a model used by Kruschke in his book and [ported](https://github.com/aloctavodia/Doing_bayesian_data_analysis) to Python/PyMC3.
#
# If you want to learn more about this approach to the computation of the marginal likelihood see [Chapter 12 of Doing Bayesian Data Analysis](http://www.sciencedirect.com/science/book/9780124058880). This chapter also discuss how to use Bayes Factors as a Bayesian alternative to classical hypothesis testing.
# ### Analytically
#
# For some models, like the beta-binomial model (AKA the _coin-flipping_ model) we can compute the marginal likelihood analytically. If we write this model as:
#
# $$\theta \sim Beta(\alpha, \beta)$$
# $$y \sim Bin(n=1, p=\theta)$$
#
# the _marginal likelihood_ will be:
#
# $$p(y) = \binom {n}{h} \frac{B(\alpha + h,\ \beta + n - h)} {B(\alpha, \beta)}$$
#
# where:
#
# * $B$ is the [beta function](https://en.wikipedia.org/wiki/Beta_function) not to get confused with the $Beta$ distribution
# * $n$ is the number of trials
# * $h$ is the number of success
#
# Since we only care about the relative value of the _marginal likelihood_ under two different models (for the same data), we can omit the binomial coefficient $\binom {n}{h}$, thus we can write:
#
# $$p(y) \propto \frac{B(\alpha + h,\ \beta + n - h)} {B(\alpha, \beta)}$$
#
# This expression has been coded in the following cell, but with a twist. We will be using the `betaln` function instead of the `beta` function, this is done to prevent underflow.
def beta_binom(prior, y):
    """
    Compute the marginal likelihood, analytically, for a beta-binomial model.

    prior : tuple
        (alpha, beta) parameters of the Beta prior
    y : array
        observations coded as 1 (success) and 0 (failure)
    """
    a, b = prior
    successes = np.sum(y)
    trials = len(y)
    # Work in log space via betaln and exponentiate once at the end,
    # which avoids the underflow that the plain beta function would hit.
    log_marginal = betaln(a + successes, b + trials - successes) - betaln(a, b)
    return np.exp(log_marginal)
# Our data for this example consist on 100 "flips of a coin" and the same number of observed "heads" and "tails". We will compare two models one with a uniform prior and one with a _more concentrated_ prior around $\theta = 0.5$
y = np.repeat([1, 0], [50, 50]) # 50 "heads" and 50 "tails"
priors = ((1, 1), (30, 30))
for a, b in priors:
distri = beta(a, b)
x = np.linspace(0, 1, 100)
x_pdf = distri.pdf(x)
plt.plot (x, x_pdf, label=r'$\alpha$ = {:d}, $\beta$ = {:d}'.format(a, b))
plt.yticks([])
plt.xlabel('$\\theta$')
plt.legend()
# The following cell returns the Bayes factor
BF = (beta_binom(priors[1], y) / beta_binom(priors[0], y))
print(round(BF))
# We see that the model with the more concentrated prior $Beta(30, 30)$ has $\approx 5$ times more support than the model with the more extended prior $Beta(1, 1)$. Besides the exact numerical value, this should not be surprising since the prior for the most favoured model is concentrated around $\theta = 0.5$ and the data $y$ has an equal number of heads and tails, consistent with a value of $\theta$ around 0.5.
# ### Sequential Monte Carlo
#
# The [Sequential Monte Carlo](SMC2_gaussians.ipynb) sampler is a method that basically progresses by a series of successive interpolated (or *annealed*) sequences from the prior to the posterior. A nice by-product of this process is that we get an estimation of the marginal likelihood. Actually for numerical reasons the returned value is the marginal log likelihood (this helps to avoid underflow).
# +
n_chains = 1000
models = []
traces = []
for alpha, beta in priors:
with pm.Model() as model:
a = pm.Beta('a', alpha, beta)
yl = pm.Bernoulli('yl', a, observed=y)
trace = pm.sample_smc(1000, random_seed=42)
models.append(model)
traces.append(trace)
# -
BF_smc = np.exp(models[1].marginal_log_likelihood - models[0].marginal_log_likelihood)
print(round(BF_smc))
# As we can see from the previous cell, SMC gives essentially the same answer as the analytical calculation!
#
# The advantage of using SMC is that we can use it to compute the _marginal likelihood_ for a wider range of models as a closed-form expression is no longer needed. The cost we pay for this flexibility is a more expensive computation. We should take into account that for more complex models a more accurate estimation of the _marginal likelihood_ will most likely need a larger number of `draws`. Additionally, a larger number of `n_steps` may help, specially if after stage 1 we notice that SMC uses a number of steps that are close to `n_steps`, i.e. SMC is having trouble to automatically reduce this number.
# ## Bayes factors and inference
#
# In this example we have used Bayes factors to judge which model seems to be better at explaining the data, and we get that one of the models is $\approx 5$ _better_ than the other.
#
# But what about the posterior we get from these models? How different they are?
az.summary(traces[0], var_names='a', kind='stats').round(2)
az.summary(traces[1], var_names='a', kind='stats').round(2)
# We may argue that the results are pretty similar, we have the same mean value for $\theta$, and a slightly wider posterior for `model_0`, as expected since this model has a wider prior. We can also check the posterior predictive distribution to see how similar they are.
_, ax = plt.subplots(figsize=(9, 6))
ppc_0 = pm.sample_posterior_predictive(traces[0], 100, models[0], size=(len(y), 20))
ppc_1 = pm.sample_posterior_predictive(traces[1], 100, models[1], size=(len(y), 20))
for m_0, m_1 in zip(ppc_0['yl'].T, ppc_1['yl'].T):
az.plot_kde(np.mean(m_0, 0), ax=ax, plot_kwargs={'color':'C0'})
az.plot_kde(np.mean(m_1, 0), ax=ax, plot_kwargs={'color':'C1'})
ax.plot([], label='model_0')
ax.plot([], label='model_1')
ax.legend()
ax.set_xlabel('$\\theta$')
ax.set_yticks([]);
# In this example the observed data $y$ is more consistent with `model_1` (because the prior is concentrated around the correct value of $\theta$) than `model_0` (which assigns equal probability to every possible value of $\theta$), and this difference is captured by the Bayes factors. We could say Bayes factors are measuring which model, as a whole, is better, including details of the prior that may be irrelevant for parameter inference. In fact in this example we can also see that it is possible to have two different models, with different Bayes factors, but nevertheless get very similar predictions. The reason is that the data is informative enough to reduce the effect of the prior up to the point of inducing a very similar posterior. As predictions are computed from the posterior we also get very similar predictions. In most scenarios when comparing models what we really care is the predictive accuracy of the models, if two models have similar predictive accuracy we consider both models as similar. To estimate the predictive accuracy we can use tools like WAIC, LOO or cross-validation.
| docs/source/notebooks/Bayes_factor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
# %matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
# Load the graduate-admissions dataset and take a first look at its structure.
df = pd.read_csv("Admission_Predict_Ver1.1.csv")
df.info()
df.head()
df.shape
# Shorten the verbose column names.
# NOTE(review): in the published CSV the target column is usually
# "Chance of Admit " (with spaces); if so, the "Chance_of_Admit" key here never
# matches and the COA rename silently does nothing — confirm against df.columns.
df.rename(columns={ "University Rating": "Rating", "GRE Score": "GRE", "TOEFL Score":"TOEFL", "Chance_of_Admit": "COA"}, inplace=True)
df.head()
df['LOGCGPA'] = np.log(df.CGPA+1) # Get Log Transformed CGPA to find Linearity
# +
# Side-by-side histograms: raw CGPA (left) vs. log-transformed CGPA (right).
plt.subplot(1, 2, 1)
(df.CGPA).plot.hist(bins=20, figsize=(15, 6), edgecolor='white')
plt.xlabel('CGPA', fontsize=12)
plt.title('CGPA Distribution', fontsize=12)
#_________________________________________________NEXT______________________________________________
# Plot Log CGPA Distribution
plt.subplot(1, 2, 2)
(df.LOGCGPA).plot.hist(figsize=(15, 6), edgecolor='white')
# BUG FIX: the label previously read 'log(price+1)' (copy-paste from a housing
# dataset); this axis shows log(CGPA+1). Also fixed 'Distribuition' typo.
plt.xlabel('log(CGPA+1)', fontsize=12)
plt.title('Log CGPA Distribution', fontsize=12)
plt.show()
# -
# -
# visualize the relationship between the features and the response using scatterplots
# Scatter each candidate feature against log CGPA on a shared y-axis.
fig, axs = plt.subplots(1, 3, sharey=True)
df.plot(kind='scatter', x='Rating', y='LOGCGPA', ax=axs[0], figsize=(15, 7))
for pos, feature in enumerate(['GRE', 'TOEFL'], start=1):
    df.plot(kind='scatter', x=feature, y='LOGCGPA', ax=axs[pos])
# Design matrix for the single-feature regression: TOEFL score only,
# reshaped into the (n_samples, 1) column sklearn expects.
X = df.TOEFL.values.reshape((len(df.TOEFL), 1))
Y = df.CGPA
# +
# Import Linear Regression Model
from sklearn.linear_model import LinearRegression
# NOTE: the original code fitted a LinearRegression on the FULL data here and
# then immediately created and fit a fresh model on the training split below.
# The full-data fit was dead code (its model was discarded) and invited
# train/test leakage, so it has been removed.
# -
# Split data into train/test sets (hold out 20% for evaluation).
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=42)
# +
# Initialize LR Model
lr = LinearRegression()
# Fit the model on the training split only.
lr.fit(X_train, y_train)
# Make predictions on the held-out test split.
predictions = lr.predict(X_test)
# +
# Store formatted metrics (strings with 3 decimal places).
r2 = format(r2_score(y_test, predictions), '.3f')
rmse = format(np.sqrt(mean_squared_error(y_test, predictions)), '.3f')
mae = format(mean_absolute_error(y_test, predictions), '.3f')
# Print the stored values instead of recomputing every metric a second time
# (the original recomputed each one inside the print calls).
print("R Squared Score: ", r2)
print("Root Mean Squared Error: ", rmse)
print("Mean Absolute Error: ", mae)
# -
result = pd.DataFrame({'Model': ['Single'], 'R Squared': [r2], 'RMSE': [rmse], 'MAE': [mae]})
result
# Multiple regression
# BUG FIX: also drop LOGCGPA — it is log(CGPA + 1), a deterministic function of
# the target CGPA, so keeping it as a feature is target leakage.
X = df.drop(['CGPA', 'LOGCGPA'], axis=1)
Y = df.CGPA
# Split data into train/test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=42)
# +
# Initialize LR Model
lr = LinearRegression()
# Fit the model
lr.fit(X_train, y_train)
# Make predictions
predictions = lr.predict(X_test)
# +
# Store formatted metrics and print the stored values (no recomputation).
r2 = format(r2_score(y_test, predictions), '.3f')
rmse = format(np.sqrt(mean_squared_error(y_test, predictions)), '.3f')
mae = format(mean_absolute_error(y_test, predictions), '.3f')
print("R Squared Score: ", r2)
print("Root Mean Squared Error: ", rmse)
print("Mean Absolute Error: ", mae)
# -
result_2 = pd.DataFrame({'Model': ['Multiple'], 'R Squared': [r2], 'RMSE': [rmse], 'MAE': [mae]})
# DataFrame.append was removed in pandas 2.0; concat is the supported spelling
# (same result: rows stacked with their original indices).
result = pd.concat([result, result_2])
result
| Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import io
import detectron2
# import some common detectron2 utilities
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
# import some common libraries
import numpy as np
import cv2
import torch
# Show the image in ipynb
from IPython.display import clear_output, Image, display
import PIL.Image
def showarray(a, fmt='jpeg'):
    """Render a numeric array inline in the notebook as an encoded image."""
    clipped = np.uint8(np.clip(a, 0, 255))
    buffer = io.BytesIO()
    PIL.Image.fromarray(clipped).save(buffer, fmt)
    display(Image(data=buffer.getvalue()))
# +
# Load the Visual Genome vocabularies (1600 objects / 400 attributes / 20 rels)
# and register them on detectron2's metadata catalog under the "vg" key.
data_path = 'data/genome/1600-400-20'

vg_classes = []
with open(os.path.join(data_path, 'objects_vocab.txt')) as f:
    # Iterate the file directly (no readlines()) and avoid shadowing the
    # builtin `object`. Lines may hold comma-separated synonyms; keep the first.
    for line in f:
        vg_classes.append(line.split(',')[0].lower().strip())

vg_attrs = []
with open(os.path.join(data_path, 'attributes_vocab.txt')) as f:
    for line in f:
        vg_attrs.append(line.split(',')[0].lower().strip())

MetadataCatalog.get("vg").thing_classes = vg_classes
MetadataCatalog.get("vg").attr_classes = vg_attrs
# -
# -
# Visual Genome detector: Faster R-CNN R101-C4 with an attribute head
# (Caffe max-pool variant), weights converted from the original Caffe model.
cfg = get_cfg()
cfg.merge_from_file("../configs/VG-Detection/faster_rcnn_R_101_C4_attr_caffemaxpool.yaml")
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300  # keep at most 300 proposals after RPN NMS
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2
# VG Weight
cfg.MODEL.WEIGHTS = "http://nlp.cs.unc.edu/models/faster_rcnn_from_caffe_attr.pkl"
predictor = DefaultPredictor(cfg)
# +
# coding=utf-8
# Copyleft 2019 Project LXRT
import sys
import csv
import base64
import time
import numpy as np
# Allow very long TSV fields (the base64 feature blobs exceed the default limit).
csv.field_size_limit(sys.maxsize)

FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
              "attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]


def load_obj_tsv(fname, topk=None):
    """Load object features from tsv file.

    :param fname: The path to the tsv file.
    :param topk: Only load features for top K images (lines) in the tsv file.
        Will load all the features if topk is either -1 or None.
    :return: A list of image object features where each feature is a dict.
        See FIELDNAMES above for the keys in the feature dict.
        (Fixed docstring typo: previously said "FILENAMES".)
    """
    data = []
    start_time = time.time()
    print("Start to load Faster-RCNN detected objects from %s" % fname)
    with open(fname) as f:
        reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
        # (The original enumerated the reader but never used the index.)
        for item in reader:
            # Plain integer columns.
            for key in ['img_h', 'img_w', 'num_boxes']:
                item[key] = int(item[key])

            # Base64-encoded numpy blobs: (field, target shape, dtype).
            boxes = item['num_boxes']
            decode_config = [
                ('objects_id', (boxes, ), np.int64),
                ('objects_conf', (boxes, ), np.float32),
                ('attrs_id', (boxes, ), np.int64),
                ('attrs_conf', (boxes, ), np.float32),
                ('boxes', (boxes, 4), np.float32),
                ('features', (boxes, -1), np.float32),
            ]
            for key, shape, dtype in decode_config:
                item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
                item[key] = item[key].reshape(shape)
                # Guard against accidental mutation of the decoded buffers.
                item[key].setflags(write=False)

            data.append(item)
            # topk == -1 intentionally never triggers this, loading everything.
            if topk is not None and len(data) == topk:
                break
    elapsed_time = time.time() - start_time
    print("Loaded %d images in file %s in %d seconds." % (len(data), fname, elapsed_time))
    return data
data = load_obj_tsv('data/caffe_coco_features/train2014_obj36.tsv')
# +
NUM_OBJECTS = 36
from torch import nn
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs, fast_rcnn_inference_single_image
from detectron2.structures.boxes import Boxes
from detectron2.structures.instances import Instances
def doit(raw_image, raw_boxes):
    """Re-extract RoI features for the GIVEN boxes (no proposal generation).

    :param raw_image: image array (H, W, 3); BGR as loaded by cv2.imread at the
        call site.
    :param raw_boxes: (N, 4) numpy box array in ORIGINAL image coordinates.
    :return: (instances, roi_features) — detectron2 Instances for visualization
        and the pooled per-box feature matrix.
    Relies on the module-level `predictor`; requires a CUDA device (.cuda()).
    """
    # Wrap the caller's boxes in detectron2's Boxes structure, on GPU.
    raw_boxes = Boxes(torch.from_numpy(raw_boxes).cuda())
    with torch.no_grad():
        raw_height, raw_width = raw_image.shape[:2]
        print("Original image size: ", (raw_height, raw_width))
        # Preprocessing: resize with the predictor's standard test transform.
        image = predictor.transform_gen.get_transform(raw_image).apply_image(raw_image)
        print("Transformed image size: ", image.shape[:2])
        # Scale the boxes into the transformed image's coordinate frame.
        new_height, new_width = image.shape[:2]
        scale_x = 1. * new_width / raw_width
        scale_y = 1. * new_height / raw_height
        #print(scale_x, scale_y)
        boxes = raw_boxes.clone()
        boxes.scale(scale_x=scale_x, scale_y=scale_y)
        # ---- HWC uint8 -> CHW float32 tensor, then model-side normalization.
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
        inputs = [{"image": image, "height": raw_height, "width": raw_width}]
        images = predictor.model.preprocess_image(inputs)
        # Run Backbone Res1-Res4
        features = predictor.model.backbone(images.tensor)
        # Run RoI head for each given box (RoI Pooling + Res5); the RPN is skipped.
        proposal_boxes = [boxes]
        features = [features[f] for f in predictor.model.roi_heads.in_features]
        box_features = predictor.model.roi_heads._shared_roi_transform(
            features, proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3])  # pooled to 1x1 per box
        # print('Pooled features size:', feature_pooled.shape)
        # Class, attribute, and box-delta predictions for each given box.
        pred_class_logits, pred_attr_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(feature_pooled)
        # Drop the trailing background column before taking the arg-max.
        pred_class_prob = nn.functional.softmax(pred_class_logits, -1)
        pred_scores, pred_classes = pred_class_prob[..., :-1].max(-1)
        attr_prob = pred_attr_logits[..., :-1].softmax(-1)
        max_attr_prob, max_attr_label = attr_prob.max(-1)
        # Detectron2 Formatting (for visualization only) — boxes are reported in
        # the ORIGINAL coordinate frame since they were never regressed.
        roi_features = feature_pooled
        instances = Instances(
            image_size=(raw_height, raw_width),
            pred_boxes=raw_boxes,
            scores=pred_scores,
            pred_classes=pred_classes,
            attr_scores = max_attr_prob,
            attr_classes = max_attr_label
        )
        return instances, roi_features
# +
# Sanity check: re-extract features for each cached record (boxes come from the
# TSV cache, not from fresh proposals) and compare against the stored values.
for record in data:
    image_bgr = cv2.imread('data/images/' + record['img_id'] + '.jpg')
    instances, features = doit(image_bgr, record['boxes'])
    pred = instances.to('cpu')
    viz = Visualizer(image_bgr[:, :, :], MetadataCatalog.get("vg"), scale=1.2)
    viz = viz.draw_instance_predictions(pred)
    showarray(viz.get_image()[:, :, ::-1])
    print("Feature Differences:", (features.cpu().numpy() - record['features']).mean())
    print("Object Ids Differences:", (instances.pred_classes.cpu().numpy() != record['objects_id']).sum())
    print("Attribute Ids Differences:", (instances.attr_classes.cpu().numpy() != record['attrs_id']).sum())
# -
| demo/.ipynb_checkpoints/feature_correctness-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.4 64-bit (''.venv'': venv)'
# name: python3
# ---
# ## Preparations
# Let's first import the relevant libraries and then load the data into the dataframe df
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
# -
# Load the King County house-sales dataset.
df = pd.read_csv("../data/King_County_House_prices_dataset.csv")
# ## Basic description of the data
# First, we want to take a first look at our data and analyse its contents
df.head(5)
df.info()
#
# 1. Non-null datapoints: Except the columns 'waterfront' and 'yr_renovated' all the columns only contain non-null datapoints. 21597 datapoints exist.
# 1. Datatype: 'date' and 'sqft_basement' have a object datatype, the rest are either int 64 or float 64
#
#
# Lets take a look at the distribution of prices since this is the independent value we are most interested in
#
# Price distribution over all sales (histogram + KDE).
plt.figure(figsize=(6, 3))
# NOTE: sns.distplot is deprecated in modern seaborn (histplot/displot replace
# it); kept here for environment compatibility.
fig = sns.distplot(df.price, color='blue', bins=100, hist_kws={'alpha':0.4})
fig.set(xlabel='Price', ylabel='Density')
# BUG FIX: plt.show was referenced but never called (missing parentheses).
plt.show()
df.price.describe()
#
# The distribution of the price seems to follow a normal distribution. However, at around 1.5 million the distribution has a bump. The prices go up to a maximum of 7.7 million.
# Above 1 mill:
# +
# Zoom in on the tail: histogram of sales above one million.
plt.figure(figsize=(6, 3))
fig = sns.histplot(df.query('price > 1000000').price, color='blue', bins=100)
fig.set(xlabel='Price', ylabel='Density')
# BUG FIX: plt.show was referenced but never called (missing parentheses).
plt.show()
# -
# Above a price of 4 mill we can still see some datapoints. Since our client is more interested in cheaper houses, we should concentrate on houses below 1 mill. Later, we will exclude values above 1 mill for our model
# We will plot now every column of the dataframe df (excluding the id column)
# Drop the id column (a pure identifier) and histogram every remaining feature.
df_no_id = df.drop(columns='id')
df_no_id.hist(bins=30, figsize=(15, 19))
# bedrooms, bathrooms, sqft_living, sqft_lot, sqft_above, long, sqft_living15,
# sqft_lot15 seem to follow the same distribution as price.
#
# (date and sqft_basement are object-typed and therefore not plotted; they were
# not of interest here.)
# Correlation of every feature with price (price's self-correlation sliced off),
# keeping only the features with |r| > 0.5.
df_no_id_corr = df_no_id.corr()['price'][1:]
best_correlators = df_no_id_corr[df_no_id_corr.abs() > 0.5]
print ('correlations:\n' , df_no_id_corr, '\n\n')
print ('best correlators: \n' , best_correlators)
# Price against each feature, five scatter panels per row.
for start in range(0, len(df_no_id.columns), 5):
    sns.pairplot(data=df_no_id, y_vars=['price'], x_vars=df_no_id.columns[start:start + 5])
# Bedrooms, bathrooms, sqft_living, sqft_lot, grade, sqft_above, sqft_living15, sqft_lot15 seem to correlate with the price. Since zipcode is more like a categorical factor, it won't correlate here with the price when we just plot its value
# ## Hypotheses
# ### Testing every factor
# Now I will test if any of the factors shows a positve result when making an OLS model
smf.ols(formula='price ~ bedrooms + bathrooms + sqft_living + sqft_lot + floors + waterfront + view + condition + grade + sqft_above + yr_built + yr_renovated + zipcode + lat + long + sqft_living15 + sqft_lot15', data=df).fit().summary()
# The highest t-values are reached for bedrooms (-16.895), bathrooms (11.564), sqft_living (29.892), waterfront (29.679), view (20.696), grade (36.470), yr_built(-31.111), zipcode (-14.614), lat (47.578) and long (-13.964 )
#
# Since the zipcode are random numbers its correlation with the price does not make any sense here. Also the lat and long are most likely not good predictors for house prices, since cities can be complex with many different areas, regarding the housing prices.
# ### `My hypotheses:`
#
# 1. If there is a waterfront the house will be more expensive
# <br><br>
# 1. The worse the grade and the condition the cheaper the house
# <br><br>
# 1. The price depends on the zip code
# <br><br>
# <br><br>
# *the longer the last renovation has been done the cheaper the price.
# year renovated: does not want frehsly renovated*
#
#
#
# # Old Hypothesis (not relevant)
# ### Price dependency on Waterfront
# First, I want to check the price dependency by the waterfront. The waterfront factor is a categorical factor (0 meaning no waterfront, 1 meaning with waterfront). 2376 values are unknown (NaN). They will not be included in the analysis.
# The mean value of the price already gives us a good hint that the waterfront has an influence on the price:
# Mean price per waterfront category (0 = no waterfront, 1 = waterfront).
df.groupby('waterfront').price.mean()
# The mean price of a house with a waterfront is around three times higher.
# Now I want to plot price against waterfront in a scatterplot with a logarithmic axis for price.
fig = df.plot(kind='scatter', x='waterfront', y='price', alpha=0.4)
fig.set(xlabel='Waterfront', ylabel='Price')
fig.set_yscale('log')
# Here we can also see the shift in price depending on the waterfront value
# Next I want to test my hypothesis using the OLS model.
smf.ols(formula='price ~ waterfront', data=df).fit().summary()
# price = 5.326 * 10<sup>05</sup> + 1.185 * 10<sup>06</sup> * waterfront
#
# The t-value of the coefficient and the slope seem quite high and the P>|t| quite low.
sns.lmplot(data=df, x='waterfront', y='price')
# Interpretation:
# <p>Usually an expensive house is not defined by its location at a body of water. Of course there are many expensive houses that do not have a waterfront. That is probably the reason why the R<sup>2</sup> has such a low value (0.076)
# ### Price dependency on condition and grade
# Here, I want to analyse the dependence of the price on the condition and the grade. First, I want to get an overview over these factors. Therefore, I am grouping the dataset on the condition/grade and checking the mean price.
# Mean price per condition level and per grade level.
df.groupby('condition').price.mean()
df.groupby('grade').price.mean()
# We can already see that the grade is a very stable predictor of the mean price. For the condition this relationship is not so strong; however, it still seems to exist.
# Next I will plot these variables again:
fig = df.plot(kind='scatter', x='condition', y='price', alpha=0.4)
fig = df.plot(kind='scatter', x='grade', y='price', alpha=0.4)
fig.set_yscale('log')
# The plots underline the previous finding.
# Next I want to test my hypothesis using the OLS model.
smf.ols(formula='price ~ condition', data=df).fit().summary()
smf.ols(formula='price ~ grade', data=df).fit().summary()
# The t value for condition is not quite as high. Also, with an R<sup>2</sup> of 0.001 this value is extremely low. For grade, however, the t value and its corresponding P>|t| are relatively high and low, respectively. Also the R<sup>2</sup> value (0.446) is much higher compared to condition.
# price = 4.709 x 10<sup>5</sup> + 2.036 x 10<sup>4</sup> x condition
# <br><br>
# price = -1.061 x 10<sup>6</sup> + 2.092 x 10<sup>5</sup> x grade
# Interpretation:
# <p>The price of the house seems to correlate with the grade of the house. This grade is based on the King County grading system, which seems to be a much better predictor than the overall condition. It is not clear to me how the overall condition was estimated.
#
# Let's plot the residuals:
# +
# NOTE(review): placeholder cell — the residual plot was never implemented.
...
# -
sns.lmplot(data=df, x='condition', y='price')
sns.lmplot(data=df, x='grade', y='price')
# In the condition plot we can see that many pricey houses got a medium condition. It might be that these houses actually have a good grade but need a renovation. I will exclude houses where the last renovation was long ago.
# First, I will look at the distribution of yr_renovated, which shows the year of the last renovation.
df.yr_renovated.plot(kind='hist',bins=100)
# Most of the values are zero, which probably means that it is not known when they were last renovated, or they were never renovated. If we take a look at the distribution of the actually renovated houses, we see this:
df[df['yr_renovated']>1750].yr_renovated.plot(kind='hist', bins =50)
# Let's check the condition distribution of these houses:
df_ren = df[df['yr_renovated']>1750]
df_ren.plot(kind='scatter',x='condition',y='price')
# If we make a regression model, we get the following:
# NOTE(review): this fits on the full df, not the filtered df_ren — presumably
# data=df_ren was intended; confirm before drawing conclusions from the summary.
smf.ols(formula='price ~ condition', data=df).fit().summary()
# Again, the R<sup>2</sup> value is extremely low.
# <p>Let's look at the yr_renovated distribution again.
# Now let's exclude houses that were not renovated after 1985
df_ren = df[df['yr_renovated']>1985]
df_ren.plot(kind='scatter',x='condition',y='price')
# The distribution still looks very similar. Excluding certain values in yr_renovated did not change the appearance of the condition distribution.
# I will quickly check what the condition correlates with:
df_no_id_corr = df_no_id.corr()['condition']
df_no_id_corr
# Condition seems to correlate with the year the house was built.
df.yr_built.plot(kind='hist')
# Let's exclude houses that were built before 1950 and plot the distribution of the condition again.
df_built = df[df['yr_built']>=1950]
df_built.plot(kind='scatter',x='condition',y='price')
# The distribution still looks the same. I cannot figure out why the condition distribution looks like that...
# ### Price Dependency on Zip Code
# Now I want to look if the price depends on the zip code. First I want to get an overview of the data by
# 1. grouping the houses with the same zip code together, calculating their mean value and sorting them on the price.
# 1. Next, I am creating a new dataframe from this Series, and turning the zipcodes into strings
# 1. Last, I want to plot this new dataframe as a scatterplot
# +
#1 Mean price per zipcode, sorted ascending.
sorted_df = df.groupby('zipcode').price.mean().sort_values()
#2 Rebuild as a DataFrame and stringify the zipcodes so they plot categorically.
df_sorted_df = pd.DataFrame({'zipcode':sorted_df.index, 'price':sorted_df.values})
df_sorted_df['zipcode'] = df_sorted_df['zipcode'].apply(str)
#3 Scatter the sorted means; rotate the tick labels so the zipcodes stay legible.
fig = df_sorted_df.plot(kind='scatter', x='zipcode', y='price')
for tick in fig.get_xticklabels():
    tick.set_rotation(90)
# BUG FIX: plt.show was referenced but never called (missing parentheses).
plt.show()
# -
# We can see here that different zip codes clearly have different average prices. Four zip codes seem to be exceptionally expensive. Furthermore, there seems to be one cluster of zip codes that are separated by the others by a jump in the price.
# We have a good hint that the zip codes influence the price. Since the zipcode is a categorical value we have to use dummy variables in order to perform linear regression:
# 1. Copying the dataframe into a new dataframe to avoid changes in the original one
# 1. create dummy variables for the zipcode
# 1. Creating a new dataframe with only the price as one columns. Then adding the zip_dummies to this new dataframe
# +
#1 BUG FIX: `df_edit = df` only created an alias, so the stated goal of
#   "copying the dataframe to avoid changes in the original one" was not met;
#   .copy() produces an actual independent copy.
df_edit = df.copy()
#2 One-hot encode the zipcodes (first level dropped to avoid the dummy trap).
zip_dummies = pd.get_dummies(data=df_edit['zipcode'], prefix='zip', drop_first=True)
#3 New frame holding only the price column plus the zipcode dummies.
df_price = df.price
df_price = pd.DataFrame({'price':df_price.values})
df_edit = pd.concat([df_price, zip_dummies], axis=1)
df_edit.head()
# -
# As an example I will plot two of the zip codes:
# Example dummy columns: one cheap zipcode (98002) and one expensive one (98039).
fig = df_edit.plot(kind='scatter', x='zip_98002', y='price', alpha=0.4)
fig = df_edit.plot(kind='scatter', x='zip_98039', y='price', alpha=0.4)
# The idea was to take all of them at once. (translated from German)
# +
# OLS of price on all zipcode dummies simultaneously, with an intercept.
X = df_edit.drop(['price'], axis=1)
y = df_edit.price
X = sm.add_constant(X)
model = sm.OLS(y, X)
results = model.fit()
results.summary()
# -
# Lets re-plot the mean values of price sorted by the zipcode again:
# Re-plot the mean price per zipcode (sorted ascending).
df_sorted_df.plot(kind='scatter', x='zipcode', y='price')
# Can we make a regression model with these data?
# I will assign each zipcode an index
# +
# factorize() maps each zipcode to an integer in order of appearance, which —
# after the earlier sort — is ascending-mean-price order.
labels = pd.factorize(df_sorted_df.zipcode)
df_sorted_df['idx_zipcode']=labels[0]
df_sorted_df
# -
smf.ols(formula='price ~ idx', data=df_sorted_df).fit().summary()
# In this model I found good values for t, P>|t|, and R<sup>2</sup>. However, I am not sure if this process is correct...
sns.lmplot(data=df_sorted_df, x='idx_zipcode', y='price')
| notebooks/Data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import h5py
import numpy as np
import os,random
from tensorflow.keras.layers import Input,Reshape,ZeroPadding2D,MaxPool2D,Conv1D,Conv2D,Dropout,Flatten,Dense,Activation,MaxPooling2D,AlphaDropout
from tensorflow.keras import layers
import tensorflow.keras.models as Model
from tensorflow.keras.regularizers import *
from tensorflow.keras.optimizers import Adam
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import gc
# +
# Sample 30k examples per HDF5 shard (by one shared random index set) and stack.
# NOTE(review): no mode is given here — pass 'r' explicitly like the loop below.
f = h5py.File('../ExtractDataset/part0.h5')
sample_num = f['X'].shape[0]
# NOTE(review): np.random.choice defaults to replace=True, so idx can contain
# duplicate rows — confirm that is intended.
idx = np.random.choice(range(0,sample_num),size=30000)
# idx = np.random.choice(range(0,sample_num),size=60000)
X = f['X'][:][idx]
Y = f['Y'][:][idx]
Z = f['Z'][:][idx]
f.close()
# NOTE(review): the same idx (drawn from part0's length) is reused for every
# shard, which assumes all 24 shards have identical length — confirm.
for i in range(1,24):
    # if i%1 == 0:
    #     # !free -m
    #     gc_num = gc.collect(2)
    #     print(gc_num)
    '''if i == 10:
        continue'''
    filename = '../ExtractDataset/part'+str(i) + '.h5'
    print(filename)
    f = h5py.File(filename,'r')
    X = np.vstack((X,f['X'][:][idx]))
    Y = np.vstack((Y,f['Y'][:][idx]))
    Z = np.vstack((Z,f['Z'][:][idx]))
    f.close()
    del f
    # Force a full garbage collection between shards to cap peak memory.
    gc_num = gc.collect(2)
    print(gc_num)
print('X-size:',X.shape)
print('Y-size:',Y.shape)
print('Z-size:',Z.shape)
# -
# ## Data preprocessing, and obtain training set and test set
n_examples = X.shape[0]
# n_train = int(n_examples * 0.9877)
n_train = int(n_examples * 0.8)  # 80/20 train/test split
train_idx = np.random.choice(range(0,n_examples), size=n_train, replace=False) #Randomly select training sample subscript
# NOTE(review): set-difference ordering is an implementation detail of CPython
# int hashing; the test set's order is not guaranteed by the language spec.
test_idx = list(set(range(0,n_examples))-set(train_idx)) #Test sample index
X_train = X[train_idx] #training samples
X_test = X[test_idx] #testing samples
Y_train = Y[train_idx]
Y_test = Y[test_idx]
print("X_train:",X_train.shape)
print("Y_train:",Y_train.shape)
print("X_test:",X_test.shape)
print("Y_test:",Y_test.shape)
X_train[0]  # peek at one raw training sample
# +
# The 24 modulation class names; assumed to match the one-hot column order of
# Y (used that way by the confusion-matrix code below) — confirm against the
# dataset's label specification.
classes = ['32PSK',
           '16APSK',
           '32QAM',
           'FM',
           'GMSK',
           '32APSK',
           'OQPSK',
           '8ASK',
           'BPSK',
           '8PSK',
           'AM-SSB-SC',
           '4ASK',
           '16PSK',
           '64APSK',
           '128QAM',
           '128APSK',
           'AM-DSB-SC',
           'AM-SSB-WC',
           '64QAM',
           'QPSK',
           '256QAM',
           'AM-DSB-WC',
           'OOK',
           '16QAM']
def baseline_cnn_model(X_train,classes):
    """Build the baseline CNN classifier (uncompiled tf.keras functional Model).

    :param X_train: training array; only its per-sample shape is used.
    :param classes: list of class names; its length sets the output width.
    """
    decay = 0.00001  # NOTE(review): unused — no regularizer references it
    in_shp = X_train.shape[1:] #Dimensions of each sample
    #input layer
    X_input = Input(in_shp)
    X = Reshape([1,1024,2], input_shape=in_shp)(X_input)
    # NOTE(review): every conv/pool stage below is applied to `X` (the Reshape
    # output), not to the previous stage's `x`, and the final Flatten also
    # consumes `X`. As written, the conv stack is disconnected dead code and the
    # dense head sees the raw reshaped input directly. Re-chaining through `x`
    # would change tensor shapes (repeated 2x2 pooling would collapse the width-2
    # axis), so this is flagged for redesign rather than silently "fixed".
    x = Conv1D(64,3,padding='same', activation='relu',kernel_initializer='glorot_uniform',data_format="channels_first")(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv1D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv1D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv1D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv2D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv1D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    x = Conv1D(64,3,padding='same', activation='relu')(X)
    x = MaxPool2D(pool_size=2,strides=2, padding='valid', data_format="channels_first")(x)
    # x = EfficientNetB3( weights='imagenet', include_top=False)(input_image)
    X = Flatten()(X)
    X = Dense(128, activation='selu', kernel_initializer='he_normal', name="dense1")(X)
    # X = AlphaDropout(0.3)(X)
    #Full Con 2
    X = Dense(128, activation='selu', kernel_initializer='he_normal', name="dense2")(X)
    # X = AlphaDropout(0.3)(X)
    #Full Con 3
    X = Dense(len(classes), kernel_initializer='he_normal', name="dense3")(X)
    #SoftMax
    X = Activation('softmax')(X)
    return tf.keras.models.Model(inputs=X_input, outputs=X)
# Build and compile the classifier (one-hot labels -> categorical cross-entropy).
model = baseline_cnn_model(X_train,classes)
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
#Create Model
# model = Model.Model(inputs=X_input,outputs=X)
model.summary()
# -
from tensorflow.keras.utils import plot_model
# Save an architecture diagram alongside the notebook.
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# perform training ...
# - call the main training loop in keras for our network+dataset
print(tf.test.gpu_device_name())
# +
# mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0"], cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# -
# %%time
# Train with 20% of the training split held out for validation; the checkpoint
# callback keeps only the weights with the best (lowest) val_loss.
filepath = 'cnn_model.h5'
history = model.fit(X_train,
                    Y_train,
                    batch_size=32,
                    epochs=100,
                    verbose=1,
                    # validation_data=(X_test, Y_test),
                    validation_split = 0.2,
                    callbacks = [
                        tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                        # tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
                    ])
# +
# Training curves: accuracy (left) and loss (right); train = blue, validation = red.
fig, (acc_ax, loss_ax) = plt.subplots(nrows=1, ncols=2, figsize=(20, 6))
hist = history.history
acc_ax.plot(hist['accuracy'], 'b', hist['val_accuracy'], 'r')
acc_ax.set_ylabel('Accuracy Rate', fontsize=12)
acc_ax.set_xlabel('Iteration', fontsize=12)
acc_ax.set_title('Categorical Cross Entropy ', fontsize=14)
acc_ax.legend(['Training Accuracy', 'Validation Accuracy'], fontsize=12, loc='best')
loss_ax.plot(hist['loss'], 'b', hist['val_loss'], 'r')
loss_ax.set_ylabel('Loss', fontsize=12)
loss_ax.set_xlabel('Iteration', fontsize=12)
loss_ax.set_title('Learning Curve ', fontsize=14)
loss_ax.legend(['Training Loss', 'Validation Loss'], fontsize=12, loc='best')
# plt.savefig('crosse_results.png')
plt.show()
# -
model = load_model(filepath)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues, labels=None):
    """Render a confusion matrix as a heatmap with class tick labels.

    Parameters
    ----------
    cm : 2-D array, shape (n_classes, n_classes)
        Confusion matrix (raw counts or row-normalized rates).
    title : str
        Figure title.
    cmap : matplotlib colormap
        Colormap used for the heatmap cells.
    labels : sequence of str, optional
        Tick labels for both axes; no labels when omitted.
        (Replaces the original mutable default ``labels=[]`` with the
        ``None`` sentinel idiom; call-site behavior is unchanged.)
    """
    if labels is None:
        labels = []
    plt.figure(figsize=(10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Plot confusion matrix
batch_size = 1024  # NOTE(review): unused below — predict() is called with batch_size=3000
test_Y_hat = model.predict(X_test, batch_size=3000)
# conf[j, k] counts test samples whose true class is j and predicted class is k.
conf = np.zeros([len(classes),len(classes)])
confnorm = np.zeros([len(classes),len(classes)])
for i in range(0,X_test.shape[0]):
    j = list(Y_test[i,:]).index(1)       # true class: position of the 1 in the one-hot row
    k = int(np.argmax(test_Y_hat[i,:]))  # predicted class: argmax of the softmax output
    conf[j,k] = conf[j,k] + 1
# Row-normalize so each row sums to 1 (per-class recall on the diagonal).
for i in range(0,len(classes)):
    confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])
plot_confusion_matrix(confnorm, labels=classes)
# Print per-class accuracy (diagonal of the normalized matrix).
for i in range(len(confnorm)):
    print(classes[i],confnorm[i,i])
# +
# Per-SNR evaluation: repeat the confusion-matrix computation on the test
# subset at each signal-to-noise ratio and record the overall accuracy.
acc={}
Z_test = Z[test_idx]
Z_test = Z_test.reshape((len(Z_test)))
SNRs = np.unique(Z_test)  # unique SNR values (np.unique returns them sorted)
for snr in SNRs:
    X_test_snr = X_test[Z_test==snr]
    Y_test_snr = Y_test[Z_test==snr]
    pre_Y_test = model.predict(X_test_snr)
    conf = np.zeros([len(classes),len(classes)])
    confnorm = np.zeros([len(classes),len(classes)])
    for i in range(0,X_test_snr.shape[0]):   # number of test samples at this SNR
        j = list(Y_test_snr[i,:]).index(1)   # index of the true class
        k = int(np.argmax(pre_Y_test[i,:]))  # index of the predicted class
        conf[j,k] = conf[j,k] + 1
    for i in range(0,len(classes)):
        confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])
    plt.figure()
    plot_confusion_matrix(confnorm, labels=classes, title="ConvNet Confusion Matrix (SNR=%d)"%(snr))
    cor = np.sum(np.diag(conf))  # correctly classified samples (matrix diagonal)
    ncor = np.sum(conf) - cor    # misclassified samples
    print ("Overall Accuracy %s: "%snr, cor / (cor+ncor))
    acc[snr] = 1.0*cor/(cor+ncor)
# +
# Accuracy vs. SNR curve. Materialize the dict views as lists before
# plotting: matplotlib's plot() does not accept dict_keys/dict_values
# objects as x/y data. Sorting by SNR keeps the line monotone along the
# x-axis (np.unique already yields the keys sorted, so insertion order
# and sorted order coincide here).
snr_values = sorted(acc)
plt.plot(snr_values, [acc[s] for s in snr_values])
plt.ylabel('ACC')
plt.xlabel('SNR')
plt.grid(True)
plt.show()
# -
| Submission/.ipynb_checkpoints/Classification-ultranet-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/ElectricityConsumptionMonitoring/electricity-consumption-monitoring.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# ## Instructions before you start:
# Once the notebook has loaded, click the fast forward button ">>" in the menu bar above. Click "Yes" to restart and run.
# + language="html"
# <button onclick="run_all()">Run All Cells</button>
# <script>
# function run_all(){
# Jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# Jupyter.actions.call('jupyter-notebook:save-notebook');
# }
# </script>
# + language="html"
#
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){ code_shown=false; $('div.input').hide() });
# </script>
#
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# -
# # Electricity Consumption Monitoring
#
# ## Introduction
#
# In this notebook we will be discussing what electricity is, how it is used and how it is monitored. We will also be discussing what circuit diagrams are and how they relate to electricity in your homes.
# ### What is Electricity?
#
# In simplest terms electricity is the presence or flow of electrons. If you don't remember what electrons are from your other science courses they are found in atoms. Atoms are the basic building blocks of matter.
#
# <img src="images/atom.png" alt="" width=500 align=middle>
#
# You can find electrons, protons and neutrons in an atom. Negatively charged electrons orbit around the nucleus of the atom which is made up of positively charged protons and neutrons with no charge. Electrons are attracted to the protons in the nucleus. Electrons are much lighter than the nucleus and can be transferred from one atom to another.
#
#
# ### Static and Current Electricity
#
# Two types of electricity will be discussed, static and current electricity. **Static Electricity** is the build up of electrons on an object. A static charge can be formed when the electrons on one object move to another. An example of this would be sliding down a slide and your hair standing on end.
#
# <img src="images/staticCharge.jpg" alt="" width=500 align=middle>
#
# This results from electrons being transferred from you to the slide leaving you with a net positive charge. You are left with positive charge at the ends of your hair causing them to be repelled from each other and making them stand up.
#
# The other type of electricity is current electricity. **Current Electricity** is the flow of electrons or electrical energy from one atom to another and in general terms from one place to another.
#
# <img src="images/electricCurrentAnimation.gif" alt="" width=500 align=middle>
#
# Current electricity is normally called **Electric Current**. An example of an electric current would be charging your cell phone.
#
# <img src="images/electricCurrent.jpg" alt="" width=500 align=middle>
#
# Electrons flow from your cell phone charger through the charging cord and into your cell phone creating an electric current. In this notebook we will be focusing on current electricity or electric current and how it is monitored and used throughout our homes. In the rest of the notebook electrical current will generally be referred to as just electricity.
#
# ### Electricity Units
# Before we move on to the rest of the notebook we need to cover the units that are used when measuring the amount of electricity being used. As electrical current travels through a device the electrons lose energy which can be used to do useful things like produce heat or do work like turning motors. We are often interested how fast this energy is being used, that is **Power**. The unit used to measure the power or the rate of electricity being used is **Watts (W)**. A Watt is one Joule of energy transferred per second. The power used by a device when it is on is given in Watts.
#
# Another unit of energy that is used is the **Watt-hour**. A **Watt-hour (Wh)** describes the total amount of electricity (energy) used over the course of an hour. It is a combination of how fast electrical power is being used. To get a better understanding of a Watt-hour let's convert it to Joules of energy.
#
# > A Watt measures the amount of Joules per second and can be written as:
# >
# > $\large 1 \hspace{1mm} \text{Watt} = 1 \hspace{1mm} \text{W} = 1\hspace{1mm}\frac{\text{Joule}}{\text{second}} = 1\hspace{1mm}\frac{\text{J}}{\text{s}}$
# >
# > In an hour there are 60 minutes and 60 seconds in each minute. The total number of seconds in an hour would be 60x60 = 3600 seconds
# >
# > $\large 1 \hspace{1mm} \text{hour} = 3600 \hspace{1mm} \text{seconds} = 3600 \hspace{1mm}\text{s}$
# >
# > Thus in 1 Watt-hour we have 1 J being used every second over the course of an hour. If a Joule is used every second of the hour and there are 3600 seconds in the hour then 1 Watt-hour is equivalent to 3600 Joules.
# >
# > $\large 1 \hspace{1mm} \text{Wh} = 1\hspace{1mm}\frac{\text{J}}{\text{s}} \times 3600 \hspace{1mm}\text{s} = 3600 \hspace{1mm}\text{J}$
# >
# > Compared to the Joule, a Watt-hour is a more convenient unit to measure the amount of energy consumed by an item over the course of some time.
#
# ### Kilo, Mega and Giga
#
# Some other terms you may see in this notebook are **kilowatts (kW)**, **megawatts (MW)** and **gigawatts (GW)**. A kilowatt is 1000 watts, a megawatt is 1 million watts and a gigawatt is 1 billion watts. You will also see the term **kilowatt-hours (kWh)** which is 1000 Watt-hours. It can also be thought of as using electricity for 1 hour at a rate of 1000 watts.
#
#
# ## Electricity Use
#
# It is common knowledge that electricity is used to power our homes, vehicles and the various electronic devices we use on a daily basis. So how much electrical power does it actually take to charge your smart phone, use your oven or run a refrigerator? In this section we will answer those questions and take a look at the electricity use of various electronic devices.
#
# ### Efficiency Labels
#
# If you have ever looked at the back of a washing machine, dryer or other electrical appliances you may have noticed a label giving a number of kilowatt-hours (kWh) per year. This label is known as an efficiency label giving energy-related information about the product. In Canada this label is known as the EnerGuide label. Examples of this label are below.
#
# <table>
# <tr>
# <td><img src="images/label1.jpg" alt="EnerGuide" width="450"/></td>
# <td><img src="images/label2.jpg" alt="EnerGuide&EnergyStar" width="450"/></td>
# </tr>
# </table>
#
# There are four key pieces of information that the EnerGuide label tells us:
#
# 1. It tells us the annual energy consumption of the appliance in kilowatt-hours.
# 2. On the label it has the annual energy consumption for the most efficient and least efficient models and where the appliance falls in between them.
# 3. The energy consumption indicator positions the appliance model based on a comparison of other models in the same class.
# 4. The label also has the type and capacity of models that make up the appliance class as well as the model number.
#
# We are most interested in the annual energy consumption since it tells us the average amount of electricity the appliance uses over the course of a year. It should be noted that not all electrical appliances have an EnerGuide label as it is not mandatory for all electrical appliances.
# ### Device Electrical Power Use
#
# Now that we have covered the terminology and given a brief introduction to electricity, we can look at how much electricity everyday devices actually use. Below is a table of various devices and their average kWh usage.
#
# <table>
# <tr>
# <th>Electrical Devices/Appliances</th>
# <th>Device Power Usage</th>
# </tr>
# <tr>
# <td>Portable Heater (1500 W)</td>
# <td>1.5 kWh per hour</td>
# </tr>
# <tr>
# <td>Electric Furnace (with fan)</td>
# <td>10.0 kWh per hour</td>
# </tr>
# <tr>
# <td>Central Air Conditioner (3 ton)</td>
# <td>3.0 kWh per hour</td>
# </tr>
# <tr>
# <td>Ceiling Fan</td>
# <td>0.075 kWh per hour</td>
# </tr>
# <tr>
# <td>Oven</td>
# <td>2.3 kWh per hour</td>
# </tr>
# <tr>
# <td>Microwave Oven</td>
# <td>1.0 kWh per hour</td>
# </tr>
# <tr>
# <td>Coffee Maker</td>
# <td>0.12 kWh per brew</td>
# </tr>
# <tr>
# <td>Dishwasher</td>
# <td>0.6 kWh per load</td>
# </tr>
# <tr>
# <td>Toaster</td>
# <td>0.04 kWh per use</td>
# </tr>
# <tr>
# <td>Refrigerator</td>
# <td>0.05 kWh per hour</td>
# </tr>
# <tr>
# <td>Plasma TV (40-49 inches)</td>
# <td>0.4 kWh per hour</td>
# </tr>
# <tr>
# <td>LCD TV (40-49 inches)</td>
# <td>0.012 kWh per hour</td>
# </tr>
# <tr>
# <td>Desktop Computer</td>
# <td>0.15 kWh per hour</td>
# </tr>
# <tr>
# <td>Laptop</td>
# <td>0.03 kWh per hour</td>
# </tr>
# <tr>
# <td>Radio</td>
# <td>0.02 kWh per hour</td>
# </tr>
# <tr>
# <td>Clothes Washer (500 W)</td>
# <td>0.5 kWh per load</td>
# </tr>
# <tr>
# <td>Clothes Dryer (5000 W)</td>
# <td>5.0 kWh per hour</td>
# </tr>
# <tr>
# <td>Vacuum Cleaner</td>
# <td>0.75 kWh per hour</td>
# </tr>
# <tr>
# <td>Iron</td>
# <td>1.08 kWh per hour</td>
# </tr>
# <tr>
# <td>Hair Dryer</td>
# <td>1.5 kWh per hour</td>
# </tr>
# <tr>
# <td>Curling Iron</td>
# <td>0.05 kWh per hour</td>
# </tr>
# <tr>
# <td>Incandescent Light Bulb (60 W)</td>
# <td>0.06 kWh per hour</td>
# </tr>
# <tr>
# <td>Compact Flourescent (15 W)</td>
# <td>0.015 kWh per hour</td>
# </tr>
# <tr>
# <td>Halogen Light Bulb (300 W)</td>
# <td>0.3 kWh per hour</td>
# </tr>
# <tr>
# <td>Xbox One (Playing a Game)</td>
# <td>0.112 kWh per hour</td>
# </tr>
# <tr>
# <td>Playstation 4 (Playing a Game)</td>
# <td>0.137 kWh per hour</td>
# </tr>
# <tr>
# <td>Wii U (Playing a Game)</td>
# <td>0.034 kWh per hour</td>
# </tr>
# <tr>
# <td>Cellphone</td>
# <td>0.01 kWh per hour</td>
# </tr>
# </table>
#
#
# If you want to determine how much electricity you have used for a device on the table above you can use the calculator below. The energy used is just the power times the amount of time or number of usages. All you have to do is select the device and then enter the amount of hours the device was used for. Depending on the device selected you may enter the number of loads done or times it was used instead of the hours.
# +
import ipywidgets as widgets
from IPython.display import display, Math, Latex
import traitlets
from IPython.display import Markdown as md
import random
# Global flag set by check_error() when the current user input is invalid.
error = False
# kWh figures indexed by the dropdown's integer value; index 0 is the blank placeholder.
usage_list = [0.0, 1.5, 10.0, 3.0, 0.075, 2.3, 1.0, 0.12, 0.6, 0.04, 0.05, 0.4, 0.012, 0.15, 0.03, 0.02, 0.5, 5.0, 0.75, 1.08, 1.5, 0.05, 0.06, 0.015, 0.3, 0.112, 0.137, 0.034, 0.01]
# Static prompt labels shown to the left of the input widgets.
output1 = widgets.HTML(
    value='<font size="4"> Device: </font>',
)
output2 = widgets.HTML(
    value='<font size="4"> Number of Hours: </font>',
)
# Result and error message areas, filled in by the callbacks below.
calculation_output = widgets.HTML('')
error_output = widgets.HTML('')
# Device picker: each option maps a display name to its index into usage_list.
device_choice = widgets.Dropdown(
    options={' ':0,'Portable Heater': 1, 'Electric Furnace': 2, 'Air Conditioner': 3, 'Ceiling Fan': 4, 'Oven': 5, 'Microwave Oven': 6, 'Coffee Maker': 7, 'Dishwasher': 8, 'Toaster': 9, 'Refrigerator': 10, 'Plasma TV': 11, 'LCD TV': 12, 'Desktop Computer': 13, 'Laptop': 14, 'Radio': 15, 'Clothes Washer':16, 'Clothes Dryer': 17, 'Vacuum Cleaner': 18, 'Iron': 19, 'Hair Dryer': 20, 'Curling Iron': 21, 'Incandescent Bulb': 22, 'Compact Flourescent': 23, 'Halogen Bulb': 24, 'Xbox One': 25, 'Playstation 4': 26, 'Wii U': 27, 'Cellphone Charge': 28},
    value=0,
    description='',
)
# Free-text entry for the number of hours (or loads/brews/uses).
hours_input = widgets.Text(
    value='0',
    placeholder='',
    description='',
    disabled=False
)
calculate_button = widgets.Button(
    value=False,
    description='Calculate',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Calculate the number of kWh',
    continuous_update=True
)
device_choice.layout.width = '150px'
hours_input.layout.width = '100px'
# Lay out prompt + input pairs horizontally, then stack everything vertically.
device_widget = widgets.HBox(children=[output1, device_choice])
hours_widget = widgets.HBox(children=[output2, hours_input])
display_widget = widgets.VBox(children=[device_widget, hours_widget, calculate_button, calculation_output, error_output])
def calculate_output(change):
    """Button callback: validate the inputs, then display the kWh used.

    Looks up the selected device's consumption rate in ``usage_list`` and
    multiplies it by the number of hours (or loads/brews/uses) entered.
    ``change`` is the ipywidgets event payload and is not used.
    """
    global error
    # NOTE: ``usage_list`` is only read here, so the original
    # ``global usage_list`` declaration was unnecessary (``global`` is
    # only required when a name is assigned).
    calculation_output.value = ''
    error_output.value = ''
    error = False
    check_error()  # may set the global `error` flag and an error message
    if not error:
        input_hours = float(hours_input.value)
        kwh = usage_list[device_choice.value] * input_hours
        kwh = round(kwh, 3)
        calculation_output.value = '<font size="4"> ' + str(kwh) + ' kWh used.</font>'
def check_error():
    """Validate the current widget state, flagging problems via the global ``error``.

    Sets ``error`` to True (and shows a message in ``error_output``) when no
    device is selected or the hours field is not a parseable number.
    """
    global error
    # Guard clause: the blank placeholder entry has dropdown value 0.
    if device_choice.value == 0:
        error_output.value = '<font size="4"> ERROR: You have not selected a device.</font>'
        error = True
        return
    try:
        float(hours_input.value)
    except ValueError:
        error_output.value = '<font size="4"> ERROR: That is not a valid number. Please enter a valid number.</font>'
        error = True
def check_value(change):
    """Dropdown callback: relabel the quantity prompt for the chosen device.

    Most devices are metered per hour; a few are metered per brew, load,
    or use, so the prompt text switches accordingly.
    """
    units_by_device = {
        7: 'Brews',   # coffee maker
        8: 'Loads',   # dishwasher
        9: 'Uses',    # toaster
        16: 'Loads',  # clothes washer
    }
    unit = units_by_device.get(device_choice.value, 'Hours')
    output2.value = '<font size="4"> Number of %s: </font>' % unit
# Wire up the callbacks, then render the assembled widget (last expression in the cell).
calculate_button.on_click(calculate_output)
device_choice.observe(check_value, names='value')
display_widget
# -
# ### Electrical Energy Monitoring
#
# To monitor your electrical energy use you would need to measure how many **Watts** are being used by your electrical devices over the time they are being used. Your local power company is able to monitor how much electrical energy you use from an electricity meter attached to the outside of your home. The electricity meter is monitoring the main electrical line running into your home so it can measure the electrical energy used by the entire home. Some examples of electricity meters that you may recognize are below. You may also notice on the images that the electricity meters measure the electrical energy in kWh that we learnt about above.
#
# <table>
# <tr>
# <td><img src="images/meter2.jpg" alt="1" width="300"/></td>
# <td><img src="images/meter.jpg" alt="2" width="300"/></td>
# <td><img src="images/meter3.jpg" alt="3" width="300"/></td>
# </tr>
# </table>
#
# There are also a variety of smaller electrical meters that can monitor the usage of individual devices and appliances. A few of these meters are below.
#
# <table>
# <tr>
# <td><img src="images/monitor2.png" alt="1" width="350"/></td>
# <td><img src="images/monitor1.jpg" alt="2" width="250"/></td>
# <td><img src="images/monitor3.jpg" alt="3" width="350"/></td>
# </tr>
# </table>
#
# You can use such devices to monitor the energy consumption of appliances in your house and potentially replace them with energy-efficient appliances to save power.
# ## Circuit Diagrams
#
# Now that we have covered what electricity is, the various electrical devices and how it is monitored let's take a look at **circuit diagrams**. A circuit diagram is a graphical representation of an **electrical circuit**. An electrical circuit is a network of electrical devices connected together. All of the electronic devices in your home make up a very complicated electrical circuit that can be represented with a circuit diagram. We are just starting to learn about circuit diagrams so we will be using simple examples such as a light bulb connected to a battery that you can see below.
#
# <img src="images/circuit1.png" alt="" width=500 align=middle>
#
# We will cover six different circuit components that can make up a circuit, namely switches, power sources, resistors, lights, motors and wires.
#
# 1. A **switch** can either be open or closed. If it is open the electricity can't flow through the switch. If it is closed the electricity can flow through the switch. A switch can be thought of as a light switch. If the light switch is on the electricity is flowing and if it is off the electricity is not.
#
# 2. A **power source** is any type of device that provides power/electricity. The simple example of a power source we will use is a battery that provides electricity to electrical devices.
#
# 3. A **resistor** is a component that resists the flow of electricity when it flows through it. The resistor limits the flow of electricity in the circuit. Electrical energy will be lost as heat when electricity flows through a resistor. An example of needing to use a resistor would be when you have two devices in a circuit and one of the devices can only handle so much electricity. You would then need a resistor to limit the flow of electricity to the device.
#
# 4. A **light** will be some device that produces light. Electrical energy will be lost to this radiant energy as electricity powers the light. The simple example we will use is a light bulb/lamp that generates light.
#
# 5. A **motor** is a device that is used to convert electrical energy into mechanical energy. Motors are used in a variety of electrical devices ranging from a vehicle motor to a motor in a refrigerator.
#
# 6. A **wire** is a component that is conductive and allows electricity to flow through it. Wires are used to connect the other various components together.
#
#
# ### Units
#
# Before we continue on we need to cover some units that are commonly used for the components that make up a circuit. We have already covered **Watts** and **Joules**, we will also cover **Volts**, **Amperes**, **Ohms** and **Coulombs**.
#
# A **coulomb (C)** is defined as a unit of charge. One coulomb is the amount of charge contained in $6.24 \times 10^{18}$ electrons or 6.24 quintillion electrons.
#
# An **ampere (A)** is a unit used to measure electrical current (electricity). It is a measurement of the number of electrons flowing through a circuit. It is defined as one Coulomb of charge passing a certain point in a circuit in one second.
#
# A **volt (V)** is a unit of electric potential or electromotive force that causes the electrons to flow. A volt can be defined as the potential difference between two points. In terms of **Joules** and **Coulombs** one volt will impart one joule of energy per coulomb of charge that passes through the two points. The power sources in the circuit will have units of volts as they create the flow of electrons.
#
# An **ohm ($\Omega$)** is a unit of electrical resistance that can be defined as one volt per ampere ($\Omega = V/A)$ . The higher the resistance in ohms the lower the flow of electricity. As you can guess the resistors have units of ohms since they resist the flow of electricity. Each device in a circuit will also have some resistance to them that slows the flow of the electrons. For example the resistance in a lamp causes it to lose heat energy in addition to the energy emitted as light.
# ### Symbols
#
# Each of the components in a circuit are represented by specific symbols as listed below.
#
# 1. The symbol commonly used for a switch is below. A circuit is considered a closed circuit when all the switches in it are closed and electricity is flowing. When a switch is open in a circuit it is considered an open circuit.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/switch.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# Open</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/switchclosed.png" alt="2" width="150" style="padding-bottom:0.5em"/>
# Closed</div>
# </td>
# </tr>
# </table>
#
# 2. The symbol for power sources of a single cell battery and double cell battery are below. You can see on the single cell battery there is a positive(+) and negative(-) sign on the two sides. The two signs represent the two terminals on the battery where the long line is the positive terminal and short line is the negative terminal. Electricity flows out of the negative terminal into the circuit and comes back in the positive terminal.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/cell.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# 1V Battery</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/battery(2V).png" alt="2" width="150" style="padding-bottom:0.5em"/>
# 2V Battery</div>
# </td>
# </tr>
# </table>
#
# 3. The symbol for a resistor is below. There are two commonly used symbols to represent resistors that are below too.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/resistor.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# Resistor</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/resistor(1ohm).png" alt="2" width="150" style="padding-bottom:0.5em"/>
# 1 Ohm Resistor</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/resistor(2ohm).png" alt="2" width="150" style="padding-bottom:0.5em"/>
# 2 Ohm Resistor</div>
# </td>
# </tr>
# </table>
#
# There is also a symbol for a variable resistor. This resistor allows the resistance to be adjusted.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/variableresistor.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# Variable Resistor</div>
# </td>
# </tr>
# </table>
#
# 4. The symbol for a light bulb/lamp is below.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/lamp(1V).png" alt="1" width="150" style="padding-bottom:0.5em"/>
# 1V Lamp</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/lamp(2V).png" alt="2" width="150" style="padding-bottom:0.5em"/>
# 2V Lamp</div>
# </td>
# </tr>
# </table>
#
# 5. The symbol for a motor is below.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/motor.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# Motor</div>
# </td>
# </tr>
# </table>
#
# 6. The symbol for wires are below.
#
# <table>
# <tr>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/wire3.png" alt="1" width="150" style="padding-bottom:0.5em"/>
# Straight Wire</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/wire1.png" alt="2" width="150" style="padding-bottom:0.5em"/>
# Corner</div>
# </td>
# <td>
# <div style="width:150 px; font-size:120%; text-align:center;">
# <img src="images/wire2.png" alt="2" width="150" style="padding-bottom:0.5em"/>
# Link</div>
# </td>
# </tr>
# </table>
# ### Creating the Circuit Diagram
#
# When you draw a circuit there are two vital components that you will need to make it work.
#
# 1. A power source to provide electricity to the devices in the circuit
# 2. Wires to connect all the components together and allow electricity to flow through the circuit.
#
# The more devices you have connected in your circuit the greater the power sources you will need. This can be achieved by having multiple batteries in a row. For example most TV remotes require at least two batteries in them to give them enough electrical power.
#
# Please feel free to play around with the circuit builder program below and build and run your own circuit. The program is from the website http://www.cleo.net.uk/consultants_resources/science/circuitWorld/circuitworld.html and will require you to have Adobe Flash Player download and enabled in your web browser.
#
# #### Program Instructions
#
# Using this program is relatively simple. The various circuit components/devices we have covered are on the left side and can be clicked on and moved over to the grid to the right.
#
# Each of the devices and components will need to be connected together by the wires.
#
# Once everything has been connected together you can hit the run button on the bottom left to run the circuit. If the devices don't seem to be working, like a bulb not lighting up you may need to add more batteries to the circuit to provide more electricity.
#
# There are other devices and components on the left that we have not covered. They are an ammeter and a buzzer. An ammeter measures the current in a circuit in **amps** and a buzzer just buzzes as expected. Feel free to use those components and devices in the circuit.
from IPython.core.display import HTML
# Embed the Flash-based circuit builder (requires Adobe Flash enabled in the browser).
display(HTML('<embed src="images/circuitworld.swf" width="825" height="600"></embed></object>'))
display(HTML('© 2019 CLEO (Cumbria and Lancashire Education Online)'))
# ## Conclusion
#
# In this notebook we have covered a variety of topics related to electricity monitoring and consumption. We looked at what electricity is, the units that it is measured in and the devices used to measure and monitor its consumption. The efficiency of the various electronic devices and appliances used in our homes were looked into to see how much electricity they actually use. We also looked at what circuit diagrams are, how you create them and the various components that are used in them.
#
# We have covered a lot of information in this notebook related to electricity and the various devices and appliances that use it but these topics are quite complex so we weren't able to cover everything. I encourage the reader to continue looking into these topics as they are both unique and interesting.
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _sources/curriculum-notebooks/Science/ElectricityConsumptionMonitoring/electricity-consumption-monitoring.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="CQhd0TTQCMeh"
# ## Dataloaders for Machine Learning (Tensorflow & PyTorch)
#
# This tutorial acts as a step by step guide for fetching, preprocessing, storing and loading the [MS-COCO](http://cocodataset.org/#home) dataset for image captioning using deep learning. We have chosen **image captioning** for this tutorial not by accident. For such an application, the dataset required will have both fixed shape (image) and variably shaped (caption because it's sequence of natural language) data. This diversity should help the user to get a mental model about how flexible and easy is to plug Hangar to the existing workflow.
#
# You will use the MS-COCO dataset to train our model. The dataset contains over 82,000 images, each of which has at least 5 different caption annotations.
#
# This tutorial assumes you have downloaded and extracted the [MS-COCO dataset](http://cocodataset.org/#home) in the current directory. If you haven't yet, shell commands below should help you do it (beware, it's about 14 GB data). If you are on Windows, please find the equivalent commands to get the dataset downloaded.
#
#
# ```bash
# wget http://images.cocodataset.org/zips/train2014.zip
# unzip train2014.zip
# # # rm train2014.zip
# wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
# unzip annotations_trainval2014.zip
# # # rm annotations_trainval2014.zip
# ```
#
# Let's install the required packages in our environment. We will be using Tensorflow 1.14 in this tutorial but it should work in all the Tensorflow versions starting from 1.12. But do let us know if you face any hiccups. Install below-given packages before continue. Apart from Tensorflow and Hangar, we use [SpaCy](https://spacy.io/) for pre-processing the captions. SpaCy is probably the most widely used natural language toolkit now.
#
# ```bash
# tensorflow==1.14.0
# hangar==3.0
# spacy==2.1.8
# ```
#
# One more thing before jumping into the tutorial: we need to download the SpaCy English model `en_core_web_md`, which cannot be dynamically loaded. This means it must be downloaded with the command below outside this runtime, and the runtime should then be reloaded.
#
# ```bash
# python -m spacy download en_core_web_md
# ```
#
# Once all the dependencies are installed and loaded, we can start building our hangar repository.
#
#
# ### Hangar Repository creation and arrayset init
# We will create a repository and initialize one arrayset named `images` now for a quick demo of how Tensorflow dataloader work. Then we wipe the current repository and create new arraysets for later portions.
# + colab={} colab_type="code" id="HGXOwLJ3IWPq"
# Repository location and author identity used for Hangar commits.
repo_path = 'hangar_repo'
username = 'hhsecond'
email = '<EMAIL>'
# Target image shape (height, width, channels) every sample is resized to.
img_shape = (299, 299, 3)
image_dir = '/content/drive/My Drive/train2014'
annotation_file = ''  # NOTE(review): left empty here; not used in the visible cells — confirm later use
import logging
# Silence TensorFlow's verbose warning/deprecation chatter.
logging.getLogger("tensorflow").setLevel(logging.ERROR)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fHehOEhwCMej" outputId="210f9b87-9c59-49ea-fd31-92ba18d140b3"
import os
from hangar import Repository
import tensorflow as tf
import numpy as np
tf.compat.v1.enable_eager_execution()  # TF 1.x: lets tensors evaluate eagerly in the loops below
if not os.path.isdir(repo_path):
    os.mkdir(repo_path)
repo = Repository(repo_path)
# remove_old=True wipes any previous repository history so the demo starts clean.
repo.init(user_name=username, user_email=email, remove_old=True)
co = repo.checkout(write=True)
# Fixed-shape uint8 arrayset that will hold the resized RGB images.
images_aset = co.arraysets.init_arrayset('images', shape=img_shape, dtype=np.uint8, named_samples=False)
co.commit('arrayset init')
co.close()
# + [markdown] colab_type="text" id="QENDY8LvGGhb"
# ### Add sample images
# Here we add few images to the repository and show how we can load this data as Tensorflow dataloader. We use the idea we learn here in the later portions to build a fully fledged training loop.
# + colab={} colab_type="code" id="g61tY81hHr8c"
import os
from PIL import Image
co = repo.checkout(write=True)
images_aset = co.arraysets['images']
try:
    for i, file in enumerate(os.listdir(image_dir)):
        pil_img = Image.open(os.path.join(image_dir, file))
        # Grayscale images are promoted to RGB so every sample matches the
        # (299, 299, 3) arrayset schema.
        if pil_img.mode == 'L':
            pil_img = pil_img.convert('RGB')
        img = pil_img.resize(img_shape[:-1])
        img = np.array(img)
        images_aset[i] = img
        if i != 0 and i % 2 == 0:  # stop after the third image (i == 2)
            break
except Exception as e:
    print('Exception', e)
    # Close the write checkout before re-raising so the writer lock is released.
    co.close()
    raise e
co.commit('added image')
co.close()
# + [markdown] colab_type="text" id="dvFci5P8Lm7C"
# ### Let's make a Tensorflow dataloader
# Hangar provides `make_tf_dataset` & `make_torch_dataset` for creating Tensorflow & PyTorch datasets from Hangar arraysets. You can read more about it in the [documentation](https://hangar-py.readthedocs.io/en/latest/api.html#ml-framework-dataloaders). Next we'll make a Tensorflow dataset and loop over it to make sure we have got a proper Tensorflow dataset.
# + colab={} colab_type="code" id="Sc7XGXMVLuDO"
from hangar import make_tf_dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" id="tb5g_JrJVbqT" outputId="a8fe4e7d-243d-4dae-dc94-66364342a913"
from matplotlib.pyplot import imshow
co = repo.checkout()
image_aset = co.arraysets['images']
# Wrap the arrayset in a tf.data.Dataset and pull one sample to verify the
# pipeline end to end.
dataset = make_tf_dataset(image_aset)
for image in dataset:
    imshow(image[0].numpy())
    break
# + [markdown] colab_type="text" id="FTArZhtZfg7S"
# ### New arraysets
#
# For our example, we would need two arraysets. One for the image and another one for captions. Let's wipe our existing repository (`remove_old` argument in `repo.init` does this) and create these arraysets
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ISMdkXtYHg2c" outputId="0e18d9d3-1a4f-4f75-d388-c8c0c316f69b"
# Re-create the repository from scratch (remove_old=True discards the demo
# data) and declare the two arraysets used for training: raw images and
# variable-length caption index vectors.
repo = Repository(repo_path)
repo.init(user_name=username, user_email=email, remove_old=True)
co = repo.checkout(write=True)
images_aset = co.arraysets.init_arrayset('images', shape=img_shape, dtype=np.uint8, named_samples=False)
# Fix: `np.float` was removed in NumPy 1.24; `np.float64` is the equivalent
# dtype. shape=(60,) is the maximum caption length; variable_shape=True
# allows shorter captions.
captions_aset = co.arraysets.init_arrayset(name='captions', shape=(60,), dtype=np.float64, variable_shape=True, named_samples=False)
co.commit('arrayset init')
co.close()
# + [markdown] colab_type="text" id="z_fUUIpKCMen"
# ### Store image and captions to Hangar repo
# Each image will be converted to RGB channels with dtype `uint8`. Each caption will be prepended with `START` token and ended with `END` token before converting them to floats. We have another preprocessing stage for images later.
#
# We'll start with loading the caption file:
# + colab={} colab_type="code" id="VlX-su-gCMep"
import json
# MS-COCO 2014 caption annotations; maps each image_id to caption strings.
annotation_file = 'annotations/captions_train2014.json'
with open(annotation_file, 'r') as f:
    annotations = json.load(f)
# + colab={} colab_type="code" id="UMcYzkWgCMes"
import spacy
# if you have installed spacy and the model in the same notebook session, you might need to restart the runtime to get it into the scope
nlp = spacy.load('en_core_web_md')
# + colab={} colab_type="code" id="wxpbxEvmCMev"
def sent2index(sent):
    """Convert a sentence into a list of SpaCy vector-table row indices.

    Each token that has a word vector in the loaded model is mapped to its
    row in ``nlp.vocab.vectors``; tokens without a vector fall back to the
    row of the literal token 'UNK'.
    """
    ids = []
    doc = nlp(sent)
    for token in doc:
        if token.has_vector:
            # Fix: renamed `id` -> `idx` so the builtin `id` is not shadowed.
            # token.norm is the hash key of the token's normalised form;
            # key2row maps it to the embedding-table row.
            idx = nlp.vocab.vectors.key2row[token.norm]
        else:
            # Out-of-vocabulary: reuse the index of the 'UNK' token.
            idx = sent2index('UNK')[0]
        ids.append(idx)
    return ids
# + [markdown] colab_type="text" id="RIvqFIHUCMey"
# ### Save the data to Hangar
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="__I8ntp3CMez" outputId="287685d1-2e7c-4d3f-94b7-87db73f966e3"
import os
from tqdm import tqdm
all_captions = []
all_img_name_vector = []
limit = 100  # if you are not planning to save the whole dataset to Hangar. Zero means whole dataset
co = repo.checkout(write=True)
images_aset = co.arraysets['images']
captions_aset = co.arraysets['captions']
all_files = set(os.listdir(image_dir))
i = 0
# Context-managing the arraysets keeps a single backend transaction open,
# which makes bulk writes much faster.
with images_aset, captions_aset:
    for annot in tqdm(annotations['annotations']):
        # Fix: `break` instead of `continue` — once the limit is reached there
        # is nothing left to do, so don't spin through the remaining
        # hundreds of thousands of annotations.
        if limit and i > limit:
            break
        image_id = annot['image_id']
        assumed_image_paths = 'COCO_train2014_' + '%012d.jpg' % (image_id)
        # Skip captions whose image is not present in the local download.
        if assumed_image_paths not in all_files:
            continue
        img_path = os.path.join(image_dir, assumed_image_paths)
        img = Image.open(img_path)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img.resize(img_shape[:-1])
        img = np.array(img)
        # 'sos'/'eos' mark sequence start/end for the decoder.
        cap = sent2index('sos ' + annot['caption'] + ' eos')
        # Fix: `np.float` was removed in NumPy 1.24; `np.float64` is the
        # equivalent dtype (matches the captions arrayset schema).
        cap = np.array(cap, dtype=np.float64)
        # multi_add stores both samples under one auto-generated key, keeping
        # image and caption aligned.
        co.arraysets.multi_add({
            images_aset.name: img,
            captions_aset.name: cap
        })
        # Commit every 1000 samples so a crash doesn't lose all progress.
        if i % 1000 == 0 and i != 0:
            if co.diff.status() == 'DIRTY':
                co.commit(f'Added batch {i}')
        i += 1
co.commit('Added full data')
co.close()
# + [markdown] colab_type="text" id="gXvSa2iCCMe2"
# ### Preprocess Images
#
# Our image captioning network requires a pre-processed input. We use transfer learning for this with a pretrained InceptionV3 network which is available in Keras. But we have a problem. Preprocessing is costly and we don't want to do it all the time. Since Hangar is flexible enough to create multiple arraysets and lets you call a group of arraysets a `dataset`, it is quite easy to make a new arrayset for the processed images; we don't have to do the preprocessing online, but can keep a preprocessed image in the new arrayset in the same repository under the same key. This means we have three arraysets in our repository (all three have different samples with the same names):
# - `images`
# - `captions`
# - `processed_images`
#
# Although we need only the `processed_images` for the network, we still keep the bare image in the repository in case we need to look into it later or if we decided to do some other preprocessing instead of InceptionV3 (it is always advised to keep the source truth with you).
#
#
# + colab={} colab_type="code" id="QBGCS_ceCMe2"
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# Headless InceptionV3 (include_top=False) pretrained on ImageNet; we only
# want the convolutional feature maps, not the classifier.
image_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')
new_input = image_model.input
# Output of the last convolutional layer: shape (1, 8, 8, 2048) per image.
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
def process_image(img):
    """Run one image through InceptionV3 and flatten to (64, 2048) features."""
    # Scale pixel values to the [-1, 1] range InceptionV3 expects.
    img = tf.keras.applications.inception_v3.preprocess_input(img)
    # Add the batch dimension the model requires.
    img = np.expand_dims(img, axis=0)
    img = image_features_extract_model(img)
    # Collapse the 8x8 spatial grid into 64 feature vectors.
    return tf.reshape(img, (-1, img.shape[3]))
# + colab={} colab_type="code" id="ANFPvYByCMe5"
from hangar import Repository
import numpy as np
repo_path = 'hangar_repo'
repo = Repository(repo_path)
co = repo.checkout(write=True)
images = co.arraysets['images']
# Run one image through the extractor to discover the output shape/dtype,
# then let Hangar infer the new arrayset's schema from that prototype.
sample_name = list(images.keys())[0]
prototype = process_image(images[sample_name]).numpy()
pimages = co.arraysets.init_arrayset('processed_images', prototype=prototype)
# + [markdown] colab_type="text" id="jWN6AxiHCMe7"
# #### Saving the pre-processed images to the new arrayset
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="HdFxmi5ECMe8" outputId="38dddea0-64f8-47cf-fc9d-6b14a6140135"
from tqdm import tqdm
# Re-use each sample's key so `images` and `processed_images` stay aligned
# by name across arraysets.
with pimages:
    for key in tqdm(images):
        pimages[key] = process_image(images[key]).numpy()
co.commit('processed image saved')
co.close()
# + [markdown] colab_type="text" id="zacZutpTCMe_"
# ### Dataloaders for training
# We are using Tensorflow to build the network but how do we load this data from Hangar repository to Tensorflow?
#
# A naive option would be to run through the samples and load the numpy arrays and pass that to the `sess.run` of Tensorflow. But that would be quite inefficient. Tensorflow uses multiple threads to load the data in memory and its dataloaders can prefetch the data beforehand so that your training loop doesn't get blocked while loading the data. Also, Tensorflow dataloaders bring batching, shuffling, etc. to the table prebuilt. That's cool, but how do we load data from Hangar to Tensorflow using a TF dataset? Well, we have `make_tf_dataset`, which accepts a list of arraysets as a parameter and returns a TF dataset object.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="gcKsE3d4CMfA" outputId="a42c5c84-e62f-4178-cc3a-175dac08aa7c"
from hangar import make_tf_dataset
co = repo.checkout() # we don't need write checkout here
# + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="TybRGUGaCMfC" outputId="8e75b46d-f8da-4dd3-c607-1174b23a15a0"
# Training hyperparameters.
BATCH_SIZE = 1
EPOCHS = 2
embedding_dim = 256
units = 512
# Vocabulary size == number of rows in the SpaCy vector table.
vocab_size = len(nlp.vocab.vectors.key2row)
num_steps = 50
captions_dset = co.arraysets['captions']
pimages_dset = co.arraysets['processed_images']
# Zips the two arraysets by sample key into one shuffled tf.data.Dataset
# yielding (processed_image, caption) pairs.
dataset = make_tf_dataset([pimages_dset, captions_dset], shuffle=True)
# + [markdown] colab_type="text" id="27mQc673CMfF"
# ### Padded Batching
#
# Batching needs a bit more explanation here since the dataset does not just consist of fixed shaped data. We have two dataset in which one is for captions. As you know captions are sequences which can be variably shaped. So instead of using `dataset.batch` we need to use `dataset.padded_batch` which takes care of padding the tensors with the longest value in each dimension for each batch. This `padded_batch` needs the shape by which the user needs the batch to be padded. Unless you need customization, you can use the shape stored in the `dataset` object by `make_tf_dataset` function.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="8tpHg3w2CMfF" outputId="e2145382-c73b-4acf-9076-40ff64554ade"
# The per-element shapes recorded by make_tf_dataset; padded_batch pads each
# variable-length caption up to the longest in its batch.
output_shapes = tf.compat.v1.data.get_output_shapes(dataset)
output_shapes
# + colab={} colab_type="code" id="imMQrtn7CMfI"
dataset = dataset.padded_batch(BATCH_SIZE, padded_shapes=output_shapes)
# + [markdown] colab_type="text" id="tY6Z7y8TCMfO"
# ### Build the network
#
# Since we have the dataloaders ready, we can now build the network for image captioning and start training. Rest of this tutorial is a copy of an official Tensorflow tutorial which is available at https://tensorflow.org/beta/tutorials/text/image_captioning. The content of Tensorflow tutorial page is licensed under the Creative Commons Attribution 4.0 License, and code samples are licensed under the Apache 2.0 License.
# Access date: Aug 20 2019
#
#
# In this example, you extract the features from the lower convolutional layer of InceptionV3 giving us a vector of shape (8, 8, 2048) and quash that to a shape of (64, 2048). We have stored the result of this already to our Hangar repo. This vector is then passed through the CNN Encoder (which consists of a single Fully connected layer). The RNN (here GRU) attends over the image to predict the next word.
# + colab={} colab_type="code" id="6Kc-yZ0iCMfO"
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau-style) attention over the 64 image feature vectors.

    Given the encoder features and the decoder's previous hidden state,
    produces a context vector (weighted sum of the features) plus the
    attention weights used to build it.
    """

    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, features, hidden):
        # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)
        # hidden shape == (batch_size, hidden_size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden_size)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # score shape == (batch_size, 64, hidden_size)
        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
        # attention_weights shape == (batch_size, 64, 1)
        # you get 1 at the last axis because you are applying score to self.V
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
# + colab={} colab_type="code" id="up0nVnIZO2_c"
class CNN_Encoder(tf.keras.Model):
    """Projects pre-extracted InceptionV3 features into the embedding space.

    The heavy convolutional work was done offline (the features live in the
    ``processed_images`` arrayset), so this encoder is just a single fully
    connected layer followed by a ReLU.
    """

    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        # Output shape after fc: (batch_size, 64, embedding_dim).
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        return tf.nn.relu(self.fc(x))
# + colab={} colab_type="code" id="4qAEbanRO77k"
class RNN_Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention that emits one word per step.

    ``call`` consumes the previous word id, the encoder features and the
    previous hidden state, and returns vocabulary logits, the new hidden
    state, and the attention weights (useful for visualisation).
    """

    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(self.units)

    def call(self, x, features, hidden):
        # defining attention as a separate model
        context_vector, attention_weights = self.attention(features, hidden)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)
        # x shape == (batch_size * max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))
        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)
        return x, state, attention_weights

    def reset_state(self, batch_size):
        # Zero hidden state to start decoding each new batch.
        return tf.zeros((batch_size, self.units))
# + colab={} colab_type="code" id="9ZlfcS5VO_yA"
def loss_function(real, pred):
    """Masked sparse-categorical cross entropy, averaged over the batch.

    Positions where ``real`` is 0 (padding added by ``padded_batch``) are
    zeroed out so variable-length captions do not penalise the model.
    """
    raw_loss = loss_object(real, pred)
    keep = tf.math.logical_not(tf.math.equal(real, 0))
    raw_loss *= tf.cast(keep, dtype=raw_loss.dtype)
    return tf.reduce_mean(raw_loss)
# + colab={} colab_type="code" id="s5kEPFlZCMfR"
@tf.function
def train_step(img_tensor, target):
    """Run one teacher-forced training step and apply gradients.

    Returns ``(loss, total_loss)`` where ``loss`` is summed over time steps
    and ``total_loss`` is normalised by the caption length.
    """
    loss = 0
    # initializing the hidden state for each batch
    # because the captions are not related from image to image
    hidden = decoder.reset_state(batch_size=target.shape[0])
    # TODO: do this dynamically: '<start>' == 2
    dec_input = tf.expand_dims([2] * BATCH_SIZE, 1)
    with tf.GradientTape() as tape:
        features = encoder(img_tensor)
        for i in range(1, target.shape[1]):
            # passing the features through the decoder
            predictions, hidden, _ = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # using teacher forcing
            dec_input = tf.expand_dims(target[:, i], 1)
    total_loss = (loss / int(target.shape[1]))
    trainable_variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    return loss, total_loss
# + colab={} colab_type="code" id="cQeg3v4KCMfU"
# Instantiate the encoder/decoder and training utilities once, globally, so
# `train_step` can close over them.
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
# from_logits=True: fc2 outputs raw scores; reduction='none' so the padding
# mask in `loss_function` can zero out individual positions.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
# + [markdown] colab_type="text" id="pyYHHBHVCMfW"
# ### Training
#
# Here we consume the dataset we have made before by looping over it. The dataset returns the image tensor and target tensor (captions) which we will pass to `train_step` for training the network.
#
# The encoder output, hidden state (initialized to 0) and the decoder input (which is the start token) is passed to the decoder. The decoder returns the predictions and the decoder hidden state. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss. Use teacher forcing to decide the next input to the decoder. Teacher forcing is the technique where the target word is passed as the next input to the decoder. The final step is to calculate the gradients and apply it to the optimizer and backpropagate.
# + colab={} colab_type="code" id="l4gg61xSCMfX"
import time
loss_plot = []
for epoch in range(0, EPOCHS):
    start = time.time()
    total_loss = 0
    # Each element is a (processed_image, caption) batch from the Hangar-backed
    # tf.data pipeline built above.
    for (batch, (img_tensor, target)) in enumerate(dataset):
        batch_loss, t_loss = train_step(img_tensor, target)
        total_loss += t_loss
        if batch % 1 == 0:
            print('Epoch {} Batch {} Loss {:.4f}'.format(
                epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
    # storing the epoch and loss value to plot later
    loss_plot.append(total_loss / num_steps)
    print('Epoch {} Loss {:.6f}'.format(epoch + 1,
                                        total_loss / num_steps))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# + [markdown] colab_type="text" id="J7JPiJjtCMfb"
# #### Visualize the loss
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="M0icezYgCMfd" outputId="5c2bf016-120c-4ca9-f7d2-cef69eb216a0"
import matplotlib.pyplot as plt
# Below loss curve is not the actual loss image we have got
# while training and kept it here only as a reference
plt.plot(loss_plot)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.show()
| docs/Tutorial-Dataloader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import emat
import numpy
from matplotlib import pyplot as plt
from emat.util.distributions import pert, triangle, uniform, get_bounds
# + raw_mimetype="text/restructuredtext" active=""
# This page reviews some common continuous distributions used for exploratory and risk analysis.
# EMAT can also use any named continuous distribution from the :any:`scipy.stats` module.
# -
# ## Uniform Distribution
#
# The uniform distribution is defined by a probability density function that is a rectangle.
# It is parameterized using two parameters (minimum, maximum). It is a simple
# distribution that is easy to understand and explain, and is often assumed as the
# implied default distribution for exploratory analysis.
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 4
min: 1
max: 4
dist: uniform
dtype: float
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = (0,5)
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# It is also valid to include the `min` and `max` values under the `dist` key, instead of
# as top level keys for the parameter definition.
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 4
dist:
name: uniform
min: 1
max: 4
dtype: float
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = (0,5)
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# ## Triangle Distribution
# The triangle distribution is defined by a probability density function that is a triangle.
# It is parameterized using three parameters (minimum, peak, maximum). It is a simple
# distribution that is easy to understand and explain, and unlike the uniform distribution,
# it allow more likelihood to be directed towards some particular value.
x = numpy.linspace(0,5)
plt.plot(x, triangle(lower_bound=0, upper_bound=5, peak=0.0).pdf(x), label='Peak=0.0')
plt.plot(x, triangle(lower_bound=0, upper_bound=5, peak=0.5).pdf(x), label='Peak=0.5')
plt.plot(x, triangle(lower_bound=0, upper_bound=5, peak=1.0).pdf(x), label='Peak=1.0')
plt.plot(x, triangle(lower_bound=0, upper_bound=5, peak=2.5).pdf(x), label='Peak=2.5')
_=plt.legend()
# + raw_mimetype="text/restructuredtext" active=""
# .. autofunction:: emat.util.distributions.triangle
# -
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 4
min: 0
max: 5
dist:
name: triangle
peak: 4
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = get_bounds(s['uncertain_variable_name'])
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# It is also valid to include the `min` and `max` values under the `dist` key, instead of
# as top level keys for the parameter definition.
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 4
dist:
name: triangle
min: 0
peak: 4
max: 5
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = get_bounds(s['uncertain_variable_name'])
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# ## PERT Distribution
#
# The PERT distribution ("PERT" is an acronym for "project evaluation and review techniques")
# is a generally bell-shaped curve that, unlike the normal distribution, has finite minimum and
# maximum values. It can be parameterized similar to the triangular distribution, using
# three parameters (minimum, peak, maximum). This allows a skew to be introduced, by setting
# the peak value to be other-than the midpoint between maximum and minimum values.
plt.plot(x, pert(lower_bound=0, upper_bound=5, peak=0.0).pdf(x), label='Peak=0.0')
plt.plot(x, pert(lower_bound=0, upper_bound=5, peak=0.5).pdf(x), label='Peak=0.5')
plt.plot(x, pert(lower_bound=0, upper_bound=5, peak=1.0).pdf(x), label='Peak=1.0')
plt.plot(x, pert(lower_bound=0, upper_bound=5, peak=2.5).pdf(x), label='Peak=2.5')
_=plt.legend()
# The relative peakiness (i.e., kurtosis) of the distribution can be controlled
# using the gamma parameter. The default value of gamma for a PERT distrubution is 4.0,
# but other positive numbers can be used as well, with
# higher numbers for a distribution that more favors outcomes
# near the peak, or smaller numbers for a distribution that gives less pronounced
# weight to value near the peak, and relatively more weight to the tails. In the limit,
# setting gamma to zero results in a uniform distribution.
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=1).pdf(x), label='gamma=1')
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=2).pdf(x), label='gamma=2')
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=3).pdf(x), label='gamma=3')
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=4).pdf(x), label='gamma=4', lw=3.0)
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=5).pdf(x), label='gamma=5')
plt.plot(x, pert(lower_bound=0, upper_bound=5, gamma=10).pdf(x), label='gamma=10')
_=plt.legend()
# + raw_mimetype="text/restructuredtext" active=""
# .. autofunction:: emat.util.distributions.pert
# -
# The PERT distribution can be indicated in a yaml scope file using the name "pert",
# with optional values for other named arguments outlined in the function docstring
# shown above.
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 1.0
min: 0
max: 5
dist:
name: pert
peak: 4
gamma: 3
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = get_bounds(s['uncertain_variable_name'])
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# It is also valid to include the `min` and `max` values under the `dist` key, instead of
# as top level keys for the parameter definition.
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 1.0
dist:
name: pert
min: 0
max: 5
peak: 4
gamma: 3
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = get_bounds(s['uncertain_variable_name'])
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
# ## Other Distributions
# + raw_mimetype="text/restructuredtext" active=""
# It is possible to use any other continuous distribution provided in the :any:`scipy.stats` module.
# As a demonstration, below we define a trapezoidal distribution for an uncertainty. Instead of
# using the more intuitively named keys shown above, it is necessary to fall back to the standard
# :any:`scipy.stats` names for each of the distribution parameters, and they must all be defined within
# the `dist` key, which may be less intuitive than the suggested distributions above. For example,
# note in the example below that the upper bound of the distribution is implicitly set to 7 based
# on the parameters, and that upper bound is not explicitly identified in the yaml file.
# -
y = """---
scope:
name: demonstration
inputs:
uncertain_variable_name:
ptype: uncertainty
desc: Slightly More Verbose Description
default: 1.0
dist:
name: trapz
c: 0.2
d: 0.5
loc: 2
scale: 5
outputs:
performance_measure_name:
kind: maximize
...
"""
s = emat.Scope('t.yaml', scope_def=y)
bounds = get_bounds(s['uncertain_variable_name'])
x = numpy.linspace(*bounds)
y = s['uncertain_variable_name'].dist.pdf(x)
_=plt.plot(x,y)
| docs/source/emat.scope/continuous_distributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import required libraries
from IPython.core.display import display, HTML
# Widen the notebook cells to use 90% of the browser window.
display(HTML("<style>.container { width:90% !important; }</style>"))
from datetime import datetime
from numpy import asarray
from numpy import save
from numpy import load
import ee, time
# An authenticated Earth Engine session must already exist for this to succeed.
ee.Initialize()
# Declare the geographic region of interest (lon/lat vertices; first and
# last points coincide to close the ring) to be downloaded.
poly = ee.Geometry.Polygon(
    [[-100.47791937504411,20.74346583603706],
     [-100.4895917252222,20.713284159358064],
     [-100.49508459795548,20.694658420906517],
     [-100.50950337655922,20.688877557615758],
     [-100.50332393344829,20.590569133640255],
     [-100.48684533170285,20.573213991681257],
     [-100.46418724893185,20.56164279811083],
     [-100.47860603820642,20.533996968077325],
     [-100.47520705713644,20.520338992188012],
     [-100.43960678997206,20.499229802112993],
     [-100.42561713526564,20.50461592451268],
     [-100.40394327919928,20.482630683825533],
     [-100.386863884725,20.481183476053257],
     [-100.38609144982618,20.497905928329736],
     [-100.35725388110177,20.50498026369214],
     [-100.31966205174498,20.511893506441073],
     [-100.27912532614545,20.530924710995205],
     [-100.24702637427141,20.546516613350068],
     [-100.23466742319654,20.559213994044974],
     [-100.2342545745642,20.582246761632195],
     [-100.18996831855864,20.58321094995086],
     [-100.15426468975389,20.613097183954853],
     [-100.22773183008509,20.665001209044437],
     [-100.35716596338435,20.672067765281636],
     [-100.3757043918599,20.680097577165935],
     [-100.39561603676037,20.671104158200457],
     [-100.42033393974086,20.68555760842041],
     [-100.41861741654286,20.709001384894357],
     [-100.41914468435314,20.74526087901061],
     [-100.39133704734316,20.781855674344172],
     [-100.37657497118374,20.81459086744436],
     [-100.39545669532087,20.839939708856246],
     [-100.45004204144581,20.83833548324541],
     [-100.48299924265586,20.79790323713937],
     [-100.47791937504411,20.74346583603706]])
# Load the array with desired dates
dates = load('../data/datesSAR.npy')
# Export the SAR images to google Drive
# +
today = datetime.today().strftime('%Y-%m-%d')
for date in dates:
    #print('Generating data for ' + date)
    # For each start date, take the first Sentinel-1 GRD scene between
    # `date` and today matching VV polarisation, IW mode, 10 m resolution
    # and a descending orbit, clipped to the polygon.
    image = ee.ImageCollection('COPERNICUS/S1_GRD')\
        .filterBounds(poly)\
        .filterDate(ee.Date(date), ee.Date(today))\
        .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV'))\
        .filter(ee.Filter.eq('instrumentMode', 'IW'))\
        .select('VV')\
        .filter(ee.Filter.eq('resolution_meters', 10))\
        .filter(ee.Filter.eq('orbitProperties_pass', 'DESCENDING'))\
        .first()\
        .clip(poly)
    # Queue a Drive export task for this scene; tasks run server-side.
    gdexport = ee.batch.Export.image.toDrive(
        image,
        description = 'Qro_' + date,
        folder = 'EarthEngineImages',
        maxPixels = 1540907088,
        scale = 10,
        region = poly
    )
    gdexport.start()
    # Throttle task submission so the EE task queue is not flooded.
    time.sleep(5)
print('Data export tasks in Queue')
| code/.ipynb_checkpoints/CollectImages-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Jupyter Notebooks (libros de notas o cuadernos Jupyter)
# ==================
#
# * Puedes ejecutar un `Cell` (celda) pulsando ``[shift] + [Enter]`` o presionando el botón `Play` en la barra de herramientas.
#
# 
#
# * Puedes obtener ayuda sobre una función u objeto presionando ``[shift] + [tab]`` después de los paréntesis de apertura ``function(``
#
# 
#
# * También puedes obtener la ayuda ejecutando ``function?``
#
# 
# ## Matrices de Numpy
# Manipular matrices de `numpy` es un parte muy importante del aprendizaje automático en Python (en realidad, de cualquier tipo de computación científica). Esto será un repaso para la mayoría. En cualquier caso, repasemos las características más importantes.
# +
import numpy as np
# Random number seed (for reproducibility)
rnd = np.random.RandomState(seed=123)
# Generate a random matrix
X = rnd.uniform(low=0.0, high=1.0, size=(3, 5))  # 3x5 dimensions
print(X)
# -
# (tener en cuenta que los arrays en numpy se indexan desde el 0, al igual que la mayoría de estructuras en Python)
# +
# Accessing elements

# Get a single element
# (first row, first column)
print(X[0, 0])

# Get a row
# (second row)
print(X[1])

# Get a column
# (second column)
print(X[:, 1])
# -
# Get the transpose
print(X.T)
# $$\begin{bmatrix}
# 1 & 2 & 3 & 4 \\
# 5 & 6 & 7 & 8
# \end{bmatrix}^T
# =
# \begin{bmatrix}
# 1 & 5 \\
# 2 & 6 \\
# 3 & 7 \\
# 4 & 8
# \end{bmatrix}
# $$
#
#
# Create a row vector of evenly spaced numbers
# over a fixed interval
y = np.linspace(0, 12, 5)
print(y)
# Turn the row vector into a column vector
print(y[:, np.newaxis])
# +
# Get the shape of an array and change it

# Generate a random array
rnd = np.random.RandomState(seed=123)
X = rnd.uniform(low=0.0, high=1.0, size=(3, 5))  # a 3 x 5 array
print(X)
print(X.shape)
print(X.reshape(5, 3))
# -
# Index using a set of integers
indices = np.array([3, 1, 0])
print(indices)
X[:, indices]
# Hay mucho más que aprender, pero esto cubre algunas de las cosas fundamentales que se tratarán en este curso.
# ## Matrices dispersas de SciPy
# No utilizaremos demasiado las matrices dispersas, pero son muy útiles en múltiples situaciones. En algunas tareas de aprendizaje automático, especialmente en aquellas asociadas con análisis de textos, los datos son casi siempre ceros. Guardar todos estos ceros es muy poco eficiente, mientras que representar estas matrices de forma que solo almacenemos `lo qué no es cero` es mucho más eficiente. Podemos crear y manipular matrices dispersas de la siguiente forma:
# +
from scipy import sparse

# Create a matrix of random values between 0 and 1
rnd = np.random.RandomState(seed=123)
X = rnd.uniform(low=0.0, high=1.0, size=(10, 5))
print(X)
# -
# Zero out the majority of the elements
X[X < 0.7] = 0
print(X)
# Convert X into a CSR (Compressed-Sparse-Row) matrix
X_csr = sparse.csr_matrix(X)
print(X_csr)
# Convert the CSR matrix back into a dense matrix
print(X_csr.toarray())
# (puede que encuentres otra forma alternativa para convertir matrices dispersas a densas: `numpy.todense`; `toarray` devuelve un array numpy, mientras que `todense` devuelve una matriz numpy. En este tutorial trabajaremos con arrays numpy, no con matrices, ya que estas últimas no son soportadas por scikit-learn.)
# La representación CSR puede ser muy eficiente para hacer cómputo, pero no tanto para añadir elementos. Para ello, la representación LIL (*List-In-List*) es mejor:
# +
# Create an empty LIL matrix and add some elements
X_lil = sparse.lil_matrix((5, 5))
for i, j in np.random.randint(0, 5, (15, 2)):
    X_lil[i, j] = i + j
print(X_lil)
print(type(X_lil))
# -
X_dense = X_lil.toarray()
print(X_dense)
print(type(X_dense))
# A menudo, una vez creamos la matriz LIL, es útil convertirla al formato CSR (muchos algoritmos de scikit-learn requieren formatos CSR)
# Convert the LIL matrix to CSR for efficient computation.
X_csr = X_lil.tocsr()
print(X_csr)
print(type(X_csr))
# Los formatos dispersos disponibles que pueden ser útiles para distintos problemas son:
# - `CSR` (*compressed sparse row*).
# - `CSC` (*compressed sparse column*).
# - `BSR` (*block sparse row*).
# - `COO` (*coordinate*).
# - `DIA` (*diagonal*).
# - `DOK` (*dictionary of keys*).
# - `LIL` (*list in list*).
#
# El paquete [``scipy.sparse``](http://docs.scipy.org/doc/scipy/reference/sparse.html) tienen bastantes funciones para matrices dispersas, incluyendo álgebra lineal, algoritmos de grafos y mucho más.
# ## matplotlib
# Otra parte muy importante del aprendizaje automático es la visualización de datos. La herramienta más habitual para esto en Python es [`matplotlib`](http://matplotlib.org). Es un paquete extremadamente flexible y ahora veremos algunos elementos básicos.
#
# Ya que estamos usando los libros (*notebooks*) Jupyter, vamos a usar una de las [funciones mágicas](https://ipython.org/ipython-doc/3/interactive/magics.html) que vienen incluidas en IPython, el modo "*matoplotlib inline*", que dibujará los *plots* directamente en el libro.
# %matplotlib inline
import matplotlib.pyplot as plt
# Draw a line: sin(x) over 100 evenly spaced points in [0, 10]
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x));
# Draw a scatter plot of 500 standard-normal points
x = np.random.normal(size=500)
y = np.random.normal(size=500)
plt.scatter(x, y);
# +
# Show images using imshow
# - Note that the default origin is the top-left corner
x = np.linspace(1, 12, 100)
y = x[:, np.newaxis]  # column vector (100, 1); broadcasting yields a 100x100 array
im = y * np.sin(x) * np.cos(y)
print(im.shape)
plt.imshow(im);
# -
# Contour plot of the same array
# - Here the origin is the bottom-left corner
plt.contour(im);
# El modo "widget" en lugar de inline permite que los plots sean interactivos
# %matplotlib widget
# Plot en 3D
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
# Build 2D coordinate grids matching im's shape.
xgrid, ygrid = np.meshgrid(x, y.ravel())
# cstride/rstride subsample the surface mesh; linewidth=0 hides the mesh lines.
ax.plot_surface(xgrid, ygrid, im, cmap=plt.cm.viridis, cstride=2, rstride=2, linewidth=0);
# Hay muchísimos tipos de gráficos disponibles. Una forma útil de explorarlos es mirar la [galería de matplotlib](http://matplotlib.org/gallery.html).
#
# Puedes probar estos ejemplos fácilmente en el libro de notas: simplemente copia el enlace ``Source Code`` de cada página y pégalo en el libro usando el comando mágico ``%load``.
# Por ejemplo:
# +
# # %load http://matplotlib.org/mpl_examples/pylab_examples/ellipse_collection.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import EllipseCollection
x = np.arange(10)
y = np.arange(15)
X, Y = np.meshgrid(x, y)
# One (x, y) offset per ellipse: shape (150, 2).
XY = np.hstack((X.ravel()[:, np.newaxis], Y.ravel()[:, np.newaxis]))
ww = X/10.0  # ellipse widths
hh = Y/15.0  # ellipse heights
aa = X*9     # rotation angles
fig, ax = plt.subplots()
ec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,
                       transOffset=ax.transData)
# Colour each ellipse by the value X+Y at its position.
ec.set_array((X + Y).ravel())
ax.add_collection(ec)
ax.autoscale_view()
ax.set_xlabel('X')
ax.set_ylabel('y')
cbar = plt.colorbar(ec)
cbar.set_label('X+Y')
plt.show()
# -
| notebooks-spanish/02-herramientas_cientificas_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GodfreyMamauag/OOP-1-2/blob/main/OOP_Concept.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="kEm3tobp1w3w"
# Python classes and objects
# + id="jebvIylU1ebN"
class Myclass:
    """Minimal class definition; `pass` marks the intentionally empty body."""
    pass
# + colab={"base_uri": "https://localhost:8080/"} id="sFgEP52C1zvc" outputId="5b9e823c-0c1e-4ad9-c237-6cf6aa8d3016"
class OOP1_2:
    # Class attribute, shared by all instances.
    X = 5
    # Runs once, at class-definition time (prints 5).
    print(X)
# + colab={"base_uri": "https://localhost:8080/"} id="VeSY4Wj710zC" outputId="750f4c1b-d852-464d-c20c-ee30bb1b8856"
class OOP1_2:
    """A person with a name and an age."""

    def __init__(self, name, age):  # __init__(parameter)
        # Instance attributes.
        self.name = name
        self.age = age

    def identity(self):
        """Print the person's name and age on one line."""
        print(self.name, self.age)


person = OOP1_2("Godfrey", 18)  # create objects
print(person.name)
print(person.age)
# BUG FIX: the original `print(person.identity)` printed the bound-method
# object (`<bound method ...>`) because the call parentheses were missing.
person.identity()
# + colab={"base_uri": "https://localhost:8080/"} id="-PGG1mDR13-l" outputId="8da2a866-a2a1-47d3-a4cb-e1706e96303d"
# Reassigning an instance attribute.
person.name = "Godfrey"
print(person.name)
print(person.age)
# + colab={"base_uri": "https://localhost:8080/"} id="63FUReQv15RW" outputId="fd093370-99b7-44c5-951e-13395de8228a"
# Attributes can be reassigned at any time.
person.age = 18
print(person.name)
print(person.age)
# + id="fF-5rkWK17LN"
# Delete an instance attribute.
del person.name
# + colab={"base_uri": "https://localhost:8080/"} id="7pHz89lI19hN" outputId="cb60e575-32c0-43c7-c298-bccdf6b05124"
print(person.age)
# Deliberately raises AttributeError: 'name' was deleted above.
print(person.name)
# + [markdown] id="Rzm3EE_o1_5k"
# Application 1 - Write a Python program that computes the area of a square, and name its class as Square, side as attributes
# + colab={"base_uri": "https://localhost:8080/"} id="Qnyaj6md2ATj" outputId="4e40ddf1-4dc9-4e09-baa8-5e641ccf25dc"
class Square:
    """A square described by the length of its side."""

    def __init__(self, sides):
        # Side length of the square.
        self.sides = sides

    def area(self):
        """Return the area: side length squared."""
        return self.sides ** 2

    def display(self):
        """Print the computed area."""
        print("the area of the square is:", self.area())


square = Square(4)
print(square.sides)
square.display()
# + [markdown] id="uw4B4A-Z2EKO"
# Application 2 - Write a Python program that displays your full name, age, course, school. Create a class named MyClass, and name, age, school as attributes.
# + colab={"base_uri": "https://localhost:8080/"} id="xZMGS5hW2GNd" outputId="954d181b-2826-44ab-b0c4-b7e27924fc75"
class MyClass:
    """A student record: full name, age, course and school."""

    def __init__(self, name, age, course, school):
        self.name = name
        self.age = age
        self.course = course
        self.school = school

    def identity(self):
        """Print all four attributes on one line."""
        print(self.name, self.age, self.course, self.school)


person = MyClass("Godfrey", 18, "BS Computer Engineering", "CVSU-Main Campus")
# Print each attribute on its own line.
for attribute in (person.name, person.age, person.course, person.school):
    print(attribute)
| OOP_Concept.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Autoencoder
# 
# In this example we will demonstrate how you can create a convolutional autoencoder in Gluon
# +
import random
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import autograd, gluon
# -
# ## Data
#
# We will use the FashionMNIST dataset, which is of a similar format to MNIST but is richer and has more variance
batch_size = 512
# Use the GPU when one is available, otherwise fall back to CPU.
ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
# +
# HWC uint8 image -> CHW float32 scaled to [0, 1]; the label passes through unchanged.
transform = lambda x,y: (x.transpose((2,0,1)).astype('float32')/255., y)
train_dataset = gluon.data.vision.FashionMNIST(train=True)
test_dataset = gluon.data.vision.FashionMNIST(train=False)
train_dataset_t = train_dataset.transform(transform)
test_dataset_t = test_dataset.transform(transform)
# last_batch='rollover' carries an incomplete final batch over to the next epoch.
train_data = gluon.data.DataLoader(train_dataset_t, batch_size=batch_size, last_batch='rollover', shuffle=True, num_workers=5)
test_data = gluon.data.DataLoader(test_dataset_t, batch_size=batch_size, last_batch='rollover', shuffle=True, num_workers=5)
# -
# Preview the first ten (untransformed) training images.
plt.figure(figsize=(20,10))
for i in range(10):
    ax = plt.subplot(1, 10, i+1)
    ax.imshow(train_dataset[i][0].squeeze().asnumpy(), cmap='gray')
    ax.axis('off')
# ## Network
net = gluon.nn.HybridSequential()
# Encoder: four strided conv + BatchNorm stages that progressively shrink the
# 28x28 input down to a compact code (32 channels; see net.summary below).
encoder = gluon.nn.HybridSequential()
encoder.add(
    gluon.nn.Conv2D(channels=4, kernel_size=3, padding=1, strides=(2,2), activation='relu'),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=8, kernel_size=3, padding=1, strides=(2,2), activation='relu'),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=16, kernel_size=3, padding=1, strides=(2,2), activation='relu'),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=32, kernel_size=3, padding=0, strides=(2,2),activation='relu'),
    gluon.nn.BatchNorm()
)
# Decoder: conv layers interleaved with nearest-neighbour 2x upsampling;
# final sigmoid keeps pixel values in [0, 1] to match the scaled input.
decoder = gluon.nn.HybridSequential()
decoder.add(
    gluon.nn.Conv2D(channels=32, kernel_size=3, padding=2, activation='relu'),
    gluon.nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=16, kernel_size=3, padding=1, activation='relu'),
    gluon.nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=8, kernel_size=3, padding=2, activation='relu'),
    gluon.nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
    gluon.nn.BatchNorm(),
    gluon.nn.Conv2D(channels=4, kernel_size=3, padding=1, activation='relu'),
    gluon.nn.Conv2D(channels=1, kernel_size=3, padding=1, activation='sigmoid')
)
net.add(
    encoder,
    decoder
)
net.initialize(ctx=ctx)
# Print per-layer output shapes for a single test image.
net.summary(test_dataset_t[0][0].expand_dims(axis=0).as_in_context(ctx))
# We can see that the original image goes from 28x28 = 784 pixels to a vector of length 32. That is a ~25x information compression rate.
# Then the decoder brings back this compressed information to the original shape
# Reconstruction losses: the training loop combines L2 and L1 between
# the input image and the decoder output.
l2_loss = gluon.loss.L2Loss()
l1_loss = gluon.loss.L1Loss()
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 0.001, 'wd':0.001})
# Hybridize with static shapes/allocations for faster execution.
net.hybridize(static_shape=True, static_alloc=True)
# ## Training loop
epochs = 20
for e in range(epochs):
    curr_loss = 0.
    for i, (data, _) in enumerate(train_data):
        data = data.as_in_context(ctx)
        with autograd.record():
            output = net(data)
            # Compute the L2 and L1 losses between the original and the generated image
            l2 = l2_loss(output.flatten(), data.flatten())
            l1 = l1_loss(output.flatten(), data.flatten())
            l = l2 + l1
        l.backward()
        # Normalize the gradient update by the batch size.
        trainer.step(data.shape[0])
        curr_loss += l.mean()
    # asscalar() synchronizes with the device once per epoch.
    print("Epoch [{}], Loss {}".format(e, curr_loss.asscalar()/(i+1)))
# ## Testing reconstruction
# We plot 10 images and their reconstruction by the autoencoder. The results are pretty good for a ~25x compression rate!
# Plot 10 random test images (top row) and their reconstructions (bottom row).
plt.figure(figsize=(20,4))
for i in range(10):
    # BUG FIX: random.randint's upper bound is inclusive, so the original
    # randint(0, len(test_dataset)) could return len(test_dataset) and raise
    # an IndexError; randrange excludes the upper bound.
    idx = random.randrange(len(test_dataset))
    img, _ = test_dataset[idx]
    x, _ = test_dataset_t[idx]
    data = x.as_in_context(ctx).expand_dims(axis=0)
    output = net(data)
    ax = plt.subplot(2, 10, i+1)
    ax.imshow(img.squeeze().asnumpy(), cmap='gray')
    ax.axis('off')
    ax = plt.subplot(2, 10, 10+i+1)
    # Rescale the [0, 1] output back to [0, 255] and move channels last.
    ax.imshow((output[0].asnumpy() * 255.).transpose((1,2,0)).squeeze(), cmap='gray')
    _ = ax.axis('off')
# ## Manipulating latent space
# We now use separately the **encoder** that takes an image to a latent vector and the **decoder** that transform a latent vector into images
# We get two images from the testing set
# +
# Pick two random test images and their transformed tensors.
# BUG FIX: randrange keeps the index strictly below len(test_dataset);
# the original random.randint upper bound was inclusive and could overflow.
idx = random.randrange(len(test_dataset))
img1, _ = test_dataset[idx]
x, _ = test_dataset_t[idx]
data1 = x.as_in_context(ctx).expand_dims(axis=0)
idx = random.randrange(len(test_dataset))
img2, _ = test_dataset[idx]
x, _ = test_dataset_t[idx]
data2 = x.as_in_context(ctx).expand_dims(axis=0)
plt.figure(figsize=(2,2))
plt.imshow(img1.squeeze().asnumpy(), cmap='gray')
plt.show()
plt.figure(figsize=(2,2))
plt.imshow(img2.squeeze().asnumpy(), cmap='gray')
# -
# We get the latent representations of the images by passing them through the network
latent1 = encoder(data1)  # latent code of the first image
latent2 = encoder(data2)  # latent code of the second image
# We see that the latent vector is made of 32 components
latent1.shape
# We interpolate the two latent representations, vectors of 32 values, to get a new intermediate latent representation, pass it through the decoder and plot the resulting decoded image
# +
num = 10
plt.figure(figsize=(20, 5))
for i in range(int(num)):
    # Linear interpolation between the two latent codes:
    # i=0 is mostly latent1, i=num-1 is exactly latent2.
    new_latent = latent2*(i+1)/num + latent1*(num-i)/num
    output = decoder(new_latent)
    #plot result
    ax = plt.subplot(1, num, i+1)
    ax.imshow((output[0].asnumpy() * 255.).transpose((1,2,0)).squeeze(), cmap='gray')
    _ = ax.axis('off')
# -
# -
# We can see that the latent space learnt by the autoencoder is fairly smooth, there is no sudden jump from one shape to another
| example/autoencoder/convolutional_autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import re
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import re
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
import spacy
# +
# Load the article CSVs (currently only articles1.csv) and concatenate them.
df_news=[]
for i in range(1,2):
    df_news.append(pd.read_csv('articles'+str(i)+'.csv', index_col='id'))
df_news = pd.concat(df_news)
print(df_news.shape)
df_news.head()
# -
# Drop a CSV index artifact column and the article URL.
df_news = df_news.drop(['Unnamed: 0', 'url'], axis=1)
df_news.isnull().sum()
# Publications with more than 100 articles.
x = df_news.publication.value_counts()
print(x[x>100].index[:])
# Authors with more than 100 articles.
x = df_news.author.value_counts()
print(x[x>100].index[:])
# NOTE(review): len() of the whole Series is just its row count, so np.mean is
# a no-op here; an average article length would be df_news.content.str.len().mean().
np.mean(len(df_news.content.iloc[:]))
df_news.content.value_counts(normalize=True)
# Features: raw article text; target: the publication that ran it.
X=df_news.content
y=df_news.publication
# 50/50 train/test split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
# +
# cv1: uni/bi-gram raw counts; vocabulary pruned to terms appearing
# in between 5% and 50% of documents.
cv1 = CountVectorizer(ngram_range=(1,2), binary=False, stop_words='english',
                      lowercase=True, strip_accents='unicode',
                      max_df=0.5, min_df=0.05)
X_train_cv1 = cv1.fit_transform(X_train)
X_test_cv1 = cv1.transform(X_test)
# Dense document-term matrix for inspection.
DTM1 = pd.DataFrame(X_train_cv1.toarray(), columns=cv1.get_feature_names())
DTM1.head()
# +
# cv2: same pruned vocabulary as cv1 but binary (presence/absence) features.
cv2 = CountVectorizer(ngram_range=(1,2), binary=True, stop_words='english',
                      lowercase=True, strip_accents='unicode',
                      max_df=0.5, min_df=0.05)
X_train_cv2 = cv2.fit_transform(X_train)
X_test_cv2 = cv2.transform(X_test)
DTM2 = pd.DataFrame(X_train_cv2.toarray(), columns=cv2.get_feature_names())
DTM2.head()
# +
# cv3: binary features with no document-frequency pruning (full vocabulary).
cv3 = CountVectorizer(ngram_range=(1,2), binary=True, stop_words='english',
                      strip_accents='unicode')
X_train_cv3 = cv3.fit_transform(X_train)
X_test_cv3 = cv3.transform(X_test)
DTM3 = pd.DataFrame(X_train_cv3.toarray(), columns=cv3.get_feature_names())
DTM3.shape
# +
# cv4: as cv3 with lowercase=True spelled out explicitly.
cv4 = CountVectorizer(ngram_range=(1,2), binary=True, stop_words='english',
                      strip_accents='unicode', lowercase=True)
X_train_cv4 = cv4.fit_transform(X_train)
X_test_cv4 = cv4.transform(X_test)
DTM4 = pd.DataFrame(X_train_cv4.toarray(), columns=cv4.get_feature_names())
DTM4.shape
# +
# cv5: as cv4, additionally replacing undecodable bytes instead of raising.
cv5 = CountVectorizer(ngram_range=(1,2), binary=True, stop_words='english',
                      strip_accents='unicode', lowercase=True, decode_error='replace')
X_train_cv5 = cv5.fit_transform(X_train)
X_test_cv5 = cv5.transform(X_test)
DTM5 = pd.DataFrame(X_train_cv5.toarray(), columns=cv5.get_feature_names())
DTM5.shape
# +
# cv6: non-binary counts with the 5%-50% document-frequency pruning (like cv1).
cv6 = CountVectorizer(stop_words='english', lowercase=True, ngram_range=(1,2),
                      strip_accents='unicode',
                      max_df=0.5, min_df=0.05)
X_train_cv6 = cv6.fit_transform(X_train)
X_test_cv6 = cv6.transform(X_test)
DTM6 = pd.DataFrame(X_train_cv6.toarray(), columns=cv6.get_feature_names())
DTM6.shape
# -
# #### Create a logistic regression model to use
# from sklearn.linear_model import LogisticRegression
# lr = LogisticRegression()
# Class balance of the test labels, least frequent first.
print(y_test.value_counts(ascending=True))
lst = list(y_test.unique())
print(lst)
print(lst[::-1])  # slicing returns a new reversed list
# NOTE(review): reversed() returns an iterator and its result is discarded here.
reversed(lst)
# Train the first model
from sklearn.linear_model import LogisticRegression
# Weakly regularized (C=1000) L2 logistic regression on the cv1 count features.
lr = LogisticRegression(penalty='l2', tol=0.01, C=1000, n_jobs=-1)
lr.fit(X_train_cv1, y_train)
y_pred_cv1 = lr.predict(X_test_cv1)
# +
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

def conf_matrix(actual, predicted):
    """Plot a labelled confusion-matrix heatmap and return
    [accuracy, precision, recall, f1] computed from it.

    Counting scheme (author's design): each diagonal cell is split evenly
    between "true positive" and "true negative"; cells above the diagonal
    count as false positives and cells below as false negatives.
    """
    true_neg=0
    true_pos=0
    false_neg=0
    false_pos=0
    cm = confusion_matrix(actual, predicted)
    sns.heatmap(cm, xticklabels=['Atlantic', 'Breitbart', 'Bus_Insider', 'CNN', 'NYT'],
                yticklabels=['Atlantic', 'Breitbart', 'Bus_Insider', 'CNN', 'NYT'], annot=True,
                fmt='d', annot_kws={'fontsize':20}, cmap="YlGnBu");
    print(cm.shape)
    # BUG FIX: the original loops ran range(0, shape-1) and silently skipped
    # the last row and column of the confusion matrix.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            if i==j:
                true_neg+=cm[i,j]/2
                true_pos+=cm[i,j]/2
            elif (j>i):
                false_pos+=cm[i,j]
            else:
                false_neg+=cm[i,j]
    accuracy = round((true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg),3)
    precision = round((true_pos) / (true_pos + false_pos),3)
    recall = round((true_pos) / (true_pos + false_neg),3)
    f1 = round(2 * (precision * recall) / (precision + recall),3)
    cm_results = [accuracy, precision, recall, f1]
    print(cm_results)
    return cm_results
# -
cm_lr = conf_matrix(y_test, y_pred_cv1)
# +
# Fit the first Naive Bayes model
from sklearn.naive_bayes import MultinomialNB
mnb = MultinomialNB()
mnb.fit(X_train_cv1, y_train)
y_pred_mnb = mnb.predict(X_test_cv1)
# +
# Fit the second Naive Bayes model
from sklearn.naive_bayes import BernoulliNB
bnb = BernoulliNB()
bnb.fit(X_train_cv1, y_train)
y_pred_bnb = bnb.predict(X_test_cv1)
# -
# Here's the heat map for the first Naive Bayes model
cm_mnb = conf_matrix(y_test, y_pred_mnb)
# Here's the heat map for the second Naive Bayes model
cm_bnb = conf_matrix(y_test, y_pred_bnb)
# Collect the metrics of the three models side by side.
results = pd.DataFrame(list(zip(cm_lr, cm_bnb, cm_mnb)))
results = results.set_index([['Accuracy', 'Precision', 'Recall', 'F1 Score']])
results.columns = ['LogReg', 'BNB', 'MNB']
results
# +
# Create TF-IDF versions of the Count Vectorizers created earlier in the exercise
tfidf = TfidfVectorizer(ngram_range=(1,2), stop_words='english', max_df=0.5, min_df=0.05, strip_accents='unicode')
X_train_tfidf = tfidf.fit_transform(X_train)
X_test_tfidf = tfidf.transform(X_test)
# -
# Logistic regression on the TF-IDF features.
lr.fit(X_train_tfidf, y_train)
y_pred_tfidf_lr = lr.predict(X_test_tfidf)
cm_tfidf_lr = conf_matrix(y_test, y_pred_tfidf_lr)
# Multinomial Naive Bayes on the TF-IDF features (densified, to match fit).
mnb.fit(X_train_tfidf.toarray(), y_train)
# BUG FIX: the original predicted on `X_test_tfidf1`, a name that was never
# defined (NameError); the intended matrix is `X_test_tfidf`.
y_pred_tfidf_nb = mnb.predict(X_test_tfidf.toarray())
cm_tfidf_mnb = conf_matrix(y_test, y_pred_tfidf_nb)
# +
# Compare the TF-IDF models, then append them to the earlier results table.
results_tf = pd.DataFrame(list(zip(cm_tfidf_lr, cm_tfidf_mnb)))
results_tf = results_tf.set_index([['Accuracy', 'Precision', 'Recall', 'F1 Score']])
results_tf.columns = ['LR1-TFIDF', 'MNB-TFIDF']
# Mid-cell expression: has no visible effect (only a cell's last expression displays).
results_tf
results = pd.concat([results, results_tf], axis=1)
results
# -
| Code/.ipynb_checkpoints/TopicMod-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jproctor-rebecca/DS-Unit-2-Linear-Models/blob/master/module2-regression-2/LS_DS_212_assignment_RJProctor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W2DlcH3sXShW" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 2*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Regression 2
#
# ## Assignment
#
# You'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.
#
# - [ ] Do train/validation split. Use data from April & May 2016 to train. Use data from June 2016 to test.
# - [ ] Engineer at least two new features. (See below for explanation & ideas.)
# - [ ] Fit a linear regression model with at least two features.
# - [ ] Get the model's coefficients and intercept.
# - [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.
# - [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
#
# #### [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)
#
# > "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — <NAME>, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)
#
# > "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — <NAME>, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf)
#
# > Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work.
#
# #### Feature Ideas
# - Does the apartment have a description?
# - How long is the description?
# - How many total perks does each apartment have?
# - Are cats _or_ dogs allowed?
# - Are cats _and_ dogs allowed?
# - Total number of rooms (beds + baths)
# - Ratio of beds to baths
# - What's the neighborhood, based on address or latitude & longitude?
#
# ## Stretch Goals
# - [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression
# - [ ] If you want more introduction, watch [<NAME>, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)
# (20 minutes, over 1 million views)
# - [ ] Add your own stretch goal(s) !
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + colab_type="code" id="cvrw-T3bZOuW" colab={}
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data, indexed by listing date.
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv',
                 parse_dates=['created'],
                 index_col='created')
assert df.shape == (49352, 33)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
        (df['price'] <= np.percentile(df['price'], 99.5)) &
        (df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
        (df['latitude'] < np.percentile(df['latitude'], 99.95)) &
        (df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
        (df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# + id="XYIoj3edQ2iy" colab_type="code" colab={}
# assign data to clearly labeled DataFrame
# NOTE(review): this is an alias, not a copy — dropna(inplace=True) below
# mutates `df` as well.
apartments = df
# drop NaNs
apartments.dropna(inplace=True)
# + id="M6Bc_eDynp-_" colab_type="code" colab={}
# convert to date time format (already handled by parse_dates in read_csv)
#apartments['created'] = pd.to_datetime(apartments['created'],
#                                       infer_datetime_format=True)
# + [markdown] id="zaXTXk2LPKV5" colab_type="text"
# #### EDA
# + id="yXVDq86JNu-u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 711} outputId="a07cc897-4312-4b16-fd76-5dc471808259"
# Column dtypes and non-null counts.
apartments.info()
# + id="4knM4TQAN3WT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="f398adfd-001d-4848-d18e-dd5f08c863f1"
apartments.head()
# + id="dkQ0HbqYPOAI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="8dd800e8-cd5b-4462-d7ba-581b0f651369"
# Price against each candidate feature, one scatter plot per cell.
import matplotlib.pyplot as plt
plt.scatter(apartments['latitude'],
            apartments['price'], )
plt.xlabel('latitude')
plt.ylabel('price');
# + id="VmxxdotuPv_Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="9f9df56b-a1a5-44fc-f9a0-3d502ba64d78"
import matplotlib.pyplot as plt
plt.scatter(apartments['longitude'],
            apartments['price'], )
plt.xlabel('longitude')
plt.ylabel('price');
# + id="2tVGBJc-P25M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="a44f429e-f418-4d23-bda3-6ce3c967992c"
import matplotlib.pyplot as plt
plt.scatter(apartments['bedrooms'],
            apartments['price'], )
plt.xlabel('bedrooms')
plt.ylabel('price');
# + id="i7P8SY4sP_MC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="761fe216-1191-444a-b254-61cc570f1cc8"
import matplotlib.pyplot as plt
plt.scatter(apartments['bathrooms'],
            apartments['price'], )
plt.xlabel('bathrooms')
plt.ylabel('price');
# + [markdown] id="hSo1ChZAsqU7" colab_type="text"
# #### Feature Engineering
# + id="YmYHZkcYn329" colab_type="code" colab={}
# reindex by 'created'
#apartments = apartments.set_index('created')
#apartments.head()
# + id="mc4IE59jsuTH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 243} outputId="a335fcf4-a2e8-4e6d-8554-0f88370acd46"
# Numeric sum of latitude and longitude. (The original comment said
# "concatenating", but `+` on two float columns adds them element-wise.)
apartments['location'] = apartments['latitude'] + apartments['longitude']
print(apartments['location'])
# + id="NaNJvKNJujGL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 243} outputId="758d63d3-1088-4207-9d18-6fd6bba9f280"
# Total room count: bedrooms plus bathrooms.
apartments['total_rooms'] = apartments['bedrooms'] + apartments['bathrooms']
print(apartments['total_rooms'])
# + id="-gk3atUJwmFs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="a8752bff-0565-4a28-dfac-98832041d8bc"
apartments.head()
# + [markdown] id="8Ce3L3LBMC9X" colab_type="text"
# #### Test/Validation Split
# + id="AhzOWZzlTLah" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="afd2498d-858b-4b91-d734-78df30775afc"
# create the 2D features matrix (the two engineered features)
X = apartments[['location','total_rooms']]
# create the target array
y = apartments['price']
print(X.shape)
print(y.shape)
# + id="9GecFqBIliB3" colab_type="code" colab={}
#TrainStart = '2016-04-01 00:00:00'
#TrainEnd = '2016-05-31 23:59:59'
#TestStart = '2016-06-01 00:00:00'
#TestEnd = '2016-06-30 23:59:59'
# + id="mbEXSQsMQajg" colab_type="code" colab={}
# split dataset into Training and Validation sets
# time series split: rows created before the cutoff train, the rest validate
cutoff = '2016-05-31' # 23:59:59
# CONSISTENCY FIX: use the named cutoff instead of repeating the same
# string literal (the original left `cutoff` as a dead variable).
mask = X.index < cutoff
X_train, y_train = X.loc[mask], y.loc[mask]
X_val, y_val = X.loc[~mask], y.loc[~mask]
# + id="jMDhlTmjlGh2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="182081df-1e50-4bdf-fc17-9cd131884ba0"
print(mask)
# + id="Nl8cZ7DXmmI_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 503} outputId="fc45034f-38f6-46db-c176-b5962276f03e"
print(X_train)
print(y_train)
# + [markdown] id="TaPapxCcUM_I" colab_type="text"
# #### Establish Baseline
# + id="oGoSo2akL9r0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="76e90768-ad23-4a20-f03f-ee26c12f54c7"
# baseline: predict the mean training price for every apartment
y_train.mean()
# + id="Jzi8Jy1lW2pp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="3b42a441-a299-4297-db5d-1dbd95d4c27b"
# map baseline model
# define initial/overlaying parameters
col = ['location']
# repeat the training mean once per training row
y_pred = [y_train.mean()]*len(X_train)
# create raw data plot
plt.scatter(X_train[col], y_train)
# create baseline plot (a flat line at the mean price)
plt.plot(X_train[col],
         y_pred,
         color='red',
         label='Baseline Model')
# NOTE(review): only 'location' is on the x-axis, although the label
# below mentions total rooms as well.
# define feature/properties parameters of plot(s)
plt.xlabel('Geographic location and Total Rooms')
plt.ylabel('Price')
# create legend and show plot(s)
plt.legend()
plt.show()
# + id="jGLl1DVEWo2f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="fe7e4907-15ce-4f13-8255-ebfe76f0db31"
from sklearn.metrics import mean_absolute_error
print('Baseline MAE: ', mean_absolute_error(y_train, y_pred))
# + [markdown] id="aqlWRWx5d5Nr" colab_type="text"
# #### Build Model
# + id="g0Z6wA3wd8uK" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
model = LinearRegression() # instantiate model
model.fit(X_train[col], y_train) # fit model to training data (single feature: 'location')
y_pred = model.predict(X_train[col]) # make predictions
# + id="RM04nNgQ09YS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="31337d13-fd54-4a99-eda6-d4ed36300c66"
# Slope (also called the model coefficient)
print(model.coef_)
# Intercept
print(model.intercept_)
# Print in equation form
# NOTE(review): the commented line indexes coef_[1], but this model was fit
# on a single feature, so coef_ holds only one element.
#print(f'\nprice = ({model.coef_[0]} * location) + ({model.coef_[1]} * total rooms) + ({model.intercept_})')
# + id="qlh3Limae50K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="dfb5a0a0-e9ca-45a8-81d0-69a8f32aad59"
# map regression model
# define initial/overlaying parameters
col = ['location']
# get the mean value for the length of dataset
#y_pred = [y_train.mean()]*len(X_train)
# create raw data plot
plt.scatter(X_train[col], y_train)
# create regression-line plot (y_pred now holds the model's predictions)
plt.plot(X_train[col],
         y_pred,
         color='red',
         label='Regression Model')
# define model as Regression Model for linear regression as on
# training data
# define feature/properties parameters of plot(s)
plt.xlabel('Geographic location and Total Rooms')
plt.ylabel('Price')
# create legend and show plot(s)
plt.legend()
plt.show()
# + [markdown] id="vCCp93iJyUtv" colab_type="text"
# #### Check Metrics
#
# + id="40-y7xM3ydgQ" colab_type="code" colab={}
# Mean Absolute Error (MAE)
from sklearn.metrics import mean_absolute_error
# BUG FIX: `col` is already a list (['location']), so the original
# X_train[[col]] nested it into [['location']] and made pandas raise the
# "column not found" style error noted by the author — use X_train[col].
print('Training MAE: ', mean_absolute_error(y_train, model.predict(X_train[col])))
print('Validation MAE: ', mean_absolute_error(y_val, model.predict(X_val[col])))
# + id="OYtZUWB-ymfa" colab_type="code" colab={}
# Root Mean Squared Error (RMSE)
from sklearn.metrics import mean_squared_error
print('Training RMSE:', mean_squared_error(y_train, model.predict(X_train[col]), squared=False))
print('Validation RMSE:', mean_squared_error(y_val, model.predict(X_val[col]), squared=False))
# + id="3m-m4ZFoyt3b" colab_type="code" colab={}
# R^2
from sklearn.metrics import r2_score
print('Training R2:', model.score(X_train[col], y_train))
print('Validation R2:', model.score(X_val[col], y_val))
# + [markdown] id="frQ1zUCLzKe3" colab_type="text"
# #### Multiple Linear Regression
#
# + id="q0tDnVwQzOVq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="bbbc95d1-892b-4433-a6cc-106d1545cf1b"
# pass data through your model a second time to account for noise of multiple variables
model2 = LinearRegression() # instantiate
model2.fit(X_train, y_train) # fit model to training data
# + id="GW-ibEpd0Fnc" colab_type="code" colab={}
# check metrics
# Mean Absolute Error (MAE)
print('Training MAE:', mean_absolute_error(y_train, model.predict(X_train[[col]])))
print('Validation MAE:', mean_absolute_error(y_val, model.predict(X_val[[col]])))
print()
# Root Mean Squared Error (RMSE)
print('Training RMSE:', mean_squared_error(y_train, model.predict(X_train[[col]]), squared=False))
print('Validation RMSE:', mean_squared_error(y_val, model.predict(X_val[[col]]), squared=False))
print()
# R^2 score
print('Training R2:', model2.score(X_train, y_train))
print('Training R2:', model2.score(X_val, y_val))
# + [markdown] id="BgxzONJQmApI" colab_type="text"
# ### Stretch 1
#
# #### Simple Linear Regression
#
# The ***RMSE (RSE)*** is the average amount that the response
# will deviate from the true regression line.
#
# The RSE is considered a measure of the lack of fit of the model (3.5) to
# the data. If the predictions obtained using the model are very close to the
# true outcome values, the RSE will be small, and we can conclude that the model fits the data very well.
#
# On
# the other hand, if ˆyi is very far from yi for one or more observations, then
# the RSE may be quite large, indicating that the model doesn’t fit the data
# well.
#
# The ***R^2*** statistic provides an alternative
# measure of fit. It takes the form of a proportion, the proportion of variance
# explained, and so it always takes on a value between 0 and 1, and is
# independent of the scale of Y .
#
# R^2 measures the proportion
# of variability in Y that can be explained using X. An R^2 statistic that is
# close to 1 indicates that a large proportion of the variability in the response
# has been explained by the regression.
#
# A number near 0 indicates that the
# regression did not explain much of the variability in the response; this might
# occur because the linear model is wrong, or the inherent error σ2 is high,
# or both.
#
# #### Multiple Linear Regression
#
# Instead of fitting a separate simple linear regression model for each predictor,
# a better approach is to extend the simple linear regression model
# (3.5) so that it can directly accommodate multiple predictors. We can do
# this by giving each predictor a separate slope coefficient in a single model.
# In general, suppose that we have p distinct predictors. Then the multiple
# linear regression model takes the form
#
# Y = β0 + β1X1 + β2X2 + · · · + βpXp + ,
#
# where Xj represents the jth predictor and βj quantifies the association
# between that variable and the response. We interpret βj as the average
# effect on Y of a one unit increase in Xj , holding all other predictors fixed.
#
# As was the case in the simple linear regression setting, the regression coefficients
# β0, β1, . . . , βp in are unknown, and must be estimated.
# The parameters are estimated using the same least squares approach that
# we saw in the context of simple linear regression. We choose β0, β1, . . . , βp
# to minimize the sum of squared residuals.
#
# When we perform multiple linear regression, we usually are interested in
# answering a few important questions.
# 1. Is at least one of the predictors X1,X2, . . . , Xp useful in predicting
# the response?
# 2. Do all the predictors help to explain Y , or is only a subset of the
# predictors useful?
# 3. How well does the model fit the data?
# 4. Given a set of predictor values, what response value should we predict,
# and how accurate is our prediction?
#
#
# + [markdown] id="2lZ5xzmUsQp3" colab_type="text"
# ### Stretch 2
#
# #### Statistics 101: Linear Regression, The Very Basics
#
# simple linear regression
# model mathematically the relationship between two variables
# good fit-compares relationship of one model with another model
#
# begin with a sample
#
# scatter plot y variable (sample)
#
# with only one variable - mean of y(dependant var/sample var) is best predictor of best-fit line
#
# measure each data point distance from mean-line (line of best-fit/observed values) - standard deviation or residuals/errors
#
# the residuals/errors always add up to zero
#
# square the residuals/errors (makes them all positive, emphasizes all deviations), sum the squared residuals/errors (SSE)
#
# sum_of_squares = the goal of simple linear regression is to create a linear model that minimizes (area) the sum of squares of the residuals/errors(SSE)
#
# A regression line with a good-fit will minimize the residuals/errors(SSE) so the SSE will be smaller than the original sum_of_squares.
#
| module2-regression-2/LS_DS_212_assignment_RJProctor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="19fbd8a8-001" colab_type="text"
# #1. Install Dependencies
# First install the libraries needed to execute recipes, this only needs to be done once, then click play.
#
# + id="19fbd8a8-002" colab_type="code"
# !pip install git+https://github.com/google/starthinker
# + [markdown] id="19fbd8a8-003" colab_type="text"
# #2. Get Cloud Project ID
# To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
#
# + id="19fbd8a8-004" colab_type="code"
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
# + [markdown] id="19fbd8a8-005" colab_type="text"
# #3. Get Client Credentials
# To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
#
# + id="19fbd8a8-006" colab_type="code"
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
# + [markdown] id="19fbd8a8-007" colab_type="text"
# #4. Enter DV360 Bulk Targeting Editor Parameters
# Allows bulk targeting DV360 through Sheets and BigQuery.
# 1. Select <b>Load</b>, click <b>Save + Run</b>, a sheet called <b> DV Targeter</b> will be created.
# 1. In the <b>Partners</b> sheet tab, fill in <i>Filter</i> column then select <b>Load</b>, click <b>Save + Run</b>.
# 1. In the <b>Advertisers</b> sheet tab, fill in <i>Filter</i> column. then select <b>Load</b>, click <b>Save + Run</b>.
# 1. Check the First And Third Party option to load audiences, which may be slow. If not loaded, user will enter audience ids into the sheet manually.
# 1. On the <b>Line Items</b> sheet tab, the <i>Filter</i> is used only to limit drop down choices in the rest of the tool.
# 1. Optionally edit or filter the <i>Targeting Options</i> or <i>Inventory Sources</i> sheets to limit choices.
# 1. Make targeting updates, fill in changes on all tabs with colored fields (RED FIELDS ARE NOT IMPLEMENTED, IGNORE).
# 1. Select <i>Preview</i>, click <b>Save + Run</b> then check the <b>Preview</b> tabs.
# 1. Select <b>Update</b>, click <b>Save + Run</b> then check the <b>Success</b> and <b>Error</b> tabs.
# 1. Load and Update can be run multiple times.
# 1. If an update fails, all parts of the update failed, break it up into multiple updates.
# 1. To refresh the Partner, Advertiser, or Line Item list, remove the filters and run load.
# Modify the values below for your use case, can be done multiple times, then click play.
#
# + id="19fbd8a8-008" colab_type="code"
FIELDS = {
'auth_dv': 'user', # Credentials used for dv.
'auth_sheet': 'user', # Credentials used for sheet.
'auth_bigquery': 'service', # Credentials used for bigquery.
'recipe_name': '', # Name of Google Sheet to create.
'recipe_slug': '', # Name of Google BigQuery dataset to create.
'command': 'Load', # Action to take.
'first_and_third': False, # Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually.
}
print("Parameters Set To: %s" % FIELDS)
# + [markdown] id="19fbd8a8-009" colab_type="text"
# #5. Execute DV360 Bulk Targeting Editor
# This does NOT need to be modified unless you are changing the recipe, click play.
#
# + id="19fbd8a8-010" colab_type="code"
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'dataset': {
'__comment__': 'Ensure dataset exists.',
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 2,'default': '','description': 'Name of Google BigQuery dataset to create.'}}
}
},
{
'drive': {
'__comment__': 'Copy the default template to sheet with the recipe name',
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1ARkIvh0D-gltZeiwniUonMNrm0Mi1s2meZ9FUjutXOE/',
'destination': {'field': {'name': 'recipe_name','suffix': ' DV Targeter','kind': 'string','order': 3,'default': '','description': 'Name of Google Sheet to create.'}}
}
}
},
{
'dv_targeter': {
'__comment': 'Depending on users choice, execute a different part of the solution.',
'auth_dv': {'field': {'name': 'auth_dv','kind': 'authentication','order': 1,'default': 'user','description': 'Credentials used for dv.'}},
'auth_sheets': {'field': {'name': 'auth_sheet','kind': 'authentication','order': 2,'default': 'user','description': 'Credentials used for sheet.'}},
'auth_bigquery': {'field': {'name': 'auth_bigquery','kind': 'authentication','order': 3,'default': 'service','description': 'Credentials used for bigquery.'}},
'sheet': {'field': {'name': 'recipe_name','suffix': ' DV Targeter','kind': 'string','order': 4,'default': '','description': 'Name of Google Sheet to create.'}},
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 5,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'command': {'field': {'name': 'command','kind': 'choice','choices': ['Clear','Load','Preview','Update'],'order': 6,'default': 'Load','description': 'Action to take.'}},
'first_and_third': {'field': {'name': 'first_and_third','kind': 'boolean','order': 6,'default': False,'description': 'Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually.'}}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
| colabs/dv360_targeter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Check tf preprocessing pipeline with AZ videos ###
# +
import os
import glob
import pickle
import pandas as pd
import numpy as np
import time
import tensorflow as tf
from matplotlib import pyplot as plt
# Custom imports
from werdich_cfr.utils.processing import Videoconverter
from werdich_cfr.tfutils.TFRprovider import DatasetProvider
# -
# Video selection criteria
max_frame_time_ms = 33.34 # Maximum frame_time acceptable in ms
min_rate = 1/max_frame_time_ms*1e3
min_frames = 40 # Minimum number of frames at min_rate (2 s)
min_length = max_frame_time_ms*min_frames*1e-3
# Load BW metadata as example
dev_dir = os.path.normpath('/mnt/obi0/andreas/data/cfr_AZ')
bwmeta_file_name = 'bwh_metadata.parquet'
bwmeta = pd.read_parquet(os.path.join(dev_dir, bwmeta_file_name))
display(bwmeta.head(2))
bwmeta.iloc[0].filename
# ### Load AZ videos into memory ###
# Video list
az_dir = os.path.normpath('/mnt/obi0/sgoto/AZ_Project')
az_echo_dir = os.path.join(az_dir, 'npyFiles')
video_meta_file_name = 'metadata.tsv'
video_meta_file = os.path.join(az_dir, video_meta_file_name)
video_meta_df = pd.read_csv(video_meta_file, sep='\t')
# Some small adjustments to this data frame
video_meta_df = video_meta_df.dropna(subset=['frametime']).\
rename(columns={'frametime': 'frame_time'}).\
reset_index(drop=True)
video_meta_df['filename'] = video_meta_df['filename']+'.npy.lz4'
video_meta_df['dir'] = az_echo_dir
file_list = list(video_meta_df.filename.unique())[:16]
display(video_meta_df.head())
# +
#%% Load video data into memory and start preprocessing.
# For every filename in file_list, convert the echo video with
# Videoconverter; keep (video, shape) pairs for qualified videos and
# collect the metadata rows of disqualified ones together with the error.
print(f'Loading {len(file_list)} echos into memory from file: {os.path.basename(video_meta_file_name)}.')
# Image processing class
VC = Videoconverter(max_frame_time_ms=max_frame_time_ms, min_frames=min_frames, meta_df=video_meta_df)
image_array_file_list = []   # filenames of successfully converted videos
image_array_list = []        # (video_array, shape) tuples for the tf pipeline
meta_disqualified_list = []  # metadata rows of videos that failed conversion
start_time = time.perf_counter()
for f, filename in enumerate(file_list):
    if (f+1) % 10 == 0:
        time_passed = (time.perf_counter()-start_time)/60
        # BUG FIX: the progress message printed a literal placeholder
        # instead of the name of the file currently being loaded.
        print(f'Loading file {f+1} of {len(file_list)}: {filename}. Time: {time_passed:.2f}')
    error, im = VC.process_video(filename)
    if np.any(im):
        image_array_list.append((im, np.asarray(im.shape, np.int32)))
        image_array_file_list.append(filename)
    else:
        meta_disqualified_list.append(video_meta_df[video_meta_df.filename == filename].\
                                      assign(err=[error]))
        print('Skipping this one.')
if len(meta_disqualified_list) > 0:
    echo_df_disqualified = pd.concat(meta_disqualified_list, ignore_index=True)
    # Save disqualified metadata
    print(f'Found {echo_df_disqualified.shape[0]} of {len(file_list)} disqualified videos.')
    disqualified_filename = os.path.basename(video_meta_file_name).split('.')[0] + '_disqualified.parquet'
    # BUG FIX: this cell previously referenced cfr_project_dir, which is only
    # defined in a *later* cell, so a top-to-bottom run raised a NameError
    # here. dev_dir (defined above) points at the same project directory,
    # '/mnt/obi0/andreas/data/cfr_AZ'.
    echo_df_disqualified.to_parquet(os.path.join(dev_dir, disqualified_filename))
print(f'Loaded {len(image_array_list)} of {len(file_list)} videos into memory.')
# -
image_array_list[0]
# +
# Features for the model: load the pickled feature dictionary and the
# hyperparameter dict of the first best-performing checkpointed model.
meta_date = '200617'
cfr_project_dir = os.path.normpath('/mnt/obi0/andreas/data/cfr_AZ')
model_dir = os.path.join(cfr_project_dir, 'models')
feature_dict_name = 'feature_dict_' + 'tfr_' + meta_date+'.pkl'
feature_dict_file = os.path.join(model_dir, feature_dict_name)
with open(feature_dict_file, 'rb') as fl:
    feature_dict = pickle.load(fl)
# NOTE(review): model_dir is re-assigned to the same value as above -- the
# duplicate line is redundant but harmless.
model_dir = os.path.join(cfr_project_dir, 'models')
checkpoint_list = 'cfr_correlations_bestmodels_30FPS.parquet'
checkpoint_df = pd.read_parquet(os.path.join(cfr_project_dir, 'models', checkpoint_list))
model_list = sorted(list(checkpoint_df.model_name.unique()))
# Model hyperparameters and output name
model_name = model_list[0]
model_dict_file_name = model_name + '_model_dict.pkl'
model_dict_file = os.path.join(model_dir, model_dict_file_name)
with open(model_dict_file, 'rb') as fl:
    model_dict = pickle.load(fl)
model_output = model_dict['model_output']
# -
def get_im_generator(im_array_list):
    """Wrap *im_array_list* in a zero-argument generator function.

    tf.data.Dataset.from_generator expects a callable rather than an
    iterable, so the list of (image, shape) pairs is captured in a
    closure and replayed element by element on each call.
    """
    def im_generator():
        for item in im_array_list:
            yield item[0], item[1]
    return im_generator
# Build a tf.data pipeline from the in-memory videos: generator ->
# per-video preprocessing -> (input, dummy-label) dicts -> batches.
batch_size = 8
im_generator = get_im_generator(image_array_list)
dsp = DatasetProvider(augment=False,
                      im_scale_factor=model_dict['im_scale_factor'],
                      feature_dict=feature_dict)
# NOTE(review): output_shapes assumes each raw video tensor is
# (height, width, n_frames) plus a length-3 shape vector -- confirm against
# Videoconverter's output.
dset = tf.data.Dataset.from_generator(generator=im_generator,
                                      output_types=(tf.int32, tf.int32),
                                      output_shapes=(tf.TensorShape([None, None, model_dict['n_frames']]),
                                                     tf.TensorShape([3])))
# _process_image is a private DatasetProvider method; reused here to mirror
# the training-time preprocessing exactly.
dset = dset.map(dsp._process_image)
# the model expects a dict input; score_output is a dummy label for predict
dset = dset.map(lambda x: ({'video': x}, {'score_output': 0}))
dset = dset.batch(batch_size=batch_size, drop_remainder=False).repeat(count=1)
# Pull a single batch to inspect the pipeline output
for out in dset.take(1):
    output = out
image_batch = output[0]['video'].numpy()
image_batch.shape
# +
# Output batch is a list of dictionaries [INPUT, OUTPUT]:
image_batch = output[0]['video'].numpy()
print('Video batch shape:', image_batch.shape)
# Show some images from the batch: 6 samples (rows) x 3 random frames (cols)
#sample_idx = np.random.randint(batch_size, size=6)
sample_idx = np.arange(0, 6)
n_frames = 40
frame_idx = np.random.randint(n_frames, size=3)
fig, ax = plt.subplots(nrows=len(sample_idx), ncols=len(frame_idx), figsize = (16, 32))
for row, sample in enumerate(sample_idx):
    for col, frame in enumerate(frame_idx):
        # drop the trailing channel axis so imshow gets a 2-D grayscale image
        image = np.squeeze(image_batch[sample, frame, :, :, 0])
        ax[row, col].imshow(image, cmap = 'gray')
plt.show()
# -
| notebooks/AZ_project/cfr_AZ_tf_output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from utils import messages as ms
# +
#use a class to define your input and output in order to have mutable variables
class Mspa_process_io:
    """Mutable container for the MSPA process inputs.

    A plain attribute-holding object is used (rather than constants) so
    that the widget bindings below can update the values in place.
    """

    def __init__(self):
        # MSPA parameters, kept in sync with the bound widgets
        self.foreground_connectivity = None  # 4- or 8-connectivity
        self.edge_width = None               # edge width in pixels
        self.transition_core = True          # Transition Core - Loop/Bridge
        self.separate_feature = True         # separate internal/external features
        self.statistics = True               # compute statistics
# single shared instance holding the current MSPA inputs
mp_io = Mspa_process_io()
# -
#create an empty result tile that will be filled with displayable plot, map, links, text
id_ = "result_widget"
title = "Results"
mp_results = sw.Tile(id_, title, inputs=[ms.MSPA_NO_RESULTS])
# +
#create the output alert
mp_output = sw.Alert().add_msg(ms.MSPA_MESSAGE)
#create the button that starts your process
mp_btn = sw.Btn(ms.MSPA_BTN)
#create the widgets following ipyvuetify requirements
mp_connectivity = v.Select(items=[4,8], label='Foreground connectivity', v_model=None)
mp_edge = v.Slider(label= 'Edge width (px)', max=100, class_="mt-5", thumb_label='always', v_model=0)
mp_transition = v.Switch(label="Transition Core - Loop/Bridge", v_model=True)
mp_feature = v.Switch(label="Separate internal from external features", v_model=True)
mp_statistics = v.Switch(label="Compute statistics", v_model=True)
mp_inputs = [
    mp_connectivity,
    mp_edge,
    mp_transition,
    mp_feature,
    mp_statistics
]
#bind each widget to the matching mp_io attribute so user edits propagate
mp_output.bind(mp_connectivity, mp_io, 'foreground_connectivity')
mp_output.bind(mp_edge, mp_io, 'edge_width')
mp_output.bind(mp_transition, mp_io, 'transition_core')
mp_output.bind(mp_feature, mp_io, 'separate_feature')
mp_output.bind(mp_statistics, mp_io, 'statistics')
#create a process tile
id_ = "input_widget"
title = 'Create fragmentation map'
mp_widget = sw.Tile(
    id_,
    title,
    btn=mp_btn,
    inputs=mp_inputs,
    output=mp_output
)
# +
#bind the button to the process by writing a custom function
from scripts import run
from functools import partial
def process_start(widget, event, data, output, mp_results):
    """Run the MSPA analysis when the process button is clicked.

    Reads the binary map and the MSPA parameters, validates them, launches
    the analysis and injects the resulting layout into the results tile.
    Any failure is reported through the output alert.
    """
    # show the spinner while the process is running
    widget.toggle_loading()

    # gather the inputs
    # NOTE(review): bi_io is expected to be defined by another tile of the
    # app -- confirm it exists before this callback can fire.
    bin_map = bi_io.bin_map
    params = [
        str(mp_io.foreground_connectivity),
        str(mp_io.edge_width),
        str(int(mp_io.transition_core)),
        str(int(mp_io.separate_feature)),
        str(int(mp_io.statistics)),
    ]

    # validate the inputs; bail out (and stop the spinner) on the first miss
    if not output.check_input(bin_map, ms.NO_MAP):
        return widget.toggle_loading()
    if not all(output.check_input(p, ms.NO_INPUT) for p in params):
        return widget.toggle_loading()

    # launch the analysis and display its output
    try:
        output_layout = run.mspa_analysis(
            bin_map=bin_map,
            params=params,
            output=output
        )
        # keep the tile title (first child) and replace the rest
        result_tile = mp_results.children[0]
        result_tile.children = [result_tile.children[0]] + output_layout
    except Exception as e:
        output.add_live_msg(str(e), 'error')

    # hide the spinner
    widget.toggle_loading()
mp_btn.on_event('click', partial(
process_start,
output=mp_output,
mp_results=mp_results
))
# -
mp_widget
mp_results
| mspa_ui.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Classification
#
# *Supervised* machine learning techniques involve training a model to operate on a set of *features* and predict a *label* using a dataset that includes some already-known label values. You can think of this function like this, in which ***y*** represents the label we want to predict and ***X*** represents the vector of features the model uses to predict it.
#
# $$y = f([x_1, x_2, x_3, ...])$$
#
#
# *Classification* is a form of supervised machine learning in which you train a model to use the features (the ***x*** values in our function) to predict a label (***y***) that calculates the probability of the observed case belonging to each of a number of possible classes, and predicting an appropriate label. The simplest form of classification is *binary* classification, in which the label is 0 or 1, representing one of two classes; for example, "True" or "False"; "Internal" or "External"; "Profitable" or "Non-Profitable"; and so on.
# ## Binary Classification
#
# Let's start by looking at an example of *binary classification*, where the model must predict a label that belongs to one of two classes. In this exercise, we'll train a binary classifier to predict whether or not a patient should be tested for diabetes based on some medical data.
#
# ### Explore the data
#
# Run the following cell to load a CSV file of patent data into a **Pandas** dataframe:
#
# > **Citation**: The diabetes dataset used in this exercise is based on data originally collected by the National Institute of Diabetes and Digestive and Kidney Diseases.
# +
import pandas as pd

# load the training dataset into a DataFrame for exploration
diabetes = pd.read_csv('data/diabetes.csv')
diabetes.head()
# -
# This data consists of diagnostic information about some patients who have been tested for diabetes. Scroll to the right if necessary, and note that the final column in the dataset (**Diabetic**) contains the value ***0*** for patients who tested negative for diabetes, and ***1*** for patients who tested positive. This is the label that we will train our model to predict; most of the other columns (**Pregnancies**,**PlasmaGlucose**,**DiastolicBloodPressure**, and so on) are the features we will use to predict the **Diabetic** label.
#
# Let's separate the features from the labels - we'll call the features ***X*** and the label ***y***:
# + tags=[]
# Separate features and labels: X holds the 8 predictors, y the 0/1 target
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
label = 'Diabetic'
X, y = diabetes[features].values, diabetes[label].values

# sanity check: print the feature vector and label for the first 4 patients
for n in range(0,4):
    print("Patient", str(n+1), "\n Features:",list(X[n]), "\n Label:", y[n])
# -
# Now let's compare the feature distributions for each label value.
# +
from matplotlib import pyplot as plt
# %matplotlib inline

features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
# One boxplot per feature, grouped by label, to eyeball which features
# separate diabetic from non-diabetic patients
for col in features:
    diabetes.boxplot(column=col, by='Diabetic', figsize=(6,6))
    plt.title(col)
plt.show()
# -
# For some of the features, there's a noticeable difference in the distribution for each label value. In particular, **Pregnancies** and **Age** show markedly different distributions for diabetic patients than for non-diabetic patients. These features may help predict whether or not a patient is diabetic.
#
# ### Split the data
#
# Our dataset includes known values for the label, so we can use this to train a classifier so that it finds a statistical relationship between the features and the label value; but how will we know if our model is any good? How do we know it will predict correctly when we use it with new data that it wasn't trained with? Well, we can take advantage of the fact we have a large dataset with known label values, use only some of it to train the model, and hold back some to test the trained model - enabling us to compare the predicted labels with the already known labels in the test set.
#
# In Python, the **scikit-learn** package contains a large number of functions we can use to build a machine learning model - including a **train_test_split** function that ensures we get a statistically random split of training and test data. We'll use that to split the data into 70% for training and hold back 30% for testing.
# + tags=[]
from sklearn.model_selection import train_test_split

# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# BUG FIX: X_train is a 2-D array, so .size counts every feature value
# (rows * 8 columns) and over-reported the case counts by 8x;
# shape[0] is the number of patients (cases) in each split.
print ('Training cases: %d\nTest cases: %d' % (X_train.shape[0], X_test.shape[0]))
# -
# ### Train and Evaluate a Binary Classification Model
# OK, now we're ready to train our model by fitting the training features (**X_train**) to the training labels (**y_train**). There are various algorithms we can use to train the model. In this example, we'll use *Logistic Regression*, which (despite its name) is a well-established algorithm for classification. In addition to the training features and labels, we'll need to set a *regularization* parameter. This is used to counteract any bias in the sample, and help the model generalize well by avoiding *overfitting* the model to the training data.
#
# > **Note**: Parameters for machine learning algorithms are generally referred to as *hyperparameters* (to a data scientist, *parameters* are values in the data itself - *hyperparameters* are defined externally from the data!)
# + tags=[]
# Train the model
from sklearn.linear_model import LogisticRegression

# Set regularization rate (C = 1/reg, so a small reg means weak regularization)
reg = 0.01

# train a logistic regression model on the training set
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
print (model)
# -
# Now we've trained the model using the training data, we can use the test data we held back to evaluate how well it predicts. Again, **scikit-learn** can help us do this. Let's start by using the model to predict labels for our test set, and compare the predicted labels to the known labels:
# + tags=[]
# Predict labels for the held-out test set and compare with the known labels
predictions = model.predict(X_test)
print('Predicted labels: ', predictions)
print('Actual labels: ' ,y_test)
# -
# The arrays of labels are too long to be displayed in the notebook output, so we can only compare a few values. Even if we printed out all of the predicted and actual labels, there are too many of them to make this a sensible way to evaluate the model. Fortunately, **scikit-learn** has a few more tricks up its sleeve, and it provides some metrics that we can use to evaluate the model.
#
# The most obvious thing you might want to do is to check the *accuracy* of the predictions - in simple terms, what proportion of the labels did the model predict correctly?
# + tags=[]
# Accuracy: proportion of test labels predicted correctly
from sklearn.metrics import accuracy_score

print('Accuracy: ', accuracy_score(y_test, predictions))
# -
# The accuracy is returned as a decimal value - a value of 1.0 would mean that the model got 100% of the predictions right; while an accuracy of 0.0 is, well, pretty useless!
#
# Accuracy seems like a sensible metric to evaluate (and to a certain extent it is), but you need to be careful about drawing too many conclusions from the accuracy of a classifier. Remember that it's simply a measure of how many cases were predicted correctly. Suppose only 3% of the population is diabetic. You could create a classifier that always just predicts 0, and it would be 97% accurate - but not terribly helpful in identifying patients with diabetes!
#
# Fortunately, there are some other metrics that reveal a little more about how our model is performing. Scikit-Learn includes the ability to create a *classification report* that provides more insight than raw accuracy alone.
# + tags=[]
# Per-class precision, recall, F1 and support for the test predictions
from sklearn. metrics import classification_report

print(classification_report(y_test, predictions))
# -
# The classification report includes the following metrics for each class (0 and 1)
#
# > note that the header row may not line up with the values!
#
# * *Precision*: Of the predictions the model made for this class, what proportion were correct?
# * *Recall*: Out of all of the instances of this class in the test dataset, how many did the model identify?
# * *F1-Score*: An average metric that takes both precision and recall into account.
# * *Support*: How many instances of this class are there in the test dataset?
#
# The classification report also includes averages for these metrics, including a weighted average that allows for the imbalance in the number of cases of each class.
#
# Because this is a *binary* classification problem, the ***1*** class is considered *positive* and its precision and recall are particularly interesting - these in effect answer the questions:
#
# - Of all the patients the model predicted are diabetic, how many are actually diabetic?
# - Of all the patients that are actually diabetic, how many did the model identify?
#
# You can retrieve these values on their own by using the **precision_score** and **recall_score** metrics in scikit-learn (which by default assume a binary classification model).
# + tags=[]
# Precision and recall for the positive (diabetic) class
from sklearn.metrics import precision_score, recall_score

print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))
# -
# The precision and recall metrics are derived from four possible prediction outcomes:
# * *True Positives*: The predicted label and the actual label are both 1.
# * *False Positives*: The predicted label is 1, but the actual label is 0.
# * *False Negatives*: The predicted label is 0, but the actual label is 1.
# * *True Negatives*: The predicted label and the actual label are both 0.
#
# These metrics are generally tabulated for the test set and shown together as a *confusion matrix*, which takes the following form:
#
# <table style="border: 1px solid black;">
# <tr style="border: 1px solid black;">
# <td style="border: 1px solid black;color: black;" bgcolor="lightgray">TN</td><td style="border: 1px solid black;color: black;" bgcolor="white">FP</td>
# </tr>
# <tr style="border: 1px solid black;">
# <td style="border: 1px solid black;color: black;" bgcolor="white">FN</td><td style="border: 1px solid black;color: black;" bgcolor="lightgray">TP</td>
# </tr>
# </table>
#
# Note that the correct (*true*) predictions form a diagonal line from top left to bottom right - these figures should be significantly higher than the *false* predictions if the model is any good.
#
# In Python, you can use the **sklearn.metrics.confusion_matrix** function to find these values for a trained classifier:
# + tags=[]
from sklearn.metrics import confusion_matrix

# Print the confusion matrix: rows are actual classes, columns predicted
# ([[TN, FP], [FN, TP]] for binary labels 0/1)
cm = confusion_matrix(y_test, predictions)
print (cm)
# -
# Until now, we've considered the predictions from the model as being either 1 or 0 class labels. Actually, things are a little more complex than that. Statistical machine learning algorithms, like logistic regression, are based on *probability*; so what actually gets predicted by a binary classifier is the probability that the label is true (**P(y)**) and the probability that the label is false (1 - **P(y)**). A threshold value of 0.5 is used to decide whether the predicted label is a 1 (*P(y) > 0.5*) or a 0 (*P(y) <= 0.5*). You can use the **predict_proba** method to see the probability pairs for each case:
# + tags=[]
# Per-case class probabilities: column 0 is P(not diabetic), column 1 is P(diabetic)
y_scores = model.predict_proba(X_test)
print(y_scores)
# -
# The decision to score a prediction as a 1 or a 0 depends on the threshold to which the predicted probabilities are compared. If we were to change the threshold, it would affect the predictions; and therefore change the metrics in the confusion matrix. A common way to evaluate a classifier is to examine the *true positive rate* (which is another name for recall) and the *false positive rate* for a range of possible thresholds. These rates are then plotted against all possible thresholds to form a chart known as a *receiver operating characteristic (ROC) chart*, like this:
# +
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline

# calculate ROC curve from the positive-class probabilities (column 1)
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])

# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line (performance of random guessing)
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
# -
# The ROC chart shows the curve of the true and false positive rates for different threshold values between 0 and 1. A perfect classifier would have a curve that goes straight up the left side and straight across the top. The diagonal line across the chart represents the probability of predicting correctly with a 50/50 random prediction; so you obviously want the curve to be higher than that (or your model is no better than simply guessing!).
#
# The area under the curve (AUC) is a value between 0 and 1 that quantifies the overall performance of the model. The closer to 1 this value is, the better the model. Once again, scikit-Learn includes a function to calculate this metric.
# + tags=[]
from sklearn.metrics import roc_auc_score
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
# -
# ### Perform preprocessing in a pipeline
#
# In this case, the ROC curve and its AUC indicate that the model performs better than a random guess which is not bad considering we performed very little preprocessing of the data.
#
# In practice, it's common to perform some preprocessing of the data to make it easier for the algorithm to fit a model to it. There's a huge range of preprocessing transformations you can perform to get your data ready for modeling, but we'll limit ourselves to a few common techniques:
#
# - Scaling numeric features so they're on the same scale. This prevents features with large values from producing coefficients that disproportionately affect the predictions.
# - Encoding categorical variables. For example, by using a *one hot encoding* technique you can create individual binary (true/false) features for each possible category value.
#
# To apply these preprocessing transformations, we'll make use of a Scikit-Learn feature named *pipelines*. These enable us to define a set of preprocessing steps that end with an algorithm. You can then fit the entire pipeline to the data, so that the model encapsulates all of the preprocessing steps as well as the regression algorithm. This is useful, because when we want to use the model to predict values from new data, we need to apply the same transformations (based on the same statistical distributions and catagory encodings used with the training data).
#
# >**Note**: The term *pipeline* is used extensively in machine learning, often to mean very different things! In this context, we're using it to refer to pipeline objects in Scikit-Learn, but you may see it used elsewhere to mean something else.
#
# + tags=[]
# Train the model
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
import numpy as np
# Define preprocessing for numeric columns (normalize them so they're on the same scale)
numeric_features = [0,1,2,3,4,5,6]
numeric_transformer = Pipeline(steps=[
    ('scaler', StandardScaler())])
# Define preprocessing for categorical features (one-hot encode the Age column;
# handle_unknown='ignore' keeps inference from failing on unseen category values)
categorical_features = [7]
categorical_transformer = Pipeline(steps=[
    ('onehot', OneHotEncoder(handle_unknown='ignore'))])
# Combine preprocessing steps: each transformer is applied to its column indices
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, numeric_features),
        ('cat', categorical_transformer, categorical_features)])
# Create preprocessing and training pipeline
# NOTE(review): `reg` (the regularization rate used as C=1/reg) must be defined
# in an earlier cell — confirm it is set before this cell runs.
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                           ('logregressor', LogisticRegression(C=1/reg, solver="liblinear"))])
# fit the pipeline to train a logistic regression model on the training set
model = pipeline.fit(X_train, (y_train))
print (model)
# -
# The pipeline encapsulates the preprocessing steps as well as model training.
#
# Let's use the model trained by this pipeline to predict labels for our test set, and compare the performance metrics with the basic model we created previously.
# + tags=[]
# Get predictions from test data
predictions = model.predict(X_test)
# Get class-membership probabilities for THIS model before computing AUC.
# NOTE(fix): previously AUC was computed from the stale y_scores left over
# from the earlier (un-piped) model, so the reported AUC was wrong.
y_scores = model.predict_proba(X_test)
# Get evaluation metrics
cm = confusion_matrix(y_test, predictions)
print ('Confusion Matrix:\n',cm, '\n')
print('Accuracy:', accuracy_score(y_test, predictions))
print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
# calculate ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
# -
# The results look a little better, so clearly preprocessing the data has made a difference.
#
# ### Try a different algorithm
#
# Now let's try a different algorithm. Previously we used a logistic regression algorithm, which is a *linear* algorithm. There are many kinds of classification algorithm we could try, including:
#
# - **Support Vector Machine algorithms**: Algorithms that define a *hyperplane* that separates classes.
# - **Tree-based algorithms**: Algorithms that build a decision tree to reach a prediction
# - **Ensemble algorithms**: Algorithms that combine the outputs of multiple base algorithms to improve generalizability.
#
# This time, We'll use the same preprocessing steps as before, but we'll train the model using an *ensemble* algorithm named *Random Forest* that combines the outputs of multiple random decision trees (for more details, see the [Scikit-Learn documentation](https://scikit-learn.org/stable/modules/ensemble.html#forests-of-randomized-trees)).
# + tags=[]
from sklearn.ensemble import RandomForestClassifier
# Create preprocessing and training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('logregressor', RandomForestClassifier(n_estimators=100))])
# fit the pipeline to train a random forest model on the training set
model = pipeline.fit(X_train, (y_train))
print (model)
# -
# Let's look at the performance metrics for the new model.
# + tags=[]
predictions = model.predict(X_test)
# Get class-membership probabilities for THIS model before computing AUC.
# NOTE(fix): previously AUC was computed from the stale y_scores of the
# earlier pipeline model, so the printed AUC did not belong to this model.
y_scores = model.predict_proba(X_test)
cm = confusion_matrix(y_test, predictions)
print ('Confusion Matrix:\n',cm, '\n')
print('Accuracy:', accuracy_score(y_test, predictions))
print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))
auc = roc_auc_score(y_test,y_scores[:,1])
print('\nAUC: ' + str(auc))
# calculate ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
# -
# That looks better!
#
# ### Use the Model for Inferencing
# Now that we have a reasonably useful trained model, we can save it for use later to predict labels for new data:
# +
import joblib
# Save the model as a pickle file
filename = './models/diabetes_model.pkl'
joblib.dump(model, filename)
# -
# When we have some new observations for which the label is unknown, we can load the model and use it to predict values for the unknown label:
# + tags=[]
# Load the model from the file
model = joblib.load(filename)
# predict on a new sample
# The model accepts an array of feature arrays (so you can predict the classes of multiple patients in a single call)
# We'll create an array with a single array of features, representing one patient
X_new = np.array([[2,180,74,24,21,23.9091702,1.488172308,22]])
print ('New sample: {}'.format(list(X_new[0])))
# Get a prediction
pred = model.predict(X_new)
# The model returns an array of predictions - one for each set of features submitted
# In our case, we only submitted one patient, so our prediction is the first one in the resulting array.
print('Predicted class is {}'.format(pred[0]))
# -
# ## Multiclass Classification
#
# Binary classification techniques work well when the data observations belong to one of two classes or categories, such as "True" or "False". When the data can be categorized into more than two classes, you must use a multiclass classification algorithm.
#
# Fortunately, in most machine learning frameworks, including scikit-learn, implementing a multiclass classifier is not significantly more complex than binary classification - and in many cases, the classification algorithm classes used for binary classification implicitly support multiclass classification.
#
# ### Explore the data
#
# Let's start by examining a dataset that contains observations of multiple classes. We'll use a dataset that contains observations of three different species of penguin.
#
# > **Citation**: The penguins dataset used in this exercise is a subset of data collected and made available by [Dr. Kristen
# Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php)
# and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a
# member of the [Long Term Ecological Research
# Network](https://lternet.edu/).
# +
import pandas as pd
# load the training dataset
penguins = pd.read_csv('data/penguins.csv')
# Display a random sample of 10 observations
sample = penguins.sample(10)
sample
# -
# The dataset contains the following columns:
# * **CulmenLength**: The length in mm of the penguin's culmen (bill).
# * **CulmenDepth**: The depth in mm of the penguin's culmen.
# * **FlipperLength**: The length in mm of the penguin's flipper.
# * **BodyMass**: The body mass of the penguin in grams.
# * **Species**: An integer value that represents the species of the penguin.
#
# The **Species** column is the label we want to train a model to predict. The dataset includes three possible species, which are encoded as 0, 1, and 2. The actual species names are revealed by the code below:
# + tags=[]
# Map each encoded Species value (0, 1, 2) to a display name.
# NOTE(review): 'Amelie' is likely a typo for the species name 'Adelie' —
# confirm against the source dataset before changing (the prose below uses
# the same spelling, so both would need updating together).
penguin_classes = ['Amelie', 'Gentoo', 'Chinstrap']
print(sample.columns[0:5].values, 'SpeciesName')
# Show 10 random rows with the decoded species name appended.
# NOTE(review): positional access like row[0] on a Series is deprecated in
# newer pandas — prefer row.iloc[0] when upgrading.
for index, row in penguins.sample(10).iterrows():
    print('[',row[0], row[1], row[2], row[3], int(row[4]),']',penguin_classes[int(row[4])])
# -
# Now that we know what the features and labels in the data represent, let's explore the dataset. First, let's see if there are any missing (*null*) values.
# + tags=[]
# Count the number of null values for each column
penguins.isnull().sum()
# -
# It looks like there are some missing feature values, but no missing labels. Let's dig a little deeper and see the rows that contain nulls.
# + tags=[]
# Show rows containing nulls
penguins[penguins.isnull().any(axis=1)]
# -
# There are two rows that contain no feature values at all (*NaN* stands for "not a number"), so these won't be useful in training a model. Let's discard them from the dataset.
# Drop rows containing NaN values
penguins=penguins.dropna()
#Confirm there are now no nulls
penguins.isnull().sum()
# Now that we've dealt with the missing values, let's explore how the features relate to the label by creating some box charts.
# +
from matplotlib import pyplot as plt
# %matplotlib inline
penguin_features = ['CulmenLength','CulmenDepth','FlipperLength','BodyMass']
penguin_label = 'Species'
for col in penguin_features:
penguins.boxplot(column=col, by=penguin_label, figsize=(6,6))
plt.title(col)
plt.show()
# -
# From the box plots, it looks like species 0 and 2 (Amelie and Chinstrap) have similar data profiles for culmen depth, flipper length, and body mass, but Chinstraps tend to have longer culmens. Species 1 (Gentoo) tends to have fairly clearly differentiated features from the others; which should help us train a good classification model.
#
# ### Prepare the data
#
# Just as for binary classification, before training the model, we need to separate the features and label, and then split the data into subsets for training and validation. We'll also apply a *stratification* technique when splitting the data to maintain the proportion of each label value in the training and validation datasets.
# + tags=[]
from sklearn.model_selection import train_test_split
# Separate features and labels
penguins_X, penguins_y = penguins[penguin_features].values, penguins[penguin_label].values
# Split data 70%-30% into training set and test set.
# stratify keeps the class proportions the same in both subsets.
x_penguin_train, x_penguin_test, y_penguin_train, y_penguin_test = train_test_split(penguins_X, penguins_y,
                                                                                    test_size=0.30,
                                                                                    random_state=0,
                                                                                    stratify=penguins_y)
# NOTE: .size counts individual values (rows x columns), not the row count
print ('Training Set: %d, Test Set: %d \n' % (x_penguin_train.size, x_penguin_test.size))
# -
# ### Train and evaluate a multiclass classifier
#
# Now that we have a set of training features and corresponding training labels, we can fit a multiclass classification algorithm to the data to create a model. Most scikit-learn classification algorithms inherently support multiclass classification. We'll try a logistic regression algorithm.
# + tags=[]
from sklearn.linear_model import LogisticRegression
# Set regularization rate
reg = 0.1
# train a logistic regression model on the training set
multi_model = LogisticRegression(C=1/reg, solver='lbfgs', multi_class='auto', max_iter=10000).fit(x_penguin_train, y_penguin_train)
print (multi_model)
# -
# Now we can use the trained model to predict the labels for the test features, and compare the predicted labels to the actual labels:
# + tags=[]
penguin_predictions = multi_model.predict(x_penguin_test)
print('Predicted labels: ', penguin_predictions[:15])
print('Actual labels : ' ,y_penguin_test[:15])
# -
# Let's look at a classification report.
# + tags=[]
# Print per-class precision/recall/F1 plus macro and weighted averages.
# NOTE(fix): removed the stray space in 'sklearn. metrics' (legal but confusing).
from sklearn.metrics import classification_report
print(classification_report(y_penguin_test, penguin_predictions))
# -
# As with binary classification, the report includes *precision* and *recall* metrics for each class. However, while with binary classification we could focus on the scores for the *positive* class; in this case, there are multiple classes so we need to look at an overall metric (either the macro or weighted average) to get a sense of how well the model performs across all three classes.
#
# You can get the overall metrics separately from the report using the scikit-learn metrics score classes, but with multiclass results you must specify which average metric you want to use for precision and recall.
# + tags=[]
from sklearn.metrics import accuracy_score, precision_score, recall_score
print("Overall Accuracy:",accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:",precision_score(y_penguin_test, penguin_predictions, average='macro'))
print("Overall Recall:",recall_score(y_penguin_test, penguin_predictions, average='macro'))
# -
# Now let's look at the confusion matrix for our model:
# + tags=[]
from sklearn.metrics import confusion_matrix
# Print the confusion matrix
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
print(mcm)
# -
# The confusion matrix shows the intersection of predicted and actual label values for each class - in simple terms, the diagonal intersections from top-left to bottom-right indicate the number of correct predictions.
#
# When dealing with multiple classes, it's generally more intuitive to visualize this as a heat map, like this:
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
# NOTE(fix): sklearn's confusion_matrix puts TRUE labels on rows (the y axis
# after imshow) and PREDICTED labels on columns (the x axis); the axis labels
# were swapped.
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()
# -
# The darker squares in the confusion matrix plot indicate high numbers of cases, and you can hopefully see a diagonal line of darker squares indicating cases where the predicted and actual label are the same.
#
# ### Preprocess data in a pipeline
#
# Again, just like with binary classification, you can use a pipeline to apply preprocessing steps to the data before fitting it to an algorithm to train a model. Let's see if we can improve the penguin predictor by scaling the numeric features in a transformation steps before training. We'll also try a different algorithm (a support vector machine), just to show that we can!
# + tags=[]
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# Define preprocessing for numeric columns (scale them)
feature_columns = [0,1,2,3]
feature_transformer = Pipeline(steps=[
('scaler', StandardScaler())
])
# Create preprocessing steps
preprocessor = ColumnTransformer(
transformers=[
('preprocess', feature_transformer, feature_columns)])
# Create training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', SVC())])
# fit the pipeline to train a linear regression model on the training set
multi_model = pipeline.fit(x_penguin_train, y_penguin_train)
print (multi_model)
# -
# Now we can evaluate the new model.
# + tags=[]
# Get predictions from test data
penguin_predictions = multi_model.predict(x_penguin_test)
# Overall metrics
print("Overall Accuracy:",accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:",precision_score(y_penguin_test, penguin_predictions, average='macro'))
print("Overall Recall:",recall_score(y_penguin_test, penguin_predictions, average='macro'))
# Confusion matrix
# NOTE(fix): recompute the matrix from the NEW model's predictions — this
# cell previously re-plotted the stale `mcm` from the earlier logistic
# regression model.
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
# NOTE(fix): rows = actual, columns = predicted; labels were swapped before.
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()
# -
# ### Use the model with new data observations
#
# Now let's save our trained model so we can use it again later.
# +
import joblib
# Save the model as a pickle file
filename = './models/penguin_model.pkl'
joblib.dump(multi_model, filename)
# -
# OK, so now we have a trained model. Let's use it to predict the class of a new penguin observation:
# + tags=[]
# Load the model from the file
multi_model = joblib.load(filename)
# The model accepts an array of feature arrays (so you can predict the classes of multiple penguin observations in a single call)
# We'll create an array with a single array of features, representing one penguin
x_new = np.array([[50.4,15.3,224,5550]])
print ('New sample: {}'.format(x_new[0]))
# The model returns an array of predictions - one for each set of features submitted
# In our case, we only submitted one penguin, so our prediction is the first one in the resulting array.
penguin_pred = multi_model.predict(x_new)[0]
print('Predicted class is', penguin_classes[penguin_pred])
# -
# You can also submit a batch of penguin observations to the model, and get back a prediction for each one.
# + tags=[]
# This time our input is an array of two feature arrays
x_new = np.array([[49.5,18.4,195, 3600],
[38.2,20.1,190,3900]])
print ('New samples:\n{}'.format(x_new))
# Call the web service, passing the input data
predictions = multi_model.predict(x_new)
# Get the predicted classes.
for prediction in predictions:
print(prediction, '(' + penguin_classes[prediction] +')')
# -
# ## Learn More
#
# Classification is one of the most common forms of machine learning, and by following the basic principles we've discussed in this notebook you should be able to train and evaluate classification models with scikit-learn. It's worth spending some time investigating classification algorithms in more depth, and a good starting point is the [Scikit-Learn documentation](https://scikit-learn.org/stable/user_guide.html).
| 03 - Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.7 64-bit
# name: python37764bita3da3130c1a84ed9abf6da54fdeb0dc3
# ---
import json
import os

import spacy

# load the training data
# NOTE(fix): open() does not expand '~' — expand it explicitly so the path
# resolves to the user's home directory instead of a literal './~/...'.
with open(os.path.expanduser('~/Documents/pvr_training_data.json')) as fp:
    training_data = json.load(fp)
| pvr_custom_ner_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Customer Subscription Prediction For A Bank Campaign
# <br>
#
# #### Problem Statement:
# The data is related to direct marketing campaigns of a Portuguese banking institution. Predict if
# the client will subscribe to a term deposit based on a marketing campaign.
# <br>
#
# The data is related to direct marketing campaigns of a Portuguese banking institution. The
# marketing campaigns were based on phone calls. Often, more than one contact to the same
# client was required, in order to access if the product (bank term deposit) would be ('yes') or not
# ('no') subscribed.
# +
# import libraries required to carry out data preprocessing and visualization
import pandas as pd
import numpy as np
import seaborn as sns
#sns.set(font_scale = 1)
import matplotlib.pyplot as plt
# %matplotlib inline
#plt.rcParams['font.size'] = 5
#plt.rc('xtick', labelsize=10)
#plt.rc('ytick', labelsize=10)
import time
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import LabelEncoder
# import libraries to support modelling, hyperparameter tuning
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split, GridSearchCV
# import libraries to manage imbalanced data set
from imblearn.under_sampling import ClusterCentroids,RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler,SMOTE
#import sklearn classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
#import xgboost classifier
from xgboost import XGBClassifier
#import metrics required for model evaluation
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix,roc_auc_score
from sklearn import metrics
# -
'''
#below code was taken from stack exchange, this centers the plots in notebooks
from IPython.display import display, HTML
display(HTML("""
<style>
.output {
display: flex;
align-items: center;
text-align: center;
}
</style>
"""))
'''
#the above code places all the plots in the center but it distorts dataframe head output
#read the data set, used separator as ';' instead of default ','
df = pd.read_csv('./data/bank-additional-full.csv',sep=';')
#peek top 5 rows of data
df.head(10)
#this dataset doesnt contain null values rather nulls are replaced by 'unknown' labels in catagorical features
print(df.isnull().any().any())
#basic overview of the data frame
df.info()
#reading discriptive stats of numerical features
df.describe().T
#reading discriptive stats for categorical column
df.describe(include='object')
#make copy of dataframe, we will process copied dataframe
cdf = df.copy()
# Iterate through every categorical column and replace the 'unknown' label
# with the mode (most frequent value) of that column.
# NOTE(fix): iterate the column names directly instead of range(len(...)).
cat_var = df.select_dtypes(include='object').columns.values.tolist()
for col in cat_var:
    cdf[col].replace({"unknown": cdf[col].mode()[0]}, inplace=True)
# +
#cdf.to_csv('./data/mode-fill-bank.csv',index=False)
# -
#below functions takes data frame as an arguments and plots pretty heatmap of pearson correlations
def correlation_heatmap(df):
    """Draw an annotated heatmap of the pairwise Pearson correlations of *df*'s columns."""
    _, axes = plt.subplots(figsize=(14, 12))
    palette = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(
        df.corr(),
        cmap=palette,
        square=True,
        cbar_kws={'shrink': .9},
        ax=axes,
        annot=True,           # write the correlation value in each cell
        linewidths=0.1,
        vmax=1.0,
        linecolor='white',
        annot_kws={'fontsize': 15},
    )
    plt.title('Pearson Correlation of Features', y=1.05, size=15);
#calling function to plot heatmap
correlation_heatmap(cdf)
# ## Univariate plots
#the dataset has high imbalance in target variable which might required
#to be handled by re-sampling techniques
cdf['y'].value_counts().plot.bar(figsize=(5,5),title='customer subscribed');
#lets see the age distribution of customers who were part of the campaign
plt.hist(cdf['age'], bins = 100, edgecolor = 'k');
# ## Bivariate plots
#lets see whether customer age mattered for subscription or not
plt.hist(x = [cdf[cdf['y']=='yes']['age'], cdf[cdf['y']=='no']['age']],
bins=20, color = ['g','r'],stacked=True,label = ['yes','no'])
plt.title('age Histogram by subscribed')
plt.xlabel('age')
plt.ylabel('# of subscription')
plt.legend()
# We can see that although target is highly imbalanced, customers who are below 25 and above 60
# have higher subscription rate.
# +
#lets see if call duration affects subscription rate
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.hist(x = [cdf[cdf['y']=='yes']['duration'], cdf[cdf['y']=='no']['duration']],bins=50, color = ['g','r'],stacked=True,label = ['yes','no'])
plt.title('duration Histogram by subscribed')
plt.xlabel('duration')
plt.ylabel('# of subscription')
plt.legend()
plt.subplot(122)
plt.hist(x = [cdf[cdf['y']=='yes']['duration'], cdf[cdf['y']=='no']['duration']],bins=50, color = ['g','r'],stacked=True,label = ['yes','no'])
plt.title('duration Histogram by subscribed')
plt.xlabel('duration (seconds)')
plt.ylabel('# of subscription')
plt.ylim(0,5000)
plt.xlim(500,4000)
plt.legend()
# -
# In second plot we can clearly see that as call duration increases there are high chances of success,
# although note that we cannot use duration feature in modelling because at the time of prediction we dont know for how long customer will be on the call, we have to decide apriori.
#pdays- number of days that passed by after the client was last contacted from a previous campaign
#pdays=999 means the client was not contacted in previous campaign
plt.hist(x = [cdf[cdf['y']=='yes']['pdays'], cdf[cdf['y']=='no']['pdays']],bins=5, color = ['g','r'],stacked=False,label = ['yes','no'])
plt.title('pdays Histogram by subscribed')
plt.xlabel('pdays')
plt.ylabel('# of subscription')
plt.legend()
# Even though our target data is imbalanced we can see that for lower pdays we have higher success rates.
# To plot target against catagorical variables we will need to encode it.
le = LabelEncoder()
cdf['y']=le.fit_transform(cdf['y'])
#Does education matters for subscription, lets see.
plt.figure(figsize=(15,5))
plt.subplot(211)
sns.barplot(x='education', y = 'y', data=cdf)
plt.subplot(212)
sns.countplot(x='education',data=cdf)
# Even though we see illiterate have higher pecentage of 'yes' , as per countplot there are not many people in that category. all other categories have similar subscription rate.
#Does job matters for subscription?
plt.figure(figsize=(15,7))
plt.subplot(211)
sns.barplot(x='job', y = 'y', data=cdf)
plt.subplot(212)
sns.countplot(x='job',data=cdf)
# From the above plot we can see subscription rate varies for different job catagories.
#poutcome- outcome of the previous marketing campaign (categorical: "failure","nonexistent","success")
plt.figure(figsize=(10,4))
plt.subplot(121)
sns.barplot(x='poutcome', y = 'y', data=cdf)
plt.subplot(122)
sns.countplot('poutcome', data=cdf)
# Above plot shows if previous campaign was success for a particular client then there is high chance of client subscribing for term deposit. We can see there are many people that were not part of earlier
# campaign.
#how does month affects subsciption rate?
plt.figure(figsize=(10,6))
plt.subplot(211)
sns.barplot(x='month', y = 'y', data=cdf)
plt.subplot(212)
sns.countplot(x='month', data=cdf)
# We can see highest activity month for bank is May. Month of October, December, March and September have
# higher subscription rates.
#lets see how social and economic context attribute matters
plt.figure(figsize=(10,10))
plt.subplot(221)
sns.boxplot(y='euribor3m',x='y',data=cdf)
plt.subplot(222)
sns.boxplot(y='emp.var.rate',x='y',data=cdf)
plt.subplot(223)
sns.boxplot(y='cons.price.idx',x='y',data=cdf)
plt.subplot(224)
sns.boxplot(y='cons.conf.idx',x='y',data=cdf)
# Above boxplots shows that euribor3m and emp.var.rate does affect subscription rate. cons.price.idx and cons.conf.idx have almost same median for positive and negative class.
# ## Multi-variate plots
#how does marital status and education together affects subscription rate
plt.figure(figsize=(10,6))
sns.barplot(x='marital', y = 'y', hue='education',data=cdf)
# We can see that married and divorced people with different education have different subscription rate,
# unlike single which has almost similar subscription rates for different education background.
# Since some algorithms cannot work with categorical variables as is, it is best practise to encode categorical variables.
# +
edf = cdf.copy()
#create a list of categorical features
cat_var = edf.select_dtypes(include='object').columns.values.tolist()
le = LabelEncoder()
for i in range(0,len(cat_var)):
edf[cat_var[i]]=le.fit_transform(cdf[cat_var[i]])
#We will drop duration feature at it cannot be used while building the model
edf.drop('duration',axis=1,inplace=True);
# +
#create a list of machine learning classification algorithms to apply
MLA = [LogisticRegression(),RandomForestClassifier(),AdaBoostClassifier(),
       GradientBoostingClassifier(),XGBClassifier()]
#create table to compare MLA metrics
MLA_columns = ['MLA Name', 'F1-score', 'AUC score']
MLA_compare = pd.DataFrame(columns = MLA_columns)
#get the target feature (the last column of the encoded frame)
y=edf.iloc[:,-1]
#we will do a stratified split with 70% train and 30% test data
X_train,X_test,y_train,y_test = train_test_split(edf.iloc[:,:-1],y,
                                                 test_size=0.3,random_state=42,stratify=y)
row_index = 0
start = time.perf_counter()
#fit every algorithm with default hyperparameters and record its test metrics
for alg in MLA:
    #set algorithm name
    MLA_name = alg.__class__.__name__
    MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
    #fit the model and do the predictions of test data
    alg.fit(X_train,y_train)
    y_pred = alg.predict(X_test)
    #get the F1-score and AUC score and insert into the table
    MLA_compare.loc[row_index, 'F1-score'] = f1_score(y_test,y_pred)
    MLA_compare.loc[row_index, 'AUC score'] = roc_auc_score(y_test,y_pred)
    row_index+=1
end = time.perf_counter()
print('total run time in seconds %0.2f'%(end - start));
# +
#below code plots pretty looking confusion matrix, code was taken from one of the kaggle karnel
#below code plots pretty looking confusion matrix, code was taken from one of the kaggle karnel
import itertools
def plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):
    """
    Print the confusion matrix and render it as an annotated image.

    Set `normalize=True` to show row-normalized proportions instead of counts.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate each cell; use white text on dark cells, black on light ones.
    cell_format = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], cell_format),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
#lets apply default random forest model and see if the above function works fine
alg = RandomForestClassifier()
alg.fit(X_train,y_train)
y_pred = alg.predict(X_test)
#function call to plot confusion matrix for default random forest prediction
plot_confusion_matrix(confusion_matrix(y_test,y_pred),classes=['no','yes'])
# -
MLA_compare
# As we can see in above table F1 score is not really good, we know that our dataset is imbalanced hence predictions are favoring majority class.
# Lets apply resampling techniques and evaluate the results. Remember we have to resample only the training set and not the test set.
#lets do downsampling using cluster centroid technique
# NOTE(review): fit_sample was renamed fit_resample in newer imbalanced-learn
# releases -- confirm against the installed version.
cc = ClusterCentroids(random_state=0)
X_sample,y_sample = cc.fit_sample(X_train,y_train)
#rus = RandomUnderSampler(random_state=0)
#X_sample,y_sample = rus.fit_sample(X_train,y_train)
# +
#ros = RandomOverSampler(random_state=0)
#X_sample,y_sample = ros.fit_sample(X_train,y_train)
#apply default random forest and see metrics
# Evaluate a default random forest trained on the undersampled data but
# scored on the untouched (imbalanced) test set.
rf = RandomForestClassifier()
rf.fit(X_sample,y_sample)
y_pred = rf.predict(X_test)
# BUG FIX: sklearn metrics take (y_true, y_pred) in that order; the original
# passed them swapped, which changes the reported roc_auc_score value.
print(('roc score',roc_auc_score(y_test,y_pred)))
print(('f1 score',f1_score(y_test,y_pred)))
# +
#lets do oversampling using SMOTE (synthetic minority samples)
smote = SMOTE(random_state=0)
X_sample1, y_sample1 = smote.fit_sample(X_train,y_train)
rf = RandomForestClassifier()
rf.fit(X_sample1,y_sample1)
y_pred = rf.predict(X_test)
# same argument-order fix as above
print(('roc score',roc_auc_score(y_test,y_pred)))
print(('f1 score',f1_score(y_test,y_pred)))
# -
# As you can see with oversampling evaluation metric were better compared to undersampling, so lets stick
# with oversampling. We should check to see if our default random forest works well across the folds.
# 5-fold stratified CV sanity check of the oversampled random forest.
kf = StratifiedKFold(n_splits=5,random_state=1,shuffle=True)
scores = cross_val_score(rf, X_sample1, y_sample1, cv=kf, scoring='f1_macro')
scores
# Since our y_sample1 is balanced we see good F1 score across the folds, the actual score on test data which will have imbalance would be much lower than this.
print('Type of X_sample is ',type(X_sample1))
print('Type of X_test is ',type(X_test))
# As you can see data types of sampled data and test data is different which creates issues with XGB classifier, so we will convert test data as ndarray.
y=y_sample1.copy()
X_test = np.array(X_test)  # ndarray form avoids the XGBClassifier input-type issue
# +
# Re-run the model comparison, now training on the SMOTE-oversampled data
# while still evaluating on the original (imbalanced) test set.
MLA = [LogisticRegression(),RandomForestClassifier(),AdaBoostClassifier(),
       GradientBoostingClassifier(),XGBClassifier()]
#create table to compare MLA metrics
MLA_columns = ['MLA Name', 'F1-score', 'AUC score']
MLA_compare = pd.DataFrame(columns = MLA_columns)
#X_train,X_test,y_train,y_test = train_test_split(X_sample,y,test_size=0.3,random_state=42,stratify=y)
row_index = 0
start = time.perf_counter()
for alg in MLA:
    #set name of algorithms
    MLA_name = alg.__class__.__name__
    MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
    #fit the models and do predictions
    alg.fit(X_sample1,y_sample1)
    y_pred = alg.predict(X_test)
    #insert the scores in table
    MLA_compare.loc[row_index, 'F1-score'] = f1_score(y_test,y_pred)
    MLA_compare.loc[row_index, 'AUC score'] = roc_auc_score(y_test,y_pred)
    row_index+=1
end = time.perf_counter()
print('total run time in seconds %0.2f'%(end - start));
# -
MLA_compare
# Now its time to do hyperparameter tuning and see if our results get improve. I did gridsearch for all the above classifiers, since I dont have much computing resource on my local machine, I have shown individual models fitted with best parameters that I found from gridsearch.
#'n_estimators': [10, 50, 100, 300], #'learning_rate':[ 0.1, 0.2, 0.3] #'max_depth':[6,8,10]
# Grid-search one hyperparameter at a time (limited local compute); the cells
# below fit each classifier with the best parameters found from grid search.
param_grid={'max_features':['sqrt','log2',12]}
rf = RandomForestClassifier(n_estimators=100,max_depth=10)
grid_ = GridSearchCV(estimator=rf, param_grid=param_grid, scoring='f1',cv=kf)
grid_.fit(X_sample1,y_sample1)
grid_.best_score_
grid_.best_estimator_
# tuned logistic regression
clf3 = LogisticRegression(C=0.1)
clf3.fit(X_sample1,y_sample1)
y_pred3 = clf3.predict(X_test)
print(('roc score'),roc_auc_score(y_test,y_pred3))
print(('f1 score'),f1_score(y_test,y_pred3))
print(('accuracy score'),accuracy_score(y_test,y_pred3))
# tuned XGBoost
clf2 = XGBClassifier(max_depth=10,n_estimators=300)
clf2.fit(X_sample1,y_sample1)
y_pred2 = clf2.predict(X_test)
print(('roc score'),roc_auc_score(y_test,y_pred2))
print(('f1 score'),f1_score(y_test,y_pred2))
print(('accuracy score'),accuracy_score(y_test,y_pred2))
# tuned random forest
clf1 = RandomForestClassifier(n_estimators=100,max_depth=8)
clf1.fit(X_sample1,y_sample1)
y_pred1 = clf1.predict(X_test)
print(('roc score'),roc_auc_score(y_test,y_pred1))
print(('f1 score'),f1_score(y_test,y_pred1))
print(('accuracy score'),accuracy_score(y_test,y_pred1))
# tuned AdaBoost
clf = AdaBoostClassifier(learning_rate=0.3,n_estimators=300)
clf.fit(X_sample1,y_sample1)
y_pred = clf.predict(X_test)
print(('roc score'),roc_auc_score(y_test,y_pred))
print(('f1 score'),f1_score(y_test,y_pred))
print(('accuracy score'),accuracy_score(y_test,y_pred))
# #### ROC AUC CURVE
# ROC curve for the tuned random forest (clf1) predictions
fpr, tpr, _ = metrics.roc_curve(y_test,y_pred1)
auc = metrics.roc_auc_score(y_test,y_pred1)
plt.figure(figsize=(12,8))
plt.plot(fpr,tpr,label="validation, auc="+str(auc))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc=4)
plt.title('ROC AUC CURVE',fontsize=15)
plt.show()
plot_confusion_matrix(confusion_matrix(y_test,y_pred1),classes=['no','yes'],normalize=False,title='Confusion matrix',cmap=plt.cm.Blues)
from sklearn import ensemble
# Voting ensemble of the four tuned models.
# NOTE(review): the variable is named vote_hard but voting='soft' is used --
# the name is misleading; confirm which voting scheme was intended.
vote_est = [('rfc', RandomForestClassifier(n_estimators=100,max_depth=8)),
            ('lr', LogisticRegression(C=0.1)),
            ('xgb', XGBClassifier(max_depth=10,n_estimators=300)),
            ('ada', AdaBoostClassifier(learning_rate=0.3,n_estimators=300))]
vote_hard = ensemble.VotingClassifier(estimators = vote_est , voting = 'soft')
vote_hard.fit(X_sample1,y_sample1)
y_pred = vote_hard.predict(X_test)
print(('roc score'),roc_auc_score(y_test,y_pred))
print(('f1 score'),f1_score(y_test,y_pred))
print(('accuracy score'),accuracy_score(y_test,y_pred))
# We can see that random forest classifier gave us better results after hyper parameter tuning.<br>
# In this notebook I tried to use various techniques that are usually encountered while doing classification problem. This is my first notebook on end to end problem, I hope you enjoyed it.
| greyatom-hackathon-1/bank-term-depo-subscription/final-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Some more details on Floating point arithmetic discussed.
#
# <a href="http://www-users.math.umn.edu/~arnold/disasters/">List of disasters due to bad numerical computing - I</a>
#
# <a href="https://web.ma.utexas.edu/users/arbogast/misc/disasters.html">List of disasters due to bad numerical computing - II</a>
# $\newcommand{\magn}[1]{\lVert#1\rVert}$
# $\newcommand{\abs}[1]{\lvert#1\rvert}$
# $\newcommand{\Rb}{\mathbb{R}}$
# $\newcommand{\bkt}[1]{\left(#1\right)}$
#
# ### Condition number
#
# In almost all applications, we are interested in obtaining an output $f(x)$ for a given input $x$. However, there are inherent uncertainties in the input data $x$. These uncertainties could arise not only because of our inability to measure the input precisely but also due to the fact that numbers need not be represented exactly on the machine (as seen in the previous section). Hence, it is vital to understand how the solution to a problem gets affected by small perturbations in the input data. A given problem is said to be ***well-conditioned*** if "small" perturbations in $x$ result in "small" changes in $f(x)$. An ***ill-conditioned*** problem is one where "small" perturbations in $x$ lead to a "large" change in $f(x)$. The notion of "small" and "large" often depends on the problem and application of interest.
#
# <img src="./../images/Conditioning.png" width="200" height="40">Conditioning quantifies how small/large the change $\delta f$ in output is for a $\delta x$ perturbation of the input.</img>
#
#
# #### Absolute condition number
#
# One way to measure the "conditioning" of a problem is as follows. Let $\delta x$ denote a small perturbation of $x$ (the input) and let $\delta f = f(x+\delta x) - f(x)$ be the corresponding change in the output. The ***absolute condition number*** $\hat{\kappa}\bkt{x,f}$ of the problem $f$ at $x$ is defined as
# $$\hat{\kappa} = \lim_{\delta \to 0} \sup_{\magn{\delta x} = \delta} \dfrac{\magn{\delta f}}{\magn{\delta x}}$$
# Note that if $f$ is differentiable at $x$, and $J(x)$ is the Jacobian of $f(x)$ at $x$, we obtain that
# $$\hat{\kappa} = \magn{J(x)}$$
#
# #### Relative condition number
# Note that since the input ($x$) and the output ($f(x)$) are on different spaces, a more appropriate measure of conditioning is to measure the changes in the input and output in terms of relative changes. The ***relative condition number*** $\kappa\bkt{x,f}$ of the problem $f$ at $x$ is defined as
# $$\kappa = \lim_{\delta \to 0} \sup_{\magn{\delta x} = \delta} \bkt{\dfrac{\magn{\delta f}}{\magn{f}} \bigg/ \dfrac{\magn{\delta x}}{\magn{x}}}$$
# As before, if $f$ is differentiable at $x$, we can express this in terms of the Jacobian $J(x)$ as
# $$\kappa = \dfrac{\magn{J(x)}}{\magn{f(x)}/\magn{x}}$$
# Even though both the above notions have their uses, relative condition number is more appropriate since as we saw earlier, floating point arithmetic introduces only relative errors.
#
# #### Conditioning of subtraction
# Consider **subtracting two positive numbers**, i.e., $f(a,b) = a-b$. If we perturb $a$ by $a+\delta_a$ and $b$ by $b+\delta_b$, we have the condition number in $2$-norm to be
# $$\kappa(f,a,b) = \lim_{r \to 0} \sup_{\magn{\delta}_2 = r} \dfrac{\abs{\delta_a-\delta_b}/\abs{a-b}}{\sqrt{\delta_a^2+\delta_b^2}/\sqrt{a^2+b^2}} = \lim_{r \to 0} \sup_{\magn{\delta}_2 = r} \dfrac{\abs{\delta_a-\delta_b}/\abs{a-b}}{r/\sqrt{a^2+b^2}} = \dfrac{\sqrt{2}\sqrt{a^2+b^2}}{\abs{a-b}}$$
# Hence, we see that for large values of $a$ and $b$ such that $a-b$ is small (i.e., $a$ is close to $b$), the problem is ill-conditioned.
#
# #### Conditioning of solving for roots of polynomials
# Consider finding the **roots of the polynomial $ax^2+bx+c$**. Here the function $f: \Rb^3 \mapsto \Rb^2$, where $f(a,b,c) = \begin{bmatrix} r_1 & r_2 \end{bmatrix}$, where $r_1,r_2$ are the roots of $ax^2+bx+c$. Now let's look at the condition at $(a,b,c) = (1,-2,1)$. The roots are $1,1$. Let's perturb the $2$ by $\delta$. We have
# \begin{align*}
# \kappa & = \lim_{\delta \to 0} \dfrac{\magn{f(1,-(2+\delta),1)-f(1,-2,1)}/\magn{f(1,-2,1)}}{\magn{(1,-(2+\delta),1)-(1,-2,1)}/\magn{(1,-2,1)}} = \dfrac{\magn{(1,-2,1)}}{\magn{f(1,-2,1)}}\lim_{\delta \to 0} \dfrac{\magn{\dfrac{\delta+\sqrt{\delta^2+4\delta}}2,\dfrac{\delta-\sqrt{\delta^2+4\delta}}2}}{\delta}\\
# & = \dfrac{\sqrt6}{2\sqrt2} \lim_{\delta \to 0} \dfrac{\sqrt{2\delta^2+2\delta^2+8\delta}}{\delta} = \sqrt3 \lim_{\delta \to 0} \dfrac{\sqrt{\delta^2+2\delta}}{\delta} = \sqrt3 \lim_{\delta \to 0} \sqrt{1+2/\delta} = \infty
# \end{align*}
#
# #### Conditioning of matrix-vector products
#
# We have $f(x) = Ax$. The Jacobian is nothing but the matrix $A$. Hence, we have
# $$\kappa\bkt{x,Ax} = \dfrac{\magn{A}\magn{x}}{\magn{Ax}}$$
# Note that
# $$\magn{x} = \magn{A^{-1}\bkt{Ax}} \leq \magn{A^{-1}} \magn{Ax}$$
# Hence, we obtain that
# $$\kappa\bkt{x,Ax} = \dfrac{\magn{A}\magn{x}}{\magn{Ax}} \leq \dfrac{\magn{A}\magn{A^{-1}} \magn{Ax}}{\magn{Ax}} = \magn{A} \magn{A^{-1}}$$
# where the bound is independent of $x$. Hence, $\magn{A} \magn{A^{-1}}$ is called as the condition number of the matrix $A$ and is denoted as $\kappa(A)$.
#
# #### Conditioning of a system of equations
# We are interested in solving the linear system $Ax=b$. In this case, we have $f(b) = A^{-1}b$. The Jacobian of $f(b)$ is nothing but the matrix $A^{-1}$. Hence, we have
# $$\kappa\bkt{b,x} = \dfrac{\magn{A^{-1}}\magn{b}}{\magn{A^{-1}b}} = \dfrac{\magn{A^{-1}}\magn{A \bkt{A^{-1}b}}}{\magn{A^{-1}b}} \leq \dfrac{\magn{A^{-1}}\magn{A} \magn{A^{-1}b}}{\magn{A^{-1}b}} = \kappa(A)$$
| Lectures/Lecture_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Synthetic linear data: y = 4 + 3x + Gaussian noise.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
print(X.shape)
print(y.shape)
# Normal equation: theta = (X^T X)^{-1} X^T y, with a bias column of ones.
X_b = np.c_[np.ones((100, 1)), X]
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best
# Predict at the domain endpoints x=0 and x=2 to draw the fitted line.
X_new = np.array([[0,], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new]
y_predict = X_new_b.dot(theta_best)
y_predict
from matplotlib import pyplot as plt
plt.plot(X_new, y_predict, "r-")
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.show()
#Now we are going to put in the linear regression model from Scikit-Learn
#Even with the built-in linear regression we still get the same coefficients
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
lin_reg.predict(X_new)
# Least-squares via SVD (what LinearRegression uses under the hood)
theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6)
theta_best_svd
#Following computes theta via the Moore-Penrose pseudoinverse: X^+ y
np.linalg.pinv(X_b).dot(y)
#Gradient descent is used to update the parameters of the model
def update_weights(m, b, X, Y, learning_rate):
    """Perform one gradient-descent step for simple linear regression y = m*x + b.

    Parameters
    ----------
    m, b : float
        Current slope and intercept.
    X, Y : sequences of equal length
        Training inputs and targets.
    learning_rate : float
        Step size.

    Returns
    -------
    tuple of float
        The updated (m, b).

    BUG FIX: the original computed the update but never returned it, so the
    caller's m and b were left unchanged (floats are passed by value).
    """
    m_deriv = 0
    b_deriv = 0
    N = len(X)
    for i in range(N):
        # Partial derivatives of the squared error:
        # d/dm = -2x(y - (mx + b)),  d/db = -2(y - (mx + b))
        m_deriv += -2*X[i] * (Y[i] - (m*X[i] + b))
        b_deriv += -2*(Y[i] - (m*X[i] + b))
    # We subtract because the derivatives point in direction of steepest ascent
    m -= (m_deriv / float(N)) * learning_rate
    b -= (b_deriv / float(N)) * learning_rate
    return m, b
#The gradient curve can give us our minimum value
#Mean square equation helps us get our global minima, trying to avoid local minimums at all time
# Batch gradient descent on the normal-equation data (uses X_b and y from above).
eta = 0.1 # learning rate
n_interations = 1000  # number of gradient steps (sic: "iterations")
m = 100  # number of training samples
theta = np.random.randn(2, 1) #random initialization
for interation in range(n_interations):
    # gradient of the MSE cost: (2/m) * X^T (X theta - y)
    gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
    theta = theta - eta * gradients
theta
# +
#Always use matplotlib to plot out your functions: import matplotlib.pyplot as plt
#Problem with batch gradient: it uses the whole training set to compute the gradients at every step
#This one uses Stochastic (Random) Gradient Descent with a simple learning schedule
n_epochs = 50
t0, t1 = 5, 50 #scheduling hyperparameters
def learning_schedule(t):
    # learning rate decays as the step counter t grows
    return t0/(t + t1)
theta = np.random.randn(2, 1) # random initialization
for epoch in range(n_epochs):
    for i in range(m):
        # one randomly chosen training instance per gradient step
        random_index = np.random.randint(m)
        xi = X_b[random_index:random_index+1]
        yi = y[random_index:random_index+1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(epoch * m + i)
        theta = theta - eta * gradients
# -
theta
print(X.shape)
print(y.shape)
#epochs = time periods in one life
from sklearn.linear_model import SGDRegressor
# sklearn's built-in SGD linear regression; no regularization (penalty=None)
sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1)
y = y.ravel()  # SGDRegressor expects a 1-D target array
sgd_reg.fit(X, y)
sgd_reg.intercept_, sgd_reg.coef_
#Mini-batch vs. stochastic: mini-batch uses less computing power per update; the trade-off may be a less accurate representation, lingering near local minima rather than reaching the global minimum
#Different algorithms
#Mini batch GD Batch
#Stochastic GD Batch
#Batch GD
#SVD
#Normal Equation
#You can always fit your nonlinear data with a linear model using POLYNOMIAL REGRESSION
#Ex:
import numpy as np
from matplotlib import pyplot as plt
# quadratic data: y = 0.5x^2 + x + 2 + noise
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
from sklearn.preprocessing import PolynomialFeatures
# add x^2 as an extra feature, then fit ordinary linear regression on [x, x^2]
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0]
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
# +
#Learning curves
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot train vs. validation RMSE as the training-set size grows.

    Refits `model` on the first m training rows for m = 1..len(X_train)-1,
    then plots both error curves (red '+' = train, blue = validation).
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        # BUG FIX: the original first assigned model.predict(X_train[:m]) to
        # y_val_predict and immediately overwrote it -- dead, misleading code.
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
# -
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
#Regularizing linear models
#Ridge regression: adds an L2 penalty on the weights, shrinking them toward zero
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky")
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
#Ridge via Stochastic Gradient Descent: penalty="l2" is the same L2 regularizer
sgd_reg = SGDRegressor(penalty="l2")
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
#Lasso regression (L1 penalty): tends to completely eliminate the weights of the least important features
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
#Elastic Net: mix of the L1 and L2 penalties (l1_ratio controls the mix)
from sklearn.linear_model import ElasticNet
elastic_net= ElasticNet(alpha=0.1, l1_ratio=0.5)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
# +
#Early stopping: track the validation error each epoch and remember the best
#model -- this indicates where the model starts to overfit the training data.
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#prepare the data
# NOTE(review): X_train/X_val/y_train/y_val are locals of plot_learning_curves
# above, not module-level names -- confirm they are created in an earlier cell.
poly_scaler = Pipeline([
    ("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
    ("std_scaler", StandardScaler()) ])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
# max_iter=1 + warm_start=True: each fit() call runs one more epoch
sgd_reg = SGDRegressor(max_iter=1, tol=-np.infty, warm_start=True,
                       penalty=None, learning_rate="constant", eta0=0.0005)
minimum_val_error = float("inf")
best_epoch = None
best_model = None
for epoch in range(1000):
    sgd_reg.fit(X_train_poly_scaled, y_train) #continuing where we left off thanks to warm_start
    # BUG FIX: the original called sgd_reg.predicyt(...) -- an AttributeError.
    y_val_predict = sgd_reg.predict(X_val_poly_scaled)
    val_error = mean_squared_error(y_val, y_val_predict)
    if val_error < minimum_val_error:
        minimum_val_error = val_error
        best_epoch = epoch
        best_model = clone(sgd_reg)
# -
| Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: limo
# language: python
# name: limo
# ---
# # Python decorators
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [python]
# My notes to understand how function decorators work (I don't use classes enough to worry about class decorators). Much of these notes is copied verbatim from the sources below.
# + [markdown] tags=[]
# ## Intro
#
# - A decorator is a function designed to wrap another function to enhance its capability at runtime.
# - They work as syntactic sugar for `decorated = decorator(decorated)`.
# - Decorators are run right after the decorated function is defined, not when it is run (i.e. they run at *import time*, not *runtime*).
#
# -
# ## Basic mechanics
# +
def decorator(func):
    # Runs at import/definition time, once per decorated function.
    print('Running decorator')
    return func
@ decorator
def greeter():
    return 'Hello'
greeter()
# -
# The above is equivalent to:
# +
greeter = decorator(greeter)
greeter()
# -
# Hence, in general, a decorator replaces the function it wraps with the return value of the decorator called with the function as its argument.
#
# `decorated = decorator(decorated)`
# ## Registration decorators
#
# The simplest kind of decorator performs some kind of action and returns the function itself.
# +
registry = []
def register(func):
    # Registration decorator: record the function, return it unchanged.
    registry.append(func)
    return func
@register
def greeter():
    print('Hello')
greeter(), registry
# -
# Notes:
#
# - `greeter = register(greeter)` assigns `greeter` to itself, as that's what's returned by `register`.
# ## Decorators that return a different function
# +
import time
def timer(func):
    """Decorator: print elapsed time, arguments and result of each call.

    Deliberately does NOT use functools.wraps -- the discussion that follows
    relies on the decorated function's __name__ being 'wrapper'.
    """
    def wrapper(*args):
        start = time.time()
        result = func(*args)
        elapsed = time.time() - start
        name = func.__name__  # original function's name, via the closure over func
        arg_str = ', '.join(repr(arg) for arg in args)
        print(f'[{elapsed:.6f}s] {name}({arg_str}) -> {result}')
        return result
    return wrapper
@timer
def factorial(n):
    # Recursive calls go through the decorated wrapper too, so each level is timed.
    return 1 if n < 2 else n * factorial(n-1)
factorial(3)
# -
# ### Q&A:
# - How does this work? By running `factorial = timer(factorial)`, the decorator assigns `factorial` to `wrapper`. Thus, when we call `factorial` we really call `wrapper`, which returns the same result `factorial` would have, but also performs the extra functionality. We can check the name attribute of factorial to confirm this; the decorated `factorial` function points to `wrapper`, no longer to `factorial`.
factorial.__name__
# - How does `wrapper` have access to `factorial` without taking it as an argument? `factorial` is a variable of the local scope of the `timer` function. By accessing that variable, `wrapper` becomes a closure: a function with access to variables that are neither global nor defined in its function body (my notes on [closures](https://fabiangunzinger.github.io/blog/python/2020/10/05/python-functions.html#Closures)). The below confirms this.
factorial.__closure__[0].cell_contents
# - Where does `wrapper` get the arguments from `factorial` from? The short answer is: the arguments are passed directly to it when we call the decorated `factorial` function. This follows directlry from the answer to the first question above: once `factorial` is decorated, calling it calls `wrapper`.
# - Why don't we pass the function arguments as arguments to `timer` (i.e. why isn't it `timer(func, *args)`? Because all timer does is replace `factorial` with `wrapper(factorial)`, which is `wrapper`, which then gets called as `wrapper(*args)`. So, `timer` has no use for arguments.
#
#
# ## Decorators with state
# +
def logger(func):
    """Decorator that counts and logs calls to the wrapped function.

    `calls` lives in logger's scope; `nonlocal` lets wrapper mutate it, and
    each decorated function gets its own independent counter.
    """
    calls = 0
    def wrapper(*args, **kwargs):
        nonlocal calls
        calls +=1
        print(f'Call #{calls} of {func.__name__}')
        return func(*args, **kwargs)
    return wrapper
@logger
def greeter():
    print('Hello')
@logger
def singer():
    print('lalala')
@logger
def congratulator():
    print('Congratulations!')
greeter()
greeter()
singer()
congratulator()
# -
# ## Decorator with arguments
# Now I want the ability to deactivate the logger for certain functions. So I wrap the decorator in a decorator factory, like so:
# +
def param_logger(active=True):
    """Decorator factory: returns a logging decorator that can be disabled.

    param_logger(...) runs first and returns `decorator`; Python then calls
    `decorator` with the function -- so usage always needs the parentheses.
    When active is False the call still goes through, it is just not logged.
    """
    def decorator(func):
        calls = 0
        def wrapper(*args, **kwargs):
            nonlocal calls
            if active:
                calls +=1
                print(f'Call #{calls} of {func.__name__}')
            return func(*args, **kwargs)
        return wrapper
    return decorator
@param_logger()
def greeter():
    print('Hello')
@param_logger(active=True)
def singer():
    print('lalala')
@param_logger(active=False)
def congratulator():
    print('Congratulations!')
greeter()
greeter()
singer()
congratulator()
# -
# ===== work in progress =====
#
# How does this work? I'm not completely confident, actually, but this is how I explain it to myself.
#
# How I think this works (not sure about this):
#
# 1. temp = param_logger(), returns `decorator` with access to nonlocal `active` argument.
# 2. Because we add () to decorator, `decorator` is immediately called and returns wrapper, which is also assigned to `temp`, i.e. `temp = decorator(func)`.
# 3. Finally, as always, we have `greeter = wrapper(greeter)`.
#
# How this works:
#
# 1. `temp = param_logger(active=True)`, decorator factory returns `decorator`
# 2. `greeter = temp(greeter)`, call decorator returned by decorator factory (as usual)
#
# In our initial logger function above, both the argument to the outer function (*func*) and the variable defined inside the outer function (*calls*) are free variables of the closure function wrapper, meaning that wrapper has access to them even though they are not bound inside wrapper.
#
# ===== work in progress =====
#
#
# If we remember that
@logger
def greeter():
print('Hello')
# is equivalent to
greeter = logger(greeter)
# and if we know that we can use `__code__.co_freevars` to get the free variables of a function, then follows that we can get a view of the free variables of the decorated greeter function like so:
logger(greeter).__code__.co_freevars
# This is as expected. Now, what are the free variables of param_logger?
param_logger().__code__.co_freevars
# This makes sense: *active* is the function argument and we do not define any additional variables inside the scope of param_logger, so given our result above, this is what we would expect.
#
# But param_logger is a decorator factory and not a decorator, which means it produces a decorator at the time of decoration. So, what are the free variables of the decorator is produces?
#
# Similar to above, remembering that
@param_logger
def greeter():
print('Hello')
# is equivalent to
greeter = param_logger()(greeter)
# we can inspect the decorated greeter function's free variables like so:
param_logger()(greeter).__code__.co_freevars
# We can see that active is now an additional free variable that our wrapper function has access to, which provides us with the answer to our question: decorator factories work by producing decorators at decoration time and passing on the specified keyword to the decorated function.
# ## Decorator factory beautifying
# A final point for those into aesthetics or coding consistency: we can tweak our decorator factory so that we can ommit the `()` if we pass no keyword arguments.
# +
def logger(func=None, active=True):
    """Logging decorator usable both as @logger and @logger(...).

    Used bare (@logger), Python passes the function directly, so `func` is
    set and the decorator is applied immediately; called with keyword args,
    `func` is None and the decorator itself is returned for Python to apply.
    """
    def decorator(func):
        calls = 0
        def wrapper(*args, **kwargs):
            nonlocal calls
            if active:
                calls +=1
                print(f'Call #{calls} of {func.__name__}')
            return func(*args, **kwargs)
        return wrapper
    return decorator(func) if func else decorator
@logger
def greeter():
    print('Hello')
@logger()
def babler():
    print('bablebalbe')
@logger(active=True)
def singer():
    print('lalala')
@logger(active=False)
def congratulator():
    print('Congratulations!')
greeter()
greeter()
babler()
singer()
congratulator()
# -
# To understand what happens here, remember that decorating *func* with a decorator is equivalent to
func = decorator(func)
# While decorating it with a decorator factory is equivalent to
func = decorator()(func)
# The control flow in the final return statement of the above decorator factory simply switches between these two cases: if logger gets a function argument, then that's akin to the first scenario, where the func argument is passed into decorator directly, and so the decorator factory returns *decorator(func)* to mimic this behaviour. If *func* is not passed, then we're in the standard decorator factory scenario above, and we simply return the decorator uncalled, just as any plain decorator factory would.
# Recipe 9.6 in the [Python Cookbook](https://www.oreilly.com/library/view/python-cookbook-3rd/9781449357337/) discusses a neat solution to the above for a registration decorator using functools.partial(), which I haven't managed to a scenario with a decorator factory. Might give it another go later.
# ## Mistakes I often make
# I often do the below:
# +
from functools import wraps
def decorator(func):
    @wraps  # INTENTIONAL BUG for illustration (discussed below): should be @wraps(func)
    def wrapper(*args, **kwargs):
        print('Func is called:', func.__name__)
        return func(*args, **kwargs)
    return wrapper
@decorator
def greeter(name):
    return f'Hello {name}'
greeter('World')
# -
# What's wrong, there? `@wraps` should be `@wraps(func)`.
# +
from functools import wraps
def decorator(func):
    """Announce each call to *func*, keeping its metadata via functools.wraps."""
    @wraps(func)  # copies __name__, __doc__, etc. from func onto the wrapper
    def inner(*args, **kwargs):
        print('Func is called:', func.__name__)
        result = func(*args, **kwargs)
        return result
    return inner
@decorator
def greeter(name):
    return f'Hello {name}'
greeter('World')
# -
# ## Application
# Pass kwargs to decorator and make factory return function result
# +
funcs = []
def factory(**kwargs):
    # Decorator factory that CALLS func immediately with the supplied kwargs
    # and stores the result; the function object itself is returned unchanged.
    def adder(func):
        funcs.append(func(**kwargs))
        return func
    return adder
@factory(text='This is very cool!')
def shout(text='Hello'):
    print(text.upper())
# funcs holds the call results (None here, since shout only prints)
for f in funcs:
    f
# -
# Create tuple and supply kwargs upon function call in make_data.py
# +
from collections import namedtuple
FunctionWithKwargs = namedtuple('FunctionWithKwargs', ['func', 'kwargs'])
funcs = []
def factory(func=None, **kwargs):
    # Dual-use decorator/factory: store (func, kwargs) pairs so the calls can
    # be made later with the pre-bound keyword arguments.
    def adder(func):
        funcs.append(FunctionWithKwargs(func, kwargs))
        return func
    return adder(func) if func else adder
@factory(text='Ha', mark='@')
def shout(text='Hello', mark='!'):
    print(text.upper() + mark)
for f in funcs:
    f.func(**f.kwargs)
# -
# Can I just alter the parametrisation of func inside the factory based on the kwargs and then return the newly parametrised function without having to call it?
# + [markdown] tags=[]
# ## Main sources
#
# - [Fluent Python](https://www.oreilly.com/library/view/fluent-python/9781491946237/)
# - [Python Cookbook](https://www.oreilly.com/library/view/python-cookbook-3rd/9781449357337/)
# - [Python Essential Reference](https://www.oreilly.com/library/view/python-essential-reference/9780768687040/)
# - [Learning Python](https://www.oreilly.com/library/view/learning-python-5th/9781449355722/)
# -
| backups/.ipynb_checkpoints/2020-09-30-decorators-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Computing the GP-DRT
# In this example we are going to use a simple model to illustrate how the GP-DRT can detect EIS data inconsistencies. We are going to use the
# +
# import the libraries needed
import importlib
import GP_hilbert
importlib.reload(GP_hilbert)
import numpy as np
import matplotlib.pyplot as plt
from math import pi, log10, sqrt
from scipy.optimize import minimize
import seaborn as sns
# -
# plot standards
plt.rc('font', family='serif', size=15)
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# ## 1) Define the synthetic impedance experiment $Z_{\rm exp}(\omega)$
# ### 1.1) Define frequency range
N_freqs = 33
freq_min = 10**-3
freq_max = 10
freq_vec = np.logspace(log10(freq_min), log10(freq_max), num=N_freqs, endpoint=True)
tau_vec = np.logspace(-log10(freq_max), -log10(freq_min), num=N_freqs, endpoint=True)
omega_vec = 2.*pi*freq_vec
# ### 1.2) Define the circuit parameters
# +
# from the conference article of Murer & coworkers
# DOI: http://dx.doi.org/10.5599/jese.725
# Equivalent-circuit parameters: series resistance, initial polarization
# resistance, and double-layer capacitance.
R_omega = 50
R_p_0 = 500
C_dl = 2e-2
# increasing R_p: the polarization resistance drifts quadratically with the
# elapsed measurement time, making the synthetic experiment non-stationary
k_incr = 5e-6
time_vec = np.flip(np.cumsum(1./np.flip(freq_vec)))
R_p = R_p_0 + k_incr*time_vec**2
# -
# ### 1.3) Generate exact impedance $Z_{\rm exact}(\omega)$
# +
# reference (no drift)
Z_ref = R_omega + R_p_0/(1+1j*omega_vec*C_dl*R_p_0)
# exact (with drift)
Z_exact = R_omega + R_p/(1+1j*omega_vec*C_dl*R_p)
# external inductance
L_0 = 0 # H (henry; the original comment said "F", but inductances are in henries)
# -
# ### 1.4) Generate stochastic experiment with $Z_{\rm exp}(\omega) = Z_{\rm exact}(\omega)+ \sigma_n (\varepsilon_{\rm re} + i\varepsilon_{\rm im})$ with $\varepsilon_{\rm re}$ and $\varepsilon_{\rm im}$ are independent and identically distributed normals centered with 0 mean and standard deviation equal to 1
rng = np.random.seed(2421975) # seed the random number generator (np.random.seed returns None, so rng is None)
sigma_n_exp = 0.8
Z_exp = Z_exact + (sigma_n_exp)*(np.random.normal(0, 1, N_freqs)+1j*np.random.normal(0, 1, N_freqs))
# ### 1.5) Plot
# +
fig, ax = plt.subplots()
# data
plt.plot(np.real(Z_exact), -np.imag(Z_exact), linewidth=4, color='black', label='exact')
plt.plot(np.real(Z_exp), -np.imag(Z_exp), 'o', markersize=5, color='red', label='synth exp')
plt.plot(np.real(Z_ref), -np.imag(Z_ref), ':', linewidth=4, color='black', label='reference')
# marked freqs
plt.plot(np.real(Z_exp[0:25:8]), -np.imag(Z_exp[0:25:8]), 'p', markersize=7, color='black')
plt.xlabel(r'$Z_{\rm re}/\Omega$', fontsize = 20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize = 20)
plt.annotate(r'$10^{-3}$ Hz', xy=(np.real(Z_exp[0]), -np.imag(Z_exp[0])),
xytext=(np.real(Z_exp[0])-180, -30-np.imag(Z_exp[0])),
arrowprops=dict(arrowstyle='-', connectionstyle='arc'))
plt.annotate(r'$10^{-2}$ Hz', xy=(np.real(Z_exp[8]), -np.imag(Z_exp[8])),
xytext=(np.real(Z_exp[8])+30, 30-np.imag(Z_exp[8])),
arrowprops=dict(arrowstyle='-', connectionstyle='arc'))
plt.annotate(r'$10^{-1}$ Hz', xy=(np.real(Z_exp[16]), -np.imag(Z_exp[16])),
xytext=(np.real(Z_exp[16])+40, 10-np.imag(Z_exp[16])),
arrowprops=dict(arrowstyle='-', connectionstyle='arc'))
plt.annotate(r'$1$ Hz', xy=(np.real(Z_exp[24]), -np.imag(Z_exp[24])),
xytext=(np.real(Z_exp[24])+40, 10-np.imag(Z_exp[24])),
arrowprops=dict(arrowstyle='-', connectionstyle='arc'))
plt.legend(frameon=False, fontsize=15, loc='upper left')
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('scaled')
plt.xticks(range(0, 701, 100))
plt.yticks(range(0, 401, 100))
plt.xlim(0, 700)
plt.ylim(0, 400)
plt.xlabel(r'$Z_{\rm re}/\Omega$', fontsize = 20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize = 20)
plt.show()
# -
# ## 2) Optimize the hyperparameters
# ### 2.1) Set the kernel options
# +
ker_opts = {
'sigma_DRT': 100.,
'sigma_SB': 0.5,
'ell': 15.,
'tau_max': 1E2,
'DRT': True,
'SB': True,
'SB_ker_type': 'IQ'
}
# these values are nominal
# -
# ### 2.2) Initialize the hyperparameters
sigma_n = 2.
sigma_DRT = 100.
sigma_SB = 0.5
ell = 15
sigma_L = 1E-10
#
# ### 2.3) Maximize the NMLL
# +
theta_0 = np.array([sigma_n, sigma_DRT, sigma_SB, ell, sigma_L])
seq_theta = np.copy(theta_0)
def print_results(theta):
    """Optimizer callback: record and print the current hyperparameters.

    Appends ``theta`` = (sigma_n, sigma_DRT, sigma_SB, ell, sigma_L) as a new
    row of the module-level ``seq_theta`` history, then prints the values
    together with the current negative marginal log-likelihood.

    NOTE(review): this call to ``GP_hilbert.NMLL_fct`` omits the trailing
    'im' argument that the ``minimize`` calls below pass — confirm that the
    function's default matches, otherwise the printed evidence may differ
    from the objective actually being optimized.
    """
    global seq_theta
    seq_theta = np.vstack((seq_theta, theta))
    print('%.4E, %.4E, %.4E, %.4E, %.6E; evidence = %.8E'%(theta[0], theta[1], theta[2], theta[3], theta[4], GP_hilbert.NMLL_fct(theta, Z_exp.imag, omega_vec, ker_opts)))
res = minimize(GP_hilbert.NMLL_fct, theta_0, args=(Z_exp.imag, omega_vec, ker_opts, 'im'), method='Powell', \
callback=print_results, options={'disp': True, 'xtol': 1E-6, 'ftol': 1E-6})
res = minimize(GP_hilbert.NMLL_fct, res.x, args=(Z_exp.imag, omega_vec, ker_opts, 'im'), method='Nelder-Mead', \
callback=print_results, options={'disp': True, 'xatol': 1E-6, 'fatol': 1E-6})
theta = res.x
sigma_n, sigma_DRT, sigma_SB, ell, sigma_L = theta
# -
# update the values of the kernel
# as the GP_hilbert functions simply copy ker_opts
ker_opts['sigma_SB'] = sigma_SB
ker_opts['ell'] = ell
ker_opts['sigma_DRT'] = sigma_DRT
# ### 2.4) Check if the results are reasonable
# #### 2.4.1) Check $L_0$
# +
# Imaginary-part kernel matrix evaluated at the measured frequencies.
K_im = GP_hilbert.mat_K(omega_vec, omega_vec, ker_opts, 'im')
# Homoskedastic measurement-noise covariance.
Sigma = (sigma_n**2)*np.eye(N_freqs)
# Full covariance including the sigma_L-scaled rank-one omega x omega term.
K_full = K_im + Sigma + (sigma_L**2)*np.outer(omega_vec, omega_vec)
# Covariance without that rank-one term, used to estimate L_0 below.
K_full_reg = K_im + Sigma
# inverse of K_full_ref
# begin FC - added
# Project onto the nearest positive-definite matrix if needed so the
# Cholesky factorization below cannot fail on numerical round-off.
if not GP_hilbert.is_PD(K_full_reg):
    K_full_reg = GP_hilbert.nearest_PD(K_full_reg)
# end FC - added
# cholesky factorization
L = np.linalg.cholesky(K_full_reg)
# Invert via the Cholesky factor: inv(K) = inv(L).T @ inv(L).
inv_L = np.linalg.inv(L)
inv_K_full_reg = np.dot(inv_L.T, inv_L)
# Closed-form estimate of L_0 as a ratio of quadratic forms in omega_vec.
num_L_0 = omega_vec@(inv_K_full_reg@Z_exp.imag)
den_L_0 = (sigma_L**-2) + omega_vec@(inv_K_full_reg@omega_vec)
L_0_est = num_L_0/den_L_0
# Compare the estimate against the known value L_0 from the synthetic setup.
print('L_0_est = %.6E F vs. L_0 = %.6E F'%(L_0_est, L_0))
# -
# #### 2.4.2) Check the fit of $\Im(Z_{\rm exp})$ against the GP
# ##### 2.4.2.1) Run the GP regression
# +
# begin FC - added
if not GP_hilbert.is_PD(K_full):
K_full = GP_hilbert.nearest_PD(K_full)
# end FC - added
# cholesky factorization
L = np.linalg.cholesky(K_full)
# covariance matrix
inv_L = np.linalg.inv(L)
inv_K_full = np.dot(inv_L.T, inv_L)
N_star_freqs = N_freqs
freq_star_vec = np.copy(freq_vec) #np.logspace(-4., 4., num=N_star_freqs, endpoint=True)
omega_star_vec = 2.*pi*freq_star_vec
mu_star_vec = np.zeros_like(omega_star_vec)
sigma_star_vec = np.zeros_like(omega_star_vec)
for index, omega_star in enumerate(omega_star_vec):
#print('iter = ', index+1, '/', N_star_freqs)
omega_star_np = np.array([omega_star])
k_star = GP_hilbert.mat_K(omega_vec, omega_star_np, ker_opts, 'im').flatten() + (sigma_L**2)*omega_vec*omega_star_np
k_star_star = GP_hilbert.mat_K(omega_star_np, omega_star_np, ker_opts, 'im').flatten() + (sigma_L**2)*omega_star_np**2
mu_star_vec[index] = k_star@(inv_K_full@Z_exp.imag)
sigma_star_vec[index] = np.sqrt(k_star_star - k_star@(inv_K_full@k_star))
# -
# ##### 2.4.2.2) Plot the outcome of the GP regression
plt.fill_between(freq_star_vec, -mu_star_vec-3*sigma_star_vec, -mu_star_vec+3*sigma_star_vec, facecolor='lightgrey')
plt.fill_between(freq_star_vec, -mu_star_vec-2*sigma_star_vec, -mu_star_vec+2*sigma_star_vec, facecolor='silver')
plt.fill_between(freq_star_vec, -mu_star_vec-sigma_star_vec, -mu_star_vec+sigma_star_vec, facecolor='grey')
plt.semilogx(freq_star_vec, -mu_star_vec, linewidth=4, color='black', label='GP-HT')
plt.semilogx(freq_vec, -Z_exp.imag, 'o', markersize=5, color='red', label='exp')
plt.xlim(freq_min, freq_max)
plt.xscale('log')
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize=20)
plt.legend(frameon=False, fontsize = 15)
plt.show()
# ## 3) Compute and plot the GP-HT
# ### 3.1) Compute
# +
K_im = GP_hilbert.mat_K(omega_vec, omega_vec, ker_opts, 'im')
Sigma = (sigma_n**2)*np.eye(N_freqs)
K_full = K_im + Sigma + (sigma_L**2)*np.outer(omega_vec, omega_vec)
# begin FC - added
if not GP_hilbert.is_PD(K_full):
K_full = GP_hilbert.nearest_PD(K_full)
# end FC - added
# cholesky factorization
L = np.linalg.cholesky(K_full)
# covariance matrix
inv_L = np.linalg.inv(L)
inv_K_full = np.dot(inv_L.T, inv_L)
# omega star is overwritten
mu_star_vec = np.zeros_like(omega_star_vec)
sigma_star_vec = np.zeros_like(omega_star_vec)
for index, omega_star in enumerate(omega_star_vec):
omega_star_np = np.array([omega_star])
k_star = GP_hilbert.mat_K(omega_vec, omega_star_np, ker_opts, 'im-re').flatten()
k_star_star = GP_hilbert.mat_K(omega_star_np, omega_star_np, ker_opts, 're').flatten()
mu_star_vec[index] = k_star@(inv_K_full@Z_exp.imag)
sigma_star_vec[index] = np.sqrt(sigma_n**2 + k_star_star - k_star@(inv_K_full@k_star))
# -
# ### 3.2) Plot GP-HT prediction vs. experimental data
# +
mu_R_inf = np.mean(Z_exp.real[-29:]-mu_star_vec[-29:])
diff_re = mu_R_inf + mu_star_vec - Z_exp.real
plt.fill_between(freq_star_vec, mu_R_inf+mu_star_vec-3*sigma_star_vec, mu_R_inf+mu_star_vec+3*sigma_star_vec, facecolor='lightgrey')
plt.fill_between(freq_star_vec, mu_R_inf+mu_star_vec-2*sigma_star_vec, mu_R_inf+mu_star_vec+2*sigma_star_vec, facecolor='silver')
plt.fill_between(freq_star_vec, mu_R_inf+mu_star_vec-sigma_star_vec, mu_R_inf+mu_star_vec+sigma_star_vec, facecolor='grey')
plt.semilogx(freq_star_vec, mu_R_inf+mu_star_vec, linewidth=4, color='black', label='GP-HT')
plt.semilogx(freq_vec, Z_exp.real, 'o', markersize=5, color='red', label='exp')
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$Z_{\rm re}/\Omega$', fontsize = 20)
plt.xlim(freq_min, freq_max)
plt.xscale('log')
plt.legend(frameon=False, fontsize = 15)
plt.show()
# -
# ### 3.3) Show the residuals
# +
# Combine the scatter plot with the density distribution
fig = plt.figure(figsize=(12,4.2), constrained_layout=False)
gs1 = fig.add_gridspec(nrows=1, ncols=4, left=0.05, right=0.48, wspace=0.5)
ax1 = fig.add_subplot(gs1[0, :-1])
ax2 = fig.add_subplot(gs1[0, -1])
ax1.fill_between(freq_vec, -3*sigma_star_vec, 3*sigma_star_vec, facecolor='lightgrey')
ax1.fill_between(freq_vec, -2*sigma_star_vec, 2*sigma_star_vec, facecolor='silver')
ax1.fill_between(freq_vec, -sigma_star_vec, sigma_star_vec, facecolor='grey')
ax1.plot(freq_vec, diff_re, 'o', markersize=5, color='red')
ax1.set_xlim(freq_min, freq_max)
ax1.set_ylim(-100, 100)
ax1.set_xscale('log')
ax1.set_xlabel(r'$f/{\rm Hz}$', fontsize=20)
ax1.set_ylabel(r'$\left(R_\infty + Z_{\rm H, re} - Z_{\rm exp, re}\right)/\Omega$', fontsize=20)
# density distribution plot
sns.kdeplot(diff_re, ax=ax2, shade=True, color='grey', vertical=True)
sns.rugplot(diff_re, ax=ax2, color='black', vertical=True)
ax2.set_xlim(0, 0.1)
ax2.set_xticks(np.arange(0., 0.101, 0.05))
ax2.set_yticklabels([])
ax2.set_ylim(-100, 100)
ax2.set_xlabel(r'pdf',fontsize=20)
plt.show()
| tutorials/ex3_drift.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"is_executing": false}
# #### Unpacking a Sequence into Separate Variables
# -
# Unpacking actually works with any object that happens to be iterable, not just tuples or
# lists. This includes strings, files, iterators, and generators.
demo_string = "PyThOn"
a, b, c, d, e, f = demo_string
a
b
a, b, c, d, e = demo_string
# Throwaway Variable Name (like _ here)
a, _, c, d, e, _ = demo_string
# #### Unpacking Elements from Iterables of Arbitrary Length
| data_structures_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from configparser import ConfigParser
from functions.auth.encryption_tools import generate_new_key, encrypt_message, decrypt_message
from functions.auth.connections import postgres_connection
INI_KEY = os.getenv('INI_KEY')
# -
to_save = False
# # Example INI Creation
ini_key = generate_new_key()
ini_key
# # Example Encrypt and Decrypt
secret_enc = encrypt_message('secret', INI_KEY, enc='utf-8')
secret_enc
secret = decrypt_message(secret_enc, INI_KEY, enc='utf-8')
secret
# # Save Encrypted Credentials
# +
cfg = ConfigParser()
cfg['postgres'] = {'username': encrypt_message('secret', INI_KEY),
'password': encrypt_message('secret', INI_KEY),
'host': encrypt_message('secret', INI_KEY),
'port': encrypt_message('secret', INI_KEY)}
if to_save:
with open('cred/config.ini', 'w') as configfile:
cfg.write(configfile)
# -
# # Read Credentials
connection_uri = postgres_connection()
# connection_uri
| practice/authenticate_postgres_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import folium
import requests
import json
import pandas as pd
import numpy as np
## pip install openlostcat
from openlostcat.main_osm_categorizer import MainOsmCategorizer
from ipywidgets import interact
# -
budapest_req = """
[out:json];
(area["name"="Budapest"];) -> .searchArea;
node[tourism=hotel](area.searchArea);
out body;
"""
# +
# pip install openlostcat
from openlostcat.osmqueryutils.ask_osm import ask_osm
budapest_hotels = ask_osm(budapest_req)
len(budapest_hotels['elements'])
# -
bp_hotels = pd.DataFrame([[node['id'], node['lat'], node['lon'], node['tags'].get('name', 'NoName'),
node['tags']] for node in budapest_hotels['elements']],
columns = ['id','lat','lng', 'name', 'tags'])
# +
from openlostcat.osmqueryutils.ask_osm import ask_osm_around_point_df
osm = bp_hotels[["lat", "lng"]].apply(lambda x: ask_osm_around_point_df(x, distance = 300), axis = 1)
bp_hotels["osm"] = osm
# +
# len(bp_hotels.loc[bp_hotels.osm.isna(), "osm"])
# +
# if len(bp_hotels.loc[bp_hotels.osm.isna(), "osm"]) > 0:
# osm_isna = bp_hotels[bp_hotels.osm.isna()][["lat", "lng"]].apply(lambda x: ask_osm_around_point_df(x, distance = 300), axis = 1)
# bp_hotels.loc[bp_hotels.osm.isna(), "osm"] = osm_isna
# +
# len(bp_hotels.loc[bp_hotels.osm.isna(), "osm"])
# +
import time
while len(bp_hotels.loc[bp_hotels.osm.isna(), "osm"]) > 0:
time.sleep(10)
osm_isna = bp_hotels[bp_hotels.osm.isna()][["lat", "lng"]].apply(lambda x: ask_osm_around_point_df(x, distance = 300), axis = 1)
bp_hotels.loc[bp_hotels.osm.isna(), "osm"] = osm_isna
# +
import folium
zero_location = np.array(list(zip(bp_hotels["lat"],bp_hotels["lng"]))).mean(axis=0)
def show_geo(related_geo, color_map, get_color_func):
    """Render the given rows as colored markers on a folium map and display it.

    ``related_geo`` must have 'lat', 'lng', 'name' and 'tags' columns;
    ``get_color_func(df, color_map)`` returns one marker color per row.
    """
    frame = related_geo
    # Coordinate pairs, one (lat, lng) row per point.
    points = np.array(list(zip(frame["lat"], frame["lng"])))
    # Escape names/tags to work around a folium encoding issue:
    # https://github.com/python-visualization/folium/issues/1320
    labels = [str(name.encode('raw_unicode_escape'))[2:-1] for name in frame.name]
    details = [str(str(tag).encode('raw_unicode_escape'))[2:-1] for tag in frame.tags]
    marker_colors = get_color_func(frame, color_map)
    # Center on the mean of the shown points; fall back to the global center
    # when the selection is empty.
    if len(points) > 0:
        center = points.mean(axis=0)
    else:
        center = zero_location
    fmap = folium.Map(
        location=center,
        zoom_start=12,
        tiles='Stamen Terrain'
    )
    for idx, point in enumerate(points):
        marker = folium.Marker(
            point,
            popup='<i>%s</i>' % details[idx],
            tooltip=str(labels[idx]),
            icon=folium.Icon(color=marker_colors[idx]),
        )
        marker.add_to(fmap)
    display(fmap)
# -
# ## Public transport
# +
categorizer = MainOsmCategorizer('rules/publictransport_rules.json')
print(categorizer.get_categories_enumerated_key_map())
print(categorizer)
bp_hotels["pt_cat"] = [i[0] for i in bp_hotels.osm.map(categorizer.categorize)]
# -
bp_hotels.pt_cat.value_counts()
# +
color_map_pt = dict(zip([0, 1, 2], ["green", "orange", "red"]))
get_color_func_pt = lambda df, color_map: list(df["pt_cat"].apply(lambda x: color_map.get(x, "black")))
key_map_pt = dict(zip(['pt_primary_accessible', 'pt_accessible', 'pt_nonaccessible'], [0, 1, 2]))
transport = ['All', 'pt_primary_accessible', 'pt_accessible', 'pt_nonaccessible']
@interact(pt_cat=transport)
def get_transport(pt_cat):
    """Interactively show hotels filtered by public-transport category."""
    if pt_cat == 'All':
        selected = bp_hotels
    else:
        selected = bp_hotels[bp_hotels.pt_cat == key_map_pt[pt_cat]]
    show_geo(selected, color_map_pt, get_color_func_pt)
# -
# ## Nearby
# +
categorizer2 = MainOsmCategorizer('rules/nearby.json')
print(categorizer2.get_categories_enumerated_key_map())
print(categorizer2)
nb_cat = [[cat[0] for cat in cat_list] for cat_list in bp_hotels.osm.map(categorizer2.categorize)]
bp_hotels["water_nearby"] = [0 in cats for cats in nb_cat]
bp_hotels["calm_streets"] = [1 in cats for cats in nb_cat]
# -
print("calm_streets and water: " + str(bp_hotels[bp_hotels.water_nearby & bp_hotels.calm_streets].shape[0]))
print("only water: " + str(bp_hotels[(bp_hotels.water_nearby) & (bp_hotels.calm_streets == False)].shape[0]))
print("only calm_streets: " + str(bp_hotels[(bp_hotels.water_nearby == False) & (bp_hotels.calm_streets)].shape[0]))
print("nothing: " + str(bp_hotels[(bp_hotels.water_nearby == False) & (bp_hotels.calm_streets == False)].shape[0]))
color_map_nb = {
(True, False): "blue",
(False, True): "green",
(True, True): "purple",
(False, False): "black"
}
get_color_func_nb = lambda df, color_map: list(map(lambda x: color_map.get(x, "black"), list(zip(df.water_nearby, df.calm_streets))))
nearby = ['All', 'water_nearby', 'calm_streets', 'both', 'none']
@interact(nearby_cat=nearby)
def get_nerby(nearby_cat):
    """Interactively show hotels filtered by nearby-feature category."""
    # Map each category label to the row filter it selects.
    filters = {
        'water_nearby': lambda df: df[df.water_nearby],
        'calm_streets': lambda df: df[df.calm_streets],
        'both': lambda df: df[df.water_nearby & df.calm_streets],
        'none': lambda df: df[(df.water_nearby == False) & (df.calm_streets == False)],
    }
    # Unknown labels (e.g. 'All') fall through to the identity filter.
    chosen = filters.get(nearby_cat, lambda df: df)
    show_geo(chosen(bp_hotels), color_map_nb, get_color_func_nb)
# ## Preferred: Mix
# +
categorizer3 = MainOsmCategorizer('rules/mix.json')
print(categorizer3.get_categories_enumerated_key_map())
print(categorizer3)
bp_hotels["preferred_cat"] = [i[0] for i in bp_hotels.osm.map(categorizer3.categorize)]
# -
bp_hotels.preferred_cat.value_counts()
# +
color_map_mix = dict(zip([0, 1, 2, 3], ["green", "blue", "orange", "black"]))
get_color_func_mix = lambda df, color_map: list(df["preferred_cat"].apply(lambda x: color_map.get(x, "black")))
key_map_mix = dict(zip(['1st_preferred_location', '2nd_preferred_location', '3rd_preferred_location', 'not_preferred_location'], [0, 1, 2, 3]))
preffered = ['All', '1st_preferred_location', '2nd_preferred_location', '3rd_preferred_location', 'not_preferred_location']
@interact(preffered_cat=preffered)
def get_nerby(preffered_cat):
    """Interactively show hotels filtered by preferred-location category."""
    if preffered_cat == 'All':
        selected = bp_hotels
    else:
        selected = bp_hotels[bp_hotels.preferred_cat == key_map_mix[preffered_cat]]
    show_geo(selected, color_map_mix, get_color_func_mix)
# -
# +
categorizer4 = MainOsmCategorizer('rules/wheelchair.json')
print(categorizer4.get_categories_enumerated_key_map())
print(categorizer4)
bp_hotels["wheelchair"] = [i[0] for i in bp_hotels.osm.map(categorizer4.categorize)]
# -
bp_hotels.wheelchair.value_counts()
# +
color_map_wc = dict(zip([1, 2, 0], ["green", "blue", "black"]))
get_color_func_wc = lambda df, color_map: list(df["wheelchair"].apply(lambda x: color_map.get(x, "black")))
key_map_wc = dict(zip(['wheelchair_shopping_paradise', 'wheelchair_shopping', 'no_wheelchair_shopping'], [1, 2, 0]))
wc = ['All', 'wheelchair_shopping_paradise', 'wheelchair_shopping', 'no_wheelchair_shopping']
@interact(wc_cat=wc)
def get_wc(wc_cat):
    """Interactively show hotels filtered by wheelchair-shopping category."""
    if wc_cat == 'All':
        selected = bp_hotels
    else:
        selected = bp_hotels[bp_hotels.wheelchair == key_map_wc[wc_cat]]
    show_geo(selected, color_map_wc, get_color_func_wc)
| examples/Budapest_hotels_categorization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''harmonizome'': venv)'
# name: python38064bitharmonizomevenve1fd62c035f347dbaffce382c9034785
# ---
# # Harmonizome ETL: Orphanet
# Created by: <NAME>
#
# Data Source: http://www.orphadata.org/cgi-bin/index.php
# appyter init
from appyter import magic
magic.init(lambda _=globals: _())
# +
import sys
import os
from datetime import date
import numpy as np
import pandas as pd
import itertools
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
# %matplotlib inline
import harmonizome.utility_functions as uf
import harmonizome.lookup as lookup
# -
# %load_ext autoreload
# %autoreload 2
# ### Notebook Information
print('This notebook was run on:', date.today(), '\nPython version:', sys.version)
# # Initialization
# +
# %%appyter hide_code
{% do SectionField(
name='data',
title='Upload Data',
img='load_icon.png'
) %}
# +
# %%appyter code_eval
{% do DescriptionField(
name='description',
text='The example below was sourced from <a href="http://www.orphadata.org/cgi-bin/index.php" target="_blank">www.orphadata.org</a>. If clicking on the example does not work, it should be downloaded directly from the source website.',
section='data'
) %}
{% set tree_file = FileField(
constraint='.*\.xml$',
name='gene_diseases',
label='Genes Associated with Rare Diseases (xml)',
default='en_product6.xml',
examples={
'en_product6.xml': 'http://www.orphadata.org/data/xml/en_product6.xml'
},
section='data'
) %}
# -
# ### Load Mapping Dictionaries
symbol_lookup, geneid_lookup = lookup.get_lookups()
# ### Output Path
# +
output_name = 'orphanet'
path = 'Output/Orphanet'
if not os.path.exists(path):
os.makedirs(path)
# -
# # Load Data
# +
# %%appyter code_exec
tree = ET.parse({{tree_file}})
root = tree.getroot()
# -
for disorder in itertools.islice(root.iter('Disorder'), 10):
print(disorder.find('Name').text)
# # Pre-process Data
# ## Extract Relevant Data
disorders = []
genes = []
for disorder in root.iter('Disorder'):
disorders.append(disorder.find('Name').text)
genes.append([gene.find('Symbol').text for gene in disorder.iter('Gene')])
df = pd.DataFrame({'Gene Symbol': genes, 'Disorder': disorders})
df.head()
# ## Split Gene Lists
df = df.explode('Gene Symbol').set_index('Gene Symbol')
df.head()
# # Filter Data
# ## Map Gene Symbols to Up-to-date Approved Gene Symbols
df = uf.map_symbols(df, symbol_lookup, remove_duplicates=True)
df.shape
# # Analyze Data
# ## Create Binary Matrix
binary_matrix = uf.binary_matrix(df)
binary_matrix.head()
binary_matrix.shape
uf.save_data(binary_matrix, path, output_name + '_binary_matrix',
compression='npz', dtype=np.uint8)
# ## Create Gene List
gene_list = uf.gene_list(binary_matrix, geneid_lookup)
gene_list.head()
gene_list.shape
uf.save_data(gene_list, path, output_name + '_gene_list',
ext='tsv', compression='gzip', index=False)
# ## Create Attribute List
attribute_list = uf.attribute_list(binary_matrix)
attribute_list.head()
attribute_list.shape
uf.save_data(attribute_list, path, output_name + '_attribute_list',
ext='tsv', compression='gzip')
# ## Create Gene and Attribute Set Libraries
uf.save_setlib(binary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')
uf.save_setlib(binary_matrix, 'attribute', 'up', path,
output_name + '_attribute_up_set')
# ## Create Attribute Similarity Matrix
attribute_similarity_matrix = uf.similarity_matrix(binary_matrix.T, 'jaccard', sparse=True)
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path,
output_name + '_attribute_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene Similarity Matrix
gene_similarity_matrix = uf.similarity_matrix(binary_matrix, 'jaccard', sparse=True)
gene_similarity_matrix.head()
uf.save_data(gene_similarity_matrix, path,
output_name + '_gene_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene-Attribute Edge List
edge_list = uf.edge_list(binary_matrix)
uf.save_data(edge_list, path, output_name + '_edge_list',
ext='tsv', compression='gzip')
# # Create Downloadable Save File
uf.archive(path)
# ### Link to download output files: [click here](./output_archive.zip)
| appyters/Orphanet_Harmonizome_ETL/Orphanet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font color=blue>NOTE: Videos and graphs do not display immediately on all devices. If this is the case for you, click the "Cell" menu option, and select "Run All". If you still do not see the videos/graphs, try a different browser. If that also does not work, please let your instructor know by email.</font>
#
# # Definite Integrals for Future Value
#
# In this notebook, we concern ourselves with the determination of the future value of a quantity of interest (such as the amount of water in a reservoir, the amount of pollution in a lake, the number of individuals in a population, etc.).
#
# In Sections 1-3, we return to a very familiar concept, namely the calculation of <b>net or cumulative change</b> of a quantity by integrating the known rate of change of that quantity with respect to time. This readily leads to the determination of the future value.
#
# In Sections 4-6, we introduce a new concept, namely the interaction of a renewal function (representing the rate at which the quantity increases) and a survival function (representing the proportion of a quantity that survives for a certain period of time). We use the "slicing and integrating method" to develop a formula for future value.
#
#
# ### 1. An Old Idea: Interpreting a Definite Integral in Terms of Net Change or Cumulative Change
#
# From the Fundamental Theorem of Calculus, we know that
# $$\int_a^b f(x) \ dx = F(b) - F(a),$$
# where $F$ is any antiderivative of $f(x)$.
#
# A common first application of the Fundamental Theorem of Calculus is in the determination of displacement of an object with given velocity function. Suppose the position of an object as a function of time is given by $s(t)$. The velocity of the object as a function of time is $v(t) = s'(t)$, that is, $v$ is an antiderivative of $s$. Then
# $$\int_a^b v(t) \ dt = s(b) - s(a).$$
# We interpret the latter as the displacement of the object from $t=a$ to $t=b$.
#
# If we let $a=0$ and $b=T$, then it immediately follows that
# $$s(T) = s(0) + \int_0^T v(t) \ dt.$$
#
# In words, we can interpret the latter as "the position at future time $T$ is the current position (at time 0) plus the net or cumulative change in position".
#
# ### 2. Generalization: Future Value from Net Change or Cumulative Change
#
# We can generalize the above ideas to other quantities, such as the amount of water in a reservoir, the amount of pollution in a lake, the number of individuals in a population, etc.
#
# Suppose a quantity $Q$ changes over time at a known rate $Q'(t)$. Then the net (cumulative) change in Q between $t=a$ and $t=b$ is $$Q(b) - Q(a) = \int_a^b Q'(t) \ dt,$$
# and the <b>future value</b> of $Q$ at time $T$ is $$Q(T) = Q(0) + \int_0^T Q'(t) \ dt.$$
#
# ### 3. Example of Future Value from Net Change or Cumulative Change
#
# <font size="1">[Credit: <NAME> and <NAME>, Calculus: Early Transcendentals, 2011, Pearson Educationc Inc. ]</font>
#
# A culture of cells in a lab has a population of 100 cells when nutrients are added at time $t=0$. Suppose the population $N(t)$ increases at a rate given by $\displaystyle N'(t) = 90 e^{-0.1t}$ cells/hr.
#
# Then the population at future time $T$ is
# \begin{align}
# N(T)
# & = N(0) + \int_0^T N'(t) \ dt \\
# & = 100 + \int_0^T 90 e^{-0.1t} \ dt \\
# & = 100 + \left. \left( \frac{90}{-0.1} e^{-0.1t} \right) \right|_0^T \\
# & = 1000 - 900 e^{-0.1T}.
# \end{align}
#
# Note that this population increases, but at a decreasing rate. As $t \to \infty$, the population size approaches 1000 cells.
#
# ### 4. A New Idea: Future Value from Survival and Renewal
#
# For some quantities of interest, the effective rate of change is not known, but there is information about other processes (which we will refer to as survival and renewal) affecting change that allow us to determine the future value of the quantity.
#
# Renewal refers to the rate at which the quantity Q increases. Let $R(t)$ be the renewal function.
# - If $Q$ represents the size of a population, then $R(t)$ represents the rate at which new members are added to the population at time $t$.
# - If $Q$ represents the amount of a drug in the body, then $R(t)$ represents the amount of drug administered at time $t$.
#
# Survival refers to the proportion of the quantity $Q$ that survives or remains. Let $S(t)$ be the survival function.
# - If $Q$ represents the size of a population, then $S(t)$ represents the proportion of the population that survives at least $t$ time units from now.
# - If $Q$ represents the amount of a drug in the body, then $S(t)$ represents the proportion of the drug that remains in the body at least $t$ time units from now.
# >For example, if $S(10) = 0.7$, then 70% of the current quantity $Q$ survives/remains after $10$ time units.
#
# As before in Section 2, we are interested in determining $Q(T)$, the value of $Q$ at future time $T$. We need to do this from our knowledge of $Q(0)$ and the renewal and survival functions, $R(t)$ and $S(t)$, respectively.
#
# > <font color=red><b>It is key to realize that the renewal and survival processes referred to here are not independent; they interact and work simultaneously.</b> In general, more of a small quantity added late in time will survive/remain at future time $T$ than a small quantity added earlier in time.</font>
#
# ### 5. Slicing Time and Integrating to determine Future Value from Survival and Renewal
#
# Let's determine first what happens to the initial quantity $Q(0)$.
# - The <i>proportion</i> of $Q(0)$ that survives/remains at time $T$ is $S(T)$.
# - The <i>amount</i> of $Q(0)$ that survives/remains at time $T$ is the product, that is,
# $$S(T) \cdot Q(0).$$
#
# Now focus on what happens during the time interval $[0,T]$. We slice time, focus on a small infinitessimal time period $dt$ at generic time $t$, as illustrated below, and integrate.
#
# 
#
# - The small quantity added during this infinitessimal time period $dt$ is "rate of renewal times time", or $R(t) \ dt.$
# - The length of the time interval $[t,T]$ is $T-t$. Thus, the <i>proportion</i> of the small quantity added that survives/remains at time $T$ is $S(T-t).$
# - The <i>amount</i> of the small quantity that is added at time $t$ AND that survives/remains at time $T$ is the product of the above, that is, $$dQ = S(T-t) \cdot R(t) \ dt.$$
# - The total quantity that is added during the time period from $t=0$ to $t=T$ AND that survives/remains at time $T$ is $$\int dQ = \int_0^T S(T-t) \cdot R(t) \ dt.$$
#
# Putting everything together, we have that $Q(T)$ is the initial amount that survives/remains at time $T$ PLUS the amount that has been added during the time interval $[0,T]$ and survives/remains at time $T$, namely
# \begin{align}
# Q(T)
# & = S(T) \cdot Q(0) + \int dQ \\
# & = S(T) \cdot Q(0) + \int_0^T S(T-t) \cdot R(t) \ dt.
# \end{align}
#
# ### 6. Example of Future Value from Survival and Renewal
#
# <font size="1">[Credit: <NAME> and <NAME>, Biocalculus: Calculus for the Life Sciences, 2015, Cengage Learning.]</font>
#
# Consider a lake with trout. There currently are 5600 trout in the lake. The trout are reproducing at the rate $\displaystyle R(t) = 720 e^{0.1t}$ trout/year. However, pollution is killing many of the trout; the proportion that survive after $t$ years is given by $\displaystyle S(t) = e^{-0.2t}$. How many trout will be in the lake 10 years from now?
#
# Let $P(t)$ be the number of trout in the lake at time $t$, measured in years.
#
# We are given $P(0) = 5600$ trout, and $T = 10$ years. Then
# \begin{align}
# P(10)
# & = S(10) \cdot P(0) + \int_0^{10} S(10-t) \cdot R(t) \ dt \\
# & = e^{-0.2(10)} \cdot 5600 + \int_0^{10} e^{-0.2(10-t)} \cdot 720 e^{0.1t} \ dt \\
# & = 5600 e^{-2} + 720 \int_0^{10} e^{-2+0.2t+0.1t} \ dt \\
# & = 5600 e^{-2} + 720 e^{-2} \int_0^{10} e^{0.3t} \ dt \\
# & = 5600 e^{-2} + 720 e^{-2} \left. \left( \frac{ e^{0.3t} }{0.3} \right) \right|_0^{10} \\
# & = 5600 e^{-2} + \frac{ 720 e^{-2} }{0.3} \left( e^{3} - 1 \right) \\
# & = 5600 e^{-2} + 2400 \left( e - e^{-2} \right) \\
# & \approx 6956.95.
# \end{align}
# Thus, we expect the number of trout in the lake 10 years from now to be about 6957.
#
# ### 7. Summary
#
# <b>Future Value from Net Change or Cumulative Change:</b>
#
# Suppose a quantity $Q$ changes over time at a known rate $Q'(t)$.
# - The net (cumulative) change in Q between $t=a$ and $t=b$ is $$Q(b) - Q(a) = \int_a^b Q'(t) \ dt.$$
# - The <b>future value</b> of $Q$ at time $T$ is $$Q(T) = Q(0) + \int_0^T Q'(t) \ dt.$$
#
# <b>Future Value from Survival and Renewal:</b>
#
# Suppose a quantity $Q(t)$ has initial value $Q(0)$, renewal function $R(t)$ that represents the rate at which the quantity increases, and survival function $S(t)$ that represents the proportion of the quantity that survives/remains after $t$ years.
# - The <b>future value</b> of $Q$ at time $T$ is
# $$Q(T) = S(T) \cdot Q(0) + \int_0^T S(T-t) \cdot R(t) \ dt.$$
#
# ### 8. Further Study
#
# Please refer to Section 6.3 in the textbook for additional treatment of this topic.
#
# ### 9. Don't Forget
#
# Don't forget to return to eClass to complete the pre-class quiz.
#
# <font color=blue>In the pre-class quiz feedback question, please let me know if this notebook would benefit from one or two videos. If so, which section would you like to see become a video? The derivation of Future Value in Section 5, or the example in Section 6, or both? Thank you!</font>
#
| DefiniteIntegralsForFutureValue.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Weighted Average calculation using GroupBy function with Pandas
# This notebook is useful for calculating a weighted average based on a groupby condition in Python.
#
# Furthermore, you can also find relevant information in the articles below:
#
# https://pbpython.com/weighted-average.html
#
# https://github.com/chris1610/pbpython/blob/master/notebooks/Learn_Pandas-Weighted_Average.ipynb
#
# https://www.reddit.com/r/learnpython/comments/5qojxx/perform_a_weighted_average_using_groupby/
#
#
import pandas as pd
# +
# Example Dataset
df = pd.read_excel('https://github.com/chris1610/pbpython/blob/master/data/sales-estimate.xlsx?raw=True', sheet_name='projections')
df
# -
# Weighted Average calculation using GroupBy
def weighted_avg(df, groupby_name, value, counts):
    """Compute per-group weighted averages.

    Args:
        df: input DataFrame.
        groupby_name: list of column names to group by.
        value: name of the column holding the values to average.
        counts: name of the column holding the weights.

    Returns:
        DataFrame with the group-key columns and a 'Weighted_avg' column.
    """
    def _group_weighted_mean(group):
        # sum(value * weight) / sum(weight) within one group
        return (group[value] * group[counts]).sum() / group[counts].sum()

    averages = df.groupby(groupby_name).apply(_group_weighted_mean)
    result = pd.DataFrame(averages, columns=['Weighted_avg'])
    result.reset_index(inplace=True)
    return result
# +
# Call Function "weighted_avg"
df_wavg = weighted_avg(df, ['Rep', 'Manager'], 'Current_Price', 'Quantity')
df_wavg
| weighted_avg_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('genv')
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import adscores
from adscores import get_perclass_scores, score_perclass_2_records
scores = get_perclass_scores(dataset="mvtecad", metric="pixel_wise_auroc")
f"{len(scores)=} scores"
records = [
r
for s in scores
for r in score_perclass_2_records(s)
]
f"{len(records)=}"
import pandas as pd
df = pd.DataFrame.from_records(data=records)
df.columns
df.pivot_table(index="class", columns="method-abbreviation", values="score")
methods_df = df[["method", "method-abbreviation", "method-reference", 'source']].drop_duplicates()
methods_df
# +
def make_method_abbrev_ref_src(row):
    """Format one method row as a LaTeX bullet line with \\cite commands."""
    method = row['method']
    abbrev = row['method-abbreviation']
    reference = row['method-reference']
    source = row['source']
    return "- %s (%s) \\cite{%s} (source: \\cite{%s})" % (method, abbrev, reference, source)
# One LaTeX bullet line per method, joined into a single printable string.
methods_names_str = "\n".join(methods_df.apply(axis=1, func=make_method_abbrev_ref_src).values.tolist())
print(methods_names_str)
# -
# BUG FIX: `method_abbrev_to_rename` was never defined anywhere in this
# notebook, so the original `.rename(...)` call raised a NameError. Default to
# an identity (empty) mapping; add abbreviation -> display-name entries here
# when specific columns should be renamed for the LaTeX table.
method_abbrev_to_rename = {}

# (dataset, class) rows, one column per method abbreviation.
table = df.pivot_table(
    index=("dataset", "class"),
    columns="method-abbreviation",
    values="score",
).rename(columns=method_abbrev_to_rename)
table_latex_str = table.to_latex()
table
| dev/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
from tqdm import tqdm_notebook as tqdm
# I'm going to move all the files so the campaign names are consistent.
curves = '/Users/localhost/Desktop/Projects/Working/CFA/visioneering-deeplearning/experiments/planet_discriminator/local_cache/original_k2_data/curves'
bls = '/Users/localhost/Desktop/Projects/Working/CFA/visioneering-deeplearning/experiments/planet_discriminator/local_cache/original_k2_data/bls'
def _normalize_campaign_dir(directory):
    """Rename every campaign folder in *directory* to its lowercased
    two-character prefix (e.g. 'C1_Something' -> 'c1')."""
    campaigns = os.listdir(directory)
    # macOS Finder drops .DS_Store files into directories; ignore the entry
    # instead of crashing (list.remove raises ValueError when it is absent,
    # which the original code did on a clean checkout).
    if '.DS_Store' in campaigns:
        campaigns.remove('.DS_Store')
    for campaign in tqdm(campaigns):
        old_path = os.path.join(directory, campaign)
        new_path = os.path.join(directory, campaign[:2].lower())
        os.rename(old_path, new_path)


# The two directory trees followed the exact same renaming logic, so the
# duplicated loops are collapsed into one helper.
_normalize_campaign_dir(curves)
_normalize_campaign_dir(bls)
| CFA/.ipynb_checkpoints/Format_Dataset-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py36]
# language: python
# name: conda-env-py36-py
# ---
# # 10.8 文本情感分类:使用卷积神经网络(textCNN)
# +
import os
import torch
from torch import nn
import torchtext.vocab as Vocab
import torch.utils.data as Data
import torch.nn.functional as F
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l

# Pin CUDA to GPU 0 when available; fall back to CPU otherwise.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Root directory of the local dataset copies (IMDb, GloVe caches).
DATA_ROOT = "/S1/CSCL/tangss/Datasets"
print(torch.__version__, device)
# -
# ## 10.8.1 一维卷积层
def corr1d(X, K):
    """One-dimensional cross-correlation of sequence X with kernel K.

    Slides K across X and returns a float tensor of length
    len(X) - len(K) + 1 holding the windowed dot products.
    """
    width = K.shape[0]
    out = torch.zeros(X.shape[0] - width + 1)
    for idx in range(out.shape[0]):
        window = X[idx: idx + width]
        out[idx] = torch.sum(window * K)
    return out
# Sanity check: kernel [1, 2] over 0..6 yields [2., 5., 8., 11., 14., 17.].
X, K = torch.tensor([0, 1, 2, 3, 4, 5, 6]), torch.tensor([1, 2])
corr1d(X, K)
# +
def corr1d_multi_in(X, K):
    """Multi-input-channel 1D cross-correlation.

    Iterates over dim 0 (the channel dimension) of X and K, correlates each
    channel pair with `corr1d`, stacks the per-channel results, and sums them
    along the channel dimension.
    """
    per_channel = [corr1d(x_ch, k_ch) for x_ch, k_ch in zip(X, K)]
    return torch.stack(per_channel).sum(dim=0)
# Three input channels correlated with three matching kernels, then summed.
X = torch.tensor([[0, 1, 2, 3, 4, 5, 6],
                  [1, 2, 3, 4, 5, 6, 7],
                  [2, 3, 4, 5, 6, 7, 8]])
K = torch.tensor([[1, 2], [3, 4], [-1, -3]])
corr1d_multi_in(X, K)
# -
# ## 10.8.2 时序最大池化层
class GlobalMaxPool1d(nn.Module):
    """Max pooling over the whole time (sequence) dimension."""

    def __init__(self):
        super(GlobalMaxPool1d, self).__init__()

    def forward(self, x):
        # x: (batch_size, channel, seq_len). A pooling window covering the
        # entire sequence reduces it to (batch_size, channel, 1).
        seq_len = x.shape[2]
        return F.max_pool1d(x, kernel_size=seq_len)
# ## 10.8.3 读取和预处理IMDb数据集
batch_size = 64
# Expects the aclImdb archive extracted under DATA_ROOT/aclImdb.
train_data = d2l.read_imdb('train', data_root=os.path.join(DATA_ROOT, "aclImdb"))
test_data = d2l.read_imdb('test', data_root=os.path.join(DATA_ROOT, "aclImdb"))
vocab = d2l.get_vocab_imdb(train_data)
# preprocess_imdb returns (features, labels) tensors for TensorDataset.
train_set = Data.TensorDataset(*d2l.preprocess_imdb(train_data, vocab))
test_set = Data.TensorDataset(*d2l.preprocess_imdb(test_data, vocab))
train_iter = Data.DataLoader(train_set, batch_size, shuffle=True)
test_iter = Data.DataLoader(test_set, batch_size)
# ## 10.8.4 textCNN模型
class TextCNN(nn.Module):
    """textCNN sentiment classifier.

    Two embedding tables (one trainable, one frozen after pretrained vectors
    are copied in), parallel 1D convolutions with several kernel sizes,
    global max-over-time pooling, dropout, and a 2-way linear decoder.
    """

    def __init__(self, vocab, embed_size, kernel_sizes, num_channels):
        super(TextCNN, self).__init__()
        self.embedding = nn.Embedding(len(vocab), embed_size)
        # Second embedding table; its weights are frozen by the caller.
        self.constant_embedding = nn.Embedding(len(vocab), embed_size)
        self.dropout = nn.Dropout(0.5)
        self.decoder = nn.Linear(sum(num_channels), 2)
        # Max-over-time pooling has no weights, so one shared instance suffices.
        self.pool = GlobalMaxPool1d()
        self.convs = nn.ModuleList()  # one Conv1d per kernel size
        for out_channels, width in zip(num_channels, kernel_sizes):
            self.convs.append(nn.Conv1d(in_channels=2 * embed_size,
                                        out_channels=out_channels,
                                        kernel_size=width))

    def forward(self, inputs):
        # Concatenate both (batch, seq_len, embed_size) embedding outputs
        # along the vector dimension -> (batch, seq_len, 2*embed_size).
        embedded = torch.cat(
            (self.embedding(inputs), self.constant_embedding(inputs)), dim=2)
        # Conv1d wants channels first: (batch, 2*embed_size, seq_len).
        embedded = embedded.permute(0, 2, 1)
        # Each conv -> ReLU -> max-over-time pool gives (batch, channels, 1);
        # drop the trailing dim and concatenate along the channel dimension.
        pooled = [self.pool(F.relu(conv(embedded))).squeeze(-1)
                  for conv in self.convs]
        encoding = torch.cat(pooled, dim=1)
        # Dropout, then the fully connected decoder produces the 2 class logits.
        return self.decoder(self.dropout(encoding))
embed_size, kernel_sizes, nums_channels = 100, [3, 4, 5], [100, 100, 100]
net = TextCNN(vocab, embed_size, kernel_sizes, nums_channels)
# ### 10.8.4.1 Load the pretrained word vectors
# Downloads/caches GloVe 6B 100-d vectors under DATA_ROOT/glove.
glove_vocab = Vocab.GloVe(name='6B', dim=100, cache=os.path.join(DATA_ROOT, "glove"))
# Both embedding tables start from GloVe; only the first one is fine-tuned.
net.embedding.weight.data.copy_(
    d2l.load_pretrained_embedding(vocab.itos, glove_vocab))
net.constant_embedding.weight.data.copy_(
    d2l.load_pretrained_embedding(vocab.itos, glove_vocab))
net.constant_embedding.weight.requires_grad = False
# ### 10.8.4.2 Train and evaluate the model
lr, num_epochs = 0.001, 5
# Optimise only parameters that require gradients (excludes the frozen table).
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)
loss = nn.CrossEntropyLoss()
d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)
d2l.predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'great'])
d2l.predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'bad'])
| code/chapter10_natural-language-processing/10.8_sentiment-analysis-cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
# +
# Numpy array of blogs
# Load the pre-split training blogs (object array of [author ID, blog text]).
train_data = np.load("splits/split_0.npy", allow_pickle=True)
train_data = pd.DataFrame(data=train_data, columns=['ID', 'BLOG'])
train_data
# -
# Normalise the text: collapse non-word characters and repeated whitespace to
# single spaces, lowercase, and pad with spaces so whole-word regex matches work.
train_data['BLOG'] = train_data['BLOG'].str.replace('\W+', ' ').str.replace('\s+', ' ').str.strip()
train_data['BLOG'] = ' ' + train_data['BLOG'].str.lower() + ' '
vocabulary = list(set(train_data['BLOG'].str.split().sum()))
# Boolean blog-by-word matrix: does blog i contain word w as a whole word?
author_uses_word = pd.DataFrame(0, index=train_data.index, columns=vocabulary)
author_uses_word = author_uses_word.apply(lambda series: train_data['BLOG'].str.contains(r' %s ' % series.name))
# Keep only words that appear in more than 5 blogs.
author_uses_word = author_uses_word.loc[:, author_uses_word.sum(axis=0) > 5]
author_uses_word = pd.concat([train_data.drop(columns=['BLOG']), author_uses_word], axis=1)
author_uses_word
# +
def information_gain(contains_word):
    """Information gain of each word column for predicting the author ID.

    `contains_word` is a dataframe with an 'ID' column plus one boolean
    column per word (True when that blog contains the word).
    """
    # "Parent" entropy: logical_or(..., True) forces every cell truthy before
    # the entropy call. NOTE(review): this also coerces the 'ID' column, so
    # entropy() then groups on a constant column — verify this matches the
    # intended parent-entropy term of the IG formula.
    parent_entropy = entropy(np.logical_or(contains_word, True))
    proportion_true = split_proportion(contains_word)
    true_entropy = entropy(contains_word)
    proportion_false = split_proportion(np.logical_not(contains_word))
    false_entropy = entropy(np.logical_not(contains_word))
    # Gain = parent entropy minus the proportion-weighted child entropies.
    return parent_entropy - (proportion_true * true_entropy + proportion_false * false_entropy)
def split_proportion(contains_word):
    """Per-word fraction of blogs that fall into the True branch."""
    return contains_word.drop(columns=['ID']).sum() / len(contains_word)
def entropy(contains_word):
    """Shannon entropy (per word column) of the author distribution."""
    # Silence divide/log warnings; the NaN terms produced are patched below.
    np.seterr('ignore')
    authors = contains_word.groupby('ID').aggregate(np.sum)
    author_proportion = authors / authors.sum()
    # 0/0 yields NaN; replacing with 1 makes its -p*log2(p) term equal 0.
    # (-np.nan compares equal to np.nan, so the list is effectively one value.)
    author_proportion = author_proportion.replace([-np.nan, np.nan], 1.)
    term = -1. * author_proportion * np.log2(author_proportion)
    term = term.replace([-np.nan, np.nan], 0.)
    np.seterr('warn')
    return term.sum()
word_information_gain = information_gain(author_uses_word)
# Keep the 500 highest-gain words.
best_words = np.array(word_information_gain.sort_values(ascending=False)[:500].index, dtype=str)
# -
word_information_gain.sort_values(ascending=False)[:500]
# Shuffle the top words and take 50 for the cloud. NOTE(review):
# np.random.choice here samples WITH replacement (no replace=False), so the
# 50-word sample can contain duplicates — confirm this is intended.
best_words_sample = np.random.choice(best_words, len(best_words))[:50]
# Weight each sampled word by its information gain.
word_weights = {word: word_information_gain[word] for word in best_words_sample}
wordcloud = WordCloud().generate_from_frequencies(word_weights)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
wordcloud.to_file('best_words_3.png')
| notebooks/develop/archive/best_words.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .venv
# language: python
# name: .venv
# ---
# # Intro
#
# This is the tutorial of ipyvizzu - the Jupyter Notebook integration of the free, open-source JavaScript library Vizzu. You can create animated charts, data stories, and interactive explorers with it. This is an excellent place to start using ipyvizzu, as it walks you through the installation of the library, introduces the logic it employs and the different settings that control how your animated charts look and behave.
# ## Installation
#
# ipyvizzu requires `IPython` and `pandas` packages.
# However, since it can only be used in Jupyter Notebook, the `notebook` package has to be installed as well.
#
# ```sh
# pip install ipyvizzu
# pip install notebook
# ```
#
# ## [Optional] Using locally installed Vizzu
#
# ipyvizzu downloads Vizzu from [jsDelivr](https://www.jsdelivr.com/package/npm/vizzu?version=~0.4.0) CDN by default,
# but a local copy of it can be used like this:
#
# ```sh
# npm install vizzu@~0.4.0
# ```
#
# To use the locally installed Vizzu, you have to set the Vizzu Javascript file's location in the constructor of the Chart class.
#
# ```python
# from ipyvizzu import Chart
#
# chart = Chart(vizzu="./node_modules/vizzu/dist/vizzu.min.js")
# ```
#
# ## The basic logic of Vizzu
#
# The foundation of a Vizzu chart used in ipyvizzu is the animation. The animation contains states describing the chart's configuration, such as the data series, coordinate system, labels, titles, etc. A static chart is the result of a single animation state. When there are more states, ipyvizzu automatically transitions between these. The animate method initiates the animation into a new state by describing the new chart and how Vizzu should transition to it. The return value of the animate method is a promise that will be resolved after the animation is completed. Using this logic you can create a promise chain of animation from state to state.
#
# <p align="center">
# <img src="https://github.com/vizzuhq/ipyvizzu/raw/main/docs/tutorial/code_structure.svg" alt="Vizzu" />
# </p>
#
# The animate method has two parameters in total. The first parameter sets the chart, and the (optional) second determines how Vizzu should animate to that state.
#
# The first parameter has the following three properties:
#
# - data - this is where you add the data that you want to put on the charts.
# - config - this is where you can add or remove series on the channels and set the general settings of the chart like the chart title, the geometry, the alignment etc.
# - style - this is where you can set how your chart looks.
# Next chapter: [Adding data](./data.ipynb) ----- Back to the [Table of contents](../doc.ipynb#tutorial)
| docs/tutorial/intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
w = {}
import os
import tensorflow as tf
from dqn.ops import linear
from dqn.utils import get_time, save_pkl, load_pkl
from dqn.ops import conv2d
import numpy as np
# An 8-step, 2-slot one-hot observation sequence, shape (8, 2, 1).
obs=np.array([[[ 1.],
               [ 0.]],
              [[ 0.],
               [ 1.]],
              [[ 1.],
               [ 0.]],
              [[ 0.],
               [ 1.]],
              [[ 0.],
               [ 1.]],
              [[ 0.],
               [ 1.]],
              [[ 0.],
               [ 1.]],
              [[ 0.],
               [ 1.]]])
# Two 2x2 identity filters; expand_dims(axis=2) gives shape (2, 2, 1, 2)
# to match the conv2d weight layout used below.
w1 = np.expand_dims(np.array([[[1.,0.],[0., 1.]],[[1.,0.],[0.,1.]]]), axis=2)
print w1
# Move the trailing singleton axis first: (8, 2, 1) -> (1, 8, 2), i.e. NCHW
# style channel-first layout for the single-channel observation.
obs = obs.transpose((2,0,1))
# Build a tiny conv-conv-linear Q-network with constant-initialised weights
# and run one forward pass on `obs`. NOTE(review): Python 2 + pre-1.0
# TensorFlow API (builtin reduce, initialize_all_variables, argmax(dimension=)).
with tf.Session() as sess:
    w = {}
    t_w = {}
    # Deterministic initialisers so the forward pass is reproducible.
    init1_w = tf.constant_initializer(w1)
    init1_b = tf.constant_initializer(-1.0)
    init2_w = tf.constant_initializer(1.)
    init2_b = tf.constant_initializer(0.0)
    activation_fn = tf.nn.relu
    with tf.variable_scope('prediction', reuse=None):
        # NCHW input: batch x 1 channel x 8 steps x 2 one-hot slots.
        s_t = tf.placeholder('float32',
                             [None, 1, 8, 2], name='s_t')
        l1, w['l1_w'], w['l1_b'] = conv2d(s_t,
                                          2, [2, 2], [2, 1], init1_b, init1_w, activation_fn, 'NCHW', name='l1' )
        l2, w['l2_w'], w['l2_b'] = conv2d(l1,
                                          1, [1, 1], [1, 1], init2_b, init2_w, activation_fn, 'NCHW', name='l2' )
        # Flatten every non-batch dimension before the linear Q head.
        shape = l2.get_shape().as_list()
        l2_flat = tf.reshape(l2, [-1, reduce(lambda x, y: x * y, shape[1:])])
        q, w['q_w'], w['q_b'] = linear(l2_flat, 5, activation_fn=activation_fn, name='q')
        # Greedy action: index of the maximal Q value.
        q_action = tf.argmax(q, dimension=1)
    tf.initialize_all_variables().run()
    action = q_action.eval({s_t: [obs]})[0]
    l1 = l2.eval({s_t: [obs]})[0]
    q_w = w['q_w'].eval({s_t: [obs]})[0]
    print l1
    print q_w
    print action
w
# +
# Restore each weight tensor from its pickled value in weights/<name>.pkl,
# feeding the value through a placeholder into an assign op.
with tf.Session() as sess:
    with tf.variable_scope('load_pred_from_pkl', reuse=True):
        w_input = {}
        w_assign_op = {}
        for name in w.keys():
            w_input[name] = tf.placeholder('float32', w[name].get_shape().as_list(), name=name)
            w_assign_op[name] = w[name].assign(w_input[name])
        for name in w.keys():
            print name
            # Evaluating the assign op both loads and prints the new value.
            print w_assign_op[name].eval({w_input[name]: load_pkl(os.path.join('weights', "%s.pkl" % name))})
# -
| Weight.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="e1XUz3f4F9-4"
# **Tokenization**
# + colab={"base_uri": "https://localhost:8080/"} id="LcY1sleuaVR_" outputId="d3938164-8442-44a5-808f-0ef75848dd0d"
import nltk
from nltk.tokenize import sent_tokenize
# Sentence tokenisation: split the message at sentence boundaries.
my_message = "Hello there. Goodbye everybody."
tokens = sent_tokenize(my_message)
print(tokens)
# + id="GjUz7ar8l1Ez" colab={"base_uri": "https://localhost:8080/"} outputId="4c1a257b-d90c-4b7a-d1a8-3ad1a568c2b3"
from nltk.tokenize import word_tokenize
# word_tokenize needs the 'punkt' tokeniser models.
nltk.download("punkt")
my_message = "@Everybody: Hello NLP-world!"
tokens = word_tokenize(my_message)
print(tokens)
# + id="693oQ6X1nUFX" colab={"base_uri": "https://localhost:8080/"} outputId="332eac5e-3518-48ed-dbb9-d746a9c840c9"
from nltk.tokenize import wordpunct_tokenize
tokens = wordpunct_tokenize(my_message)
print(tokens)
# + id="JxS7HjZun4kh" colab={"base_uri": "https://localhost:8080/"} outputId="b6cf1f8e-2356-4b62-874d-86aa89712157"
from nltk.tokenize import regexp_tokenize
tokens = regexp_tokenize(my_message, r"\w+")
print(tokens)
# + id="6ota288o3fDI" colab={"base_uri": "https://localhost:8080/"} outputId="6a897cb9-bde4-4acb-d8ea-e630f148de38"
from nltk.tokenize import regexp_tokenize
input_sentences = ["Hello world", "this is only an example"]
tokens = []
# Tokenise each sentence on word characters and collect everything in one list.
for word in input_sentences:
    tokens.extend(regexp_tokenize(word, r"\w+"))
print(tokens)
# + [markdown] id="QmUC3n1rpbAZ"
# **Stemming**
# + id="3YE64x4spih6" colab={"base_uri": "https://localhost:8080/"} outputId="309fc462-a775-468f-ebaa-e57e83becec5"
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
tokens = ["Enjoy", "enjoying", "enjoys", "enjoyable"]
# Reduce each token to its Porter stem.
stems = [stemmer.stem(token) for token in tokens]
print(stems)
# + id="G_Mdv8wEtEB5" colab={"base_uri": "https://localhost:8080/"} outputId="4e7f507c-75a9-4a61-827c-55d6e0d8d1a0"
from nltk.stem.snowball import SnowballStemmer
# Same tokens, Snowball ("Porter2") stemmer for comparison.
stemmer = SnowballStemmer("english")
stems = [stemmer.stem(token) for token in tokens]
print(stems)
# + [markdown] id="dD2hCfsdtd0g"
# List-comprehension in Python
# + id="TokbWarcvADn" colab={"base_uri": "https://localhost:8080/"} outputId="8347f3d7-c5c5-4f3a-d4a6-aadeca3b5437"
input_list = ["red", "white", "purple", "yellow", "blue", "green", "black"]
# Example non-list comprehension
output_list = []
for item in input_list:
    output_list.append(item)
print(output_list)
# Example of list-comprehension
output_list = [item for item in input_list]
print(output_list)
# Example non-list comprehension (with conditional)
output_list = []
for item in input_list:
    if "u" in item:
        output_list.append(item)
print(output_list)
# Example of list-comprehension (with conditional)
output_list = [item for item in input_list if "u" in item]
print(output_list)
# + [markdown] id="YSjbRREXq0fW"
# **Removing stop words**
# + id="Ju-QZPcYq205" colab={"base_uri": "https://localhost:8080/"} outputId="ab808da4-5d15-4cfa-9fe5-3b1ba032d1eb"
from nltk.corpus import stopwords
nltk.download("stopwords")
example_text = "This is an example sentence to test stopwords"
sw_en = stopwords.words("english")
# Keep only the words that are not English stop words.
text_no_stopwords = [word for word in example_text.split() if word not in sw_en]
print(example_text)
print(text_no_stopwords)
# + id="W5dCifvIsHBv" colab={"base_uri": "https://localhost:8080/"} outputId="dacd00e9-1f5a-422d-badd-a5a67fd87676"
from nltk.corpus import gutenberg
nltk.download("gutenberg")
words = gutenberg.words("shakespeare-hamlet.txt")
words_no_stopwords = [word for word in words if word not in sw_en]
# BUG FIX: the percentage was previously computed from `text_no_stopwords`
# (the 6-token example sentence of the previous cell) instead of the Hamlet
# word list filtered here, so the reported figure was meaningless.
stopwords_percentage = len(words_no_stopwords) * 100 / len(words)
print("The percentage of words without stopwords in Hamlet is", stopwords_percentage, "%")
# + [markdown] id="FPM-J7jCWayU"
# **Lemmatization**
# + id="cs0oJ25Az2Bt" colab={"base_uri": "https://localhost:8080/"} outputId="7a767f49-a2cd-4ec5-847a-0c5603975889"
import spacy
# BUG FIX: the "en" shortcut was removed in spaCy v3; load the small English
# pipeline by its full package name instead (install it once with
# `python -m spacy download en_core_web_sm`).
nlp = spacy.load("en_core_web_sm")
sentence = "We are putting in efforts to enhance our understanding of Lemmatization"
lemmas = [token.lemma_ for token in nlp(sentence)]
print(lemmas)
# spaCy v3 no longer emits the '-PRON-' placeholder for pronouns, but the
# guard is kept so the cell also behaves sensibly under v2 pipelines.
lemmas = [w.lemma_ if w.lemma_ != '-PRON-' else w.text for w in nlp(sentence)]
print(lemmas)
# + [markdown] id="FuT4ZTZgQ-Nf"
# **POS tagging**
# + colab={"base_uri": "https://localhost:8080/"} id="v7uji_APRABp" outputId="d6b37b7a-d043-4870-c491-828aca02fb66"
import nltk
from nltk.tokenize import word_tokenize
from nltk import pos_tag
nltk.download("punkt")
nltk.download("averaged_perceptron_tagger")
sentence = "We are putting in efforts to enhance our understanding of Lemmatization"
tokens = word_tokenize(sentence)
# Tag each token with its Penn Treebank part-of-speech tag.
pos_tagged = pos_tag(tokens)
print("tokens", tokens)
print("pos_tagged", pos_tagged)
# + colab={"base_uri": "https://localhost:8080/"} id="0dI-WRC0S5WS" outputId="cc8539c0-3d7f-44a9-e056-e15c9d61e853"
from nltk.corpus import wordnet
from nltk.stem.wordnet import WordNetLemmatizer
nltk.download("wordnet")
# Return tag compliance to WordNet lemmatization (a, n, r, v)
# Return tag compliance to WordNet lemmatization (a, n, r, v)
def get_wordnet_pos(treebank_tag):
    """Map a Penn Treebank POS tag to the WordNet tag set (a, n, r, v)."""
    # Treebank tags encode the coarse class in their first letter.
    mapping = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    # Default to noun when the class is not one of the four above.
    return mapping.get(treebank_tag[:1], wordnet.NOUN)
lemmatizer = WordNetLemmatizer()
# Lemmatise each token using its WordNet-mapped POS tag for better accuracy.
lemmas = [lemmatizer.lemmatize(word, pos=get_wordnet_pos(tag)) for word, tag in pos_tagged]
print("lemmas", lemmas)
| 2.Datasets_for_NLP/4_text_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Batch Training
#
# Running algorithms which require the full data set for each update
# can be expensive when the data is large. In order to scale inferences,
# we can do _batch training_. This trains the model using
# only a subsample of data at a time.
#
# In this tutorial, we extend the
# [supervised learning tutorial](http://edwardlib.org/tutorials/supervised-regression),
# where the task is to infer hidden structure from
# labeled examples $\{(x_n, y_n)\}$.
# A webpage version is available at
# http://edwardlib.org/tutorials/batch-training.
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal
# -
# ## Data
#
# Simulate $N$ training examples and a fixed number of test examples.
# Each example is a pair of inputs $\mathbf{x}_n\in\mathbb{R}^{10}$ and
# outputs $y_n\in\mathbb{R}$. They have a linear dependence with
# normally distributed noise.
#
# We also define a helper function to select the next batch of data
# points from the full set of examples. It keeps track of the current
# batch index and returns the next batch using the function
# ``next()``. We will generate batches from `data` during inference.
def build_toy_dataset(N, w):
    """Simulate N linear-regression examples.

    Inputs are drawn from Normal(0, 2) in len(w) dimensions; outputs are
    x . w plus Normal(0, 0.05) noise.
    """
    num_features = len(w)
    x = np.random.normal(0.0, 2.0, size=(N, num_features))
    noise = np.random.normal(0.0, 0.05, size=N)
    y = x.dot(w) + noise
    return x, y
def generator(arrays, batch_size):
    """Endlessly yield aligned batches along each array's first axis.

    Keeps one cursor per array; when a batch runs past the end of an array
    it wraps around, concatenating the tail with the head.
    """
    positions = [0 for _ in arrays]  # per-array iteration cursors
    while True:
        current = []
        for idx, arr in enumerate(arrays):
            begin = positions[idx]
            end = begin + batch_size
            overflow = end - arr.shape[0]
            if overflow > 0:
                # Wrap: finish with the array's tail and restart at its head.
                piece = np.concatenate((arr[begin:], arr[:overflow]))
                positions[idx] = overflow
            else:
                piece = arr[begin:end]
                positions[idx] += batch_size
            current.append(piece)
        yield current
# +
ed.set_seed(42)

N = 10000  # size of training data
M = 128    # batch size during training
D = 10     # number of features

# Ground-truth weights are all 5; the simulated data has no intercept.
w_true = np.ones(D) * 5
X_train, y_train = build_toy_dataset(N, w_true)
X_test, y_test = build_toy_dataset(235, w_true)
data = generator([X_train, y_train], M)
# -
# ## Model
#
# Posit the model as Bayesian linear regression (Murphy, 2012).
# For a set of $N$ data points $(\mathbf{X},\mathbf{y})=\{(\mathbf{x}_n, y_n)\}$,
# the model posits the following distributions:
#
# \begin{align*}
# p(\mathbf{w})
# &=
# \text{Normal}(\mathbf{w} \mid \mathbf{0}, \sigma_w^2\mathbf{I}),
# \\[1.5ex]
# p(b)
# &=
# \text{Normal}(b \mid 0, \sigma_b^2),
# \\
# p(\mathbf{y} \mid \mathbf{w}, b, \mathbf{X})
# &=
# \prod_{n=1}^N
# \text{Normal}(y_n \mid \mathbf{x}_n^\top\mathbf{w} + b, \sigma_y^2).
# \end{align*}
#
# The latent variables are the linear model's weights $\mathbf{w}$ and
# intercept $b$, also known as the bias.
# Assume $\sigma_w^2,\sigma_b^2$ are known prior variances and $\sigma_y^2$ is a
# known likelihood variance. The mean of the likelihood is given by a
# linear transformation of the inputs $\mathbf{x}_n$.
#
# Let's build the model in Edward, fixing $\sigma_w,\sigma_b,\sigma_y=1$.
# +
# Placeholders leave the batch dimension unspecified so batches of any size fit.
X = tf.placeholder(tf.float32, [None, D])
y_ph = tf.placeholder(tf.float32, [None])

# Bayesian linear regression priors: w ~ N(0, I), b ~ N(0, 1);
# likelihood y | w, b, X ~ N(Xw + b, 1).
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=1.0)
# -
# Here, we define a placeholder `X`. During inference, we pass in
# the value for this placeholder according to batches of data.
# To enable training with batches of varying size,
# we don't fix the number of rows for `X` and `y`. (Alternatively,
# we could fix it to be the batch size if training and testing
# with a fixed size.)
# ## Inference
#
# We now turn to inferring the posterior using variational inference.
# Define the variational model to be a fully factorized normal across
# the weights.
# Fully factorised normal variational family; softplus keeps the scales positive.
qw = Normal(loc=tf.Variable(tf.random_normal([D])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))
qb = Normal(loc=tf.Variable(tf.random_normal([1])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
# Run variational inference with the Kullback-Leibler divergence.
# We use $5$ latent variable samples for computing
# black box stochastic gradients in the algorithm.
# (For more details, see the
# [$\text{KL}(q\|p)$ tutorial](http://edwardlib.org/tutorials/klqp).)
#
# For batch training, we will iterate over the number of batches and
# feed them to the respective placeholder. We set the number of
# iterations to be equal to the number of batches times the number of
# epochs (full passes over the data set).
# +
n_batch = int(N / M)
n_epoch = 5

inference = ed.KLqp({w: qw, b: qb}, data={y: y_ph})
# scale={y: N / M} makes each minibatch stand in for the full data set in the
# objective, so the likelihood is not dominated by the prior.
inference.initialize(n_iter=n_batch * n_epoch, n_samples=5, scale={y: N / M})
tf.global_variables_initializer().run()

# One update per batch; n_iter = batches per epoch * epochs.
for _ in range(inference.n_iter):
    X_batch, y_batch = next(data)
    info_dict = inference.update({X: X_batch, y_ph: y_batch})
    inference.print_progress(info_dict)
# -
# When initializing inference, note we scale $y$ by $N/M$, so it is as if the
# algorithm had seen $N/M$ as many data points per iteration.
# Algorithmically, this will scale all computation regarding $y$ by
# $N/M$ such as scaling the log-likelihood in a variational method's
# objective. (Statistically, this avoids inference being dominated by the prior.)
#
# The loop construction makes training very flexible. For example, we
# can also try running many updates for each batch.
# +
n_batch = int(N / M)
n_epoch = 1

inference = ed.KLqp({w: qw, b: qb}, data={y: y_ph})
# Ten updates per batch, so the total iteration budget is scaled up by ten.
inference.initialize(
    n_iter=n_batch * n_epoch * 10, n_samples=5, scale={y: N / M})
tf.global_variables_initializer().run()

for _ in range(inference.n_iter // 10):
    X_batch, y_batch = next(data)
    for _ in range(10):
        info_dict = inference.update({X: X_batch, y_ph: y_batch})
        inference.print_progress(info_dict)
# -
# In general, make sure that the total number of training iterations is
# specified correctly when initializing `inference`. Otherwise an incorrect
# number of training iterations can have unintended consequences; for example,
# `ed.KLqp` uses an internal counter to appropriately decay its optimizer's
# learning rate step size.
#
# Note also that the reported `loss` value as we run the
# algorithm corresponds to the computed objective given the current
# batch and not the total data set. We can instead have it report
# the loss over the total data set by summing `info_dict['loss']`
# for each epoch.
# ## Criticism
#
# A standard evaluation for regression is to compare prediction accuracy on
# held-out "testing" data. We do this by first forming the posterior predictive
# distribution.
y_post = ed.copy(y, {w: qw, b: qb})
# This is equivalent to
# y_post = Normal(loc=ed.dot(X, qw) + qb, scale=tf.ones(N))
# With this we can evaluate various quantities using predictions from
# the model (posterior predictive).
# +
print("Mean squared error on test data:")
print(ed.evaluate('mean_squared_error', data={X: X_test, y_post: y_test}))
print("Mean absolute error on test data:")
print(ed.evaluate('mean_absolute_error', data={X: X_test, y_post: y_test}))
# -
# ## Footnotes
#
# Only certain algorithms support batch training such as
# `MAP`, `KLqp`, and `SGLD`. Also, above we
# illustrated batch training for models with only global latent variables,
# that is, variables shared across all data points.
# For more complex strategies, see the
# [inference data subsampling API](http://edwardlib.org/api/inference-data-subsampling).
| notebooks/batch_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Data
#
# This is the completed solution for the [Import Data Tutorial](https://developers.arcgis.com/labs/data/python/import-data/).
#
# [ArcGIS tutorials](https://developers.arcgis.com/labs/) are short guides demonstrating the three phases of building geospatial apps: Data, Design, Develop.
# ### Important note
#
# If you have not completed the [Download data lab](https://developers.arcgis.com/labs/python/download-data), the code in the **The Download Data Lab** cell immediately below must be run before the **Import Data Lab** cells will run. These cells contain the code in the ArcGIS API for Python [Download data lab solution](download_data.ipynb).
#
# The `data/LA_Hub_Datasets` directory created in the previous lab is only available for the duration of a single notebooks.esri.com session.
# ## The Import Data Tutorial
from arcgis.gis import GIS
import os
# To create the GIS object, we pass your profile that contains the url and your login credentials. Please replace the credentials below with that of your org. To learn more about profiles, see [here](https://developers.arcgis.com/python/guide/working-with-different-authentication-schemes/#Storing-your-credentialls-locally).
gis = GIS('home')
parks_properties = {'title': 'Parks and Open Space',
'tags': 'parks, open data, devlabs',
'type': 'Shapefile'}
data_path = os.path.join('.', 'data', 'LA_Hub_datasets', 'Parks_and_Open_Space.zip')
parks_shp = gis.content.add(parks_properties, data=data_path)
parks_shp
parks_feature_layer = parks_shp.publish()
parks_feature_layer.url
# ## Challenge
trailheads_prop = {
    'title': 'Trailheads',
    'tags': 'parks, tutorials',
    'type': 'CSV'
}
# +
## Import CSV data
# BUG FIX: the original cells referenced `extract_path`, which is never
# defined in this notebook (it belongs to the separate Download-data lab and
# raised a NameError here). Build the paths the same way as the
# Parks_and_Open_Space dataset above instead.
trailheads_path = os.path.join('.', 'data', 'LA_Hub_datasets', 'Trailheads.csv')
trailheads = gis.content.add(trailheads_prop, data=trailheads_path)
# -
trails_prop = {
    'title': 'Trails',
    'tags': 'parks, tutorials',
    'type': 'GeoJson'  # NOTE: `type` is __case-sensitive__
}
# +
# Import GeoJSON data
trails_path = os.path.join('.', 'data', 'LA_Hub_datasets', 'Trails.geojson')
trails = gis.content.add(trails_prop, data=trails_path)
| labs/import_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
'hello'
# !ls
# !pwd
import tensorflow as tf
import warnings
from itertools import product
from dateutil.relativedelta import relativedelta
from tqdm import tqdm_notebook
from scipy import stats
import scipy.stats as scs
import statsmodels.api as sm
import statsmodels.tsa.api as smt
import statsmodels.formula.api as smf # statistics and econometrics
from scipy.optimize import minimize # optimisation routines
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# %matplotlib inline
warnings.filterwarnings('ignore')
# [10 minutes to pandas](https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html)
import numpy as np
import pandas as pd
# Object creation
# A Series is a 1-D labelled array; np.nan marks a missing value.
s=pd.Series([1,3,5,np.nan,6,8])
s
dates=pd.date_range('20200101',periods=10)
dates
# 10x4 DataFrame of standard-normal draws, indexed by the dates above.
df=pd.DataFrame(np.random.randn(10,4),index=dates,columns=list('abcd'))
df
| Index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing SSNs using an interactive network
#
# Networks have nodes and edges. A node is a connection point and edges are the connections between them. In this exercise each protein is a node and each edge indicates an evalue (expectation value) smaller than 10e-40.
#
# ## Examining the BLAST output
#
# <font color=blue><b>STEP 1:</b></font> Let's first look at the output of our BLASTP search by double clicking the file in the file browser. <b>N.B. if you had to relaunch the binder, you will need to upload this file from your personal computer!</b> If you can't find it, you should be able to download the file here: https://drive.google.com/drive/folders/1Qjx3u6T-LIoUV4d2xtyagPN2pXPqHpx_?usp=sharing.
# ***
# You should see three columns, with each column separated by a tab. They aren't labeled, but we dictated them using the outfmt option in our BLAST search. To refresh your memory, here is the command we ran:
#
# ~~~python
# # !blastp -db files/finalpro_40 -query files/final_40.fasta -outfmt "6 qseqid sseqid evalue" -out files/BLASTe40_out -num_threads 4 -evalue 10e-40
# ~~~
#
# - qseqid is the query sequence id (we will call this the source)
# - sseqid is the subject sequence id (we will call this the target)
# - evalue is the expectation value
#
# <font color=blue><b>STEP 2:</b></font> Answer the following questions:
#
# 1. What is the range of expectations value when the source and target are the same (find a few in the BLASTe40_out)?
# 2. Given that the expectation value refers to the chance that a BLAST hit is found by chance, does your answer to question 1 make sense?
# 3. In the first ten results in the file, what is the expectation value of the closest non-identical match (give the source, target, and evalue)?
#
# ***
#
# ## Creating a Dataframe from the BLAST output
#
# A little lingo here. An API is an Application Programming Interface. APIs are pieces of software that allow applications to talk to each other. A dataframe is a popular API that resembles a spreadsheet. The BLAST output is a tab separated file, and we will use pandas - a "powerful Python data analysis toolkit" to read our file and convert it to a dataframe.
#
# <font color=blue><b>STEP 3:</b></font> Edit the code to replace the <b>\<<<your file here\>>></b> with the BLAST output file path (remember it is in the files directory). Then run the code below to convert the BLASTe40_out file into a dataframe. Since our BLAST output did not contain any headers (column labels), we can add them in.
#
# +
import pandas as pd # imports the pandas functions

# Column labels for the three BLAST output fields (qseqid, sseqid, evalue).
headerList = ['source','target','evalue']
# Read the tab-separated BLAST output.  FIX: pass the labels via `names=` —
# the original built headerList but never used it, assigning .columns by hand
# afterwards.  `names=` implies there is no header row, so behaviour is the same.
blast_data_con = pd.read_csv('<<<your file here>>>', sep='\t', names=headerList)
blast_data_con # show what is in the dataframe.
# -
# Note that the complete dataframe is not shown, but that it contains over 4000 connections, which we call edges in a network.
# ***
# ## Creating dataframes of edges and nodes
#
# ## Removing duplicates and self-references from edges
#
# The code below removes duplicates (e.g. if <font color="blue">a</font> finds <font color="blue">b</font> and <font color="blue">b</font> finds <font color="blue">a</font>, we only need to keep one of them) and self-references (e.g. remove all instances of <font color="blue">a</font> finds <font color="blue">a</font>).
#
# The code is a bit complicated and uses another function called numpy. Briefly, the code uses pandas and the numpy.sort function to create another dataframe with only the duplicates. We then "subtract" the dataframe containing duplicates from the original dataframe.
#
# We call the new dataframe "edges".
#
# <font color=blue><b>STEP 4:</b></font> Run the code below to create a dataframe of unique edges. We have named this dataframe "edges".
# +
import numpy as np

# Alias the original dataframe so blast_data_con itself is left untouched
# (all operations below build new frames rather than mutating in place).
df = blast_data_con
# Remove duplicates: sort each (source, target) pair row-wise so that a->b
# and b->a collapse onto the same pair, then flag every repeated pair.
dup_mask = pd.DataFrame(np.sort(df[['source', 'target']])).duplicated()
df = df[~dup_mask]
# Remove self-references (a sequence that found itself).
df = df[df['source'] != df['target']]
edges = df  # the unique set of edges
edges  # show the edges dataframe
# -
# ## Creating a unique list of nodes
#
# We will use the numpy.unique function to read through the sources and targets in the dataframe and make a list (called uniq_list) of nodes.
#
# Then we will use pandas to convert this list into a simple dataframe of nodes.
#
# <font color=blue><b>STEP 5:</b></font> Run the code below to create dataframe of unique nodes.
# +
# Collect every id appearing as a source or a target, deduplicated (np.unique
# also sorts), and wrap the result in a one-column dataframe.
uniq_list = np.unique(df[['source', 'target']].values)
nodes = pd.DataFrame({'id': uniq_list})
nodes  # show the nodes dataframe
# -
# ***
#
# ## Visualizing nodes and edges
#
# You now have a set of nodes and edges that you can visualize. We will import ipycytoscape, A Cytoscape widget for displaying interactive networks. You can find more about Cytoscape (a really cool stand alone software package here: https://cytoscape.org/ and ipycytoscape here: https://github.com/cytoscape/ipycytoscape.
#
# This code is borrowed and edited from https://github.com/joseberlines, who has done some neat work with ipycytoscape.
#
# <font color=blue><b>STEP 6:</b></font> Run the code box below to visualize the graph (this might take a few minutes and might take a few seconds to appear even after the asterisk disappears).
#
#
# +
# There isn't a real need to edit any of this since it is just making the network graph.
import json # json stands for JavaScript Object Notation and ipycytoscape reads the data in this format
import ipycytoscape # the widget to visualize interactive networks.
from ipywidgets import Output
def transform_into_ipycytoscape(nodes_df, edges_df):
    """Convert node/edge dataframes into an interactive ipycytoscape widget.

    Returns a (CytoscapeWidget, Output) pair; the Output is a placeholder for
    the (currently disabled) click/mouseover logging below.
    """
    nodes_dict = nodes_df.to_dict('records')  # converts the nodes to dictionaries
    edges_dict = edges_df.to_dict('records')  # converts the edges to dictionaries

    # ---- building nodes ----
    data_keys = ['id']  # keys copied under each node's 'data' entry
    position_keys = ['position_x', 'position_y']
    # BUG FIX: the original list was missing a comma between 'grabbed' and
    # 'grabbable', so Python concatenated them into the useless key
    # 'grabbedgrabbable'; the duplicated 'score' entry is removed as well.
    rest_keys = ['score', 'idInt', 'name', 'group', 'removed', 'selected',
                 'selectable', 'locked', 'grabbed', 'grabbable']
    nodes_graph_list = []  # accumulates the json-style node records
    for node in nodes_dict:
        data_sub_dict = {'data': {el: node[el] for el in data_keys}}
        rest_sub_dict = {el: node[el] for el in node.keys() if el in rest_keys}
        posi_sub_dict = {}
        if 'position_x' in node.keys() and 'position_y' in node.keys():
            posi_sub_dict = {'position': {el: node[el] for el in node.keys() if el in position_keys}}
        dict_node = {**data_sub_dict, **rest_sub_dict, **posi_sub_dict}
        nodes_graph_list.append(dict_node)

    # ---- building edges ----
    data_keys = ['source', 'target', 'evalue']  # keys present on every edge
    data_keys2 = ['label', 'classes']  # optional keys, copied only if present
    # duplicate 'group' removed (membership test, so no behaviour change)
    rest_keys = ['score', 'weight', 'group', 'networkId', 'networkGroupId',
                 'intn', 'rIntnId', 'removed', 'selected', 'selectable',
                 'locked', 'grabbed', 'grabbable', 'classes']
    edges_graph_list = []
    for edge in edges_dict:
        data_sub_dict = {el: edge[el] for el in data_keys}
        data_sub_dict2 = {el: edge[el] for el in edge.keys() if el in data_keys2}
        rest_sub_dict = {el: edge[el] for el in edge.keys() if el in rest_keys}
        dict_edge = {'data': {**data_sub_dict, **data_sub_dict2}, **rest_sub_dict}
        edges_graph_list.append(dict_edge)

    total_graph_dict = {'nodes': nodes_graph_list, 'edges': edges_graph_list}

    # ---- building the per-element style sheet ----
    # duplicate 'width' removed (membership test, so no behaviour change)
    all_node_style = ['background-color', 'background-opacity',
                      'font-family', 'font-size', 'label', 'width',
                      'shape', 'height', 'text-valign', 'text-halign']
    all_edge_style = ['background-color', 'background-opacity',
                      'font-family', 'font-size', 'label', 'width', 'line-color',
                      ]
    style_elements = []
    for node in nodes_dict:
        node_dict = {'selector': f'node[id = \"{node["id"]}\"]'}
        style_dict = {"style": {el: node[el] for el in node.keys() if el in all_node_style}}
        node_dict.update(style_dict)
        style_elements.append(node_dict)
    # NOTE(review): this selector matches edges by *id* but is filled with the
    # edge's 'source' field — confirm that is intended.
    for edge in edges_dict:
        edge_dict = {'selector': f'edge[id = \"{edge["source"]}\"]'}
        style_dict = {"style": {el: edge[el] for el in edge.keys() if el in all_edge_style}}
        edge_dict.update(style_dict)
        style_elements.append(edge_dict)

    # ---- the graph ----
    # round-trip through json so the widget receives plain python structures
    data_graph = json.dumps(total_graph_dict)
    json_to_python = json.loads(data_graph)
    result_cyto = ipycytoscape.CytoscapeWidget()
    result_cyto.graph.add_graph_from_json(json_to_python)
    result_cyto.set_style(style_elements)
    result_cyto.set_layout(name='grid')  # concentric, cola, or grid
    out = Output()
    """
    def log_clicks(node):
        with out:
            print(f'clicked: {(node)}')
    #def log_mouseovers(node):
        #with out:
            #print(f'mouseover: {pformat(node)}')
    result_cyto.on('node', 'click', log_clicks)
    #result_cyto.on('node', 'mouseover', log_mouseovers)
    """
    return result_cyto, out


network, out = transform_into_ipycytoscape(nodes, edges)
display(network)
display(out)
# -
# <font color=blue><b>STEP 7:</b></font> Try zooming in and out of the network. You can also grab and move nodes.
#
# If you are underimpressed - you should be. We did make an interactive network of nodes and edges. <b>But</b>...the problem is that this network doesn't contain enough information to give us insight into the connections among DtxR-like sequences.
#
# <font color=blue><b>STEP 8:</b></font> The code below adds some information to the dataframe for our nodes. It includes color and labels and sets a default size for the nodes. Run the code below to update the dataframe and then run the code to visualize the network again. In case you would like to see the full range of named colors, check out this site: https://matplotlib.org/stable/gallery/color/named_colors.html.
#
# +
# Let's add some new columns to our dataframe
nodes['label'] = nodes['id']  # show each node's id as its on-graph label
# Default visual attributes for every node (the knowns get recoloured later).
default_node_style = {
    'background-color': 'cyan',
    'width': '24',
    'height': '24',
    'font-size': '20',
    'text-valign': 'center',
    'text-halign': 'center',
    'count': '1',
}
for column, value in default_node_style.items():
    nodes[column] = value
nodes
# -
# ***
# The network is still very difficult to use. Knowledge of the number of connections might help us to find nodes that make many connections and nodes that make only a few connections easily.
#
# <font color=blue><b>STEP 9:</b></font> The code below creates a list using our node IDs. It goes through the list and counts the number of edges to targets. Then it changes the variable count in the dataframe for that item. Lastly, the height and width are determined using the size * 10. I just made that up, you could try any multiple or even an exponent (e.g. \**2).
#
# +
# Let's change the size of the nodes based on the number of connections.
col_one_list = nodes['id'].tolist()  # node ids as a plain python list
for node_id in col_one_list:
    # degree = number of edges touching this node, as source or as target
    degree = len(edges[edges['source'] == node_id]) + len(edges[edges['target'] == node_id])
    nodes.loc[nodes['id'] == node_id, 'count'] = degree
    scaled = degree * 10  # arbitrary scale factor for width/height
    nodes.loc[nodes['id'] == node_id, 'width'] = scaled
    nodes.loc[nodes['id'] == node_id, 'height'] = scaled
nodes['font-size'] = '50'  # let's also increase the size of the font here.
nodes
# -
# This is finally starting to give us some information.
#
# <font color=blue><b>STEP 10:</b></font> Let's try another last amendment to our dataframe by giving a color to our knowns. Note that these are the IDs in the "dtxr_pdbs.fasta" file, and I just picked a different color for each. Run the code below and then rerun the network visualization.
# +
# Here we can assign colors to nodes that connect to one of our knowns!
# Map each known structure id to its display colour, then apply in one loop.
known_colors = {
    '1U8R_IDER': 'red',
    '1C0W_DTXR': 'orange',
    '6O5C_MTSR': 'yellow',
    '3HRT_SCAR': 'green',
    '5CVI_SLOR': 'blue',
    '3R60_MNTR': 'magenta',
}
for known_id, color in known_colors.items():
    nodes.loc[nodes['id'] == known_id, 'background-color'] = color
nodes
# -
# <font color=blue><b>STEP 11:</b></font> Answer the question:
#
# 1. Was adding color to your knowns as helpful as you thought it might be? Why or why not?
#
# ***
#
# Let's see if adding more color can help us to generate clusters.
#
# <font color=blue><b>STEP 12:</b></font> In this step, if a node is identified by a known, we will color it the same as the known. Note that the last section recolors the known nodes just in case they were identified by another known directly. Run the code below and then rerun the network visualization.
# +
# Colour each node with the colour of the known (the edge target) that
# found it, then re-colour the knowns themselves in case one known was
# identified directly by another.
known_colors = {
    '1U8R_IDER': 'red',
    '5CVI_SLOR': 'blue',
    '3HRT_SCAR': 'green',
    '1C0W_DTXR': 'orange',
    '3R60_MNTR': 'magenta',
    '6O5C_MTSR': 'yellow',
}
records = edges.to_records(index=False)
result = list(records)
for item in result:
    src, tgt = item[0], item[1]
    if tgt in known_colors:
        nodes.loc[nodes['id'] == src, 'background-color'] = known_colors[tgt]
for known_id, color in known_colors.items():
    nodes.loc[nodes['id'] == known_id, 'background-color'] = color
nodes
# -
# ***
#
# <font color=blue><b>STEP 13:</b></font> Interact with the graph to create clusters. Briefly, cluster by first moving the larger known clusters to the outside of the grid graph. Then bring the similarly colored groups together. Finally, arrange the remaining cyan nodes nearer to their connections as dictated by the edges.
#
# <font color=blue><b>STEP 14:</b></font> Use your clustered graph to answer the following questions:
#
# 1. Which DtxR-like proteins are most closely related to each other?
# 2. What are the MNTRs connected to? What does this mean? What step or steps might you need to change to identify sequence connections to MNTRs?
# 3. What sequence (or sequences) connect the 5CVI_SLOR protein to the 1C0W_DTXR protein?
# <font color=blue><b>STEP 15:</b></font> <b>Challenge Question:</b> Having identified the sequence links between 5CVI_SLOR protein and 1C0W_DTXR protein (Step 14 question 3), create a fasta file that contains these linking sequences and the knowns (in dtxr_pdbs.fasta). Then create a multiple alignment from this fasta. Using the multiple alignment output, assign functions to the linking sequences - choices are: DtxR/IdeR-like, SloR/ScaR-like, MtsR-like or unknown function.
| 3 - Visualizing_BLAST_data_v9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
from selenium import webdriver
import time
import urllib
from slugify import slugify
import requests
import random
import os
conference_url = "https://openreview.net/group?id=NeurIPS.cc/2021/Conference" # the conference url to download papers from
# FIX: use a raw string so "\m" is not an (invalid) escape sequence — the
# original mixed "\\" with a bare "\m", which raises a SyntaxWarning on
# recent Pythons.  The resulting path value is unchanged.
Edgedriver_path = r'C:\Users\xiaoyang\Downloads\edgedriver_win64\msedgedriver.exe' # the edgedriver.exe path
root = r'E:\OneDrive\academics\papers\conferences\NeurIPS-2021-ALL'.replace('\\','/') # file path to save the downloaded papers
# +
os.makedirs(root, exist_ok=True)  # make sure the output root directory exists
print(root)
# NOTE(review): passing the driver path positionally was removed in
# Selenium 4; this call assumes a Selenium 3.x install — confirm the version.
driver = webdriver.Edge(Edgedriver_path)
driver.get(conference_url)  # open the conference page to scrape
# +
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException

js = "window.scrollTo(0,0)"
pdfurllist = []
pdfnamelist = []


def _collect_current_page(section):
    """Append the title and pdf link of every paper listed on the page.

    Factored out because the original duplicated this extraction code both
    before and inside the pagination loop.
    """
    elements = driver.find_elements_by_xpath(
        '//*[@id="{}-presentations"]/ul/li/h4'.format(section.lower()))
    for element in elements:
        pdfnamelist.append(element.text)
        # the second <a> under each title is the pdf link
        pdfurllist.append(element.find_elements_by_xpath('a')[1].get_attribute('href'))


# three sections: oral, spotlight, poster
for num_section, section in enumerate(['Oral', 'Spotlight', 'Poster']):
    driver.execute_script(js)  # scroll back to the top before clicking a tab
    driver.find_element_by_partial_link_text(section).click()
    time.sleep(3)  # give the tab time to load
    _collect_current_page(section)
    next_page = True
    while next_page:
        try:
            # each section has its own arrow pair, hence index num_section * 2
            driver.find_elements_by_class_name('right-arrow')[num_section*2].find_element_by_xpath('a').click()
            time.sleep(1)
            _collect_current_page(section)
            print(len(pdfnamelist))
        except ElementClickInterceptedException:
            time.sleep(1)  # something covered the arrow; wait and retry
        except NoSuchElementException:
            print(section, 'no more pages', len(pdfnamelist))
            next_page = False
# +
# check the retrieved urls
print('The first 5 pdf urls:\n')
for idx in range(5):
    print(pdfurllist[idx])
print('\nThe last 5 pdf urls:\n')
for idx in range(1, 6):
    print(pdfurllist[-idx])
print('=======================================================')
# -
# check the retrieved paper titles
print('The first 5 pdf titles:\n')
for idx in range(5):
    print(pdfnamelist[idx])
print('\nThe last 5 pdf titles:\n')
for idx in range(1, 6):
    print(pdfnamelist[-idx])
# +
print('The number of papers is ', len(pdfnamelist))
# Sanity check: every scraped title must be paired with a pdf url.
assert len(pdfnamelist)==len(pdfurllist), 'The number of titles and the number of urls are not matched. \
You might solve the problem by checking the HTML code in the \
website yourself or you could ask the author by raising an issue.'
# +
# download the papers one by one. The files are named after the titles
# (guaranteed to be a valid file name after processing by slugify).
print('Start downloading')
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
for i, url in enumerate(pdfurllist):
    if url is not None:  # FIX: identity comparison for None
        pdfname = slugify(pdfnamelist[i])
        # NeurIPS 2021 listed 60 orals then 285 spotlights; the rest are posters.
        if i < 60:
            sub_directory = 'oral'
        elif i < 60 + 285:
            sub_directory = 'spotlight'
        else:
            sub_directory = 'poster'
        # BUG FIX: create the per-section folder before writing into it —
        # the original open() failed if the directory did not exist.
        os.makedirs(root + '/' + sub_directory, exist_ok=True)
        target_path = root + '/' + sub_directory + '/' + pdfname + ".pdf"
        if os.path.isfile(target_path):
            print('existed', i, '\t', pdfnamelist[i], '\t', pdfurllist[i])
        else:
            print(i, '\t', pdfnamelist[i], '\t', pdfurllist[i])
            try_download = True
            while try_download:
                try:
                    data = requests.get(pdfurllist[i], timeout=80, headers=headers).content
                    try_download = False
                # BUG FIX: requests raises requests.exceptions.ConnectionError,
                # which is NOT a subclass of the builtin ConnectionError the
                # original caught — the retry branch could never trigger.
                except requests.exceptions.ConnectionError:
                    time.sleep(random.uniform(1, 2))  # anti-anti-crawler jitter
            with open(target_path, 'wb') as f:
                f.write(data)
# -
| download_neurips2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from pandas import DataFrame
import nltk
from nltk import pos_tag
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
# -
job_desc_df = pd.read_csv('ScrappedDataToCSV/NLP_filtered_job_desc.csv')

# ### Performing text analysis,
# - tokenize + filter + stemming + categorize tool/skill

# trimming down the job description: keep only the long lines (requirement /
# responsibility text tends to be longer than 45 characters) and re-join them
for i in range(len(job_desc_df)):
    desc_lines = job_desc_df['description'].iloc[i].split('\n')
    kept_lines = [line for line in desc_lines if len(line) > 45]
    # FIX: write through df.at[row, col] instead of the original chained
    # job_desc_df['description'].at[i] = ..., which assigns through an
    # intermediate Series and can trigger SettingWithCopyWarning.
    job_desc_df.at[i, 'description'] = ' '.join(kept_lines)
# +
# helper function
ps = PorterStemmer()
wanted_tag = ['NN', 'JJ', 'VBP']  # nouns, adjectives, non-3rd-person verbs


def preppingJobDesc(desc):
    """Lowercase and tokenize a description, keep only the wanted POS tags,
    and return the set of stemmed words."""
    tagged_tokens = pos_tag(word_tokenize(desc.lower()))
    kept_words = (word for word, tag in tagged_tokens if tag in wanted_tag)
    return {ps.stem(word).lower() for word in kept_words}
# +
# common list of tools and skills appeared in many postings from a quick glance.
tool_skill = ['server',
              'cloud',
              'sql',
              'mysql',
              'python',
              'r',
              'tableau',
              'power bi',
              'looker',
              'powerpoint',
              'sheet',
              'gsuite',
              'jupyter',
              'javascript',
              'java',
              'excel',
              'statistic',
              'model',
              'word',
              'bi']

# stemmed word-set for every posting's description
job_desc_df['desc_word_set'] = job_desc_df['description'].map(preppingJobDesc)
# stem -> original spelling lookup (later duplicates simply overwrite)
tool_keyword_dict = {ps.stem(tok): tok for tok in tool_skill}
# the stems themselves as a set, since we can't have duplicates
tool_keyword_set = set(tool_keyword_dict)
# +
# create a list of all the tools asked in the job description
tool_list = []
num_of_jobs = len(job_desc_df)
for row in range(num_of_jobs):
    words_in_desc = job_desc_df['desc_word_set'].iloc[row]
    matched = tool_keyword_set.intersection(words_in_desc)
    if not matched:
        # this posting mentions none of the known tools
        tool_list.append('Not Specified')
    tool_list.extend(matched)
# +
# creating a dataframe from results
df_tool = pd.DataFrame(data={'count': tool_list})
df_tool = df_tool.replace(tool_keyword_dict)  # stems back to readable names
# fold keyword variants that mean the same thing
df_tool.loc[df_tool['count'] == 'bi', 'count'] = 'power bi'
df_tool.loc[df_tool['count'] == 'mysql', 'count'] = 'sql'
# tally each tool and tidy the column names
df_tool = df_tool.value_counts().reset_index().rename(columns={'count': 'tool', 0: 'count'})
# -
# ### Quick visual using seaborn
# +
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

# Fraction of postings that mention the most-requested tool (df_tool is
# sorted by count, so row 0 holds the top tool).
highest_perc = round(df_tool['count'].iloc[0] / len(job_desc_df), 2)
bins = np.arange(0,highest_perc, .08)
plot = sns.barplot(data = df_tool, x = df_tool['count'], y = df_tool['tool'], color = 'cyan')
plot.set(xlabel = '% of Employers', ylabel = 'Tool', title = 'Top Tools Required By Employers in SF Bay Area')
# NOTE(review): this relabels whatever ticks matplotlib generated with the
# `bins` values — it silently mislabels (or raises) if the tick count differs
# from len(bins).  Confirm, or use matplotlib's PercentFormatter instead.
plot.set_xticklabels(['{:,.0%}'.format(x) for x in bins])
plt.savefig('Top Tools', format = 'png')
| DataAnalyticTopSkills/nlp_text_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="vg7jrWZvZBJ4"
from keras.datasets import cifar10
import matplotlib.pyplot as plt
import pandas as pd
import random
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import seaborn as sns
# %matplotlib inline
# + id="ATCFryOWZBJ_"
# Load the CIFAR-10 train/test split (images plus integer class labels).
(X_train,y_train),(X_test,y_test) = cifar10.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="HeWfIQk7ZBKC" outputId="3e3ef9f0-2404-4434-b37d-1db03f27d275"
print("Train Shape:",X_train.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="yurg1eiXZBKF" outputId="7587c54e-9a43-477f-e21a-78f36153d116"
print("Test Shape:",X_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="P2bXU8fXZBKZ" outputId="9d726ec1-2152-494f-b4f0-cc6989efe666"
y_train[:10]
# + id="yvhr3IuMZBKc"
# Flatten the (n, 1) label arrays into pandas Series for easy counting.
y_train = pd.Series(y_train.reshape(-1,))
y_test = pd.Series(y_test.reshape(-1,))
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="ERiSPA9SZBKe" outputId="0ce57029-732e-4217-82e3-8ea2d0b193e7"
# Visual check of the class balance in each split.
plt.bar(y_train.unique(),y_train.value_counts())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="pnjjeTllZBKi" outputId="08684d0c-0b3d-46c6-b53f-295ba314d08f"
plt.bar(y_test.unique(),y_test.value_counts())
# + id="imbXH6PvZBKp"
# One-hot encode the labels (needed for categorical_crossentropy).
y_train = pd.get_dummies(y_train)
y_test = pd.get_dummies(y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="EkP2D695ZBKt" outputId="9d343497-06ee-42d2-d106-b3060b5aa108"
y_train.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="7_ztS2kWZBKv" outputId="115fba22-8325-44bf-e29c-85c8a5567406"
# Eyeball a random sample image from each split.
plt.imshow(random.choice(X_train))
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="JiEM7Fm_ZBKx" outputId="6bfc89ba-0dff-4e58-e324-4717f5ef65ef"
plt.imshow(random.choice(X_test))
# + id="hQZMBhgJZBK1"
# Scale pixel values into [0, 1].
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train = X_train/255.0
X_test = X_test/255.0
# + id="MSkmkn4AZBK3"
# VGG-style CNN for CIFAR-10: four Conv-Conv-Pool stages with widening filter
# counts (16 -> 32 -> 64 -> 128), BatchNorm after every conv and an increasing
# Dropout rate, followed by one dense hidden layer and a 10-way softmax.
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.1))
# FIX: removed a stray input_shape=(32, 32, 3) that was repeated on this
# layer — Keras only honours input_shape on the first layer, so it was
# misleading (no functional change).
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="PKuOg8JSly7m" outputId="13df8e3c-0a07-45f4-f6f4-57f309b11df3"
model.summary()
# + id="dKJIMQpxfGw2"
# NOTE(review): `opt` (SGD with momentum) is created but never passed to
# compile(), which uses the string 'rmsprop' instead.  Confirm which
# optimizer was actually intended.
opt = SGD(learning_rate = 0.01, momentum = 0.9)
model.compile(optimizer= 'rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# + id="j8sF8hQzfM2-"
# Augmentation pipeline: small random shifts plus horizontal flips.
gen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
it_train = gen.flow(X_train, y_train, batch_size=32)
steps = int(X_train.shape[0] / 32)
# + colab={"base_uri": "https://localhost:8080/"} id="9KX1dOVxZBK5" outputId="3c69106c-7385-4e17-83ea-d61b795678ad"
# NOTE(review): fit() trains on the raw arrays, so the augmented iterator
# (it_train) and `steps` are never used.  Confirm whether
# model.fit(it_train, steps_per_epoch=steps, ...) was intended.
history = model.fit(X_train, y_train, batch_size = 32, epochs=10, validation_split = 0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 341} id="iHmXBpYTlred" outputId="a3aeb4e7-1ec4-403a-a776-990075278275"
# Plot the per-epoch training curves recorded by fit().
pd.DataFrame(history.history).plot(figsize = (8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 2)
# + colab={"base_uri": "https://localhost:8080/"} id="hBO_fwR-ZBK6" outputId="fbc2e44f-3052-47b6-8c49-f9bbba355809"
# evaluate() returns (loss, metric values); keep only the accuracy.
_, acc = model.evaluate(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="n6i7x2HzZBK6" outputId="f2898d24-4c87-42b6-979a-0b8e2f782a2c"
print('> %.3f' % (acc * 100.0))  # test accuracy as a percentage
# + id="pbicgScerXul"
#Saving the model
# Persist the architecture as JSON and the learned weights as HDF5; the model
# can later be rebuilt with model_from_json() + load_weights().
model_json = model.to_json()
with open("CIFAR10_model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("CIFAR10_model.h5")
| Main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%capture
import os

# check if this is a colab notebook and clone the repo if it is
# (the indented "# %"/"# !" lines are notebook magics kept as comments by
# jupytext — they only execute inside Jupyter)
if 'COLAB_GPU' in os.environ:
    # %cd /content/
    # !git clone https://github.com/Enver-group/twitch-web-analytics
    # %cd twitch-web-analytics/notebooks
    # !pip install -r requirements.txt
    # !pip install -e .
# +
# You also need to set the Twitch API access tokens:
# os.environ['TWITCH_CLIENT_ID'] = '<YOUR_CLIENT_ID>'
# os.environ['TWITCH_CLIENT_SECRET'] = '<YOUR_CLIENT_SECRET>'
# +
# autoreload modules
# %load_ext autoreload
# %autoreload 2
import src
from src.user import User
# -
root = User.from_name("ibai")  # root user of the follow tree
root_follows = root.follows  # the accounts the root follows
print(f"{root.name.capitalize()} follows {len(root_follows)} users")
# Fetch full user records for everyone the root follows.
users_that_root_follows = User.get_users(root_follows)
import pandas as pd
# Rank the root plus everyone it follows by total view count.
df = pd.DataFrame([root]+users_that_root_follows).sort_values("view_count", ascending=False)
df
# +
from src.data import make_data_from_root_user
# Fetches data for each user of the follow tree: every iteration picks a
# random user already in the tree and adds that user's follows, until
# max_users is reached or the run is interrupted manually.  Progress is
# saved to a csv file every 50 iterations.
# NOTE(review): the cell above starts from "ibai" but the root here is
# "rubius" — confirm which root is intended.
make_data_from_root_user("rubius",output_filepath="../data/data.csv",max_users=500)
# +
from src.data import extract_num_followers_from_users_df, extract_follows_from_users_df
# Dataset obtained from the tree of user follows starting from rubius as root node/leaf
df = pd.read_csv("../data/data.csv",lineterminator='\n')
# Extract the ids of the follows of the top 1000 users in the dataset by view_count
df = extract_follows_from_users_df(df,only_top=1000)
# Extracting the num_followers of the top 1000 users of the dataset of the tree starting from rubius follows
new_df = extract_num_followers_from_users_df(df,only_top=1000)
new_df.to_csv("../data/data.csv",index=False)
new_df.sort_values("num_followers",ascending=False)
| notebooks/1-simon-twitch-api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Django Rest API - WishList LuizaLabs
# ---
#
# API made as part of the process of LuizaLabs evaluation
#
# ---
# ## Requirements
# ---
# - [docker-compose](https://docs.docker.com/compose/install/)
# ---
# ## Run the Project
# ---
# ---
# ### Setup
# ---
# Start by cloning the project
# > ```$ git clone https://github.com/LucasSRocha/django_rest_llabs.git```
#
# Enter the project folder
# > ```$ cd django_rest_llabs```
#
# Build the docker image
# > ```$ docker-compose build```
#
# Start the docker
# > ```$ docker-compose up```
#
# Alternatively you can build the docker image and run it in a single command
# > ```$ docker-compose up --build```
# ### Acessing the docker shell
# You can access the image shell to further manipulate it
# > ```$ docker-compose exec api_web sh```
# ### Creating a super user
# Inside the api_web shell
# > ```$ python manage.py createsuperuser```
#
# You'll be prompted for a username, e-mail and a password.
# ### UserMagalu - Endpoint_allowed_methods(GET, POST, PATCH, DELETE)
# This will use our ```api/usermagalu/``` endpoint
#
# > ```$ curl -d "username=usuario teste&email=<EMAIL>" -X POST http://localhost:8000/api/usermagalu/```
# !curl -d "username=usuario teste&email=<EMAIL>" -X POST http://localhost:8000/api/usermagalu/
# ### Getting JWT Auth Token - Endpoint_allowed_methods(POST)
# The authentication used to modify the API objects is a JWT token, and it's obtained through an endpoint
# **required_fields {"email": "< USER_EMAIL >", "password": "< USERNAME >"}**
#
# > ```$ curl -d "password=<PASSWORD>&email=<EMAIL>" -X POST http://localhost:8000/api/auth/login/```
#
# >```{"token":"<KEY>"}```
#
# To be able to access the informations you have to pass the auth token in the headers sections of your request
# !curl -d "password=<PASSWORD>&email=<EMAIL>" -X POST http://localhost:8000/api/auth/login/
# !curl -X GET http://localhost:8000/api/
# !curl -H "Authorization: JWT <KEY>" -X GET http://localhost:8000/api/
# ### Wishlist - Endpoint_allowed_methods(GET, POST, PATCH, DELETE)
# Using the ```/api/wishlist/``` endpoint you're able to create a wishlist for your user and name it if you want.
# !curl -H "Authorization: JWT <KEY>" -d "wishlist_name=lista de favoritos" -X POST http://localhost:8000/api/wishlist/
# The Users are restricted to a single Wishlist per account
# !curl -H "Authorization: JWT <KEY>" -d "wishlist_name=lista de favoritos" -X POST http://localhost:8000/api/wishlist/
# ### Products - Endpoint_allowed_methods(GET, POST, PATCH, DELETE)
# Using the ```/api/product/``` endpoint you're able to add products to your user's wishlist.
# !curl -H "Authorization: JWT <KEY>" -d "product_name=produto x&product_id=4bd442b1-4a7d-2475-be97-a7b22a08a024" -X POST http://localhost:8000/api/product/
# !curl -H "Authorization: JWT <KEY>" -d "product_name=produto x&product_id=4bd442b1-4a7d-2475-be97-a7b22a08a024" -X POST http://localhost:8000/api/product/
# The model ensures that there's only unique products on a wishlist
| Docs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# # Benchmark: Vern9 vs IRK8
#
# <ul id="top">
# <li><a href="#Loading-packages">
# Loading Packages</a></li>
#
# <li><a href="#Initial-value-problem:-Burrau-problem">
# Initial value problem: Burrau problem</a></li>
#
# <li><a href="#Integration-with-Vern9">
# Integration with Vern9</a></li>
#
# <li><a href="#Integration-with-IRK8-(Adaptive-step)">
# Integration with IRK8 (Adaptive-step)</a></li>
#
# <li><a href="#Error-in-energy">
# Error in energy</a></li>
#
# </ul>
#
# ## Loading packages
using Pkg
using IRK,LinearAlgebra,Plots,Statistics
using OrdinaryDiffEq
using DoubleFloats
# <a href="#top">Back to the top</a>
#
# ## Initial value problem: Burrau problem
# The vector $Gm = (G\, m_1, \ldots, G\, m_N)$
# Initial data for the Burrau three-body problem: u0 holds the state,
# Gm the products G*m_i (see the markdown above).
include("../examples/InitialBurrau.jl")
u0, Gm =InitialBurrau()
N = length(Gm)   # number of bodies
size(u0)
# Just to check that the output of the function NbodyODE has the expected type:
include("../examples/Nbody.jl")
du0=similar(u0)
NbodyODE!(du0,u0,Gm,0.)
@show size(du0);
# +
t0 = 0.
T = 63. # Final time
dt = 10^(-4) # time-step length
tspan=(t0,T)
# In-place N-body right-hand side, integrated over [t0, T].
prob=ODEProblem(NbodyODE!,u0,tspan,Gm);
# -
# <a href="#top">Back to the top</a>
#
#
# ## Integration with Vern9
# Recommend methods: Non-Stiff Problems
#
# - For high accuracy non-stiff solving ( BigFloat and tolerances like <1e-12 ), JuliaDiffeq recommend
# the Vern9 method
#
# Reference run: Vern9 with tight tolerances (the recommended high-accuracy
# non-stiff solver, per the markdown above).
sol0 = @time(solve(prob,Vern9(),abstol=1e-12,reltol=1e-12));
(length(sol0.t))   # number of saved time points
# ## Integration with IRK8 (Adaptive-step)
#
# myoutputs=true additionally returns the per-step iteration counts and the
# step sizes used (consumed below as iter1 / steps1).
(sol1,iter1,steps1)=@time(solve(prob,IRK8(),dt=dt,saveat=dt,
reltol=1e-6, abstol=1e-6,myoutputs=true,initial_interp=true));
(length(steps1),minimum(steps1[2:end]),mean(iter1),sum(iter1))
# +
#plot(sol1.t[2:end],iter1[2:end], title="Iteration numbers", legend=false)
# +
#plot(sol1.t,steps1, title="step sizes in the integration", legend=false)
# -
# ### Plots
# +
# Planar trajectories of the three bodies for both integrators.
# NOTE(review): u[2,1,j] / u[2,2,j] are taken here as the x/y coordinates of
# body j - confirm against the state layout defined in Nbody.jl.
bodylist = ["Body-1", "Body-2", "Body-3"]
ulist0 = sol0.u
tlist0 = sol0.t
pl1 = plot(title="Vern9",aspect_ratio=1)
for j = 1:3
xlist = map(u->u[2,1,j], ulist0)
ylist = map(u->u[2,2,j], ulist0)
pl1 = plot!(xlist,ylist, label = bodylist[j])
end
pl2 = plot(title="IRK8",aspect_ratio=1)
ulist1 = sol1.u[1:end]
tlist1 = sol1.t[1:end]
for j = 1:3
xlist = map(u->u[2,1,j], ulist1)
ylist = map(u->u[2,2,j], ulist1)
pl2 = plot!(xlist,ylist, label = bodylist[j])
end
#plot(pl)
plot(pl1,pl2,layout=2)
# -
plot(pl2)
savefig("Burrau.png")
# <a href="#top">Back to the top</a>
# ## Error in energy
#setprecision(BigFloat, 128)
#u0128, Gm =InitialBurrau(BigFloat)
# Re-evaluate the energy in Double64 precision so that the energy drift of
# the Float64 solutions can be measured without round-off dominating.
u0128, Gm =InitialBurrau(Double64)
(typeof(u0128),typeof(Gm))
# +
E0=NbodyEnergy(u0128,Gm)
end1=length(steps1)
ulist = sol0.u
tlist = sol0.t
# Relative energy error E(t)/E0 - 1 at every saved step of each solution.
EnergyErrors=[NbodyEnergy(Double64.(sol0.u[j]),Gm)/E0-1 for j in 1:length(tlist)]
ulist1 = sol1.u[1:end]
tlist1 = sol1.t[1:end]
EnergyErrors1=[NbodyEnergy(Double64.(sol1.u[j]),Gm)/E0-1 for j in 1:length(tlist1)]
ylimit1=-18
ylimit2=-6
# Side-by-side plots of log10 |relative energy error| for both integrators.
p1=plot(tlist[1:end],log10.(abs.(EnergyErrors)),
ylims=(ylimit1,ylimit2),
xlabel="t", title="Error in energy", label="Vern9")
p2=plot(tlist1[1:end],log10.(abs.(EnergyErrors1)),
ylims=(ylimit1,ylimit2),
label="IRK8")
plot(p1,p2,layout=2)
# -
(maximum(EnergyErrors),maximum(EnergyErrors1))
| Tutorials/.ipynb_checkpoints/Burrau-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Exploration
# **1. Find our experiment records**
# !rm data2/.DS_Store
from os import listdir
from os.path import isfile, join
# Collect every regular file sitting directly inside data2/ (one CSV per
# experiment); directories are skipped.
data_raw = []
for entry in listdir("data2"):
    if isfile(join("data2", entry)):
        data_raw.append(entry)
print(len(data_raw), "files:", str(data_raw))
# **2. Save into a dictionary**
import csv
import numpy as np
# Parse each experiment CSV into (time, temperature) arrays, truncated to
# the first 200 samples. The dict key is the filename minus its 4-char
# extension - presumably the experiment's initial temperature label.
data = {}
for experiment in data_raw:
    t, temp = [], []
    with open("data2/"+experiment) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            t.append(int(row[0])/1e3)   # column 0 divided by 1e3 (ms -> s, presumably; confirm units)
            temp.append(float(row[1]))  # column 1: temperature reading
    data[experiment[:-4]] = (np.array(t[:200]), np.array(temp[:200]))
# **3. Plot**
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Overlay every experiment's temperature curve and annotate each with the
# gap between its labelled temperature and the extreme value it reached.
plt.figure(1, figsize=(10, 8))
plt.title("Experimental Data")
plt.xlabel("Time (s)")
plt.ylabel("Temperature (°C)")
plt.xticks(np.arange(0, 160, step=20))
plt.xlim(0,150)
plt.ylim(10,61)
plt.yticks(np.arange(15, 61, step=5))
ordered_experiments = list(data.keys())
ordered_experiments.sort()
for experiment in ordered_experiments[::-1]:
    t, temp = data[experiment]
    plt.plot(t, temp, '--', label=r"$T_i$="+experiment)
    # Assumes runs labelled below 28 undershoot (compare to the minimum)
    # and runs above overshoot (compare to the maximum) - TODO confirm.
    if float(experiment) < 28:
        label = round(temp.min() - float(experiment), 2)
    else:
        label = round(float(experiment) - temp.max(), 2)
    plt.text(t[-1]+0.6,temp[-1]-0.3, r"$T$="+experiment+r", $\Delta$="+str(label))
    #plt.axhline(y=float(experiment), linestyle='--')
#plt.legend()
plt.show()
# -
# For each experiment, average the time spanned by a 7-sample window as it
# slides over samples 23..39; t_list lines up index-for-index with keys.
t_list = []
keys = []
for experiment in data.keys():
    keys.append(float(experiment))
    t, temp = data[experiment]
    t_ = [(t[x]-t[x-7]) for x in range(30,40)]
    # NOTE(review): the averaged windows end at sample 39, yet the print
    # reports t[30] and t[40]; t[40] lies just outside the window - confirm.
    print("time:", t[30], t[40])
    t_sum = sum(t_)/len(t_)
    t_list.append(t_sum)
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Fit labelled temperature (targets) as a linear function of the measured
# window durations (temps): target ~ coef * duration + intercept.
targets, temps = np.array(keys).reshape(-1, 1), np.array(t_list).reshape(-1, 1)
regr = LinearRegression()
regr.fit(temps, targets)
t_pred = regr.predict(temps)
plt.figure(2, figsize=(6, 6))
plt.scatter(temps, targets)
plt.plot(temps, t_pred, '--')
plt.show()
# regr.predict([[0]]) evaluates the fitted line at 0, i.e. the intercept.
print("Equation: temp =", regr.coef_[0], "?? +", regr.predict([[0]])[0])
print("Mean squared error: %.3f" % mean_squared_error(targets, t_pred))
print('Variance score: %.3f' % r2_score(targets, t_pred)) # 1 is perfect prediction
# -
| regression-Copy3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gecrooks/quantumflow-dev/blob/colab/notebooks/qf-tutorial-transpiling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zEYEt6mowqNl"
# # QuantumFlow Tutorial: Transpiling
# + [markdown] id="1BFW58kYsOpA"
# [QuantumFlow](https://https://github.com/gecrooks/quantumflow-dev) can convert circuits between a number of popular quantum libraries, such as `qiskit`, `braket`, and `cirq`. These additional libaries can be installed with the `[ext]` or `[dev]` options, e.g. `pip install quantumflow[ext]`
# + id="fDQE3APeC0tU" colab={"base_uri": "https://localhost:8080/"} outputId="7bd6dfe0-893e-41d0-b19d-a9426d1e86d7"
# !pip install -qqq git+https://github.com/gecrooks/quantumflow.git#egg=quantumflow[ext]
# + [markdown] id="fUZYiTJ3LdRX"
# Let's build a simple quantum circuit to make a 5-qubit GHZ (generalized Bell) state using IBM's `qiskit`.
# + colab={"base_uri": "https://localhost:8080/"} id="k1_BE9ATsDqi" outputId="b798d729-809d-4924-df69-f1d5e4a14701"
import qiskit
# Build a 5-qubit entangling circuit: a Hadamard on qubit 0 followed by a
# CNOT chain that propagates the superposition down the register.
qr = qiskit.QuantumRegister(5)
qiskit_circuit = qiskit.QuantumCircuit(qr)
qiskit_circuit.h(qr[0])
qiskit_circuit.cx(qr[0], qr[1])
qiskit_circuit.cx(qr[1], qr[2])
qiskit_circuit.cx(qr[2], qr[3])
qiskit_circuit.cx(qr[3], qr[4])
print(qiskit_circuit)
# + [markdown] id="r1SjOHsULjib"
# We can then convert this `qiskit` circuit to native `quantumflow`,
# + id="aJ10xHQwQqpf" colab={"base_uri": "https://localhost:8080/", "height": 130} outputId="2c8d6d18-7d41-44a2-be82-8fa5faa57a85"
import quantumflow as qf
# No output_format given: transpile to QuantumFlow's native representation.
qf_circuit = qf.transpile(qiskit_circuit)
qf_circuit
# + [markdown] id="9CNwk7OyLqD0"
# or to Google's `cirq`,
# + id="gwr77x22Qqdi" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="02ab33a5-70da-4fd8-a284-a1ab7a906a60"
# Same source circuit, emitted as a cirq circuit.
cirq_circuit = qf.transpile(qiskit_circuit, output_format="cirq")
cirq_circuit
# + [markdown] id="Zzf1nMj6LtgH"
# or Amazon's `braket`.
# + id="QbMemDfhQqE3" colab={"base_uri": "https://localhost:8080/"} outputId="497bee4e-477c-46d3-a0c0-4995e567f533"
# Same source circuit, emitted as a braket circuit.
braket_circuit = qf.transpile(qiskit_circuit, output_format="braket")
print(braket_circuit)
# + [markdown] id="R6WXcmQVL1Nb"
# We can also transpile to `quirk`. Click on the link below to load the circuit in the web editor.
# + id="n336uzkvRKOR" colab={"base_uri": "https://localhost:8080/"} outputId="4c5773da-c009-49f9-cb67-63637a5278b5"
# Transpile to Quirk's circuit description, then build a shareable URL.
quirk_circuit = qf.transpile(qiskit_circuit, output_format="quirk")
print(quirk_circuit)
# + id="KqlhwjdORKLT" colab={"base_uri": "https://localhost:8080/"} outputId="d65b8e39-87f0-4bb4-88dd-d49dfb329fe8"
from quantumflow import xquirk
# NOTE(review): escape="True" passes the *string* "True" (truthy) where a
# boolean looks intended; behavior is the same, but escape=True reads better.
print(xquirk.quirk_url(quirk_circuit, escape="True"))
# + [markdown] id="j6B-XoTaNSNH"
# There are a couple of complications to transpiling circuits between libraries. The first problem is the labelling of qubits. QuantumFlow allows a wide variety of labels, including integers, strings, and tuples of integers or strings. In general any hashable, sortable python type. (But you shouldn't mix qubit label types in the same circuit.) However, other libraries are less flexible. Your best bet is to stick to non-negative integers.
#
# The second complication is that different quantum libraries implement different sets of quantum gates. For instance, lets add a controlled-Hadamard gate to our initial qiskit circuit,
# + id="zyVnlpF0NR_e" colab={"base_uri": "https://localhost:8080/"} outputId="c770dcc0-8d68-41bf-f548-3d5c97d09b24"
# Append a controlled-Hadamard - a gate not every backend supports natively.
qiskit_circuit.ch(qr[3], qr[4])
print(qiskit_circuit)
# + [markdown] id="uskxicrQOZE5"
# and then transpile to `cirq`.
# + id="dd5gLCFXOfYG" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="8a3cb639-509b-44b8-d744-c1a28cf39766"
# cirq lacks a native CH gate, so the transpiler decomposes it into an
# equivalent sequence of gates cirq does understand.
cirq_circuit = qf.transpile(qiskit_circuit, output_format="cirq")
cirq_circuit
# + [markdown] id="6j_lkrm2Otev"
# Google's `cirq` does not implement the controlled-Hadamard gate (CH), so QuantumFlow translates the CH gate into an equivalant sequence of gates that `cirq` does understand.
#
# QuantumFlow contains a rich set of gates, so that transpiling to QF is generally a direct literal gate-for-gate translation.
#
# The collection of currently supported transpiled formats can be found in `qf.TRANSPILE_FORMATS`.
# + id="M4N2znjoOtLS" colab={"base_uri": "https://localhost:8080/"} outputId="e7571e00-fc3a-48d2-c8f6-57c1980a4dc9"
# Enumerate the formats qf.transpile() can target.
qf.TRANSPILE_FORMATS
| tutorial/qf-tutorial-transpile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''QST_BA'': conda)'
# name: python3
# ---
# # <NAME> MSBA '21
#
# # BA870 – Final Project #4 (June 2021)
#
# # Main Deliverables:
#
# - IPYNB with code and analysis
# - PDF of file
# - Presentation
# - PDF of Presentation
# - CSV file of data
#
# # Goals:
#
# - Comparing predictive power of SIC, NAICS, and GICS classification to Early and Late 2020 Returns
# - Determine most predictive industry classifier
# - Determining which industries were hit the hardest and why.
# - Determining which industries bounced back the most and why.
# - Calculate metrics and ratios
# - Determine most predictive metrics and ratios
#
# # Importing Packages
# +
# pip install statsmodels
# -
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import mstats
from sklearn.preprocessing import StandardScaler
# # Importing Data
# Load the raw inputs (contents inferred from filenames): 2020 returns,
# the industry classification schemes, firm fundamentals, and the GIC
# sub-industry name lookup.
returns2020 = pd.read_csv("returns2020.csv")
classifications = pd.read_csv("classifications.csv")
metrics = pd.read_csv("metrics.csv")
gic_sub_industry = pd.read_csv("gic_sub_industry.csv")
returns2020.head()
# # Merging Data
# Inner-join returns to classifications on ticker and persist the result.
stock_class_test = pd.merge(returns2020, classifications, on="tic", how="inner")
stock_class_test.to_csv(r'stock_class_test.csv', index = False, header=True)
# # Classification Exploratory Data Analysis
# +
# stock_class_test.info()
# stock_class_test.describe().T
# +
# Number of distinct groups under each classification scheme (the
# classification columns start at position 3 of the merged frame).
unique_group_counts = {
    scheme: len(stock_class_test[scheme].unique())
    for scheme in stock_class_test.iloc[:,3:].columns
}
classifier = pd.DataFrame(unique_group_counts, index=["classifiers"]).T
classifier.sort_values("classifiers")
# -
# # Creating Dummy Variables
# One-hot encode each classification scheme, concatenating the dummy
# columns onto the merged frame (one frame per scheme).
# NOTE: 'naiscs_sector' reproduces a misspelled column name from the data.
df_gic_sub_industry = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.gic_sub_industry)], axis=1)
df_gic_industry = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.gic_industry)], axis=1)
df_gic_group = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.gic_group)], axis=1)
df_gic_sector = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.gic_sector)], axis=1)
df_naics_industry = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.naics_industry)], axis=1)
df_naics_group = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.naics_group)], axis=1)
df_naiscs_sector = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.naiscs_sector)], axis=1)
df_sic_industry = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.sic_industry)], axis=1)
df_sic_group = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.sic_group)], axis=1)
df_sic_sector = pd.concat([stock_class_test,pd.get_dummies(stock_class_test.sic_sector)], axis=1)
# # Checking Adj R^2
# The best industry classification will have the highest predictive power. We will look at the Adj R^2 to see if more granular industry classification systems help or hurt the model.
#
# The Adj R^2 seems to differ depending if it is RetEarly2020 or RetLate2020 thus I will run both
# +
# Fit one OLS per classification scheme. Column 1 of each frame is the
# early-2020 return, column 2 the late-2020 return; columns 13+ are that
# scheme's dummy regressors (stock_class_test has 13 base columns). No
# explicit intercept is added: the dummy set of a single categorical
# already spans the constant.
early_gic_sub_industry = sm.OLS(df_gic_sub_industry.iloc[:,1:2],df_gic_sub_industry.iloc[:,13:]).fit()
early_gic_industry = sm.OLS(df_gic_industry.iloc[:,1:2],df_gic_industry.iloc[:,13:]).fit()
early_gic_group = sm.OLS(df_gic_group.iloc[:,1:2],df_gic_group.iloc[:,13:]).fit()
early_gic_sector = sm.OLS(df_gic_sector.iloc[:,1:2],df_gic_sector.iloc[:,13:]).fit()
early_naics_industry = sm.OLS(df_naics_industry.iloc[:,1:2],df_naics_industry.iloc[:,13:]).fit()
early_naics_group = sm.OLS(df_naics_group.iloc[:,1:2],df_naics_group.iloc[:,13:]).fit()
early_naics_sector = sm.OLS(df_naiscs_sector.iloc[:,1:2],df_naiscs_sector.iloc[:,13:]).fit()
early_sic_industry = sm.OLS(df_sic_industry.iloc[:,1:2],df_sic_industry.iloc[:,13:]).fit()
early_sic_group = sm.OLS(df_sic_group.iloc[:,1:2],df_sic_group.iloc[:,13:]).fit()
early_sic_sector = sm.OLS(df_sic_sector.iloc[:,1:2],df_sic_sector.iloc[:,13:]).fit()
late_gic_sub_industry = sm.OLS(df_gic_sub_industry.iloc[:,2:3],df_gic_sub_industry.iloc[:,13:]).fit()
late_gic_industry = sm.OLS(df_gic_industry.iloc[:,2:3],df_gic_industry.iloc[:,13:]).fit()
late_gic_group = sm.OLS(df_gic_group.iloc[:,2:3],df_gic_group.iloc[:,13:]).fit()
late_gic_sector = sm.OLS(df_gic_sector.iloc[:,2:3],df_gic_sector.iloc[:,13:]).fit()
late_naics_industry = sm.OLS(df_naics_industry.iloc[:,2:3],df_naics_industry.iloc[:,13:]).fit()
late_naics_group = sm.OLS(df_naics_group.iloc[:,2:3],df_naics_group.iloc[:,13:]).fit()
late_naics_sector = sm.OLS(df_naiscs_sector.iloc[:,2:3],df_naiscs_sector.iloc[:,13:]).fit()
late_sic_industry = sm.OLS(df_sic_industry.iloc[:,2:3],df_sic_industry.iloc[:,13:]).fit()
late_sic_group = sm.OLS(df_sic_group.iloc[:,2:3],df_sic_group.iloc[:,13:]).fit()
late_sic_sector = sm.OLS(df_sic_sector.iloc[:,2:3],df_sic_sector.iloc[:,13:]).fit()
# +
# Pull the adjusted R^2 out of every fitted model, in the same scheme
# order as the `classifier` frame built earlier, then average the early
# and late values per scheme.
early_fits = [
    early_gic_sub_industry, early_gic_industry, early_gic_group,
    early_gic_sector, early_naics_industry, early_naics_group,
    early_naics_sector, early_sic_industry, early_sic_group,
    early_sic_sector,
]
late_fits = [
    late_gic_sub_industry, late_gic_industry, late_gic_group,
    late_gic_sector, late_naics_industry, late_naics_group,
    late_naics_sector, late_sic_industry, late_sic_group,
    late_sic_sector,
]
adjR2_early = [fit.rsquared_adj for fit in early_fits]
adjR2_late = [fit.rsquared_adj for fit in late_fits]
classifier["adjR2_early"] = adjR2_early
classifier["adjR2_late"] = adjR2_late
classifier["adjR2_mean"] = (classifier.adjR2_early + classifier.adjR2_late)/2
# -
classifier.sort_values("adjR2_mean", ascending = False)
# # Analyzing Adj R^2 results
#
# For Early 2020 Returns the top three were industry classifiers were:
# - gic_sub_industry
# - gic_industry
# - gic_group
#
# Very interesting to see as the GIS classifier got more granular the better it performed
#
# For Late 2020 Returns the top three were industry classifiers were:
# - naics_industry
# - gic_sub_industry
# - sic_industry
#
# Very intersting to see Returns increase closer to normal the more granual classifiers started to perform better.
#
# # Deciding which classifiers is best
# Although naics_industry was the highest for Late 2020 Returns which covered 9 months of the year, gic_sub_industry was more robust for all 2020 returns place highest for Early 2020 Returns and second highest Late 2020 Returns.
#
# Additionally when looking at mean Adj R^2 for early and late Returns gic_sub_industry was the highest.
#
# The industry classfier of choice is gic_sub_industry
#
# # Linear Regression on GIC Sub Industry Classifier
#
#
# +
# Attach the GIC sub-industry names to the ticker/returns columns, one-hot
# encode the names, and persist the resulting frame used for all the
# classification regressions below.
stock_class = pd.merge(stock_class_test.iloc[:,:4], gic_sub_industry, on="gic_sub_industry", how="inner")
stock_class = pd.concat([stock_class, pd.get_dummies(stock_class.gic_sub_industry_name)], axis =1)
stock_class.to_csv(r'stock_class.csv', index = False, header=True)
stock_class.sample(3)
# -
# # Determining Industries that were Hit the Hardest
# ## Early 2020 Returns
# +
# Regressors (independent variables): the sub-industry dummy columns,
# i.e. everything from column 5 on.
X = stock_class.iloc[:,5:]
# Response (dependent variable): early-2020 returns (column 1).
Y = stock_class.iloc[:,1:2]
# Fit Model
early_class = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(early_class.summary(title="Early 2020 Returns with GIC Sub Industry"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
early_class_results = pd.read_html(early_class.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# Coefficients significant at the 5% level, largest first.
early_class_results.loc[early_class_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Early 2020 Return Class Explanation**
#
# #### Adj R^2: **0.156**
#
# The industries hit the hardest at the 95% statisitcal signifiant level were:
# - Oil & Gas Drilling
# - Oil & Gas Exploration & Production
# - Oil & Gas Equipment & Services
# - Department Stores
# - Hotel & Resort REITs
# - Airlines
# - Mortgage REITs
# - Casinos & Gaming
# - Hotels, Resorts & Cruise Lines
# - Homefurnishing Retail
#
# This makes sense seeing as people stopped drive and demand for gas declined, people stopped going out to shop, travel and rereational activites were haulted, and many people were behind on their rent.
#
#
# The industries least affected at the 95% statisitcal signifiant level were:
# - Health Care Services
# - Life Sciences Tools & Services
# - Electric Utilities
# - Multi-Utilities
# - Health Care Supplies
# - Specialized REITs
# - Application Software
# - Packaged Foods & Meats
# - Biotechnology
# - Health Care Equipment
#
# This makes sense seeing as mask and other health care and related tools were high in demand, everyone was in their homes using alot of utlities, and people need software and packaged good to work from home.
#
# # Determining Industries that Bounced Back the Most
# ## Late 2020 Returns
# +
# Regressors (independent variables): the sub-industry dummy columns.
X = stock_class.iloc[:,5:]
# Response (dependent variable): late-2020 returns (column 2).
Y = stock_class.iloc[:,2:3]
# Fit Model
late_class = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(late_class.summary(title="Late 2020 Returns GIC Sub Industry"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
late_class_results = pd.read_html(late_class.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# Coefficients significant at the 5% level, largest first.
late_class_results.loc[late_class_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Late 2020 Return Class Explanation**
#
# #### Adj R^2: 0.179
#
#
# The industries bounced back the most at the 95% statisitcal signifiant level were:
# - Housewares & Specialties,
# - Home Improvement Retail
# - Automobile Manufacturers
# - Homefurnishing Retail
# - Heavy Electrical Equipment
# - Silver
# - Internet & Direct Marketing Retail
# - Computer & Electronics Retail
# - Home Furnishings
# - Copper
#
# Putting money from stimulus checks into home and auto makes sense espcially because of the low interest rates. Additonally focus on electrical, computer, and internet marketing makes sense with more people online. I can only attribute the rise in silver and copper becasue the are materials needed for tech and appliances
#
# The industries bounced back the least at the 95% statisitcal signifiant level were:
# - Asset Management & Custody Banks
# - Health Care Equipment
# - Financial Exchanges & Data
# - Environmental & Facilities Services
# - Aerospace & Defense
# - Research & Consulting Services
# - Thrifts & Mortgage Finance
# - Retail REITs
# - Regional Banks
# - Pharmaceuticals
#
# Some of these industries such as health care were already high and stayed high. With businesses suffering it was hard to justify paying for consultants, facility improvements, and other related business expenses. Some of the other categories I don't have great explaination for.
#
#
# # Evaluating Metrics
# +
metrics["book_value"] = metrics.book_value_share * metrics.shares_out # Book Value
metrics["acid_ratio"] = (metrics.current_assets - metrics.inventory)/metrics.current_liabilities # Acid (quick) ratio
metrics["current_ratio"] = metrics.current_assets / metrics.current_liabilities # Current Ratio
metrics["coverage_ratio"] = metrics.interest_exp / metrics.ebit # NOTE(review): interest/EBIT is the *reciprocal* of the standard interest coverage ratio (EBIT/interest) - confirm intent
metrics['bep'] = metrics.ebit / metrics.total_assets # Basic Earnings Power Ratio
metrics["inventory_turnover"] = (metrics.inventory/365) / metrics.cogs # NOTE(review): standard turnover is COGS/inventory; as written this is (inventory/365)/COGS - confirm intent
metrics["dso"] = metrics.receivables / (metrics.sales/365) # Days Sales Outstanding
metrics["roa"] = metrics.net_income / metrics.total_assets # Return on Assets
metrics["roe"] = metrics.net_income / metrics.total_equity # Return on Equity
metrics['debit_asset_ratio'] = metrics.total_liabilities / metrics.total_assets # Debt to Asset Ratio
metrics["debit_equity_ratio"] = metrics.total_liabilities / metrics.total_equity # Debt to Equity Ratio
metrics["book_price"] = metrics.total_equity / metrics.mkt_value # Book-to-market (inverse of price-to-book)
metrics["equity_price"] = metrics.net_income / metrics.mkt_value # Earnings yield (inverse of P/E)
metrics["ebit_price"] = metrics.ebit / metrics.mkt_value # EBIT yield (inverse of price/EBIT)
metrics["ebitda_price"] = metrics.ebitda / metrics.mkt_value # EBITDA yield (inverse of price/EBITDA)
metrics["sales_price"] = metrics.sales / metrics.mkt_value # Sales-to-price (inverse of price/sales)
metrics["cash_assets"] = metrics.cash / metrics.total_assets # Cash as a share of total assets
metrics["cash_current_liab"] = metrics.cash / metrics.current_liabilities # Cash ratio
metrics["cash_total_liab"] = metrics.cash / metrics.total_liabilities # Cash over total liabilities
metrics["cash_interest"] = metrics.interest_exp / metrics.cash # Interest expense relative to cash on hand
metrics["fixed_total"] = metrics.ppe / metrics.total_assets # Fixed-asset intensity
metrics["fixed_employees"] = metrics.ppe / metrics.employees # Fixed assets per employee
metrics["working_coverage"] = metrics.working_cap * metrics.coverage_ratio # Working capital scaled by the coverage_ratio defined above
# -
# # Metric Exploratory Data Analysis & Cleaning
metrics.info()
# Visualize missingness, then drop any row with a NaN in any column.
plt.figure(figsize=(10, 8))
plt.title("Null values")
sns.heatmap(metrics.isnull(),yticklabels=False,cbar=False)
plt.show()
metrics.dropna(axis =0, inplace = True)
metrics.describe().T
# Box plot every numeric column on an 11-wide grid.
# NOTE(review): the grid has len(numeric_col)//11 rows, so any columns past
# (len//11)*11 are silently never plotted - confirm that is acceptable.
numeric_col = list(metrics.describe(include=[np.number]).columns)
metrics_numeric = metrics[numeric_col]
fig, axes = plt.subplots(len(numeric_col)//11,11, figsize=(15, 12))
i = 0
for triaxis in axes:
    for axis in triaxis:
        metrics_numeric.boxplot(column = numeric_col[i], ax=axis)
        i = i+1
# After looking at the box-and-whisker plots, many variables have outliers which need to be dealt with. I will be winsorizing the outliers to the 99.5th percentile to still retain their large values without throwing off the models.
# ## Metrics that need Winsorising
# +
def mstats_winsorize(s):
    """Winsorize one column, clipping the extreme 0.5% on each tail."""
    return mstats.winsorize(s, limits=[0.005, 0.005])
def winsorize(df):
    """Return a winsorized copy of *df*, column by column.

    ``DataFrame.apply`` already builds a brand-new frame and
    ``mstats.winsorize`` does not modify its input, so the explicit
    ``df.copy()`` the original version made was redundant work on a
    potentially large frame and has been dropped. The input ``df`` is
    left untouched either way.
    """
    return df.apply(mstats_winsorize, axis=0)
# Winsorize every column, then redraw the box-plot grid on the clipped data.
metrics_win = winsorize(metrics)
# infinity and nonnumeric values not able to be plotted
numeric_col = list(metrics_win.describe(include=[np.number]).columns)
metrics_win_numeric = metrics_win[numeric_col]
fig, axes = plt.subplots(len(numeric_col)//11,11, figsize=(15, 12))
i = 0
for triaxis in axes:
    for axis in triaxis:
        metrics_win_numeric.boxplot(column = numeric_col[i], ax=axis)
        i = i+1
# -
# The box plots look significantly better outliers still persist but not at the level of before and most values have centralized.
# # Correlation Matrix
# +
# Join winsorized fundamentals to the returns on ticker and persist; then
# split the columns into raw metrics vs derived ratios so each group gets
# its own correlation heatmap below.
stock_metrics = pd.merge(returns2020, metrics_win, on="tic", how="inner")
stock_metrics.to_csv(r'stock_metrics.csv', index = False, header=True)
corr_metrics = stock_metrics[['RetEarly2020', 'RetLate2020', 'current_assets', 'total_assets',
'current_liabilities', 'total_liabilities', 'total_equity', 'mkt_value',
'price_close', 'shares_out', 'book_value_share', 'ppe', 'employees',
'cash', 'receivables', 'sales', 'cogs', 'inventory', 'ebit', 'ebitda',
'interest_exp', 'working_cap', 'net_income',]]
corr_ratios = stock_metrics[['RetEarly2020', 'RetLate2020', 'book_value', 'acid_ratio',
'current_ratio', 'coverage_ratio', 'bep', 'inventory_turnover', 'dso',
'roa', 'roe', 'debit_asset_ratio', 'debit_equity_ratio', 'book_price',
'equity_price', 'ebit_price', 'ebitda_price', 'sales_price',
'cash_assets', 'cash_current_liab', 'cash_total_liab', 'cash_interest',
'fixed_total', 'fixed_employees', 'working_coverage']]
# -
len(corr_ratios.columns)
corr_matrix = sns.heatmap(corr_metrics.corr(), vmin=-1, vmax=1, annot=False)
# Give a title to the heatmap. Pad defines the distance of the title from the top of the heatmap.
corr_matrix.set_title('Correlation Matrix between Metrics and Returns', fontdict={'fontsize':12}, pad=12);
# plt.figsize=(15, 12)
# Many of the metrics are based off values of the balance sheet which, as the name insinuates, must ultimately balance. Therefore, many of the values are highly correlated.
#
corr_matrix = sns.heatmap(corr_ratios.corr(), vmin=-1, vmax=1, annot=False)
# Give a title to the heatmap. Pad defines the distance of the title from the top of the heatmap.
corr_matrix.set_title('Correlation Matrix between Ratios and Returns', fontdict={'fontsize':12}, pad=12);
# plt.figsize=(15, 12)
# Many of the ratios share compents or explain similar things such as the acid ratio_and current_ratio. It is comforting to see there is a mix of highly correlated values and uncorrelated values.
# # Linear Model on Metrics
# ## Early 2020 Returns
# +
# Regressors (independent variables): every metric/ratio column, plus an
# explicit intercept via add_constant.
X = stock_metrics.iloc[:,3:]
X = sm.add_constant(X)
# Response (dependent variable): early-2020 returns (column 1).
Y = stock_metrics.iloc[:,1:2]
# Fit Model
early_metrics = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(early_metrics.summary(title="Early 2020 Returns with Metrics"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
early_metrics_results = pd.read_html(early_metrics.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# # Determining the most predictive Metrics for Early 2020 Return
# Coefficients significant at the 5% level, largest first.
early_metrics_results.loc[early_metrics_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Early 2020 Return Metric Explanation**
#
# #### Adj R^2: 0.182
#
# The top positive metrics at the 95% statisitcal signifiant level were:
# - sales_price
# - price_close
# - net_income
#
# The top negative metrics at the 95% statisitcal signifiant level were:
# - ebit_price
# - ebitda_price
# - equity_price
# - debit_asset_ratio
# - const
# - book_price
# - roe
# - ebitda
# ## Late 2020 Returns
# +
# Regressors (independent variables): every metric/ratio column, plus an
# explicit intercept via add_constant.
X = stock_metrics.iloc[:,3:]
X = sm.add_constant(X)
# Response (dependent variable): late-2020 returns (column 2).
Y = stock_metrics.iloc[:,2:3]
# Fit Model
late_metrics = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(late_metrics.summary(title="Late 2020 Returns with Metrics"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
late_metrics_results = pd.read_html(late_metrics.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# # Determining the most predictive Metrics for Late 2020 Return
# Coefficients significant at the 5% level, largest first.
late_metrics_results.loc[late_metrics_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Late 2020 Return Metric Explanation**
#
# #### Adj R^2: 0.107
#
# The top positive metrics at the 95% statisitcal signifiant level were:
# - ebitda_price
# - debit_asset_ratio
# - cash_assets
# - cash_total_liab
# - book_price
# - roe
# - sales_price
#
# The top negative metrics at the 95% statisitcal signifiant level were:
# - ebit_price
# - cash_current_liab
# - book_value_share
# ## **2020 Metric Returns Explained**
#
# The top positive metric at the 95% statisitcal significant level is: **sales_price**
# - Price to Sale was a postive statistcally significant variable for both Early and Late 2020 Returns. This makes sense because Sales is a the top of the balance sheet. High top line health on the balance sheet means money is coming in the company and connected with a high price, is a great signals for investors and subsequently postive stock returns.
#
#
# The top negative metrics at the 95% statisitcal signifiant level is: **ebit_price**
# - Price to EBIT was a negative statistcally significant variable for both Early and Late 2020 Returns. This makes sense because EBIT is low on the balance sheet just above interest and taxes. Low bottom line health on the balnce sheet means there isn't much to pay off Interest Expenses, Taxes, or Working Capital. This would mean low new income and stock holders equity, and combined with a low price, subsequently negative stock retutn
#
#
# # Linear Model on Top Metrics and Industry Classifier
# Combine the classifier dummies with the metrics (dropping the duplicate
# return columns from stock_metrics) into the final modelling frame.
stock = pd.merge(stock_class, stock_metrics.drop(["RetEarly2020","RetLate2020"], axis =1), on='tic', how="inner")
stock.to_csv(r'stock.csv', index = False, header=True)
stock.sample(3)
# ## Early 2020 Returns
# +
# Regressors (independent variables): dummies plus metrics from column 5
# on, with an explicit intercept via add_constant.
X = stock.iloc[:,5:]
X = sm.add_constant(X)
# Response (dependent variable): early-2020 returns (column 1).
Y = stock.iloc[:,1:2]
# Fit Model
early_stock = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(early_stock.summary(title="Early 2020 Returns with Metrics and Classifier"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
early_stock_results = pd.read_html(early_stock.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# ## Late 2020 Returns
# +
# Regressors (independent variables): dummies plus metrics from column 5
# on, with an explicit intercept via add_constant.
X = stock.iloc[:,5:]
X = sm.add_constant(X)
# Response (dependent variable): late-2020 returns (column 2).
Y = stock.iloc[:,2:3]
# Fit Model
late_stock = sm.OLS(Y, X).fit()
# # Predict Model
# predictions = model.predict(X)
# Summary statistics
print(late_stock.summary(title="Late 2020 Returns with Metrics and Classifier"))
# Parse the coefficient table out of the HTML summary into a DataFrame.
late_stock_results = pd.read_html(late_stock.summary().tables[1].as_html(),header=0,index_col=0)[0]
# -
# # Final Results
# Early-2020 coefficients significant at the 5% level, largest first.
early_stock_results.loc[early_stock_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Early 2020 Return Explanation**
#
# #### Adj R^2: 0.218
# The top positve or least negative variables at the 95% statisitcal signifiant level were:
# - Integrated Telecommunication Services
# - bep
# - Health Care Distributors
# - Food Retail
# - Water Utilities
#
# Telecommunications, Health Care Distributors, and Water Utilites makes sense. I am surprised Food Retail scored so high.
#
# The top negative variables at the 95% statisitcal signifiant level were:
# - ebit_price
# - Oil & Gas Drilling
# - roa
# - Oil & Gas Exploration & Production
# - Oil & Gas Equipment & Services
#
# All of these variables were the top statisitcal signifiant negative variables in their respective models (metric and classification). Intersting to see more clasification than metrics. Additionally I must point out EBIT to Price persisted in magnitude and statisitcal signifiant unlike Sales to Price. Also iteresting to see Return on Assets (roa) showing up.
# Late-2020 coefficients significant at the 5% level, largest first.
late_stock_results.loc[late_stock_results['P>|t|']<0.05,].sort_values("coef", ascending=False)
# ### **Late 2020 Return Explanation**
#
# #### Adj R^2: 0.258
#
#
# The top positve variables at the 95% statisitcal signifiant level were:
# - Housewares & Specialties
# - Automobile Manufacturers
# - Home Improvement Retail
# - Life & Health Insurance
# - ebitda_price
#
# Intersting to see all top positve variables were classification. This means a big indicator of success in late 2020 was the industry.
#
# The top negative variables or least positive at the 95% statisitcal signifiant level were:
# - ebit_price
# - Integrated Telecommunication Services
# - bep
# - Gas Utilities
# - Commerical Printing
#
# Intersting to see all top negative variables were metrics. EBIT to Price remains an important indicator even for late 2020 returns. Basic Earning Power was a new predictor of low returns for late 2020. Telecommunications and Gas Utilities makes sense but im not so sure about Commerical Printing
#
#
| Final Project/BA 870 Final Project - Chiebuka Onwuzurike.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''py3x-tf2x'': conda)'
# name: python385jvsc74a57bd0c6e04f7a5ac130eeb20013286f1255cff64e2a49ad6e6267f06a989cb4e99f9c
# ---
# ------------
# ------------
# # *Top Venue Categories Clusters in Toronto*
# ### Developed by **<NAME>**
# ### Mini Project - **Applied Data Science Capstone**
# ------------
# ------------
# ## Project Description
# * Popular Venue Categories are determined by the frequency of venues of the same venue category identified within a given Neighborhood. For example:
# >**Starbucks, Barista, Espresso House** are all of the venue category, *Coffee Shop*. If all these venues are identified within a Neighborhood in Toronto then it adds to the frequency of the venue category, *Coffee Shop*.
#
# * Top 10 Venue Categories are identified for the chosen Neighborhoods individually.
# * Neighborhoods are clustered based on frequency of venue categories across each Neighborhood.
# # PART 1 - Data Extraction
# ## Import neccessary libraries
import numpy as np
import pandas as pd
import lxml
import lxml.etree
# ## Building Dataframe from webpage
# Fetch every HTML <table> on the Wikipedia page; read_html returns a list of
# DataFrames and the postal-code table is the first one.
url = 'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'
data = pd.read_html(url)
df = data[0]
df.head()
# ## Filtering Dataframe to remove 'Not Assigned' Boroughs
# Keep only rows with an assigned borough, then verify no 'Not assigned'
# values remain in either column before resetting the index.
filter_df = df.loc[ df['Borough'] != 'Not assigned']
print(f"Not assigned Boroughs: {(filter_df.Borough == 'Not assigned').sum()}")
print(f"Not assigned Neighbourhoods: {(filter_df.Neighbourhood == 'Not assigned').sum()}")
filter_df.reset_index(drop=True, inplace=True)
filter_df.head()
print(f"Shape of filtered Dataframe: {filter_df.shape}")
# Save filter_df as pickle file
# --------
# --------
# # Save *filter_df* as pickle file - *venues.pkl*
# # PART 2 to PART 3.2 in file - *selectBoroughs.py*
# --------
# --------
# # PART 2 - Geospatial data (Latitudes & Longitudes) Extraction
# ## Import neccessary libraries
# #!pip install geocoder --quiet
import geocoder
from geopy.geocoders import Nominatim
# ## Using Postal codes for Latitudes & Longitudes
# +
# Lists of coordinates (kept aligned with filter_df's rows) and of postal
# codes that could not be geocoded.
lat = []
long = []
nones = []
# Initialize the geolocator (Nominatim requires a distinct user_agent).
geolocator = Nominatim(user_agent = 'toronto_zipcodes_M')
# Geocode every postal code.
# BUGFIX: previously a failed lookup appended nothing to lat/long, so the
# lists fell out of alignment with filter_df's rows and the later column
# assignment raised a ValueError (and `nones` got two entries per failure).
# A failed lookup now records None placeholders, keeping lat/long the same
# length as filter_df, and `nones` tracks one failed postal code each.
for code in filter_df['Postal Code']:
    location = geolocator.geocode(code)
    if location is None:
        nones.append(code)
        lat.append(None)
        long.append(None)
    else:
        lat.append(location.latitude)
        long.append(location.longitude)
# -
# Sanity check: lat and long must each hold one entry per row of filter_df
# for the column assignments below to succeed.
print(f"Total non-None values: {len(lat)}, {len(long)}")
print(f"Total None values: {len(nones)}")
# ## Adding Latitudes & Longitudes values in filter_df
filter_df['Latitude'] = lat
filter_df['Longitude'] = long
filter_df.head()
# # PART-3 Explore & Cluster Neighbourhoods
# ## PART-3.1 Explore Neighbourhoods in Boroughs ending with 'Toronto'
# ### Import necessary libraries
import re
import folium
import requests
# ### Apply *regex* function to extract Borough names ending with 'Toronto'
# +
# Scan every borough name and keep those ending in 'Toronto'
# (e.g. 'Downtown Toronto', 'East Toronto').
string = filter_df['Borough'].values
regex = r"\w*\sToronto"
borough = []
none_vals = []
for row in string:
    val = re.findall(regex, row)
    if val == []:
        none_vals.append(val)
    else:
        borough.append(val)
# -
# np.unique flattens the list of single-match lists and de-duplicates it.
chosen_boroughs = np.unique(np.array(borough))
print('Boroughs chosen for exploring venues:\n',chosen_boroughs)
# ### Subset Dataframe with chosen Boroughs for exploring
# +
explore_df = filter_df.loc[filter_df['Borough'].isin(chosen_boroughs)].reset_index(drop=True)
#change of spelling from 'Neighbourhood' to 'Neighborhood'
cols = {'Postal Code':'Postal Code', 'Borough':'Borough', 'Neighbourhood':'Neighborhood', 'Latitude':'Latitude', 'Longitude':'Longitude'}
explore_df.rename(columns = cols, inplace=True)
explore_df.head()
# -
print(f"Total unique Boroughs: {explore_df.Borough.nunique()}\nTotal unique Neighborhoods: {explore_df.Neighborhood.nunique()}")
# ### Map chosen Boroughs and their Neighbourhoods in Toronto
# Geocode the city itself to centre the folium map.
# NOTE(review): geocode() can return None on failure; this assumes success.
address = 'Toronto, Ontario, Canada'
geolocator = Nominatim(user_agent='Toronto_explorer')
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print(f"Coordinates of Toronto are: {latitude}, {longitude}")
# +
toronto_map = folium.Map(location= [latitude, longitude], zoom_start=10)
#add Neighborhood markers on toronto_map
# One circle marker per neighborhood, labelled "<borough>, <neighborhood>".
for lat, long, bor, hood in zip(explore_df['Latitude'], explore_df['Longitude'],explore_df['Borough'],explore_df['Neighborhood']):
    label = folium.Popup('{}, {}'.format(bor, hood), parse_html=True)
    folium.CircleMarker([lat, long],
                        radius=4,
                        popup=label,
                        color='cyan',
                        fill=True,
                        fill_color='blue',
                        fill_opacity=0.6,
                        parse_html=False).add_to(toronto_map)
toronto_map
# -
# ---------
# ---------
# # API details in file - *secret.py*
# # Function (getNearbyVenues) definition in file - *nearbyVenues.py*
# # Extract Venues, Encode & Group in file - *hoodVenues.py*
# # Export *toronto_grouped* dataframe as pickle file - *venueCat_grouped.pkl*
# ----------
# ----------
# ## PART-3.2 Build & Analyze nearby venues in Neighbourhoods
# ### Build nearby venues dataframe
#Credentials
CLIENT_ID = 'CXC1D1CNWMCS54XHC3M0VLPRLBCPQQMID0OZC04Z0VYTMSAU'
CLIENT_SECRET = '<KEY>'
VERSION = '20201201'
LIMIT = 100
# ### Using defined-function from *Neighborhoods-New-York* Lab for exploring Neighborhoods in chosen Boroughs
# #### * increased radius from 500 to 1000 of function getNearbyVenues, hence the number of Venues could be more
def getNearbyVenues(names, latitudes, longitudes, radius=1000):
    """Query the Foursquare explore endpoint for each neighborhood.

    Iterates over parallel sequences of neighborhood names and coordinates,
    requests up to LIMIT venues within `radius` metres of each point, and
    returns one DataFrame with a row per venue (neighborhood, coordinates,
    venue name, venue coordinates, venue category).  Uses the module-level
    CLIENT_ID / CLIENT_SECRET / VERSION / LIMIT credentials.
    """
    all_rows = []
    for hood_name, hood_lat, hood_long in zip(names, latitudes, longitudes):
        print(hood_name)
        # Build the API request URL for this neighborhood.
        url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
            CLIENT_ID,
            CLIENT_SECRET,
            VERSION,
            hood_lat,
            hood_long,
            radius,
            LIMIT)
        # Issue the GET request and drill into the recommended items.
        items = requests.get(url).json()["response"]['groups'][0]['items']
        # Keep only the relevant fields, one row per venue.
        for item in items:
            venue = item['venue']
            all_rows.append((
                hood_name,
                hood_lat,
                hood_long,
                venue['name'],
                venue['location']['lat'],
                venue['location']['lng'],
                venue['categories'][0]['name']))
    nearby_venues = pd.DataFrame(all_rows)
    nearby_venues.columns = ['Neighborhood',
                             'Neighborhood Latitude',
                             'Neighborhood Longitude',
                             'Venue',
                             'Venue Latitude',
                             'Venue Longitude',
                             'Venue Category']
    return nearby_venues
toronto_venues = getNearbyVenues(explore_df.Neighborhood, explore_df.Latitude, explore_df.Longitude)
print(f"Total Rows:{toronto_venues.shape[0]}, Total Columns:{toronto_venues.shape[1]}")
toronto_venues.head()
print(f"Total unique Venue categories: {toronto_venues['Venue Category'].nunique()}")
# #### Important Assumption for Analysis:
#
# * Based on .nunique method, there are _'N'_ unique venue categories
# * BUT, one of the Venue Category is _'Neighborhood'_.
# * THIS makes .groupby('Neighborhood') after onehot-encoding error-prone ---> _(ValueError: Grouper for 'Neighborhood' not 1-dimensional)_
# * Hence, excluding _'Neighborhood'_ from Venue categories
# * ASSUMED Total unique Venue categories = _'N-1'_
print("Venues with 'Neighborhood' Venue Category:\n")
toronto_venues.loc[toronto_venues['Venue Category'] == 'Neighborhood']
# ### Analyze nearby venues dataframe
# #### one-hot encode Venue Category for statistics by Neighbourhood
# get_dummies with empty prefixes keeps the raw category names as columns.
encode_df = pd.get_dummies(toronto_venues[['Venue Category']], prefix='', prefix_sep='')
encode_df.head()
# drop the Venue category 'Neighborhood'
# (it would collide with the grouping column added below)
encode_df.drop('Neighborhood', axis=1, inplace=True)
# +
# add 'Neighborhood' columns with Neighborhood names
encode_df['Neighborhood'] = toronto_venues[['Neighborhood']]
# move Neighborhood column to the '0' position
fixed_columns = ['Neighborhood'] + list(encode_df.columns[encode_df.columns!='Neighborhood'])
encode_df = encode_df[fixed_columns]
encode_df.head()
# -
encode_df.shape
# Mean of the one-hot columns per neighborhood = frequency of each category.
toronto_grouped = encode_df.groupby('Neighborhood').mean().reset_index()
toronto_grouped
# ---------
# ---------
# ## *Import venueCat_grouped.pkl file - venueCat_grouped dataframe*
# ## *Automate and define Function for commonly visited venues in file - popularVenues.py*
# ---------
# ---------
# ### Extract Top 10 venues from each Neighborhood (_using code from Neighborhoods-New-York Lab_)
def return_most_common_venues(row, num_top_venues):
    """Return labels of the ``num_top_venues`` largest entries of *row*.

    The first element of *row* (the neighborhood name) is skipped; the
    remaining category frequencies are ranked in descending order and the
    index labels of the top entries are returned as a numpy array.
    """
    frequencies = row.iloc[1:].sort_values(ascending=False)
    return frequencies.index.values[:num_top_venues]
# +
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
    try:
        # Ordinal suffixes 1st, 2nd, 3rd come from the indicators list...
        columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
    except:
        # ...and the IndexError past indicators[2] falls back to 'th'.
        columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighborhoods_top_venues = pd.DataFrame(columns=columns)
neighborhoods_top_venues['Neighborhood'] = toronto_grouped['Neighborhood']
# Fill each row with that neighborhood's top-10 venue categories.
for ind in np.arange(toronto_grouped.shape[0]):
    neighborhoods_top_venues.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
neighborhoods_top_venues.head()
# -
# -
# ----------
# ----------
# ## Import *popularVenues.py* & collect *topVenues dataframe*
# ## Cluster & Map *topVenues*
# ----------
# ----------
# ## PART-3.3 Clustering Neighborhoods by neighborhoods_top_venues dataframe
# ### Import necessary libraries
from sklearn.cluster import KMeans
import matplotlib.cm as cm
import matplotlib.colors as colors
# ### Clustering _toronto_grouped_ dataframe
# +
k = 4
#Drop 'Neighborhood' column for clustering
toronto_cluster = toronto_grouped.drop('Neighborhood', axis=1)
#KMeans clustering
# Fixed random_state keeps the cluster assignment reproducible across runs.
km_cluster = KMeans(n_clusters=k, random_state=90)
km_cluster.fit(toronto_cluster)
labels = km_cluster.labels_
print(f"Cluster labels of toronto_grouped dataframe:\n{labels}")
# -
# ### Mapping clusters of _toronto_venues_cluster_
# +
#inserting Cluster labels
# NOTE(review): insert() raises if this cell is re-run, because the
# 'Cluster Label' column would already exist.
neighborhoods_top_venues.insert(0, 'Cluster Label', labels)
#joining cluster data with origin data
toronto_venues_cluster = explore_df.join(neighborhoods_top_venues.set_index('Neighborhood'), on='Neighborhood')
toronto_venues_cluster.head()
# -
print(f"Neighborhoods per Cluster:\n{toronto_venues_cluster.groupby('Cluster Label')['Neighborhood'].count()}")
# +
toronto_cluster_map = folium.Map(location= [latitude, longitude], zoom_start=10)
# set color scheme for the clusters
x = np.arange(k)
ys = [i + x + (i*x)**2 for i in range(k)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
#add Neighborhood markers on toronto_map
# Marker colour encodes the KMeans cluster label assigned above.
for lat, long, hood, cluster in zip(toronto_venues_cluster['Latitude'], toronto_venues_cluster['Longitude'],toronto_venues_cluster['Neighborhood'], toronto_venues_cluster['Cluster Label'] ):
    label = folium.Popup(f"{str(hood)} + Cluster:{str(cluster)}", parse_html=True)
    folium.CircleMarker([lat, long],
                        radius=5,
                        popup=label,
                        color=rainbow[cluster],
                        fill=True,
                        fill_color=rainbow[cluster],
                        fill_opacity=0.9,
                        parse_html=False).add_to(toronto_cluster_map)
toronto_cluster_map
# -
# -
| code/project1/popularVenues_Toronto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="OLA-Ikza1cjM"
# # Checked
# + [markdown] id="xAAjP4PtamFM"
# Paper: http://people.cs.uchicago.edu/~ravenben/publications/pdf/translearn-usenixsec18.pdf
# + colab={"base_uri": "https://localhost:8080/"} id="HJOBPZ7cCeCM" outputId="b5069633-cb3e-4691-a3ac-a8ffacd92624"
# !git clone https://github.com/bolunwang/translearn.git
# %cd translearn
# + colab={"base_uri": "https://localhost:8080/"} id="Thabaz_29_Cv" outputId="a92643c7-a10f-4afb-830b-c86fa0373ace"
# !pip install -q ipdb
# + [markdown] id="fLr-ZGttpIt5"
# # Inspect dataset in HDF5
# + id="Rl-oUc3dpSz2"
import h5py
# + id="fNnBvXEspVwR"
dataset_path = "datasets/pubfig65_imagenet_test.h5"
# NOTE(review): no mode argument is given; recent h5py versions default to
# read-only 'r' — confirm for the h5py version pinned in this environment.
f = h5py.File(dataset_path)
# + colab={"base_uri": "https://localhost:8080/"} id="vWht0AxfRuhk" outputId="273e0124-47dd-4da6-b645-17ea39eb13ff"
# [:] materialises the HDF5 dataset as a numpy array.
print("Data type:", type(f.get("X_test")[:]))
print("Shape:", f.get("X_test").shape)
# + colab={"base_uri": "https://localhost:8080/"} id="7UofAHwqUVbf" outputId="d48c88a7-2f1e-478a-8898-9fd40037dee6"
# lb = f.get("Y_test")[:]
f.get("Y_test")[0]
# + [markdown] id="BV68kPFpURp-"
# ## Convert numpy array to image
# + id="RxexApPzhDXx"
import numpy as np
from PIL import Image
from matplotlib import cm
# + [markdown] id="S2Ug052GjGNG"
# Convert numpy pixel array to image
# - https://stackoverflow.com/a/10967471
# - https://stackoverflow.com/a/2659378
# + id="hmjKD5eZhH5f"
# Dump every test image to disk as a PNG for visual inspection.
sample_test_images = f.get("X_test")[:]
for num, image_array in enumerate(sample_test_images):
    # assumes arrays are uint8 HxWxC as Image.fromarray requires — TODO confirm
    im = Image.fromarray(image_array)
    im.save(f"sample_imagenet_test_{num}.png")
# + colab={"base_uri": "https://localhost:8080/"} id="KCAFj0iurO1Y" outputId="92ffa242-94e9-4e66-f345-4be346226a9b"
sample_test_images.shape
# + [markdown] id="wNei3BhLohIc"
# ## Test loading data
# + id="vdWlJGr8zYvM" colab={"base_uri": "https://localhost:8080/"} outputId="2093ab82-7fc8-4bad-f909-4fe9b06406d9"
from pubfig65_vggface_mimic_penalty_dssim import *
# + colab={"base_uri": "https://localhost:8080/"} id="HxV1VmGpznXo" outputId="7b596dfb-c093-4f62-a418-7c27e7a0a359"
(bottleneck_model, student_model) = load_and_build_models()
# + id="C377dvpaoyVh"
X, Y = load_dataset()
# + colab={"base_uri": "https://localhost:8080/"} id="YkJhS08RVSh9" outputId="d5ca4ea8-e2ba-4801-9969-0fa9ff805510"
print("X shape:", X.shape)
print("Y shape:", Y.shape)
# + id="SI4XuNe_VW04"
f.get("Y_test")[0]
# + id="l8qrn3Yiyzwq"
from pubfig65_vggface_mimic_penalty_dssim import *
# + id="QWhiQO9Xyw9n"
dataset = utils_translearn.load_dataset(
DATA_FILE,
keys=['X_test', 'Y_test'])
# + id="yWzY0XkqzBof"
X = dataset['X_test']
Y = dataset['Y_test']
X = X.astype(np.float32)
Y = Y.astype(np.float32)
# + id="t8TpUxsXzDLU"
X = utils_translearn.preprocess(X, INTENSITY_RANGE)
Y = np.argmax(Y, axis=1)
# + id="acJyynjBzWeR"
import numpy as np
from PIL import Image
from matplotlib import cm
image_array = X[0]
im = Image.fromarray(np.uint8(image_array))
im.save(f"sample_imagenet_test_2.png")
# + colab={"base_uri": "https://localhost:8080/"} id="g_slUZv9zNX1" outputId="c6c218a8-9aba-4cee-e005-23c548422a57"
# filter data points, keep only correctly predicted samples
print('filtering data')
X, Y = filter_data(X, Y, student_model)
# + id="Vd9JguWhzyc7"
Y_label = list(np.unique(Y))
all_pair_list = list(itertools.permutations(Y_label, 2))
pair_list = random.sample(
all_pair_list,
min(NB_PAIR, len(all_pair_list)))
# + colab={"base_uri": "https://localhost:8080/"} id="7s0L9Ei01qAU" outputId="eb77e0a4-c429-4480-cfa3-56f0ec8ffc03"
pair_list
# + id="L3HHQ9X411kO"
(source, target) = pair_list[0]
# sample images
(X_source, Y_source, X_target, Y_target) = select_source_target(
X, Y, source, target)
# + colab={"base_uri": "https://localhost:8080/"} id="ifHNaf4K1-qF" outputId="9bb94cb5-b3e3-46eb-c7b4-0dc7c75ff12d"
X_target.shape
# + id="hG51kYPm9wS0"
def load_and_build_models(student_model_file=STUDENT_MODEL_FILE,
                          teacher_model_file=TEACHER_MODEL_FILE,
                          cutoff_layer=CUTOFF_LAYER):
    """Load the student and teacher models and build the bottleneck model.

    Notebook-local re-definition of the helper star-imported earlier, with a
    breakpoint added so the intermediate models can be inspected.  Returns a
    ``(bottleneck_model, student_model)`` tuple; the bottleneck model is the
    teacher truncated at ``cutoff_layer`` and compiled.
    """
    # load the student model
    print('loading student model')
    student_model = load_model(student_model_file)
    print('loading teacher model')
    teacher_model = load_model(teacher_model_file)
    # load the bottleneck model
    print('building bottleneck model')
    # NOTE(review): interactive debugger breakpoint — execution stops here
    # until the ipdb prompt is continued; remove before unattended runs.
    import ipdb; ipdb.set_trace()
    bottleneck_model = Model(teacher_model.input,
                             teacher_model.layers[cutoff_layer - 1].output)
    bottleneck_model.compile(loss='categorical_crossentropy',
                             optimizer='adam',
                             metrics=['accuracy'])
    return bottleneck_model, student_model
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="vEa7NvOb9zvV" outputId="1ac0186e-58b9-4189-b68a-6ef9cd4a7870"
(bottleneck_model, student_model) = load_and_build_models()
# + [markdown] id="5-223PvDCHqi"
# # Prepare to run
# + [markdown] id="Q8kuDMGnCKmo"
# Download exported model:
# - vggface.h5
# - pubfig65_vggface_trans_nbtrain_90.h5
# + id="zHcD92RW_ePb"
# !pip install -q gdown
# + colab={"base_uri": "https://localhost:8080/"} id="9QmW0qQaTwuT" outputId="1dfd0600-895c-48f5-a645-eb2692c3e348"
# !mkdir ./models
# !gdown "https://drive.google.com/uc?id=1f2wV9nOLVKFM6JmeiCm3n05BmWZIQHyl" -O "models/pubfig65_vggface_trans_nbtrain_90.h5"
# !gdown "https://drive.google.com/uc?id=1hQ-cVn1hy85rXSmgKFa-H0aIMUsVr1Tz" -O "models/vggface.h5"
# # !cp datasets/pubfig65_imagenet_test.h5 models/
# + colab={"base_uri": "https://localhost:8080/"} id="o4G3mXPTIwOz" outputId="8f5c68fb-c5c3-468d-b8b4-3363313545e8"
# Check CUDA version
# https://stackoverflow.com/questions/9727688/how-to-get-the-cuda-version
import tensorflow as tf
from tensorflow.python.platform import build_info as build
print(f"tensorflow version: {tf.__version__}")
print(f"Cuda Version: {build.build_info['cuda_version']}")
print(f"Cudnn version: {build.build_info['cudnn_version']}")
# + colab={"base_uri": "https://localhost:8080/"} id="hYmFFE0QNn3n" outputId="c77ac055-2624-4fd6-9843-4f91f9b1b598"
# !wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
# !ls # Check if required cuda 9.0 amd64-deb file is downloaded
# !dpkg -i cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
# !ls /var/cuda-repo-9-0-local | grep .pub
# !apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
# !apt-get update
# !sudo apt-get install cuda-9.0
# + colab={"background_save": true} id="EDEz5WXBKhuD" outputId="fed6d189-99cc-4782-a5dc-4c909d17bae0"
# !apt install python2.7
# + id="bsxb6fNWJTu9"
# !python2.7 -m pip install keras==2.2.0 numpy==1.14.0 tensorflow-gpu==1.8.0 h5py==2.8.0 ipdb
# + [markdown] id="CiDBAva3UclD"
# # Attack
# + colab={"base_uri": "https://localhost:8080/"} id="PCsDJVHmHcLQ" outputId="22c1783a-021a-4c74-c5df-df71bca08b1e"
# %%time
# !python2.7 pubfig65_vggface_mimic_penalty_dssim.py
# + [markdown] id="Ui8tx26PUgQS"
# # Fingerprinting
# + colab={"base_uri": "https://localhost:8080/"} id="Q5yqCk9oVXqJ" outputId="74f87d45-d223-449f-ee14-a42c2b04e331"
# %%time
# !python2.7 pubfig65_fingerprint_vggface.py
# + colab={"base_uri": "https://localhost:8080/"} id="5_jrwjz6KQFI" outputId="4b99eee8-45d4-4077-900a-024514c5d37d"
# %%time
# !python2.7 pubfig65_fingerprint_vgg16.py
# + [markdown] id="5YJvdYu51ChW"
# # Patch neural networks
# + id="57PQk5G4KyBZ"
# %%time
# !python2.7 pubfig65_patch_neuron_distance.py
# + [markdown] id="ypqQ9JyOUQv4"
# # Extension
# + id="JzQgfkn_USx_"
# !gdown "https://drive.google.com/uc?id=1JibtwaJ0PFynllvMVzqJBOJpMZt5J5kL" -O "models/iris_vgg16_trans.h5"
# !gdown "https://drive.google.com/uc?id=1-b4Swcr0XDZ2Car9Ed9E9-9Vgciqa87r" -O "models/gtsrb_vgg16_trans.h5"
# !gdown "https://drive.google.com/uc?id=1BR3g0Sq5WoqcaJe5GRvawfxDKmLoSLW7" -O "models/flower_resnet50_trans.h5"
# + [markdown] id="lTs8PFSK2Qzb"
# # Experiment on Food classification
# + colab={"base_uri": "https://localhost:8080/"} id="74i9pLWnXbDj" outputId="93d1d27b-6c35-4f8b-fb44-057abd34eb09"
# !mkdir ./food_models/
# !gdown "https://drive.google.com/uc?id=1QlwYG5HEU3SIG1FMVp_7VPZ1gyiGNwT4" -O "food_models/extractor__tl_model_v1.weights.best.hdf5"
# !gdown "https://drive.google.com/uc?id=1-9VarST9tJkHFVbstK7UlbxzntfMPJP2" -O "food_models/fine_tuned__tl_model_v1.weights.best.hdf5"
# !gdown "https://drive.google.com/uc?id=1qvP9MEyDLacUSBGOogTpN8UCS8OZav04" -O "food_models/vgg16.h5"
# + [markdown] id="T6_amPFaYPAO"
# Download and load food dataset
# + id="smB3LH55YNVe" colab={"base_uri": "https://localhost:8080/"} outputId="2421ea27-b355-491f-b010-71e1e7ce286e"
# !wget http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz
# !tar xzf food-101.tar.gz
# + [markdown] id="b_QexlalTgpA"
# ## Testing
# + [markdown] id="pOKWjlRfIZCl"
# \################################## Start test \##################################
# + [markdown] id="0NVMsS7sWkoe"
# ### Inspect model
# + id="KlxGzo98A8t1"
food_student_model = load_model("food_models/extractor__tl_model_v1.weights.best.hdf5")
# + colab={"base_uri": "https://localhost:8080/"} id="sIAHXIFGBIM1" outputId="f15d7000-7e36-435b-c999-74e86390a1d2"
from pprint import pprint
pprint(food_student_model.layers)
pprint(len(food_student_model.layers))
# + colab={"base_uri": "https://localhost:8080/"} id="iR0aOa8rIfE3" outputId="c27527b5-f403-4613-b19c-15cfb58c573c"
from tensorflow.keras.applications.vgg16 import VGG16
model=VGG16(include_top=True , weights="imagenet")
model.save("vgg16.h5")
# + colab={"base_uri": "https://localhost:8080/"} id="RS1L8cuOLruf" outputId="a1b738fc-b8ba-492a-eec0-a6512a12472d"
# !cp vgg16.h5
# + colab={"base_uri": "https://localhost:8080/"} id="1s8ZMAroKSSs" outputId="8d218903-912d-47a9-b84d-464c7b341ee0"
pprint(model.layers)
pprint(len(model.layers))
# + colab={"base_uri": "https://localhost:8080/"} id="otVYlOJKR1WT" outputId="ee1a1dd6-07dd-4045-d8df-d38d8a4e479d"
input_shape = (224, 224, 3)
conv_base = VGG16(include_top=False,
weights='imagenet',
input_shape=input_shape)
pprint(conv_base.layers)
pprint(len(conv_base.layers))
# + [markdown] id="c1oiqjX3feYy"
# ### Prepare dataset
# + id="KELGW2RLhaZ0"
# Move data from images to images/train or images/test:
import shutil
from collections import defaultdict
import json
from pathlib import Path
import os
def split_dataset(root_food_path):
    """Split the food-101 image tree into train/ and test/ directories.

    Parameters
    ----------
    root_food_path : pathlib.Path
        Root of the extracted food-101 dataset; must contain
        ``meta/train.json``, ``meta/test.json`` and an ``images/`` tree.

    The meta files map each food name to a list of image stems, e.g.
    ``{"<food_name>": ["<food_name>/<image_num>", ...], ...}``.  Every listed
    image is moved from ``images/`` into ``train/`` or ``test/``, preserving
    the per-food subdirectory layout.
    """
    data_paths = {
        'train': root_food_path/'meta/train.json',
        'test': root_food_path/'meta/test.json'
    }
    for data_type, meta_path in data_paths.items():
        # Make the train/test dirs (idempotent).
        os.makedirs(root_food_path/data_type, exist_ok=True)
        # BUGFIX: the meta file was previously opened via json.load(open(...))
        # and never closed; use a context manager to release the handle.
        with open(meta_path, 'r') as meta_file:
            food_images = json.load(meta_file)
        for food_name, image_paths in food_images.items():
            # Make food dir in train/test dir
            os.makedirs(root_food_path/data_type/food_name, exist_ok=True)
            # Move images from food-101/images to food-101/train (or test)
            for image_path in image_paths:
                image_path = image_path + '.jpg'
                shutil.move(root_food_path/'images'/image_path, root_food_path/data_type/image_path)
# + id="Gb31IRAUfhSi"
import os
from keras.models import Model
from tensorflow.keras.optimizers import Adam
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Dense, Dropout, Flatten
from pathlib import Path
import numpy as np
# + id="hFgljC8hg_pu"
BATCH_SIZE = 64
train_generator = ImageDataGenerator(rotation_range=90,
brightness_range=[0.1, 0.7],
width_shift_range=0.5,
height_shift_range=0.5,
horizontal_flip=True,
vertical_flip=True,
validation_split=0.15,
preprocessing_function=preprocess_input) # VGG16 preprocessing
test_generator = ImageDataGenerator(preprocessing_function=preprocess_input) # VGG16 preprocessing
# + id="ZDooMd_LhYvt"
# download_dir = Path('/content/')
download_dir = Path('.')
split_dataset(download_dir/'food-101')
# + id="Kx03CE4MhdnA"
train_data_dir = download_dir/'food-101/train'
test_data_dir = download_dir/'food-101/test'
class_subset = sorted(os.listdir(download_dir/'food-101/images'))[:10] # Using only the first 10 classes
# + colab={"base_uri": "https://localhost:8080/"} id="C8BQHSCphjHg" outputId="7df13db9-a3c2-45f5-d1f7-48a0905787ad"
traingen = train_generator.flow_from_directory(train_data_dir,
target_size=(224, 224),
class_mode='categorical',
classes=class_subset,
subset='training',
batch_size=BATCH_SIZE,
shuffle=True,
seed=42)
validgen = train_generator.flow_from_directory(train_data_dir,
target_size=(224, 224),
class_mode='categorical',
classes=class_subset,
subset='validation',
batch_size=BATCH_SIZE,
shuffle=True,
seed=42)
testgen = test_generator.flow_from_directory(test_data_dir,
target_size=(224, 224),
class_mode=None,
classes=class_subset,
batch_size=1,
shuffle=True,
seed=42)
# + id="CFMO4XT8h_Uu"
tmp = next(testgen)
# + colab={"base_uri": "https://localhost:8080/"} id="4xDGpdtHt1ck" outputId="817efa9e-9bf6-4d5a-e750-fc8db2f19298"
np.argmax(tmp)
# + colab={"base_uri": "https://localhost:8080/"} id="XK_Voepsw1Ml" outputId="b303ae3d-2e0f-47dd-8cbf-4f65f3acaee4"
np.mean(f.get("X_test")[0])
# + colab={"base_uri": "https://localhost:8080/"} id="MjJC4jqXiEtQ" outputId="66577a19-5105-4a1e-ac3c-76bfed8e5363"
np.mean(tmp[0])
# + id="lx9f3CZmmkXo"
labels = testgen.labels
# + id="Y0XdKzmWxMY8"
import numpy as np
from PIL import Image
from matplotlib import cm
image_array = f.get("X_test")[0]
im = Image.fromarray(np.uint8(image_array))
im.save(f"sample_imagenet_test_0.png")
# + colab={"base_uri": "https://localhost:8080/"} id="3DVxc2dknlw2" outputId="49f9cdfa-895c-490f-fa46-c6b916246f9a"
labels[:300]
# + id="GxnSPdtku0Ga"
def load_food():
    """Specific function for loading food dataset (stub — not yet implemented; returns None)."""
# + [markdown] id="_Yy020a9WoB3"
# ### Check model classification
# + id="dw3nxTc_WrbB"
food_student_model = load_model("food_models/extractor__tl_model_v1.weights.best.hdf5")
# + [markdown] id="6qDVziCdIcV3"
# \################################## End test \##################################
# + [markdown] id="tQ6m_3JKTkNs"
# ## Conducting
# + id="KJiQ9ZiBYTrA"
# Move data from images to images/train or images/test:
import shutil
from collections import defaultdict
import json
from pathlib import Path
import os
def split_dataset(root_food_path):
    """Move food-101 images into train/ and test/ trees.

    ``root_food_path`` is the extracted food-101 directory containing
    ``meta/train.json``, ``meta/test.json`` and ``images/``.  Each meta file
    maps a food name to image stems such as ``"<food_name>/<image_num>"``;
    the corresponding ``.jpg`` files are moved out of ``images/`` into the
    matching split directory, keeping the per-food subdirectories.
    """
    data_paths = {
        'train': root_food_path/'meta/train.json',
        'test': root_food_path/'meta/test.json'
    }
    for data_type, meta_path in data_paths.items():
        # Make the train/test dirs
        os.makedirs(root_food_path/data_type, exist_ok=True)
        # BUGFIX: open the meta file with a context manager instead of the
        # previous bare open() whose file handle was never closed.
        with open(meta_path, 'r') as fh:
            food_images = json.load(fh)
        for food_name, image_paths in food_images.items():
            # Make food dir in train/test dir
            os.makedirs(root_food_path/data_type/food_name, exist_ok=True)
            # Move images from food-101/images to food-101/train (or test)
            for image_path in image_paths:
                image_path = image_path + '.jpg'
                shutil.move(root_food_path/'images'/image_path, root_food_path/data_type/image_path)
# + id="8fnRf3jLYacq"
from pathlib import Path
download_dir = Path('/content/translearn/')
split_dataset(download_dir/'food-101')
# + id="hWgg26lC8eeG"
import os
class_subset = sorted(os.listdir(download_dir/'food-101/images'))[:10]
# + colab={"base_uri": "https://localhost:8080/"} id="QGbcBsxP8hec" outputId="d0a0cc5d-a7f8-4362-935e-7575c0773d19"
from keras.preprocessing.image import ImageDataGenerator
BATCH_SIZE = 32
train_generator = ImageDataGenerator(
rescale=1/255., # normalize pixel values between 0-1
brightness_range=[0.1, 0.7], # specify the range in which to decrease/increase brightness
width_shift_range=0.5, # shift the width of the image 50%
rotation_range=90, # random rotation by 90 degrees
horizontal_flip=True, # 180 degree flip horizontally
vertical_flip=True, # 180 degree flip vertically
validation_split=0.15 # 15% of the data will be used for validation at end of each epoch
)
# test for integration with adversarial model
testgen = train_generator.flow_from_directory(
download_dir/'food-101/test',
target_size=(224, 224),
batch_size=BATCH_SIZE,
class_mode='categorical',
classes=class_subset,
subset='validation',
shuffle=True,
seed=42
)
# + id="LhmAIJBlVIDG"
data_list = []
batch_index = 0
while batch_index <= testgen.batch_index:
data = testgen.next()
data_list.append(data[0])
batch_index = batch_index + 1
# now, data_array is the numeric data of whole images
data_array = np.asarray(data_list)
# + [markdown] id="Cs5Wf_GpYYrL"
# Prepare model for attacking
# + id="Z0C6ktzG2g-K"
from pathlib import Path
food_classifier_path = Path("/content/translearn/food_models/")
# BUGFIX: the two uses below were misspelled 'food_clssifier_path', which
# raised a NameError when this cell ran; they now match the definition above.
# Deep-layer feature extractor
dlfe_model = food_classifier_path/"extractor__tl_model_v1.weights.best.hdf5"
# Mid-layer feature extractor (K=N-3)
mlfe_model = food_classifier_path/"fine_tuned__tl_model_v1.weights.best.hdf5"
| [NT522]_With_Great_Training_Comes_Great.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# <h2>Working With Missing Data</h2>
# <h3>References:</h3>
# <h3>General techniques</h3>
# https://pandas.pydata.org/pandas-docs/stable/missing_data.html<br/>
# <h3>Missing Values in a Timeseries</h3>
# https://www.kaggle.com/juejuewang/handle-missing-values-in-time-series-for-beginners
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('VehicleTraffic.csv', parse_dates=[0], index_col=0)
# Measurements taken at different times
df
# Remove NaN values
# (dropna() returns a copy for display; df itself is left unchanged)
df.dropna()
# Mean values of numeric columns
df.mean()
# Let's visualize vehicles data
# How does missing data show up?
# Matplotlib leaves gaps in the line wherever values are NaN.
plt.figure(figsize=(10,3))
plt.title('Vehicles')
plt.plot(df['Vehicles'], label='target')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# +
# Replace missing values with zero
plt.figure(figsize=(10,3))
plt.title('Replace NaN with Zero')
plt.plot(df['Vehicles'], label='target')
# fillna to replace NaNs with provided value
vehicles = df['Vehicles'].fillna(0)
plt.plot(vehicles,ls='-.',alpha=0.8,label='zeroed')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# +
# Replace missing values with mean value for that attribute
# (a single constant substituted at every gap)
plt.figure(figsize=(10,3))
plt.title('Replace NaN with Mean')
plt.plot(df['Vehicles'], label='target')
# fillna to replace NaNs with provided value
vehicles = df['Vehicles'].fillna(df['Vehicles'].mean())
plt.plot(vehicles,ls='-.',alpha=0.8,label='mean')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# +
# Replace missing values with interpolated value for that attribute
plt.figure(figsize=(10,3))
plt.title('Replace NaN with Interpolation')
plt.plot(df['Vehicles'], label='target')
vehicles = df['Vehicles'].interpolate()
# BUGFIX: the legend label said 'mean' (copy-paste leftover from the
# previous cell) even though this series is interpolated.
plt.plot(vehicles,ls='-.',alpha=0.8,label='interpolated')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# -
vehicles
# +
# Replace missing values with previous valid value for that attribute
plt.figure(figsize=(10,3))
plt.title('Replace NaN with Forward Fill')
plt.plot(df['Vehicles'], label='target')
# FIX(deprecation): fillna(method='ffill') is deprecated in pandas 2.x and
# removed in 3.0; Series.ffill() is the supported equivalent.
vehicles = df['Vehicles'].ffill()
plt.plot(vehicles,ls='-.',alpha=0.8,label='forward fill')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# -
vehicles
# +
# Replace missing values with next valid value for that attribute
plt.figure(figsize=(10,3))
plt.title('Replace NaN with Backward Fill')
plt.plot(df['Vehicles'], label='target')
# FIX(deprecation): fillna(method='bfill') is deprecated in pandas 2.x and
# removed in 3.0; Series.bfill() is the supported equivalent.
vehicles = df['Vehicles'].bfill()
plt.plot(vehicles,ls='-.',alpha=0.8,label='back fill')
plt.xticks(fontsize=14, rotation=45)
plt.legend()
plt.grid()
# -
vehicles
df
# +
# Now that we know different ways of handling missing values
# Let's pick an appropriate scheme for replacing missing values
# Vehicles and Average Speed...interpolate
# Apply the chosen imputations in place on df.
df['Vehicles'] = df['Vehicles'].interpolate()
df['Average Speed (mph)'] = df['Average Speed (mph)'].interpolate()
# Accidents...interpolate or use mean values
df['Accidents'] = df['Accidents'].fillna(df['Accidents'].mean())
# -
df
# <h3>Independent Data</h3>
# Example of data that is not time dependent
# Each row is independent
df = pd.read_csv('VehicleTrafficRoads.csv', index_col=0)
df
# NOTE(review): DataFrame.mean() on frames with non-numeric columns may need
# numeric_only=True on pandas >= 2.0 — confirm the pinned pandas version.
df.mean()
# +
# Substitute computed average of other rows
# In this case, Rows E and F look identical
# Data stored for Road E and F may not reflect reality
df.fillna(df.mean())
# -
# Better option here is to simply drop NA rows
# how = all Drop if all columns are NA
# how = any Drop if any one of the columns contain NA
df.dropna(how='all',inplace=True)
df
# <h3>Impute Missing Values from Similar Data</h3>
# Some instances have missing features
# There are three types of plants: Iris-setosa, Iris-virginica, Iris-versicolor
# In this case, we can find mean value of an attribute for each type of plant
# and use it to substitute the missing values
df = pd.read_csv('IrisMissingData.csv')
df
# Look for any columns that have NA
df.isna().any(axis=0)
# Look for any rows that have NA
# Keep the boolean mask so the same rows can be re-inspected after imputing.
rows_missing_values = df.isna().any(axis=1)
df[rows_missing_values]
# Find Summary Statistics for Each Class
# Impute values based on class
# https://stackoverflow.com/questions/19966018/pandas-filling-missing-values-by-mean-in-each-group
group_class = df.groupby('class')
# First few rows of each group
group_class.head(2)
# Attribute Mean value is different for each group
group_class.mean()
# Compared to mean value for entire dataset
# NOTE(review): df.mean() here relies on older pandas silently dropping the
# string 'class' column; pandas >= 2.0 may raise — confirm version.
df.mean()
# For each group, use group level averages to fill missing values
# transform() broadcasts each group's mean back onto that group's rows.
df['sepal_length'] = group_class['sepal_length'].transform(lambda x: x.fillna(x.mean()))
df['sepal_width'] = group_class['sepal_width'].transform(lambda x: x.fillna(x.mean()))
df['petal_length'] = group_class['petal_length'].transform(lambda x: x.fillna(x.mean()))
df['petal_width'] = group_class['petal_width'].transform(lambda x: x.fillna(x.mean()))
# Let's now check the rows that had missing values
df[rows_missing_values]
| 00 Introduction/ml_handling_missing_values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use the `pandas` module to access data from a variety of sources
#
# In this example we're fetching a `.xlsx` file from the web, but you can also load `.csv` and other file types from disk.
#
# By convention, `pandas` is often imported "`as pd`" and once you do this you utilize the library using `pd.function()`
import pandas as pd
# ### Define a URL or filepath and use `pd.read_csv()` or `pd.read_excel()`
#
# By convention, the `DataFrame` created by `pandas` is stored in an object called `df`
#
# This convention is only applicable if you have a single dataframe in use. If you're working with multiple dataframes you'll need to find a better naming approach.
# +
url = 'https://www1.nyc.gov/assets/nypd/downloads/excel/analysis_and_planning/stop-question-frisk/sqf-2018.xlsx'
# Downloads the workbook over HTTP into a DataFrame.
# NOTE(review): requires network access and an Excel engine (e.g. openpyxl) —
# confirm both are available in the target environment.
df = pd.read_excel(url)
# -
# ### Once you've created a dataframe you can use any `pandas` operation on it
#
# #### Common initial steps include looking at the `.head()` or `.tail()` of the dataframe (i.e. first or last 5 rows of data)
df.head()
# #### You can use `df.columns` or `df.dtypes` to learn more about the structure of your data
df.dtypes
# #### It can also be helpful sometimes to rotate your data by 90 degrees
#
# This is done using `df.T` to "transpose" the data
df.head().T
| examples/pandas/pandas_data_01_reading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from PIL import Image, ImageOps
import pytesseract
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import string
import re
import copy
import json
indir = './txt_boxes/test/'
# Baseline pass: OCR every image in indir with default Tesseract settings.
for root, dirs, filenames in os.walk(indir):
    for filename in filenames:
        print('###################' + filename + '###################')
        # NOTE(review): `indir + filename` ignores `root`, so this only works
        # when there are no subdirectories under indir — confirm layout.
        im = Image.open(indir + filename)
        #region = crop_box(im)
        #text = pytesseract.image_to_string(region, lang='eng', config='--oem 1 --psm 1') #oem 1 means Neural Nets LSTM engine
        text = pytesseract.image_to_string(im, lang='eng')
        print(text)
indir = './txt_boxes/test/'
# Second pass: binarize and invert with OpenCV before OCR, to see whether
# preprocessing improves Tesseract's output on the same images.
for root, dirs, filenames in os.walk(indir):
    for filename in filenames:
        print('###################' + filename + '###################')
        image = cv2.imread(indir + filename)
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Hard threshold at 120 to pure black/white, then invert so the text
        # is dark on a light background (Tesseract's preferred polarity).
        result = cv2.threshold(gray_image,120,255,cv2.THRESH_BINARY)[1]
        #new_image = cv2.threshold(gray_image,190,255,cv2.THRESH_BINARY)[1]
        #kernel = np.ones((1,1), np.uint8)
        #result = cv2.dilate(new_image, kernel, iterations=1)
        result = cv2.bitwise_not(result)
        #im = Image.open(indir + filename)
        #region = crop_box(im)
        #text = pytesseract.image_to_string(region, lang='eng', config='--oem 1 --psm 1') #oem 1 means Neural Nets LSTM engine
        #plt.imshow(result, cmap='Greys')
        #plt.show()
        text = pytesseract.image_to_string(result, lang='eng')
        # Only the first OCR'd line (the latitude stamp) is of interest here.
        print(text.split('\n')[0])
degree_sign = u'\N{DEGREE SIGN}'
print(degree_sign)
# +
indir = './txt_boxes/gps_box/'
# Accumulators for the main OCR pass:
#   image_dict  - frame key -> {'Lat': float|None, 'Lon': float|None}
#   bad_lats/bad_lons - frame key -> raw stamp text that failed validation
#   imgs_w_ie   - frames whose OCR output was missing a line (IndexError)
image_dict = {}
bad_lats = {}
bad_lons = {}
imgs_w_ie = []
degree_sign = u'\N{DEGREE SIGN}'
# Characters to strip from OCR output before parsing: keep only digits,
# spaces and periods (periods carry the tenths-of-second decimal).
remove = string.punctuation + string.ascii_uppercase + degree_sign + '’' + '”'
remove = remove.replace(".", "") # don't remove periods
def lat_txt_to_float(line, bad_lats, dict_key):
    """Parse an OCR'd latitude stamp into decimal degrees.

    Strips unwanted characters (module-level ``remove`` set), validates the
    result against a "DD MM SS.s" pattern and converts it to a float.
    Stamps failing validation are recorded in ``bad_lats`` under ``dict_key``
    and ``None`` is returned implicitly.
    """
    mod_string = line.translate({ord(char): None for char in remove})
    # Raw string avoids the invalid-escape-sequence warning of "\d"/"\s".
    pattern = re.compile(r"(\d{2}\s\d{2}\s\d{2}\.\d)")
    if not re.match(pattern, mod_string):
        bad_lats[dict_key] = mod_string
    else:
        try:
            parts = mod_string.split(' ')
            # Degrees + minutes/60 + seconds/3600; seconds clipped to "SS.s".
            return float(parts[0]) + float(parts[1]) / 60 + float(parts[2][:4]) / 3600
        except ValueError as e:
            # BUG FIX: the original message referenced the loop-global
            # `filename`, which may not exist when this helper is reused;
            # dict_key identifies the same frame.
            raise ValueError(f"Check image: {dict_key}") from e
def lon_txt_to_float(line, bad_lons, dict_key):
    """Parse an OCR'd longitude stamp into decimal degrees.

    Same contract as ``lat_txt_to_float`` but accepts 2- or 3-digit degrees
    ("DD MM SS.s" or "DDD MM SS.s"). Invalid stamps are recorded in
    ``bad_lons`` under ``dict_key`` and ``None`` is returned implicitly.
    """
    mod_string = line.translate({ord(char): None for char in remove})
    # Raw string avoids the invalid-escape-sequence warning of "\d"/"\s".
    pattern = re.compile(r"(\d{2,3}\s\d{2}\s\d{2}\.\d)")
    if not re.match(pattern, mod_string):
        bad_lons[dict_key] = mod_string
    else:
        try:
            parts = mod_string.split(' ')
            # Degrees + minutes/60 + seconds/3600; seconds clipped to "SS.s".
            return float(parts[0]) + float(parts[1]) / 60 + float(parts[2][:4]) / 3600
        except ValueError as e:
            # BUG FIX: report dict_key instead of the unrelated loop-global
            # `filename` (see lat_txt_to_float).
            raise ValueError(f"Check image: {dict_key}") from e
loop_counter = 0
# Main OCR pass over every GPS-stamp crop: binarize + invert, OCR with a
# character whitelist (legacy engine, --oem 0), then parse OCR line 0 as the
# latitude and line 1 as the longitude.
for entry in os.scandir(indir):
    loop_counter += 1
    if loop_counter % 100 == 0:
        print("Loop counter: ", loop_counter)
    GPS_dict = {}
    filename = entry.path
    #print('###################' + filename.split('/')[-1] + '###################')
    image = cv2.imread(filename)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Same preprocessing as the experiments above: threshold at 120, invert.
    result = cv2.threshold(gray_image,120,255,cv2.THRESH_BINARY)[1]
    #new_image = cv2.threshold(gray_image,190,255,cv2.THRESH_BINARY)[1]
    #kernel = np.ones((1,1), np.uint8)
    #result = cv2.dilate(new_image, kernel, iterations=1)
    result = cv2.bitwise_not(result)
    #im = Image.open(indir + filename)
    #region = crop_box(im)
    #text = pytesseract.image_to_string(region, lang='eng', config='--oem 1 --psm 1') #oem 1 means Neural Nets LSTM engine
    #plt.imshow(result, cmap='Greys')
    #plt.show()
    #text = pytesseract.image_to_string(result, lang='eng', config='-c tessedit_char_whitelist=’WN0123456789.° --oem 0')
    # Whitelist restricts recognition to the characters a GPS stamp can contain.
    text = pytesseract.image_to_string(result, lang='eng', config="""-c tessedit_char_whitelist=’WN0123456789.°\\'\\" --oem 0""")
    #text = pytesseract.image_to_string(result, lang='eng')
    # Frame key: file name without directory or extension (e.g. 'frame42').
    dict_key = filename.split('/')[-1].split('.')[0]
    try:
        GPS_dict['Lat'] = lat_txt_to_float(text.split('\n')[0], bad_lats, dict_key)
    except IndexError:
        imgs_w_ie.append({dict_key:('Lat',text)})
    try:
        GPS_dict['Lon'] = lon_txt_to_float(text.split('\n')[1], bad_lons, dict_key)
    except IndexError:
        imgs_w_ie.append({dict_key:('Lon',text)})
    image_dict[dict_key] = GPS_dict
    #print(text.split('\n')[0])
#print(bad_lats)
#print(bad_lons)
# -
# Exploratory analysis of the OCR results: how many stamps parsed cleanly,
# how many failed, and what the failure modes look like.
len(image_dict)
len(bad_lons)
#print a 10-sample of key-value pairs from bad_lons
{k: v for i, (k,v) in enumerate(dict((k, bad_lons[k]) for k in bad_lons.keys()).items()) if i < 10}
#print a 10-sample of key-value pairs from bad_lats
{k: v for i, (k,v) in enumerate(dict((k, bad_lats[k]) for k in bad_lats.keys()).items()) if i < 10}
len(bad_lats)
#how many frames have both poorly OCR'd latitude and longitude values
len(bad_lats.keys() & bad_lons.keys())
len(image_dict.keys() & bad_lats.keys())
len(image_dict.keys() & bad_lons.keys())
#size of dictionary containing key-value pairs from image_dict for keys that are also found in bad_lats dictionary
len(dict((k, image_dict[k]) for k in (image_dict.keys() & bad_lats.keys())))
#print a 10-sample of key-value pairs from image_dict for keys that are also found in bad_lats dictionary
{k: v for i, (k,v) in enumerate(dict((k, image_dict[k]) for k in (image_dict.keys() & bad_lats.keys())).items()) if i < 10}
n_none_lats, n_none_lons, n_none_both = (0,0,0)
#subset image_dict to frames that are NOT found in imgs_w_ie list of dictionaries
missing_keys = set().union(*(d.keys() for d in imgs_w_ie))
image_dict_rstr = dict((k, image_dict[k]) for k in (image_dict.keys() - missing_keys))
#count frames in image_dict that have None values in one or both of Lat and Lon
for gps_dict in image_dict_rstr.values():
    if (gps_dict['Lat'] is None) & (gps_dict['Lon'] is None):
        n_none_both += 1
    elif gps_dict['Lat'] is None:
        n_none_lats += 1
    elif gps_dict['Lon'] is None:
        n_none_lons += 1
print("Only Lat none:", n_none_lats, "\nOnly Lon none:", n_none_lons, "\nBoth none:", n_none_both)
len(image_dict_rstr)
#subset bad_lats dictionary to frames that are NOT found in imgs_w_ie list of dictionaries
missing_keys = set().union(*(d.keys() for d in imgs_w_ie))
# NOTE(review): the values here come from image_dict, not bad_lats, although
# the keys are bad_lats keys — looks like a copy-paste slip; confirm whether
# bad_lats[k] was intended.
bad_lats_rstr = dict((k, image_dict[k]) for k in (bad_lats.keys() - missing_keys))
len(bad_lats_rstr)
# Histogram: number of digits per failed latitude stamp.
lat_n_digits = {}
for value in bad_lats.values():
    n_digits = sum(c.isdigit() for c in value)
    if n_digits not in lat_n_digits:
        lat_n_digits[n_digits] = 1
    else:
        lat_n_digits[n_digits] += 1
print(lat_n_digits)
for key, value in bad_lats.items():
    if sum(c.isdigit() for c in value) not in (7,8):
        print(key, ',', value)
# Histogram: number of digits per failed longitude stamp.
lon_n_digits = {}
for value in bad_lons.values():
    n_digits = sum(c.isdigit() for c in value)
    if n_digits not in lon_n_digits:
        lon_n_digits[n_digits] = 1
    else:
        lon_n_digits[n_digits] += 1
print(lon_n_digits)
# Among 8-digit failures, how many start with '1' (i.e. a 3-digit degree)?
lon_8_digits = {'8, starts with 1': 0, '8, starts with other': 0}
for value in bad_lons.values():
    n_digits = sum(c.isdigit() for c in value)
    if n_digits == 8:
        first_digit_1 = (value[0] == '1')
        if first_digit_1:
            lon_8_digits['8, starts with 1'] += 1
        else:
            lon_8_digits['8, starts with other'] += 1
print(lon_8_digits)
for key, value in bad_lons.items():
    if sum(c.isdigit() for c in value) == 9:
        print(key, ',', value)
len(imgs_w_ie)
imgs_w_ie
#extract keys of all dictionaries present in imgs_w_ie list of dictionaries
all_keys = set().union(*(d.keys() for d in imgs_w_ie))
all_keys
# Sanity checks for the validation regex used by the parsers above.
pattern = re.compile("(\d{2,3}\s\d{2}\s\d{2}\.\d)")
if re.match(pattern, '77 14 08.2'):
    print("yes")
else:
    print("no")
if not(re.match(pattern, '77 14 08 2')):
    print("no")
# +
#dpi = im.info
#indir = './frames_lmtd/'
#im_full = Image.open(indir + 'frame40.jpg')
#dpi = im_full.info['dpi']
# -
# Distribution of how many coordinates (0, 1 or 2) each frame produced.
n_stamps_per_frame = {}
for key, value in image_dict.items():
    if len(value) not in n_stamps_per_frame:
        n_stamps_per_frame[len(value)] = 1
    else:
        n_stamps_per_frame[len(value)] += 1
n_stamps_per_frame
# ### Fix OCR'd text in latitude and longitude stamps
#print a 10-sample of key-value pairs from bad_lats
{k: v for i, (k,v) in enumerate(dict((k, bad_lats[k]) for k in bad_lats.keys()).items()) if i < 10}
#make a shallow copy of bad_lats dictionary
bad_lats_copy = bad_lats.copy()
#remove all frames from bad_lats where number of digits in latitude value is not 7
# (a valid latitude stamp is DDMMSSs: 7 digits with tenths of a second)
bad_lats_copy = {k:v for (k,v) in bad_lats_copy.items() if sum(c.isdigit() for c in v) == 7}
len(bad_lats_copy)
#remove all spaces from values
bad_lats_copy = {k: v.replace(" ","") for (k,v) in bad_lats_copy.items()}
#check all existing positions of a period in values
# Histogram keyed by the tuple of period positions in each stamp.
pos_of_period = {}
for v in bad_lats_copy.values():
    pos_list = [pos for pos, char in enumerate(v) if char == '.']
    if tuple(pos_list) not in pos_of_period:
        pos_of_period[tuple(pos_list)] = 1
    else:
        pos_of_period[tuple(pos_list)] += 1
pos_of_period
sum(pos_of_period.values())
#check latitude stamps that don't have periods
for v in bad_lats_copy.values():
    if '.' not in v:
        print(v)
#print a 10-sample of key-value pairs from bad_lats_copy
{k: v for i, (k,v) in enumerate(dict((k, bad_lats_copy[k]) for k in bad_lats_copy.keys()).items()) if i < 10}
#remove all frames where position of period is anything other than (6,) or (6,8), or there is no period at all
bad_lats_copy = {k: v for (k,v) in bad_lats_copy.items() if tuple([pos for pos, char in enumerate(v) if char == '.'])
                 in ((6,),(6,8),())}
len(bad_lats_copy)
#function to remove trailing periods, to insert period for stamps that need it, to insert spaces,
#and to convert text to float
def lat_clean_transform(stamp):
    """Convert a cleaned 7-digit latitude stamp to decimal degrees.

    *stamp* has already been stripped of spaces and validated to contain 7
    digits with the period (if any) at position 6, optionally with a second
    trailing period at position 8 ('DDMMSSs', 'DDMMSS.s' or 'DDMMSS.s.').
    """
    # Keep at most 8 characters, which removes any trailing period.
    no_trail_prd = stamp[:8]
    if '.' not in no_trail_prd:
        # BUG FIX: the original sliced [6:6] (always the empty string), which
        # silently dropped the tenths-of-second digit; [6:7] keeps it.
        stamp_new = no_trail_prd[:2] + ' ' + no_trail_prd[2:4] + ' ' + no_trail_prd[4:6] + '.' + no_trail_prd[6:7]
    else:
        stamp_new = no_trail_prd[:2] + ' ' + no_trail_prd[2:4] + ' ' + no_trail_prd[4:8]
    degrees, minutes, seconds = stamp_new.split(' ')
    return float(degrees) + float(minutes) / 60 + float(seconds) / 3600
# Apply the repair function to every salvageable latitude stamp.
replaced_lats = {k: lat_clean_transform(v) for (k,v) in bad_lats_copy.items()}
len(replaced_lats)
#print a 10-sample of key-value pairs from replaced_lats
{k: v for i, (k,v) in enumerate(dict((k, replaced_lats[k]) for k in replaced_lats.keys()).items()) if i < 10}
sum(x is None for x in replaced_lats.values())
#print a 10-sample of key-value pairs from bad_lons
{k: v for i, (k,v) in enumerate(dict((k, bad_lons[k]) for k in bad_lons.keys()).items()) if i < 10}
#make a shallow copy of bad_lons dictionary
bad_lons_copy = bad_lons.copy()
#remove all frames from bad_lons where number of digits in longitude value is not 7 or 8 with first digit being 1
# (longitudes may have 2- or 3-digit degrees: DDMMSSs or 1DDMMSSs)
bad_lons_copy = {k:v for (k,v) in bad_lons_copy.items() if (sum(c.isdigit() for c in v) == 7) |
                ((sum(c.isdigit() for c in v) == 8) & (v.startswith('1')))}
len(bad_lons_copy)
#remove all spaces from values
bad_lons_copy = {k: v.replace(" ","") for (k,v) in bad_lons_copy.items()}
#print a 10-sample of key-value pairs from bad_lons_copy
{k: v for i, (k,v) in enumerate(dict((k, bad_lons_copy[k]) for k in bad_lons_copy.keys()).items()) if i < 10}
#check all existing positions of a period in values
pos_of_period = {}
for v in bad_lons_copy.values():
    pos_list = [pos for pos, char in enumerate(v) if char == '.']
    if tuple(pos_list) not in pos_of_period:
        pos_of_period[tuple(pos_list)] = 1
    else:
        pos_of_period[tuple(pos_list)] += 1
pos_of_period
sum(pos_of_period.values())
#check longitude stamps that don't have periods
for v in bad_lons_copy.values():
    if '.' not in v:
        print(v)
#remove all frames where position of period is confusing
bad_lons_copy = {k: v for (k,v) in bad_lons_copy.items() if tuple([pos for pos, char in enumerate(v) if char == '.'])
                 in ((7,),(6,),(7,9),(6,8),())}
len(bad_lons_copy)
#print a 10-sample of key-value pairs from bad_lons_copy
{k: v for i, (k,v) in enumerate(dict((k, bad_lons_copy[k]) for k in bad_lons_copy.keys()).items()) if i < 10}
# +
#len({k: v for (k,v) in bad_lons_copy.items() if len(v) < 7})
# -
#function to remove trailing periods, to insert period for stamps that need it, to insert spaces,
#and to convert text to float
def lon_clean_transform(stamp):
    """Convert a cleaned longitude stamp to decimal degrees.

    Handles 7-digit stamps with 2-digit degrees ('DDMMSSs' / 'DDMMSS.s') and
    8-digit stamps with 3-digit degrees ('DDDMMSSs' / 'DDDMMSS.s').
    Stamps matching neither layout return None (implicitly).
    """
    if (sum(c.isdigit() for c in stamp) == 7) & (tuple([pos for pos, char in enumerate(stamp) if char == '.']) in ((6,),(6,8),())):
        # Keep at most 8 characters, which removes any trailing period.
        no_trail_prd = stamp[:8]
        if '.' not in no_trail_prd:
            # BUG FIX: [6:6] is always empty; [6:7] keeps the tenths digit.
            stamp_new = no_trail_prd[:2] + ' ' + no_trail_prd[2:4] + ' ' + no_trail_prd[4:6] + '.' + no_trail_prd[6:7]
        else:
            stamp_new = no_trail_prd[:2] + ' ' + no_trail_prd[2:4] + ' ' + no_trail_prd[4:8]
        return float(stamp_new.split(' ')[0]) + float(stamp_new.split(' ')[1])/60 + \
            float(stamp_new.split(' ')[2])/3600
    elif (sum(c.isdigit() for c in stamp) == 8) & (tuple([pos for pos, char in enumerate(stamp) if char == '.']) in ((7,),(7,9),())):
        no_trail_prd = stamp[:9]
        if '.' not in no_trail_prd:
            # BUG FIX: [7:7] is always empty; [7:8] keeps the tenths digit.
            stamp_new = no_trail_prd[:3] + ' ' + no_trail_prd[3:5] + ' ' + no_trail_prd[5:7] + '.' + no_trail_prd[7:8]
        else:
            stamp_new = no_trail_prd[:3] + ' ' + no_trail_prd[3:5] + ' ' + no_trail_prd[5:9]
        return float(stamp_new.split(' ')[0]) + float(stamp_new.split(' ')[1])/60 + \
            float(stamp_new.split(' ')[2])/3600
    # Falls through to an implicit None for unrecognized layouts; callers
    # count and later drop None coordinates.
# Apply the repair function to every salvageable longitude stamp.
replaced_lons = {k: lon_clean_transform(v) for (k,v) in bad_lons_copy.items()}
len(replaced_lons)
#print a 10-sample of key-value pairs from replaced_lons
{k: v for i, (k,v) in enumerate(dict((k, replaced_lons[k]) for k in replaced_lons.keys()).items()) if i < 10}
sum(x is None for x in replaced_lons.values())
# +
#bad_lons_copy['frame28907']
# -
# Re-shape the repaired values to match image_dict's {'Lat': ...}/{'Lon': ...} layout.
replaced_lats = {k: {'Lat':v} for (k,v) in replaced_lats.items()}
replaced_lons = {k: {'Lon':v} for (k,v) in replaced_lons.items()}
#print a 10-sample of key-value pairs from replaced_lats
{k: v for i, (k,v) in enumerate(dict((k, replaced_lats[k]) for k in replaced_lats.keys()).items()) if i < 10}
#print a 10-sample of key-value pairs from replaced_lons
{k: v for i, (k,v) in enumerate(dict((k, replaced_lons[k]) for k in replaced_lons.keys()).items()) if i < 10}
sum(x == {'Lat': None} for x in replaced_lats.values())
sum(x == {'Lon': None} for x in replaced_lons.values())
len(replaced_lats.keys() & replaced_lons.keys())
len(replaced_lats.keys() - replaced_lons.keys())
len(replaced_lons.keys() - replaced_lats.keys())
# Deep copy so the fixes below don't mutate image_dict_rstr.
image_dict_final = copy.deepcopy(image_dict_rstr)
#number of keys in value dictionary in image_dict_final
keys_0 = 0
keys_1 = 0
keys_2 = 0
for v in image_dict_final.values():
    n_keys = len(v.keys())
    if n_keys == 0:
        keys_0 += 1
    elif n_keys == 1:
        keys_1 += 1
    elif n_keys == 2:
        # NOTE(review): increments by 2 while the other branches add 1 —
        # looks like a typo for `+= 1`; the printed count is doubled.
        keys_2 += 2
print("0 keys:",keys_0,"\n1 key:",keys_1,"\n2 keys:",keys_2)
#number of keys in value dictionary in replaced_lons
keys_0 = 0
keys_1 = 0
keys_2 = 0
for v in replaced_lons.values():
    n_keys = len(v.keys())
    if n_keys == 0:
        keys_0 += 1
    elif n_keys == 1:
        keys_1 += 1
    elif n_keys == 2:
        # NOTE(review): same `+= 2` vs `+= 1` suspicion as above.
        keys_2 += 2
print("0 keys:",keys_0,"\n1 key:",keys_1,"\n2 keys:",keys_2)
#print a 10-sample of key-value pairs from image_dict_final
{k: v for i, (k,v) in enumerate(dict((k, image_dict_final[k]) for k in image_dict_final.keys()).items()) if i < 10}
#check how many frames have no latitude or longitude value in image_dict_final before fixed values are inserted
lon_none = 0
lat_none = 0
lat_lon_none = 0
for v in image_dict_final.values():
    if (v['Lat'] is None) & (v['Lon'] is None):
        lat_lon_none += 1
    elif v['Lat'] is None:
        lat_none += 1
    elif v['Lon'] is None:
        lon_none += 1
print("Frames without Lat:",lat_none,"\nFrames without Lon:",lon_none,"\nFrames without both:",lat_lon_none)
# #copy fixed latitude and longitude stamps into final image_dict_final dictionary
# Only fill in a repaired value where the original OCR parse failed (None).
for k, v in image_dict_final.items():
    if (v['Lat'] is None) & (k in replaced_lats):
        image_dict_final[k]['Lat'] = replaced_lats[k]['Lat']
    if (v['Lon'] is None) & (k in replaced_lons):
        image_dict_final[k]['Lon'] = replaced_lons[k]['Lon']
#check how many frames ended up having no latitude or longitude value
lon_none = 0
lat_none = 0
lat_lon_none = 0
for v in image_dict_final.values():
    if (v['Lat'] is None) & (v['Lon'] is None):
        lat_lon_none += 1
    elif v['Lat'] is None:
        lat_none += 1
    elif v['Lon'] is None:
        lon_none += 1
print("Frames without Lat:",lat_none,"\nFrames without Lon:",lon_none,"\nFrames without both:",lat_lon_none)
len(image_dict_final)
#remove frames without latitude and/or longitude value
# list() snapshot of the keys allows deleting while iterating.
for k in list(image_dict_final.keys()):
    if (image_dict_final[k]['Lat'] is None) | (image_dict_final[k]['Lon'] is None):
        del image_dict_final[k]
len(image_dict_final)
#check how many remaining latitude and longitude values fall outside the expected bounds
lat_out_of_bounds = 0
lon_out_of_bounds = 0
both_out_of_bounds = 0
for v in image_dict_final.values():
    if ((v['Lat'] < 37.0) | (v['Lat'] > 47.7)) & ((v['Lon'] < 77.0) | (v['Lon'] > 122.33)):
        both_out_of_bounds += 1
    elif (v['Lat'] < 37.0) | (v['Lat'] > 47.7):
        lat_out_of_bounds += 1
    elif (v['Lon'] < 77.0) | (v['Lon'] > 122.33):
        lon_out_of_bounds += 1
print("Both of out bounds:",both_out_of_bounds,"\nLat out of bounds:",lat_out_of_bounds,"\nLon out of bounds:",lon_out_of_bounds)
#remove these frames that have latitude and longitude values fall outside the expected bounds
for k in list(image_dict_final.keys()):
    if ((image_dict_final[k]['Lat'] < 37.0) | (image_dict_final[k]['Lat'] > 47.7) |
        (image_dict_final[k]['Lon'] < 77.0) | (image_dict_final[k]['Lon'] > 122.33)):
        del image_dict_final[k]
len(image_dict_final)
#print a 10-sample of key-value pairs from image_dict_final
{k: v for i, (k,v) in enumerate(dict((k, image_dict_final[k]) for k in image_dict_final.keys()).items()) if i < 10}
# Persist the cleaned coordinates.
# NOTE(review): prefer `with open("image_txt.json", "w") as f:` so the file
# is closed even if the write raises.
json_txt = json.dumps(image_dict_final)
f = open("image_txt.json","w")
f.write(json_txt)
f.close()
#print a 10-sample of key-value pairs from replaced_lons for keys that are also found in replaced_lats dictionary
{k: v for i, (k,v) in enumerate(dict((k, replaced_lons[k]) for k in (image_dict.keys() & bad_lats.keys())).items()) if i < 10}
#from https://stackoverflow.com/a/7205107/9987623
#function to perform nested merge of dictionaries
def merge_dicts(a, b, path=None):
    """Recursively merge dict *b* into dict *a* in place and return *a*.

    Nested dicts are merged key by key; equal leaf values are kept as-is;
    a conflicting leaf raises with the dotted path to the offending key.
    """
    if path is None: path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                # BUG FIX: the original called `merge(...)`, an undefined name
                # (NameError on any nested-dict input); the recursion must
                # call this function itself.
                merge_dicts(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass # same leaf value
            else:
                raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
        else:
            a[key] = b[key]
    return a
# NOTE(review): dict_items objects do not support `+` in Python 3, so this
# expression raises TypeError (it is a Python 2 idiom). Also note that
# dict.update() returns None, so the first two comprehensions build dicts of
# Nones while mutating the value dicts as a side effect.
dict({k_lat: v_lat.update({'Lon': None}) for (k_lat,v_lat) in replaced_lats.items() if k_lat in (replaced_lats.keys() -
    replaced_lons.keys())}.items() + {k_lon: v_lon.update({'Lat': None}) for (k_lon,v_lon) in
    replaced_lons.items() if k_lon in (replaced_lons.keys() - replaced_lats.keys())}.items() + \
    {k_lat_lon: {'Lat': replaced_lats[k_lat_lon]['Lat'], 'Lon': replaced_lons[k_lat_lon]['Lon']} for k_lat_lon in
    (replaced_lats.keys() & replaced_lons.keys())}.items())
#print a 10-sample of key-value pairs from replaced_lats
{k: v for i, (k,v) in enumerate(dict((k, replaced_lats[k]) for k in replaced_lats.keys()).items()) if i < 10}
{k_lat: v_lat.update({'Lon': None}) for (k_lat,v_lat) in replaced_lats.items() if k_lat in (replaced_lats.keys() -
    replaced_lons.keys())}
#this probably modified the original replaced_lats dictionary
# +
# BUG FIX: ChainMap is used below but was never imported in this notebook,
# so the cell raised NameError.
from collections import ChainMap

# NOTE: dict.update() returns None; these two comprehensions are executed
# purely for their side effect of adding the missing coordinate key (set to
# None) to every value dict.
{k_lat: v_lat.update({'Lon': None}) for (k_lat,v_lat) in replaced_lats.items()}
lats_not_lons = {k_lat: v_lat for (k_lat,v_lat) in replaced_lats.items() if k_lat
                 in (replaced_lats.keys() - replaced_lons.keys())}
{k_lon: v_lon.update({'Lat': None}) for (k_lon,v_lon) in replaced_lons.items()}
lons_not_lats = {k_lon: v_lon for (k_lon,v_lon) in replaced_lons.items() if k_lon
                 in (replaced_lons.keys() - replaced_lats.keys())}
# Frames with both coordinates repaired: combine them into one value dict.
lats_and_lons = {k_lat_lon: {'Lat': replaced_lats[k_lat_lon]['Lat'], 'Lon': replaced_lons[k_lat_lon]['Lon']}
                 for k_lat_lon in (replaced_lats.keys() & replaced_lons.keys())}
dicts_to_merge = [lats_not_lons, lons_not_lats, lats_and_lons]
# The three key sets are disjoint by construction, so ChainMap lookup order
# does not matter here.
merged_replacements = dict(ChainMap(*dicts_to_merge))
# merged_replacements = {**{k_lat: v_lat.update({'Lon': None}) for (k_lat,v_lat) in replaced_lats.items() if k_lat in (replaced_lats.keys() -
#                        replaced_lons.keys())}, **{k_lon: v_lon.update({'Lat': None}) for (k_lon,v_lon) in
#                        replaced_lons.items() if k_lon in (replaced_lons.keys() - replaced_lats.keys())},
#                        **{k_lat_lon: {'Lat': replaced_lats[k_lat_lon]['Lat'], 'Lon': replaced_lons[k_lat_lon]['Lon']} for k_lat_lon in
#                        (replaced_lats.keys() & replaced_lons.keys())}}
# -
len(lats_and_lons)
len(lats_not_lons)
len(replaced_lats.keys() - replaced_lons.keys())
#print a 10-sample of key-value pairs from lats_not_lons
{k: v for i, (k,v) in enumerate(dict((k, lats_not_lons[k]) for k in lats_not_lons.keys()).items()) if i < 10}
merged_replacements
len(merged_replacements)
sum(x is None for x in merged_replacements.values())
# +
# NOTE(review): this whole cell is abandoned scratch work and is not valid
# Python — the first line misuses comprehension syntax and the second is a
# bare fragment; both raise SyntaxError if run.
{merge_dicts(copy.deepcopy(replaced_lats),copy.deepcopy(replaced_lons)) for replaced_lats}
merge_dicts(__, __) for dict(replaced_lats.items()
# -
# NOTE(review): `.key()` is a typo — dict has `.keys()`; this line raises
# AttributeError as written.
{k_lat_lon: {'Lat': replaced_lats[k_lat_lon]['Lat'], 'Lon': replaced_lons[k_lat_lon]['Lon']} for k_lat_lon in replaced_lats.key() }
'''
{'frame8359': {'Lat': None, 'Lon': 85.89155555555556},
'frame6845': {'Lat': None, 'Lon': 84.04952777777778},
'frame7862': {'Lat': None, 'Lon': 85.25738888888888},
'frame32668': {'Lat': None, 'Lon': 115.90894444444444},
'frame467': {'Lat': None, 'Lon': 77.46622222222223},
'frame36963': {'Lat': None, 'Lon': None},
'frame24656': {'Lat': None, 'Lon': 107.78122222222223},
'frame6392': {'Lat': None, 'Lon': 83.43738888888889},
'frame9776': {'Lat': None, 'Lon': 87.869},
'frame5612': {'Lat': None, 'Lon': 82.36247222222222}}
'''
# +
a_test = {'a':'foo','b':'bar','c':'baz'}
b_test = {'a':'spam','c':'ham','x':'blah'}
# NOTE(review): Python 2 idiom — dict_items + dict_items raises TypeError on
# Python 3; use {**a_test, **b_test} instead.
r_test = dict(a_test.items() + b_test.items())
# -
| GPS_Text_Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pickle
import yaml
from scipy.spatial.distance import cdist
from shapely.ops import nearest_points
from shapely.geometry import Point
import geopandas as gpd
df_subways = pd.read_csv("../data/external/subways.csv", index_col=False)
# Round coordinates to 2 decimals so equality matching against the unioned
# geometry later on is stable.
df_subways = df_subways.round(2)
df_subways.shape
df_subways.head(5)
def extract_Points_df(df, lat_column, long_column, crs=None):
    """Return *df* as a GeoDataFrame with a Point geometry per row.

    Points are built from (long, lat) pairs (shapely expects x=long, y=lat).
    BUG FIX: the original default crs was the SET {'init', 'epsg:4326'} —
    a typo for the mapping {'init': 'epsg:4326'}. The correct mapping is now
    built per call (None sentinel) to also avoid a shared mutable default.
    """
    if crs is None:
        # Legacy proj4-style CRS dict; on modern geopandas the plain string
        # 'EPSG:4326' is the preferred spelling.
        crs = {'init': 'epsg:4326'}
    df_copy = df.copy()
    geometry = [Point(xy) for xy in zip(df_copy[long_column], df_copy[lat_column])]
    Points = gpd.GeoDataFrame(df_copy, crs=crs, geometry=geometry)
    return Points
gdf_subways = extract_Points_df(df_subways, lat_column="o_lat", long_column="o_long")
gdf_subways.sample(5)
df_queries_test = pd.read_pickle('../data/processed/df_test.pickle')
gdf_queries_test = extract_Points_df(df_queries_test, lat_column="o_lat", long_column="o_long")
gdf_queries_test.sample(5)
# Union of all subway points into one MultiPoint; near() searches against it.
pts3 = gdf_subways.geometry.unary_union
def near(point, pts=pts3):
    """Distance from *point* to its nearest subway stop, scaled x10, as a string.

    Uses the module-level ``gdf_subways`` GeoDataFrame; ``pts`` defaults to
    the unioned subway geometry ``pts3`` captured at definition time.
    """
    # find the nearest point and return the corresponding Place value
    # Boolean mask selecting the subway row whose geometry equals the nearest point.
    nearest = gdf_subways.geometry == nearest_points(point, pts)[1]
    #return gdf_subways[nearest].subway_name.get_values()[0]
    # BUG FIX: Series.get_values() was removed in pandas 1.0; .values is the
    # supported equivalent.
    return "%.4f" % (gdf_subways[nearest].geometry.values[0].distance(point)*10.0)
# Smoke-test near() on a 100-row sample before wiring it into the helper below.
test = gdf_queries_test.sample(100)
test['dist_nearest_sub'] = test.apply(lambda row: near(row.geometry), axis=1)
test.sample(5)
test.describe()
dftest = df_queries_test.sample(500)
def add_dist_nearest_subway(dataf):
    '''
    Return *dataf* as a GeoDataFrame with one new column, 'dist_nearest_sub':
    the distance to the nearest subway station (from ../data/external/subways.csv),
    scaled x10 and formatted to 3 decimals.

    Raises ValueError when the 'o_lat'/'o_long' coordinate columns are missing.
    (BUG FIX: the original called an undefined `logger`, which would itself
    raise NameError, and then fell through to a confusing KeyError.)
    '''
    def extract_Points_df(df, lat_column, long_column, crs=None):
        # Build a GeoDataFrame with one Point per (o_long, o_lat) pair.
        # BUG FIX: default crs was the set {'init', 'epsg:4326'}; use the
        # intended mapping, built per call (no shared mutable default).
        if crs is None:
            crs = {'init': 'epsg:4326'}
        df_copy = df.copy()
        geometry = [Point(xy) for xy in zip(df_copy[long_column], df_copy[lat_column])]
        return gpd.GeoDataFrame(df_copy, crs=crs, geometry=geometry)
    df_subways = pd.read_csv("../data/external/subways.csv", index_col=False).round(2)
    if 'o_lat' not in dataf or 'o_long' not in dataf:
        raise ValueError("The dataframe doesn't have the coordinates in the correct format. They need to be 'o_lat' and 'o_long'.")
    gdf_subways = extract_Points_df(df_subways, lat_column="o_lat", long_column="o_long")
    gdf_dataf = extract_Points_df(dataf, lat_column="o_lat", long_column="o_long")
    # Union of all subway points; near() searches against this geometry.
    pts3 = gdf_subways.geometry.unary_union
    # https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point-in-other-dataframe
    def near(point, pts=pts3):
        # find the nearest point and return the corresponding Place value
        nearest = gdf_subways.geometry == nearest_points(point, pts)[1]
        # BUG FIX: .get_values() was removed in pandas 1.0; use .values.
        return "%.3f" % (gdf_subways[nearest].geometry.values[0].distance(point)*10.0)
    gdf_dataf['dist_nearest_sub'] = gdf_dataf.apply(lambda row: near(row.geometry, pts3), axis=1)
    # BUG FIX: the positional axis argument drop('geometry', 1) was removed in
    # pandas 2.0; columns= is the explicit, supported form.
    gdf_dataf = gdf_dataf.drop(columns='geometry')
    return gdf_dataf
# End-to-end check of the helper on the 500-row sample.
output = add_dist_nearest_subway(dftest)
output.sample(10)
| notebooks/3_AB_subways.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Encoders: Categorical input and Categorical output Example
#
# Encoders can use Categorical input. I.e. `FeatureIndex` instances.
#
# The previous examples all used binary output, hence used BinaryCrossEntropy loss. That can be thought of as a __multi label, binary-class__ problem.
#
# From a loss perspective we can also turn this into a __multi-label, multi-class__ problem, where we see each categorical field as a different label to predict. And then use regular CrossEntropy (LogMax + Negative-Log-Likelihood) over all the labels. Intuitively we might assume this might have an equalizing effect on the loss. The loss might be less dependent on the cardinality of the feature.
#
# __Illustration of a multi-class problem__; Image classification is a typical multi-class problem where the networks gets an image and predicts the class it thinks the image is, for instance 1 for a person, 2 for a car, 3 for a plane etc... Standard image classification is *single*-label, we will have a *multi*-label, so here we are sort of doing a multi image classifier, with multiple images being predicted in one go. Each of our categorical features can be thought of as an image.
# ----
# #### Note on the data set
# The data set used here is not particularly complex and/or big. It's not really all that challenging to find the fraud. In an ideal world we'd be using more complex data sets to show the real power of Deep Learning. There are a bunch of PCA'ed data sets available, but the PCA obfuscates some of the elements that are useful.
# *These examples are meant to show the possibilities, it's not so useful to interpret their performance on this data set*
# ## Imports
# +
import torch
import numpy as np
import gc
import d373c7.features as ft
import d373c7.engines as en
import d373c7.pytorch as pt
import d373c7.pytorch.models as pm
import d373c7.plot as pl
# -
# ## Set a random seed for Numpy and Torch
# > Will make sure we always sample in the same way. Makes it easier to compare results. At some point it should been removed to test the model stability.
# Numpy
np.random.seed(42)
# Torch
torch.manual_seed(42)
# Make cuDNN deterministic (reproducible runs at the cost of some speed);
# benchmark=False stops cuDNN from auto-tuning to possibly non-deterministic kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# ## Define feature and read the File
# The base features are features found in the input file. They need to be defined after which the file can be read using the `EnginePandasNumpy`. Using the `from_csv` method.
# The `from_csv` method will read the file and return a Pandas DataFrame object
# Change this to read from another location
file = '../../../data/bs140513_032310.csv'
# +
# Raw columns from the CSV (d373c7 feature declarations).
age = ft.FeatureSource('age', ft.FEATURE_TYPE_CATEGORICAL)
gender = ft.FeatureSource('gender', ft.FEATURE_TYPE_CATEGORICAL)
merchant = ft.FeatureSource('merchant', ft.FEATURE_TYPE_CATEGORICAL)
category = ft.FeatureSource('category', ft.FEATURE_TYPE_CATEGORICAL)
amount = ft.FeatureSource('amount', ft.FEATURE_TYPE_FLOAT)
fraud = ft.FeatureSource('fraud', ft.FEATURE_TYPE_INT_8)
# Derived features: integer-index encodings of the categoricals, the amount
# bucketed into 30 bins, and the binary fraud label.
age_i = ft.FeatureIndex('age_index', ft.FEATURE_TYPE_INT_8, age)
gender_i = ft.FeatureIndex('gender_index', ft.FEATURE_TYPE_INT_8, gender)
merchant_i = ft.FeatureIndex('merchant_index', ft.FEATURE_TYPE_INT_16, merchant)
category_i = ft.FeatureIndex('category_index', ft.FEATURE_TYPE_INT_16, category)
amount_binned = ft.FeatureBin('amount_bin', ft.FEATURE_TYPE_INT_16, amount, 30)
fraud_label = ft.FeatureLabelBinary('fraud_label', ft.FEATURE_TYPE_INT_8, fraud)
learning_features = ft.TensorDefinition(
    'learning',
    [
        age_i,
        gender_i,
        merchant_i,
        category_i,
        amount_binned
    ])
label = ft.TensorDefinition('label', [fraud_label])
model_features = ft.TensorDefinitionMulti([learning_features, label])
# Read the CSV once per tensor definition and convert to numpy lists.
with en.EnginePandasNumpy() as e:
    ft_df = e.from_csv(learning_features, file, inference=False)
    lb_df = e.from_csv(label, file, inference=False)
    ft_np = e.to_numpy_list(learning_features, ft_df)
    lb_np = e.to_numpy_list(label, lb_df)
# Features and labels combined into a single NumpyList for splitting below.
data_list = en.NumpyList(ft_np.lists + lb_np.lists)
print(data_list.shapes)
print(data_list.dtype_names)
# -
# ## Wrangle the data
# Time split the data. For time series data it is very important to keep the order of the data. This split will start from the end and work it's way to the front of the data. This way the training, validation and test data are nicely colocated in time
#
# > 1. Split out a test-set of size `test_records`. This is used for model testing.
# > 2. Split out a validation-set of size `validation_records`. It will be used to monitor overfitting during training
# > 3. All the rest is considered training data.
#
# __Important__. For auto-encoders we perform a 4th step, all fraud records will be removed from the training and validation data. The auto-encoder will only see *non-fraud* records during training.
# > 4. Remove fraud from training and validation
# +
test_records = 100000
val_records = 30000
# Time-ordered split: test is taken from the end, validation just before it,
# the remainder is training data.
train_data, val_data, test_data = data_list.split_time(val_records, test_records)
# Filter. Only keep non-fraud records with label 0.
# (Auto-encoder training must never see fraud; the test set keeps both classes.)
train_data = train_data.filter_label(model_features, 0)
val_data = val_data.filter_label(model_features, 0)
print(f'Training Data shapes {train_data.shapes}')
print(f'Validation Data shapes {val_data.shapes}')
print(f'Test Data shapes {test_data.shapes}')
# Free the intermediate frames/arrays before training to reduce peak memory.
del data_list, ft_df, lb_df, ft_np, lb_np
gc.collect()
print('Done')
# -
# ## Set up Devices
device, cpu = pt.init_devices()
# ## Define model
#
# > Define a __GeneratedAutoEncoder__. As input it takes the size of the latent dimension. In this case *3*. And it takes a list of integers indicating the number and the size of the hidden dimensions. *We are defining it to have 1 hidden layer of size 16*.
#
# > From an input perspective this model has an embedding layer with one embedding per categorical feature.
#
# > From an output perspective it's different. It uses a `CategoricalLogSoftMax1d` layer. This layer does not turn the hidden layer into a single long vector of *Binary* features, but prepares the output for NNL Loss.
#
# > This model was defined in exactly the same manner as the 1st example in this single/encoders section. Because the __GeneratedAutoEncoder__ saw that the input was categorical and it matched the output to also predict multiple categorical outputs.
# +
# Setup Pytorch Datasets for the training and validation
batch_size = 128
train_ds = pt.NumpyListDataSetMulti(model_features, train_data)
val_ds = pt.NumpyListDataSetMulti(model_features, val_data)
# Wrap them in a Pytorch Dataloader. Training data is shuffled each epoch;
# validation order is irrelevant so it is left unshuffled.
train_dl = train_ds.data_loader(cpu, batch_size, num_workers=2, shuffle=True)
val_dl = val_ds.data_loader(cpu, batch_size, num_workers=2)
# Create a model: one hidden layer of size 16, compressed to a 3-d latent space.
m = pm.GeneratedAutoEncoder(model_features, linear_layers=[16], latent_features=3)
print(m)
# -
# Graphically this model more or less looks like below
#
# > Much like the previous example we compress to 3 latent features in the encoder
#
# > The latent features are decoded to 16 layers. But then rather than converting this into a single 112 length /
# rank __1__ tensor, we now make it a 5 by 51 rank __2__ tensor. So it's effectively an array rather than a vector as in the previous examples. This tensor contains the so-called `logits`: one score per class per label. The columns correspond to the number of classes we predict (we took the maximum over the input features + 1, so 51). The rows correspond to the number of labels we predict (i.e. the number of categorical features). We *LogSoftMax* over the columns and *NLL-Loss* over the rows to calculate the __CrossEntropyLoss__
#
# > This output layer has a lot of parameters. That impacts the 'capacity' of the model. Due to the output layer the model has considerably more parameters than the previous models.
# 
# ### First find a decent Learning Rate.
# > Create a trainer and run the find_lr function and plot.
# Learning-rate range test: sweep 1e-4 → 1e-1 over 200 steps and plot the
# loss so a sensible learning rate can be read off the curve.
t = pt.Trainer(m, device, train_dl, val_dl)
r = t.find_lr(1e-4, 1e-1, 200)
pl.TrainPlot().plot_lr(r)
# ### Start Training and plot the results
# > We train for __5 epochs__ and __learning rate 8e-3__. We see the model behaves fairly well during training. The loss goes up slightly in the middle of the training. This is the one_cycle logic which is reaching the max learning rate.
# Fresh Trainer (the LR sweep above mutated optimizer state), then one-cycle fit.
t = pt.Trainer(m, device, train_dl, val_dl)
h = t.train_one_cycle(5, 8e-3)
pl.TrainPlot().plot_history(h, fig_size=(10,10))
# +
# Score the held-out test set and visualize the results.
test_ds = pt.NumpyListDataSetMulti(model_features, test_data)
test_dl = test_ds.data_loader(cpu, 128, num_workers=2)
ts = pt.Tester(m, device, test_dl)
tp = pl.TestPlot()
r = ts.score_plot()
# 50 = number of histogram bins for the score distributions.
tp.plot_scores(r, 50, fig_size=(6,6))
tp.plot_score_metrics(r, 50, fig_size=(6,6))
# Score threshold 1.20 separates predicted fraud from non-fraud; presumably
# chosen from the score plots above — confirm before reuse.
tp.print_classification_report(r, threshold=1.20)
tp.plot_confusion_matrix(r, fig_size=(6,6), threshold=1.20)
tp.plot_roc_curve(r, fig_size=(6,6))
tp.plot_precision_recall_curve(r, fig_size=(6,6))
# -
# ## Plot Embeddings
# > Because we now created an embedding (Like in one of the classifier examples) we can visualize it. The plot_embedding method will take the `FeatureIndex` named __category_i__, PCA it and display the 3 principal components.
#
# > It should be clear that this embedding is much different from the classifier example. The *es_leisure* is no longer a massive outlier and the first component no longer accounts for an incredible amount of variance. That should not be a surprise: the task we are trying to solve here is very different.
#
# > It is also different from the embedding we get if we use Binary Output. There is much more variation in the first component. As mentioned earlier this is better guesswork, but there is a chance this is due to the fact that there is more balance in the way the features contribute to the loss. (It should depend less on their cardinality.)
w = m.embedding_weight(category_i, True)
pl.LayerPlot().plot_embedding(category_i, w, dims=3, fig_size=(10,10))
# # Conclusion
# We demonstrated another way to calculate the loss and used CrossEntropy (LogSoftMax + NegativeLogLikelihood) Loss. Due to the data-set we can't read too much into the result, but this model seems to have one of the wider score ranges. It was still learning after 5 epochs, but due to the increased capacity compared to the previous models, it might start over-fitting on this data as we train longer.
| notebooks/01_single/02_encoders/03_CategoricalToCategorical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 ('base')
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Beta distribution, where alpha = 1, beta = 3 (right-skewed sample).
plt.figure(figsize=(8,8))
data = np.random.beta(1,3,5000)
# seaborn.distplot() was deprecated in seaborn 0.11 and later removed;
# histplot(..., kde=True, stat='density') is the documented replacement
# producing the same density histogram + KDE overlay.
sns.histplot(data, kde=True, stat='density')
plt.show()
# Use a Box-Cox transformation to bring the skewed sample as close to a
# normal distribution as possible.
from scipy.stats import boxcox
# boxcox() returns (transformed_data, fitted_lambda); keep only the data.
tdata = boxcox(data)[0]
plt.figure(figsize=(8,8))
sns.histplot(tdata, kde=True, stat='density')
plt.show()
| Box-Cox_Trans/Box-Cox_sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Create Initial Island Topography
#
# *(Version GMD210606. Created by <NAME>, University of Colorado Boulder.)*
#
# Creates initial topography for a hypothetical oceanic plateau, roughly circular in outline, surrounded by deep water. Used as an initial condition for an example simulation illustrated in a manuscript submitted by Tucker et al. to Geoscientific Model Development, summer 2021.
from landlab import HexModelGrid, imshow_grid
from landlab.ca.hex_cts import HexCTS
from landlab.ca.celllab_cts import Transition
import numpy as np
# ## Initialize
#
# ### Set parameters
#
# Use these parameters to control the size and topographic properties of the island:
nrows = 257 # number of rows in grid
ncols = 257 # number of columns
dx = 1000.0 # spacing between grid points, m
fill_proportion = 0.5 # proportion of grid cells that are part of the initial island/plateau
mean_ocean_elev = -2500.0 # average mean elevation (-depth) of sea floor, m
stdev_elev = 400.0 # standard deviation of random noise applied to elevations (pre-smoothing), m
shield_slope = 0.01 # average slope of island topography
smoothing_length = 2000.0 # length scale for diffusive smoothing, m
# ### Define the PlateauGenerator class
#
# The `PlateauGenerator` is a simple cellular automaton model derived from Landlab's `HexCTS` continuous-time stochastic cellular modeling class, which uses a hexagonal grid. The idea is to use an Eden growth algorithm to create a quasi-circular island. The algorithm starts with a binary hex grid in which all nodes are initially zero ("out") except the middle node, which has the value 1 ("in"). Wherever an "in" node is adjacent to an "out" node (representing a point on the perimeter of the island), there is a fixed probability per unit time that the "out" node will be added to the island (by converting it to an "in" node). With the CTS framework, this is easy to accomplish: just define two transition types, one transitioning from a node pair with (0, 1) to (1, 1), and the other transitioning from a node pair with (1, 0) to (1, 1). Both transitions are given a unit rate.
#
# The algorithm proceeds until a user-specified target fill proportion (i.e., the proportion of nodes in the grid that are in the "in" state) has been reached or slightly exceeded.
#
# For information about the CTS framework, see [Tucker et al. (2016)](https://doi.org/10.5194/gmd-9-823-2016), and the [online documentation](https://landlab.readthedocs.io/en/latest/user_guide/cell_lab_user_guide.html).
class PlateauGenerator(HexCTS):
    """Eden-growth cellular automaton that grows a quasi-circular island.

    Starts from a single "in" node at the centre of a hexagonal grid and
    stochastically converts adjacent "out" (perimeter) nodes to "in" at unit
    rate until a target fill proportion is reached.
    """

    def __init__(
        self, nrows=11, ncols=11, dx=1.0, target_fill_proportion=0.5, dt=1.0, seed=0
    ):
        """Build the grid, seed the island centre, and set up the CTS model.

        Parameters
        ----------
        nrows, ncols : int
            Number of node rows / columns in the hex grid.
        dx : float
            Node spacing, m.
        target_fill_proportion : float
            Fraction of nodes that should end up in the "in" (island) state.
        dt : float
            Interval between fill-proportion checks in ``go()``.
        seed : int
            Random seed passed to the CTS engine.
        """
        self.create_grid(nrows, ncols, dx)
        self.node_state = self.grid.at_node["node_state"]
        # Seed the island with a single "in" node at the middle of the grid.
        middle_node = self.grid.number_of_nodes // 2
        self.node_state[middle_node] = 1
        self.target_fill_proportion = target_fill_proportion
        self.dt = dt
        node_state_dict = {0: "out", 1: "in"}
        # Perimeter growth: an (in, out) node pair becomes (in, in) at unit
        # rate, in either orientation of the pair.
        transition_list = [
            Transition((0, 1, 0), (1, 1, 0), 1.0),
            Transition((1, 0, 0), (1, 1, 0), 1.0),
        ]
        super().__init__(
            self.grid, node_state_dict, transition_list, self.node_state, seed=seed
        )

    def create_grid(self, nrows, ncols, dx):
        """Make the hexagonal grid and attach the binary node-state field."""
        self.grid = HexModelGrid((nrows, ncols), dx, node_layout="rect")
        # Bug fix: np.int (a deprecated alias of the builtin int) was removed
        # in NumPy 1.24 and raises AttributeError; the builtin int yields the
        # identical dtype for this field.
        self.grid.add_zeros("node_state", at="node", dtype=int)

    def proportion_filled(self):
        """Return the fraction of nodes currently in the "in" state.

        NOTE(review): the numerator counts all non-zero nodes while the
        denominator is core nodes only — confirm this mix is intended.
        """
        return np.count_nonzero(self.node_state) / self.grid.number_of_core_nodes

    def go(self):
        """Run the automaton until the target fill proportion is reached."""
        current_time = 0.0
        prop_filled = self.proportion_filled()
        while prop_filled < self.target_fill_proportion:
            self.run(run_to=current_time + self.dt)
            prop_filled = self.proportion_filled()
            print(str(np.round(100 * prop_filled)) + "% filled")
            current_time += self.dt
# ## Run
#
# ### Create a binary map of island versus sea floor
#
# Run the generator to create a binary map of the island.
# Grow the island until the requested fill proportion is reached, then show
# the binary island / sea-floor map.
pg = PlateauGenerator(nrows=nrows, ncols=ncols, dx=dx, target_fill_proportion=fill_proportion)
pg.go()
imshow_grid(pg.grid, 'node_state')
# ### Assign elevations with some random noise
#
# Here we give the island some elevation. The island nodes gets a mean elevation of zero, and the sea floor gets a mean elevation specified above in the parameter `mean_ocean_elev`. To each grid node (except boundaries) we then add a normally distributed random variable with a mean of zero and a standard deviation of `stdev_elev`.
grid = pg.grid
# Elevation field; clobber=True overwrites the field if it already exists.
z = grid.add_zeros('topographic__elevation', at='node', clobber=True)
ns = grid.at_node['node_state']
# Island nodes (state 1) get mean elevation 0; sea-floor nodes (state 0) get
# mean_ocean_elev. Both receive Gaussian noise with std. dev. stdev_elev.
z[ns==1] = stdev_elev * np.random.standard_normal(len(z[ns==1]))
z[ns==0] = mean_ocean_elev + stdev_elev * np.random.standard_normal(len(z[ns==0]))
z[grid.boundary_nodes] = mean_ocean_elev # keep the boundary nodes at the mean sea floor elevation
imshow_grid(grid, z)
imshow_grid(grid, z, vmin=-100) # highlighting the island
imshow_grid(grid, z, vmax=mean_ocean_elev+2*stdev_elev) # highlighting the sea floor
# ### Smooth the noisy topography using diffusion
#
# To reduce the "spikiness" of the initial terrain, we'll smooth it using diffusion. Here we'll aim for a length scale of filtering of $\sigma$, which we achieve as follows:
#
# $\sigma = \sqrt{DT}$, where $D=1$ is diffusivity, and $T$ is duration of diffusive smoothing. Example: if we want 5 km of smoothing, then $T = 2.5e7$.
#
# We accomplish this using Landlab's `LinearDiffuser` component.
from landlab.components import LinearDiffuser
# +
duration = smoothing_length ** 2
dt = 0.1 * dx * dx
print('Duration = ' + str(duration) + ', dt = ' + str(dt))
ld = LinearDiffuser(grid, linear_diffusivity=1.0)
# -
num_steps = int(duration / dt)
print(str(num_steps) + ' steps')
for i in range(num_steps):
ld.run_one_step(dt)
imshow_grid(grid, z) # show smoothed topography
imshow_grid(grid, z, vmin=-100.0, vmax=100.0) # highlighting the island topography
# highlighting the sea floor
imshow_grid(grid, z, vmin=mean_ocean_elev-stdev_elev, vmax=mean_ocean_elev+stdev_elev)
# ### Modify the topography to give the island a shield-like shape
#
# We now take our smoothed-random topography and give it a shield-like shape. To do this, the `shieldify` function adds a user-specified height to the center-point of the island, and that added height declines linearly with distance from the center point. (This also has the side effect of making the sea floor also slope away from the island, which we might think of as representing the slope of a thermal swell.)
# Function to modify the initial topography
# to make it vaguely shield-shaped
def shieldify(grid, topo, slope, max_add_topo):
    """Superimpose a cone on ``topo``, peaking at the grid centre.

    Adds ``max_add_topo`` at the centre point, decreasing linearly with
    radial distance at gradient ``slope``. Modifies ``topo`` in place.
    """
    node_x = grid.x_of_node
    node_y = grid.y_of_node
    center_x = 0.5 * np.amax(node_x)
    center_y = 0.5 * np.amax(node_y)
    radial_dist = np.sqrt((node_x - center_x) ** 2 + (node_y - center_y) ** 2)
    topo[:] = topo + (max_add_topo - slope * radial_dist)
# Approximate the island as a circle whose area equals the filled fraction of
# the grid, then set the peak height so the flanks drop at shield_slope out to
# the island's edge.
island_area = fill_proportion * np.amax(grid.x_of_node) * np.amax(grid.y_of_node)
island_radius = (island_area / np.pi) ** 0.5
max_ht = shield_slope * island_radius
shieldify(grid, z, shield_slope, max_ht)
# ## Finalize
#
# We display the final result, and save it to a file.
imshow_grid(grid, z)
from landlab.io.native_landlab import save_grid
save_grid(grid, 'initial_island.grid', clobber=True)
# Now we're ready to import this grid as an initial condition for a model of landscape/seascape evolution.
| create_island_topography.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Electrostatic force between the charges
# ### The Coulomb's force between two charges $q_1$ and $q_2$ separated at distance $r$ is given as;
# #### $ F = k \frac{q_1q_2}{r^2}$, where $ k = 9\times 10^9 Nm^2C^{-2}$
# - Write a code for function to calculate the Coulomb's force between two given charge at given distance
# - use list to calculate the variation of Coulomb's force between two charges (as you like) placed at a range of distance (choose range in your favour)
# - Make a plot that shows the variation of Coulomb's force with distance using list
# ### A code for function to calculate the Coulomb's force between two given charge at given distance.
def coulomb(q1, q2, d):
    """Return the Coulomb force (N) between charges q1 and q2 (C) at distance d (m)."""
    k = 9 * 10 ** 9  # Coulomb constant, N m^2 C^-2
    return k * q1 * q2 / d ** 2
coulomb(10,6,3) #Here, q1=10c,q2=6c,d=3
# ### Using list to calculate the variation of Coulomb's force between two charges placed at a range of distance.
#
# $ F = k \frac{q_1q_2}{r^2}$, where $ k = 9\times 10^9 Nm^2C^{-2}$
def coulomb(d):
    """Coulomb force (N) between fixed charges q1 = 10 C and q2 = 6 C at distance d (m)."""
    k = 9 * 10 ** 9       # Coulomb constant, N m^2 C^-2
    charge_product = 10 * 6  # q1 * q2, C^2
    return k * charge_product / d ** 2
# Tabulate distance vs. force for d = 1..5 m. List comprehension replaces the
# manual append loop — same values, clearer intent.
X = list(range(1, 6))
Y = [coulomb(d) for d in X]
print(X, Y)
# ### A plot that shows the variation of Coulomb's force with distance using list.
import matplotlib.pyplot as plt #plt is arbitrary
# %matplotlib inline
plt.plot(X,Y,color='y',lw = 2)
plt.xlabel('r',fontsize=15)
plt.ylabel('F',fontsize=20)
plt.xticks(fontsize=20)
plt.title('Coulomb Force',fontsize=30)
plt.savefig('pyplot.jpg')
| Aakash_Home_work ..ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# (a) Fit a linear regression model relating repair time (Minutes)
# to the number of units serviced (Units).
setwd('D:/Working/03.Korea/회귀분석/report/Chapter4')
# Data from section 2.3
p031_data <- read.table("P031.txt", header = TRUE)
p031reg=lm(Minutes ~ Units, p031_data)
# Additional data for exercise 4.3
p124_data <- read.table("P124.txt", header = TRUE)
p124reg=lm(Minutes ~ Units, p124_data)
summary(p124reg)
# +
library(ggplot2)
library(gridExtra)
# Plot a fitted lm model: a scatter of the model frame's second column (x)
# against its first (y), with either the fitted regression line (type == 0)
# or loess + lm smoothers (otherwise). The subtitle reports intercept/slope.
ggplotReg <- function (fit, title, type) {
p <- ggplot(fit$model, aes_string(x = names(fit$model)[2], y = names(fit$model)[1])) +
geom_point() +
expand_limits(x = 0, y = 0) +
ggtitle(title) + # set the plot title
theme(plot.title = element_text(size = 10,
face = "bold",
color = "blue",
hjust = 0.5))# apply the desired title theme
if( type == 0 ) {
# NOTE(review): fit$coefficient below works only via R's partial matching
# of $coefficients; consider spelling it out.
p <- p +
geom_abline(intercept = fit$coefficients[1],
slope = fit$coefficient[2],
color = 'red', size = 1)
} else {
p <- p +
geom_smooth(method = "loess", fill="magenta1") +
geom_smooth(method = "lm", col = "red", fill = "lawngreen")
}
p +
labs(subtitle = sprintf("Intercept [%f], slope [%f]",
fit$coefficients[1],
fit$coefficients[2]))
}
p1 <- ggplotReg(p031reg, '2.3 절 사례', 0)
p2 <- ggplotReg(p031reg, '2.3 절 사례', 1)
p3 <- ggplotReg(p124reg, '4.3 연습문제', 0)
p4 <- ggplotReg(p124reg, '4.3 연습문제', 1)
grid.arrange(p1, p2, p3, p4, ncol=2, nrow=2)
# -
# Standard 2x2 regression diagnostic plots (residuals, Q-Q, scale-location,
# leverage) for the exercise 4.3 fit. Reference: https://abluesnake.tistory.com/3
par(mfrow = c(2,2))
plot(p124reg, pch = 16, lwd = 2)
# Diagnostics for the section 2.3 data.
par(mfrow = c(2,2))
plot(p031reg, pch = 16, lwd = 2)
| 1st semester/01.LinearRegression/Chapter4/(HW)4_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import sys
import tensorflow as tf
proj_home_path="C:/vrsk.psk.family/Selva/BitsPilani/azure/semester4/semester4/SuperPoint"
sys.path.append(proj_home_path)
from superpoint.settings import EXPER_PATH
from utils import plot_imgs
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
excel_path_with_file = "C:\\vrsk.psk.family\\Selva\\BitsPilani\\shopee-product-matching\\train.csv"
patch_path = "C:\\vrsk.psk.family\\Selva\\BitsPilani\\azure\\semester4\\semester4\\data\\PM\\patches"
output_path = "C:\\vrsk.psk.family\\Selva\\BitsPilani\\azure\\semester4\\semester4\\data\\PM\\test_list.csv"
npz_path = "C:\\vrsk.psk.family\\Selva\\BitsPilani\\azure\\semester4\\semester4\\data\\PM\\test_list.npz"
df = pd.read_csv(excel_path_with_file)
df.head()
# +
def findMatch(grp, ifiles_list):
    """Return the unique images of one label group that are also on disk.

    Parameters
    ----------
    grp : tuple
        One (label, DataFrame) pair as produced by ``DataFrame.groupby``;
        the frame must have an 'image' column.
    ifiles_list : iterable of str
        Image file names available in the patch directory.

    Returns
    -------
    list of str
        Unique matching image names (order not guaranteed, as before).
    """
    # Set membership is O(1); the original scanned the list for every image.
    available = set(ifiles_list)
    data_df = grp[1]
    return list({img for img in data_df.image if img in available})
# Group the catalogue by label_group and keep, per group, the images that are
# also present in the patch directory.
agg_match_list = []
# NOTE(review): '.jpg' is appended to every directory entry — this assumes the
# patch files are stored without an extension; confirm against patch_path.
ifiles_list = [str(f) + '.jpg' for f in os.listdir(patch_path)]
grps = df.groupby('label_group')
for grp in grps:
    match_list = findMatch(grp, ifiles_list)
    # findMatch always returns a list (never None) and agg_match_list is
    # initialised to [] above, so the previous None checks were dead code.
    if len(match_list) >= 2:
        print('match: ', match_list)
        agg_match_list.append(match_list)
# -
f_list = np.concatenate(agg_match_list)
# Bug fix: list.sort() sorts in place and returns None, so sorted_list was
# always None. Sort in place (same side effect as before) and alias it.
agg_match_list.sort(key=len)
sorted_list = agg_match_list
print(sorted_list, agg_match_list)
# +
pick_list = []
# Pick only matches > 2
for i in agg_match_list:
if len(i) == 3 or len(i) == 5:
pick_list.append(i)
final_list = np.concatenate(pick_list)
print(len(final_list))
print(final_list)
# -
df = pd.DataFrame({'image': final_list})
df.to_csv(output_path, index=False)
np.savez(npz_path, image = pick_list)
df = pd.read_csv(output_path)
filter_list_names = list(df.image)
print(filter_list_names)
| notebooks/.ipynb_checkpoints/Pm_Identify_Dataset_For_Matching-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# %matplotlib inline
import pylab as plt
plt.style.use('fivethirtyeight')
# -
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
# # Read data
data = pd.read_csv('./data/EF_battles_corrected.csv', parse_dates=['start', 'end'])
data['end_num'] = (data['end'].dt.year -1938) * 12 + data['end'].dt.month
data['start_num'] = (data['start'].dt.year -1938) * 12 + data['start'].dt.month
data['duration'] = (data['end_num'] - data['start_num']).clip(lower=1)
cols = [
'allies_infantry', 'axis_infantry',
'allies_tanks', 'axis_tanks',
'allies_guns', 'axis_guns'
]
data['result_num'] = data['result'].map({'axis':-1, 'allies':1}).fillna(0) # 0 for tie
data['result_num'].value_counts()
from sklearn.model_selection import train_test_split
# Drop rows with any missing force-composition value. Bug fix: the positional
# axis argument (.any(1)) was deprecated and removed in pandas 2.0.
mask = data[cols].isnull().any(axis=1)
X = data.loc[~mask, cols]
y = data.loc[~mask, 'result_num']
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=2018
)
# +
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrains = s.fit_transform(Xtrain)
Xtests = s.transform(Xtest) # used mean and std from Xtrain
# -
# # 2. Supervised Learning
# ### KNN
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5) # again, arbitrary number
model.fit(Xtrains, ytrain)
ypred1 = model.predict(Xtest)
accuracy_score(ytest, ypred1)
ytrain
# ### Showing "Neighbors" - similar records
Xtest.head(1)
Xtrain.iloc[model.kneighbors(Xtest.head(1))[1][0]]
# # Linear Regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import median_absolute_error
# Features for predicting Allied casualties; start_num adds a time signal.
cols = [
    'allies_infantry', 'axis_infantry',
    'allies_tanks', 'axis_tanks',
    'allies_guns', 'axis_guns',
    'start_num'
]
# Bug fix: the positional axis argument (.any(1)) was removed in pandas 2.0.
mask = data[cols + ['allies killed']].isnull().any(axis=1)
y = data.loc[~mask, 'allies killed']
X = data.loc[~mask, cols]
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.3, random_state=2019
)
model = LinearRegression()
model.fit(Xtrain, ytrain)
ypred = model.predict(Xtest)
# Median absolute error is robust to the heavy-tailed casualty counts.
median_absolute_error(ytest, ypred)
(ypred - ytest)
pd.Series(model.coef_, index=X.columns)
# # Decision tree
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from io import StringIO
from IPython.display import Image
import pydotplus
cols = [
    'allies_infantry', 'axis_infantry',
    'allies_tanks', 'axis_tanks',
    'allies_guns', 'axis_guns'
]
# Bug fix: the positional axis argument (.any(1)) was removed in pandas 2.0.
mask = data[cols].isnull().any(axis=1)
X = data.loc[~mask, cols]
y = data.loc[~mask, 'result_num']
Xtrain, Xtest, ytrain, ytest = train_test_split(
    X, y, test_size=0.2, random_state=2019
)
# Bug fix: only DecisionTreeClassifier is imported above, so
# DecisionTreeRegressor raised NameError — and accuracy_score needs the
# discrete class labels a classifier predicts, not regression output.
tree_model = DecisionTreeClassifier(random_state=2019)
tree_model.fit(Xtrain, ytrain)
accuracy_score(ytest, tree_model.predict(Xtest))
tree_model.predict(Xtest)
# ## Visualize the graph
# +
dot_data = StringIO()
export_graphviz(tree_model, out_file=dot_data,
filled=True, rounded=True,
special_characters=True, feature_names=cols)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# -
| Chapter13/2_supervised_ml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selected Economic Characteristics: Poverty from the American Community Survey
#
# **[Work in progress]**
#
# This notebook downloads [selected economic characteristics (DP03)](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) from the American Community Survey 2018 5-Year Data.
#
# Data source: [American Community Survey 5-Year Data 2018](https://www.census.gov/data/developers/data-sets/acs-5year.html)
#
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
import os
import pandas as pd
from pathlib import Path
import time
pd.options.display.max_rows = None # display all rows
pd.options.display.max_columns = None # display all columsns
NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT'))
print(NEO4J_IMPORT)
# ## Download selected variables
#
# * [Selected economic characteristics for US](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03)
#
# * [List of variables as HTML](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03.html) or [JSON](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03/)
#
# * [Description of variables](https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/2018_ACSSubjectDefinitions.pdf)
#
# * [Example URLs for API](https://api.census.gov/data/2018/acs/acs5/profile/examples.html)
# ### Specify variables from DP03 group and assign property names
#
# Names must follow the [Neo4j property naming conventions](https://neo4j.com/docs/getting-started/current/graphdb-concepts/#graphdb-naming-rules-and-recommendations).
variables = {# PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL
'DP03_0119PE': 'povertyAllFamiliesPct',
'DP03_0128PE': 'povertyAllPeoplePct'
}
fields = ",".join(variables.keys())
for v in variables.values():
if 'Pct' in v:
print('p.' + v + ' = toFloat(row.' + v + '),')
else:
print('p.' + v + ' = toInteger(row.' + v + '),')
print(len(variables.keys()))
# ## Download county-level data using US Census API
url_county = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=county:*'
df = pd.read_json(url_county, dtype='str')
df.fillna('', inplace=True)
df.head()
# ##### Add column names
df = df[1:].copy() # skip first row of labels
columns = list(variables.values())
columns.append('stateFips')
columns.append('countyFips')
df.columns = columns
# Remove Puerto Rico (stateFips = 72) to limit data to US States
#
# TODO handle data for Puerto Rico (GeoNames represents Puerto Rico as a country)
df.query("stateFips != '72'", inplace=True)
# Save list of state fips (required later to get tract data by state)
stateFips = list(df['stateFips'].unique())
stateFips.sort()
print(stateFips)
df.head()
# Example data
df[(df['stateFips'] == '06') & (df['countyFips'] == '073')]
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'Admin2'
# ### Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03PovertyAdmin2.csv", index=False)
# ## Download zip-level data using US Census API
url_zip = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=zip%20code%20tabulation%20area:*'
df = pd.read_json(url_zip, dtype='str')
df.fillna('', inplace=True)
df.head()
# ##### Add column names
df = df[1:].copy() # skip first row
columns = list(variables.values())
columns.append('postalCode')
df.columns = columns
df.head()
# Example data
df.query("postalCode == '90210'")
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'PostalCode'
# ### Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03PovertyZip.csv", index=False)
# ## Download tract-level data using US Census API
# Tract-level data are only available by state, so we need to loop over all states.
def get_tract_data(state):
    """Fetch ACS DP03 tract-level rows for one state from the Census API.

    Uses the module-level ``fields`` (query string) and ``variables``
    (API code -> property name) mappings; sleeps briefly between requests
    to be polite to the API.
    """
    url_tract = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=tract:*&in=state:{state}'
    frame = pd.read_json(url_tract, dtype='str')
    time.sleep(1)
    # Drop the first row (the API's column labels) and rename the columns.
    frame = frame[1:].copy()
    frame.columns = list(variables.values()) + ['stateFips', 'countyFips', 'tract']
    return frame
df = pd.concat((get_tract_data(state) for state in stateFips))
df.fillna('', inplace=True)
df['tract'] = df['stateFips'] + df['countyFips'] + df['tract']
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'Tract'
# Example data for San Diego County
df[(df['stateFips'] == '06') & (df['countyFips'] == '073')].head()
# ### Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03PovertyTract.csv", index=False)
df.shape
| notebooks/dataprep/03a-USCensusDP03Poverty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## N Ways to GPU Programming
#
# ## Learning Objectives
# With the release of NVIDIA CUDA in 2007, different approaches to GPU programming have evolved. Each approach has its own advantages and disadvantages. By the end of this bootcamp session, participants will have a broader perspective on GPU programming approaches to help them select a programming model that better fits their application's needs and constraints. The bootcamp will teach how to accelerate a real-world scientific application using the following methods:
# * Standard: C++ stdpar, Fortran Do-Concurrent
# * Directives: OpenACC, OpenMP
# * Frameworks: Kokkos
# * Programming Language Extension: CUDA C, CUDA Fortran, Python CuPy, Python Numba
#
# Let's start by testing the CUDA Driver and GPU you are running the code on in this lab:
# !nvidia-smi
# ### Bootcamp Outline
#
# During this lab, we will be working on porting mini applications in Molecular Simulation (MD) domain to GPUs. You can choose to work with either version of this application. Please click on one of the below links to start N Ways to GPU Programming in **MD** for:
#
# - [C and Fortran](nways_MD/English/nways_MD_start.ipynb) domain
# - [Python](nways_MD/English/nways_MD_start_python.ipynb) domain
# ### Bootcamp Duration
# The lab material will be presented in an 8-hour session. A Link to the material is available for download at the end of the lab.
#
# ### Content Level
# Beginner, Intermediate
#
# ### Target Audience and Prerequisites
# The target audience for this lab are researchers/graduate students and developers who are interested in learning about various ways of GPU programming to accelerate their scientific applications.
#
# Basic experience with C/C++ or Python or Fortran programming is needed. No GPU programming knowledge is required.
#
# ---
#
# ## Licensing
#
# This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
| hpc/nways/nways_labs/nways_start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: ipykernel_py2
# ---
# ## Variables
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# Create a variable with name "x" and a value of 10. Execute.
x = 10
# Tell the computer to show you the value of that variable.
x
# Can you think of a second way to obtain the same result?
print x
# On the same line, create four new variables: a,b,c, and d, that are equal to 10, 20, 30, and 40, respectively.
a,b,c,d = (10,20,30,40)
# Alternatively:
a,b,c,d = 10,20,30,40
# Tell the computer to show you the value corresponding to the variable "b".
b
# Alternatively:
print b
# Do the same for "d".
d
# Or:
print d
| 11 - Introduction to Python/2_Python Variables and Data Types/1_Variables (3:41)/Variables - Solution_Py2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JohanvandenHeuvel/implementations/blob/main/CentralLimitTheorem.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="HbPk1-bGXmuB"
import numpy as np
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="dBXCb81xXs2A" outputId="ff9a9e4b-26f4-4bb6-b826-c0269740cf7d"
sample_means = []
for i in range(10000):
sample_means.append(np.mean((np.random.uniform(size=2))))
plt.hist(sample_means, bins = 20, density=True)
plt.xlabel('x')
plt.ylabel('Density')
plt.title('Empirical Density')
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="HCR9_dV7X41d" outputId="eceab54e-0d02-4e39-ca61-a456da0f5bbb"
sample_means = []
for i in range(10000):
sample_means.append(np.mean((np.random.uniform(size=4))))
plt.hist(sample_means, bins = 20, density=True)
plt.xlabel('x')
plt.ylabel('Density')
plt.title('Empirical Density')
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="rKU1L3MbYIBw" outputId="9500c32a-087a-4032-dc79-0a9e781e3bd9"
sample_means = []
for i in range(10000):
sample_means.append(np.mean((np.random.uniform(size=8))))
plt.hist(sample_means, bins = 20, density=True)
plt.xlabel('x')
plt.ylabel('Density')
plt.title('Empirical Density')
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="bf8qBrcFYllm" outputId="03e410ed-86ea-4d99-ea4d-2fba693b06de"
sample_means = []
for i in range(10000):
sample_means.append(np.mean((np.random.uniform(size=16))))
plt.hist(sample_means, bins = 20, density=True)
plt.xlabel('x')
plt.ylabel('Density')
plt.title('Empirical Density')
# + id="NQnkFmKwYnmA"
| CentralLimitTheorem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# (dimensionality)=
# # PyMC Dimensionality
# PyMC provides a number of ways to specify the dimensionality of its distributions. In this document we will not provide an exhaustive explanation but rather an overview and current best practices.
#
# ## Glossary
# In this document we'll be using the term dimensionality to refer to the idea of dimensions. Each of the terms below has a specific
# semantic and computational definition in PyMC. While we share them here they will make much more sense when viewed in the examples below.
#
# # + *Implied dimensions* → dimensionality that follows from inputs to the RV
# # + *Support dimensions* → dimensions you can NEVER get rid of
# # + *`ndim_supp`* → smallest shape that can result from a random draw. This is a fixed attribute in the distribution definition
# # + *Shape* → final resulting tensor shape
# # + *Size* → shape minus the support dimensions
# # + *Dims* → An array of dimension names
# # + *Coords* → A dictionary mapping dimension names to coordinate values
#
#
# ## General Recommendations
# ### When prototyping implied and size are convenient
# Implied dimensions are easy to specify and great for quickly expanding an existing RV.
#
# ### For reusable code we suggest dims
# For any more important or reusable work we suggest dims and coords, as the labels will be passed to {class}`arviz.InferenceData`. This is both best practice for transparency and readability for others. It is also useful in single-developer workflows; for example, in cases where there is a 3-dimensional or higher RV it will help indicate which dimension corresponds to which model concept.
#
# ### Use shape if you'd like to be explicit
# Use shape if you'd like to bypass any dimensionality calculations implicit in PyMC. This will strictly specify the dimensionality to Aesara
#
# ### When debugging use unique prime numbers
# By using prime numbers it will be easier to determine how input dimensionalities are being converted to output dimensionalities.
# Once confident with result then change the dimensionalities to match your data or modeling needs.
# -
# ## Code Examples
import pymc as pm
import numpy as np
# ## Scalar distribution example
# We can start with the simplest case, a single Normal distribution. We specify one as shown below
normal_dist = pm.Normal.dist()
# We can then take a random sample from that same distribution and print both the draw and shape
random_sample = normal_dist.eval()
random_sample, random_sample.shape
# In this case we end up with a single scalar value. This is consistent with the distribution's `ndim_supp`, as the smallest random draw dimension is a scalar, which has a dimension of zero
pm.Normal.rv_op.ndim_supp
# ### Implied Example
# If we wanted three draws from differently centered Normals we instead could pass a vector to the parameters. When generating a random draw we would now expect a vector value, in this case a vector of size 3. This is a case of *implied dimensions*
random_sample = pm.Normal.dist(mu=[1,10,100], sigma=.0001).eval()
random_sample, random_sample.shape
# ### Shape and Size
# Alternatively we may just want three draws from identical distributions. In this case we could use either `shape` or `size` to specify this
random_sample = pm.Normal.dist(size=(3,)).eval()
random_sample, random_sample.shape
random_sample = pm.Normal.dist(shape=(3,)).eval()
random_sample, random_sample.shape
# ### Inspecting dimensionality with a model graph
# A powerful tool to understand and debug dimensionality in PyMC is the `pm.model_to_graphviz` functionality. Rather than inspecting array outputs we instead can read the Graphviz output to understand the dimensionality.
#
# In the example below the number on the bottom left of each box indicates the dimensionality of the Random Variable. With the scalar distribution it is implied to be one random draw of `ndim_supp`
# +
with pm.Model() as pmodel:
pm.Normal("scalar") # shape=()
pm.Normal("vector (implied)", mu=[1,2,3])
pm.Normal("vector (from shape)", shape=(4,))
pm.Normal("vector (from size)", size=(5,))
pm.model_to_graphviz(pmodel)
# -
# ## Dims
# A new feature of PyMC is `dims` support. With many random variables it can become confusing which dimensionality corresponds to which "real world" idea, e.g. number of observations, number of treated units etc. The dims argument is an additional label to help.
# +
with pm.Model() as pmodel:
pm.Normal("red", size=2, dims="B")
pm.Normal("one", [1,2,3,4], dims="Dim_A") # (4,)
pm.Normal("two", dims="Dim_A")
pm.model_to_graphviz(pmodel)
# -
# Where dims can become increasingly powerful is with the use of `coords` specified in the model itself. With this it becomes easy to track. As an added bonus the coords and dims will also be present in the returned {class}`arviz.InferenceData`, simplifying the entire workflow.
with pm.Model(coords={
"year": [2020, 2021, 2022],
}) as pmodel:
pm.Normal("Normal_RV", dims="year")
pm.model_to_graphviz(pmodel)
# ## Vector Distributions
# Some distributions by definition cannot return scalar values as random samples, but instead will return an array as their result. An example is the Multivariate Normal. The simplest possible return shape can be verified using `ndim_supp`. The value here indicates the smallest shape that can be returned is a vector
pm.MvNormal.rv_op.ndim_supp
# This can be verified with a random sample as well.
pm.MvNormal.dist(mu=[[1,2,3], [4,5,6]], cov=np.eye(3)*.0001).eval()
# Like scalar distributions we can also use all our dimensionality tools as well to specify a set of Multivariate normals
# +
with pm.Model(coords={
"year": [2020, 2021, 2022],
}) as pmodel:
mv = pm.MvNormal("implied", mu=[0, 0, 0], cov=np.eye(3))
print(mv.shape.eval())
# Multivariate RVs (ndim_supp > 0)
assert mv.ndim == 1
mv = pm.MvNormal("with size", mu=[0, 0], cov=np.eye(2), size=3, dims=("repeats", "implied"))
print(mv.shape.eval())
# ⚠ Size dims are always __prepended__
mv = pm.MvNormal("with shape", mu=[0, 0], cov=np.eye(2), shape=(3, ...), dims=("repeats", ...))
print(mv.shape.eval())
mv = pm.MvNormal("with coords", mu=[0, 0], cov=np.eye(2), dims=("year", ...))
print(mv.shape.eval())
pm.model_to_graphviz(pmodel)
# -
# ### User caution and practical tips
# While we provide all these tools for convenience, and while PyMC does its best to understand user intent, the result of mixing dimensionality tools may not always be the final dimensionality intended. Sometimes the model may not indicate an error until sampling, or not indicate an issue at all. When working with dimensionality, particularly more complex cases, we suggest
#
# * Using GraphViz to visualize your model before sampling
# * Using the prior predictive to catch errors early
# * Inspecting the returned `az.InferenceData` object to ensure all array sizes are as intended
| docs/source/learn/core_notebooks/dimensionality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# __Author__: <NAME>
#
# __Email__: <EMAIL>
#
# __Date__:3/4/2020
import cPickle as pickle
import numpy as np
import os
import matplotlib.pylab as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['font.size']=15
matplotlib.rcParams['font.family']='Arial'
# #### Load data
data_folder = r'\data' #This is the folder containing the .tsv data files
save_data = r'\data'
# +
folder = data_folder
fid = open(folder+os.sep+r'genomic-scale-with transcription and nuclear bodies.tsv','r')
lines = np.array([ln[:-1].split('\t')for ln in fid if len(ln)>0])
zxy = np.array(lines[1:,:3][:],dtype=np.float)
zxy = zxy.reshape([-1,2082,3])/1000 #transform to um
head = list(lines[0])
experiment = np.array(lines[1::2082,head.index('experiment number')].astype(int))-3
experiment_ = np.array(lines[1::1041,head.index('experiment number')].astype(int))-3
# -
Transcr = []#np.array(['on' in e for e in lines[1:,8]])
for e in lines[1:,8]:
if 'on' in e:
Transcr.append(1)
elif (e=='') or (e=='nan'):
Transcr.append(np.nan)
else:
Transcr.append(0)
Transcr_WT = np.array(Transcr).reshape([-1,1041])
#Please see Part 3 how to calculate densities
#resWT = np.load(r'Data\densityIMR90Untreated.npy')
resWT = np.load(save_data+r'\densityIMR90Untreated.npy')
keep = slice(None)
AD = resWT[keep,0,0,:]
BD = resWT[keep,1,0,:]
Dratio_WT = AD/BD
Dratio_WT = Dratio_WT.reshape([-1,1041])[-len(Transcr_WT):]
good_WT = (~np.isnan(zxy[:,:,0])).reshape([-1,1041])
tot_distsNUC_WT_ = np.array(lines[1:,-2],dtype=np.float).reshape([-1,1041])/1000
tot_distsSC_WT_ = np.array(lines[1:,-1],dtype=np.float).reshape([-1,1041])/1000
tot_distsLAM_WT_ = np.array(lines[1:,-3],dtype=np.float).reshape([-1,1041])/1000
# ### 1. Calculate the nucleoli association rate
#distances are assumed in um
tot_dists = tot_distsNUC_WT_
asocs = 1.*np.sum(tot_dists<0.25,0)/np.sum(tot_dists>-1,0)
lens = [76, 80, 66, 63, 60, 55, 53, 48, 40, 43, 44, 44, 33, 30, 31, 30, 33, 33, 33, 33, 31, 31, 51]
edges = np.cumsum([0]+lens)
from matplotlib import cm
fig = plt.figure(figsize=(15,3))
colors = list(cm.tab20.colors)+list(cm.Dark2.colors)
for i in range(len(edges)-1):
plt.semilogy(np.arange(edges[i],edges[i+1]),asocs[edges[i]:edges[i+1]],'.-',color = colors[i])
# ### 2. Calculate the correlation between the lamina/speckle association rate and the A/B density ratio
# +
nuc_cutoff = 0.25
frLAM_WT = 1.*np.sum(tot_distsLAM_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
frNUC_WT = 1.*np.sum(tot_distsNUC_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
frSC_WT = 1.*np.sum(tot_distsSC_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
dLAM_WT = np.nanmedian(tot_distsLAM_WT_,axis=0)
dSC_WT = np.nanmedian(tot_distsSC_WT_,axis=0)
dNUC_WT = np.nanmedian(tot_distsNUC_WT_,axis=0)
dRAT_WT = np.nanmedian(Dratio_WT,axis=0)
# -
fig=plt.figure(figsize=(5,5))
cor = np.round(np.corrcoef(np.log(frLAM_WT),np.log(dRAT_WT))[0,1],2)
plt.semilogy(frLAM_WT,dRAT_WT,'o',color='gray',markeredgecolor='k',label='r = '+str(cor))
plt.ylabel("A/B density ratio")
plt.xlabel("Lamina association rate")
plt.title('Cut-off distance: '+str(int(nuc_cutoff*1000))+' nm')
plt.legend()
fig=plt.figure(figsize=(5,5))
cor = np.round(np.corrcoef(np.log(frSC_WT),np.log(dRAT_WT))[0,1],2)
plt.loglog(frSC_WT,dRAT_WT,'o',color='orange',markeredgecolor='k',label='r = '+str(cor))
plt.ylabel("A/B density ratio")
plt.xlabel("Fraction of cells speckle-associated")
plt.title('Cut-off distance: '+str(int(nuc_cutoff*1000))+' nm')
plt.legend()
# ### 3. Calculate the median distance from the lamina for A and B regions
# +
nuc_cutoff = 0.25
good_WT = tot_distsLAM_WT_>-1
frLAM_WT = 1.*np.sum(tot_distsLAM_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
frNUC_WT = 1.*np.sum(tot_distsNUC_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
frSC_WT = 1.*np.sum(tot_distsSC_WT_<nuc_cutoff,0)/np.sum(good_WT,0)
dLAM_WT = np.nanmedian(tot_distsLAM_WT_,axis=0)
dSC_WT = np.nanmedian(tot_distsSC_WT_,axis=0)
dNUC_WT = np.nanmedian(tot_distsNUC_WT_,axis=0)
AB = 'B,B,A,A,B,B,A,A,A,B,A,A,A,B,A,B,B,A,B,B,B,B,B,B,B,A,B,B,A,A,A,B,B,B,B,B,B,B,B,A,nan,A,A,A,B,A,B,A,B,A,B,A,B,A,A,A,B,B,B,A,A,A,B,B,A,B,B,A,B,B,B,B,B,B,B,A,B,B,A,A,B,B,B,A,A,B,A,B,A,A,B,B,B,A,B,B,A,B,A,B,A,B,B,B,B,B,nan,A,B,A,B,B,A,B,B,A,B,B,B,B,A,B,B,A,B,A,B,B,A,B,B,A,A,A,B,B,A,B,A,A,B,B,A,B,B,B,B,A,A,B,A,B,A,B,B,A,B,B,B,B,A,B,B,A,B,A,A,B,B,A,A,A,B,B,A,B,B,A,A,B,B,B,B,B,A,B,nan,B,A,A,B,A,B,A,B,A,A,A,A,B,B,A,B,B,B,A,B,B,B,B,B,A,A,B,A,B,A,A,B,B,A,A,A,B,B,B,A,B,B,A,A,B,B,B,A,A,B,B,nan,A,A,B,B,B,B,B,B,B,B,B,A,B,B,B,A,B,B,B,B,A,B,A,A,A,B,B,B,A,A,B,B,A,B,B,A,B,B,B,B,B,A,B,A,B,A,B,B,A,B,B,B,B,B,B,B,A,B,A,B,B,nan,B,A,A,B,B,A,B,A,B,A,A,A,B,B,A,A,B,B,B,B,B,B,B,B,A,B,B,B,A,A,B,A,B,A,B,B,B,B,B,B,B,B,A,A,A,B,B,A,A,A,A,B,B,A,A,A,B,A,B,B,B,A,A,B,B,B,B,A,B,B,B,B,A,B,B,B,B,B,A,A,B,B,B,B,B,A,A,A,B,A,A,A,A,B,B,B,B,B,B,B,A,B,B,B,B,B,B,B,A,A,A,B,A,A,A,B,B,B,nan,B,A,B,B,A,A,A,A,B,B,A,B,A,A,A,A,B,B,A,B,B,B,A,B,A,A,B,B,B,B,B,B,B,B,B,A,B,B,A,B,B,B,A,B,B,A,A,nan,A,B,A,B,B,B,B,A,A,B,B,A,B,B,B,B,B,A,B,A,B,B,B,B,A,A,B,B,B,B,B,A,nan,B,B,B,B,B,B,B,B,A,B,B,A,B,nan,nan,B,B,B,B,B,B,B,B,B,B,A,A,B,A,B,A,A,B,B,A,A,A,A,B,B,B,A,B,A,A,A,B,B,B,A,A,B,nan,A,nan,A,B,B,B,B,B,A,A,A,A,B,B,A,B,A,B,B,A,B,B,B,B,B,B,B,B,B,B,A,B,A,A,B,B,B,A,B,B,A,A,B,B,B,A,nan,B,B,B,A,A,A,A,A,B,B,B,B,A,A,B,B,A,B,A,B,A,B,A,B,B,B,B,A,A,B,B,B,B,B,B,A,B,B,nan,B,B,B,A,A,A,A,B,B,A,B,B,B,A,B,B,B,A,A,B,B,B,A,B,B,B,B,B,A,B,B,A,nan,A,A,B,B,B,B,B,A,A,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,nan,nan,B,B,B,B,B,B,B,B,B,B,A,A,B,B,B,B,A,B,A,B,B,B,B,B,B,B,B,A,A,nan,nan,B,B,B,B,A,B,A,A,B,A,B,B,B,B,B,A,A,A,B,A,A,B,B,B,A,B,B,B,B,A,B,B,B,B,A,B,B,B,B,A,B,B,nan,B,B,B,A,B,B,B,A,A,B,B,B,B,B,A,A,A,A,A,B,B,B,A,A,B,nan,B,A,B,B,A,A,A,A,A,A,B,B,B,A,A,A,A,B,B,A,A,A,A,B,B,B,A,A,B,nan,nan,A,A,B,B,B,B,A,B,A,B,A,B,B,B,A,A,B,B,B,A,A,B,A,A,A,A,A,A,B,B,A,B,A,B,A,A,B,B,nan,nan,B,B,B,B,B,B,A,A,A,A,A,A,A,B,B,B,B,B,B,A,B,B,B,B,B,B,B,B,B,B,B,nan,nan,nan,A,A,A,B,B,B,B,B,B,A,B,B,B,B,B,B,A,nan,B,B,nan,nan,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,A,B,B,B,B,B,B,A,A,nan,nan,nan,na
n,B,A,A,A,A,A,B,A,A,A,A,A,B,B,A,A,A,A,A,A,A,A,A,A,B,B,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,A,nan,A,A,A,A,A,A,A,A,A,A,A,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B,B'
AB=np.array(AB.split(','))
lens = [76, 80, 66, 63, 60, 55, 53, 48, 40, 43, 44, 44, 33, 30, 31, 30, 33, 33, 33, 33, 31, 31, 51]
edges = np.cumsum([0]+lens)
A,B = AB=='A',AB=='B'
dRAT_WT = np.nanmedian(Dratio_WT,axis=0)
# +
### Note - this is only for the last two datasets
fig = plt.figure()
res = np.linspace(0,2,25)[1:]
DAs_ = tot_distsLAM_WT_[:,A].ravel()
DAs_ = DAs_[DAs_>0]
hTEMP = plt.hist(DAs_,alpha=0.75,color='r',bins=res,normed=True)
DBs_ = tot_distsLAM_WT_[:,B].ravel()
DBs_ = DBs_[DBs_>0]
plt.hist(DBs_,alpha=0.75,color='b',bins=res,normed=True)
yvals = [0,0.025,0.05,0.075,0.1]
plt.yticks(np.array(yvals)*np.sum(hTEMP[0]),np.array(yvals)*100)
plt.xlabel('Distance from the nuclear periphery (um)')
plt.ylabel('% of loci')
# -
# ### 4. Calculate the association rate to the lamina and the nuclear speckles for A and B regions
fr_LAM__ = frLAM_WT#1.*np.sum(Asocs_LAM_WT,0)/np.sum(good_WT,0)
fig = plt.figure()
hTEMP=plt.hist(fr_LAM__[A],color='r',bins=20,alpha=0.75,label='A',normed=True)
plt.hist(fr_LAM__[B],color='b',bins=20,alpha=0.75,label='B',normed=True)
plt.ylabel(r"Probability density")
plt.xlabel("Median association frequency to lamina")
fr_SC__ = frSC_WT#1.*np.sum(Asocs_SC_WT,0)/np.sum(good_WT,0)
fig = plt.figure()
hTEMP=plt.hist(fr_SC__[A],color='r',bins=np.exp(np.linspace(-2.5,-0.5,20)),alpha=0.75,label='A',normed=True)
HTEMP = plt.hist(fr_SC__[B],color='b',bins=np.exp(np.linspace(-2.5,-0.5,20)),alpha=0.75,label='B',normed=True)
plt.ylabel("Probability density")
plt.xlabel("Median association frequency to speckles")
plt.gca().set_xscale("log")
plt.legend()
yvals = np.linspace(0,0.30,4)
# ### 5. Calculate the correlation between association rate to nuclear bodies and transcriptional activity
# +
nuc_cutoof = 0.5
Asocs_LAM_WT = tot_distsLAM_WT_<nuc_cutoof
Asocs_NUC_WT = tot_distsNUC_WT_<nuc_cutoof
Asocs_SC_WT = tot_distsSC_WT_<nuc_cutoof
dif = (1041+100)
good_WT = tot_distsLAM_WT_>-1
fig=plt.figure(figsize=(6,7))
Asocs = Asocs_LAM_WT##########################################
Transcr_WT = Transcr_WT==1
aSs = 1.*np.sum(Asocs*(~Transcr_WT),axis=0)/np.sum((~Transcr_WT)*good_WT,axis=0)
aTs = 1.*np.sum(Asocs*(Transcr_WT),axis=0)/np.sum(Transcr_WT*good_WT,axis=0)
anNs = 1.*np.sum(Transcr_WT*(~Asocs),axis=0)/np.sum((~Asocs)*good_WT,axis=0)
aNs = 1.*np.sum(Transcr_WT*(Asocs),axis=0)/np.sum(Asocs*good_WT,axis=0)
plt.semilogy(np.arange(len(aTs))+dif*0,aTs/aSs,'o',color='gray',markeredgecolor='k')
plt.plot(np.arange(len(aTs))+dif*0,[np.nanmean(aTs/aSs)]*len(aTs),lw=5,color='r')
fr = aTs/aSs
fr = 100.*np.sum(fr>1)/np.sum(fr>-1)
print(fr)
Asocs = Asocs_SC_WT##########################################
Transcr_WT = Transcr_WT==1
aSs = 1.*np.sum(Asocs*(~Transcr_WT),axis=0)/np.sum((~Transcr_WT)*good_WT,axis=0)
aTs = 1.*np.sum(Asocs*(Transcr_WT),axis=0)/np.sum(Transcr_WT*good_WT,axis=0)
anNs = 1.*np.sum(Transcr_WT*(~Asocs),axis=0)/np.sum((~Asocs)*good_WT,axis=0)
aNs = 1.*np.sum(Transcr_WT*(Asocs),axis=0)/np.sum(Asocs*good_WT,axis=0)
fr = aTs/aSs
fr = 100.*np.sum(fr>1)/np.sum(fr>-1)
print(fr)
plt.semilogy(np.arange(len(aTs))+dif*1,aTs/aSs,'o',color='orange',markeredgecolor='k')
plt.plot(np.arange(len(aTs))+dif*1,[np.nanmean(aTs/aSs)]*len(aTs),lw=5,color='r')
plt.plot([0,dif*2],[1,1],':r',lw=5)
plt.ylim([0.1,3.5])
ax1 = fig.get_axes()[0]
ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.yticks([0.12,0.25,0.5,1,2])
plt.ylabel('Association enrichment')
plt.title('250nm cutoff')
# -
# ### 6. Calculate the correlation between A/B density and transcriptional activity in different nuclear environments
# +
nuc_cutoof = 0.25
Asocs_LAM_WT = tot_distsLAM_WT_<nuc_cutoof
Asocs_NUC_WT = tot_distsNUC_WT_<nuc_cutoof
Asocs_SC_WT = tot_distsSC_WT_<nuc_cutoof
dif = (1041+100)
fig=plt.figure(figsize=(13,7))#4,7
dif = (1041+100)
cols = [[0.2,0.2,0.2],'orange',[0,0.5,1],'white']
good_WT = good_WT>0
Asocslist = [good_WT,Asocs_SC_WT,Asocs_LAM_WT,(~Asocs_SC_WT)&(~Asocs_LAM_WT)&good_WT]
lims = [-0.4,0.6]
no_pts = []
for iAsocs,Asocs in enumerate(Asocslist):
Transcr =Transcr_WT==1
DratioT = Dratio_WT.copy()
DratioT[~(Transcr&Asocs&good_WT)]=np.nan
dT = np.nanmedian(DratioT,0)
DratioS = Dratio_WT.copy()
DratioS[~((~Transcr)&Asocs&good_WT)]=np.nan
dS = np.nanmedian(DratioS,0)
logTS = np.log(dT/dS)/np.log(2)
plt.plot(np.arange(1041)+dif*iAsocs,logTS,'o',color=cols[iAsocs],markeredgecolor='k')
plt.plot(np.arange(1041)+dif*iAsocs,[np.nanmedian(logTS)]*1041,lw=5,color='r')#cols[iAsocs])
fr = 1.*np.sum((dT/dS)>1)/np.sum((dT/dS)>0)
no_pts.append([np.sum(logTS<lims[0]),np.sum(logTS>lims[1])])
print(np.sum(logTS<lims[0]),np.sum(logTS>lims[1]))
print("Number above 0:"+str(fr))
from scipy.stats import wilcoxon as wilcoxon
print wilcoxon(np.log(dT),np.log(dS))
#plt.title('B regions')
#plt.semilogy(np.arange(len(frLAM_am))+dif*0,1./frLAM_am*frLAM_WT,'o',color='gray',markeredgecolor='k')
#plt.semilogy(np.arange(len(frLAM_am))+dif*0,[np.nanmean(1./frLAM_am*frLAM_WT)]*1041,lw=5,color='gray')
#plt.semilogy(np.arange(len(frNUC_am))+dif*1,1./frNUC_am*frNUC_WT,'o',color='cyan',markeredgecolor='k')
#plt.semilogy(np.arange(len(frNUC_am))+dif*1,[np.nanmean(1./frNUC_am*frNUC_WT)]*1041,lw=5,color='cyan')
plt.plot([0,dif*len(Asocslist)],[0,0],':r',lw=5)
plt.ylabel('Log2 difference of \n trans A/B density ratio')
ax1 = fig.get_axes()[0]
ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#plt.yticks([0.5,1,2])
plt.ylim(lims)
plt.xticks([dif*iAsocs+dif/2 for iAsocs in range(len(Asocslist))],
['All data','Speckle associated','Lamina-associated','Not lamina-associated\n and not speckle-associated'],rotation=0)
plt.title('Transcribing/Silent -'+str(int(nuc_cutoof*1000))+'nm cut-off')#\nNo of cut points [above,below]:'+str(no_pts))
# -
from tqdm import tqdm_notebook as tqdm
dic_tr_A = {}
dic_tr_B = {}
lines_ = lines[1:,7:9].reshape([-1,1041,2])[experiment_==exp]
lines_A = lines_[:,A].reshape([-1,2])
lines_B = lines_[:,B].reshape([-1,2])
for nms,trs in tqdm(lines_A):
if trs!= 'nan' and nms!='':
for gn,tr in zip(nms.split(','),trs.split(',')):
dic_tr_A[gn] = dic_tr_A.get(gn,[])+[tr]
for nms,trs in tqdm(lines_B):
if trs!= 'nan' and nms!='':
for gn,tr in zip(nms.split(','),trs.split(',')):
dic_tr_B[gn] = dic_tr_B.get(gn,[])+[tr]
len(dic_tr_A.keys()),len(dic_tr_B.keys()),np.sum(A)+np.sum(B)
# +
frsB = np.array([np.mean(np.array(dic_tr_B[key])=='on') for key in dic_tr_B])#/np.log(10)
frsA = np.array([np.mean(np.array(dic_tr_A[key])=='on') for key in dic_tr_A])#/np.log(10)
fig=plt.figure()
plt.hist(frsA,bins=10**(np.linspace(-2.5,0,20)),color='r',alpha=0.75,label='A regions')
plt.hist(frsB,bins=10**(np.linspace(-2.5,0,20)),color='b',alpha=0.75,label='B regions')
plt.gca().set_xscale("log")
plt.legend()
plt.xlabel('Bursting frequency')
plt.ylabel('Number of genes')
plt.show()
# -
| combinatorial_tracing/PostAnalysis/Part4_NuclearLandmarks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from component.tiles import *
parameters = Parameters()
planet_parameters = PlanetParameters()
map_ = AlertMap(parameters=parameters, planet_parameters=planet_parameters)
map_.layout.height="100vh"
# +
process_tile = sw.Tile(id_='ui', title='', inputs=[map_])
appBar = sw.AppBar(title='Planet active fires explorer')
content = [
process_tile,
]
#create a drawer
item_process = sw.DrawerItem('Map',
'mdi-map-marker-check',
card="ui").display_tile(content)
code_link = 'https://github.com/ingdanielguerrero/planet_active_fires_explorer'
wiki_link = 'https://github.com/ingdanielguerrero/planet_active_fires_explorer/blob/main/README.md'
issue = 'https://github.com/ingdanielguerrero/planet_active_fires_explorer/issues/new'
items = [
item_process,
]
drawer = sw.NavDrawer(items,
code = code_link,
wiki = wiki_link,
issue = issue,
mini_variant=True).display_drawer(appBar.toggle_button)
#build the app
app = sw.App(
appBar = appBar,
tiles=content,
navDrawer=drawer
).show_tile('ui')
#display the app
app
| UI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard Library Imports
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from collections import Counter
# %matplotlib inline
# +
# Read the Dataset
event=pd.read_csv('event_transactions_CB.csv')
# -
event.head(5)
event.tail(10)
event.describe()
# Check for null values
print(event.isnull().sum())
event.info()
# Correlations
plt.figure(figsize=(20,10))
c=event.corr()
sns.heatmap(c,cmap="BrBG",annot=True)
# Which age group are the major attendees
sns.distplot(event['age_cat'], bins=15, kde=False)
plt.ylabel('Count')
plt.title('Age')
# People aged less than 21 are the major attendees.
# Does gender affect Purchase Type
sns.distplot(event['gender_cat'], bins=15, kde=False)
plt.ylabel('purchase_type')
plt.title('Gender VS Purchase Type')
# There is no gender bias in Purchase Type.
# How does Age and Gender affect Attendee Frequency
ax = sns.factorplot(x="age_cat", y="attendee_frequency", hue="gender_cat", data=event,
kind="bar",aspect=1.5, size=6)
ax.set(ylabel='Attendee Frequency')
plt.title('Frequent Attendees by Age and Gender');
# +
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pylab
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(111, projection='3d')
x = event['age_cat']
y = event['attendee_frequency']
z = event['purchase_type']
c = event['gender_cat']
cb1=ax.scatter(x, y, z, c=c, cmap=plt.cm.get_cmap('RdBu'),s=100)
plt.colorbar(cb1, ax=ax)
ax.set_xlabel('Age',fontsize=20)
ax.set_ylabel('Frequency',fontsize=20)
ax.set_zlabel('PurchaseType',fontsize=20)
plt.show()
| EDA_Part 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Satwato/Assignment-2/blob/master/TensorFlow_CodeLab_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="UbeBBKV9H8CC" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
from tensorflow import keras
# + id="gptbKz7EH9dS" colab_type="code" colab={}
model= tf.keras.Sequential([keras.layers.Dense(units=1,input_shape=[1])])
# + id="sb51QgpYIlL-" colab_type="code" colab={}
model.compile(optimizer='sgd',loss='mean_squared_error')
# + id="GagYyXK5I7SM" colab_type="code" colab={}
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)
# + id="TrQKZH9KJFh1" colab_type="code" outputId="0a79219c-060e-493b-f8aa-a1fe6785e110" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model.fit(xs,ys,epochs=1000)
# + id="q6lB-l9cLJgI" colab_type="code" outputId="df2444a9-a908-4499-a06e-e6c225f4420e" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(model.predict([10.0]))
# + id="jX06V2AYLZNX" colab_type="code" colab={}
| TensorFlow_CodeLab_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Health and Lifestyle Survey Questions Tutorial
#
# In this tutorial, we showcase how the ProtoDash explainer algorithm from AI Explainability 360 Toolkit implemented through the _ProtoDashExplainer_ class could be used to summarize the National Health and Nutrition Examination Survey (NHANES) datasets ([Study 1](#study1)) available through the Center for Disease Control and Prevention (CDC). Moreover, we also show how the algorithm could be used to distill interesting relationships between different facets of life (i.e. early childhood and income), which were found by scientists ([Study 2](#study2)) through decades of rigorous experimentation. This study shows that in using ProtoDash, one can potentially uncover such insights cheaply, which could then be reaffirmed through rigorous experimentation.
#
# Data from this survey is typically used in epidemiological studies and health science research, which helps develop public health policy, direct and design health programs and services, and expand health knowledge. Thus, the impact of understanding these datasets and the relationships that may exist between them are far reaching for a social scientist.
# <a name="intro"></a>
# ## Introduction to Center for Disease Control and Prevention (CDC) datasets
#
# The [NHANES CDC questionnaire datasets](https://wwwn.cdc.gov/nchs/nhanes/search/datapage.aspx?Component=Questionnaire&CycleBeginYear=2013) are surveys conducted by the organization involving thousands of civilians about various facets of their daily lives. There are 44 questionnaires that collect data about income, occupation, health, early childhood and many other behavioral and lifestyle aspects of individuals living in the US. These questionnaires are thus a rich source of information indicative of the quality of life of many civilians.
#
# This tutorial presents two studies. We first see how a CDC questionaire answered by thousands of individuals could be summarized by looking at answers given by a few prototypical users. Next, an interesting endeavor is to uncover relationships between different aspects of life by analyzing data across the different CDC questionnaires. In the second study, we do exactly that with the help of the ProtoDash explainer algorithm. We show how the algorithm is able to uncover an interesting [insight](https://www.theatlantic.com/business/archive/2016/07/social-mobility-america/491240/) known only through decades of experimentation, solely from the questionnaire datasets. This by no means suggests the method as a substitute for rigorous experimentation, but showcases it as an avenue for obtaining interesting insights at low cost, which could inspire further indepth studies. The manner in which this is accomplished is by finding prototypical individuals for each of the questionnaires and then evaluating how well they represent the income questionnaire (w.r.t. the method's objective function). The more representative these prototypes are, the more that questionnaire is indicative/representative of income.
#
# For this use case, we are selecting prototypes from specific questionnaires. Hence, the group we want to explain is the dataset itself, which — in this case — are the questionnaires. We are not training an AI model. Rather, we are trying to summarize each questionnaire, which was filled by thousands of people, by selecting a few representative individuals for each of them.
#
#
# The rest of the tutorial is organized as follows: <br>
# [Explore Income questionaire](#explore)<br>
# [Study 1: Summarize Income Questionnaire using Prototypes](#study1)<br>
# [Study 2: Find Questionnaire/s most representative of Income](#study2)<br>
#
#
# ###### [ProtoDash: Fast Interpretable Prototype Selection](https://arxiv.org/abs/1707.01212)
#
# We now provide a brief overview of the method. The method takes as input a datapoint (or group of datapoints) that we want to explain with respect to instances in a training set belonging to the same feature space. The method then tries to minimize the maximum mean discrepancy (MMD metric) between the datapoints we want to explain and a prespecified number of instances from the training set that it will select. In other words, it will try to select training instances that have the same distribution as the datapoints we want to explain. The method does greedy selection and has quality guarantees with it also returning importance weights for the chosen prototypical training instances indicative of how similar/representative they are.
#
#
# ###### Why ProtoDash?
#
# Before we showcase the two studies, we provide some motivation for using this method. The method is able to select in a deterministic fashion examples from a dataset, which we term as prototypes that represent the different segments in a dataset. For example, if we take people that answered the income questionnaire, we might find that there are three categories of people: i) those that are high earners, ii) those that are middle class and iii) those that don't earn much or are unemployed and receive unemployment benefits. ProtoDash would be able to find these segments by pointing to specific individuals that lie in these categories. Looking at the objective function value of ProtoDash, one would also be able to say that three segments is the right number here as adding one more segment may not improve the objective value by much.
#
# Compared with other methods such as k-medoids, it has the advantage that it is deterministic and does not have randomizations as in, say, k-medoids clustering, where the centers a typically randomly initialized. So the solutions are repeatable and it picks prototypes that are representative as well as diverse, which may not be the case with standard distance metrics such as euclidean distance. Diversity is important in practical settings (viz. income example above) where we want to capture all the different segments/modes in the dataset, not missing any of the key behaviors.
#
# Another benefit of the method is that, since it performs distribution matching between the user/users in question and those available in the training set, it could, in principle, also be applied in non-iid settings such as for time series data. Other approaches which find similar profiles using standard distance measures (viz. euclidean, cosine) do not have this property. Additionally, we can also highlight important features for the different prototypes that made them similar to the user/users in question.
# #### Import Statements
#
# Import relevant libraries, datasets and ProtoDash explainer algorithm.
# +
import os
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from aix360.algorithms.ProtoDash import ProtoDashExplainer
from aix360.datasets.cdc_dataset import CDCDataset
# -
# #### Load CDC dataset
# Download (if necessary) and wrap the NHANES CDC questionnaire CSV files.
nhanes = CDCDataset()
# List of per-questionnaire CSV file names (e.g. 'INQ_H.csv')
nhanes_files = nhanes.get_csv_file_names()
# _cdc_files_info() returns a 3-tuple of metadata; only the questionnaire
# descriptions are kept here — NOTE(review): private method, confirm stable API.
(nhanesinfo, _, _) = nhanes._cdc_files_info()
# <a name="explore"></a>
# ## Explore Income questionnaire
#
# Now let us explore the income questionnaire dataset and find out the types of responses received in the survey. Each column in the dataset corresponds to a question and each row denotes the answers given by a respondent to those questions. Both column names and answers by respondents are encoded. For example, 'SEQN' denotes the sequence number assigned to a respondent and 'IND235' corresponds to a question about monthly family income. As seen below, in most cases a value of 1 implies "Yes" to the question, while a value of 2 implies "No." More details about the income questionnaire and how questions and answers are encoded can be seen [here](https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/INQ_H.htm)
#
# |Column |Description | Values and Meaning|
# |-------|----------------------------|---------|
# |SEQN | Respondent sequence number |
# |INQ020 | Income from wages/salaries |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ012 | Income from self employment|1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ030 | Income from Social Security or RR |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ060 | Income from other disability pension |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ080 | Income from retirement/survivor pension |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ090 | Income from Supplemental Security Income |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ132 | Income from state/county cash assistance |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ140 | Income from interest/dividends or rental |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |INQ150 | Income from other sources |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |IND235 | Monthly family income |1-12->Increasing income brackets, 77->Refused, 99->Don't know|
# |INDFMMPI | Family monthly poverty level index |0-5->Higher value more affluent|
# |INDFMMPC | Family monthly poverty level category |1-3->Increasing INDFMMPI brackets, 7->Refused, 9->Don't know|
# |INQ244 | Family has savings more than $5000 |1->Yes, 2->No, 7->Refused, 9->Don't know|
# |IND247 | Total savings/cash assets for the family |1-6->Increasing savings brackets, 77->Refused, 99->Don't know|
# replace encoded column names by the associated question text.
df_inc = nhanes.get_csv_file('INQ_H.csv')
# Display-only expression: shows the first (still encoded) column name in the notebook.
df_inc.columns[0]
# Mapping from encoded NHANES column names to human-readable question text.
dict_inc = {
    'SEQN': 'Respondent sequence number',
    'INQ020': 'Income from wages/salaries',
    'INQ012': 'Income from self employment',
    'INQ030': 'Income from Social Security or RR',
    'INQ060': 'Income from other disability pension',
    'INQ080': 'Income from retirement/survivor pension',
    'INQ090': 'Income from Supplemental Security Income',
    'INQ132': 'Income from state/county cash assistance',
    'INQ140': 'Income from interest/dividends or rental',
    'INQ150': 'Income from other sources',
    'IND235': 'Monthly family income',
    'INDFMMPI': 'Family monthly poverty level index',
    'INDFMMPC': 'Family monthly poverty level category',
    'INQ244': 'Family has savings more than $5000',
    'IND247': 'Total savings/cash assets for the family'
}
# Rename every column via a comprehension (replaces the manual index loop).
# An unexpected column in the CSV raises KeyError here, preserving the
# original loop's loud-failure behavior.
df_inc.columns = [dict_inc[col] for col in df_inc.columns]
print("Answers given by some respondents to the income questionnaire:")
df_inc.head(5).transpose()
# Now, to get more of a feel for the dataset, let us look at the distribution of responses for two questions related to family financial status.
# +
print("Number of respondents to Income questionnaire:", df_inc.shape[0])
print("Distribution of answers to \'monthly family income\' and \'Family savings\' questions:")
# Side-by-side bar charts of answer frequencies for the two finance questions.
fig, axes = plt.subplots(1, 2, figsize=(10,5))
fig.subplots_adjust(wspace=0.5)
hist1 = df_inc['Monthly family income'].value_counts().plot(kind='bar', ax=axes[0])
hist2 = df_inc['Family has savings more than $5000'].value_counts().plot(kind='bar', ax=axes[1])
plt.show()
# -
# <a name="Dplot"></a>
# Observe that the majority of individuals responded with a "12" to the question related to monthly family income, which means their income is above USD 8400 as explained [here](https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/INQ_H.htm#IND235). Similarly, to the question of whether the family has savings more than USD 5000, the majority of individuals responded with a "2", which means "No".
# <a name="study1"></a>
# ## Study 1: Summarize Income Questionnaire using Prototypes
#
# We just explored the income dataset and looked at the distribution of answers for a couple of questions. Now, consider a social scientist who would like to quickly obtain a summary report of this dataset in terms of types of people that span this dataset. Is it possible to summarize this dataset by looking at answers given by a few representative/prototypical respondents?
#
# We now show how the ProtoDash algorithm can be used to obtain a few prototypical respondents (about 10 in this example) that span the diverse set of individuals answering the income questionnaire making it easy for the social scientist to summarize the dataset.
# +
# convert the pandas dataframe to a numpy array of answers
data = df_inc.to_numpy()
# sort the rows by sequence numbers in 1st column
idx = np.argsort(data[:, 0])
data = data[idx, :]
# replace nan's (missing values) with 0's
# NOTE(review): `original` aliases `data` (no copy), so the NaN replacement
# below also zeroes NaNs in `data` in place — confirm this is intended.
original = data
original[np.isnan(original)] = 0
# delete 1st column (sequence numbers)
original = original[:, 1:]
# one hot encode all features as they are categorical
# NOTE(review): `sparse=False` was renamed to `sparse_output=False` in
# scikit-learn 1.2 and removed in 1.4 — this line pins an older sklearn.
onehot_encoder = OneHotEncoder(sparse=False)
onehot_encoded = onehot_encoder.fit_transform(original)
explainer = ProtoDashExplainer()
# call ProtoDash explainer
# S contains indices of the selected prototypes
# W contains importance weights associated with the selected prototypes
(W, S, _) = explainer.explain(onehot_encoded, onehot_encoded, m=10)
# sort the order of prototypes in set S (ascending row index, weights kept aligned)
idx = np.argsort(S)
S = S[idx]
W = W[idx]
# -
# Display the prototypes along with their computed weights
inc_prototypes = df_inc.iloc[S, :].copy()
# Compute normalized importance weights (rounded to 2 decimals) for prototypes
inc_prototypes["Weights of Prototypes"] = np.around(W/np.sum(W), 2)
inc_prototypes.transpose()
# #### Explanation:
# The 10 people shown above (i.e. 10 prototypes) are representative of the income questionnaire according to ProtoDash. Firstly, in the distribution plot for family finance related questions, we saw that there roughly were five times as many people not having savings in excess of $5000 compared with others. Our prototypes also have a similar spread, which is reassuring. Also, for monthly family income, we get a more even spread over the more commonly occurring categories. This is a kind of spot check to see if our prototypes actually match the distribution of values in the dataset.
#
# Looking at the other questions in the questionnaire and the corresponding answers given by the prototypical people above, the social scientist realizes that most people are employed (3rd question) and work for an organization earning through salary/wages (1st two questions). Most of them are also young (5th question) and fit to work (4th question). However, they don't seem to have much savings (last question). The insights that the social scientist acquired from studying the prototypes could also be conveyed to the appropriate government authorities that affect future public policy decisions.
# <a name="study2"></a>
# ## Study 2: Find Questionnaire/s that are most representative of Income
#
# We now move on to our second study, where we want to see how the remaining 39 questionnaires represent or relate to income. This will provide us with an idea of which lifestyle factors are likely to affect income the most. To do this we compute prototypes for each of the questionnaires and evaluate how well they represent the income questionnaire relative to our objective function.
# #### Compute prototypes for all questionnaires
#
# This step uses the ProtoDash explainer to compute 10 prototypes for each of the questionnaires and saves these for further evaluation.
# +
# Iterate through all questionnaire datasets and find 10 prototypes for each.
prototypes = {}
for i in range(len(nhanes_files)):
    f = nhanes_files[i]
    print("processing ", f)
    # read data to pandas dataframe
    df = nhanes.get_csv_file(f)
    # convert data to numpy
    data = df.to_numpy()
    # sort the rows by sequence numbers in 1st column
    idx = np.argsort(data[:, 0])
    data = data[idx, :]
    # replace nan's with 0's.
    # NOTE(review): `original` aliases `data`, so the 'data' entry stored
    # below also has its NaNs zeroed; the later evaluation step indexes that
    # stored array — confirm before changing this to a copy.
    original = data
    original[np.isnan(original)] = 0
    # delete 1st column (contains sequence numbers)
    original = original[:, 1:]
    # one hot encode all features as they are categorical
    # NOTE(review): `sparse=False` is deprecated in scikit-learn >= 1.2
    # (renamed `sparse_output`); this code assumes an older sklearn.
    onehot_encoder = OneHotEncoder(sparse=False)
    onehot_encoded = onehot_encoder.fit_transform(original)
    explainer = ProtoDashExplainer()
    # call ProtoDash explainer
    # S contains indices of the selected prototypes
    # W contains importance weights associated with the selected prototypes
    (W, S, _) = explainer.explain(onehot_encoded, onehot_encoded, m=10)
    # cache everything needed later to evaluate these prototypes against income
    prototypes[f]={}
    prototypes[f]['W']= W
    prototypes[f]['S']= S
    prototypes[f]['data'] = data
    prototypes[f]['original'] = original
# -
# #### Evaluate the set of prototypical respondents from various questionnaires using the income questionnaire.
#
# Now that we have the prototypes for each of the questionnaires, we evaluate how well the prototypes of each questionnaire represent the Income questionnaire based on the objective function that ProtoDash uses. We see below a ranked list of different questionnaires with their objective function values in ascending order. The higher a questionnaire appears in the list, the better its prototypes represent the income questionnaire. The values on the right indicate our objective value, where a lower value is better.
# +
# load income dataset INQ_H and its prototypes
X = prototypes['INQ_H.csv']['original']
Xdata = prototypes['INQ_H.csv']['data']
# Iterate through all questionnaires and evaluate how well their prototypes represent the income dataset.
objs = []
for i in range(len(nhanes_files)):
    # load a dataset, its prototypes & weights
    f = nhanes_files[i]
    Ydata = prototypes[f]['data']
    S = prototypes[f]['S']
    W = prototypes[f]['W']
    # sort the order of prototypes in set S
    idx = np.argsort(S)
    S = S[idx]
    W = W[idx]
    # access corresponding prototypes in X by matching respondent sequence
    # numbers (column 0) between the income table and this questionnaire
    XS = X[np.isin(Xdata[:, 0], Ydata[S, 0]), :]
    #print(Ydata[S, 0])
    #print(Xdata[np.isin(Xdata[:, 0], Ydata[S, 0]), 0])
    # u: mean similarity (dot product) of each selected prototype to all income rows
    temp = np.dot(XS, np.transpose(X))
    u = np.sum(temp, axis=1)/temp.shape[1]
    # K: Gram matrix of the selected prototypes
    K = np.dot(XS, XS.T)
    # evaluate prototypes on income based on our objective function with dot product as similarity measure
    # obj = 0.5 * W'KW - W'u  (lower is better)
    obj = 0.5 * np.dot(np.dot(W.T, K), W) - np.dot(W.T, u)
    objs.append(obj)
# sort the objectives (ascending order)
index = np.argsort(np.array(objs))
# load the results in a dataframe to print
# NOTE(review): the loop starts at 1, presumably to skip the top-ranked entry
# (the income questionnaire itself, which trivially represents income best) —
# confirm before changing.
evalresult = []
for i in range(1,len(index)):
    evalresult.append([ nhanesinfo[index[i]], objs[index[i]] ])
df_evalresult = pd.DataFrame.from_records(evalresult)
df_evalresult.columns = ['Questionaire', 'Prototypes representative of Income']
df_evalresult
# -
# #### Insight from ProtoDash
#
# Looking at the table above, what is interesting is that early childhood represents income the most. The early childhood questionnaire has information about the environment that the child was born and raised in. This is consistent with a long term study (https://www.theatlantic.com/business/archive/2016/07/social-mobility-america/491240/) which talks about significant decrease in social mobility in recent times, stressing the fact that your childhood impacts how monetarily successful you are likely to be. It is interesting that our method was able to uncover this relationship with access to just these survey questionnaires. Other such insights could be obtained and ones that a social scientist or policy maker finds interesting could potentially spawn long-term studies like the one just mentioned.
| examples/tutorials/CDC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Fajardo47/CPEN_21A_BS_ECE_2-3/blob/main/Lab_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="CFRDHHnPgie1"
# #Laboratory 1
#
# + colab={"base_uri": "https://localhost:8080/"} id="5jb5GTBLgpD3" outputId="17e2f6c3-33dc-422b-da9c-3cadaa1731fc"
# Print a greeting to verify the notebook environment runs Python code.
print("Welcome to Python Programming")
# + colab={"base_uri": "https://localhost:8080/"} id="zHi3w72PhqnL" outputId="58c99014-56cc-45b4-88d4-f98b847ae5eb"
# Personal details; printed below with f-strings (output identical to the
# comma-separated print form).
name = "<NAME>"
Address = "Blk 14 Lot 4 St. Francis Village, brgy. Alingaro, General trias, Cavite"
Age = "19yrs old"
print(f"Name: {name}")
print(f"Address: {Address}")
print(f"Age: {Age}")
| Lab_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import panel as pn
# Load Panel's JS/CSS front-end resources so widgets render inline in the notebook.
pn.extension()
# The ``Button`` widget allows triggering events when the button is clicked. Unlike other widgets, it does not have a ``value`` parameter. Instead it has a ``clicks`` parameter that can be watched to trigger events.
#
# For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb).
#
# #### Parameters:
#
# For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
#
# ##### Core
#
# * **``clicks``** (int): Number of clicks (can be listened to)
#
# ##### Display
#
# * **``button_type``** (str): A button theme; should be one of ``'default'`` (white), ``'primary'`` (blue), ``'success'`` (green), ``'info'`` (yellow), or ``'danger'`` (red)
# * **``disabled``** (boolean): Whether the widget is editable
# * **``name``** (str): The title of the widget
#
# ___
# Create a Button; the bare name on the next line renders it in the notebook.
button = pn.widgets.Button(name='Click me', button_type='primary')
button
# The ``clicks`` parameter will report the number of times the button has been pressed:
button.clicks
# The ``Button`` name string may contain Unicode characters, providing a convenient way to define common graphical buttons:
# +
# Unicode arrows / emoji used as button labels
backward = pn.widgets.Button(name='\u25c0', width=50)
forward = pn.widgets.Button(name='\u25b6', width=50)
search = pn.widgets.Button(name='🔍', width=100)
pn.Row(backward, forward, search)
# -
# The color of the button can be set by selecting one of the available button types:
pn.Column(*(pn.widgets.Button(name=p, button_type=p) for p in pn.widgets.Button.param.button_type.objects))
| examples/reference/widgets/Button.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Database Reconstruction Attacks
# ## Privacy
#
# [Dinur & Nissim (2003)](http://www.cse.psu.edu/~ads22/privacy598/papers/dn03.pdf), roughly, states that if too many queries on a database are answered too accurately, almost the entire database can be reconstructed from the answers to the queries. Before this fact was properly understood, statistical releases from databases were typically limited in ad-hoc ways. For example, an interactive querying system (in which analysts propose their own queries) may not allow queries that clearly single out individual elements of the data set. Similarly, in a one-shot release (e.g. the U.S. Census Bureau releasing statistical tables) the releasing party may decide not to release statistics on groups with fewer than some fixed number of elements.
#
# Releasing statistics in a differentially private manner has been posed as a principled way to ensure the privacy of individuals included in a database and prevent database reconstruction. Notably, the U.S. Census is moving to use differentially private algorithms to perform statistical releases for the 2020 Census, for fear that their old methods of privatization (a.k.a. statistical disclosure limitation) are not sufficient to prevent database reconstruction. See [here](https://dl.acm.org/ft_gateway.cfm?id=3295691&ftid=2020934&dwn=1) for more information.
#
# In this notebook we will focus on the one-shot release model and simulate the process of a data owner releasing aggregate statistics from a sensitive database, both with and without differential privacy. We will then attempt to reconstruct the original database from each set of releases.
# ### Statistical Release Setup
#
# The underlying sensitive data from which we are releasing is a partly-synthetic version of the PUMS data that are used in many other notebooks. The data are $(500 \times 6)$ with the columns taking values as follows:
#
# - educ: {1, 2}
# - race: {1, 2}
# - agebinned: {'[20, 25)', '[25, 30)', ..., '[45, 50)'}
# - sex: {0, 1}
# - married: {0, 1}
# - income: [0, 420,500]
#
# We will assume that `income` is the only sensitive variable in the database, and so the data owner is concerned only with whether or not an attacker can reconstruct the `income` variable.
#
# We will first calculate a number of statistics -- `mean`, `median`, `min`, and `max` by subgroup, where a subgroup is defined as a set of database elements that are equivalent with respect to some subset of variables. For example, valid subgroups include the set of all elements with `educ == 2`, and the set of all elements with `educ == 1 && race == 2 && married == 0`. The `income` variable is never included in these subgroup definitions, as it is the variable we are trying to reconstruct.
#
# Throughout the notebook, we will examine various cases in which the data owner releases different subsets of these statistics.
#
# +
import reconstruction_module as rec
# load the (partly synthetic) PUMS data; `orig_data` keeps the true frame for
# later comparison, `data` is the working copy used for the statistical releases
orig_data, data = rec.load_data()
non_income_data = data.drop('income', axis = 1)
# get plausible variable combinations and subset of length 5 plausible combinations
plausible_variable_combinations = rec.get_plausible_variable_combinations(non_income_data)
plausible_variable_combinations_names = ['__'.join(combination) for combination in plausible_variable_combinations]
five_way_interactions = [combination for combination in plausible_variable_combinations if len(combination) == 5]
five_way_interactions_names = ['__'.join(combination) for combination in five_way_interactions]
# get dictionaries of private and non-private releases (up to 5-way interactions)
count_dict, priv_count_dict, mean_income_dict, priv_mean_income_dict, median_income_dict, priv_median_income_dict, min_income_dict, priv_min_income_dict, max_income_dict, priv_max_income_dict = rec.create_dicts(data, non_income_data, plausible_variable_combinations)
# get string representations of each element associated with each tuple representing the 5-way interactions
elem_dict, priv_elem_dict = rec.create_elem_dicts(count_dict, priv_count_dict, five_way_interactions, five_way_interactions_names)
# -
# ### Statistical Release and Reconstruction Attack
#
# We are nearly ready to consider an actual set of statistical releases and how this can enable a reconstruction attack, but it will be useful to first consider some details of our attacker.
#
# The attacker's goal is to reconstruct the `income` column as closely as possible (by some definition of closeness), using a combination of the statistical releases and pre-existing knowledge of the database. We will assume for now that the attacker has no pre-existing knowledge, and thus bases their reconstruction only on the statistical releases. Following [<NAME> (2018)](https://dl.acm.org/doi/pdf/10.1145/3291276.3295691), we attempt to construct a database that is consistent with the releases using a SAT/SMT solver. We will be using [Z3](https://github.com/Z3Prover/z3), but other SAT/SMT solvers could serve the same purpose.
#
# Z3 takes a boolean formula with associated variables and returns whether or not the formula is satisfiable; that is, whether or not there exists a set of variable values that makes the boolean formula `TRUE`. For example, let $A,B$ be boolean variables. The formula $A \land B$ is satisfiable because it is true if $A = B = TRUE$. The formula $A \land B \land \neg B$ is not satisfiable.
#
# This idea can be applied to finding a database that satisfies a set of statistical releases. Say we know that a subgroup has 3 elements, $\{i_1, i_2, i_3\}$ with median income $50,000$, mean income $60,000$, and min income $0$. Note that, because elements within a subgroup are equivalent with respect to the subset of variables that define the subgroup, we are free to index however we wish within the subgroup. We choose to enforce the element values to be monotonically non-decreasing in the index. So, we can represent each statistical release as a statement we'll call an `application`:
#
# $$ (i_1 \leq i_2 \leq i_3) \land (i_1 = 0) \land (i_2 = 50,000) \land \left( (i_1 + i_2 + i_3)/3 = 60,000 \right). $$
#
# In this case, we get satisfying values of $i_1, i_2$ for free and can find $i_3 = 130,000$ by plugging in $i_1, i_2$ to the last equation. So $\{0, 50,000, 130,000\}$ is a database that is consistent with our statistical release.
#
# Note that, as long as the set of statistical releases is internally consistent (results do not contradict one another), Z3 should be able to find at least one satisfying assignment.
#
# +
import z3
# single satisfying assignment: min 0, median 50,000 and mean 60,000
# (i.e. sum 180,000) pin the sorted triple to {0, 50_000, 130_000}
i_1, i_2, i_3 = z3.Ints('i_1 i_2 i_3')
z3.solve(i_1 <= i_2, i_2 <= i_3, i_1 == 0, i_2 == 50_000, z3.Sum(i_1, i_2, i_3) == 180_000)
# -
# In the case above there is only one satisfying assignment, and so the result that Z3 provided is the only database consistent with our statistical releases. Let's now imagine the same case, but without a released median.
# +
# many satisfying assignments: dropping the median constraint leaves the
# system under-determined.
# NOTE(review): z3.solve is deterministic, so the three identical calls below
# may print the same model each time — confirm this cell actually shows
# distinct assignments as the surrounding text implies.
i_1, i_2, i_3 = z3.Ints('i_1 i_2 i_3')
z3.solve(i_1 <= i_2, i_2 <= i_3, i_1 == 0, z3.Sum(i_1, i_2, i_3) == 180_000)
z3.solve(i_1 <= i_2, i_2 <= i_3, i_1 == 0, z3.Sum(i_1, i_2, i_3) == 180_000)
z3.solve(i_1 <= i_2, i_2 <= i_3, i_1 == 0, z3.Sum(i_1, i_2, i_3) == 180_000)
# -
# Again we get a satisfying assignment, but this time the assignment is not unique. That is, there are multiple databases consistent with our statistical releases (only one of which is the true underlying data). As a result, the attacker has not learned the entire underlying database with certainty. The more satisfying assignments exist, and the more variability between these assignments, the less the attacker is expected to learn from reconstructing the data.
#
# Now that we have a taste for Z3, we can move to the final stage of setting up the attack, encoding the statistical releases as applications. We treat each individual's income as its own variable and construct applications by asserting that the values of these variables must be consistent with statistical releases, much like we did above (but on a larger scale). For this example, we will assume that the mean and median of every subgroup is released, regardless of subgroup size.
# set applications: encode every release (counts, means, medians for all
# subgroups; lowest_allowable_count = 1 means no size suppression)
applications, priv_applications = rec.get_applications(five_way_interactions, five_way_interactions_names,
                                                       plausible_variable_combinations, plausible_variable_combinations_names,
                                                       count_dict, priv_count_dict,
                                                       mean_income_dict, priv_mean_income_dict,
                                                       median_income_dict, priv_median_income_dict,
                                                       min_income_dict, priv_min_income_dict,
                                                       max_income_dict, priv_max_income_dict,
                                                       elem_dict, priv_elem_dict, lowest_allowable_count = 1,
                                                       use_medians = True, use_mins = False, use_maxes = False)
# remove duplicate applications (set() loses ordering, which is harmless for a solver)
applications = list(set(applications))
priv_applications = list(set(priv_applications))
# ### Attack #1
#
# We start with an attack in a very basic scenario. As described above, the data curator releases the mean and median for each subgroup we defined (up to 5-way interactions between our variables). The attacker then uses only this information to attempt to reconstruct the private data.
# +
# initialize solvers for the non-private and private release sets
solver_1, solver_list_1 = rec.applications_to_solver(applications)
priv_solver_1, priv_solver_list_1 = rec.applications_to_solver(priv_applications)
# get results (models)
model_1 = rec.check_solution(solver_1)
if model_1:
    print('non-private: sat')
else:
    print('non-private: unsat')
# attempt to reconstruct data from the satisfying assignment
recon_data = rec.reconstruct_data(model_1, elem_dict)
# -
# As expected, Z3 produces a satisfying assignment based on the non-private statistical releases.
#
# We mentioned earlier that there may exist multiple satisfying assignments and that we can consider the number and variability of these assignments to examine the extent to which an attacker can learn from a satisfying assignment. We will not do that here, instead opting to just compare the assignment to the true underlying data (a luxury the attacker will not have).
# +
# compare original and reconstructed data
orig_data, recon_data, exact, within_2k, within_5k = rec.compare_data(orig_data, recon_data)
print('Of 500 total incomes:')
print(' {0} incomes reconstructed exactly'.format(exact))
# Fixed typo in the two user-facing messages below ("resconstructed" -> "reconstructed")
print(' {0} incomes reconstructed within $2,000'.format(within_2k))
print(' {0} incomes reconstructed within $5,000'.format(within_5k))
# -
# ### Attack #2
#
# In the example above, we did not make any effort at statistical disclosure limitation. We can try the attack again, but in this case imagine that no information (counts, means, or medians) are released for subgroups of size $< 10$. As before, we assume that the attacker does not have access to any information about the data other than what was released by the data curator.
# +
# set applications, this time suppressing all statistics for subgroups of
# size < 10 (lowest_allowable_count = 10)
applications_2, priv_applications_2 = rec.get_applications(five_way_interactions, five_way_interactions_names,
                                                           plausible_variable_combinations, plausible_variable_combinations_names,
                                                           count_dict, priv_count_dict,
                                                           mean_income_dict, priv_mean_income_dict,
                                                           median_income_dict, priv_median_income_dict,
                                                           min_income_dict, priv_min_income_dict,
                                                           max_income_dict, priv_max_income_dict,
                                                           elem_dict, priv_elem_dict, lowest_allowable_count = 10,
                                                           use_medians = True, use_mins = False, use_maxes = False)
# remove duplicate applications
applications_2 = list(set(applications_2))
priv_applications_2 = list(set(priv_applications_2))
# initialize solvers
solver_2, solver_list_2 = rec.applications_to_solver(applications_2)
priv_solver_2, priv_solver_list_2 = rec.applications_to_solver(priv_applications_2)
# get results (models)
model_2 = rec.check_solution(solver_2)
if model_2:
    print('non-private: sat')
else:
    print('non-private: unsat')
# attempt to reconstruct data
recon_data_2 = rec.reconstruct_data(model_2, elem_dict)
# compare original and reconstructed data
orig_data, recon_data_2, exact_2, within_2k_2, within_5k_2 = rec.compare_data(orig_data, recon_data_2)
print('Of 500 total incomes:')
print(' {0} incomes reconstructed exactly'.format(exact_2))
# Fixed typo: "resconstructed" -> "reconstructed"
print(' {0} incomes reconstructed within $2,000'.format(within_2k_2))
print(' {0} incomes reconstructed within $5,000'.format(within_5k_2))
# -
# After suppressing all statistics from subgroups of size $< 10$, the attacker is able to reconstruct the data with some success (though less than in the setting with no suppression).
#
# ### Attack #3
#
# To this point, we have considered the setting in which the attacker knows only the values of the aggregate statistics released. However, it is possible that attackers have access to information not contained in this set of statistical releases. For example, perhaps they have access to statistical releases from another source, or they happen to already know exact values for some elements in the database.
# Imagine that the attacker has access to the following information outside of the statistical releases:
#
# - There is at least one person in the data with `educ == 2`, `race == 2`, `agebinned == [45, 50)`, `sex == 0`, and `married == 1` with an income of $95,000.
#
# - There is only one person in the data with `educ == 2`, `race == 2`, `agebinned == [45, 50)`, `sex == 1`, and `married == 1` and they have an income of $31,000.
# +
# set applications (same suppression threshold of 10 as Attack #2)
applications_3, priv_applications_3 = rec.get_applications(five_way_interactions, five_way_interactions_names,
                                                           plausible_variable_combinations, plausible_variable_combinations_names,
                                                           count_dict, priv_count_dict,
                                                           mean_income_dict, priv_mean_income_dict,
                                                           median_income_dict, priv_median_income_dict,
                                                           min_income_dict, priv_min_income_dict,
                                                           max_income_dict, priv_max_income_dict,
                                                           elem_dict, priv_elem_dict, lowest_allowable_count = 10,
                                                           use_medians = True, use_mins = False, use_maxes = False)
# remove duplicate applications
applications_3 = list(set(applications_3))
priv_applications_3 = list(set(priv_applications_3))
# initialize solvers
solver_3, solver_list_3 = rec.applications_to_solver(applications_3)
priv_solver_3, priv_solver_list_3 = rec.applications_to_solver(priv_applications_3)
# add applications encoding existing attacker knowledge:
# (1) at least one member of group_1 has income exactly 95,000
group_1_def = 'educ_2__race_2__agebinned_45,50__sex_0__married_1'
group_1_elems = [z3.Int('{0}_{1}'.format(group_1_def, i)) for i in range(count_dict[group_1_def])]
solver_3.add(z3.Or([elem == 95_000 for elem in group_1_elems]))
# (2) the single member of group_2 has income exactly 31,000
group_2_def = 'educ_2__race_2__agebinned_45,50__sex_1__married_1'
solver_3.add(z3.Int( '{0}_{1}'.format(group_2_def, 0)) == 31_000)
# get results (models)
model_3 = rec.check_solution(solver_3)
if model_3:
    print('non-private: sat')
else:
    print('non-private: unsat')
# attempt to reconstruct data
recon_data_3 = rec.reconstruct_data(model_3, elem_dict)
# compare original and reconstructed data
orig_data, recon_data_3, exact_3, within_2k_3, within_5k_3 = rec.compare_data(orig_data, recon_data_3)
print('Of 500 total incomes:')
print(' {0} incomes reconstructed exactly'.format(exact_3))
# Fixed typo: "resconstructed" -> "reconstructed"
print(' {0} incomes reconstructed within $2,000'.format(within_2k_3))
print(' {0} incomes reconstructed within $5,000'.format(within_5k_3))
# -
# This looks much like the performance from Attack #2, but notice that what might appear to be two extra pieces of information can lead to reconstruction of more than two additional data elements.
#
# In each of the three scenarios above, we also set up (but did not test) reconstruction attempts with differentially private statistical releases. Let's now see what happens if we try to reconstruct the underlying data from the private statistics.
# +
# Check satisfiability of each differentially private solver and report the
# outcome; output is identical to the original if/else form.
priv_model_1 = rec.check_solution(priv_solver_1)
print('model_1 private: ' + ('sat' if priv_model_1 else 'unsat'))
priv_model_2 = rec.check_solution(priv_solver_2)
print('model_2 private: ' + ('sat' if priv_model_2 else 'unsat'))
priv_model_3 = rec.check_solution(priv_solver_3)
print('model_3 private: ' + ('sat' if priv_model_3 else 'unsat'))
# -
# In each case, Z3 cannot find a satisfying assignment (a database that is consistent with the released statistics).
# ## References
#
# Dinur, Irit, and <NAME>. "Revealing information while preserving privacy." In Proceedings of the twenty-second ACM SIGMOD-SIGACT-SIGART symposium on Principles of database systems, pp. 202-210. 2003.
#
# Garfinkel, Simson, <NAME>, and <NAME>. "Understanding database reconstruction attacks on public data." Queue 16, no. 5 (2018): 28-53.
| attacks/reconstruction/reconstruction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Задание 1
# Решите уравнение
# sin(x)/x=0.
from scipy.optimize import fsolve
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
def func(x):
    """Левая часть уравнения sin(x)/x = 0; работает поэлементно для массивов NumPy."""
    numerator = np.sin(x)
    return numerator / x
# Plot sin(x)/x on (0, 50] and mark a numerically found root on the x-axis.
# Start the grid at 0.01 to avoid division by zero at x = 0.
x = np.linspace(0.01, 50, 1000)
plt.grid()
plt.plot(x, func(x))
# NOTE(review): fsolve is started at x0 = 0.01, which is far from any root
# (roots are x = pi*k, k != 0) and in a nearly-flat region of the function;
# convergence from this start should be verified -- an x0 near pi is safer.
plt.scatter(fsolve(func, 0.01), [0])
plt.show()
# Ответ: решениями являются x = πk, k ∈ ℤ, k ≠ 0 (точка x = 0 не является решением, так как sin(x)/x → 1 при x → 0); уравнение имеет бесконечное число решений, функция затухает, её предел при x → ∞ равен нулю; частное решение, найденное численно, — x ≈ 3.14 (π)
# ## Задание 2
# Даны три прямые y=k1*x+b1, y=k2*x+b2, y=k3*x+b3. Как узнать, пересекаются они в одной точке или нет?
# Ответ: прямые пересекаются в одной точке тогда и только тогда, когда следующая система уравнений имеет решение; практически достаточно найти точку пересечения первых двух прямых и проверить, что она удовлетворяет третьему уравнению:
# \begin{equation}
# \left\{
# \begin{array}{ll}
# y = k_1x+b_1 \\
# y = k_2x+b_2 \\
# y = k_3x+b_3
# \end{array}
# \right.
# \end{equation}
# ## Задание 3
# На листе тетради «в линейку» (расстояние между линиями равно а) лежит игла (длиной b). Координаты нижней точки иглы (х,у), игла лежит под углом alfa. Пересекает ли игла линию или нет?
#
# Ответ: игла пересекает линию тогда и только тогда, когда её концы лежат по разные стороны от одной из линий, т.е. когда $\lfloor y/a \rfloor \neq \lfloor (y + b\sin\alpha)/a \rfloor$; условие $|b\sin(\alpha)| > a$ лишь достаточно (гарантирует пересечение), но не необходимо — при меньшей проекции пересечение зависит от положения $y$ относительно линий
# ## Задание 4
# Решите аналитически и потом численно (в программе) уравнение, зависящее от параметра а:
# sin(а*x)=0
# при условии: 0.01<a<0.02, 100<х<500.
# Т.е. надо найти решение х как функцию параметра а - построить график x=x(а).
# Если численным методом не получается найти все ветви решения x(а), то отыщите хотя бы одну.
#
# Аналитически: sin(a·x) = 0 при a·x = πk, k ∈ ℤ, то есть x = πk/a; при 0.01 < a < 0.02 и 100 < x < 500 подходят те целые k, для которых 100 < πk/a < 500, и каждая ветвь решения — гипербола x(a) = πk/a
# ## Задание 4
# #### Найти угол между прямыми 4y - 3x + 12 = 0 и 7y + x - 14 = 0
# $y = \frac{3}{4}x - 3$ и $y = -\frac{1}{7}x + 2$
# Bug fix: the slope k of a line y = k*x + b equals tan(angle), so the
# inclination angle is arctan(k); the original cells applied tan to the
# slope instead. Convert to degrees for readability.
angle_1 = np.degrees(np.arctan(3 / 4))   # inclination of y = (3/4)x - 3, ~ 36.87 deg
angle_2 = np.degrees(np.arctan(-1 / 7))  # inclination of y = -(1/7)x + 2, ~ -8.13 deg
angle_1, angle_2
# угол наклона первой прямой arctg(3/4) ≈ 36.9 градуса, второй — arctg(−1/7) ≈ −8.1 градуса
# Ответ: tg θ = |(k₁ − k₂)/(1 + k₁k₂)| = |(3/4 + 1/7)/(1 − 3/28)| = 1, поэтому угол между прямыми θ = 45 градусов
# #### Найти угол между прямыми $x = \sqrt{2}$ и $x = -\sqrt{3}$
# Ответ: обе прямые вертикальны и параллельны между собой. Угол между параллельными прямыми равен 0
# ## Задание 5. Выяснить тип кривых второго порядка, порожденных следующими уравнениями
# 1. $y^2 - 2x - 2y - 5 =0$
# $y^2 - 2y + 1 - 1 - 2x - 5 =0$
# $$(y-1)^2 = 2x + 6$$
# Ответ: парабола
# 2. $3x^2 + 5y^2 + 12x - 30y + 42 = 0$
# $3(x^2 + 4x + 4 - 4) + 5(y^2 -6y + 9 - 9) + 42 = 0$
# $3(x+2)^2 + 5(y-3)^2 = 15$
# $$\frac{(x+2)^2}{5} + \frac{(y-3)^2}{3} = 1$$
# Ответ: эллипс
# 3. $2x^2 - y^2 + 6y - 7 = 0$
# $-(y^2 -6y + 9 - 9) + 2x^2 - 7 = 0$
# $-(y-3)^2 + 9 + 2x^2 -7=0$
# $-(y-3)^2 + 2x^2 = -2$
# $$\frac{(y-3)^2}{2} - x^2 = 1$$
# Ответ: гипербола
# 4. $2x^2 - 3y^2 - 28x - 42y - 55 = 0$
# $2(x^2 - 14x + 49 - 49) - 3(y^2 + 14y + 49 - 49) - 55 = 0$
# $2((x-7)^2 - 49) - 3((y+7)^2 - 49) - 55 = 0$
# $2(x-7)^2 - 3(y+7)^2 = 6$
# $$\frac{(x-7)^2}{3} - \frac{(y + 7)^2}{2} = 1$$
# Ответ: гипербола
| homework04.ipynb |