code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### MIT License (c) 2018 by <NAME>
# #### Jupyter notebook written in Python 3. It illustrates the simulation of multivariate normal samples with a given covariance matrix, and also the construction of univariate histograms.
from sympy import init_printing
init_printing()
import json
import numpy as np
from numpy import *
# Read the file that contains the covariance matrix (adjust the path in the next line accordingly).
file = open('Cov.json','r')
Cov=array(json.load(file))
Cov
# First check if this is indeed a covariance matrix:
Cov-Cov.T
# It is indeed selfadjoint. Now check its rank.
rnk=np.linalg.matrix_rank(Cov)
rnk
# You can also find the eigenvalues and the associated eigenvectors.
np.linalg.eig(Cov)
# Or simply compute the determinant.
np.linalg.det(Cov)
# Now find the Cholesky decomposition:
# NOTE(review): np.linalg.cholesky returns the *lower*-triangular factor L with
# L @ L.T == Cov, so the name CovU is misleading (it is not upper-triangular).
# The check below is still valid because it tests L @ L.T against Cov.
CovU=np.linalg.cholesky(Cov)
CovU
# Test if this is what you want (the result should be the zero matrix):
dot((CovU),CovU.T)-Cov
# The next function generates a Gaussian vector with independent standard normal (${\cal N}(0,1)$) components (the exact dimension is supplied as an argument).
def gauss_vec(dimen):
    """Return a list of `dimen` independent standard normal N(0,1) draws.

    Parameters
    ----------
    dimen : int
        Dimension of the Gaussian vector.

    Returns
    -------
    list of float
        Independent samples from N(0,1).
    """
    # Draw all components in one vectorized call instead of appending
    # one scalar draw at a time in a Python loop.
    return list(np.random.normal(0.0, 1.0, size=dimen))
gauss_vec(3)
gauss_vec(rnk)
# Now create a sample from a Gaussian vector with covariance matrix Cov.
dot((CovU),gauss_vec(rnk))
list(dot((CovU),gauss_vec(rnk)))
# Now we do the same via the eigendecomposition: Cov = U diag(D) U^T, so
# CovL = U diag(sqrt(D)) satisfies CovL @ CovL.T == Cov (Cov is symmetric).
D,U=np.linalg.eig(Cov)
CovL=dot(U,diag(sqrt(D)))
# Check if this is indeed the intended matrix (the result should be ~0):
dot((CovL),CovL.T)-Cov
# Sample a Gaussian vector with covariance Cov using the factor CovL.
# Fix: the original multiplied by Cov itself instead of the factor CovL,
# which produces samples with covariance Cov @ Cov.T, not Cov.
list(dot((CovL),gauss_vec(rnk)))
| Multivariate_Normal_Dist_Examples_Python.ipynb |
# +
# Installing required packages
try:
dbutils.library.installPyPI("koalas")
dbutils.library.installPyPI("dateparser")
dbutils.library.installPyPI("datefinder")
except:
pass
# Importing required libraries
import dateparser, datefinder, databricks.koalas as ks, pandas as pd, numpy as np
from pyspark.sql.types import TimestampType,IntegerType,DoubleType,StringType
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, min, max, sum, stddev_pop, mean, count, countDistinct, variance, when, isnan, isnull
# Initializing Variables
file_name = "dbfs:/FileStore/tables/Sample_Data.csv"
# Wrappers applied element-wise via koalas .apply() during type inference.
dp = lambda x: dateparser.parse(x)
# NOTE(review): datefinder exposes find_dates(), not parser.parse(); this
# lambda presumably raises AttributeError when invoked, which the broad
# except blocks in infer_types() silently swallow, so the datefinder
# fallback likely never succeeds — verify against the datefinder API.
dfp = lambda x: datefinder.parser.parse(x)
inf = lambda x: int(x)
fl = lambda x: float(x)
str1 = lambda x: str(x)
# Accumulates one metrics dict per column; filled by the driver loop below.
metrics=[]
# Creating Spark Session
def spark_session():
    """Build (or fetch the existing) local SparkSession for this notebook."""
    config = SparkConf().set('spark.driver.host', '127.0.0.1')
    builder = SparkSession.builder.config(conf=config)
    return builder.master("local").appName("cloud-dq-tests").getOrCreate()
# Reading Data
def read_data(session):
    """Read the sample CSV (first row treated as header) into a Spark DataFrame."""
    return session.read.csv(file_name, header="true")
# Inferring DataTypes of columns on a random population sample -> Return inferred datatypes for corresponding columns
def infer_types(dataframe):
    """Infer a type name for each column based on a ~10% random row sample.

    Tries, in order: Integer, Double, Date (dateparser then datefinder),
    and falls back to String.  Inference relies on the element-wise
    converter lambdas (inf, fl, dp, dfp) raising for non-conforming values.

    Returns a list of type-name strings ("Integer", "Double", "Date" or
    "String"), positionally aligned with dataframe.columns.
    NOTE(review): the name `date_cols` is historical — the list holds the
    inferred type for *every* column, not only date columns.
    """
    date_cols=[]
    # Sample ~10% of rows (without replacement) to keep inference cheap.
    dataframe = dataframe.sample(False,0.1)
    # Using Koalas for inferring datatypes (.apply runs the plain-Python lambdas)
    ks_dataframe = ks.DataFrame(dataframe)
    for i in list(ks_dataframe.columns):
        # Checking if column has integer values
        try:
            ks_dataframe[i].apply(inf)
            date_cols.append("Integer")
            continue
        except:
            # Checking if column has double or floating point values
            try:
                ks_dataframe[i].apply(fl)
                date_cols.append("Double")
                continue
            except:
                # Not numeric — fall through to the date/string checks below.
                pass
        try:
            # Checking if column has date or timestamp values
            try:
                # Parsing datetime values using dateparser package
                ks_dataframe[i]=ks_dataframe[i].apply(dp)
            except:
                # Parsing datetime values using datefinder package
                ks_dataframe[i]=ks_dataframe[i].apply(dfp)
            date_cols.append("Date")
            continue
        # By Default: Column has string values
        except:
            date_cols.append("String")
            continue
    return date_cols
# Converting Datatypes of columns to inferred datatypes -> Returns new dataframe with inferred datatypes
def convert_datatypes(dataframe,date_cols):
    """Cast each column of `dataframe` to its inferred Spark type.

    Parameters
    ----------
    dataframe : pyspark.sql.DataFrame
        Dataframe whose columns are all strings as read from CSV.
    date_cols : list of str
        Inferred type name ("String", "Integer", "Double" or "Date") per
        column, positionally aligned with dataframe.columns.

    Returns
    -------
    pyspark.sql.DataFrame
        New dataframe with every column cast to its inferred type.
    """
    # Dispatch table instead of an if/elif chain. Any name not listed
    # (i.e. "Date") falls back to TimestampType, matching the original
    # else-branch behavior.
    spark_types = {
        "String": StringType,
        "Integer": IntegerType,
        "Double": DoubleType,
    }
    for i,column in enumerate(dataframe.columns):
        target_type = spark_types.get(date_cols[i], TimestampType)
        dataframe = dataframe.withColumn(column, col(column).cast(target_type()))
    return dataframe
# Returns the sum of a numeric column
def sum_num(dataframe,column):
    """Return the sum of all values in a numeric column (None if all null)."""
    row = dataframe.select(sum(column)).first()
    return row[0]
# Returns the mean of a numeric column
def mean_num(dataframe,column):
    """Return the arithmetic mean of a numeric column (None if all null)."""
    row = dataframe.select(mean(column)).first()
    return row[0]
# Returns the minimum date of a timestamp column, as an ISO-8601 string
def minimum_date(dataframe, column):
    """Return the earliest timestamp in `column` in ISO-8601 format."""
    earliest = dataframe.select(min(column)).first()[0]
    return earliest.isoformat()
# Returns the length of the minimum value of a string column
def minimum_string(dataframe,column):
    """Return the character length of the lexicographically smallest value.

    NOTE: min() on a string column is lexicographic, so this is the length
    of the smallest string, not the smallest length in the column.
    """
    smallest = dataframe.select(min(column)).first()[0]
    return len(smallest)
# Returns the minimum value of a numeric column
def minimum_num(dataframe,column):
    """Return the smallest value in a numeric column (None if all null)."""
    row = dataframe.select(min(column)).first()
    return row[0]
# Returns the maximum value of a numeric column
def maximum_num(dataframe,column):
    """Return the largest value in a numeric column (None if all null)."""
    row = dataframe.select(max(column)).first()
    return row[0]
# Returns the maximum date of a timestamp column, as an ISO-8601 string
def maximum_date(dataframe, column):
    """Return the latest timestamp in `column` in ISO-8601 format."""
    latest = dataframe.select(max(column)).first()[0]
    return latest.isoformat()
# Returns the length of the maximum value of a string column
def maximum_string(dataframe,column):
    """Return the character length of the lexicographically largest value.

    NOTE: max() on a string column is lexicographic, so this is the length
    of the largest string, not the largest length in the column.
    """
    largest = dataframe.select(max(column)).first()[0]
    return len(largest)
# Returns total number of records in a column
def count_records(dataframe,column):
    """Return the total row count of `column` (nulls included)."""
    selected = dataframe.select(column)
    return selected.count()
# Returns distinct number of records in a column
def countdistinct(dataframe,column):
    """Return the number of distinct non-null values in `column`."""
    row = dataframe.select(countDistinct(column)).first()
    return row[0]
# Returns number of records having NULL / NaN values in a column
def countnull(dataframe,column):
    """Return how many rows of `column` are NULL or NaN.

    NOTE(review): isnan() is defined for floating-point columns; behavior on
    string columns depends on Spark's implicit casts — verify for string data.
    """
    null_or_nan = when(isnull(column) | isnan(column), column)
    return dataframe.select(count(null_or_nan)).first()[0]
# Returns number of records having NULL values in a datetime column
def countnulldate(dataframe,column):
    """Return how many rows of a timestamp `column` are NULL (NaN not applicable)."""
    nulls = when(isnull(column), column)
    return dataframe.select(count(nulls)).first()[0]
# Returns the population standard deviation of a numeric column
def std_dev(dataframe,column):
    """Return the population standard deviation of a numeric column."""
    row = dataframe.select(stddev_pop(column)).first()
    return row[0]
# Returns the (sample) variance of a numeric column
def variance_num(dataframe,column):
    """Return the variance of a numeric column (Spark's variance = sample variance)."""
    row = dataframe.select(variance(column)).first()
    return row[0]
# Returns the quantiles (5th, 25th, 50th, 75th, 95th) of a numeric column
def quantiles(dataframe,column):
    """Return the exact 5/25/50/75/95th percentiles (relativeError=0.0)."""
    probabilities = [0.05, 0.25, 0.5, 0.75, 0.95]
    return dataframe.approxQuantile(column, probabilities, 0.0)
# Creating Spark Session
session=spark_session()
# Reading data
df=read_data(session)
# Inferring Datatypes of Dataframe's columns
date_cols = infer_types(df)
# Converting Dataframe's columns into inferred datatypes
df = convert_datatypes(df,date_cols)
# Checking datatype of new column
for val in df.dtypes:
# Adding new metrics for every column
metric={}
# If datatype is 'String'
if(val[1]=='string'):
# Computes the required metrics for a string datatype column
record_count=count_records(df,val[0])
distinct_record_count = countdistinct(df,val[0])
record_null_count = countnull(df,val[0])
completeness = round(((record_count - record_null_count)/record_count),3)
min1=minimum_string(df,val[0])
max1=maximum_string(df,val[0])
# Storing computed metrics in a dictionary for that column
metric['Column'] = val[0]
metric['Number_of_Records'] = record_count
metric['Distinct_Number_of_Records'] = distinct_record_count
metric['Number_of_Null_Records'] = record_null_count
metric['Completeness'] = completeness
metric['Minimum_Length'] = min1
metric['Maximum_Length'] = max1
metrics.append(metric)
# If datatype is 'TimeStamp'
elif(val[1]=='timestamp'):
# Computes the required metrics for a TimeStamp datatype column
record_count=count_records(df,val[0])
distinct_record_count = countdistinct(df,val[0])
record_null_count = countnulldate(df,val[0])
completeness = round(((record_count - record_null_count)/record_count), 3)
min1=minimum_date(df,val[0])
max1=maximum_date(df,val[0])
# Storing computed metrics in a dictionary for that column
metric['Column'] = val[0]
metric['Number_of_Records'] = record_count
metric['Distinct_Number_of_Records'] = distinct_record_count
metric['Number_of_Null_Records'] = record_null_count
metric['Completeness'] = completeness
metric['Minimum_Date'] = min1
metric['Maximum_Date'] = max1
metrics.append(metric)
# If datatype is 'Numeric', i.e. , Integer or Double
elif(val[1]=='int' or val[1] == 'double'):
# Computes the required metrics for a Numeric datatype column
record_count=count_records(df,val[0])
distinct_record_count = countdistinct(df,val[0])
record_null_count = countnull(df,val[0])
completeness = round(((record_count - record_null_count)/record_count) ,3)
sum1=round(sum_num(df,val[0]),3)
mean1=round(mean_num(df,val[0]),3)
min1=round(minimum_num(df,val[0]),3)
max1=round(maximum_num(df,val[0]),3)
range1 = round(max1 - min1 , 3)
stddev1=round(std_dev(df,val[0]),3)
variance1=round(variance_num(df,val[0]),3)
percentile_5, percentile_25, percentile_50, percentile_75, percentile_95 = [round(j,3) for j in quantiles(df,val[0])]
# Storing computed metrics in a dictionary for that column
metric['Column'] = val[0]
metric['Number_of_Records'] = record_count
metric['Distinct_Number_of_Records'] = distinct_record_count
metric['Number_of_Null_Records'] = record_null_count
metric['Completeness'] = completeness
metric['Sum'] = sum1
metric['Mean'] = mean1
metric['Minimum'] = min1
metric['Maximum'] = max1
metric['Range'] = range1
metric['Standard_Deviation'] = stddev1
metric['Variance'] = variance1
metric['5th_Quartile'] = percentile_5
metric['25th_Quartile'] = percentile_25
metric['50th_Quartile'] = percentile_50
metric['75th_Quartile'] = percentile_75
# Fix: the original assigned the 95th quartile twice; once is enough.
metric['95th_Quartile'] = percentile_95
metrics.append(metric)
# Printing Computed Data Profiling Metrics
print("\nMetrics: \n")
for i in metrics:
print(i)
print("\n")
# -
| Profiling_Prod.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %run ../Python_files/util_data_storage_and_load.py
# %run ../Python_files/load_dicts.py
# %run ../Python_files/util.py
import numpy as np
from numpy.linalg import inv
# +
# load link flow data
import json
with open('../temp_files/link_day_minute_Jul_dict_JSON_adjusted.json', 'r') as json_file:
link_day_minute_Jul_dict_JSON = json.load(json_file)
# +
# week_day_Jul_list = [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20, 23, 24, 25, 26, 27, 30, 31]
# testing set 1
week_day_Jul_list_1 = [20, 23, 24, 25, 26, 27, 30, 31]
# testing set 2
week_day_Jul_list_2 = [11, 12, 13, 16, 17, 18, 19]
# testing set 3
week_day_Jul_list_3 = [2, 3, 4, 5, 6, 9, 10]
# +
# Gather the 'PM_flow' series for links 0-23 over a list of days.
# Replaces three identical copy-pasted loops with one helper.
def _collect_pm_flows(day_list, flow_dict):
    """Return a flat list of PM flows ordered by link index, then by day.

    Keys in `flow_dict` follow the 'link_<idx>_<day>' convention used by
    link_day_minute_Jul_dict_JSON; each entry maps 'PM_flow' to a series.
    """
    flows = []
    for link_idx in range(24):
        for day in day_list:
            key = 'link_' + str(link_idx) + '_' + str(day)
            flows.append(flow_dict[key]['PM_flow'])
    return flows

link_flow_testing_set_Jul_PM_1 = _collect_pm_flows(week_day_Jul_list_1, link_day_minute_Jul_dict_JSON)
link_flow_testing_set_Jul_PM_2 = _collect_pm_flows(week_day_Jul_list_2, link_day_minute_Jul_dict_JSON)
link_flow_testing_set_Jul_PM_3 = _collect_pm_flows(week_day_Jul_list_3, link_day_minute_Jul_dict_JSON)
# -
len(link_flow_testing_set_Jul_PM_1)
# +
# Reshape each flat flow list to (24 links x n_days), replace NaN with 0,
# drop any day (column) that contains a zero reading on some link, and
# return the cleaned data as a matrix.  Replaces three identical
# copy-pasted pipelines with one helper.
def _clean_testing_set(flow_list, n_days):
    """Return a (24, n_kept_days) matrix with zero-containing days removed."""
    m = np.matrix(flow_list)
    m = np.matrix.reshape(m, 24, n_days)
    m = np.nan_to_num(m)
    # Work day-major: each row of `days` is one day's 24 link readings.
    days = np.array(np.transpose(m))
    days = days[np.all(days != 0, axis=1)]
    return np.matrix(np.transpose(days))

testing_set_1 = _clean_testing_set(link_flow_testing_set_Jul_PM_1, 8)
testing_set_2 = _clean_testing_set(link_flow_testing_set_Jul_PM_2, 7)
testing_set_3 = _clean_testing_set(link_flow_testing_set_Jul_PM_3, 7)
# -
np.size(testing_set_1, 1), np.size(testing_set_3, 0)
testing_set_3[:,:1]
# +
# write testing sets to file
zdump([testing_set_1, testing_set_2, testing_set_3], '../temp_files/testing_sets_Jul_PM.pkz')
# -
| 05_1_cross_validation_uni_class_cdc16/create_testing_sets_Jul_PM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MHaley206265/DS-Unit-2-Applied-Modeling/blob/master/module2-wrangle-ml-datasets/Matt_Haley_DSPT6_232_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="YE9WLGDh2SDQ" colab_type="code" outputId="39e61ff2-e62d-499f-ccc6-31b6baeb81ab" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install category_encoders==2.*
# !pip install pandas-profiling==2.*
# + id="HRiuwt3_vty0" colab_type="code" colab={}
# imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
# + id="cISGfYijv3B4" colab_type="code" outputId="b19f1a17-3e98-43d0-cad6-66f0195566e2" colab={"base_uri": "https://localhost:8080/", "height": 238}
stars = pd.read_csv('pulsar_stars.csv')
stars.head()
# + id="vJ9SNhkKwF1U" colab_type="code" outputId="b4b41896-096e-462f-d172-349308d03015" colab={"base_uri": "https://localhost:8080/", "height": 34}
stars.shape
# + id="5Mkvz222zKFw" colab_type="code" outputId="a8e5f3e7-d6ea-409b-84e6-a9580caa19dd" colab={"base_uri": "https://localhost:8080/", "height": 68}
stars['target_class'].value_counts(normalize=True)
# + id="TM0vgX6P0RjE" colab_type="code" outputId="296a4876-5558-4a43-d75d-34c7ebd11811" colab={"base_uri": "https://localhost:8080/", "height": 34}
train, test = train_test_split(stars, test_size=.2, random_state=42, stratify=stars['target_class'])
train.shape, test.shape
# + id="uoatrvPX1qo8" colab_type="code" colab={}
train, val = train_test_split(train, test_size=.2, random_state=42, stratify=train['target_class'])
# + id="XQJVv2R215o4" colab_type="code" outputId="3725e8b8-b7aa-42f4-dcde-eb620911a055" colab={"base_uri": "https://localhost:8080/", "height": 34}
train.shape, val.shape, test.shape
# + id="ZC_vGkyr3Ag4" colab_type="code" colab={}
target = 'target_class'
features = list(stars.drop(columns=target).columns)
# + id="4FdH7ST23V2v" colab_type="code" colab={}
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
# + id="CWGMI_4yzYdG" colab_type="code" colab={}
# from pandas_profiling import ProfileReport
# profile = ProfileReport(train, minimal=True).to_notebook_iframe()
# profile
# + id="BFvuTbYl2jjm" colab_type="code" colab={}
# Random-forest pipeline: ordinal-encode categorical features, impute
# missing values (SimpleImputer defaults to the mean), then fit the forest.
# NOTE(review): the pulsar features appear to be all numeric, so the
# OrdinalEncoder is presumably a pass-through here — confirm.
pipe = make_pipeline(
    ce.ordinal.OrdinalEncoder(),
    SimpleImputer(),
    RandomForestClassifier(random_state=42, n_jobs=-1)
)
# + id="8Qx3WxT429Um" colab_type="code" outputId="af10283a-64e5-49f7-9f34-69589ad7ff93" colab={"base_uri": "https://localhost:8080/", "height": 391}
pipe.fit(X_train, y_train)
# + id="5sHgBJf-3n_R" colab_type="code" outputId="5093c91a-5141-48cf-8a51-d5278c25467f" colab={"base_uri": "https://localhost:8080/", "height": 34}
pipe.score(X_val, y_val)
# + id="bGnSOQDO5Pli" colab_type="code" colab={}
y_pred_proba = pipe.predict_proba(X_val)[:, -1]
# + id="zTPol0cM4yAC" colab_type="code" colab={}
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_val, y_pred_proba)
# + id="R3RTFZpr48Te" colab_type="code" outputId="fd792e81-6e3d-4b33-b6d3-3372c3bc7209" colab={"base_uri": "https://localhost:8080/", "height": 295}
plt.scatter(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate');
# + id="EEgTdaf30Tiq" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
# NOTE(review): target_class is binary (0/1); LinearRegression treats it as a
# continuous value, so lin_pipe.score() below reports R^2, not accuracy — its
# value is not directly comparable to the RandomForestClassifier's score.
lin_pipe = make_pipeline(
    ce.ordinal.OrdinalEncoder(),
    SimpleImputer(),
    LinearRegression()
)
# + id="0WQPNotB1afQ" colab_type="code" outputId="a8955467-86b4-4e6d-e07e-8e0ef187f45c" colab={"base_uri": "https://localhost:8080/", "height": 238}
lin_pipe.fit(X_train, y_train)
# + id="WcT42yRU1tzM" colab_type="code" outputId="dae093a4-eb81-47d3-98cf-d5d5709ee348" colab={"base_uri": "https://localhost:8080/", "height": 34}
lin_pipe.score(X_val, y_val)
| module2-wrangle-ml-datasets/Matt_Haley_DSPT6_232_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VacationPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
## -------------------------
## HEADER COMMENTS
## Python-API-Challenge-VacationPy-Homework
## Date Due April 18, 2020
## Author: <NAME>
## FILE NAME: VacationPy.ipynb
## VERSION: VacationPy_robgauer_vfinal.ipynb # Final code is a duplicate copy of 'VacationPy.ipynb'
## -------------------------
# Read 'Preview README.md'
#
## DESCRIPTION
# Part II - VacationPy
# Now let's use your skills in working with weather data to plan future vacations.
# Use jupyter-gmaps and the Google Places API for this part of the assignment.
# Note: if you are having trouble displaying the maps, try running `jupyter nbextension
# enable --py gmaps` in your environment and retry.
#
## Your final notebook must:
## -------------------------
# Create a heat map that displays the humidity for every city from the part I of the homework.
# Narrow down the DataFrame to find your ideal weather condition. For example:
# A max temperature lower than 80 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
# Drop any rows that don't contain all three conditions. You want to be sure the weather is ideal.
# Using Google Places API to find the first hotel for each city located within 5000 meters of your coordinates.
# Plot the hotels on top of the humidity heatmap with each pin containing the Hotel Name, City, and Country.
# Create a new GitHub repository for this project called API-Challenge.
# You must complete your analysis using a Jupyter notebook.
#
## -------------------------
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Output Files (CSV)
output_data_file_hotels = "output_data/hotels_name_list.csv"
# Initialization and define URL to obtain data query end-points
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# -
# Import API key
from api_keys import g_key
g_key
# ### Store Part I results into DataFrame
# * Load the csv exported in Part I to a DataFrame
# Load the csv exported file from part 1 'WeatherPy' to a DataFrame
cities_csv_file="output_data/cities-cleaned.csv"
cities_cleaned_df=pd.read_csv(cities_csv_file)
cities_cleaned_df.head()
# DataFrame load creates column 'Unnamed: 0'. Remove the column with df.drop
cities_cleaned_df.drop('Unnamed: 0',axis=1, inplace=True)
# DataFrame prepared for processing...
cities_cleaned_df.head()
# ### Humidity Heatmap
# * Configure gmaps.
# * Use the Lat and Lng as locations and Humidity as the weight.
# * Add Heatmap layer to map.
# +
# Create a heat map that displays the humidity for every city from the part I of the homework.
gmaps.configure(api_key=g_key)
#fig = gmaps.figure(layout=figure_layout)
humidity=cities_cleaned_df["Humidity"].astype(float)
locations=cities_cleaned_df[["Latitude","Longitude"]].astype(float)
fig = gmaps.figure()
# Add marker layer on top of heat map
figure_layout = {
'width': '960px',
'height': '600px',
'border': '1px solid black',
'padding': '1px',
'margin': '0 auto 0 auto'
}
fig = gmaps.figure(layout=figure_layout,zoom_level=2,center=(15,25))
# Create heat layer
heat_layer=gmaps.heatmap_layer(locations,
weights=humidity,dissipating=False,max_intensity=max(cities_cleaned_df["Humidity"]),
point_radius=2)
# Add layers
fig.add_layer(heat_layer)
# Display figure
fig
# -
# Save Heatmap to file
# NOTE(review): gmaps figures are Jupyter widgets, not matplotlib figures —
# plt.savefig() here presumably writes out an empty matplotlib canvas rather
# than the heatmap above. Verify the PNG; a gmaps/widget export tool may be
# needed instead.
plt.savefig("output_data/heatmap-humidity-cities-cleaned-df.png")
# ### Create new DataFrame fitting weather criteria
# * Narrow down the cities to fit weather conditions.
# * Drop any rows will null values.
# +
## DATAFRAME WEATHER CRITERIA
# A max temperature lower than 80 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
# Drop any rows that don't contain all three conditions. You want to be sure the weather is ideal.
# Create a new dataframe for identifying and removing rows that fail the DataFrame Weather Criteria above...
reduced_cities_df=cities_cleaned_df
reduced_cities_df.count()
# -
## PERFORM DATAFRAME CRITERIA CALCULATIONS
# A max temperature lower than 80 degrees but higher than 70.
reduced_cities_df = reduced_cities_df.loc[(reduced_cities_df["Maximum Temperature"] < 80) & (reduced_cities_df["Maximum Temperature"] > 70)]
# Wind speed less than 10 mph.
reduced_cities_df = reduced_cities_df.loc[reduced_cities_df["Wind Speed"] < 10]
# Zero cloudiness.
reduced_cities_df = reduced_cities_df.loc[reduced_cities_df["Cloudiness"] == 0]
# Drop any rows with null values
reduced_cities_df = reduced_cities_df.dropna()
reduced_cities_df
# Determine quantity of cities that meet DataFrame Weather Criteria
reduced_cities_df.count()
# ### Hotel Map
# * Store into variable named `hotel_df`.
# * Add a "Hotel Name" column to the DataFrame.
# * Set parameters to search for hotels with 5000 meters.
# * Hit the Google Places API for each city's coordinates.
# * Store the first Hotel result into the DataFrame.
# * Plot markers on top of the heatmap.
# Determine quantity of cities that meet DataFrame Weather Criteria
reduced_cities_df.describe()
# Create a new dataframe for the hotel search, starting from the weather-filtered cities above...
hotel_cities_df=reduced_cities_df
hotel_cities_df
# PARAMS Dictionary updates each iteration
params = {
    "radius": 5000,      # search radius in meters (assignment specifies 5000 m)
    "types": "lodging",  # Google Places type used to find hotels
    "key": g_key
}
# For each candidate city, query the Places Nearby Search API and record the
# first (closest) lodging result in a new 'Hotel Name' column.
for index, city in hotel_cities_df.iterrows():
    # get lat, lng from df
    lat = city["Latitude"]
    lng = city["Longitude"]
    # Change location for each iteration while leaving original params in place
    params["location"] = f"{lat},{lng}"
    # Create url and make API request
    print(f"Retrieving Results for Index {index}: {city['Weather City']}.")
    response = requests.get(base_url, params=params).json()
    # Extract results
    results = response['results']
    try:
        # Store only the first result, which Places returns as the closest.
        print(f"Closest hotel is {results[0]['name']}.")
        hotel_cities_df.loc[index, 'Hotel Name'] = results[0]['name']
    except (KeyError, IndexError):
        # No lodging within the radius (or unexpected payload) — skip this city.
        print("EXCEPTION: City name is not found. Skip to next city...")
    print("-------------------------------------------------------")
# Display output of hotel_cities_df with added Hotel Name column and data
hotel_cities_df
# Export the 'Hotel_Cities' DataFrame output to a csv
hotel_cities_df.to_csv(output_data_file_hotels)
# +
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>Weather City</dt><dd>{Weather City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**city) for index, city in hotel_cities_df.iterrows()]
locations = hotel_cities_df[["Latitude", "Longitude"]]
# +
# Add marker layer on top of heat map
figure_layout = {
'width': '960px',
'height': '600px',
'border': '1px solid black',
'padding': '1px',
'margin': '0 auto 0 auto'
}
fig = gmaps.figure(layout=figure_layout,zoom_level=2,center=(15,25))
# Create hotel symbol layer
hotel_layer = gmaps.marker_layer(
locations,info_box_content=[info_box_template.format(**city) for index, city in hotel_cities_df.iterrows()]
)
# Add layers
fig.add_layer(heat_layer)
fig.add_layer(hotel_layer)
# Display figure
fig
# -
# Save Heatmap to file
plt.savefig("output_data/heatmap-hotel-cities-df.png")
# +
## ---EOF--- ##
| WeatherPy/.ipynb_checkpoints/VacationPy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import syft as sy
import torch as th
import numpy as np
# Populate the notebook globals with a sandboxed set of syft workers
# (hook/verbose flags both disabled).
sy.create_sandbox(globals(), False, False)
# -
from syft.frameworks.torch.tensors.interpreters.numpy import NumpyTensor
# Wrap a 1x4 numpy array as a syft NumpyTensor chain.
x = NumpyTensor(numpy_tensor=np.array([[1,2,3,4]])).wrap()
# (1x4) @ (4x1) -> 1x1; 1+4+9+16 = 30.
y = x.dot(x.transpose())
# Unwrap two chain levels to reach the underlying numpy array and check it.
assert (y.child.child == np.array([[30]])).all()
| examples/experimental/NumpyTensor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the incidents data and build a (Region, EventType) MultiIndex.
incidents = pd.read_csv("./data/Canadian Railway Crossing Incidents.csv")
incidents.set_index(['Region','EventType'],inplace=True)
# Sorting the index is required for efficient (and warning-free) lookups.
incidents.sort_index(inplace=True)
incidents.head(10)
# Rebuild the index with the levels reversed: (EventType, Region).
incidents.reset_index(inplace=True)
incidents.set_index(["EventType","Region"],inplace=True)
incidents.head()
incidents.sort_index(inplace=True)
incidents.head()
# swaplevel() exchanges the two index levels without re-sorting...
incidents.swaplevel().head()
# ...so chain sort_index() to restore sorted order.
incidents.swaplevel().sort_index().head()
# Persist the swapped-and-sorted view back into `incidents`.
incidents = incidents.swaplevel().sort_index()
incidents.head()
| Multiindex_tabele_przestawne/Swaplevel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## How to make a 3D web visualisation without a single line of code
#
# In this notebook we use QGIS to create a shareable terrain model with a data overlay, which can be shared on a web server, without typing a single line of code. Let's go!
#
# To interact with the maps we make below, check this notebook out in the ipython notebook viewer:
#
# http://nbviewer.jupyter.org/github/adamsteer/nci-notebooks/blob/master/How%20to%20make%203D%20visualisations%20from%20NCI%20data%20without%20any%20coding%20%28nearly%29.ipynb
# ### 1. Decide on some datasets
#
# Let's use Canberra. We need:
# * a terrain model
# * some data - what about... false colour LandSAT imagery? or NVDI? or how about vegetation types?
# * some more data - how about OpenStreetMap buildings?
# ### 2. Get the topography data and import into QGIS
#
# In this approach we download all our data from NCI to our machine (or could use it in the VDI), because we need to modify bands - which QGIS is not happy to do for web services (WMS or WCS).
#
# For topography we need a terrain model in a raster format, e.g. GeoTIFF, which covers Canberra. We can head to the NCI Elevation collection here:
#
# http://dapds00.nci.org.au/thredds/catalog/rr1/Elevation/catalog.html
#
# ...and look for SRTM 1 second elevation - good enough for this job. If you are happy with ESRI grids, navigate to the tile collection here:
#
# http://dapds00.nci.org.au/thredds/catalog/rr1/Elevation/1secSRTM_DEMs_v1.0/DEM/Tiles/catalog.html
#
# Data are organised in folders named by the longitude and latitude of the south west (bottom left) corner. For Canberra we need 149, -36.
#
# ...and for now, this is probably the best method. We could make an OpenDAP or WCS request for a subset, but that would be coding! The pull of the dark side is strong - so here is a link that gets a GeoTIFF from the SRTM tile, using WCS:
#
# http://dapds00.nci.org.au/thredds/wcs/rr1/Elevation/NetCDF/1secSRTM_DEMs_v1.0/DEM/Elevation_1secSRTM_DEMs_v1.0_DEM_Tiles_e149s36dem1_0.nc?service=WCS&version=1.0.0&request=GetCoverage&Coverage=elevation&bbox=149.0,-36,149.9,-35&format=GeoTIFF
#
# Now, import the resulting GeoTIFF into QGIS as a raster data source:
#
# 
# ### 3. Install a QGIS plugin and use it.
#
# This is seriously not coding - just head to the "plugins" menu, click "manage and install plugins", and find the Qgis2threejs plugin. Install it:
#
# 
# ### 4. Set up your first interactive map
#
# Now zoom into the DEM so that it covers the entire display window, then head to the 'web' menu. Choose 'qgis2threejs'. In the resulting dialog box, click 'world' in the left pane, and find 'vertical exaggeration'. Set it to 10.
#
# In 'DEM', the one active layer (your GeoTIFF) should be preselected. Click 'run', and you should get a web browser opening with a 3D model inside!
#
# The expected output is shown below - try viewing the notebook here:
#
# http://nbviewer.jupyter.org/github/adamsteer/nci-notebooks/blob/master/How%20to%20make%203D%20visualisations%20from%20NCI%20data%20without%20any%20coding%20%28nearly%29.ipynb
#
# ...you should be able to move the map, zoom in and out, and generally inspect a terrain model.
###ignore this block of code - it is required only to show the map in iPython - you won't need it!
# `display` and `HTML` live in IPython.display; importing them from
# IPython.core.display is deprecated (IPython >= 7.14) and warns.
from IPython.display import display, HTML
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/ACT_elevs_test_1.html"></iframe>'))
# #### and there - your first 3D interactive map, made with no coding and using web data services!
#
# ...but it isn't very useful or informative. How can we fix that?
# ## 5. Add some more data
#
# We might be interested in land cover attributes for our region, so let's get them! How about photosynthetic vegetation for 2015?
#
# http://dapds00.nci.org.au/thredds/catalog/ub8/au/FractCov/PV/catalog.html?dataset=ub8-au/FractCov/PV/FractCover.V3_0_1.2015.aust.005.PV.nc
#
# Using WCS again - click on the WCS link, and look for a <name> tag - it says 'PV'. This is the coverage we need to get. So we form a WCS request like this:
#
# * the dataset: http://dapds00.nci.org.au/thredds/wcs/ub8/au/FractCov/PV/FractCover.V3_0_1.2015.aust.005.PV.nc
# * the service: service=WCS
# * the service version: version=1.0.0
# * the thing we want to do (get a coverage): request=GetCoverage
# * the coverage (or layer) we want to get: Coverage=PV
# * the boundary of the layer we want: bbox=149.0,-36,149.9,-35
# * the format we want to get our coverage as: format=GeoTIFF
#
# ...so we put a question mark after the dataset name, then add the rest of the labels describing the thing we want afterward, in any order, separated by ampersands:
#
# http://dapds00.nci.org.au/thredds/wcs/ub8/au/FractCov/PV/FractCover.V3_0_1.2015.aust.005.PV.nc?service=WCS&version=1.0.0&request=GetCoverage&Coverage=PV&bbox=149.0,-36,149.9,-35&format=GeoTIFF
#
# (woah. There was a glitch in the matrix - if we didn't write out that URL, you would have had to code just now).
#
# Add the resulting GeoTIFF to QGIS as a raster data source, just like the DEM! Once you have a style you're happy with, zoom to the desired extent and use the qgis2threejs plugin to make a new map.
# #### ACT DEM coloured by green vegetation content
# Click on the map to inspect features, click and drag to move, scroll to zoom.
# Embed the interactive qgis2threejs export (DEM coloured by green vegetation) as an iframe.
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/act_elevs_plus_greenveg.html"></iframe>'))
# Now we have an elevation map coloured by green vegetation! But that's only a pretty picture.
# ### 6. Now lets do some analysis!
#
# We will try to show:
#
# * Standard deviation of elevation of blocks as a proxy for hilliness, plotted as a volume on the elevation map
# * Sum of fractional PV cover (maybe bare ground, or tree cover?) for each block, also as a volume on the map
# * Interactive layer selection using three.js maps
#
# ...and no code - only button clicking.
#
# Back to QGIS. We need another plugin - zonal statistics! We know how to install plugins, so get to it. When ready, we'll make a hilliness proxy first.
#
# Next we will make a vegetation cover proxy
#
# ..and finally, can we come up with a metric of vegetation cover as a function of block hilliness? A question here might be 'do land owners tend to clear flatter blocks more than hilly blocks?'. Can we answer it using web services data, QGIS, some clicks - and then make a very pretty, and interactive map?
#
# On to the first question - how do we get ACT block data? Head to ACTMAPi and find ACT blocks. Here's a shortcut:
#
# http://actmapi.actgov.opendata.arcgis.com/datasets/afa1d909a0ae427cb9c1963e0d2e80ca_4
#
# Find the 'API' menu box, click the down arrow and then copy the URL inside the JSON option:
#
# http://actmapi.actgov.opendata.arcgis.com/datasets/afa1d909a0ae427cb9c1963e0d2e80ca_4.geojson
#
# In QGIS, add a new vector layer. Choose 'service' from the options, and paste the URL into the appropriate box:
#
#
#
# Wait a while! Because of the way QGIS handles remote GeoJSON, you'll need to save the result as a shapefile before we can do anything with it. Save the layer, close the GeoJSON layer and reopen your new ACT blocks shapefile.
#
# ...it's probably simpler to just download and open the shapefile - but now we know how to add vector layers from a web service. Either way, the result is something like this:
#
# We don't need to make the blocks pretty yet - first, we'll do something with them!
# ### Block hilliness
#
# Using the SRTM DEM, let's make a proxy of block hilliness. Check that you have the QGIS zonal statistics plugin (Raster -> zonal statistics). If not, install it the same way you installed qgis2three.js. Once you have it, open the plugin. Choose the DEM as the raster layer, and use band 1. Then choose your ACT blocks vector layer. In the statistics to calculate, pick an appropriate set - but include *standard deviation* - this is our roughness proxy.
#
# Run the plugin, then open the properties box of the ACT blocks layer and colour your blocks by standard deviation. The Zonal statistics plugin has looped through all the polygons in the blocks layer and computed descriptive statistics of the underlying DEM. And there you have it - ACT blocks coloured by the standard deviation of the elevation they contain, as a proxy for hilliness.
#
# Now let's make a cool map! Zoom in so that you have a region you're happy with occupying your map view, and open the qgis2threejs plugin.
#
# In the left pane, under polygon option choose your ACT blocks layer. Options for styling it appear on the right - which I'll recreate again because I crashed QGIS again at a bad time (Save, fella, save!)
#
#
#
# #### Result - 3D block hilliness visualised as height of a block polygon
#
# What we see here are cadastral blocks, with our 'hilliness proxy' displayed by colour and extruded column height. Darker blue, and taller columns are hillier! Click on the map to inspect features, click and drag to move, scroll to zoom.
# Embed the interactive qgis2threejs export (block hilliness proxy, extruded polygons).
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/act_block_hilliness_proxy.html"></iframe>'))
# ## Now, let's add some complexity
#
# We have some elevation data, we have some cadastral data, we have some data about photosynthetic vegetation cover. We can do our quick visualisation of whether block hilliness (as determined by SRTM height standard deviation for each block) is related to photosynthetic vegetation cover (as determined by the median of vegetation cover inside each block). We can set this up as follows:
#
# * classify and colour blocks by vegetation cover
# * visualise block hilliness as the height of an extruded column
#
# In this scheme, if our hypothesis is that hillier blocks are less cleared, dark green blocks will visualise as taller columns. Let's test it out!
# Embed the combined map: colour = vegetation cover, column height = hilliness proxy.
display(HTML('<iframe width="800" height="600" frameborder="1" scrolling ="no" src="./qgis2threejs/veg_mean_colours.html"></iframe>'))
# ### So what do our results mean?
#
# We have a lot of short, dark green blocks! What does this mean?
#
# Let's take a google maps image of our site:
| qgis/How to make 3D visualisations from NCI data without any coding (nearly).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Given two strings, find the longest common subsequence.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Can we assume the inputs are valid?
# * No
# * Can we assume the strings are ASCII?
# * Yes
# * Is this case sensitive?
# * Yes
# * Is a subsequence a non-contiguous block of chars?
# * Yes
# * Do we expect a string as a result?
# * Yes
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * str0 or str1 is None -> Exception
# * str0 or str1 equals 0 -> ''
# * General case
#
# str0 = 'ABCDEFGHIJ'
# str1 = 'FOOBCDBCDE'
#
# result: 'BCDE'
# ## Algorithm
#
# Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
class StringCompare(object):
    """String comparison utilities for the LCS challenge."""

    def longest_common_subseq(self, str0, str1):
        """Return the longest common subsequence of str0 and str1.

        A subsequence need not be contiguous. Case sensitive.

        Raises:
            TypeError: if either input is None.
        Returns:
            '' when either string is empty, otherwise one longest common
            subsequence. Runs in O(len(str0) * len(str1)) time and space.
        """
        if str0 is None or str1 is None:
            raise TypeError('str0 and str1 cannot be None')
        m, n = len(str0), len(str1)
        # dp[i][j] = length of the LCS of str0[:i] and str1[:j]
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if str0[i - 1] == str1[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        # Walk back through the table to reconstruct one LCS.
        chars = []
        i, j = m, n
        while i > 0 and j > 0:
            if str0[i - 1] == str1[j - 1]:
                chars.append(str0[i - 1])
                i -= 1
                j -= 1
            elif dp[i - 1][j] >= dp[i][j - 1]:
                i -= 1
            else:
                j -= 1
        return ''.join(reversed(chars))
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# +
# # %load test_longest_common_subseq.py
# nose is unmaintained and does not run on modern Python; plain assert
# statements raise the same AssertionError nose's assert_equal did.


class TestLongestCommonSubseq(object):
    """Unit test for StringCompare.longest_common_subseq."""

    def test_longest_common_subseq(self):
        str_comp = StringCompare()
        # None inputs must raise TypeError.
        try:
            str_comp.longest_common_subseq(None, None)
            raise AssertionError('expected TypeError for None inputs')
        except TypeError:
            pass
        # Empty strings yield an empty subsequence.
        assert str_comp.longest_common_subseq('', '') == ''
        # General case.
        str0 = 'ABCDEFGHIJ'
        str1 = 'FOOBCDBCDE'
        expected = 'BCDE'
        assert str_comp.longest_common_subseq(str0, str1) == expected
        print('Success: test_longest_common_subseq')


def main():
    test = TestLongestCommonSubseq()
    test.test_longest_common_subseq()


if __name__ == '__main__':
    main()
# ## Solution Notebook
#
# Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| recursion_dynamic/longest_common_subsequence/longest_common_subseq_challenge.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.1
# language: julia
# name: julia-1.5
# ---
# +
# INPUT:
# nIter = Number of iterations
# covs = d × d × n array containing covariance matrices
# sqrt_covs = d × d × n array containing square roots of matrices in covs
# objective = length nIter vector to save barycenter objective values in
# times = length nIter vector to save timings in
# η = step size
# distances = length nIter vector to save (W₂)² distance to (sqrt_best)² over training
# sqrt_best = square root of a d × d matrix that we calculate distances to throughout training (ideally taken to be the true barycenter)
# OUTPUT:
# square root of d × d matrix that achieves best barycenter functional throughout training
# Fixed-step Riemannian gradient descent for the Bures-Wasserstein barycenter
# (argument descriptions are in the INPUT/OUTPUT header above).
function GD!(nIter, covs, sqrt_covs, objective, times, η, distances, sqrt_best)
    start = time()
    d = size(covs)[1]   # matrix dimension
    n = size(covs)[3]   # number of covariance matrices
    # Initialise the iterate at the first covariance matrix.
    X = zeros(d, d)
    X .= covs[:,:,1]
    # Cache variables for memory efficiency. T refers to transport map
    T = zeros(d,d)
    evals = zeros(d)
    evecs = zeros(d,d)
    # Track the best objective value seen and the iterate achieving it.
    bestval = Inf
    candidate_best = zeros(d,d)
    for i in 1:nIter
        T .= zeros(d,d)
        for j in 1:n
            sq = @view sqrt_covs[:,:,j]
            # One eigendecomposition serves both the objective term
            # ((sq*X*sq)^(1/2)) and the transport map ((sq*X*sq)^(-1/2)).
            e = eigen(Symmetric(sq*X*sq))
            evals .= e.values
            evecs .= e.vectors
            # Accumulate tr(covs_j) - 2*tr((sq*X*sq)^(1/2)); tr(X) is added after the loop.
            objective[i] += tr(covs[:,:,j].-2*evecs*diagm(evals.^(0.5))*evecs')
            # Optimal transport map from X to covs_j: sq*(sq*X*sq)^(-1/2)*sq.
            T .= T .+ sq*evecs*diagm(evals.^(-0.5))*evecs'*sq
        end
        objective[i] = objective[i]/n + tr(X)
        if objective[i] < bestval
            candidate_best .= X
            bestval = objective[i]
        end
        T .= T./n   # average transport map over the dataset
        distances[i] = bures(sqrt_best, X)
        # Geodesic-style update: push X along the averaged transport map.
        X .= Symmetric(((1 - η).*I(d) .+ η.*T)*X*((1-η).*I(d) + η.*T))
        times[i] = time()-start
    end
    return candidate_best^.5
end
# INPUT:
# nIter = Number of iterations
# covs = d × d × n array containing covariance matrices
# sqrt_covs = d × d × n array containing square roots of matrices in covs
# objective = length nIter vector to save barycenter objective values in
# times = length nIter vector to save timings in
# η = step size
# α = lower eigenvalue to threshold at (should be ∼ average minimum eigenvalue of covs)
# β = upper eigenvalue to threshold at (should be ∼ average maximum eigenvalue of covs)
# distances = length nIter vector to save (W₂)² distance to (sqrt_best)² over training
# sqrt_best = square root of a d × d matrix that we calculate distances to throughout training (ideally taken to be the true barycenter)
# Euclidean gradient descent variant: steps in the flat matrix metric, then
# clips eigenvalues back into [α, β] to keep the iterate positive definite
# (argument descriptions are in the INPUT header above).
function EGD!(nIter, covs, sqrt_covs, objective, times, η, α, β, distances, sqrt_best)
    start = time()
    d = size(covs)[1]   # matrix dimension
    n = size(covs)[3]   # number of covariance matrices
    # Initialise the iterate at the first covariance matrix.
    X = zeros(d, d)
    X .= covs[:,:,1]
    # Cache variables for memory efficiency. T refers to transport map
    T = zeros(d,d)
    evals = zeros(d)
    evecs = zeros(d,d)
    for i in 1:nIter
        T .= zeros(d,d)
        for j in 1:n
            sq = @view sqrt_covs[:,:,j]
            # Eigendecomposition used for the objective and the transport map.
            e = eigen(Symmetric(sq*X*sq))
            evals .= e.values
            evecs .= e.vectors
            objective[i] += tr(covs[:,:,j].-2*evecs*diagm(evals.^(0.5))*evecs')
            T .= T .+ sq*evecs*diagm(evals.^(-0.5))*evecs'*sq
        end
        objective[i] = objective[i]/n + tr(X)
        times[i] = time()-start
        distances[i] = bures(sqrt_best, X)
        T .= T./n   # average transport map over the dataset
        # Euclidean step along the negative gradient (I - T), then eigenvalue clip.
        X .= Symmetric(X .- η.*(I(d) .- T))
        clip!(X, α, β)
    end
end
# INPUT
# covs = d × d × n array containing covariance matrices
# sqrt_covs = d × d × n array containing square roots of matrices in covs
# X = starting covariance matrix
# objective = length nIter vector to save barycenter objective values in
# times = length nIter vector to save timings in
# ηs = array of length n of stepsizes, or single number (in which case that step size is used for all steps)
# sqrt_bary = square root of true barycenter
# Stochastic gradient descent: one geodesic step per covariance matrix,
# visiting the dataset once (argument descriptions are in the INPUT header above).
function SGD!(covs, sqrt_covs, X, objective, times, ηs; sqrt_bary = nothing)
    start = time()
    d = size(covs)[1]   # matrix dimension
    n = size(covs)[3]   # number of covariance matrices / steps
    # Without a reference barycenter, distances are measured to the identity.
    if isnothing(sqrt_bary)
        sqrt_bary = I(d)
    end
    # Cache variables for memory efficiency. T refers to transport map
    T = zeros(d,d)
    evals = zeros(d)
    evecs = zeros(d,d)
    for i in 1:n
        # Single step size if one value was given, otherwise per-step sizes.
        if length(ηs) == 1
            η = ηs[1]
        else
            η = ηs[i]
        end
        sq = @view sqrt_covs[:,:,i]
        e = eigen(Symmetric(sq*X*sq))
        evals .= e.values
        evecs .= e.vectors
        # Optimal transport map from X to the i-th covariance.
        T .= sq*evecs*diagm(evals.^(-0.5))*evecs'*sq
        times[i] = time()-start
        objective[i] = bures(sqrt_bary, X)
        # Geodesic-style update along the single-sample transport map.
        X .= Symmetric(((1 - η).*I(d) .+ η.*T)*X*((1-η).*I(d) + η.*T))
    end
end
# INPUT
# covs = d × d × n array containing covariance matrices
# sqrt_covs = d × d × n array containing square roots of matrices in covs
# objective = length nIter vector to save barycenter objective values in
# times = length nIter vector to save timings in
# ηs = array of length n of stepsizes, or single number (in which case that step size is used for all steps)
# α = lower eigenvalue to threshold at (should be ∼ average minimum eigenvalue of covs)
# β = upper eigenvalue to threshold at (should be ∼ average maximum eigenvalue of covs)
# Euclidean stochastic gradient descent: one flat-metric step per covariance
# matrix with eigenvalue clipping (argument descriptions are in the INPUT header above).
function ESGD!(covs, sqrt_covs, objective, times, ηs, α, β)
    start = time()
    d = size(covs)[1]   # matrix dimension
    n = size(covs)[3]   # number of covariance matrices / steps
    # Initialise the iterate at the first covariance matrix.
    X = zeros(d, d)
    X .= covs[:,:,1]
    # Cache variables for memory efficiency. T refers to transport map
    T = zeros(d,d)
    evals = zeros(d)
    evecs = zeros(d,d)
    for i in 1:n
        # Single step size if one value was given, otherwise per-step sizes.
        if length(ηs) == 1
            η = ηs[1]
        else
            η = ηs[i]
        end
        sq = @view sqrt_covs[:,:,i]
        e = eigen(Symmetric(sq*X*sq))
        evals .= e.values
        evecs .= e.vectors
        # Optimal transport map from X to the i-th covariance.
        T .= sq*evecs*diagm(evals.^(-0.5))*evecs'*sq
        times[i] = time()-start
        # Progress is measured as the W2^2 distance to the identity here.
        objective[i] = bures(I(d), X)
        # Euclidean step along the negative gradient (I - T), then eigenvalue clip.
        X .= Symmetric(X .- η.*(I(d) .- T))
        clip!(X, α, β)
    end
end
# INPUT
# covs = d × d × n array containing covariance matrices
# verbose = boolean indicating whether SDP solver should be verbose
# maxIter = maximum number of iterations
# OUTPUT
# the barycenter (d × d matrix)
# Solve the barycenter problem as a semidefinite program via Convex.jl + SCS
# (argument descriptions are in the INPUT/OUTPUT header above).
function SDP(covs; verbose=false, maxIter = 5000)
    d = size(covs)[1]   # matrix dimension
    n = size(covs)[3]   # number of covariance matrices
    # Σ is the barycenter candidate; Ss[i] couples Σ with the i-th covariance.
    Σ = Variable(d,d)
    Ss = [Variable(d,d) for _ in 1:n]
    # Schur-complement style PSD constraints linking each covs_i, S_i and Σ.
    constr = [([covs[:,:,i] Ss[i]; Ss[i]' Σ] ⪰ 0) for i in 1:n]
    # Objective: tr(Σ) - 2 * mean_i tr(S_i), the SDP relaxation of the barycenter functional.
    problem = minimize(tr(Σ) - 2*mean(tr.(Ss)))
    problem.constraints += constr
    problem.constraints += (Σ ⪰ 0)
    optimizer = SCS.Optimizer(verbose = verbose)
    # Cap the SCS iteration count.
    MOI.set(optimizer, MOI.RawParameter("max_iters"), maxIter)
    solve!(problem, optimizer)
    return Σ.value
end
# Clip the eigenvalues of X to the interval [α, β], in place.
# Wrapping X in Symmetric forces a real, symmetric eigendecomposition even
# when floating-point round-off has left X slightly asymmetric - matching
# the eigen(Symmetric(...)) convention used everywhere else in this file;
# a plain eigen(X) on a perturbed X could return complex factors and break
# the clamp/diagm reconstruction below.
function clip!(X, α, β)
    e = eigen(Symmetric(X))
    X .= Symmetric(e.vectors*diagm(clamp.(e.values, α, β))*e.vectors')
end
# Squared 2-Wasserstein (Bures) distance between the centred Gaussians with
# covariances sq^2 and x, where sq is the matrix square root of the first:
#   W₂² = tr(x + sq² - 2 (sq x sq)^(1/2)).
# Symmetric(...) guards against round-off asymmetry so eigen stays real,
# consistent with the eigendecompositions used by the solvers above.
function bures(sq,x)
    e = eigen(Symmetric(sq*x*sq))
    return tr(x+sq*sq- 2 .* e.vectors*diagm(e.values.^(.5))*e.vectors')
end
# Barycenter objective at X: the mean squared Bures (W₂²) distance from X to
# the dataset whose i-th covariance is sqrt_covs[:,:,i]^2.
function barycenter_functional(sqrt_covs, X)
    return mean(i -> bures(sqrt_covs[:,:,i], X), 1:size(sqrt_covs)[3])
end
| Barycenter_algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# (109) Create a tuple holding the integers 1..10 and print the sum of its elements.
# Read the upper bound n from the user.
n = int(input("insert number"))
def num_sum(n):
    """Build a tuple of the integers 1..n and return the sum of its elements."""
    values = tuple(range(1, n + 1))
    return sum(values)
# Show (as the cell's result) the sum for the user-supplied n.
num_sum(n)
# +
# (109) Create a tuple holding the integers 1..10 and print the sum of its
# elements (second, function-free variant).
a = list(range(1, 11))
sum(tuple(a))
# +
# (127) Find the common divisors of 16 and 30.
# Divisors of each number, collected directly into sets.
set_a = {i for i in range(1, 17) if 16 % i == 0}
set_b = {i for i in range(1, 31) if 30 % i == 0}
# Intersection = the common divisors.
set_a & set_b
# +
# (127) Find the common divisors of 16 and 30. (version 2)
def common_divisor(n):
    """Return the set of all positive divisors of n."""
    return {i for i in range(1, n + 1) if n % i == 0}


common_divisor(16) & common_divisor(30)
# -
#(128) Read 10 numbers and print the maximum.
# Convert each token to int before comparing: max() on the raw strings would
# compare lexicographically (e.g. '9' > '10'), giving a wrong answer.
nums = [int(tok) for tok in input("insert 10 numbers. each number must be separated by space. you put: ").split(" ")]
max(nums)
# +
# (128) Read 10 numbers and return the maximum.
def max_number():
    """Read 10 integers from standard input (one per line) and return the largest."""
    values = [int(input()) for _ in range(10)]
    return max(values)
# +
# (129) Print only the words whose length is 5.
a = ["alpha", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel", "india"]
b = [word for word in a if len(word) == 5]
b
# +
#(131) 각 학생의 성적을 딕셔너리로 저장 -> 그리고 각 학생의 평균, 전체평균 구하세용
# -
# |이름|수학|과학|영어|
# |:---:|:---:|:---:|:---:|
# |Dohee|76|89|93|
# |Juwon|88|87|100|
# |Seohee|86|93|82|
# +
# (131) Store each student's scores in a dict, then print every student's
# average and the overall average.
scores = {"Dohee":{"수학":76, "과학":89, "영어":93},
          "Juwon":{"수학":88, "과학":87, "영어":100},
          "seohee":{"수학":86, "과학":93, "영어":82},}
each_average = []
for name, subject_scores in scores.items():
    avg = sum(subject_scores.values()) / len(subject_scores)
    print("{}의 평균은 {}입니다".format(name, avg))
    each_average.append(avg)
total_average = sum(each_average) / len(each_average)
print("전체의 평균은 {}입니다".format(total_average))
# +
# (131) Same computation using statistics.mean.
import statistics  # was missing: statistics.mean below otherwise raises NameError

scores = {"Dohee": {"수학" : 76, "과학":89, "영어":93},
          "Juwon": {"수학" : 88, "과학":87, "영어":100},
          "Seohee": {"수학" : 86, "과학":93, "영어":82}}
each_average = []
for i in scores:
    # Per-student mean over the three subject scores.
    a = statistics.mean(scores[i].values())
    each_average.append(a)
    print(a)
# Overall mean of the per-student averages.
print(statistics.mean(each_average))
| problems-review/101~131.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:RoboND]
# language: python
# name: conda-env-RoboND-py
# ---
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# %matplotlib notebook
# Load the calibration grid image and show it, so the source points used
# for the perspective transform below can be read off by eye.
image = mpimg.imread('example_grid1.jpg')
plt.imshow(image)
plt.show()
# +
import cv2
import numpy as np
def perspect_transform(img, src, dst):
    """Warp img so that the quadrilateral src maps onto dst (perspective transform)."""
    # 3x3 homography mapping the four src points onto the four dst points.
    transform = cv2.getPerspectiveTransform(src, dst)
    # Apply it, keeping the output the same size as the input image.
    return cv2.warpPerspective(img, transform, (img.shape[1], img.shape[0]))
# Define source and destination points
source = np.float32([[119.812 ,95.1895 ], [197.877 ,95.1895 ], [14.006 ,140.996 ], [301.748 ,139.706 ]])
destination = np.float32([[150 ,140 ], [160 ,140 ], [150 , 150 ], [160 , 150 ]])
warped = perspect_transform(image, source, destination)
plt.imshow(warped)
plt.show()
# -
| Set Perspective.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Data enrichment: add land-use pattern to accident records
#
# Input: data/data_s2.csv
#
# Download POI information from Baidu API and cluster zones according to their land-use
# patterns. Assign crash records with the zone and land-use cluster.
#
# Output: data/data_s3.csv
# + pycharm={"name": "#%%\n"}
# %load_ext autoreload
# %autoreload 2
# + pycharm={"name": "#%%\n"}
import pandas as pd
import geopandas as gpd
import os
from pathlib import Path
import json
import geo_functions as gf
import data_retrieve_functions as drf
import visualisation_functions as vf
from sklearn.decomposition import PCA
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from tqdm import tqdm
def eval_k_means(estimator, data, n_clusters):
    """Fit a clustering estimator on data and print its silhouette score.

    Args:
        estimator: clusterer (e.g. sklearn KMeans) exposing fit() and labels_.
        data: array-like of shape (n_samples, n_features).
        n_clusters: number of clusters, used only in the printed report.
    Returns:
        The cluster label assigned to each sample.
    """
    estimator.fit(data)
    # Silhouette score: higher means better-separated clusters.
    print('Clusters %s - Silhouette_score %s' % (n_clusters,
                                                 metrics.silhouette_score(data,
                                                                          estimator.labels_)))
    return estimator.labels_
# + pycharm={"name": "#%%\n"}
# Resolve the shared data directory (two levels above the notebook).
data_path = os.path.join(Path(os.getcwd()).parent.parent, 'data/')
# Mapping from coded values to labels; includes the POI_type dictionary used below.
with open(os.path.join(data_path, 'value_mapping.txt'), encoding='utf-8') as json_file:
    value_dict = json.load(json_file)
# Accident records produced by the previous stage (s2).
df = pd.read_csv(os.path.join(data_path, "data_s2.csv"))
# Peek at the first records, transposed for readability.
df.iloc[0:3].transpose()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1 Find the zone of the accidents
# This part focuses on those records with geolocation info: lat, long.
# + pycharm={"name": "#%%\n"}
# Load grids
gdf_grid = gpd.GeoDataFrame.from_file(os.path.join(data_path, 'geo/grids.shp'))
# Drop duplicated records due to multiple involved persons
df_nd = df.drop_duplicates(subset=["id"])
# Convert the records with geolocation into GeoDataFrame and find which zone they fall into
gdf_acc = gf.df2gdf_point(df_nd.loc[~df_nd.lat.isna(), :], 'long', 'lat')
# Spatial join: label every accident point with the grid zone containing it.
gdf_acc.loc[:, 'zone'] = gf.point2zone(gdf_grid, gdf_acc, var2return="zone")
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 1.1 Count the number of accidents in each zone and save it
# + pycharm={"name": "#%%\n"}
## Count the number of accidents in each zone
gdf_acc_stats = gdf_acc.groupby('zone')[['id']].count().reset_index().rename(columns={'id': 'acc_num'})
gdf_acc_stats.loc[:, 'zone'] = gdf_acc_stats.loc[:, 'zone'].astype(int)
## Save the visited zones into a new shapefile
# Keep only grid cells that contain at least one accident.
gdf_grid_visit = gdf_grid.loc[gdf_grid['zone'].isin(gdf_acc.loc[:, 'zone'].unique()), :]
gdf_grid_visit = pd.merge(gdf_grid_visit, gdf_acc_stats, on='zone').loc[:, ['zone', 'acc_num', 'geometry']]
## Get centroid of each polygon
# Centroid coordinates are used later when building the POI queries.
gdf_grid_visit.loc[:, 'X'] = gdf_grid_visit.geometry.apply(lambda x: x.centroid.x)
gdf_grid_visit.loc[:, 'Y'] = gdf_grid_visit.geometry.apply(lambda x: x.centroid.y)
gdf_grid_visit.to_file(os.path.join(data_path, 'geo/grids_acc.shp'))
gdf_grid_visit.head()
# -
# ### 1.2 Join zone to the data of accident records
# + pycharm={"name": "#%%\n"}
# Drop a few accidents' location outside the study area
gdf_acc = gdf_acc.loc[~gdf_acc.loc[:, 'zone'].isnull(), :]
# Merge df_nd back to the records with zone info
df_acc = pd.DataFrame(gdf_acc.drop(columns="geometry"))
df_nd = pd.concat([df_nd.loc[df_nd.lat.isna(), :], df_acc])
# Merge zone info back to the complete accident records
df = pd.merge(df, df_nd.loc[:, ["id", "zone"]], left_on="id", right_on="id", how="left")
# Fill unknown zones
df.zone.fillna("Unknown", inplace=True)
# Normalise zone to a string: "Unknown" or an integer id such as "12".
df.zone = df.zone.apply(lambda x: str(int(x)) if x != "Unknown" else x)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 2 Retrieve POI data from Baidu API
# ### 2.1 Prepare query data
# + pycharm={"name": "#%%\n"}
# Build the query table (one row per zone) for the Baidu Place API.
df_query = drf.poi_req_prep(gdf_grid_visit, data_path)
df_query.head()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 2.2 Send queries to Baidu Place API
# This takes a few hours to download.
# + pycharm={"name": "#%%\n"}
target_file = "zone_visit_poi_output.csv"
df_q = pd.read_csv(os.path.join(data_path, "zone_visit_poi.csv"))
df_q_results = pd.read_csv(os.path.join(data_path, target_file))
## Load all the done queries in case the api access is interrupted where a resume is needed
list_df = [(x,y) for x,y in df_q_results.loc[:, ["zone", "POI"]].values]
tqdm.pandas(desc="POI searching")
# Query the Baidu Place API row by row, skipping (zone, POI) pairs already downloaded.
df_q.progress_apply(lambda row: drf.zone_poi_search(row, data_path, list_df, target_file), axis=1)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 3 Process POIs to enrich crash dataset
# ### 3.1 Preprocess obtained POIs number by zone x POI_type
# + pycharm={"name": "#%%\n"}
target_file = "zone_visit_poi_output.csv"
## Load POI_type_dict
POI_type_dict = value_dict['POI_type']
# Reverse map: POI code -> POI type name, used to build the output column names.
POI_type_dict_reverse = {v: k for k,v in POI_type_dict.items()}
## Load POI data from API results and convert it into zone, poi_1,...,poi_19 format
df_poi = pd.read_csv(os.path.join(data_path, target_file))
df_poi_list = []
# Pivot: one row per zone, one POI_<type> count column per POI category.
for zone, frame in df_poi.groupby('zone'):
    fr = pd.DataFrame(frame.loc[:, ['POI_num']].transpose().values,
                      columns=["POI_" + POI_type_dict_reverse[x] for x in frame.loc[:, 'POI'].transpose().values])
    fr.loc[:, 'zone'] = int(zone)
    df_poi_list.append(fr)
df_poi = pd.concat(df_poi_list)
df_poi = df_poi.astype(int)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 3.2 Land use cluster to merge 1000+ zones into smaller groups by their land use patterns revealed by POIs
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 3.2.1 Process POI matrix
# + pycharm={"name": "#%%\n"}
# POI category names, in the column order of the POI matrix.
# NOTE(review): poi_list is not referenced in this cell - presumably kept for
# labelling plots elsewhere; confirm before removing.
poi_list = ['food', 'hotel', 'shopping', 'life', 'beauty', 'tourism', 'leisure',
            'sports', 'education', 'media', 'medical', 'auto', 'finance',
            'real_estate', 'company', 'gov_org', 'access', 'nature', 'transport']
X = df_poi
X = X.fillna(0)
# Drop zones whose POI counts are all zero.
X = X.loc[(X.T != 0).any(), :]
zones = X["zone"].astype(int).values
X = X.drop(columns=["zone"]).values
# Scale each POI-count column to [0, 1] before PCA / clustering.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
# -
# #### 3.2.2 PCA
# + pycharm={"name": "#%%\n"}
# Keep enough principal components to explain 95% of the variance.
pca = PCA(n_components = 0.95)
pca.fit(X)
X_reduced = pca.transform(X)
# -
# #### 3.2.3 K-means to find the optimal number of clusters
# + pycharm={"name": "#%%\n"}
# Sweep k = 2..20 and print the silhouette score of each clustering.
for n_clusters in range(2,21):
    labels = eval_k_means(KMeans(n_clusters=n_clusters, random_state=0),
                          data=X_reduced,
                          n_clusters=n_clusters)
# -
# #### 3.2.4 Show the results of selected number of clusters
# K = 6 according to the Silhouette score where it appears as the first local maximum value.
# + pycharm={"name": "#%%\n"}
# Final clustering with the chosen k (first local maximum of the silhouette score).
n_clusters = 6
labels = eval_k_means(KMeans(n_clusters=n_clusters, random_state=0),
                      data=X_reduced,
                      n_clusters=n_clusters)
df_zone = pd.DataFrame([(x, y) for x,y in zip(zones, labels)], columns=["zone", "cluster"])
# Shift cluster labels from 0-based to 1-based for presentation.
df_zone.loc[:, 'cluster'] = df_zone.loc[:, 'cluster'] + 1
df_zone.zone = df_zone.zone.astype(str)
df_zone.to_csv(os.path.join(data_path, 'zone_poi_cluster.csv'), index=False)
# -
# #### 3.2.5 Visualise the results of cluster
# + pycharm={"name": "#%%\n"}
# Visualise the clusters that have distinct land use pattern
figure_path = os.path.join(Path(os.getcwd()).parent.parent, 'figures/')
# Parallel-coordinates plot: one axis per POI category, one line per zone,
# coloured by its cluster label.
vf.parallel_coordinates(X,
                        df_zone.loc[:, 'cluster'].values,
                        fig_folder=figure_path,
                        fig_name="clusters_parallel.png",
                        save=True)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 3.3 Add cluster info back to the complete accident dataset
# Remove some columns.
# + pycharm={"name": "#%%\n"}
# Attach the land-use cluster to every accident record via its zone.
df = pd.merge(df, df_zone.loc[:, ['zone', 'cluster']], left_on='zone', right_on='zone', how='left')
df.cluster.fillna('Unknown', inplace=True)
df.cluster = df.cluster.apply(lambda x: str(int(x)) if x != 'Unknown' else x)
# Coordinates and zone are no longer needed downstream; drop them.
df.drop(columns=['lat', 'long', 'zone'], inplace=True)
df.iloc[0:3].transpose()
# + pycharm={"name": "#%%\n"}
# Persist the enriched dataset for the next stage (s4).
df.to_csv(os.path.join(data_path, 'data_s3.csv'), index=False)
| src/analysis/s3_land-use-clusters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grid geometry
#
# In the previous example, we have interpolated the data over a cylindrical grid-of-interest. This geometry however is irrelevant to the interpolation itself, as nodes of the grid are considered each independently of the others. Therefore the library offers three options for the grid's geometry, namely cartesian, spherical, and cylindrical. Here are a non-exhaustive list of advantages and drawbacks of each geometry.
#
# The cartesian geometry might be one's first choice. It might be more intuitive for visualisation purposes. However we will see in the next paragraphs that most of the physically relevant cuts and profiles can be more accurately and as easily retrieved from the spherical geometry. One great advantage of the cartesian geometry is the straightforward calculation of VDF velocity derivatives, using a finite-difference method.
#
# The cylindrical geometry allows a direct averaging over the gyro-angle (angle around the B-field direction), when working in a B-field aligned frame. It then provides a 2-dimensional array with cartesian coordinates in the $(v_{para}, v_{perp})$-plane, as used above for a direct comparison with the cartesian binned data.
#
# The spherical geometry is also defined with this physically relevant gyro-angle, together with the total speed (radial component) and the pitch-angle, the angle between a velocity vector and the magnetic field. It is for many purposes the most useful geometry to use, allowing a straightforward averaging over the gyro-angle, and allowing to treat, process the data along the pitch-angle, independently from the speed, or energy, as used in the scaled and normalised views of the VDF (cf. [1](./mms_overview.ipynb) and [2](./whistler_cyclotron.ipynb)).
#
# The spherical symmetry makes it easy to select physically relevant profiles and cuts, by simply averaging over one of the three dimensions, over a selected range of the coordinates along that dimension. If one is interested in a parallel profile of the interpolated VDF,
| 02_velocity_distribution_tool/03_grid_geometry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python391jvsc74a57bd063fd5069d213b44bf678585dea6b12cceca9941eaf7f819626cde1f2670de90d
# ---
# ___
# # Analyzing Outliers
# ## Analyzing extreme values with univariate methods.
# ___
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn import datasets
# %matplotlib inline
# Default figure size for all plots in this notebook.
rcParams['figure.figsize'] = 7.5, 6
# Load the iris dataset into a DataFrame with feature-name columns.
iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns= iris.feature_names)
# -
# ### Using boxplots to identify outliers
# Boxplot of all four iris features; points beyond the whiskers are candidate outliers.
df.boxplot()
plt.show()
width_sepal = df['sepal width (cm)']
# Rows with unusually large sepal width (above the upper whisker).
iris_outliers = (width_sepal > 4)
df[iris_outliers]
# Rows with unusually small sepal width (below the lower whisker).
iris_outliers = (width_sepal < 2.05)
df[iris_outliers]
# ### Applying tukey's method for identifying outliers
pd.options.display.float_format = '{:.2f}'.format
X_df = pd.DataFrame(df['sepal width (cm)'])
summary = X_df.describe()
print(summary)
# +
#### Calculating the Tukey fences from the quartiles
# -
# Interquartile range: 75th percentile minus 25th percentile.
iqr = summary.loc['75%'] - summary.loc['25%']
iqr
# Lower fence (Q1 - 1.5 * IQR): values below are flagged as outliers.
inf_limit = summary.loc['25%'] - 1.5 * iqr
inf_limit
# Upper fence (Q3 + 1.5 * IQR): values above are flagged as outliers.
sup_limit = summary.loc['75%'] + 1.5 * iqr
sup_limit
| Jupyter/Univariate_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Imports:
import os
import pandas as pd
import re
import numpy as np
# Filenames and paths:
# +
# Paths for input data and generated output.
inputpath = './exampleData/inputs'
outputpath = './exampleData'
# Metabolite (transformation) definition file.
f_met = inputpath + '/sampleTransformationFile.txt'
# Peak lists for the two time points.
f1name = inputpath + '/Time0.xlsx'
f2name = inputpath + '/Time5.xlsx'
# -
# Import metabolite list and calculate element columns to match format:
# +
#import dataframe of metabolites
df_met = pd.read_csv(f_met, sep="\t")
df_met.rename(columns={'no': 'Formula'}, inplace=True)

# Add a zero-initialised count column for each tracked element.
for element in ('C', 'H', 'N', 'O', 'S', 'P'):
    df_met[element] = pd.Series([0] * len(df_met.index))

# Parse each molecular formula (e.g. 'C6H12O6') and record the element counts.
# Label-based .loc assignment replaces the original chained indexing
# (df_met[col][row] = ...), which triggers SettingWithCopyWarning and can
# silently fail to write under pandas copy-on-write - so the global warning
# suppression is no longer needed.
# NOTE(review): formulas are assumed to contain only C/H/N/O/S/P; an element
# outside this set would create a new column via .loc (the old code raised
# KeyError instead) - confirm against the input file.
for row in range(len(df_met.index)):
    for element, count in re.findall(r'([A-Z][a-z]*)(\d*)', df_met['Formula'][row]):
        df_met.loc[row, element] = 1 if count == '' else int(count)

print('Metabolites:')
print(df_met)
# -
# Do subtraction and comparison:
# +
#import two peak lists (Time0 and Time5)
df1 = pd.read_excel(f1name)
df2 = pd.read_excel(f2name)

#output file: one (metabolite name, match count) row per metabolite
f_out_name = outputpath + '/' + 'CountForEachMetabolite.csv'

#slice dataframes into only the element-count columns to compare
elem_cols = ['C', 'H', 'N', 'O', 'S', 'P']
df1comp = df1.loc[:, elem_cols]
# BUG FIX: this frame was previously sliced from df1 instead of df2, so the
# Time5 peaks were never actually part of the comparison.
df2comp = df2.loc[:, elem_cols]
df_metcomp = df_met.loc[:, elem_cols]

# `with` guarantees the summary file is closed even if a row fails.
with open(f_out_name, 'w') as f_out:
    f_out.write('Name,Count\n')
    for row in range(len(df_met.index)):
        # Shift every Time0 peak by the metabolite's element counts ...
        met = df_metcomp.loc[row]
        df1sub = df1comp - met
        # ... and flag the shifted peaks that also occur in Time5.
        df = pd.merge(df1sub, df2comp, how='left', indicator='Exist')
        df['Exist'] = np.where(df.Exist == 'both', True, False)
        matches = df1[df.Exist == True]
        print(matches)
        name = outputpath + '/met' + str(row) + '_' + df_met['Isomeric'][row] + '.csv'
        matches.to_csv(name)
        print(df_met.loc[row][0], '\t', len(matches.index))
        f_out.write(str(df_met.loc[row][0]) + ',' + str(len(matches.index)) + '\n')
# -
| SOM_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# **[ODE-01]** 必要なモジュールをインポートして、乱数のシードを設定します。
# +
# Python 2 / TensorFlow 1.x era code: cPickle, tf.placeholder and
# tf.set_random_seed are legacy APIs.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle

# Fix both NumPy's and TensorFlow's RNG so results are reproducible.
np.random.seed(20160703)
tf.set_random_seed(20160703)
# -
# **[ODE-02]** Load the image and label data from the data file "ORENIST.data".
with open('ORENIST.data', 'rb') as file:
    images, labels = pickle.load(file)
# **[ODE-03]** 画像データにフィルターとプーリング層を適用する計算式を用意します。
# +
# Inputs: flattened 28x28 grayscale images, reshaped to NHWC for conv2d.
x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1,28,28,1])
# Two 5x5 convolution filters; abs() makes the response sign-invariant.
W_conv = tf.Variable(tf.truncated_normal([5,5,1,2], stddev=0.1))
h_conv = tf.abs(tf.nn.conv2d(x_image, W_conv,
                             strides=[1,1,1,1], padding='SAME'))
# Cut off weak responses below 0.2, then 2x2 max-pool down to 14x14.
h_conv_cutoff = tf.nn.relu(h_conv-0.2)
h_pool =tf.nn.max_pool(h_conv_cutoff, ksize=[1,2,2,1],
                       strides=[1,2,2,1], padding='SAME')
# -
# **[ODE-04]** Feed the pooling-layer output into an "extended output layer" made of a fully connected layer and a softmax function.
# +
# 14*14*2 = 392 pooled features -> 2 tanh hidden units -> 3-class softmax.
h_pool_flat = tf.reshape(h_pool, [-1, 392])
num_units1 = 392
num_units2 = 2
w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
b2 = tf.Variable(tf.zeros([num_units2]))
hidden2 = tf.nn.tanh(tf.matmul(h_pool_flat, w2) + b2)
w0 = tf.Variable(tf.zeros([num_units2, 3]))
b0 = tf.Variable(tf.zeros([3]))
p = tf.nn.softmax(tf.matmul(hidden2, w0) + b0)
# -
# **[ODE-05]** 誤差関数 loss、トレーニングアルゴリズム train_step、正解率 accuracy を定義します。
# Cross-entropy loss against one-hot labels t, Adam optimizer, and
# accuracy as the fraction of argmax matches.
t = tf.placeholder(tf.float32, [None, 3])
loss = -tf.reduce_sum(t * tf.log(p))
train_step = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# **[ODE-06]** Prepare a session and initialize the Variables.
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# **[ODE-07]** Repeat the parameter optimization 200 times.
# Full-batch training; report loss/accuracy every 10 steps.
i = 0
for _ in range(200):
    i += 1
    sess.run(train_step, feed_dict={x:images, t:labels})
    if i % 10 == 0:
        loss_val, acc_val = sess.run(
            [loss, accuracy], feed_dict={x:images, t:labels})
        print ('Step: %d, Loss: %f, Accuracy: %f'
               % (i, loss_val, acc_val))
# **[ODE-08]** 最適化後の畳込みフィルターの値と、最初の9個分の画像データに対して、畳み込みフィルターとプーリング層を適用した結果を取得します。
# Fetch the trained filters plus the conv and pooling activations for the
# first 9 images.
filter_vals, conv_vals, pool_vals = sess.run(
    [W_conv, h_conv_cutoff, h_pool], feed_dict={x:images[:9]})
# **[ODE-09]** Display the result of applying the convolution filters as images.
# +
# Row 1: original images; rows 2-3: the two filter responses.  The filter
# kernels themselves are drawn in the leftmost column.
fig = plt.figure(figsize=(10,3))
v_max = np.max(conv_vals)
for i in range(2):
    subplot = fig.add_subplot(3, 10, 10*(i+1)+1)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(filter_vals[:,:,0,i],
                   cmap=plt.cm.gray_r, interpolation='nearest')
for i in range(9):
    subplot = fig.add_subplot(3, 10, i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.set_title('%d' % np.argmax(labels[i]))
    subplot.imshow(images[i].reshape((28,28)), vmin=0, vmax=1,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    subplot = fig.add_subplot(3, 10, 10+i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(conv_vals[i,:,:,0], vmin=0, vmax=v_max,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    subplot = fig.add_subplot(3, 10, 20+i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(conv_vals[i,:,:,1], vmin=0, vmax=v_max,
                   cmap=plt.cm.gray_r, interpolation='nearest')
# -
# **[ODE-10]** Display the result of applying the convolution filters and the pooling layer as images.
# +
# Same layout as above, but showing the 14x14 pooled activations.
fig = plt.figure(figsize=(10,3))
v_max = np.max(pool_vals)
for i in range(2):
    subplot = fig.add_subplot(3, 10, 10*(i+1)+1)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(filter_vals[:,:,0,i],
                   cmap=plt.cm.gray_r, interpolation='nearest')
for i in range(9):
    subplot = fig.add_subplot(3, 10, i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.set_title('%d' % np.argmax(labels[i]))
    subplot.imshow(images[i].reshape((28,28)), vmin=0, vmax=1,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    subplot = fig.add_subplot(3, 10, 10+i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(pool_vals[i,:,:,0], vmin=0, vmax=v_max,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    subplot = fig.add_subplot(3, 10, 20+i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(pool_vals[i,:,:,1], vmin=0, vmax=v_max,
                   cmap=plt.cm.gray_r, interpolation='nearest')
| Chapter04/ORENIST dynamic filter example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Active Subspaces Example Function: Piston Cycle Time
#
# ##### <NAME>, CO School of Mines, <<EMAIL>>
# ##### <NAME>, CO School of Mines, <<EMAIL>>
#
# <br>
#
# In this tutorial, we'll be applying active subspaces to the function
#
# $$
# C = 2\pi\sqrt{\frac{M}{k+S^2\frac{P_0V_0}{T_0}\frac{T_a}{V^2}}},
# $$
#
# where $$
# V = \frac{S}{2k}\left(\sqrt{A^2+4k\frac{P_0V_0}{T_0}T_a}-A\right),\\
# A=P_0S+19.62M-\frac{kV_0}{S},
# $$as seen on [http://www.sfu.ca/~ssurjano/piston.html](http://www.sfu.ca/~ssurjano/piston.html). This function models the cycle time of a piston within a cylinder, and its inputs and their distributions are described in the table below.
#
# Variable|Symbol|Distribution (U(min, max))
# :-----|:-----:|:-----
# piston Weight|$M$|U(30, 60)
# piston Surface Area|$S$|U(.005, .02)
# initial Gas Volume|$V_0$|U(.002, .01)
# spring Coefficient|$k$|U(1000, 5000)
# atmospheric Pressure|$P_0$|U(90000, 110000)
# ambient Temperature|$T_a$|U(290, 296)
# filling Gas Temperature|$T_0$|U(340, 360)
# +
import active_subspaces as ac
import numpy as np
# %matplotlib inline
# The piston_functions.py file contains two functions: the piston function (piston(xx))
# and its gradient (piston_grad(xx)). Each takes an Mx7 matrix (M is the number of data
# points) with rows being normalized inputs; piston returns a column vector of function
# values at each row of the input and piston_grad returns a matrix whose ith row is the
# gradient of piston at the ith row of xx with respect to the normalized inputs
from piston_functions import *
# -
# First we draw M samples randomly from the input space.
# +
M = 1000 #This is the number of data points to use
#Sample the input space according to the distributions in the table above
# NOTE: the piston weight M is named M0 here to avoid clashing with the
# sample count M.
M0 = np.random.uniform(30, 60, (M, 1))
S = np.random.uniform(.005, .02, (M, 1))
V0 = np.random.uniform(.002, .01, (M, 1))
k = np.random.uniform(1000, 5000, (M, 1))
P0 = np.random.uniform(90000, 110000, (M, 1))
Ta = np.random.uniform(290, 296, (M, 1))
T0 = np.random.uniform(340, 360, (M, 1))
#the input matrix: one row per sample, columns ordered (M, S, V0, k, P0, Ta, T0)
x = np.hstack((M0, S, V0, k, P0, Ta, T0))
# -
# Now we normalize the inputs, linearly scaling each to the interval $[-1, 1]$.
# +
#Upper and lower limits for inputs (same order as the columns of x)
xl = np.array([30, .005, .002, 1000, 90000, 290, 340])
xu = np.array([60, .02, .01, 5000, 110000, 296, 360])
#XX = normalized input matrix, each column mapped linearly onto [-1, 1]
XX = ac.utils.misc.BoundedNormalizer(xl, xu).normalize(x)
# -
# Compute gradients to approximate the matrix on which the active subspace is based.
#output values (f) and gradients (df) at the normalized samples;
# df has one gradient per row, taken w.r.t. the normalized inputs
f = piston(XX)
df = piston_grad(XX)
# Now we use our data to compute the active subspace.
#Set up our subspace using the gradient samples (500 bootstrap replicates
# for the error estimates)
ss = ac.subspaces.Subspaces()
ss.compute(df=df, nboot=500)
# We use plotting utilities to plot eigenvalues, subspace error, components of the first 2 eigenvectors, and 1D and 2D sufficient summary plots (plots of function values vs. active variable values).
# +
#Component labels for the eigenvector plots (input order of x)
in_labels = ['M', 'S', 'V0', 'k', 'P0', 'Ta', 'T0']
#plot eigenvalues, subspace errors
ac.utils.plotters.eigenvalues(ss.eigenvals, ss.e_br)
ac.utils.plotters.subspace_errors(ss.sub_br)
#manually make the subspace 2D for the eigenvector and 2D summary plots
ss.partition(2)
#Compute the active variable values: project samples onto the active directions
y = XX.dot(ss.W1)
#Plot eigenvectors, sufficient summaries (function value vs. active variables)
ac.utils.plotters.eigenvectors(ss.W1, in_labels=in_labels)
ac.utils.plotters.sufficient_summary(y, f)
| tutorials/test_functions/piston/piston_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp experiment.mining.ir.unsupervised.w2v
# -
# # Experimenting Neural Unsupervised Approaches for Software Information Retrieval [w2v]
#
# > Just Paper. Full Experimentation. This module is dedicated to experiment with word2vec. Consider to Copy the entire notebook for a new and separeted empirical evaluation.
# > Implementing mutual information analysis
# > Author: @danaderp April 2020
# > Author: @danielrc Nov 2020
# This copy is for Cisco purposes. It was adapted to process private github data from cisco.
from ds4se.mining.ir import *
from prg import prg
import ds4se as ds
import numpy as np
import logging
# Only errors are logged; raise to INFO/DEBUG when debugging the pipeline.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
# # Artifacts Similarity with BasicSequenceVectorization
#
# We test diferent similarities based on [blog](https://www.kdnuggets.com/2017/08/comparing-distance-measurements-python-scipy.html) and [blog2](https://www.kdnuggets.com/2019/01/comparison-text-distance-metrics.html)
# ## Experients Set-up
path_data = '../dvc-ds4se/' #dataset path
#Experiments 1.1.2 <<-- word2vec
path_model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k'
# NOTE(review): path_data already ends with '/' and the next segment starts
# with '/', producing a double slash — harmless on POSIX, but confirm.
path_to_trained_model = path_data+'/models/wv/bpe32k/[word2vec-Py-Java-SK-500-20E-32k-1593748814.350487].model'
def sacp_params():
    """Assemble the experiment configuration for the sacp-python-common system.

    Bundles the vectorization/link settings, corpus location, ground-truth
    mapping file, and output paths into a single parameter dictionary.
    """
    config = {
        "vectorizationType": VectorizationType.word2vec,
        "linkType": LinkType.issue2src,
        "system": 'sacp-python-common',
        "path_to_trained_model": path_to_trained_model,
        "source_type": SoftwareArtifacts.PR.value,
        "target_type": SoftwareArtifacts.PY.value,
        # Location and layout of the preprocessed (BPE 32k) corpus.
        "system_path_config": {
            "system_path": '/tf/data/cisco/sacp_data/[sacp-python-common-all-corpus-1609224778.517111].csv',
            "sep": '~',
            "names": ['ids','bpe32k'],
            "prep": Preprocessing.bpe
        },
        "path_mappings": "/tf/data/cisco/sacp_data/sacp-pr-mappings.csv",
        "saving_path": path_data + 'metrics/traceability/experiments1.1.x/',
        "names": ['Source','Target','Linked?'],
        "model_prefix": path_model_prefix
    }
    return config
parameters = sacp_params()
parameters
# # Artifacts Similarity with Word2Vec
#[step 1]Creating the Vectorization Class
word2vec = ds.mining.ir.Word2VecSeqVect( params = parameters, logging = logging )
#[step 2]NonGroundTruth Computation: distance/entropy metrics per artifact pair
metric_list = [DistanceMetric.WMD,DistanceMetric.SCM,EntropyMetric.MSI_I,EntropyMetric.MI]
#metric_list = [EntropyMetric.MSI_I,EntropyMetric.MI]
word2vec.ComputeDistanceArtifacts( sampling=False, samples = 100, metric_list = metric_list )
word2vec.df_nonground_link.head()
# NOTE(review): duplicate .head() call below — presumably leftover from
# interactive exploration.
word2vec.df_nonground_link.head()
word2vec.df_nonground_link['Target'][1]
#[step 3]Saving Non-GroundTruth Links
word2vec.SaveLinks()
#Loading Non-GroundTruth Links (change the timestamp with the assigned in the previous step)
df_nonglinks = ds.mining.ir.LoadLinks(timestamp=1614004624.212459, params=parameters, logging=logging)
df_nonglinks.head()
word2vec.df_nonground_link = df_nonglinks # Only to load links from file
#[step 4]GroundTruthMatching Testing
#TODO change the path for a param
path_to_ground_truth = parameters['path_mappings']
word2vec.MatchWithGroundTruth(path_to_ground_truth, semeru_format=True)
word2vec.df_ground_link
word2vec.df_ground_link[word2vec.df_ground_link ['Linked?']==1]
word2vec.df_ground_link['Source'][0]
# ## 4.1 Only SACP
#[step 4.1]GroundTruthMatching Testing For CISCO Mappings
word2vec.MatchWithGroundTruth(from_mappings=True)
word2vec.df_ground_link
path_to_ground_truth = parameters['path_mappings']
#[optional]GroundTruth Direct Processing
ground_links = word2vec.ground_truth_processing(path_to_ground_truth)
ground_links # A tuple
#[step 5]Saving GroundTruth Links
word2vec.SaveLinks(grtruth = True)
#Loading GroundTruth Links (change the timestamp with the assigned in the previous step)
df_glinks = ds.mining.ir.LoadLinks(timestamp=1614046787.460911, params=parameters,grtruth = True, logging=logging)
df_glinks.head()
# ### Generating Documentation
# ! nbdev_build_docs #<-------- [Activate when stable]
# ! nbdev_build_lib
from nbdev.export import notebook2script
notebook2script()
# +
# #! pip install -e .
# -
| nbs/5.0_experiment.mining.ir.unsupervised.w2v-exp4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Angel30cansicio/LinearAlgebra_2ndSem/blob/main/Assignment_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="reEds6aaxl6a"
# + [markdown] id="WDyeGd8nxrxs"
# # Linear Algebra for CHE
# ## Laboratory 4 Matrices
# + [markdown] id="C9MbRrcsyENt"
# # Discussion
# + id="xBf9d21jyKPb"
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
# %matplotlib inline
# + [markdown] id="RPuQk2hYzUNO"
# # Matrices
#
# The notation and use of matrices is probably one of the fundamentals of modern computing. Matrices are also handy representations of complex equations or multiple inter-related equations from 2-dimensional equations to even hundreds and thousands of them. A matrix is a type of two-dimensional array in which each data piece has the same size. As a result, every matrix is also a two-dimensional array, but not the other way around. Matrices are crucial data structures for a wide range of mathematical and scientific calculations.
# + [markdown] id="PiIUT7NtzYdl"
#
# + [markdown] id="COEpnWOhz44M"
# $$
# A = \left\{
# \begin{array}\
# x + y \\
# 4x - 10y
# \end{array}
# \right. \\
# B = \left\{
# \begin{array}\
# x+y+z \\
# 3x -2y -z \\
# -x + 4y +2z
# \end{array}
# \right. \\
# C = \left\{
# \begin{array}\
# w-2x+3y-4z \\
# 3w- x -2y +2 \\
# 2w -x + 3y - 2z
# \end{array}
# \right. $$
#
#
#
# + [markdown] id="hw4Ah_OH1czh"
# $$
# A=\begin{bmatrix} 1 & 1 \\ 4 & {-10}\end{bmatrix} \\
# B=\begin{bmatrix} 1 & 1 & 1 \\ 3 & -2 & -1 \\ -1 & 4 & 2\end{bmatrix}\\
# C=\begin{bmatrix}1 & -2 & 3 & -4 \\ 3 & -1 & -2 & 1 \\ 2 & -1 & 3 & -2\end{bmatrix}
# $$
# + [markdown] id="Q6MYIthy3eLI"
# $$A=\begin{bmatrix}
# a_{(0,0)}&a_{(0,1)}&\dots&a_{(0,j-1)}\\
# a_{(1,0)}&a_{(1,1)}&\dots&a_{(1,j-1)}\\
# \vdots&\vdots&\ddots&\vdots&\\
# a_{(i-1,0)}&a_{(i-1,1)}&\dots&a_{(i-1,j-1)}
# \end{bmatrix}
# $$
#
# + id="Yf9XpFV235jd"
## since we'll keep on describing matrices. Let's make a function
def describe_mat(matrix):
    """Print a matrix together with its shape and rank (number of dimensions)."""
    report = f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\n'
    print(report)
# + colab={"base_uri": "https://localhost:8080/"} id="gNfPEsWBA1Ez" outputId="54f52a56-4dff-4aa7-d4ca-fb428d21e5d4"
## Declaring a 2 x 2 matrix
A = np.array ([
    [1, 2],
    [3, 1]
])
# Describe it: square 2x2, rank 2.
describe_mat(A)
# + colab={"base_uri": "https://localhost:8080/"} id="ILJSKv9UBIOk" outputId="a106801a-3b7f-4a52-dee0-ed01ec3b5082"
## Declaring a 3 x 2 matrix
B = np.array ([
    [1, 2],
    [3, 1],
    [5, 8]
])
# Describe it: non-square 3x2, rank 2.
describe_mat(B)
# + [markdown] id="cL9OMGrE-tZk"
# ## Categorizing Matrices
# Row and Column Matrices
#
# We define a row and column matrix in this code by using np.array, which adds strong data structures to Python that ensure rapid calculations with arrays and matrices, as well as a large library of high-level mathematical functions that operate on these arrays and matrices. A matrix can be implemented as a nested list in Python (list inside a list). Each element is regarded as a matrix row. Columns are the designations we assign to the two dimensions of a matrix or more by convention.
# + colab={"base_uri": "https://localhost:8080/"} id="b9es99MxBQ59" outputId="7333584f-009a-42a5-b9ab-666840aed2d1"
## Declaring a Row Matrix
row_mat_1D = np.array ([
    1, 3, 2
]) ## this is a 1-D Matrix with a shape of (3,), it's not really considered as a row matrix
row_mat_2D = np.array ([
    [1,2,3,-4]
]) ## this is a 2-D matrix with shape (1, 4): a proper row matrix
describe_mat(row_mat_1D)
describe_mat(row_mat_2D)
# + colab={"base_uri": "https://localhost:8080/"} id="1nZa_Y-hBHna" outputId="fbafe27d-6c27-42b0-c5ec-297917790137"
## Declaring a Column Matrix
col_mat = np.array ([
    [3],
    [5],
    [1]
]) ## this is a 2-D Matrix with a shape of (3,1)
describe_mat(col_mat)
# + [markdown] id="aXNZM6eJBoQ5"
# ## Square Matrices
# A matrix is a rectangular data or number structure. It's a rectangular array of data or numbers, in other words. In a matrix, the horizontal entries are referred to as 'rows,' while the vertical elements are referred to as 'columns.'
#
# + id="zcKYZbiG97kK"
def describe_mat(matrix):
    """Print a matrix, its shape, its rank, and whether it is square."""
    rows, cols = matrix.shape[0], matrix.shape[1]
    is_square = rows == cols
    print(f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs square: {is_square}\n')
# + colab={"base_uri": "https://localhost:8080/"} id="3uZhECowCB8S" outputId="7fc36b47-6206-4730-8905-c0880bd38d2b"
# A 3x3 (square) and a 2x3 (non-square) example for describe_mat.
square_mat = np.array ([
    [3,5,1],
    [1,2,3],
    [1,5,9]
])
non_square_mat = np.array ([
    [3,5,2],
    [1,2,4]
])
describe_mat(square_mat)
describe_mat(non_square_mat)
# + [markdown] id="tn5ipqn9CM6h"
# ## Null Matrix
# In a relational database, a null value is used when a column's value is unknown or missing. A null value is neither an empty string nor a zero value (for character or datetime data types) (for numeric data types). It's a unique object that symbolizes the lack of a value. A None is returned by any function that does not return anything.
# + id="QtyzLBVzCmff"
def describe_mat(matrix):
    """Describe a matrix, or report that it is empty (null)."""
    if matrix.size == 0:
        print('Matrix is Null')
        return
    is_square = matrix.shape[0] == matrix.shape[1]
    print(f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}\n')
# + colab={"base_uri": "https://localhost:8080/"} id="0A-xmAH_FJei" outputId="750a2efb-11a2-426f-80f0-102845c68245"
# An empty array exercises the "Matrix is Null" branch of describe_mat.
null_mat = np.array([])
describe_mat(null_mat)
# + [markdown] id="fX_YuC8-DFJR"
# ## Zero Matrix
# Simple solutions to algebraic equations involving matrices are possible with Zero Matrices. The zero matrix, for example, can be defined as an additive group, making it a useful variable in situations when an unknown matrix must be solved.
#
# + colab={"base_uri": "https://localhost:8080/"} id="x9Ant5YHDIiy" outputId="d70742f6-9c22-4717-869d-ac7873145ddc"
# All-zeros matrices in three shapes: row (1x2), square (2x2), rectangle (3x2).
zero_mat_row = np.zeros ((1,2))
zero_mat_sqr = np.zeros ((2,2))
zero_mat_rct = np.zeros ((3,2))
print(f'Zero Row Matrix: \n{zero_mat_row}')
print(f'Zero Square Matrix: \n{zero_mat_sqr}')
print(f'Zero Rectangle Matrix: \n{zero_mat_rct}')
# + colab={"base_uri": "https://localhost:8080/"} outputId="1b532f05-6e95-4c4e-de39-d3e4a7688e37" id="B3HLQia5D7S_"
# All-ones matrices in three shapes.
# NOTE(review): a true "row matrix" would be 1 x n; the (2, 3) shape is kept
# as in the original exercise.
ones_mat_row = np.ones((2, 3))
ones_mat_sqr = np.ones((3, 3))
ones_mat_rct = np.ones((4, 3))
print(f'Ones Row Matrix: \n{ones_mat_row}')
print(f'Ones Square Matrix: \n{ones_mat_sqr}')
# BUG FIX: this line previously printed ones_mat_row again instead of the
# rectangular matrix.
print(f'Ones Rectangle Matrix: \n{ones_mat_rct}')
# + [markdown] id="JUTF1rxfE47p"
# ## Diagonal Matrix
# Many parts of linear algebra use diagonal matrices. Because of the above-mentioned straightforward description of the matrix operation and eigenvalues/eigenvectors, a diagonal matrix is commonly used to describe a given matrix or linear map. We can extract a diagonal element from a matrix and output it as a one-dimensional matrix.
# + colab={"base_uri": "https://localhost:8080/"} id="vczZzxjCFV9f" outputId="4b31657b-f309-45ac-f866-42a07c136b8e"
# NOTE(review): shown under "Diagonal Matrix", but this array has nonzero
# off-diagonal entries, so it is not actually diagonal.
np.array ([
    [3,5,1],
    [0,3,1],
    [7,8,9]
])
# + colab={"base_uri": "https://localhost:8080/"} id="2EoGw9nLFoLN" outputId="60c0d483-fcb0-4721-b748-167b73c95551"
# np.diag builds a 4x4 matrix with [2,3,4,5] on the main diagonal;
# the comparison confirms it is square.
d = np.diag([2,3,4,5])
d.shape[0]==d.shape[1]
d
# + [markdown] id="W27DnmLBF1V7"
# ## Identity Matrix
# A squared matrix (infinite ratio of rows and columns) with all diagonal values equal to 1 is an identity matrix. All of the other spots, on the other hand, have a value of 0. The NumPy identity() method assists us with this and delivers an identity matrix as asked.
# + colab={"base_uri": "https://localhost:8080/"} id="-AwvFhtbF3_C" outputId="5a157f0e-a874-4f36-d1ed-e74fde33853d"
# 3x3 identity matrix via np.eye.
np.eye (3)
# + colab={"base_uri": "https://localhost:8080/"} id="3MBxz2C1GWEw" outputId="e92aa1be-9b15-4ff1-db30-f594aa1fcdf3"
# 5x5 identity matrix via np.identity (equivalent to np.eye(5)).
np.identity(5)
# + [markdown] id="hkNlGbvbGhE4"
# ## Upper Triangular Matrix
# All entries below the significant diagonal are zero in the upper triangular matrix. A suitable triangular matrix is the upper triangular matrix, whereas a left triangular matrix is the lower triangular matrix.
# + colab={"base_uri": "https://localhost:8080/"} id="LiiKL37DGm4r" outputId="445147e3-a438-417a-f354-7633d18e3f0e"
# NOTE(review): presented as an upper triangular matrix, but row 2 has a
# nonzero entry (1) below the diagonal — confirm the intended values.
np.array([
    [2,3,4,5],
    [1,1,8,1],
    [0,0,8,0],
    [0,0,0,3]
])
# + [markdown] id="q3xUC1NbHG9k"
# ## Lower Triangular Matrix
# A lower triangular matrix has entries that are zero above the main diagonal. Lower triangular matrices are also known as left triangular matrices. Lower triangular matrices are square matrices with zero entries above the main diagonal.
# + colab={"base_uri": "https://localhost:8080/"} id="7wwQZKbqfp50" outputId="5fb52116-b3f3-4c64-a864-931cab189953"
# Lower triangular matrix: all entries above the main diagonal are zero.
np.array([
    [1,0,0],
    [5,3,0],
    [3,5,1]
])
# + colab={"base_uri": "https://localhost:8080/"} id="ymTEAqjRgeWd" outputId="7229b242-46ce-4e6b-f1c8-0a240282aa42"
# NOTE(review): duplicate of the previous cell (same matrix).
np.array([
    [1,0,0],
    [5,3,0],
    [3,5,1]
])
# + [markdown] id="mnxq7RvXHbqp"
# ## Practice 1
# 1. Given the linear combination below, try to create a corresponding matrix representing it.
# + [markdown] id="L0i7vLEWJ01e"
# :$$\theta = 5x + 3y - z$$
#
# + [markdown] id="McEbjrIELqHf"
#
# $$
# \theta = \begin{bmatrix} 5 & 3 & -1\end{bmatrix} \\
# $$
#
# ```
#
#
# + id="IAn4cXvTPSbJ"
def describe_mat (matrix):
    """Display a matrix along with its shape and rank."""
    print('Matrix:\n{m}\n\nShape:\t{s}\nRank:\t{r}\n'.format(m=matrix, s=matrix.shape, r=matrix.ndim))
# + id="SHkKSOhKPRw3" colab={"base_uri": "https://localhost:8080/"} outputId="e2d7caaa-9a94-4e12-c29f-1e9673b9fb41"
# Matrix form of the linear combination theta = 5x + 3y - z.
theta = np.array ([
    [5,3,-1]
])
describe_mat (theta)
# + [markdown] id="KQVZUXaoHt-A"
# 2. Given the system of linear combinations below, try to encode it as a matrix. Also describe the matrix.
# + [markdown] id="dSFAGg6FKIeW"
# $$
# A = \left\{\begin{array}\
# 5x_1 + 2x_2 +x_3\\
# 4x_2 - x_3\\
# 10x_3
# \end{array}\right.
# $$
#
# + id="mju-EsLFOK6P"
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
# %matplotlib inline
# + id="VL7OLFnUNiWP"
def describe_mat(matrix):
    """Print a matrix, its shape and rank, and whether it is square."""
    n_rows = matrix.shape[0]
    n_cols = matrix.shape[1]
    is_square = (n_rows == n_cols)
    print(f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs square: {is_square}\n')
# + colab={"base_uri": "https://localhost:8080/"} id="ug7mvc5tNj4n" outputId="f76380a5-543f-45b4-fe5f-686d78abc099"
# Coefficient matrix of the system
#   5x1 + 2x2 +  x3
#         4x2 -  x3
#               10x3
# BUG FIX: the (0, 0) entry was 1, but the first equation's x1 coefficient
# is 5 per the stated system.
A = np.array([
    [5, 2, 1],
    [0, 4, -1],
    [0, 0, 10]
])
describe_mat(A)
# + [markdown] id="Ab7_FjkvH8FG"
# 3. Given the matrix below, express it as a linear combination in a markdown and a LaTeX markdown
#
#
# + id="bcOflwqSK4uH"
# Example 3x3 matrix used for the LaTeX / linear-combination exercises.
G = np.array([
    [1,7,8],
    [2,2,2],
    [4,6,7]
])
# + [markdown] id="lhurVH18PkYb"
# $$
# G = \begin{bmatrix} 1 & 7 & 8 \\ 2 & 2 & 2 \\ 4 & 6 & 7\end{bmatrix}
# $$
# + [markdown] id="RQIStnsjPoYa"
# $$
# G = \left\{
# \begin{array}\
# 1 + 7 + 8 \\
# 2 + 2 + 2 \\
# 4 + 6 + 7 \\
# \end{array}
# \right. \\
# $$
# + [markdown] id="_uwNRM-VJK4y"
# 4. Given the matrix below, display the output as a LaTeX markdown also express it as a system of linear combinations.
#
# + id="p8pQ1TdPSZPr" colab={"base_uri": "https://localhost:8080/"} outputId="f3f03b1b-ef2a-4d81-c4fc-b8a6e120a753"
# Lower-triangular part of G: entries above the diagonal zeroed out.
H = np.tril(G)
H
# + colab={"base_uri": "https://localhost:8080/"} id="k8weE6YdSBNV" outputId="7241a3b4-4e01-4f44-8bae-8b5c98631f50"
# NOTE(review): a bare list literal — it only echoes the expected value of
# np.tril(G) for display; it has no side effect.
([[1,0,0],
  [2,2,0],
  [4,6,7]])
# + [markdown] id="drahQWJfSgEH"
# $$
# H = \begin{bmatrix} 1 & 0 & 0 \\ 2 & 2 & 0 \\ 4 & 6 & 7\end{bmatrix}
# $$
# + [markdown] id="zuGuHVaCSrgI"
# $$
# H = \left\{
# \begin{array}\
# 1 + 0 + 0 \\
# 2 + 2 + 0 \\
# 4 + 6 + 7 \\
# \end{array}
# \right. \\
# $$
# + [markdown] id="iGRZiAdhLHUT"
# ## Matrix Algebra
# Addition
# They are adding two matrices by adding the corresponding elements together, known as matrix addition in mathematics. However, other operations for matrices, such as the direct sum and the Kronecker sum, can also be termed addition.
# + colab={"base_uri": "https://localhost:8080/"} id="fQCqHMZRLK-z" outputId="23e416e9-3ea3-4e17-b987-332463d03bc9"
# Element-wise matrix addition requires equal shapes (both 3x2 here).
A = np.array([
    [1,2],
    [3,4],
    [6,7]
])
B = np.array([
    [2,2],
    [0,4],
    [5,1]
])
A+B
# + colab={"base_uri": "https://localhost:8080/"} id="REhCCNxVLV1a" outputId="38f06b8b-0fb9-4d04-ab91-bbd239cf5caa"
# The scalar 3 is broadcast to every element of A.
3+A ##Broadcasting
# + [markdown] id="CH6gk5IlLW7i"
# ## Subtraction
# If two matrices have the same order or dimensions, they can be subtracted. To subtract two or more matrices, they must each have the same number of rows and columns. If the elements of two matrices are in the same order, subtracting one from the other is feasible.
# + colab={"base_uri": "https://localhost:8080/"} id="2C6z9QBBLcSm" outputId="e97111ce-9a41-4511-ada4-ecbfce8bd339"
# Element-wise matrix subtraction, again with matching 3x2 shapes.
A = np.array([
    [1,2],
    [3,4],
    [6,7]
])
B = np.array([
    [2,2],
    [0,4],
    [5,1]
])
A-B
# + colab={"base_uri": "https://localhost:8080/"} id="1nIx1laVLhuf" outputId="d0b0024f-3c87-49e7-f578-95f1af549b07"
# The scalar 6 is broadcast against every element of B.
6-B ##Broadcasting
# + [markdown] id="wjTeXfBpLovw"
# ## Element-wise Multiplication
# Elements of the first matrix are multiplied by the corresponding component of the second matrix in element-wise matrix multiplication (also known as Hadamard Product). Each matrix must be of the exact dimensions when doing element-wise matrix multiplication.
# + colab={"base_uri": "https://localhost:8080/"} id="rJ8c81Z_LsWF" outputId="45bbef44-9736-4151-a06e-33483abb22a5"
# Hadamard (element-wise) product, not matrix multiplication.
A*B
# + colab={"base_uri": "https://localhost:8080/"} id="YQs25MdaLvjY" outputId="0855da7e-db99-4c06-963f-97299cdcf914"
# Scalar multiplication broadcast over every element of A.
4*A
# + [markdown] id="BfuArObZg0Qn"
# ## Activity 2
#
# + [markdown] id="7DKLZmD6g8wi"
# ## Task 1
# + [markdown] id="Yy2q9ECEg_Uc"
# Create a function named `mat_desc()` that througouhly describes a matrix, it should: <br>
# 1. Displays the shape, size, and rank of the matrix. <br>
# 2. Displays whether the matrix is square or non-square. <br>
# 3. Displays whether the matrix is an empty matrix. <br>
# 4. Displays if the matrix is an identity, ones, or zeros matrix <br>
#
# Use 3 sample matrices in which their shapes are not lower than $(3,3)$.
# In your methodology, create a flowchart and discuss the functions and methods you have done. Present your results in the results section showing the description of each matrix you have declared.
# + id="fh4Nd-k7oysq" colab={"base_uri": "https://localhost:8080/"} outputId="e38f1e18-3d60-46bd-f943-5beda636b406"
import numpy as np

# Task 1: read a matrix from the user and describe it — shape, rank,
# squareness, emptiness, and whether it is an identity/ones/zeros matrix.
X = int(input("Number of rows:"))
Y = int(input("Number of columns:"))
print("Elements per row (values will be separated by space): ")
entries = list(map(int, input().split()))
matrix = np.array(entries).reshape(X, Y)

def describe_mat(matrix):
    """Print the matrix, its shape, its rank, and whether it is square."""
    is_square = matrix.shape[0] == matrix.shape[1]
    print(f'\nMatrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}')

describe_mat(matrix)

# BUG FIX: the original tested the literal `False` (dead branch) and built
# `is_empty` as an element-wise comparison; emptiness means size == 0.
if matrix.size == 0:
    print('The matrix is empty')
else:
    print('The matrix is not empty')

# Identity check: diagonal entries must be 1, off-diagonal entries 0.
# BUG FIX: the original computed this flag (`point`) but never reported it.
point = 0
for m in range(len(matrix)):
    for s in range(len(matrix[0])):
        if m == s and matrix[m][s] != 1:
            point = 1
            break
        elif m != s and matrix[m][s] != 0:
            point = 1
            break
if point == 0 and matrix.shape[0] == matrix.shape[1]:
    print('The matrix is an identity matrix')
else:
    print('The matrix is not an identity matrix')

# Zeros-matrix check.
if np.all(matrix == 0):
    print('The matrix only have 0')
else:
    print('The matrix has non-zero items')

# Ones-matrix check (the original reused the misleading name is_all_zero).
if np.all(matrix == 1):
    print('The matrix only have 1')
else:
    print('The matrix non-ones items')
# + [markdown] id="-SVuUDpdhL2k"
# ## Function
# + id="dyzL9n8UhK0c"
## Function
# + id="xlrXdfcRhhIp"
## Matrix Declarations
# + id="R_9NYbGlhlsR"
## Test Areas
# + [markdown] id="y9w9cAV4hpMn"
# ## Task 2
# + [markdown] id="NnOV4vXxhrng"
# Create a function named mat_operations() that takes in two matrices as input parameters. It should:
#
# Determine if the matrices are viable for operation and return your own error message if they are not viable.
# Return the sum of the matrices.
# Return the difference of the matrices.
# Return the element-wise multiplication of the matrices.
# Return the element-wise division of the matrices.
# Use 3 sample matrices in which their shapes are not lower than (3,3). In your methodology, create a flowchart and discuss the functions and methods you have done. Present your results in the results section showing the description of each matrix you have declared.
# + colab={"base_uri": "https://localhost:8080/"} id="q7iWviTZIDTG" outputId="fc59e5b7-21db-43fd-e789-fd49e056ecf4"
import numpy as np

def mat_operation(a, b):
    """Element-wise sum, difference, product, and quotient of two matrices.

    Parameters: a, b -- 2-D sequences (lists of rows or numpy arrays).
    Returns [sum, difference, product, quotient] as nested lists, or the
    string "Operation not possible" when the shapes differ.
    """
    rows, cols = len(a), len(a[0])
    # BUG FIX: the original used `and`, so shapes were accepted whenever
    # only one dimension differed.
    if len(b) != rows or len(b[0]) != cols:
        return "Operation not possible"
    # BUG FIX: the original built results with [[0]*c]*r (all rows aliased
    # to one list) and reused the name `d` for both difference and
    # quotient, so the difference was silently lost.
    total = [[a[i][j] + b[i][j] for j in range(cols)] for i in range(rows)]
    diff = [[a[i][j] - b[i][j] for j in range(cols)] for i in range(rows)]
    prod = [[a[i][j] * b[i][j] for j in range(cols)] for i in range(rows)]
    quot = [[a[i][j] / b[i][j] for j in range(cols)] for i in range(rows)]
    return [total, diff, prod, quot]

# Generate two random 3x3 matrices and show every element-wise operation.
x = np.random.randint(100, size=(3, 3))
y = np.random.randint(100, size=(3, 3))
print("x= ", x)
print("y= ", y)
s, d, p, q = mat_operation(x, y)
print("x+y", s)
print("x-y", d)
print("x*y", p)
print("x/y", q)
# + [markdown] id="zxDEp1hYhy-V"
# ## Conclusion
# + [markdown] id="xYCr5zJ2h1mN"
# For your conclusion synthesize the concept and application of the laboratory. Briefly discuss what you have learned and achieved in this activity. Also answer the question: "How can matrix operations solve problems in technology?"
| Assignment_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics, Start
import keyboard

FRAME_RATE = 1000 / 120  # 120 frames per second.
NUM_LIVES = 5


def main():
    """Run the breakout game loop: wait for space, then animate until the
    player wins (all bricks cleared) or runs out of lives."""
    start = Start()
    # The game only starts once the space bar is pressed.
    if keyboard.read_key() == "space":
        graphics = BreakoutGraphics()
        start.window.close()
        lives = NUM_LIVES
        while True:
            # NOTE(review): this check duplicates the one after losing a
            # life below; only one is strictly needed.
            if lives <= 0:
                break
            else:
                pause(FRAME_RATE)
                # One frame: bounce, move the ball, handle brick hits,
                # refresh the lives display.
                graphics.reflect()
                graphics.move()
                graphics.remove_and_score()
                graphics.countlives()
                # Ball fell below the window: lose a life and reset.
                if graphics.ball.y >= graphics.window.height:
                    lives -= 1
                    graphics.reset_ball()
                    graphics.switch_off()
                    if lives <= 0:
                        break
                # Stop when every brick has been cleared.
                finished = graphics.finished()
                if finished:
                    break


if __name__ == '__main__':
    main()
# -
| Ball Game (Version-4).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the Titanic passenger data and take a first look at it.
df = pd.read_csv('titanic_full.csv')
df.head(5)
df.tail(5)
df.columns
# Drop the leftover CSV index column.
df = df.drop(columns='Unnamed: 0')
df.shape
df.info()
# BUG FIX: `np.object` was removed in NumPy 1.24; the builtin `object`
# selects the same object-dtype (string) columns for describe().
df.describe(include=object)
df.isnull().sum()
# Drop columns with fewer than 500 non-null values (thresh = keep threshold).
df.dropna(axis=1,thresh=500,inplace=True) # Threshold....
df.info()
df[df['Embarked'].isnull()] # Where are the null values?
# +
#Filtrando -+ onde esta a maioria
df[df['Survived']==1][df['Pclass']==1][df['Sex']=='female'][df['Fare']==80]
# -
df['Embarked'].fillna('C',inplace=True)
df[df['Embarked'].isnull()] # Onde estão os valores nulos ?
df.info()
df[df['Age'].isnull()]
df['Age'].isnull().sum()
df[df['Age'],df['Sex']=='male'].mean()
df['Age'].describe()
df['Age'].fillna(df['Age'].mean(),inplace=True)
df.info()
df['Age'].describe()
df.head(5)
df.info()
df['Embarked'].nunique()
# Basic seaborn boxplot syntax:
# sns.boxplot(x='categorical_col', y='numeric_col', data=df)
#
import seaborn as sns
sns.boxplot(x='Sex', y='Age', data=df)
# Create age classes: bin edges for the age groups.
age_cat_edges = [0, 3, 12, 18, 35, 60, 1000]
# Create a new column by binning the Age column.
df['age_cat'] = pd.cut(df['Age'],
                       # apply the limits of each category
                       age_cat_edges,
                       # name each category
                       labels=['Babies', 'Children', 'Teen',
                               'Adult', 'MidAge', 'Senior'])
df
sns.boxplot(x='age_cat', y='Age', data=df)
pd.pivot_table(df, values=['Survived'], index=['Sex', 'age_cat'], columns=['Pclass'], aggfunc='count', margins=True).round()
# Build a heat map from a crosstab.
pd.crosstab(index=df["Pclass"], columns=[df['Sex'], df['age_cat']], normalize=True)
cross = pd.crosstab(index=df["Pclass"], columns=[df['Sex'], df['age_cat']], normalize=True).T
cross
sns.heatmap(cross, annot=True)
import matplotlib.pyplot as plt
# Bug fix: `plt.clf` without parentheses is a no-op attribute access;
# call it so the current figure is actually cleared.
plt.clf()
df.hist(figsize=(10, 7))  # histogram of every numeric column
plt.tight_layout()
plt.show()
sns.distplot(df['Age'])
# Bug fix: alpha must be numeric, not the string '0.1'.
df.plot('Sex', 'Survived', kind='scatter', alpha=0.1)
df.plot('Age', 'Fare', kind='scatter', alpha=0.1)
df.plot('Fare', 'Pclass', kind='scatter', alpha=0.1)
df.corr()
sns.heatmap(df.corr().round(2), annot=True, vmin=-1, vmax=1)
# Pair plot: scatter plots of all variable pairs, with distributions /
# histograms on the diagonal — a one-command visual summary of the dataset.
sns.pairplot(df)
# Pandas profiling: a detailed automatic EDA report that inspects every row
# and column. Computationally expensive — use with care.
# From the anaconda prompt: conda install -c conda-forge pandas-profiling
from pandas_profiling import ProfileReport
# Bug fixes: the keyword is `title` (lowercase) and the html option key is
# 'style', not 'stile'.
profile = ProfileReport(df, title='Titanic Dataset',
                        html={'style': {'full_width': True}})
| Pandas/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import random
import string
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
import pandas as pd
import tqdm
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
import sys
sys.path.append("../")
from models.kernels import CustomRBFKernel
from models.gp_regression import HealpixGPRegressionModel
# +
# Load plot settings
import sys
sys.path.append("../../spectral_distortions_perturbations/notebooks/")
from plot_params import params
pylab.rcParams.update(params)
cols_default = plt.rcParams['axes.prop_cycle'].by_key()['color']
# +
import torch
import gpytorch
import pyro
from pyro.infer.autoguide import AutoMultivariateNormal, init_to_mean
from pyro.infer import SVI, Trace_ELBO, Predictive, MCMC, NUTS
import pyro.optim as optim
import pyro.distributions as dist
from torch.distributions import constraints
pyro.set_rng_seed(101)
# -
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
def randomString(stringLength=8):
    """Return a random string of lowercase ASCII letters of the given length."""
    alphabet = string.ascii_lowercase
    picks = []
    for _ in range(stringLength):
        picks.append(random.choice(alphabet))
    return ''.join(picks)
# HEALPix resolution used for all maps below.
nside = 64
# +
import sys
sys.path.append("../")
import utils.create_mask as cm

# Point-source mask. NOTE(review): the first assignment is immediately
# overwritten by the 3FGL 0.8-deg mask on the next line — confirm which
# mask is actually intended.
ps_mask = hp.ud_grade(np.load("../data/fermi_data/fermidata_pscmask.npy") > 0, nside_out=nside)
ps_mask = hp.ud_grade(np.load("../data/mask_3fgl_0p8deg.npy") > 0, nside_out=nside)

# Total analysis mask: |b| < 2 deg band cut, ring out to 20 deg, plus the
# custom point-source mask.
mask = cm.make_mask_total(nside=nside, band_mask=True, band_mask_range=2,
                          mask_ring=True, inner=0, outer=20,
                          custom_mask=ps_mask
                          )
hp.mollview(mask, title='', cbar=False, min=0, max=1)
# -
# Fermi counts and spatial templates, downgraded to `nside` with power=-2
# (presumably to conserve total counts — verify against the healpy
# ud_grade documentation).
fermi_counts = hp.ud_grade(np.load('../data/fermi_data/fermidata_counts.npy'), nside_out=nside, power=-2)
temp_bub = hp.ud_grade(np.load('../data/fermi_data/template_bub.npy'), nside_out=nside, power=-2)
temp_dsk = hp.ud_grade(np.load('../data//fermi_data/template_dsk.npy'), nside_out=nside, power=-2)
temp_psc = hp.ud_grade(np.load('../data/fermi_data/template_psc.npy'), nside_out=nside, power=-2)
temp_iso = hp.ud_grade(np.load('../data/fermi_data/template_iso.npy'), nside_out=nside, power=-2)
temp_dif = hp.ud_grade(np.load('../data/fermi_data/template_dif.npy'), nside_out=nside, power=-2)
mO_ics = hp.ud_grade(np.load("../data/fermi_data/ModelO_r25_q1_ics.npy"), nside_out=nside, power=-2)
mO_pibrem = hp.ud_grade(np.load("../data/fermi_data/ModelO_r25_q1_pibrem.npy"), nside_out=nside, power=-2)
mO_tot = mO_ics + mO_pibrem

# Normalize the Bubbles template to unit mean.
temp_bub /= np.mean(temp_bub)

# GP inputs: per-pixel (lon, lat) in radians. Mock data: Poisson draw from
# Model O (norm 13) plus the bubbles, point-source and isotropic templates.
X = torch.tensor(np.radians(hp.pix2ang(nside, np.arange(hp.nside2npix(nside)), lonlat=True)))
Y = np.random.poisson(13. * mO_tot + 1. * temp_bub + 1. * 1. * temp_psc + 1. * temp_iso)
train_x = torch.tensor(X).float().T
train_y = torch.tensor(Y).float()

# Keep only the unmasked pixels for training.
train_x = train_x[~mask]
train_y = train_y[~mask]
len(train_y)
# Poissonian GP model: the diffuse template is modulated by a GP over the
# sky, plus three templates (bub/iso/psc) with uniform priors on their norms.
model = HealpixGPRegressionModel(temp_dif=temp_dif, temps=[temp_bub, temp_iso, temp_psc],
                                 poiss_priors=[dist.Uniform(torch.tensor(0.02), torch.tensor(2.)),
                                               dist.Uniform(torch.tensor(0.02), torch.tensor(2.)),
                                               dist.Uniform(torch.tensor(0.02), torch.tensor(2.))],
                                 temps_label=['bub', 'iso', 'psc'], mask=mask, name_prefix=randomString(8),
                                 num_inducing=80, nside=nside)

# Draw once from the (untrained) guide as a shape sanity check and plot it.
fwd = pyro.sample("blah", model.pyro_guide(train_x)).detach().numpy()
fwd_map = np.zeros(hp.nside2npix(nside))
fwd_map[~mask] = fwd
hp.mollview(fwd_map)
# +
# plt.plot(np.exp(fwd))
# -
# Prior-predictive draw with a fixed kernel lengthscale.
model.covar_module.base_kernel.lengthscale = 0.2
fwd = torch.exp(model.forward(train_x).sample())
fwd_map = np.zeros(hp.nside2npix(nside))
fwd_map[~mask] = fwd
hp.mollview(fwd_map)
# +
# a_locs = pyro.param("a_scales", torch.full((3,), 1.0))
# a_locs
# from pyro.distributions.util import eye_like
# from torch.distributions import constraints
# pyro.param("a_scales", torch.full((3,3), 1.))
# +
# SVI stage 1: single-particle ELBO with a larger learning rate.
# NOTE(review): num_particles is assigned but unused in this stage (the
# multi-particle ELBO line is commented out) — confirm this is intentional.
num_iter = 10000
num_particles = 16
optimizer = pyro.optim.Adam({"lr": 5e-3})
# elbo = pyro.infer.Trace_ELBO(num_particles=num_particles, vectorize_particles=True, retain_graph=True, )
elbo = pyro.infer.Trace_ELBO(retain_graph=True)
svi = pyro.infer.SVI(model=model.model, guide=model.guide, optim=optimizer, loss=elbo)

model.train()
iterator = tqdm.notebook.tqdm(range(num_iter))
# pyro.clear_param_store()
for i in iterator:
    model.zero_grad()
    loss = svi.step(train_x, train_y)
    iterator.set_postfix(loss=loss, lengthscale=model.covar_module.base_kernel.lengthscale.item())
# +
# SVI stage 2: refine with a 64-particle vectorized ELBO and a smaller lr.
num_iter = 5000
num_particles = 64
optimizer = pyro.optim.Adam({"lr": 4e-4})
elbo = pyro.infer.Trace_ELBO(num_particles=num_particles, vectorize_particles=True, retain_graph=True, )
svi = pyro.infer.SVI(model=model.model, guide=model.guide, optim=optimizer, loss=elbo)

model.train()
iterator = tqdm.notebook.tqdm(range(num_iter))
for i in iterator:
    model.zero_grad()
    loss = svi.step(train_x, train_y)
    iterator.set_postfix(loss=loss, lengthscale=model.covar_module.base_kernel.lengthscale.item())
# -
# Here's a quick helper function for getting smoothed percentile values from samples
def percentiles_from_samples(samples, percentiles=[0.05, 0.5, 0.95]):
    """Return the sample vectors at the requested percentiles.

    ``samples`` is a tensor whose first dimension indexes draws; for each
    requested percentile p this selects the draw of rank ``int(n * p)``
    after sorting along dim 0.
    """
    n = samples.size(0)
    ordered, _ = samples.sort(dim=0)
    selected = []
    for p in percentiles:
        selected.append(ordered[int(n * p)])
    return selected
# +
test_x = train_x
model.eval()
with torch.no_grad():
output = model(test_x)
samples = output(torch.Size([100])).exp()
lower, median, upper = percentiles_from_samples(samples)
# -
model.eval()
with torch.no_grad():
predictive = Predictive(model.model, guide=model.guide, num_samples=1000)
pred = predictive(train_x, train_y)
pred.keys()
mm = (mO_tot / temp_dif)[~mask]
lower, median, upper = np.percentile((pred['xvnaogrd.f(x)']).exp(), [5, 50, 95], axis=0)
# +
# lower, median, upper = lower.detach().numpy(), median.detach().numpy(), upper.detach().numpy()
# +
lw_truth = 1.1
fs_title = 18.
fig = plt.figure(constrained_layout=False, figsize=(25, 10))
ax = [None] * 4
gs = fig.add_gridspec(nrows=3, ncols=3, wspace=0.3, hspace=0.6)
ax[0] = fig.add_subplot(gs[:-1, :])
ax[1] = fig.add_subplot(gs[-1, 0])
ax[2] = fig.add_subplot(gs[-1, 1])
ax[3] = fig.add_subplot(gs[-1, 2])
ax[0].fill_between(np.arange(len(lower)), lower, upper, alpha=0.3, color=cols_default[1], label="GP post. draws")
ax[0].plot(median, color=cols_default[1], lw=1.)
ax[0].plot(13 * mm, color=cols_default[0], label="Truth", lw=lw_truth)
ax[0].set_xlabel("Pixel index")
ax[0].set_ylabel(r"Norm\,$\times$\,Mismodeling")
ax[0].set_title(r"\bf{Diffuse mismodeling}", fontsize=fs_title)
ax[0].legend()
ax[0].set_xlim(0, len(train_x))
# ax[0].set_ylim(0, 20)
ax[1].hist((pred['bub'].detach().numpy()), label="Post. draws", color=cols_default[1], alpha=0.5, bins=np.linspace(0.5, 1.5, 50));
ax[1].axvline(1., label="Truth", color=cols_default[0], lw=lw_truth)
ax[1].set_xlabel("Bubbles norm.")
ax[1].set_title(r"\bf{Bubbles template}", fontsize=fs_title)
ax[1].legend()
ax[2].hist((pred['iso'].detach().numpy()), label="Post. draws", color=cols_default[1], alpha=0.5, bins=np.linspace(0.5, 1.5, 50));
ax[2].axvline(1., label="Truth", color=cols_default[0], lw=lw_truth)
ax[2].set_xlabel("Iso norm.")
ax[2].set_title(r"\bf{Isotropic template}", fontsize=fs_title)
ax[2].legend()
ax[3].hist((pred['psc'].detach().numpy()), label="Post. draws", color=cols_default[1], alpha=0.5, bins=np.linspace(0.5, 1.5, 50));
ax[3].axvline(1., label="Truth", color=cols_default[0], lw=lw_truth)
ax[3].set_xlabel("PS norm.")
ax[3].set_title(r"\bf{Point source template}", fontsize=fs_title)
ax[3].legend()
# -
from getdist import plots, MCSamples
import getdist
names = ["Bubbles norm", "Iso norm", "PS norm"]
labels = names
samples = pred['states'].detach().numpy()
samples = MCSamples(samples=samples, names=names, labels=labels)
g = plots.get_subplot_plotter()
g.triangle_plot([samples], filled=True)
| notebooks/02_poiss-gp-healpix-gpytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ramonVDAKKER/teaching/blob/main/EMAS_intro_data_science_short_demo_image_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="TgFB9rhmv-VC"
# # Demo image classification, using pretrained model, and transfer learning
#
#
# + [markdown] id="w2UVDj07wBS9"
# ## 0. Imports
# + id="B2A2TnFevwHp"
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import decode_predictions
import matplotlib.pyplot as plt
from PIL import Image
from urllib import request
from io import BytesIO
from keras.models import Sequential
from keras.models import Model
from keras import optimizers
from keras.layers import GlobalAveragePooling2D, Dropout, Dense
import tensorflow_hub as hub
import tensorflow as tf
# + [markdown] id="FZz8rihnwJP9"
# ## 1. Load model
# + [markdown] id="iVfwVLvLwN_K"
# We load the InceptionV3 model which is included in the keras package.
# + id="QEUt19tYwGas"
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
model = InceptionV3()
model.summary()
# + [markdown] id="ypOmVORqwxpy"
# # 2. Using the model
# + [markdown] id="bx_Ygqhhx48P"
# Choose a photo.
# + id="zhcrqXYkx8m4"
url = "https://uitgelatenhond.nl/wp-content/uploads/2020/12/Newfoundlander.jpg"
res = request.urlopen(url).read()
image = Image.open(BytesIO(res)).resize((299, 299))
plt.imshow(image)
# + [markdown] id="sVghrGq_3Ki8"
# Transform the image to numerical data.
# + id="UskU5fldyRnq"
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
print(image)
# + [markdown] id="KcyBkYnI3TY-"
# Use the model to classify the image.
#
# + id="EhC7ET6lhMfd"
model.predict(image)
# + id="PLx-f5nzz8vX"
predicted_label = decode_predictions(model.predict(image))
for item in predicted_label[0]:
print(f"Label {item[1]} has probability: {item[2]}.")
# + [markdown] id="H9ieiBTUyivq"
# ## 3. Transfer learning
# + [markdown] id="r1303tID5zLe"
# Code partially taken from https://www.tensorflow.org/hub/tutorials/tf2_image_retraining
# + [markdown] id="VFg1mq9659a8"
# Load dataset with flower images:
# + id="EfeFWnPo0_tH"
# Download and cache the TF flowers dataset (5 classes).
data_dir = tf.keras.utils.get_file("flower_photos",
                                   "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz", untar=True)

# Rescale raw pixel values from [0, 255] to [0, 1].
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)


def build_dataset(subset):
    # Build (dataset, class_names, size) for the given subset ("training"
    # or "validation"); 70/30 split with a fixed seed so the two subsets
    # are disjoint and reproducible.
    dataset = tf.keras.preprocessing.image_dataset_from_directory(data_dir,
                                                                  validation_split=.30,
                                                                  subset=subset,
                                                                  label_mode="categorical",
                                                                  seed=123,
                                                                  image_size=(299, 299),
                                                                  batch_size=1)
    # With batch_size=1 above, cardinality() counts individual images.
    size = dataset.cardinality().numpy()
    classes = tuple(dataset.class_names)
    # Re-batch to the real training batch size.
    dataset = dataset.unbatch().batch(32)
    # dataset = dataset.repeat()
    dataset = dataset.map(lambda images, labels: (normalization_layer(images), labels))
    # dataset = dataset.map(lambda images, labels: (preprocess_input(images), labels))
    return dataset, classes, size


train_data, class_names, train_size = build_dataset("training")
validation_data, _, validation_size = build_dataset("validation")
# + [markdown] id="_j_-4NnnMJoz"
# Let us inspect a few images:
# + id="NZs1n1K20Htj"
x, y = next(iter(validation_data))
k = 5
predicted_label = decode_predictions(model.predict(x[0 : k, :, :, :]))
for i in range(0, k):
image = x[i, :, :, :]
true_index = np.argmax(y[i])
plt.imshow(image)
plt.axis('off')
plt.title(class_names[true_index])
plt.show()
# + [markdown] id="uX7rYZO19zor"
# InceptionV3, with our setting to consider 1,000 classes, is not specific enough to identify the flowers:
# + id="KT7ZPCa50kk6"
for item in predicted_label[i]:
print(f"Label {item[1]} has probability: {item[2]}.")
# + [markdown] id="Ew87QGiC-Fuz"
# Using the small (!) flower dataset and InceptionV3 we will train a neural network. First we specify the model:
# + id="RvL7uwnp_bwF"
# Build the transfer-learning classifier: a frozen InceptionV3 feature
# extractor from TF-Hub with a small trainable head for the 5 flower classes.
# Dead-code fix: the original also instantiated a Keras
# `base_model = InceptionV3(include_top=False, ...)` here, which downloaded
# the weights but was never used (the feature extractor below comes from
# TF-Hub); that allocation has been removed.
model = Sequential([
    tf.keras.layers.InputLayer(input_shape=(299, 299) + (3,)),
    hub.KerasLayer("https://tfhub.dev/google/imagenet/inception_v3/feature_vector/5", trainable=False),
    tf.keras.layers.Dropout(rate=0.4),
    tf.keras.layers.Dense(5, kernel_regularizer=tf.keras.regularizers.l2(0.0001))
])
model.build([None, 299, 299, 3])
model.summary()
# + id="JpUClCPdEnHH"
model.compile(
    optimizer=tf.keras.optimizers.SGD(),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=0.1),
    metrics=['accuracy'])
# Number of full 32-image batches per split.
steps_per_epoch = train_size // 32
validation_steps = validation_size // 32
# Bug fix: the original computed steps_per_epoch above and then ignored it,
# hard-coding steps_per_epoch=70 in fit(); use the computed value.
hist = model.fit(train_data, epochs=50, steps_per_epoch=steps_per_epoch,
                 validation_data=validation_data).history
| EMAS_intro_data_science_short_demo_image_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Loading Data
# read in the extracted text file
with open('data/text8') as f:
text = f.read()
# # Pre-processing
#
# 1) Relpace punctuation with tokenized words
#
# 2) Split the data by words
#
# 3) Remove words that appear less than six times
#
# 4) Sort the words from least to most frequent
#
# 5) Create an input embedding layer ('the': '0')
#
# 6) Create an output embedding layer ('0': 'the')
# +
import re
from collections import Counter
def preprocess(text):
    """Lowercase *text*, replace punctuation with word tokens, and drop rare words.

    Returns the list of words, keeping only those that occur more than 5
    times so that rare words do not pollute the embedding vocabulary.

    Fix: the original repeated the '?' replacement twice; the replacements
    are now table-driven so each punctuation mark appears exactly once.
    """
    # Replace punctuation with tokens so we can use them in our model.
    replacements = {
        '.': ' <PERIOD> ',
        ',': ' <COMMA> ',
        '"': ' <QUOTATION_MARK> ',
        ';': ' <SEMICOLON> ',
        '!': ' <EXCLAMATION_MARK> ',
        '?': ' <QUESTION_MARK> ',
        '(': ' <LEFT_PAREN> ',
        ')': ' <RIGHT_PAREN> ',
        '--': ' <HYPHENS> ',
        # '\n': ' <NEW_LINE> ',  # kept disabled, as in the original
        ':': ' <COLON> ',
    }
    text = text.lower()
    for mark, token in replacements.items():
        text = text.replace(mark, token)
    words = text.split()

    # Remove all words with 5 or fewer occurrences.
    word_counts = Counter(words)
    trimmed_words = [word for word in words if word_counts[word] > 5]

    return trimmed_words
# + tags=[]
# get list of words
words = preprocess(text)
# print some stats about this word data
print("Total words in text: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
# -
def create_lookup_tables(words):
    """
    Create lookup tables for vocabulary
    :param words: Input list of words
    :return: Two dictionaries, vocab_to_int, int_to_vocab
    """
    counts = Counter(words)
    # Most frequent word gets index 0; ties keep first-seen order because
    # Python's sort is stable.
    by_frequency = sorted(counts, key=counts.get, reverse=True)
    int_to_vocab = dict(enumerate(by_frequency))
    vocab_to_int = {token: index for index, token in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
vocab_to_int, int_to_vocab = create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
# # Subsampling
#
# For each word $w_i$ in the training set, we'll discard it with probability given by
#
# $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
#
# where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
# +
from collections import Counter
import random
import numpy as np

# Mikolov-style subsampling: drop word w_i with probability
# P(w_i) = 1 - sqrt(t / f(w_i)), where f is the word's relative frequency
# and t the threshold, so very frequent words are thinned out.
threshold = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count/total_count for word, count in word_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}
# discard some frequent words, according to the subsampling equation
# create a new list of words for training
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]
# +
def get_target(words, idx, window_size=5):
    ''' Get a list of words in a window around an index. '''
    # Random window reach in [1, window_size], clipped at the list start.
    reach = np.random.randint(1, window_size + 1)
    lo = max(idx - reach, 0)
    hi = idx + reach
    # Everything in [lo, hi] except the center word itself.
    return list(words[lo:idx] + words[idx + 1:hi + 1])


def get_batches(words, batch_size, window_size=5):
    ''' Create a generator of word batches as a tuple (inputs, targets) '''
    # Truncate to full batches only.
    n_full = len(words) // batch_size
    words = words[:n_full * batch_size]

    for start in range(0, len(words), batch_size):
        batch = words[start:start + batch_size]
        inputs, targets = [], []
        for pos, center in enumerate(batch):
            context = get_target(batch, pos, window_size)
            targets.extend(context)
            # Repeat the center word once per context word.
            inputs.extend([center] * len(context))
        yield inputs, targets
# + tags=[]
int_text = [i for i in range(20)]
x,y = next(get_batches(int_text, batch_size=4, window_size=5))
# -
# # Validation
def cosine_similarity(embedding, valid_size=16, valid_window=100, device='cpu'):
    """ Returns the cosine similarity of validation words with words in the embedding matrix.
        Here, embedding should be a PyTorch embedding module.
    """
    # sim = (a . b) / |a||b| against a random probe set: half of the ids
    # come from [0, valid_window) (most frequent words), half from
    # [1000, 1000 + valid_window) (less frequent words).
    weights = embedding.weight
    norms = weights.pow(2).sum(dim=1).sqrt().unsqueeze(0)  # |b| per row

    frequent = random.sample(range(valid_window), valid_size // 2)
    rarer = random.sample(range(1000, 1000 + valid_window), valid_size // 2)
    probe_ids = torch.LongTensor(np.append(np.array(frequent), rarer)).to(device)

    probe_vectors = embedding(probe_ids)
    similarities = torch.mm(probe_vectors, weights.t()) / norms

    return probe_ids, similarities
# # Skip Gram Negative Sampling Implementation of Word2Vec Algorithm
# +
import torch
from torch import nn
import torch.optim as optim
# Custom Loss function
class NegativeSamplingLoss(nn.Module):
    """Skip-gram negative-sampling loss.

    Maximizes log sigma(u_o . v_i) for true (center, context) pairs and
    log sigma(-u_n . v_i) for sampled noise words, returned as the negated
    batch mean.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input_vectors, output_vectors, noise_vectors):
        batch_size, embed_size = input_vectors.shape

        # Reshape for batch matrix multiplication: centers as column
        # vectors, contexts as row vectors.
        centers = input_vectors.view(batch_size, embed_size, 1)
        contexts = output_vectors.view(batch_size, 1, embed_size)

        # log sigma(u_o . v_i): loss contribution of the true pair.
        positive = torch.bmm(contexts, centers).sigmoid().log().squeeze()

        # log sigma(-u_n . v_i), summed over the sampled noise vectors.
        negative = torch.bmm(noise_vectors.neg(), centers).sigmoid().log().squeeze().sum(1)

        # Negate and average over the batch.
        return -(positive + negative).mean()
class SkipGramNeg(nn.Module):
    """Skip-gram word2vec model with separate input/output embedding tables
    for negative-sampling training.

    Parameters
    ----------
    n_vocab: vocabulary size
    n_embed: embedding dimensionality
    noise_dist: optional 1-D tensor of unnormalized sampling weights for
        the noise ("negative") words; uniform sampling when None.
    """

    def __init__(self, n_vocab, n_embed, noise_dist=None):
        super().__init__()

        self.n_vocab = n_vocab
        self.n_embed = n_embed
        self.noise_dist = noise_dist

        # Embedding layers for input (center) and output (context) words.
        self.in_embed = nn.Embedding(n_vocab, n_embed)
        self.out_embed = nn.Embedding(n_vocab, n_embed)

        # Initialize embedding tables with a uniform distribution;
        # this helps with convergence.
        self.in_embed.weight.data.uniform_(-1, 1)
        self.out_embed.weight.data.uniform_(-1, 1)

    def forward_input(self, input_words):
        """Look up input embeddings for a batch of center-word ids."""
        return self.in_embed(input_words)

    def forward_output(self, output_words):
        """Look up output embeddings for a batch of context-word ids."""
        return self.out_embed(output_words)

    def forward_noise(self, batch_size, n_samples):
        """ Generate noise vectors with shape (batch_size, n_samples, n_embed)"""
        if self.noise_dist is None:
            # Sample words uniformly.
            noise_dist = torch.ones(self.n_vocab)
        else:
            noise_dist = self.noise_dist

        # Sample word ids from the noise distribution, with replacement.
        noise_words = torch.multinomial(noise_dist,
                                        batch_size * n_samples,
                                        replacement=True)

        # Bug fix: the original consulted a *global* `model` here
        # ("cuda" if model.out_embed.weight.is_cuda ...), which breaks when
        # no global named `model` exists or refers to a different module.
        # Use this module's own parameters to pick the device.
        device = "cuda" if self.out_embed.weight.is_cuda else "cpu"
        noise_words = noise_words.to(device)

        noise_vectors = self.out_embed(noise_words).view(batch_size, n_samples, self.n_embed)

        return noise_vectors
# -
# # Training
# + tags=["outputPrepend"]
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Get our noise distribution
# Using word frequencies calculated earlier in the notebook
word_freqs = np.array(sorted(freqs.values(), reverse=True))
unigram_dist = word_freqs/word_freqs.sum()
noise_dist = torch.from_numpy(unigram_dist**(0.75)/np.sum(unigram_dist**(0.75)))
# instantiating the model
embedding_dim = 300
model = SkipGramNeg(len(vocab_to_int), embedding_dim, noise_dist=noise_dist).to(device)
# using the loss that we defined
criterion = NegativeSamplingLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
print_every = 1500
steps = 0
epochs = 5
# train for some number of epochs
for e in range(epochs):
# get our input, target batches
for input_words, target_words in get_batches(train_words, 512):
steps += 1
inputs, targets = torch.LongTensor(input_words), torch.LongTensor(target_words)
inputs, targets = inputs.to(device), targets.to(device)
# input, output, and noise vectors
input_vectors = model.forward_input(inputs)
output_vectors = model.forward_output(targets)
noise_vectors = model.forward_noise(inputs.shape[0], 5)
# negative sampling loss
loss = criterion(input_vectors, output_vectors, noise_vectors)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# loss stats
if steps % print_every == 0:
print("Epoch: {}/{}".format(e+1, epochs))
print("Loss: ", loss.item()) # avg batch loss at this point in training
valid_examples, valid_similarities = cosine_similarity(model.in_embed, device=device)
_, closest_idxs = valid_similarities.topk(6)
valid_examples, closest_idxs = valid_examples.to('cpu'), closest_idxs.to('cpu')
for ii, valid_idx in enumerate(valid_examples):
closest_words = [int_to_vocab[idx.item()] for idx in closest_idxs[ii]][1:]
print(int_to_vocab[valid_idx.item()] + " | " + ', '.join(closest_words))
print("...\n")
# -
# # Visualize Results
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# getting embeddings from the embedding layer of our model, by name
embeddings = model.in_embed.weight.to('cpu').data.numpy()
viz_words = 380
tsne = TSNE()
embed_tsne = tsne.fit_transform(embeddings[:viz_words, :])
fig, ax = plt.subplots(figsize=(16, 16))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
# -
| DeepLearning/Networks/Word2Vec/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hubble Source Catalog API Notebook
# ### August 2019, <NAME>
#
# A [new MAST interface](https://catalogs.mast.stsci.edu/hsc) supports queries to the current and previous versions of the [Hubble Source Catalog](https://archive.stsci.edu/hst/hsc). It allows searches of the summary table (with multi-filter mean photometry) and the detailed table (with all the multi-epoch measurements). It also has an associated [API](https://catalogs.mast.stsci.edu/docs/hsc.html), which is used in this notebook.
#
# This is based on [HSC Use Case #3](https://archive.stsci.edu/hst/hsc/help/use_case_3_v2.html).
# * It searches the HSC for variable objects in the vicinity of dwarf galaxy IC 1613,
# * shows the positions of those objects in a color-magnitude diagram,
# * extracts light curves for an example object, and
# * displays cutout images from the Hubble observations that were used for the light curve measurements.
#
# The whole process takes only 30 seconds to complete.
#
# This notebook is available for [download](hscv3_api.ipynb). Another [simple notebook](hscv3_smc_api.html) generates a color-magnitude diagram for the Small Magellanic Cloud in only a couple of minutes. A more complex notebook that shows how to access the proper motion tables using the HSC API is also [available](sweeps_hscv3p1_api.html).
# # Instructions:
# * Complete the initialization steps [described below](#Initialization).
# * Run the notebook.
#
# Running the notebook from top to bottom takes about 30 seconds.
#
#
# # Table of Contents
# * [Initialization](#Initialization)
# * [Get metadata on available HSC columns](#metadata)
# * [Find variable objects in IC 1613](#ic1613)
# * [Use MAST name resolver](#resolver)
# * [Search HSC summary table](#summary)
# * [Plot variability index versus magnitude](#variability)
# * [Show variable objects in a color-magnitude diagram](#cmd)
# * [Get HSC light curve for a variable](#lightcurve)
# * [Extract HLA cutout images for the variable](#cutouts)
# # Initialization <a class="anchor" id="Initialization"></a>
# ### Install Python modules
#
# _This notebook requires the use of **Python 3**._
#
# This needs the `requests` and `pillow` modules in addition to the common requirements of `astropy`, `numpy` and `scipy`. For anaconda versions of Python the installation commands are:
#
# <pre>
# conda install requests
# conda install pillow
# </pre>
# +
# %matplotlib inline
import astropy, pylab, time, sys, os, requests, json
import numpy as np
from pprint import pprint
from astropy.table import Table
from astropy.io import ascii
from PIL import Image
from io import BytesIO
# Set page width to fill browser for longer output lines
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# set width for pprint
astropy.conf.max_width = 150
# -
# ## Useful functions
#
# Execute HSC searches and resolve names using [MAST query](https://mast.stsci.edu/api/v0/MastApiTutorial.html).
# +
hscapiurl = "https://catalogs.mast.stsci.edu/api/v0.1/hsc"
def hsccone(ra, dec, radius, table="summary", release="v3", format="csv", magtype="magaper2",
            columns=None, baseurl=hscapiurl, verbose=False,
            **kw):
    """Do a cone search of the HSC catalog

    Parameters
    ----------
    ra (float): (degrees) J2000 Right Ascension
    dec (float): (degrees) J2000 Declination
    radius (float): (degrees) Search radius (<= 0.5 degrees)
    table (string): summary, detailed, propermotions, or sourcepositions
    release (string): v3 or v2
    magtype (string): magaper2 or magauto (only applies to summary table)
    format: csv, votable, json
    columns: list of column names to include (None means use defaults)
    baseurl: base URL for the request
    verbose: print info about request
    **kw: other parameters (e.g., 'numimages.gte':2)
    """
    # Fold the cone parameters into the extra search keywords and delegate
    # to the general search entry point.
    params = dict(kw, ra=ra, dec=dec, radius=radius)
    return hscsearch(table=table, release=release, format=format, magtype=magtype,
                     columns=columns, baseurl=baseurl, verbose=verbose, **params)
def hscsearch(table="summary",release="v3",magtype="magaper2",format="csv",
              columns=None, baseurl=hscapiurl, verbose=False,
              **kw):
    """Do a general search of the HSC catalog (possibly without ra/dec/radius)

    Builds the service URL via cat2url, validates any requested columns
    against the table metadata, and issues an HTTP GET; raises
    requests.HTTPError on a failed request.

    Parameters
    ----------
    table (string): summary, detailed, propermotions, or sourcepositions
    release (string): v3 or v2
    magtype (string): magaper2 or magauto (only applies to summary table)
    format: csv, votable, json
    columns: list of column names to include (None means use defaults)
    baseurl: base URL for the request
    verbose: print info about request
    **kw: other parameters (e.g., 'numimages.gte':2). Note this is required!

    NOTE(review): the parameter name `format` shadows the builtin; kept
    as-is for API compatibility.
    """

    data = kw.copy()
    if not data:
        raise ValueError("You must specify some parameters for search")
    if format not in ("csv","votable","json"):
        raise ValueError("Bad value for format")
    url = "{}.{}".format(cat2url(table,release,magtype,baseurl=baseurl),format)
    if columns:
        # check that column values are legal
        # create a dictionary (case-insensitive set of names) to speed this up
        dcols = {}
        for col in hscmetadata(table,release,magtype)['name']:
            dcols[col.lower()] = 1
        badcols = []
        for col in columns:
            if col.lower().strip() not in dcols:
                badcols.append(col)
        if badcols:
            raise ValueError('Some columns not found in table: {}'.format(', '.join(badcols)))
        # two different ways to specify a list of column values in the API
        # data['columns'] = columns
        data['columns'] = '[{}]'.format(','.join(columns))

    # either get or post works
    # r = requests.post(url, data=data)
    r = requests.get(url, params=data)
    if verbose:
        print(r.url)
    r.raise_for_status()
    if format == "json":
        return r.json()
    else:
        return r.text
def hscmetadata(table="summary",release="v3",magtype="magaper2",baseurl=hscapiurl):
    """Return metadata for the specified catalog and table

    Fetches the /metadata endpoint for the table; raises requests.HTTPError
    on a failed request.

    Parameters
    ----------
    table (string): summary, detailed, propermotions, or sourcepositions
    release (string): v3 or v2
    magtype (string): magaper2 or magauto (only applies to summary table)
    baseurl: base URL for the request

    Returns an astropy table with columns name, type, description
    """
    url = "{}/metadata".format(cat2url(table,release,magtype,baseurl=baseurl))
    r = requests.get(url)
    r.raise_for_status()
    v = r.json()
    # Convert the JSON list of {name, type, description} dicts to an astropy table.
    tab = Table(rows=[(x['name'],x['type'],x['description']) for x in v],
                names=('name','type','description'))
    return tab
def cat2url(table="summary", release="v3", magtype="magaper2", baseurl=hscapiurl):
    """Return URL for the specified catalog and table

    Parameters
    ----------
    table (string): summary, detailed, propermotions, or sourcepositions
    release (string): v3 or v2
    magtype (string): magaper2 or magauto (only applies to summary table)
    baseurl: base URL for the request

    Returns a string with the base URL for this request
    """
    checklegal(table, release, magtype)
    # Only the summary table is split by magnitude type in the API paths.
    # Pass the values explicitly instead of the fragile `.format(**locals())`,
    # which silently depends on local-variable names and exposes all locals.
    if table == "summary":
        url = "{}/{}/{}/{}".format(baseurl, release, table, magtype)
    else:
        url = "{}/{}/{}".format(baseurl, release, table)
    return url
def checklegal(table, release, magtype):
    """Validate a table/release/magtype combination for the HSC API.

    Raises a ValueError exception if the combination is not acceptable;
    returns None otherwise.
    """
    valid_releases = ("v2", "v3")
    if release not in valid_releases:
        raise ValueError("Bad value for release (must be one of {})".format(
            ', '.join(valid_releases)))
    # v2 lacks the proper-motion and source-position tables.
    valid_tables = (("summary", "detailed") if release == "v2"
                    else ("summary", "detailed", "propermotions", "sourcepositions"))
    if table not in valid_tables:
        raise ValueError("Bad value for table (for {} must be one of {})".format(
            release, ", ".join(valid_tables)))
    # magtype only matters for the summary table.
    if table == "summary":
        valid_magtypes = ("magaper2", "magauto")
        if magtype not in valid_magtypes:
            raise ValueError("Bad value for magtype (must be one of {})".format(
                ", ".join(valid_magtypes)))
def mastQuery(request, url='https://mast.stsci.edu/api/v0/invoke'):
    """Perform a MAST query.

    Parameters
    ----------
    request (dictionary): The MAST request json object
    url (string): The service URL

    Returns the returned data content
    """
    # The service expects the JSON-encoded request under the 'request' form field.
    response = requests.post(url, data={'request': json.dumps(request)})
    response.raise_for_status()
    return response.text
def resolve(name):
    """Get the RA and Dec for an object using the MAST name resolver

    Parameters
    ----------
    name (str): Name of object

    Returns RA, Dec tuple with position

    Raises ValueError if the resolver does not recognize the object.
    """
    resolverRequest = {'service': 'Mast.Name.Lookup',
                       'params': {'input': name,
                                  'format': 'json'
                                  },
                       }
    resolvedObjectString = mastQuery(resolverRequest)
    resolvedObject = json.loads(resolvedObjectString)
    # The resolver returns a variety of information about the resolved object,
    # however for our purposes all we need are the RA and Dec
    try:
        objRa = resolvedObject['resolvedCoordinate'][0]['ra']
        objDec = resolvedObject['resolvedCoordinate'][0]['decl']
    except (IndexError, KeyError) as e:
        # Also catch KeyError (missing keys in the response) and chain the
        # original failure so the root cause stays visible in tracebacks.
        raise ValueError("Unknown object '{}'".format(name)) from e
    return (objRa, objDec)
# -
# ## Get metadata on available columns <a name="metadata"></a>
#
# The `metadata` query returns information on the columns in the table. It works for any of the tables in the API (`summary`, `detailed`, `propermotions`, `sourcepositions`).
#
# Note that the summary table has a huge number of columns! Each of the 133 filter/detector combinations has 3 columns with the magnitude, median absolute deviation (MAD, a robust measure of the scatter among the measurements), and the number of independent measurements in the filter. The filter name includes a prefix for the detector (`A`=ACS/WFC, `W3`=WFC3/UVIS or WFC3/IR, `W2`=WFPC2) followed by the standard name of the filter. So for instance all three instruments have an F814W filter, so there are columns for `A_F814W`, `W3_F814W`, and `W2_F814W`.
meta = hscmetadata("summary")
print(len(meta),"columns in summary")
filterlist = meta['name'][19::3].tolist()
print(len(filterlist),"filters")
pprint(filterlist, compact=True)
meta[:19]
# ## Find variable objects in the dwarf irregular galaxy IC 1613 <a name="ic1613"></a>
#
# This is based on [HSC Use Case #3](https://archive.stsci.edu/hst/hsc/help/use_case_3_v2.html), which shows an example of selecting objects from the HSC in portal. This is simple to do using the HSC API.
# ### Use MAST name resolver to get position of IC 1613 <a name="resolver"></a>
target = 'IC 1613'
ra, dec = resolve(target)
print(target,ra,dec)
# ### Select objects with enough measurements to determine variability <a name="summary"></a>
#
# This searches the summary table for objects within 0.5 degrees of the galaxy center that have at least 10 measurements in both ACS F475W and F814W.
# +
# save typing a quoted list of columns
columns = """MatchID,MatchRA,MatchDec,NumFilters,NumVisits,NumImages,StartMJD,StopMJD,
A_F475W, A_F475W_N, A_F475W_MAD,
A_F814W, A_F814W_N, A_F814W_MAD""".split(",")
columns = [x.strip() for x in columns]
columns = [x for x in columns if x and not x.startswith('#')]
constraints = {'A_F475W_N.gte': 10, 'A_F814W_N.gte': 10}
t0 = time.time()
tab = ascii.read(hsccone(ra,dec,0.5,table="summary",release='v3',columns=columns,verbose=True,**constraints))
print("{:.1f} s: retrieved data and converted to {}-row astropy table".format(time.time()-t0, len(tab)))
# clean up the output format
tab['A_F475W'].format = "{:.3f}"
tab['A_F475W_MAD'].format = "{:.3f}"
tab['A_F814W'].format = "{:.3f}"
tab['A_F814W_MAD'].format = "{:.3f}"
tab['MatchRA'].format = "{:.6f}"
tab['MatchDec'].format = "{:.6f}"
tab['StartMJD'].format = "{:.5f}"
tab['StopMJD'].format = "{:.5f}"
tab
# -
# ### Plot object positions on the sky
#
# We mark the galaxy center as well. Note that this field is in the outskirts of IC 1613. The 0.5 search radius (which is the maximum allowed in the API) allows finding these objects.
pylab.rcParams.update({'font.size': 16})
pylab.figure(1,(10,10))
pylab.plot(tab['MatchRA'], tab['MatchDec'], 'bo', markersize=1,
label='{} HSC measurements'.format(len(tab)))
pylab.plot(ra,dec,'rx',label=target,markersize=10)
pylab.gca().invert_xaxis()
pylab.gca().set_aspect('equal')
pylab.xlabel('RA [deg]')
pylab.ylabel('Dec [deg]')
pylab.legend(loc='best')
# ### Plot MAD variability index versus magnitude in F475W <a name="variability"></a>
#
# The median absolute deviation is measured among the ~12 magnitude measurements in the catalog. Some scatter is expected from noise (which increases for fainter objects). Objects with MAD values that are high are likely to be variable.
#
# Select variable objects that are not too faint.
wvar = np.where((tab['A_F475W_MAD']>0.1) & (tab['A_F475W']<24) & (tab['A_F475W']>21))[0]
pylab.rcParams.update({'font.size': 16})
pylab.figure(1,(10,10))
pylab.plot(tab['A_F475W'], tab['A_F475W_MAD'], 'bo', markersize=2,
label='{} HSC measurements near {}'.format(len(tab),target))
pylab.plot(tab['A_F475W'][wvar], tab['A_F475W_MAD'][wvar], 'ro', markersize=5,
label='{} variable candidates'.format(len(wvar)))
pylab.xlabel('A_F475W [mag]')
pylab.ylabel('A_F475W_MAD [mag]')
pylab.legend(loc='best')
# ### Check positions of variable objects in a color-magnitude diagram <a name="cmd"></a>
#
# Note that these objects are generally located in the Cepheid instability strip.
pylab.rcParams.update({'font.size': 16})
pylab.figure(1,(10,10))
b_minus_i = tab['A_F475W'] - tab['A_F814W']
pylab.plot(b_minus_i, tab['A_F475W'], 'bo', markersize=2,
label='{} HSC measurements near {}'.format(len(tab),target))
pylab.plot(b_minus_i[wvar], tab['A_F475W'][wvar], 'ro', markersize=5,
label='{} variable candidates'.format(len(wvar)))
pylab.ylabel('A_F475W [mag]')
pylab.xlabel('A_F475W - A_F814W [mag]')
pylab.gca().invert_yaxis()
pylab.legend(loc='best')
# ### Query the API for the light curve for one of the objects <a name="lightcurve"></a>
#
# Select the most variable object as an example.
wvar = wvar[np.argsort(-tab['A_F475W_MAD'][wvar])]
iselect = wvar[0]
print("MatchID {} B = {:.3f} B-I = {:.3f}".format(
tab['MatchID'][iselect], tab['A_F475W'][iselect], b_minus_i[iselect]))
tab[wvar]
# Get column metadata for detailed observation table (which has time-dependent magnitudes).
meta = hscmetadata("detailed")
print(len(meta),"columns in detailed")
pprint(meta['name'].tolist(), compact=True)
# ### Get separate light curves for F475W and F814W from the detailed table
# +
columns = """MatchID,SourceID,StartMJD,Detector,Filter,MagAper2,Flags,ImageName""".split(",")
columns = [x.strip() for x in columns]
columns = [x for x in columns if x and not x.startswith('#')]
constraints = {'MatchID': tab['MatchID'][iselect], 'Detector': 'ACS/WFC'}
t0 = time.time()
f475 = ascii.read(hscsearch(table="detailed",release='v3',columns=columns,Filter='F475W',**constraints))
f814 = ascii.read(hscsearch(table="detailed",release='v3',columns=columns,Filter='F814W',**constraints))
print("{:.1f} s: retrieved data and converted to {} (F475W) and {} (F814W) row astropy tables".format(time.time()-t0, len(f475), len(f814)))
f475.sort('StartMJD')
f814.sort('StartMJD')
f475['MagAper2'].format = "{:.3f}"
f475['StartMJD'].format = "{:.5f}"
f814['MagAper2'].format = "{:.3f}"
f814['StartMJD'].format = "{:.5f}"
f475
# -
# ### Plot the light curves
#
# The light curves appear well-behaved and are closely correlated in the two filters.
# +
pylab.rcParams.update({'font.size': 16})
pylab.figure(1,(10,10))
pylab.subplot(211)
pylab.plot(f475['StartMJD'], f475['MagAper2'], 'bo', label='ACS/WFC F475W')
pylab.gca().invert_yaxis()
pylab.ylabel('F475W [mag]')
pylab.legend(loc='best')
xlim = pylab.xlim()
pylab.subplot(212)
pylab.plot(f814['StartMJD'], f814['MagAper2'], 'ro', label='ACS/WFC F814W')
pylab.gca().invert_yaxis()
pylab.ylabel('F814W [mag]')
pylab.xlabel('MJD [days]')
pylab.xlim(xlim)
pylab.legend(loc='best')
# -
# ### Extract HLA cutout images for the F475W images <a name="cutouts"></a>
#
# Get HLA F475W cutout images for the example variable. The `get_hla_cutout` function reads a single cutout image (as a JPEG grayscale image) and returns a PIL image object. See the documentation on the [fitscut image cutout service](http://hla.stsci.edu/fitscutcgi_interface.html) for more information on the web service being used.
#
# Examination of the images can be useful to identify cosmic-ray contamination and other possible image artifacts. In this case, no issues are seen, so the light curve is likely to be reliable.
# +
def get_hla_cutout(imagename, ra, dec, size=33, autoscale=99.5, asinh=1, zoom=1):
    """Fetch a JPEG grayscale cutout of an HLA image as a PIL Image.

    The cutout is centered on (ra, dec) and produced by the HLA fitscut service.
    """
    params = dict(ra=ra, dec=dec, size=size, format="jpeg",
                  red=imagename, autoscale=autoscale, asinh=asinh, zoom=zoom)
    response = requests.get("https://hla.stsci.edu/cgi-bin/fitscut.cgi", params=params)
    return Image.open(BytesIO(response.content))
# sort images by magnitude from faintest to brightest
isort = np.argsort(-f475['MagAper2'])
imagename = f475['ImageName'][isort]
mag = f475['MagAper2'][isort]
mjd = f475['StartMJD'][isort]
nim = len(imagename)
ncols = 4 # images per row
nrows = (nim+ncols-1)//ncols
imsize = 15
mra = tab['MatchRA'][iselect]
mdec = tab['MatchDec'][iselect]
pylab.rcParams.update({"font.size":11})
pylab.figure(1,(15, (15/ncols)*nrows))
t0 = time.time()
for k in range(nim):
im1 = get_hla_cutout(imagename[k],mra,mdec,size=imsize)
pylab.subplot(nrows,ncols,k+1)
pylab.imshow(im1,origin="upper",cmap="gray")
pylab.title('{:.5f} f475w={:.3f}'.format(mjd[k],mag[k]))
if ((k+1) % 10)==0:
print("{:.1f} s: finished {} of {}".format(time.time()-t0,k+1,nim))
pylab.tight_layout()
print("{:.1f} s: finished {}".format(time.time()-t0,nim))
# -
| hscv3_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AWS Transcribe
#
# Transcribe an audio file to text.
#
# <a href="https://docs.aws.amazon.com/transcribe/index.html#lang/en_us">AWS Transcribe Documentation</a>
#
# This notebook uploads an audio file to S3, transcribes it, and then deletes the file from S3.
import boto3
import time
import json
import urllib.request
# +
def aws_s3_upload(file_name, bucket_name):
    """Upload a local file to S3, creating the bucket in the EU region if needed."""
    s3 = boto3.resource('s3')
    # Create bucket if it doesn't already exist
    existing_buckets = [bucket.name for bucket in s3.buckets.all()]
    if bucket_name not in existing_buckets:
        s3.create_bucket(Bucket=bucket_name,
                         CreateBucketConfiguration={'LocationConstraint': 'EU'})
        print("Bucket {} created.".format(bucket_name))
    s3.meta.client.upload_file(file_name, bucket_name, file_name)
    print("{} uploaded to {}.".format(file_name, bucket_name))
    return
def aws_s3_delete(file_name, bucket_name, del_bucket=False):
    """Delete a file from an S3 bucket; optionally delete the bucket too.

    Failures are reported but never raised (best-effort cleanup), matching the
    original behaviour.
    """
    s3 = boto3.resource('s3')
    try:
        s3.meta.client.delete_object(Bucket=bucket_name, Key=file_name)
        print("{} deleted from {}.".format(file_name, bucket_name))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print("Unable to delete {} from {}.".format(file_name, bucket_name))
    if del_bucket:
        try:
            s3.meta.client.delete_bucket(Bucket=bucket_name)
            print("Bucket {} deleted.".format(bucket_name))
        except Exception:
            print("Unable to delete bucket {}.".format(bucket_name))
    return
# -
def aws_transcribe(file_name, bucket_name):
    """Start an AWS Transcribe job for an S3 audio file and return the transcript.

    Polls the job status every 10 seconds until it completes or fails.
    Raises RuntimeError if the transcription job ends in FAILED state.
    """
    client = boto3.client(service_name='transcribe',
                          region_name='eu-west-1',
                          use_ssl=True)
    job_name = 'example_%s' % round(time.time())
    job_uri = 's3://%s/%s' % (bucket_name, file_name)
    client.start_transcription_job(
        TranscriptionJobName=job_name,
        Media={'MediaFileUri': job_uri},
        # NOTE(review): assumes a 3-character extension (mp3/wav/...) — confirm callers
        MediaFormat=file_name[-3:],
        LanguageCode='en-US'
    )
    tic = time.time()
    while True:
        status = client.get_transcription_job(TranscriptionJobName=job_name)
        job_status = status['TranscriptionJob']['TranscriptionJobStatus']
        if job_status in ['COMPLETED', 'FAILED']:
            break
        print("Transcription still processing... cumulative run time: {:.1f}s".format(time.time() - tic))
        time.sleep(10)
    # Bug fix: `toc` used to be assigned only inside the loop body, so a job
    # that finished on the first poll raised NameError at the print below.
    toc = time.time()
    if job_status == 'FAILED':
        # A failed job has no transcript; previously this fell through to a KeyError.
        raise RuntimeError("Transcription job {} failed".format(job_name))
    print("Transcription completed! Total run time: {:.1f}s".format(toc - tic))
    json_url = status['TranscriptionJob']['Transcript']['TranscriptFileUri']
    with urllib.request.urlopen(json_url) as url:
        text = json.loads(url.read().decode())
    return text['results']['transcripts'][0]['transcript']
file_name = 'the_raven.mp3'
bucket_name = 'your_bucket_name'
aws_s3_upload(file_name, bucket_name)
result = aws_transcribe(file_name, bucket_name)
aws_s3_delete(file_name, bucket_name)
# Print transcription
print(result)
| AWS/AmazonTranscribe/aws-transcribe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
pd.options.display.max_columns = 200
pd.options.display.max_rows = 1000
sales_pdf = pd.read_csv('../UseCase_3_Datasets/sales_granular.csv')
sales_pdf.shape
sales_pdf.columns
#Create a data_frame with simple sum
sales_simple_sum = sales_pdf.copy(deep=True)
sales_simple_sum = sales_simple_sum.fillna(0)
sales_simple_sum['total_sales'] = sales_pdf.sum(axis=1)
sales_simple_sum.to_csv('../UseCase_3_Datasets/sales_daily_simple_sum.csv')
col_list = sales_pdf.columns
processed_col_list = []
for col in col_list[1:]:
if col.split(" ")[0] not in processed_col_list[:] :
processed_col_list.append(col.split(" ")[0])
# +
#processed_col_list[:10]
# +
def merge_df_day(df):
    """Collapse hourly sales columns ("date hour") into one column per date.

    Column labels after the first are split on the first space; all columns
    sharing the same date prefix are summed. NaNs are treated as zero. The
    'store_code' column is carried through unchanged.
    """
    filled = df.fillna(0)
    daily = pd.DataFrame()
    for label in filled.columns[1:]:
        day = label.split(" ")[0]
        if day in daily.columns:
            daily[day] = daily[day] + filled[label]
        else:
            daily[day] = filled[label]
    daily['store_code'] = filled['store_code']
    return daily
def merge_df_monthly(df):
    """Collapse daily columns ("m/d/y") into monthly totals keyed "m/y".

    NaNs are treated as zero; the 'store_code' column is carried through.
    """
    df = df.fillna(0)
    temp_df = pd.DataFrame()
    # Bug fix: iterate over every column. The previous `df.columns[1:]`
    # silently dropped the first date column — in the daily frame produced by
    # merge_df_day, 'store_code' is the LAST column, and the explicit
    # store_code guard below already handles the id column.
    for col in df.columns:
        if 'store_code' not in col:
            month_str = (col.split("/")[0] + "/" + col.split("/")[2])
            if month_str in temp_df.columns:
                temp_df[month_str] = temp_df[month_str] + df[col]
            else:
                temp_df[month_str] = df[col]
    temp_df['store_code'] = df['store_code']
    return temp_df
def merge_df_yearly(df):
    """Collapse monthly columns ("m/y") into yearly totals keyed "y".

    NaNs are treated as zero; the 'store_code' column is carried through.
    """
    df = df.fillna(0)
    temp_df = pd.DataFrame()
    # Bug fix: iterate over every column. The previous `df.columns[1:]`
    # silently dropped the first month column — 'store_code' sits at the END
    # of the monthly frame, and the guard below already skips it.
    for col in df.columns:
        if 'store_code' not in col:
            yr_str = (col.split("/")[1])
            if yr_str in temp_df.columns:
                temp_df[yr_str] = temp_df[yr_str] + df[col]
            else:
                temp_df[yr_str] = df[col]
    temp_df['store_code'] = df['store_code']
    return temp_df
# -
sales_pdf_daily = merge_df_day(sales_pdf)
sales_pdf_daily.head()
sales_pdf_daily.to_csv("../UseCase_3_Datasets/sales_daily.csv",index=False)
# +
# Importing libraries necessary for this analysis
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# To identify missing records in the dataset
# Yellow - missing values
plt.figure(figsize = (20, 8))
sns.heatmap(sales_pdf_daily.isin([0]),yticklabels=False,cbar=False,cmap='viridis')
# +
# X = sales_pdf_daily.loc[:, sales_pdf_daily.columns != 'store_code']
# y = sales_pdf_daily['store_code']
# X.cumsum().plot(figsize=(10,10))
# plt.show()
# -
sales_pdf_monthly = merge_df_monthly(sales_pdf_daily)
sales_pdf_monthly.head()
sales_pdf_monthly.to_csv("../UseCase_3_Datasets/sales_monthly.csv",index=False)
plt.figure(figsize = (20, 8))
sns.heatmap(sales_pdf_monthly.isin([0]),yticklabels=False,cbar=False,cmap='viridis')
sales_pdf_yearly = merge_df_yearly(sales_pdf_monthly)
sales_pdf_yearly.to_csv("../UseCase_3_Datasets/sales_pdf_yearly.csv",index=False)
sales_pdf_yearly.shape[1]
# +
#Yearly Trend
sales_pdf_yearly['trend'] = 0
for i in range(0,sales_pdf_yearly.shape[1]-3):
#print (i)
diff_df = sales_pdf_yearly.iloc[:,[i+1]].sub(sales_pdf_yearly.iloc[:,[i]],fill_value =0)
#print(diff_df.sum(axis=1).head())
sales_pdf_yearly['trend'] = sales_pdf_yearly['trend'].add(diff_df.sum(axis=1),fill_value=0)
sales_pdf_yearly['trend'] = sales_pdf_yearly['trend'].apply(lambda x : x/(sales_pdf_yearly.shape[1]-3))
sales_pdf_yearly.to_csv("../UseCase_3_Datasets/sales_trend_yearly.csv",index=False)
# -
# +
#diff_df.head(10)
# -
sales_pdf_yearly.head(10)
#Monthly treand
sales_pdf_monthly['trend'] = 0
for i in range(0,sales_pdf_monthly.shape[1]-3):
diff_df = sales_pdf_monthly.iloc[:,[i+1]].sub(sales_pdf_monthly.iloc[:,[i]],fill_value =0)
sales_pdf_monthly['trend'] = sales_pdf_monthly['trend'].add(diff_df.sum(axis=1),fill_value=0)
sales_pdf_monthly['trend'] = sales_pdf_monthly['trend'].apply(lambda x : x/(sales_pdf_monthly.shape[1]-3))
sales_pdf_monthly.to_csv("../UseCase_3_Datasets/sales_trend_monthly.csv",index=False)
sales_pdf_monthly.head()
#Daily trend
sales_pdf_daily['trend'] = 0
for i in range(0,sales_pdf_daily.shape[1]-3):
diff_df = sales_pdf_daily.iloc[:,[i+1]].sub(sales_pdf_daily.iloc[:,[i]],fill_value =0)
sales_pdf_daily['trend'] = sales_pdf_daily['trend'].add(diff_df.sum(axis=1),fill_value=0)
sales_pdf_daily['trend'] = sales_pdf_daily['trend'].apply(lambda x : x/(sales_pdf_daily.shape[1]-3))
sales_pdf_daily.to_csv("../UseCase_3_Datasets/sales_trend_daily.csv",index=False)
sales_pdf_daily.head(20)
# To identify missing records in the dataset
# Yellow - missing values
plt.figure(figsize = (20, 8))
sns.heatmap(sales_pdf_daily.isin([0]),yticklabels=False,cbar=False,cmap='viridis')
sales_pdf = sales_pdf.fillna(0)
print(sales_pdf.shape[1]-2)
#Hourly Trend
sales_pdf['trend'] = 0
for i in range(1,sales_pdf.shape[1]-2):
diff_df = sales_pdf.iloc[:,[i+1]].sub(sales_pdf.iloc[:,[i]],fill_value =0)
sales_pdf['trend'] = sales_pdf['trend'].add(diff_df.sum(axis=1),fill_value=0)
# sales_pdf['trend'] = sales_pdf['trend'].apply(lambda x : x/(sales_pdf.shape[1]-3))
sales_pdf.to_csv("../UseCase_3_Datasets/sales_trend_hourly.csv",index=False)
sales_pdf.head(20)
# +
# plt.figure(figsize = (20, 8))
# sns.heatmap(sales_pdf.isin([0]),yticklabels=False,cbar=False,cmap='viridis')
# +
#Target 1 : Daily Aggregated Trend from Oct 16 onwards :
#Filter all columns with date < 10/1/16
#Calculate Trend
#Find f-regression analysis.
from datetime import datetime as dt
col_list = sales_pdf_daily.columns
for col in col_list[:-2]:
if dt.strptime(col,"%m/%d/%y") < dt.strptime("10/1/16", "%m/%d/%y"):
#print ("Cols to delete", col)
sales_pdf_daily.drop(col, axis = 1, inplace = True)
sales_pdf_daily.head()
#Calculate trend figure
#Daily trend
sales_pdf_daily['trend'] = 0
for i in range(0,sales_pdf_daily.shape[1]-3):
diff_df = sales_pdf_daily.iloc[:,[i+1]].sub(sales_pdf_daily.iloc[:,[i]],fill_value =0)
sales_pdf_daily['trend'] = sales_pdf_daily['trend'].add(diff_df.sum(axis=1),fill_value=0)
sales_pdf_daily['trend'] = sales_pdf_daily['trend'].apply(lambda x : x/(sales_pdf_daily.shape[1]-3))
sales_pdf_daily.to_csv("../UseCase_3_Datasets/sales_filtered_trend_daily.csv",index=False)
# -
sales_pdf_daily.describe()
| .ipynb_checkpoints/Create_Targets-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # _How to Interpolate Data with Scipy_
#
# ## Dr. <NAME>
#
# ---
from scipy.interpolate import interp1d
import numpy as np, matplotlib.pyplot as plt
from scipy import interpolate
NUM_DATA = 11
NUM_INTERPOLATE = 41
# ## Linear interpolation
# +
x = np.linspace(0, 10, num=NUM_DATA, endpoint=True)
y = x**2+2*x-31
f1 = interp1d(x, y, kind='linear')
xnew = np.linspace(0, 10, num=NUM_INTERPOLATE, endpoint=True)
plt.scatter(x, y)
plt.show()
plt.scatter(x, y)
plt.plot(xnew, f1(xnew), color='orange',linestyle='--')
plt.show()
# +
x = np.linspace(0, 10, num=NUM_DATA, endpoint=True)
y = 0.1*x**3+0.25*x**2-7*x+11
f1 = interp1d(x, y, kind='linear')
xnew = np.linspace(0, 10, num=NUM_INTERPOLATE, endpoint=True)
fig, ax = plt.subplots(1,2,figsize=(6,3),dpi=120)
ax[0].scatter(x, y)
ax[0].set_title("Original data")
ax[1].scatter(x, y)
ax[1].plot(xnew, f1(xnew), color='red',linestyle='--')
ax[1].set_title("Interpolation")
plt.show()
# -
# ## Non-polynomial data
x = np.linspace(0, 10, num=11, endpoint=True)
y = np.cos(-x**2/9.0)+np.sin(x/6)
f = interp1d(x, y)
f3 = interp1d(x, y, kind='cubic')
# +
xnew = np.linspace(0, 10, num=41, endpoint=True)
fig, ax = plt.subplots(1,3,figsize=(10,2.5),dpi=120)
ax[0].scatter(x,y)
ax[0].set_title("Original data")
ax[1].plot(x, y, 'o')
ax[1].plot(xnew, f(xnew), color='orange',linestyle='-')
ax[1].legend(['Original','Linear'])
ax[1].set_title("Linear interpolation only")
ax[2].plot(x, y, 'o')
ax[2].plot(xnew, f(xnew), color='orange',linestyle='-')
ax[2].plot(xnew, f3(xnew), color='red',linestyle='--')
ax[2].legend(['Original','Linear','Cubic'])
ax[2].set_title("Linear and cubic splines")
plt.show()
# -
# ## Interpolation and curve-fitting are different
x = np.linspace(0, 10, num=NUM_DATA, endpoint=True)
y = 0.1*x**3+0.25*x**2-7*x+11+ x*np.random.normal(size=NUM_DATA)
# +
f1 = interp1d(x, y, kind='linear')
from scipy.optimize import curve_fit
def func(x, a, b, c):
    """Quadratic model a*x**2 + b*x + c used as the curve_fit target."""
    return a * x ** 2 + b * x + c
def fitted_func(x):
    """Evaluate the quadratic with the coefficients from the global curve_fit
    result `popt` (set by the cell below)."""
    coef_a, coef_b, coef_c = popt
    return coef_a * x ** 2 + coef_b * x + coef_c
popt, _ = curve_fit(func, x, y)
# +
xnew = np.linspace(0, 10, num=41, endpoint=True)
fig, ax = plt.subplots(1,3,figsize=(10,2.5),dpi=120)
ax[0].scatter(x,y)
ax[0].set_title("Original data")
ax[1].plot(x, y, 'o')
ax[1].plot(xnew, f1(xnew), color='orange',linestyle='-')
ax[1].legend(['Original','Interpolated'])
ax[1].set_title("Linear interpolation")
ax[2].plot(x, y, 'o')
ax[2].plot(xnew, f1(xnew), color='orange',linestyle='-')
ax[2].plot(xnew, fitted_func(xnew), color='red',linestyle='--')
ax[2].legend(['Original','Interpolation','Curve-fitting'])
ax[2].set_title("Interpolation and curve-fitting")
plt.show()
# -
# ## Two-dimensional example
x_edges, y_edges = np.mgrid[-1:1:21j, -1:1:21j]
x = x_edges[:-1, :-1] + np.diff(x_edges[:2, 0])[0] / 2.
y = y_edges[:-1, :-1] + np.diff(y_edges[0, :2])[0] / 2.
z = (x+y) * np.exp(-6.0*(x*x+y*y))
plt.figure(dpi=120)
lims = dict(cmap='RdBu_r', vmin=-0.25, vmax=0.25)
plt.pcolormesh(x_edges, y_edges, z, shading='flat', **lims)
plt.colorbar()
plt.title("Sparsely sampled function with 20 x 20 grid")
plt.show()
xnew_edges, ynew_edges = np.mgrid[-1:1:71j, -1:1:71j]
xnew = xnew_edges[:-1, :-1] + np.diff(xnew_edges[:2, 0])[0] / 2.
ynew = ynew_edges[:-1, :-1] + np.diff(ynew_edges[0, :2])[0] / 2.
interp = interpolate.bisplrep(x, y, z, s=0)
znew = interpolate.bisplev(xnew[:,0], ynew[0,:], interp)
plt.figure(dpi=120)
plt.pcolormesh(xnew_edges, ynew_edges, znew, shading='flat', **lims)
plt.colorbar()
plt.title("Interpolated data in a 70 x 70 grid")
plt.show()
| Scipy-interpolate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Datatypes
# --
# >int - Plain integer
#
# >long - long integer, with 'L' suffix. It is deprecated.
#
# >float - Floating point number
#
# >complex - Complex no, Real and Imaginary part
#
# >boolean - True and False
#
# >sequence - string, list, tuple
#
# >mapping - Dictionary
#
# >set - unordered collection of distinct objects
#
# >file - used to make file system objects
# Typecasting
# --
# String/float to int
# type your code here2
int(3.56789)
type(2020)
type('2020')
int('2020')
float('2020.1245')
int('C')
int(float('2020.1245'))
int('Darshan')
float(30)
# type your code here2
# type your code here1
# String/int to float
# type your code here3
float('12.868')
# __Homework:__
#
# As you can see I have Autocomplete feature in my NB. The NB suggests code for me.
# Implement the same in your Jupyter NB.
#
# __Hint: Use "Hinterland", use NBExtensions__
# type your code here2
# + active=""
# # int, float, list, tuple, dictionary to string
# Note: [] is for list
# () is for tuple
# str(3.122)
# -
# type your code here6
l= [1,33,43.21,"Darshan", 3+4j]
print(l)
print(type(l))
l= (1,33,43.21,"Darshan", 3+4j)
print(l)
print(type(l))
# string, tuple, dict to list
# type your code here7
str(12.674)
str(122)
str([1,33,43.21,"Darshan", 3+4j])
str((1,33,43.21,"Darshan", 3+4j))
# type your code here8
list("Darshan")
tuple("Darshan")
# type your code here1-5
# tuple in list
list((1,33,43.21,"Darshan", 3+4j))
# type your code here11
tup = (10,12,15,20)
list(tup)
# Operators
# --
#
# >Arithmetic: + - * / %
# >// Floor Division
# >** exponentiation
#
# >Relational: < <= > >= != ==
#
# >Logical: and or not
#
# >Bitwise: & | ^ ~ << >>
#
# >Membership: in, not in
#
# >Identity: is,is not
# type your code here3
5%2
5//2
5/2
# type your code here9
# 5>2
# 2>89
# 3>=3
# 3==3
# 3!=3
4!=98
# +
# type your code here3
3>1 and 5>2
# -
3>1 and 5<2
3<1 or 5>2
# not(True)
not(False)
not(3<1 or 5>2)
# type your code here19
s = 2
t = 4
print(s&t)
s = 2
t = 4
print(s|t)
x=2
print(x<<1)
print(x<<1)
print(x<<4)
# +
# type your code here2.0
# Membership: in, not in
l = [1,2,3,4,6,5,"Darshan"]
6 in l
# -
100 in l
# "darshan" in l
"Darshan" in l
577 not in l
"darshan" not in l
"r" in "Darshan"
"x" in "Darshan"
# type your code here21
# type your code here22
# +
# type your code here2/3
x = 4
y = 4
# x is y
x is not y
# -
2.4
# type your code here2.5
l = [10,20,30,40,50,60]
print(l)
print(l[0])
print(l[5])
# print(l[55])
# Looping Statements:
# --
# type your code here4-4
l = [10,20,30,40,50,60]
for i in l:
print(i)
l = [10,11,12,13,14,15,16,17,18,19,20]
for i in l:
if i%2==0:
print(i," is even number")
else:
print(i," is odd number")
# type your code here4-5
name = ["Darshan", "Lets Upgrade", "Smart Students", "Learners", 1999, 2020]
for dell in name:
print(dell)
# +
# type your code here4-6
# -
# range function:
# --
#
# >range(5): It goes from 0 to 4 to generate a sequence of numbers
#
# >range(start, stop, stepsize): Default stepsize is 1
range(5)
# type your code here1*3
for i in range(5):
print(i)
for i in range(2,5):
print(i)
for i in range(2,10,2):
print(i)
for i in range(2,10,3):
print(i)
# +
# Program to display sum of numbers
# type your code here1.9
# Accumulate 0..4; use a name that does not shadow the built-in `sum`.
total = 0
for i in range(5):
    total = total + i
print("Sum=", total)
# +
# Program to display even numbers in given range
# type your code here
# -
# while
# type your code here4-7
i = 1
while i<=5:
print(i)
i = i+1 # i++ is not supported in Python
i = 10
while i>=0:
print(i,end="\t")
i = i-2 # i++ is not supported in Python
# type your code here5--/2
name = " Darshan"
i = 0
while i<=7:
d = name[i]
print(d)
i = i+1
# type your code here2+0
for i in range(5):
for j in range(i):
print("*", end=" ")
print("\n")
# Conditional execution
# --
# type your code here 31
x = 3
if x==3:
print("Hi. Condition met")
print("Lets upgrade")
print("I am happy")
# type your code here 31
x = 3
if x==3:
print("Hi. Condition met")
print("Lets upgrade")
print("I am happy")
x = 99
if x==3:
print("Hi. Condition met")
print("Lets upgrade")
print("I am happy")
x = 99
if x==3:
print("Hi. Condition met")
print("Lets upgrade")
print("I am happy")
x = 99
if x > 3:
print("Hi. Condition met")
print("Lets upgrade")
print("I am happy")
x = 0
if x == 50:
print("Party")
elif x>50:
print("Movie")
print("Hi")
elif x<50:
print("Sit Chup chap at home")
# +
# WAP to check if the input no is even or odd
# type your code here3/3
no = 33
if no%2==0:
print(no, " is Even")
else:
print(no, " is Odd")
# +
# type your code here34
# +
# if statement
# type your code here3.5
# -
# type your code here3+9
a = 10
if a==10:
print("a is 10")
elif a==15:
print("a is 15")
elif a==17:
print("a is 17")
elif a==20:
print("a is 20")
else:
print("a is unknown")
# +
# Chained Conditionals
# type your code here4.0
# +
# Nested Conditionals
# type your code here4*1
x = 35
if x > 35:
print(x," is greater than 35")
else:
if x<35:
print(x," is less than 35")
else:
print(x," is equal than 35")
# +
# Nested Conditionals
# type your code here42
# + active=""
# If there is an else clause, it has to be at the end, but there doesn't have to be one.
# +
# break
# type your code here2+7
n = [10,20,30,40,50,60]
for i in n:
if i == 30:
break
else:
print(i)
print("Hi students")
print("Happy learning")
# -
# continue
# n = (1,2,3,4,5)
# type your code here2.-9
n = [10,20,30,40,30,50,60]
for i in n:
if i == 30:
continue
else:
print(i)
| Day6/2_Datatypes_Operators_and_Looping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
from tqdm.notebook import tqdm
import utilities
import datetime
from dateutil.parser import parse
import re
from urllib import parse as URLparse
from openwpm_utils import domain as du
import base64
import hashlib
from collections import OrderedDict
from prettytable import PrettyTable
from tqdm.notebook import tqdm
# +
base_directory = 'khaleesi/data/'
non_interactive_http_dir = base_directory + 'crawl-http-labeled.json'
non_interactive_js_dir = base_directory + 'crawl-js-connected-labeled.json'
# -
http_chains = utilities.read_json(non_interactive_http_dir)
js_chains = utilities.read_json(non_interactive_js_dir)
# # Helper function for getting identifiers
# +
def get_identifier_cookies(cookie_string, cookie_length=8):
    """Extract candidate identifier tokens from a newline-separated cookie string.

    Only the first `name=value` pair of each line is considered (attributes
    after ';' are dropped). Names are kept whole; values are split on any
    character outside [a-zA-Z0-9_=-]. Tokens shorter than `cookie_length`
    are discarded.
    """
    tokens = set()
    for line in cookie_string.split('\n'):
        pair = line.split(';')[0]
        if '=' in pair:
            name, value = pair.split('=', 1)
            tokens.add(name)
            tokens.update(re.split('[^a-zA-Z0-9_=-]', value))
        else:
            tokens.update(re.split('[^a-zA-Z0-9_=-]', pair))
    # Short tokens are too ambiguous to be identifiers.
    return {tok for tok in tokens if len(tok) >= cookie_length}
# -
def get_identifiers_from_qs(url, qs_item_length=8):
    """Extract candidate identifier tokens from a URL's query string.

    Both keys and values of each query-string pair are split on characters
    outside [a-zA-Z0-9_=-]; tokens shorter than `qs_item_length` are dropped.
    """
    tokens = set()
    for key, value in URLparse.parse_qsl(URLparse.urlsplit(url).query):
        tokens.update(re.split('[^a-zA-Z0-9_=-]', key))
        tokens.update(re.split('[^a-zA-Z0-9_=-]', value))
    return {tok for tok in tokens if len(tok) >= qs_item_length}
def get_identifiers_from_uncommon_headers(header_prop, item_length=8):
    """Split a header value into tokens and keep those long enough to be identifiers.

    The value is split on any character outside [a-zA-Z0-9_=-]; tokens
    shorter than `item_length` are discarded.
    """
    pieces = re.split('[^a-zA-Z0-9_=-]', header_prop)
    return {piece for piece in pieces if len(piece) >= item_length}
def get_domain_or_hostname(url):
    """Return (success, name): url's eTLD+1 or, failing that, its hostname.

    We stop if we cannot retrieve the domain or hostname — chains cannot be
    linked when both are empty or unavailable, so (False, '') is returned.
    """
    # Idiom fix: `is None` replaces `== None` (identity, not equality).
    current_domain_or_hostname = du.get_ps_plus_1(url)
    if current_domain_or_hostname == '' or current_domain_or_hostname is None:
        # Fall back to the raw hostname when eTLD+1 extraction fails.
        current_domain_or_hostname = du.urlparse(url).hostname
    if current_domain_or_hostname == '' or current_domain_or_hostname is None:
        return False, ''
    return True, current_domain_or_hostname
known_http_headers = set()
known_http_headers_raw = utilities.read_file_newline_stripped('common_headers.txt')
for item in known_http_headers_raw:
if item.strip() != '':
known_http_headers.add(item.strip().lower())
def check_csync_events(identifiers, next_identifiers, key, current_domain_or_hostname, next_url, csync_domains):
    """Record cookie-syncing events between one request and a later one.

    Each identifier is searched for in the later request's URL and header
    identifiers, both raw and base64/md5/sha1-encoded; matches are appended
    to ``csync_domains['from|to']`` under the corresponding bucket.

    Fix: the target-domain lookup and the per-pair dict initialization are
    loop-invariant, so they are hoisted out of the identifier loop
    (behavior unchanged; the original re-ran both per identifier).
    """
    if not identifiers:
        return csync_domains
    ok, next_domain_or_hostname = get_domain_or_hostname(next_url)
    if not ok:
        # cannot attribute the destination to a domain; nothing to record
        return csync_domains
    domain_domain = current_domain_or_hostname + '|' + next_domain_or_hostname
    if domain_domain not in csync_domains:
        csync_domains[domain_domain] = {'chains': [], 'b64_chains': [],
                                        'md5_chains': [], 'sha1_chains': []}
    for identifier in identifiers:
        base64_identifier = base64.b64encode(identifier.encode('utf-8')).decode('utf8')
        md5_identifier = hashlib.md5(identifier.encode('utf-8')).hexdigest()
        sha1_identifier = hashlib.sha1(identifier.encode('utf-8')).hexdigest()
        if identifier in next_url or identifier in next_identifiers:
            csync_domains[domain_domain]['chains'].append({'chain': key, 'identifier': identifier})
        elif base64_identifier in next_url or base64_identifier in next_identifiers:
            csync_domains[domain_domain]['b64_chains'].append({'chain':key, 'identifier': identifier, 'encoded': base64_identifier})
        elif md5_identifier in next_url or md5_identifier in next_identifiers:
            csync_domains[domain_domain]['md5_chains'].append({'chain':key, 'identifier': identifier, 'encoded': md5_identifier})
        elif sha1_identifier in next_url or sha1_identifier in next_identifiers:
            csync_domains[domain_domain]['sha1_chains'].append({'chain':key, 'identifier': identifier, 'encoded': sha1_identifier})
    return csync_domains
# # Cookie syncing identification code
def run_csync_heuristic(json_representation, known_http_headers, csync_domains):
    """Scan request chains for cookie-syncing events.

    For every request in every chain, candidate identifiers are extracted
    from cookies, uncommon headers, the URL query string and the referrer.
    Each identifier is then searched for (raw or encoded) in subsequent
    requests: the whole remaining chain for JS-initiated chains (keys
    starting with 'J|'), or only the immediate next request otherwise.
    Detected events are accumulated into ``csync_domains`` and returned.

    Fix: removed the unused ``sent_cookies``/``recieved_cookies`` locals
    that were assigned but never read.
    """
    pbar = tqdm(total=len(json_representation), position=0, leave=True)
    for key in json_representation:
        pbar.update(1)
        for idx, item in enumerate(json_representation[key]['content']):
            current_url = item['url']
            current_referrer = item['referrer']
            current_identifiers = set()
            current_domain_or_hostname = get_domain_or_hostname(current_url)
            if not current_domain_or_hostname[0]:
                continue
            current_domain_or_hostname = current_domain_or_hostname[1]
            # identifiers sent by the client (Cookie + uncommon request headers)
            for s_item in item['request_headers']:
                if s_item[0].lower() == 'cookie':
                    current_identifiers |= get_identifier_cookies(s_item[1])
                if s_item[0].lower() not in known_http_headers:
                    current_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
            # identifiers set by the server (Set-Cookie + uncommon response headers)
            for s_item in item['response_headers']:
                if s_item[0].lower() == 'set-cookie':
                    current_identifiers |= get_identifier_cookies(s_item[1])
                if s_item[0].lower() not in known_http_headers:
                    current_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
            current_identifiers |= get_identifiers_from_qs(current_url)
            current_identifiers |= get_identifiers_from_qs(current_referrer)
            # JS chains: compare against every later request; otherwise only the next one
            if key.startswith('J|'):
                end = len(json_representation[key]['content'])
            else:
                end = idx + 2
            if end > len(json_representation[key]['content']):
                continue
            for item_1 in json_representation[key]['content'][idx+1:end]:
                next_url = item_1['url']
                next_headers = item_1['request_headers']
                next_identifiers = set()
                for s_item in next_headers:
                    if s_item[0].lower() == 'cookie':
                        next_identifiers |= get_identifier_cookies(s_item[1])
                    if s_item[0].lower() not in known_http_headers:
                        next_identifiers |= get_identifiers_from_uncommon_headers(s_item[1])
                csync_domains = check_csync_events(current_identifiers, next_identifiers, key, current_domain_or_hostname, next_url, csync_domains)
    return csync_domains
# Fix: run_csync_heuristic takes (chains, known_headers, accumulator); the
# original calls passed a spurious, undefined `results_dict` argument,
# which would raise NameError/TypeError.
current_csync = {}
current_csync = run_csync_heuristic(http_chains, known_http_headers, current_csync)
current_csync = run_csync_heuristic(js_chains, known_http_headers, current_csync)
# # Clean up csync events
def cysnc_clean_up(csync_domains):
    """Drop domain pairs for which no syncing event of any kind was recorded.

    Mutates ``csync_domains`` in place and returns it.
    """
    empty_pairs = {
        pair for pair, events in csync_domains.items()
        if not (events['chains'] or events['b64_chains']
                or events['md5_chains'] or events['sha1_chains'])
    }
    for pair in empty_pairs:
        del csync_domains[pair]
    return csync_domains
# Report the number of domain pairs before/after pruning empty entries
print(len(current_csync))
current_csync = cysnc_clean_up(current_csync)
print(len(current_csync))
# ## Helper function for cookie syncing statistics
def count_csync_events(_from, _to, sending_json_obj, receiving_json_obj):
    """Tally one syncing event from ``_from`` to ``_to``.

    Updates (and returns) both directional tallies: per-domain event count
    plus the set of partner domains.
    """
    sender = sending_json_obj.setdefault(_from, {'count': 0, 'domains': set()})
    sender['count'] += 1
    sender['domains'].add(_to)
    receiver = receiving_json_obj.setdefault(_to, {'count': 0, 'domains': set()})
    receiver['count'] += 1
    receiver['domains'].add(_from)
    return sending_json_obj, receiving_json_obj
def get_csynced_chains(chains, chains_synced):
    """Count, per chain id, how many syncing events were observed."""
    for event in chains:
        entry = chains_synced.setdefault(event['chain'], {'count': 0})
        entry['count'] += 1
    return chains_synced
def get_unique_domains_in_chains(json_representation, khaleesi_detections):
    """Return the set of domains/hostnames appearing in detected chains only."""
    all_domains = set()
    for key, chain in json_representation.items():
        if key not in khaleesi_detections:
            continue
        for request in chain['content']:
            ok, name = get_domain_or_hostname(request['url'])
            if ok:
                all_domains.add(name)
    return all_domains
# # Finding cookie syncing stats
def compute_csync_stats(csync_domains, no_of_chains=None, no_of_domains=None):
    """Aggregate per-domain and per-chain cookie-syncing statistics.

    Parameters
    ----------
    csync_domains : dict
        'from|to' domain pairs mapped to their recorded syncing events
        (plain, base64-, md5- and sha1-encoded buckets).
    no_of_chains, no_of_domains : optional
        Unused. Fix: the original declared these as required, yet the
        call site passes only ``csync_domains`` (a TypeError); defaults
        keep the interface backward-compatible while fixing the call.

    Returns
    -------
    (set, dict, dict)
        The set of domains involved in syncing, plus the per-domain
        sending and receiving tallies.
    """
    sending_to = {}
    recieved_from = {}
    b64_sending_to = {}
    b64_recieved_from = {}
    md5_sending_to = {}
    md5_recieved_from = {}
    sha1_sending_to = {}
    sha1_recieved_from = {}
    chains_synced_simple = {}
    chains_synced_b64 = {}
    chains_synced_md5 = {}
    chains_synced_sha1 = {}
    for domain_domain in csync_domains:
        _from = domain_domain.split('|')[0]
        _to = domain_domain.split('|')[1]
        # ignore self-syncs
        if _from == _to:
            continue
        if len(csync_domains[domain_domain]['chains']) > 0:
            sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
            chains_synced_simple = get_csynced_chains(csync_domains[domain_domain]['chains'], chains_synced_simple)
        if len(csync_domains[domain_domain]['b64_chains']) > 0:
            sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
            b64_sending_to, b64_recieved_from = count_csync_events(_from, _to, b64_sending_to, b64_recieved_from)
            chains_synced_b64 = get_csynced_chains(csync_domains[domain_domain]['b64_chains'], chains_synced_b64)
        if len(csync_domains[domain_domain]['md5_chains']) > 0:
            sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
            md5_sending_to, md5_recieved_from = count_csync_events(_from, _to, md5_sending_to, md5_recieved_from)
            chains_synced_md5 = get_csynced_chains(csync_domains[domain_domain]['md5_chains'], chains_synced_md5)
        if len(csync_domains[domain_domain]['sha1_chains']) > 0:
            sending_to, recieved_from = count_csync_events(_from, _to, sending_to, recieved_from)
            sha1_sending_to, sha1_recieved_from = count_csync_events(_from, _to, sha1_sending_to, sha1_recieved_from)
            chains_synced_sha1 = get_csynced_chains(csync_domains[domain_domain]['sha1_chains'], chains_synced_sha1)
    # csync domain statistics (rebinds the parameter name, as in the original)
    csync_domains = set(sending_to.keys()).union(set(recieved_from.keys())).\
                    union(set(b64_sending_to.keys())).union(set(b64_recieved_from.keys())).\
                    union(set(md5_sending_to.keys())).union(set(md5_recieved_from.keys())).\
                    union(set(sha1_sending_to.keys())).union(set(sha1_recieved_from.keys()))
    # csync chain statistics (computed for inspection; not returned, as before)
    csync_chains = set(chains_synced_simple.keys()).union(set(chains_synced_b64.keys()))\
                                                   .union(set(chains_synced_md5.keys()))\
                                                   .union(set(chains_synced_sha1.keys()))
    # csync encoded chain statistics
    csync_encoded = set(b64_sending_to.keys()).union(set(b64_recieved_from.keys()))\
                    .union(set(md5_sending_to.keys())).union(set(md5_recieved_from.keys()))\
                    .union(set(sha1_sending_to.keys())).union(set(sha1_recieved_from.keys()))
    # encoded cookie syncing stats can also be returned
    return csync_domains, sending_to, recieved_from
# Fix: compute_csync_stats declares two further (unused) parameters; pass
# explicit placeholders so the call matches the 3-parameter signature.
csync_domains, sending_to, recieved_from = compute_csync_stats(current_csync, None, None)
# # Print top csync domains
# +
def print_table(json_obj, count_limit = 20):
    """Pretty-print up to ``count_limit`` domains with their csync counts."""
    table = PrettyTable(['Domains', 'Csync count'])
    for position, key in enumerate(json_obj, start=1):
        if position <= count_limit:
            table.add_row([key, json_obj[key]['count']])
    print(table)
def average_sharing(syncing_domains):
    """Print the mean number of csync events per domain."""
    counts = [entry['count'] for entry in syncing_domains.values()]
    print(sum(counts) / len(syncing_domains))
# -
def get_top_csyncs(sending_to, recieved_from):
    """Show the top csync senders and receivers plus their average activity."""
    by_count = lambda kv: kv[1]['count']
    print_table(OrderedDict(sorted(sending_to.items(), key=by_count, reverse=True)))
    average_sharing(sending_to)
    print_table(OrderedDict(sorted(recieved_from.items(), key=by_count, reverse=True)))
    average_sharing(recieved_from)
# Report the most active cookie-syncing senders and receivers
get_top_csyncs(sending_to, recieved_from)
| code/cookie_syncing_heuristic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Saving data in the background
#
# The QCoDeS Measurement allows for the actual data saving to take place in a background thread. This notebook provides an example of using that feature to reduce the overall time spent running the measurement.
#
# ## Can I haz speedup?
#
# If the time you spend actually writing the data to disk is comparable to the time it takes you to acquire the data (e.g. by waiting for a lock-in amplifier to integrate or an algorithm to return a result), then you can indeed expect a speedup of up to a factor of 2. If your measurement time is clearly dominated by either the acquisition or the writing to the DB file, then writing the data in the background won't offer you much of a speedup.
# ## Example Measurement
#
# We'll acquire a 2D heatmap and pretend that the acquisition is fairly slow. We'll also print how long the acquisition and the saving takes. We have chosen a *fair* example showing *some* speed-up. It is indeed possible to tailor situations where the speed-up is larger, e.g. by saving big (something like 3 x 100_000_000 points per save) numpy arrays, but such datasets are not easily visualised. In this notebook we get a decent speed-up and two nice heatmaps.
# +
import os
import time
import tempfile
import numpy as np
from qcodes.instrument.parameter import Parameter
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_dataset
from qcodes.dataset.experiment_container import new_experiment
from qcodes.dataset.sqlite.database import initialise_or_create_database_at
# -
dbname = os.path.join(tempfile.gettempdir(), os.urandom(24).hex()) + ".db"
initialise_or_create_database_at(dbname)
new_experiment('saving_data_in_bg', 'no_sample')
# +
def gaussian(x, mu, sig):
    """Unnormalised Gaussian bell curve centred at ``mu`` with width ``sig``."""
    return np.exp(-((x - mu) ** 2) / (2 * sig ** 2))
def get_response():
    """
    Simulated slow instrument response
    """
    # Current setpoints from the module-level qcodes Parameters
    freqs = frequency.get()
    volt = voltage.get()
    # pretend the instrument integrates for 100 ms
    time.sleep(0.1)
    # jitter the peak position a little between acquisitions
    volt += 0.2*np.random.rand()
    noise = 0.01*np.random.randn(len(freqs))
    # NOTE(review): noise is scaled by 0.01 twice (here and in the line above),
    # giving an effective 1e-4 amplitude — confirm whether one factor was intended
    return gaussian(freqs, volt, 2) + 0.01* noise
# +
voltage = Parameter('voltage', unit='V', set_cmd=None, get_cmd=None)
frequency = Parameter('frequency', unit='Hz', set_cmd=None, get_cmd=None)
response = Parameter('response', unit='V^2/Hz', get_cmd=get_response)
meas = Measurement()
meas.register_parameter(voltage)
meas.register_parameter(frequency)
meas.register_parameter(response, setpoints=[voltage, frequency])
# -
N = 10_000
M = 10
# ## Data saving in the main thread
#
# This is the default QCoDeS behaviour.
# +
t0 = time.perf_counter()
saving_time = 0
generation_time = 0
with meas.run() as datasaver:
init_time = time.perf_counter() - t0
for volt in np.sin(np.linspace(-np.pi, np.pi, M)):
t1 = time.perf_counter()
freqs = np.linspace(-10, 10, N)
frequency(freqs)
voltage(volt)
resp = response()
t2 = time.perf_counter()
generation_time += t2 - t1
datasaver.add_result((frequency, freqs),
(voltage, volt),
(response, resp))
t3 = time.perf_counter()
saving_time += t3 - t2
t4 = time.perf_counter()
saving_time += t4 - t3
print('Report:')
print(f'Number of data points saved: {M} x {N} points')
print(f'Init time: {init_time} s')
print(f'Data generation time: {generation_time} s')
print(f'Data saving time: {saving_time} s')
print(f'Total time: {t4-t0} s')
# -
_ = plot_dataset(datasaver.dataset)
# ## Saving in a background thread
#
# To save in a background thread, simply pass the `write_in_background` kwarg as you `run` the measurement.
# +
t0 = time.perf_counter()
saving_time = 0
generation_time = 0
# ONLY DIFFERENCE IN THE NEXT LINE
with meas.run(write_in_background=True) as datasaver: # <---- THIS LINE DIFFERENT
# THE PREVIOUS LINE CHANGED
init_time = time.perf_counter() - t0
for volt in np.sin(np.linspace(-np.pi, np.pi, M)):
t1 = time.perf_counter()
freqs = np.linspace(-10, 10, N)
frequency(freqs)
voltage(volt)
resp = response()
t2 = time.perf_counter()
generation_time += t2 - t1
datasaver.add_result((frequency, freqs),
(voltage, volt),
(response, resp))
t3 = time.perf_counter()
saving_time += t3 - t2
t4 = time.perf_counter()
saving_time += t4 - t3
print('Report:')
print(f'Number of data points saved: {M} x {N} points')
print(f'Init time: {init_time} s')
print(f'Data generation time: {generation_time} s')
print(f'Data saving time: {saving_time} s')
print(f'Total time: {t4-t0} s')
# -
_ = plot_dataset(datasaver.dataset)
# ## Conclusion
#
# For an example experiment saving 10 rows of 10,000 data points, out-of-thread data writing reduced the time spent in the main thread saving data by almost 70% and thus the overall measurement time by more than 35%.
| docs/examples/DataSet/Saving_data_in_the_background.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
df = pd.read_csv("../data/covid-data.csv",
usecols=['new_cases','gdp_per_capita','cardiovasc_death_rate',
'female_smokers', 'male_smokers'])
df.dropna(inplace = True)
df.head()
percent = [.25,.50,.75] #list of percentiles
dtypes = ['float','int', 'object'] #List of data types
get_summary = df.describe(percentiles = percent, include = dtypes)
get_summary
import pandas as pd
import numpy as np
df= pd.DataFrame({'Number': np.random.randint(1, 100, 5)})
df['Bins'] = pd.cut(x=df['Number'], bins=[10, 20, 50, 60])
print(df)
df['Bins'].unique() #displays the frequency of each bin.
import pandas as pd
import numpy as np
data_series = pd.Series(['eggs', 'milk', np.nan, 'fish',
' ', ' ', np.nan])
output = data_series.str.isspace()
print('data_series output:\n\n', output)
import pandas as pd
df = pd.DataFrame({'Name': ['<NAME>'],
'Age': 32})
print(df.to_html())
import pandas as pd
df = pd.DataFrame({"A":[50, 30, 60, 45],
"B":[11, 26, 45, 39],
"C":[41, 32, 80, 55],
"D":[56, 74, 92, 38]})
df.cov()
# +
import pandas as pd
series_1 = pd.Series(['Lagos', 'Dubai', 'New York', 'London', 'Tokyo'])
series_1.index = ['W.Africa', 'Middle-East', 'N.America', 'Europe', 'Asia']
print(series_1)
print()
series_2 = pd.Series(['Togo', 'Kuwait', 'California', 'Lisbon', 'Beijing'])
series_2.index = ['W.Africa', 'Middle-East', 'N.America', 'Europe', 'Asia']
print(series_2)
# -
series_1.where(series_1 =='Lagos', series_2)
# +
import pandas as pd
df = pd.DataFrame({"RES_1":[50, 30, 60, 45],
"RES_2":[11, 26, 45, 39],
"RES_3":[41, 32, 80, 55],
"RES_4":[56, 74, 92, 38]})
print(df.mad(axis = 0)) # Mean abs deviation over the index axis
print()
print(df.mad(axis = 1)) # Mean abs deviation over the column axis
# -
import pandas as pd
df = pd.DataFrame({'Month':['Jan','Feb','March','May'],
'Year':[2012, 2014, 2013, 2014],
'Sales($)':[100, 300, 500, 1500]})
df.to_numpy()
# +
import pandas as pd
df = pd.DataFrame({'Date & Log_Time': [20201010103000,
20201020204500,
20201025213500],
'Status': ['Approved', 'Not Approved ',
'Pending']})
df['Date & Log_Time'] = pd.to_datetime(df['Date & Log_Time'],
format='%Y%m%d%H%M%S')
df
# -
df.dtypes
# +
import pandas as pd
df = pd.DataFrame({'Year': ['2016', '2017', '2018', '2019'],
'Region': ['W.Africa','Asia Pacific', 'N.America', 'Middle-East'],
'PAFT($Billion)':['50.12', '100.56', '70.78', '90.67']
})
# Convert the PAFT($Billlion) column to floating type numbers
df['PAFT($Billion)'] = pd.to_numeric(df['PAFT($Billion)'],
errors = 'coerce')
print(df)
print (df.dtypes) #Display the data types
# -
import pandas as pd
#Let's create a Data Frame for Car sales.
df = pd.DataFrame({'Month':['Jan','Feb','March','May'],
'Year':[2012, 2014, 2013, 2014],
'Sales($)':[100, 300, 500, 1500]})
# +
from sqlalchemy import create_engine
#Create reference for SQL Library
engine = create_engine('sqlite://', echo = False)
#Pass the dataframe into SQL
df.to_sql('Car_Sales', con = engine)
print(engine.execute("SELECT * FROM Car_Sales").fetchall())
# -
#Let's access the Sales($) Column only
sales = pd.read_sql('Car_Sales',
con = engine,
columns = ["Sales($)"])
print(sales)
# +
import pandas as pd
import numpy as np
Weather_data = np.array([['Newyork', '30.4°F'],
['Calgary', '22°F'],
['Paris', '45°F']])
Weather_report = pd.DataFrame(Weather_data, columns = ['City', 'Temp'])
Weather_report
# +
Weather_json = Weather_report.to_json()
print(Weather_json)
Weather_json_split = Weather_report.to_json(orient ='split')
print("Weather_json_split = ", Weather_json_split, "\n")
Weather_json_records = Weather_report.to_json(orient ='records')
print("Weather_json_records = ", Weather_json_records, "\n")
Weather_json_index = Weather_report.to_json(orient ='index')
print("Weather_json_index = ", Weather_json_index, "\n")
Weather_json_columns = Weather_report.to_json(orient ='columns')
print("Weather_json_columns = ", Weather_json_columns, "\n")
Weather_json_values = Weather_report.to_json(orient ='values')
print("Weather_json_values = ", Weather_json_values, "\n")
Weather_json_table = Weather_report.to_json(orient ='table')
print("Weather_json_table = ", Weather_json_table, "\n")
# -
import pandas as pd
jersey = pd.Series([10, 20, 30, 40])
j_index = ['Nike', 'Adidas', 'Diadora', 'Kappa']
jersey.index = j_index
print(jersey)
Nike_pop = jersey.pop(item ='Nike')
print(jersey)
import pandas as pd
import numpy as np
jersey = pd.DataFrame({'Nike':[10, 30, np.nan],
'Adidas': [20, 60, np.nan],
'Diadora':[40, 50, 60],
'Kappa': [np.nan, 50, 70]
})
jersey
jersey.notna()
import pandas as pd
Time = pd.Timestamp(year = 2020, month = 1,
day = 1, hour = 9,
second = 50, tz = 'Europe/Paris')
Time
Time.now() #Return the current time in local timezone.
# +
import pandas as pd
df = pd.read_csv('../data/covid-data.csv',
usecols = ['location', 'gdp_per_capita',
'diabetes_prevalence', 'life_expectancy'])
df.sort_values('gdp_per_capita', inplace=True)
dup_df = df['gdp_per_capita'].duplicated()
df[dup_df].head() # Display Duplicate Values.
# +
#Remove Duplicate Values in the DataFrame.
dup_df = df['gdp_per_capita'].duplicated(keep=False)
df.info()
print() #This Prints an empty line
df[~dup_df] #Remove Duplicate Values
# -
import pandas as pd
df = pd.DataFrame({'Resistivity': [100,450,230,400],
'Array':['wenner','schLUMberger',
'dipole-DipOLe', 'wenNEr']})
df
# Method 1
df['Array'] = df['Array'].str.capitalize()
df
df = pd.DataFrame({'Resistivity': [100,450,230,400],
'Array':['wenner','schLUMberger',
'dipole-DipOLe', 'wenNEr']})
df
# Method 2
df['Array'].apply(lambda x: x.capitalize())
# +
import pandas as pd
import numpy as np
np.random.seed(0)
df = pd.DataFrame(np.random.random([3, 3]),
columns =["Point_A", "Point_B", "Point_C"])
df
# -
df.round(2)
df
df.round({'Point_A': 3, 'Point_B': 2, 'Point_C':1})
# +
import pandas as pd
df_1 = pd.DataFrame({'Nike':[10, 30, 40],
'Adidas': [20, 60, 80],
'Diadora':[40, 50, 60],
'Kappa': [30, 50, 70]},
index = ['J1','J2','J3'])
df_2 = pd.DataFrame({'Nike':[100, 300, 400],
'Adidas': [200, 600, 800],
'Diadora':[400, 500, 600],
'Kappa': [300, 500, 700]},
index = ['J2','J3','J4'])
print(df_1) # Display the first DataFrame
print()
print(df_2) # Display the second Dataframe
# -
#Find matching Indexes
df_1.reindex_like(df_2)
df_1.reindex_like(df_2, method='ffill')
import pandas as pd
df = pd.read_csv('../data/pew.csv',
usecols=['religion','<$10k', '$10-20k',
'$30-40k', '$40-50k', '>150k'])
df.head()
rel_cath = df['religion'].isin(['Catholic'])
df[rel_cath]
# +
import pandas as pd
countries= pd.Series(['Nigeria', 'Dubai', 'United States',
'Spain', 'China'])
countries.index = ['W.Africa', 'Middle-East',
'N.America', 'Europe', 'Asia']
countries
# -
countries.xs(key = 'Europe')
# +
import pandas as pd
df = pd.DataFrame({'>18yrs': [100, 344, 232, 247, 543, 690, 341],
'<18yrs': [398, 344, 250, 527, 819, 902, 341],
'Region': ['N.Central', 'S.West', 'S.East',
'N.East', 'S.South', 'S.East' ,'S.West'],
'State': ['Kwara', 'Ondo', 'Imo', 'Borno',
'Rivers', 'Anambra', 'Lagos'],
'City': ['Ilorin','Akure', 'Owerri', 'Maiduguri',
'Port Harcourt','Awka', 'Ikeja']})
df = df.set_index(['Region', 'State', 'City'])
df
# -
df.xs(key='S.West')
# +
import pandas as pd
df = pd.read_csv('../data/covid-data.csv' ,
usecols=['iso_code','continent',
'location', 'date','total_cases'])
df.dropna(inplace = True)
df.tail()
# -
import pandas as pd
import numpy
jersey = pd.Series([10, 20, 30, 40])
j_index = ['Nike', 'Adidas', 'Diadora', 'Kappa']
jersey.index = j_index
print(jersey)
import pandas as pd
import numpy as np
soc_boots = pd.DataFrame({'Nike':[100, np.nan, 400],
'Adidas': [200, 600, 800],
'Kappa': [300, 500, np.nan]})
soc_boots
soc_boots.eq(100)
# +
# Create a Pandas Series Object
series = pd.Series([200,300,400])
# Compare the data frame and the series object
soc_boots.eq(series, axis=0)
# -
import pandas as pd
df = pd.read_csv('../data/pew.csv',
usecols=['religion','<$10k', '$10-20k',
'$30-40k', '$40-50k', '>150k'])
df.head()
# Max over the index axis
df.max(axis=0)
import pandas as pd
import numpy as np
df = pd.DataFrame({'Month':['Jan','Feb','March','May'],
'Year':[2012, 2013, 2014, 2015],
'Sales($)':[np.nan, 300, 500, np.nan]})
df
df.max(axis=1, skipna = True)
import pandas as pd
df = pd.read_csv('../data/gapminder.tsv', sep='\t')
df.dropna(inplace=True)
df.head()
lifeExp_new = df['lifeExp']*20
df['gdpPercap < lifeExp_new'] = df['gdpPercap'].lt(lifeExp_new)
df.head()
# +
import pandas as pd
import numpy as np
#Generate a binomial distribution
from scipy.stats import nbinom
np.random.seed(0)
dist_1 = nbinom.rvs(5, 0.1, size=4)
dist_2 = nbinom.rvs(20, 0.1, size=4)
dist_3 = nbinom.rvs(30, 0.1, size=4)
dist_4 = nbinom.rvs(50, 0.1, size=4)
#Create a data data frame
# pass the binomial distribution as key:value pairs
df = pd.DataFrame({'bin_1':dist_1,
'bin_2':dist_2,
'bin_3':dist_3,
'bin_4':dist_4})
df
# -
# Call the stack() method to convert to long/tidy form
df.stack()
# Simplify the multi-index created from the stack() method.
df.stack().reset_index()
# +
import pandas as pd
#Limit the max columns to be displayed
pd.set_option('display.max_columns', 12)
#Read the wide csv file
df = pd.read_csv('../data/weather.csv')
#Display the fist five rows
df.head()
# -
weather_new = df.melt(id_vars = ['id', 'year', 'month', 'element'],
var_name='day', value_name='temp')
weather_new.head()
weather_new.pivot_table(index=['id', 'year', 'month', 'day'],
columns='element',
values='temp').reset_index().head()
import pandas as pd
import numpy as np
jersey = pd.DataFrame({'Nike':[10, 30, np.nan],
'Adidas': [20, 60, np.nan],
'Diadora':[40, 50, 60],
'Kappa': [np.nan, 50, 70]
})
jersey
# Write dataframe to a csv file
jersey.to_csv('jersey_brands.csv')
# Write dataframe to a tsv file
jersey.to_csv('jersey_brands.tsv', sep='\t')
# +
# Dataframe to tsv file without index
jersey.to_csv('jersey_brands.tsv', sep='\t', index=False)
# Dataframe to csv file without index
jersey.to_csv('jersey_brands.csv', index=False)
# -
import pandas as pd
import numpy as np
jersey = pd.DataFrame({'Nike':[10, 30, np.nan],
'Adidas': [20, 60, np.nan],
'Diadora':[40, 50, 60],
'Kappa': [np.nan, 50, 70]
})
jersey
# +
# Write dataframe to gzipped csv file.
# Fix: the original passed index='False' (a truthy string), which still
# wrote the index column; use the boolean False as in the zip example.
jersey.to_csv('jersey_brands.csv.gz',
              index=False,
              compression='gzip')
# Write dataframe to zipped csv file
jersey.to_csv('jersey_brands.csv.zip',
              index=False,
              compression='zip')
# -
import pandas as pd
data = pd.read_csv('../data/pew.csv',
usecols=['religion','<$10k','$10-20k',
'$20-30k','$30-40k'])
data.head()
# Check the data types in the dataframe
data.dtypes
# Convert data type to best data type
data.convert_dtypes().dtypes
import pandas as pd
data = pd.read_csv('../data/pew.csv',
usecols=['religion','<$10k','$10-20k',
'$20-30k','$30-40k'])
print(len(data))
import pandas as pd
df = pd.read_csv('../data/gapminder.tsv', sep='\t')
df.dropna(inplace=True)
df.head()
df.sort_values(['year','lifeExp','gdpPercap'],
ascending=[False, False, False],
inplace=True)
df.head()
import pandas as pd
df = pd.read_csv('../data/gapminder.tsv', sep='\t')
df.dropna(inplace=True)
df.head()
df['country_split']= df['country'].apply(lambda x: [item for
elem in [y.split() for y in x]
for item in elem])
df.head()
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
tips = sns.load_dataset("tips")
tips.head()
# +
f,ax=plt.subplots(1,2,figsize=(12,7))
sns.violinplot('smoker','total_bill',
hue='time',
data=tips,
split=True,
ax=ax[0])
ax[0].set_title('smoker and total_bill vs time')
ax[0].set_yticks(range(0,80,10))
sns.violinplot('sex','total_bill',
hue='time',
data=tips,
split=True,
ax=ax[1])
ax[1].set_title('sex and total_bill vs time')
ax[1].set_yticks(range(0,80,10))
plt.show()
# -
import pandas as pd
jersey = pd.Series([50, 60, 20, 20])
j_index = ['Nike', 'Adidas', 'Diadora', 'Kappa']
jersey.index = j_index
print(jersey)
# Display the n largest elements
# Where n=5 by default
jersey.nlargest()
# Display n largest elements where n=3
jersey.nlargest(3)
jersey.nlargest(3, keep='last')
jersey.nlargest(3, keep='all')
import pandas as pd
df = pd.DataFrame({'Occupancy':[550, 750, 350],
'Check_outs':[100, 200, 150]},
index=['Hyatt', 'Royal Palace', 'Sheraton'])
df
for row in df.itertuples():
print(row)
for row in df.itertuples(index=False):
print(row)
for row in df.itertuples(name="Hotels"):
print(row)
import pandas as pd
df_1 = pd.DataFrame({'counts_1':[100,100], 'counts_2':[500,500]})
df_2 = pd.DataFrame({'counts_1':[200,200], 'counts_2':[300,300]})
larger_column = lambda x1, x2: x1 if x1.sum() > x2.sum() else x2
df_1.combine(df_2, larger_column)
# +
import pandas as pd
import numpy as np
df_1 = pd.DataFrame({'counts_1':[100,100], 'counts_2':[500,np.nan]})
df_2 = pd.DataFrame({'counts_1':[np.nan,200], 'counts_2':[300,300]})
larger_column = lambda x1, x2: x1 if x1.sum() > x2.sum() else x2
df_1.combine(df_2, larger_column, fill_value=150)
# +
import pandas as pd
jersey = pd.Series([50, 60, 20, 20], name="Quantity")
j_index = ['Nike', 'Adidas', 'Diadora', 'Kappa']
jersey.index = j_index
print(jersey.to_markdown())
# -
# Tabulate option for markdown
print(jersey.to_markdown(tablefmt='grid'))
import pandas as pd
# Absolute numeric values in a Series
# Real Numbers
series = pd.Series([1.02,-3.50,-2.30,4.5])
series.abs()
# Absolute numeric values in a Series
# Complex numbers
s_cmplx = pd.Series([0.5 + 2j])
s_cmplx.abs()
# Absolute numeric values in a Series
# Timedelta element
timeSeries=pd.Series([pd.Timedelta('7 days')])
timeSeries.abs()
import pandas as pd
df = pd.DataFrame({'x': [10, 20, 30, 40],
'y': [100, 200, 300, 400],
'z': [1000, 500, -450, -750]
})
df
# Select rows closest to 50
y = 50
df.loc[(df.x - y).abs().argsort()]
import pandas as pd
df = pd.DataFrame({'Hotel': ['Hyatt', 'Royal Palace', 'Sheraton',
'Golden Tulip','Palm Jumeirah'],
'Occupancy':[550, 750, 350, 400, 800],
'Check_Outs':[100, 200, 150, 250, 300]},
index = [1, 2, 3, 4, 5])
df
df.truncate(before=1, after=3)
# Truncate Rows for Series
df['Hotel'].truncate(before=1, after=3)
# +
# Truncate Columns of a DataFrame
df = pd.DataFrame({'A': ['a', 'b', 'c', 'd'],
'B': ['f', 'g', 'h', 'i',],
'C': ['k', 'l', 'm', 'n']},
index=[0, 1, 2, 3])
df.truncate(before='A', after='B', axis=1)
# +
import pandas as pd
df = pd.DataFrame({'Hotel': ['Hyatt', 'Royal Palace', 'Sheraton',
'Golden Tulip','Palm Jumeirah'],
'Occupancy':[550, 750, 350, 400, 800],
'Check_Outs':[100, 200, 150, 250, 300]},
index = [1, 2, 3, 4, 5])
#Copy DataFrame to clipboard
df.to_clipboard(sep=',')
# +
import pandas as pd
import numpy as np
np.random.seed(0)
#Create Random Samples from a Gaussian distribution
series = np.random.normal(loc=0.5, scale=10, size=150)
# Find the cumulative
cum_sum = np.cumsum(series)
#Pass cumulative sum to a Pandas Series
time_series = pd.Series(cum_sum)
# Generate a Lag plot
time_series.plot()
# +
import pandas as pd
df = pd.DataFrame({'Hostel':['Alexander',
'Dalmatian',
'Hilltop'],
'Available_Rooms':[250, 300, 150]})
df
# -
occupied_rooms = 100
pd.eval('Total_Rooms = df.Available_Rooms + occupied_rooms',
target=df)
# +
import pandas as pd
my_index = pd.Index([0, 1, 2, 3, 4])
# Check if index is Categorical
my_index.is_categorical()
# -
# Fix: the original was missing a comma between 'GMC' and 'Hyundai', so
# implicit string concatenation produced a single 'GMCHyundai' element.
my_index = pd.Index(['BMW', 'Toyota', 'GMC',
                     'Hyundai', 'BMW', 'Ford']
                    ).astype('category')
#Check if index is categorical
my_index.is_categorical()
| notebooks/Tips and tricks book.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis
import csv
import numpy as np
import time
import datetime
# %matplotlib inline
from matplotlib import pyplot as plt
with open('data/hackthon.csv', 'r') as f:
lines = []
for line in csv.reader(f):
lines.append(line)
print("Headers:", dict(zip(range(len(lines[0])), lines[0])))
print("Event types:", set([x[4] for x in lines[1:]]))
# +
def get_epoch_time(h, d):
    """Convert an hour string and a date string (e.g. '4', '2019-01-02')
    to seconds since the epoch, interpreted in the local timezone."""
    stamp = datetime.datetime.strptime('{} {}'.format(h, d), "%H %Y-%m-%d")
    return time.mktime(stamp.timetuple())
def get_selection(event):
    """Return all rows of the module-level ``lines`` whose event type
    (column 4) equals ``event``."""
    return [line for line in lines if line[4] == event]
def gen_plot(selection):
    # Plot event counts (column 3) against epoch time for one event type,
    # printing the event name and the maxima as a quick sanity check.
    x = [get_epoch_time(s[0], s[1]) for s in selection]
    y = [int(s[3]) for s in selection]
    print(selection[0][4], max(x), max(y))
    plt.figure(figsize=(20,5))
    # label every 200th point with its "hour date" string to keep ticks readable
    plt.xticks(x[::200], [s[0]+' '+s[1] for s in selection][::200], rotation='vertical')
#     plt.yticks(range(0, 100), range(0, 100, 10))
    plt.plot(x, y, '+')
    plt.show()
    plt.close()
# -
gen_plot(get_selection('nO'))
gen_plot(get_selection('cFE'))
gen_plot(get_selection('bST'))
gen_plot(get_selection('aS'))
gen_plot(get_selection('fL'))
gen_plot(get_selection('aO'))
gen_plot(get_selection('nR'))
gen_plot(get_selection('bBCA'))
gen_plot(get_selection('sD'))
gen_plot(get_selection('bBCD'))
| code/Data Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
import numpy as np
import scipy
import scipy.linalg
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from numerical_eqs.pde.sdole import SDOLEPDESolver
from numerical_eqs.pde.fisher import FisherEQSolver
# +
meshsize = 100
mesh = np.linspace(0, 10, meshsize)
u0 = np.zeros(mesh.shape)
t0, t1 = (0, 12)
boundaries = (
{'type': 'dirichlet', 'f': lambda t: (np.exp(4*t) - 1) / np.exp(4*t)},
{'type': 'neumann', 'f': lambda t: 0},
)
hc_func = lambda x: 1.0
d_func = lambda x: 0.5
time_points = [1, 4, 8, 12]
plot_points = np.linspace(t0, t1, 10)
explicit_times = {
'time points': time_points + plot_points.tolist(),
}
def f_func(x, u):
    """Fisher reaction term f(u) = u*(1-u), clamped below at zero.

    Parameters
    ----------
    x : float or array-like
        Spatial coordinate (unused; kept for the solver's f(x, u) signature).
    u : float or numpy array
        Current solution values.

    Returns
    -------
    Element-wise ``max(u*(1-u), 0)`` with the same shape as ``u``.
    """
    # The original also computed the derivative
    # df = np.where(u*(1-u) > 0, 1-2*u, 0) and then discarded it;
    # that dead work is removed here.
    return np.maximum(u * (1 - u), 0)
# Assemble the solver and integrate the PDE over [t0, t1].
pde = FisherEQSolver(
    heat_capacity_func = hc_func,
    diffusion_func = d_func,
    f_func = f_func
)
res = pde.solve(
    mesh = mesh,
    u0 = u0,
    t0 = t0,
    t1 = t1,
    # Add in boundaries
    boundaries = boundaries,
    explicit_times = explicit_times,
    # Show the progress bar
    progress = True
)
# 'ys' holds the solution samples (rows: time, cols: mesh node);
# 'time' the adaptive time grid chosen by the solver.
sol_y = res['ys']
sol_t = res['time']
# + tags=[]
# ---- Visualise the solution in a 2x2 grid of panels ----
# NOTE(review): k = 3 yields rows = 2 and hence 4 axes, and axs[3] IS used
# below — confirm whether k was meant to be 4.
k = 3
cols = 2
rows = int(np.ceil(k / cols))
fig, axs = plt.subplots(rows, cols, figsize=(5*cols,3*rows))
# flatten so panels can be addressed by a single index
axs = np.asarray(axs).flatten()
# Panel 0: contour map of U sampled at the requested plot times
j = np.zeros(sol_t.shape)
for t in plot_points:
    j = np.logical_or(j, sol_t == t)
# Find times that satisfy
times = np.nonzero( j )
# Plot this using the colorbar
cf = axs[0].contourf(
    sol_y[times, :][0,:,:]
)
fig.colorbar(cf, ax=axs[0])
axs[0].set_title('Visual representation of solution Ut')
axs[0].set_xlabel('mesh x')
axs[0].set_ylabel('Time')
# Panel 1: solution profiles U(x) at the explicitly requested time points
j = np.zeros(sol_t.shape)
for t in time_points:
    j = np.logical_or(j, sol_t == t)
# Find times that satisfy
times = np.asarray(np.nonzero( j )).flatten()
for i, t in zip(times, time_points):
    axs[1].plot(
        mesh,
        sol_y[i,:],
        label='t={0:.2f}'.format(t)
    )
axs[1].set_title('U at t in {0}'.format(time_points))
axs[1].set_xlabel('mesh x')
axs[1].set_ylabel('Solution Ut')
axs[1].legend()
axs[1].grid()
# Panel 2: time traces U(t) at every 25th mesh node
for i in range(0, len(mesh), 25):
    axs[2].plot(
        sol_t,
        sol_y[:,i],
        label='x={0:.2f}'.format(mesh[i])
    )
axs[2].set_title('Found solution')
axs[2].set_xlabel('Time t')
axs[2].set_ylabel('Solution Ut')
axs[2].legend()
axs[2].grid()
# Panel 3: time step chosen by the adaptive stepper.
# NOTE(review): the plotted values are log(dt) while the axis label says
# 'dt' — confirm whether a log-scaled axis or a relabel was intended.
axs[3].plot(
    sol_t[:-1],
    np.log(sol_t[1:] - sol_t[:-1]),
)
axs[3].set_title('SDOLE time step dt')
axs[3].set_xlabel('Time t')
axs[3].set_ylabel('dt')
axs[3].grid()
fig.tight_layout()
plt.show()
# suppress the figure object from the notebook cell output
None
# -
| numerical_eqs/pde/fisher_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="G77Q51FrVMJ0"
# # **Sentiment Analysis System**
# + id="EXquiINS_jt8"
# import libraries
import numpy as np
import pandas as pd
import re
import nltk
from nltk.stem.porter import PorterStemmer
# + id="_NRVU6Vc_u6T"
# dataset import using pandas
# quoting=3 is csv.QUOTE_NONE: embedded quote characters are kept verbatim
dataset = pd.read_csv('./reviews.tsv', delimiter='\t', quoting = 3)
# + id="kkwti_mTAGiO"
# for stemming the word - prefix/sufix
ps = PorterStemmer()
# uncomment this line if 'stopwords are not downloaded'
# nltk.download('stopwords')
# + id="k9GWUObjANUI"
# imporing stopwords
from nltk.corpus import stopwords
# + id="1IhNIbn0AZVc"
# Build the cleaned corpus.  The stopword set is computed ONCE up front:
# the original rebuilt set(stopwords.words('english')) inside the filter
# condition — i.e. once per word — repeating an expensive corpus load
# thousands of times.
english_stopwords = set(stopwords.words('english'))

corpus = []
# filtering the text: keep letters only, lowercase, stem, drop stopwords
for i in range(0, 1000):
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i]).lower()
    tokens = [ps.stem(word) for word in review.split()
              if word not in english_stopwords]
    corpus.append(' '.join(tokens))
# + id="eBxsHuxVAc1i"
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
# X: dense document-term count matrix; y: sentiment label column
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# + id="zsEEq1edAfhJ"
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 18)
# + id="mnYtrCoHAsxM" colab={"base_uri": "https://localhost:8080/"} outputId="162dc475-17a4-44ce-c215-1e3683695c54"
# Fitting Naive Bayes to the Training set
# you can use MultinomialNB, BernoulliNB or GaussianNB
# NOTE(review): MultinomialNB is the conventional choice for word-count
# features; GaussianNB assumes continuous inputs — confirm this was deliberate.
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# + id="TB1_t2DuAyY6"
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="KybWPX-nU0Nl" outputId="4677803c-0d52-4171-b8a5-2fdb70576e92"
# Making the Confusion Matrix to check accurate answers
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# + colab={"base_uri": "https://localhost:8080/"} id="F7mHvGcwVACk" outputId="6d9239c1-4707-4e10-e8e2-3f6bd45826bd"
# function that reply whether the review is good or bad
def ask():
    """Prompt for a review on stdin and print whether it is good or bad.

    Applies the same preprocessing used at training time (letters only,
    lowercase, stem, drop English stopwords), vectorizes with the fitted
    CountVectorizer ``cv``, and classifies with the fitted ``classifier``.
    """
    # Build the stopword set once: the original re-evaluated
    # set(stopwords.words('english')) inside the filter, once per word.
    stop_words = set(stopwords.words('english'))
    input_string = input('submit comment = ')  # input() already returns str
    review = re.sub('[^a-zA-Z]', ' ', input_string).lower()
    stemmed = [ps.stem(word) for word in review.split() if word not in stop_words]
    features = cv.transform([' '.join(stemmed)]).toarray()
    if classifier.predict(features)[0] == 1:
        print('good review')
    else:
        print('bad review')

ask()
| code/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 2: Measurement Error Mitigation
#
# Present day quantum computers are subject to noise of various kinds. The principle behind error mitigation is to reduce the effects from a specific source of error. Here we will look at mitigating measurement errors, i.e., errors in determining the correct quantum state from measurements performed on qubits.
#
# <img src="mitigation.png" width="900"/>
# <center>Measurement Error Mitigation</center>
#
# In the above picture, you can see the outcome of applying measurement error mitigation. On the left, the histogram shows results obtained using the device `ibmq_vigo`. The ideal result should have shown 50% counts $00000$ and 50% counts $10101$. Two features are notable here:
#
# - First, notice that the result contains a skew toward $00000$. This is because of energy relaxation of the qubit during the measurement process. The relaxation takes the $\vert1\rangle$ state to the $\vert0\rangle$ state for each qubit.
# - Second, notice that the result contains other counts beyond just $00000$ and $10101$. These arise due to various errors. One example of such errors comes from the discrimination after measurement, where the signal obtained from the measurement is identified as either $\vert0\rangle$ or $\vert1\rangle$.
#
# The picture on the right shows the outcome of performing measurement error mitigation on the results. You can see that the device counts are closer to the ideal expectation of $50%$ results in $00000$ and $50%$ results in $10101$, while other counts have been significantly reduced.
#
#
# ## How measurement error mitigation works
#
#
# We start by creating a set of circuits that prepare and measure each of the $2^n$ basis states, where $n$ is the number of qubits. For example, $n = 2$ qubits would prepare the states $|00\rangle$, $|01\rangle$, $|10\rangle$, and $|11\rangle$ individually and see the resulting outcomes. The outcome statistics are then captured by a matrix $M$, where the element $M_{ij}$ gives the probability to get output state $|i\rangle$ when state $|j\rangle$ was prepared. Even for a state that is in an arbitrary superposition $|\psi \rangle = \sum_j \alpha_j |j\rangle$, the linearity of quantum mechanics allows us to write the noisy output state as $|\psi_{noisy}\rangle = M |\psi\rangle$.
#
# The goal of measurement error mitigation is not to model the noise, but rather to apply a classical correction that undoes the errors. Given a noisy outcome, measurement error mitigation seeks to recover the initial state that led to that outcome. Using linear algebra we can see that given a noisy outcome $|\psi_{noisy}\rangle$, this can be done by applying the inverse of the matrix $M$, i.e., $|\psi \rangle = M^{-1} |\psi_{noisy}\rangle$. Note that the matrix $M$ recovered from the measurements is usually non-invertible, thus requiring a generalized inverse method to solve. Additionally, the noise is not deterministic, and has fluctuations, so this will in general not give you the ideal noise-free state, but it should bring you closer to it.
#
# You can find a more detailed description of measurement error mitigation in [Chapter 5.2](https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html) of the Qiskit textbook.
#
# **The goal of this exercise is to create a calibration matrix $M$ that you can apply to noisy results (provided by us) to infer the noise-free results.**
#
# ---
# For useful tips to complete this exercise as well as pointers for communicating with other participants and asking questions, please take a look at the following [repository](https://github.com/qiskit-community/may4_challenge_exercises). You will also find a copy of these exercises, so feel free to edit and experiment with these notebooks.
#
# ---
#
# In Qiskit, creating the circuits that test all basis states to replace the entries for the matrix is done by the following code:
# +
#initialization
# %matplotlib inline
# Importing standard Qiskit libraries and configuring account
from qiskit import IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.providers.ibmq import least_busy
from qiskit.tools.jupyter import *
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import *
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
provider = IBMQ.load_account() # load your IBM Quantum Experience account
# If you are a member of the IBM Q Network, fill your hub, group, and project information to
# get access to your premium devices.
# provider = IBMQ.get_provider(hub='', group='', project='')
from may4_challenge.ex2 import get_counts, show_final_answer
# One calibration circuit per computational basis state |00000>..|11111>;
# state_labels records the bitstring each circuit prepares.
num_qubits = 5
meas_calibs, state_labels = complete_meas_cal(range(num_qubits), circlabel='mcal')
# -
# Next, run these circuits on a real device! You can choose your favorite device, but we recommend choosing the least busy one to decrease your wait time in the queue. Upon executing the following cell you will be presented with a widget that displays all the information about the least busy backend that was selected. Clicking on the "Error Map" tab will reveal the latest noise information for the device. Important for this challenge is the "readout" (measurement) error located on the left (and possibly right) side of the figure. It is common to see readout errors of a few percent on each qubit. These are the errors we are mitigating in this exercise.
# find the least busy device that has at least 5 qubits
# (real hardware only: simulators are filtered out and the device must be operational)
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= num_qubits and
                       not x.configuration().simulator and x.status().operational==True))
backend
# Run the next cell to implement all of the above steps. In order to average out fluctuations as much as possible, we recommend choosing the highest number of shots, i.e., `shots=8192` as shown below.
#
# The call to `transpile` maps the measurement calibration circuits to the topology of the backend being used. `backend.run()` sends the circuits to the IBM Quantum device returning a `job` instance, whereas `%qiskit_job_watcher` keeps track of where your submitted job is in the pipeline.
# run experiments on a real device
shots = 8192   # highest shot count offered, to average out statistical fluctuations
experiments = transpile(meas_calibs, backend=backend, optimization_level=3)
job = backend.run(assemble(experiments, shots=shots))
print(job.job_id())   # keep the ID so the job can be retrieved later if needed
# %qiskit_job_watcher
# Note that you might be in the queue for quite a while. You can expand the 'IBMQ Jobs' window that just appeared in the top left corner to monitor your submitted jobs. Make sure to keep your job ID in case you ran other jobs in the meantime. You can then easily access the results once your job is finished by running
#
# ```python
# job = backend.retrieve_job('YOUR_JOB_ID')
# ```
#
# Once you have the results of your job, you can create the calibration matrix and calibration plot using the following code. However, as the counts are given in a dictionary instead of a matrix, it is more convenient to use the measurement filter object that you can directly apply to the noisy counts to receive a dictionary with the mitigated counts.
# get measurement filter
cal_results = job.result()
# Calibration matrix M: element (i, j) = P(measured state i | prepared state j)
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
meas_filter = meas_fitter.filter
#print(meas_fitter.cal_matrix)
meas_fitter.plot_calibration()
# In the calibration plot you can see the correct outcomes on the diagonal, while all incorrect outcomes are off-diagonal. Most of the latter are due to T1 errors depolarizing the states from $|1\rangle$ to $|0\rangle$ during the measurement, which causes the matrix to be asymmetric.
#
# Below, we provide you with an array of noisy counts for four different circuits. Note that as measurement error mitigation is a device-specific error correction, the array you receive depends on the backend that you have used before to create the measurement filter.
#
# **Apply the measurement filter in order to get the mitigated data. Given this mitigated data, choose which error-free outcome would be most likely.**
#
# As there are other types of errors for which we cannot correct with this method, you will not get completely noise-free results, but you should be able to guess the correct results from the trend of the mitigated results.
# ## i) Consider the first set of noisy counts:
# get noisy counts
noisy_counts = get_counts(backend)
plot_histogram(noisy_counts[0])
# apply measurement error mitigation and plot the mitigated counts
# (the filter applies the pseudo-inverse of the calibration matrix)
mitigated_counts_0 = meas_filter.apply(noisy_counts[0])
plot_histogram(mitigated_counts_0)
# ## Which of the following histograms most likely resembles the *error-free* counts of the same circuit?
# a) <img src="hist_1a.png" width="500">
# b) <img src="hist_1b.png" width="500">
# c) <img src="hist_1c.png" width="500">
# d) <img src="hist_1d.png" width="500">
# uncomment whatever answer you think is correct
#answer1 = 'a'
#answer1 = 'b'
answer1 = 'c'
# answer1 = 'd'
# ## ii) Consider the second set of noisy counts:
# plot noisy counts
plot_histogram(noisy_counts[1])
# apply measurement error mitigation
# insert your code here to do measurement error mitigation on noisy_counts[1]
mitigated_counts_1 = meas_filter.apply(noisy_counts[1])
plot_histogram(mitigated_counts_1)
# ## Which of the following histograms most likely resembles the *error-free* counts of the same circuit?
# a) <img src="hist_2a.png" width="500">
# b) <img src="hist_2b.png" width="500">
# c) <img src="hist_2c.png" width="500">
# d) <img src="hist_2d.png" width="500">
# uncomment whatever answer you think is correct
#answer2 = 'a'
#answer2 = 'b'
# answer2 = 'c'
answer2 = 'd'
# ## iii) Next, consider the third set of noisy counts:
# plot noisy counts
plot_histogram(noisy_counts[2])
# apply measurement error mitigation
# insert your code here to do measurement error mitigation on noisy_counts[2]
mitigated_counts_2 = meas_filter.apply(noisy_counts[2])
plot_histogram(mitigated_counts_2)
# ## Which of the following histograms most likely resembles the *error-free* counts of the same circuit?
# a) <img src="hist_3a.png" width="500">
# b) <img src="hist_3b.png" width="500">
# c) <img src="hist_3c.png" width="500">
# d) <img src="hist_3d.png" width="500">
# uncomment whatever answer you think is correct
#answer3 = 'a'
answer3 = 'b'
#answer3 = 'c'
#answer3 = 'd'
# ## iv) Finally, consider the fourth set of noisy counts:
# plot noisy counts
plot_histogram(noisy_counts[3])
# apply measurement error mitigation
# insert your code here to do measurement error mitigation on noisy_counts[3]
mitigated_counts_3 = meas_filter.apply(noisy_counts[3])
plot_histogram(mitigated_counts_3)
# ## Which of the following histograms most likely resembles the *error-free* counts of the same circuit?
# a) <img src="hist_4a.png" width="500">
# b) <img src="hist_4b.png" width="500">
# c) <img src="hist_4c.png" width="500">
# d) <img src="hist_4d.png" width="500">
# uncomment whatever answer you think is correct
# answer4 = 'a'
answer4 = 'b'
# answer4 = 'c'
#answer4 = 'd'
# The answer string of this exercise is just the string of all four answers. Copy and paste the output of the next line on the IBM Quantum Challenge page to complete the exercise and track your progress.
# answer string
# concatenates the four choices into the challenge's submission string
show_final_answer(answer1, answer2, answer3, answer4)
# Now that you are done, move on to the next exercise!
| ibm/error_correction/Challenge2_MeasurementErrorMitigation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
#Import scikit learn dataset library
from sklearn import datasets
# NOTE(review): absolute Windows path — a relative path would let this
# notebook run on other machines.
churn=pd.read_csv('C:/Users/aksha/Desktop/ai/datasets/Churn_Modelling.csv')
churn.head()
churn.shape
# 1)The above data indicates the list of 10000 employess in a bank to know why people are leaving the bank.
#
# 2)Number is Serial No which is independent.
#
# 3)Customer Id is IDno provided by the bank which is independent.
#
# 4)Surname is the surname of the customer which is independent.
#
# 5)Credit Score is the a measure of an individual’s ability to pay back the borrowed amount. It is the numerical representation of their creditworthiness. A credit score is a 3 digit number that falls in the range of 300-900, 900 being the highest it is independent.
#
# 6)Geography is the location in which the customer is located in data the location is from France,Spain and Germany.It is a independent variable.
#
# 7)Gender indicates sex of the customer.It is dependent invariable.
#
# 8)Tenure indicates how long the customer has been associated with the bank.It is independent variable.
#
# 9)Balance indicates how much amount the customer is maintaing at present in his bank account.It is independent variable.
#
# 10)HasCrCard indicates whether the customer has the credit card or not. It is dependent variable.0 indicates no credit card 1
# indicates the customer has incredit card.
#
# 11)IsActiveMember indindicates whether the customer is active customer or not. It is independent variable.0 indicates not active member 1 indicates the customer is an active member.
#
# 12)Estimated salary indicates salary indicates the salary earnt by the customer at work. It is independent variable.
#
# 13)Exited indicated whether the customer has actually exited the bank. It is dependent variable.0 indicates not exited 1 indicates exited
# independent variable taken for study:
# 1)Gender
#
# 2)Credit Score
#
# 3)Tunure
#
# 4)Balance
#
# 5)Numof Products
#
# 6)HasCrCard
#
# 7)Estimated Salary
#
# 8)Geography
#
# 9)Age
#
# 10)IsActiveMember
#
# Dependedent variable taken for study:
#
# 1)Exited
#Importing packages
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
#Checking null components in the table
# NOTE(review): churn.isnull() displays the full boolean frame;
# churn.isnull().sum() would summarise missing values per column.
churn.isnull()
# +
#Visualizing the number of customers left the bank through countplot
sb.countplot(x='Exited',data=churn)
# -
# By looking at the above countplot 2000 customers have left the bank
#Analysing the number of customers left the bank on the basis of gender
sb.countplot(x='Exited',hue='Gender',data=churn)
# by looking at the above count plot it shows around 900 male persons have left the bank and 1100 female persons have left the bank.
# Analyzing the number of customers left through Credit Score
sb.boxplot(x='Exited',y='CreditScore',data=churn)
# The boxplot plotted under 0 shows that the customers are still continuing with the bank. The median credit score is 650.The person who is with still with the bank has a maximum credit score of 900 and a minimum score of 400. The cluster of data is within 590 to 720.
#
# The boxplot plotted under 1 shows that the customers are not continuing with the bank. The median credit score is 648. The person has left the bank had the maximum credit score of 900 and a minimum score of 300.The cluster of data is within 585 to 720.
# Analyzing the customers who left by Tenure
sb.boxplot(x='Exited',y='Tenure',data=churn)
# The boxplot plotted under 0 shows that the customers are still continuing with the bank. The median tenure is 5 years.The person who is with still with the bank has a maximum tenure of 10 years and a minimum tenure of 0 years. The cluster of data is within 3 to 7 years.
#
# The boxplot plotted under 1 shows that the customers are not continuing with the bank. The median tenure is 5 years. The person has left the bank had the maximum tenure of 10 years and a minimum score of 0 years.The cluster of data is within 2 to 8 years.
#Visualizing the exit of customer through balance in their account
sb.boxplot(x='Exited',y='Balance',data=churn)
# The boxplot plotted under 0 shows that the customers are still continuing with the bank. The median balabce is 99000.The person who is with still with the bank has a maximum balance of 10 years and a minimum balance of 225000. The cluster of data is within 0 to 125000.
#
# The boxplot plotted under 1 shows that the customers are not continuing with the bank. The median balance is 105000. The person has left the bank had the maximum balance of 250000 and a minimum balance of 0.The cluster of data is within 40000 to 130000.
#Analysing the number of products used by customer
sb.violinplot(x='Exited',y='NumOfProducts',data=churn)
plt.show
# The boxplot plotted under 0 shows that the customers are still continuing with the bank. Maximum number of products used is 3. Minimum number of products used is 1. Cluster is 2
#
# The boxplot plotted under 1 shows that the customers are not continuing with the bank. Maximum number of products used is 4. Minimum number of products used is 1. Cluster is 1.
#Analysing the number of customers left and used the credit card
sb.countplot(x='Exited',hue='HasCrCard',data=churn)
# 1 indicates the persons who left the bank. 500 persons without credit card left the bank.1500 left the bank having the credit card facility with them
#Analysing the number of customer who left by location
sb.countplot(x='Exited',hue='Geography',data=churn)
# Same number of customers left the bank from France and Germany whereas Spain had least number of customers leaving the bank.
# On the other hand France has highest number of customers staying with the bank and Germany had lowest number of customers staying with the bank
#Analysing the number of active and passive customers leaving the bank
sb.countplot(x='Exited',hue='IsActiveMember',data=churn)
# More no of passive members left the bank compared to active members
#Number of Customers leaving the bank on basis of age
sb.boxplot(x='Exited',y='Age',data=churn)
# Minimum age of continuing with the bank is 19. Maximum age is 55.Median is 35.5. Cluster is between 31 to 41.
#
# Minimum age of leaving the bank is 20. Maximum age is 70.Median is 55.Cluster is between 40 to 50.
#Number of Customers leaving the bank on basis of income
sb.violinplot(x='Exited',y='EstimatedSalary',data=churn)
plt.show()
# Minimun estimated salary of customer who are staying with bank and leaving the bank is 0.
#
# Maximum estimated salary of customer who are staying with bank or leaving the bank is 200000.
#
# Median for both is 100000.
# Drop the pure-identifier column before modelling.
churn.drop('CustomerId',axis=1,inplace=True)
churn.info()
# One-hot encode the categoricals and KEEP the result: the original called
# pd.get_dummies(...).head() without assigning it, so the encoded Gender/
# Geography columns were silently discarded and those features never
# reached the model.
churn = pd.get_dummies(churn, columns=["Gender", "Geography"])
# RowNumber and Surname are identifiers with no predictive value.
churn.drop(['RowNumber', 'Surname'], axis=1, inplace=True)
churn.head()
#Train test split
from sklearn.model_selection import train_test_split
# hold out 30% of rows for evaluation; features = everything but 'Exited'
X_train,X_test,y_train,y_test=train_test_split(churn.drop('Exited',axis=1),churn['Exited'],test_size=0.30)
#Training and Predicting
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(X_train,y_train)
predictions=logmodel.predict(X_test)
# mean accuracy on the held-out set
logmodel.score(X_test,y_test)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
| churncasestudy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Importing librairies
import tweepy
import json
#Keys for API authentication accessed from twitter developer
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
# Function to extract tweets
def get_tweets(username):
    """Download the available timeline for *username* and dump each tweet's
    raw JSON to ``result.jsonl`` (one object per line).

    Requires the module-level consumer/access credentials; they are empty
    strings here, so the call will fail until they are populated.
    """
    # Authorization to consumer key and consumer secret
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    # Access to user's access key and access secret
    auth.set_access_token(access_key, access_secret)
    # Calling api
    api = tweepy.API(auth)
    # Extracting all tweets for a given username
    tweets = tweepy.Cursor(api.user_timeline, screen_name=username).items()
    # Dumping of responses into JSONlines file
    # (tweet._json is the raw API payload behind the tweepy model object)
    tweets_for_json = [tweet for tweet in tweets]
    with open('result.jsonl','w') as f:
        for j in tweets_for_json:
            f.write(json.dumps(j._json)+"\n")

# Driver code
if __name__ == '__main__':
    # Here goes the twitter handle for the user whose tweets are to be extracted.
    get_tweets("midasIIITD")
# +
# define a new variable for tweets
tweets = []
# import tweets from the JSONlines file; a context manager guarantees the
# file handle is closed (the original's bare open() in the for-loop leaked it)
with open('result.jsonl') as f:
    for line in f:
        tweets.append(json.loads(line))
# +
import pandas as pd

# Tabulate the extracted tweets: one column per field of interest.
df = pd.DataFrame()
df['Text'] = [tweet['text'] for tweet in tweets]
df['Date & Time'] = [tweet['created_at'] for tweet in tweets]
df['Favorites'] = [tweet['favorite_count'] for tweet in tweets]
df['Retweets'] = [tweet['retweet_count'] for tweet in tweets]
# -
# # Unable to fetch number of images
# 
#
# ### In the official documentation of twitter, it is written that if an image is attached to a tweet then 'entities' section will contain a 'media' array containing a single media object.
# ### Also even though we can attach up to four images, only the first one will be listed in the 'entities' section.
# ### So we can tell only if an image is attached or not to a tweet.
# Flag each tweet that carries an attached image: a tweet with media exposes
# a 'media' array inside its 'entities' section.
df['Image'] = ['YES' if 'media' in tweet['entities'] else 'NO' for tweet in tweets]
# showing result in tabular format
df
# saving the result to csv file
df.to_csv('result.csv')
| python_problem/twitter_extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/akashmavle5/--akash/blob/main/Copy_of_rapids_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="scfLT2i0MLyD"
# # Environment Sanity Check #
#
# Click the _Runtime_ dropdown at the top of the page, then _Change Runtime Type_ and confirm the instance type is _GPU_.
#
# Check the output of `!nvidia-smi` to make sure you've been allocated a Tesla T4, P4, or P100.
# + id="B0C8IV5TQnjN" colab={"base_uri": "https://localhost:8080/"} outputId="d4d860c0-59d9-4fe2-9bde-7aee203900ff"
# !nvidia-smi
# + [markdown] id="CtNdk7PSafKP"
# #Setup:
# Set up script installs
# 1. Install most recent Miniconda release compatible with Google Colab's Python install (3.7.10)
# 1. removes incompatible files
# 1. Install RAPIDS' current stable version of its libraries, including:
# 1. cuDF
# 1. cuML
# 1. cuGraph
# 1. cuSpatial
# 1. cuSignal
# 1. xgboost
# 1. Set necessary environment variables
# 1. Copy RAPIDS .so files into current working directory, a workaround for conda/colab interactions
#
# + id="m0jdXBRiDSzj" colab={"base_uri": "https://localhost:8080/"} outputId="bf1753eb-123e-4c2c-d48e-5140ced8f293"
# Install RAPIDS
# Created by <NAME> using Google Colab and Nvidia RAPIDS
# !git clone https://github.com/rapidsai/rapidsai-csp-utils.git
# !bash rapidsai-csp-utils/colab/rapids-colab.sh stable
import sys, os
# Make conda's site-packages (where the RAPIDS libraries were installed)
# take precedence over Colab's dist-packages, without removing either.
dist_package_index = sys.path.index('/usr/local/lib/python3.7/dist-packages')
sys.path = sys.path[:dist_package_index] + ['/usr/local/lib/python3.7/site-packages'] + sys.path[dist_package_index:]
sys.path
# SECURITY NOTE(review): exec() of a script pulled from a cloned repo runs
# arbitrary code; acceptable only in this throwaway Colab context.
exec(open('rapidsai-csp-utils/colab/update_modules.py').read(), globals())
# + id="z2bz_X0T7rWt"
filename = 'cuml_random_forest_model.sav'

# save the trained cuml model into a file
# (context managers close the handles deterministically; the original passed
# bare open() objects to pickle and leaked the file descriptors)
with open(filename, 'wb') as model_file:
    pickle.dump(cuml_model, model_file)

# delete the previous model to ensure that there is no leakage of pointers.
# this is not strictly necessary but just included here for demo purposes.
del cuml_model

# load the previously saved cuml model from a file
with open(filename, 'rb') as model_file:
    pickled_cuml_model = pickle.load(model_file)
# + colab={"base_uri": "https://localhost:8080/"} id="_oRhPret7xIJ" outputId="1be6ffa9-ee56-4db8-b301-dc42c09ba32c"
# %%time
pred_after_pickling = pickled_cuml_model.predict(X_cudf_test)
fil_acc_after_pickling = accuracy_score(y_test.to_numpy(), pred_after_pickling)
# + colab={"base_uri": "https://localhost:8080/"} id="2Ubo2cmG72Iq" outputId="4cb2eb0b-5386-4ed8-b966-bb77d3d8d0e2"
print("CUML accuracy of the RF model before pickling: %s" % fil_acc_orig)
print("CUML accuracy of the RF model after pickling: %s" % fil_acc_after_pickling)
# + colab={"base_uri": "https://localhost:8080/"} id="pRlu3IzF73Px" outputId="08f7f9e5-5983-43b1-d3b5-bf65559a5546"
print("SKL accuracy: %s" % sk_acc)
print("CUML accuracy before pickling: %s" % fil_acc_orig)
# + id="zx3RKHW48BnA" colab={"base_uri": "https://localhost:8080/"} outputId="beb308f0-8eb7-480d-9ec1-fd6cf3f335db"
from google.colab import drive
drive.mount('/content/drive')
# + id="8cnCU7Jq8tr8"
import cudf
from cuml import make_regression, train_test_split
from cuml.linear_model import LinearRegression as cuLinearRegression
from cuml.metrics.regression import r2_score
from sklearn.linear_model import LinearRegression as skLinearRegression
# + id="vQ7ar6qb835C"
n_samples = 2**20 #If you are running on a GPU with less than 16GB RAM, please change to 2**19 or you could run out of memory
n_features = 399
random_state = 23
# + id="g9oJRB-m86WD" colab={"base_uri": "https://localhost:8080/"} outputId="ad659e1f-ad95-43a1-83e5-389844d16887"
# %%time
X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=random_state)
X = cudf.DataFrame(X)
y = cudf.DataFrame(y)[0]
X_cudf, X_cudf_test, y_cudf, y_cudf_test = train_test_split(X, y, test_size = 0.2, random_state=random_state)
# + id="Y7D8sEYZ89yb"
# Copy dataset from GPU memory to host memory.
# This is done to later compare CPU and GPU results.
X_train = X_cudf.to_pandas()
X_test = X_cudf_test.to_pandas()
y_train = y_cudf.to_pandas()
y_test = y_cudf_test.to_pandas()
# + id="hebVKYYb9DDZ" colab={"base_uri": "https://localhost:8080/"} outputId="ef673758-277e-4b11-dd29-e8ae35a822be"
# %%time
ols_sk = skLinearRegression(fit_intercept=True,
normalize=True,
n_jobs=-1)
ols_sk.fit(X_train, y_train)
# + id="jGI2AF659E9q" colab={"base_uri": "https://localhost:8080/"} outputId="4832c043-5cf8-4166-ff7b-cfa647094a10"
# %%time
predict_sk = ols_sk.predict(X_test)
# + id="UpLcQvvJ9Iur" colab={"base_uri": "https://localhost:8080/"} outputId="c53e4732-5b2e-4aa6-a945-13e6cc187389"
# %%time
r2_score_sk = r2_score(y_cudf_test, predict_sk)
# + id="npLD5CsR9JdZ" colab={"base_uri": "https://localhost:8080/"} outputId="64ac23b1-d438-47d9-9901-de9d5fe43a07"
# %%time
ols_cuml = cuLinearRegression(fit_intercept=True,
normalize=True,
algorithm='eig')
ols_cuml.fit(X_cudf, y_cudf)
# + id="J9_zfN309MZt" colab={"base_uri": "https://localhost:8080/"} outputId="a975c257-ee41-4398-d4d0-acf72ecd0fb0"
# %%time
predict_cuml = ols_cuml.predict(X_cudf_test)
# + id="HhhqPpZ39OSk" colab={"base_uri": "https://localhost:8080/"} outputId="9dfdb1d6-829c-4a7e-820a-8fae38dd5cd1"
# %%time
r2_score_cuml = r2_score(y_cudf_test, predict_cuml)
# + id="cERm6ZeN9QQp" colab={"base_uri": "https://localhost:8080/"} outputId="14dc6c33-5828-468e-da8a-ce478f730685"
print("R^2 score (SKL): %s" % r2_score_sk)
print("R^2 score (cuML): %s" % r2_score_cuml)
# + [markdown] id="9oOCJ4NYMjY7"
# # cuDF and cuML Examples #
#
# Now you can run code!
#
# What follows are basic examples where all processing takes place on the GPU.
# + [markdown] id="V38dg-oUJtEO"
# #[cuDF](https://github.com/rapidsai/cudf)#
#
# Load a dataset into a GPU memory resident DataFrame and perform a basic calculation.
#
# Everything from CSV parsing to calculating tip percentage and computing a grouped average is done on the GPU.
#
# _Note_: You must import nvstrings and nvcategory before cudf, else you'll get errors.
# + id="EwaJSKuswsNi" colab={"base_uri": "https://localhost:8080/"} outputId="191920b7-a6b5-42ea-a09e-5ec1c15331af"
import cudf
import io, requests
# download CSV file from GitHub
url="https://github.com/plotly/datasets/raw/master/tips.csv"
content = requests.get(url).content.decode('utf-8')
# read CSV from memory
# CSV parsing, the tip-percentage arithmetic, and the grouped mean below all
# execute on the GPU.
tips_df = cudf.read_csv(io.StringIO(content))
tips_df['tip_percentage'] = tips_df['tip']/tips_df['total_bill']*100
# display average tip by dining party size
print(tips_df.groupby('size').tip_percentage.mean())
# + [markdown] id="Ul3UZJdUJqlT"
# #[cuML](https://github.com/rapidsai/cuml)#
#
# This snippet creates a small GPU DataFrame and clusters it with cuML's DBSCAN.
#
# As above, all calculations are performed on the GPU.
# + id="dCE8WhO3HpL_" colab={"base_uri": "https://localhost:8080/"} outputId="b1a171cd-9eb2-4962-e65d-5f2a7fb7acf0"
import cuml
# Create and populate a GPU DataFrame
# (three 3-D points; column names are the strings "0".."2")
df_float = cudf.DataFrame()
df_float['0'] = [1.0, 2.0, 5.0]
df_float['1'] = [4.0, 2.0, 1.0]
df_float['2'] = [4.0, 2.0, 1.0]
# Setup and fit clusters
# With min_samples=1 every point is a core point, so DBSCAN reduces to
# distance-threshold clustering at eps=1.0.
dbscan_float = cuml.DBSCAN(eps=1.0, min_samples=1)
dbscan_float.fit(df_float)
print(dbscan_float.labels_)
# + id="-wkbvG4V6K-N"
import cudf
import cupy
import matplotlib.pyplot as plt
from cuml.cluster import KMeans as cuKMeans
from cuml.datasets import make_blobs
from sklearn.cluster import KMeans as skKMeans
from sklearn.metrics import adjusted_rand_score
# %matplotlib inline
# + id="GyUEChHd6O5C"
# Synthetic-blobs benchmark configuration.
n_samples = 100000
n_features = 2
n_clusters = 5
random_state = 0
# + id="mYfvnhWd6Sfb"
# Generate well-separated 2-D blobs directly in GPU memory.
device_data, device_labels = make_blobs(n_samples=n_samples,
                                        n_features=n_features,
                                        centers=n_clusters,
                                        random_state=random_state,
                                        cluster_std=0.1)
device_data = cudf.DataFrame(device_data)
device_labels = cudf.Series(device_labels)
# + id="RMd0xglY6W1q"
# Copy dataset from GPU memory to host memory.
# This is done to later compare CPU and GPU results.
host_data = device_data.to_pandas()
host_labels = device_labels.to_pandas()
# + colab={"base_uri": "https://localhost:8080/"} id="ZkCY8f3Q6bOJ" outputId="e2a9d9c5-ecbf-439a-e0a5-1b2b4ffe4aab"
# %%time
# CPU k-means (scikit-learn) with k-means++ initialization.
# NOTE(review): `n_jobs=` was removed from KMeans in scikit-learn 1.0 —
# confirm the pinned version.
kmeans_sk = skKMeans(init="k-means++",
                     n_clusters=n_clusters,
                     n_jobs=-1,
                     random_state=random_state)
kmeans_sk.fit(host_data)
# + colab={"base_uri": "https://localhost:8080/"} id="8ip9DpQv6ejS" outputId="cb8dfe8a-c43a-4b0a-810d-4cdf40ab9502"
# %%time
# GPU k-means (cuML) with the scalable k-means|| initialization.
kmeans_cuml = cuKMeans(init="k-means||",
                       n_clusters=n_clusters,
                       oversampling_factor=40,
                       random_state=random_state)
kmeans_cuml.fit(device_data)
# + colab={"base_uri": "https://localhost:8080/", "height": 607} id="oxRK3OQA6hza" outputId="95661318-dfe6-4d82-ceff-347138f168b1"
# Scatter the data colored by true label, then overlay both centroid sets.
fig = plt.figure(figsize=(16, 10))
plt.scatter(host_data.iloc[:, 0], host_data.iloc[:, 1], c=host_labels, s=50, cmap='viridis')
#plot the sklearn kmeans centers with blue filled circles
centers_sk = kmeans_sk.cluster_centers_
plt.scatter(centers_sk[:,0], centers_sk[:,1], c='blue', s=100, alpha=.5)
#plot the cuml kmeans centers with red circle outlines
centers_cuml = kmeans_cuml.cluster_centers_
# NOTE(review): this accesses columns 0/1 of the centers object — presumably a
# cudf DataFrame in the pinned cuML version; verify.
plt.scatter(cupy.asnumpy(centers_cuml[0].values),
            cupy.asnumpy(centers_cuml[1].values),
            facecolors = 'none', edgecolors='red', s=100)
plt.title('cuml and sklearn kmeans clustering')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="1D9sAgHh6vGN" outputId="d3d6ebdb-10c1-4433-9831-bfed6f9668e6"
# %%time
# Agreement of each clustering with the ground-truth labels.
# NOTE(review): Series.to_array() is deprecated in newer cudf (use to_numpy()).
cuml_score = adjusted_rand_score(host_labels, kmeans_cuml.labels_.to_array())
sk_score = adjusted_rand_score(host_labels, kmeans_sk.labels_)
# + colab={"base_uri": "https://localhost:8080/"} id="sqtW5xVt6x86" outputId="d54c628e-0c68-441e-dc52-ca4e1098d0da"
# Declare the two clusterings "equal" when their adjusted Rand scores agree
# to within the threshold.
threshold = 1e-4
# Fix: the original one-sided test `(cuml_score - sk_score) < threshold` also
# "passed" when cuML scored arbitrarily WORSE than scikit-learn (a large
# negative difference is still below the threshold). Compare magnitudes.
passed = abs(cuml_score - sk_score) < threshold
print('compare kmeans: cuml vs sklearn labels_ are ' + ('equal' if passed else 'NOT equal'))
# + id="M-rxYQ_L6_9K"
import cudf
import numpy as np
import pandas as pd
import pickle
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier as skrfc
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
# + id="thKOaF9k7CG6"
# The speedup obtained by using cuML's Random Forest implementation
# becomes much higher when using larger datasets. Uncomment and use the n_samples
# value provided below to see the difference in the time required to run
# Scikit-learn's vs cuML's implementation with a large dataset.
# n_samples = 2**17
n_samples = 2**12
n_features = 399
n_info = 300
data_type = np.float32
# + colab={"base_uri": "https://localhost:8080/"} id="i-dK_ei67FB1" outputId="49063981-d91a-43c1-c144-6af5a2493fbe"
# %%time
# Synthetic binary classification problem: 399 features, 300 informative.
X,y = make_classification(n_samples=n_samples,
                          n_features=n_features,
                          n_informative=n_info,
                          random_state=123, n_classes=2)
X = pd.DataFrame(X.astype(data_type))
# cuML Random Forest Classifier requires the labels to be integers
y = pd.Series(y.astype(np.int32))
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size = 0.2,
                                                    random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="bdTcEDs-7Hdr" outputId="4b05b2dd-fa0c-4989-866c-fe2712c23d55"
# %%time
# Mirror the split onto the GPU (labels only needed for training there).
X_cudf_train = cudf.DataFrame.from_pandas(X_train)
X_cudf_test = cudf.DataFrame.from_pandas(X_test)
y_cudf_train = cudf.Series(y_train.values)
# + colab={"base_uri": "https://localhost:8080/"} id="Hpu4GzW07MFq" outputId="0f6e6408-52c5-47ea-f897-ff264fa626e9"
# %%time
# CPU random forest (scikit-learn).
sk_model = skrfc(n_estimators=40,
                 max_depth=16,
                 max_features=1.0,
                 random_state=10)
sk_model.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="vg9oE72u7Zme" outputId="58354be7-29a7-4adb-f3cb-4e123a25993f"
# %%time
sk_predict = sk_model.predict(X_test)
sk_acc = accuracy_score(y_test, sk_predict)
# + colab={"base_uri": "https://localhost:8080/"} id="HAheFUCh7ayb" outputId="34482d8a-38bb-4a51-ef3e-04c5084ed848"
# %%time
# GPU random forest (cuML) with matching hyper-parameters.
# NOTE(review): newer cuML renamed `seed` to `random_state` — confirm the
# pinned cuML version still accepts `seed`.
cuml_model = curfc(n_estimators=40,
                   max_depth=16,
                   max_features=1.0,
                   seed=10)
cuml_model.fit(X_cudf_train, y_cudf_train)
# + colab={"base_uri": "https://localhost:8080/"} id="1ZL4Ggqu7fHl" outputId="8d6c0e0e-14eb-44df-f111-75fca4c9840d"
# %%time
# Predict on the GPU and score against the host labels.
fil_preds_orig = cuml_model.predict(X_cudf_test)
fil_acc_orig = accuracy_score(y_test.to_numpy(), fil_preds_orig)
# + [markdown] id="Dlsyk9m9NN2K"
# # Next Steps #
#
# For an overview of how you can access and work with your own datasets in Colab, check out [this guide](https://towardsdatascience.com/3-ways-to-load-csv-files-into-colab-7c14fcbdcb92).
#
# For more RAPIDS examples, check out our RAPIDS notebooks repos:
# 1. https://github.com/rapidsai/notebooks
# 2. https://github.com/rapidsai/notebooks-contrib
| Copy_of_rapids_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import cPickle as pickle
from pydtw import dtw2d
from joblib import Parallel, delayed
def pickle_this(data, filename='distance_matrix.p'):
    """Serialize `data` to `filename` with pickle.

    Fix: the original opened the file without ever closing it; the context
    manager guarantees the handle is flushed and closed even on error.
    """
    with open(filename, 'wb') as handle:
        pickle.dump(data, handle)
def inner_loop(sample, i, n):
    """Compute row `i` of the pairwise DTW distance matrix.

    Only columns j >= i are filled (the matrix is symmetric); entries below
    the diagonal are left at 0 and must be ignored or mirrored by the caller.
    """
    row = np.zeros((n,))
    for j in xrange(i, n):  # xrange: this notebook runs on Python 2
        # dtw2d returns an accumulated-cost matrix; its bottom-right cell is
        # presumably the total alignment cost — verify against pydtw's docs.
        row[j] = dtw2d(sample[i], sample[j])[-1,-1]
    return row
# Load the pitch sample and compute a small 4x4 DTW distance matrix in
# parallel, one row per joblib task.
sample = pickle.load( open( 'sample_pitches.p', 'rb' ) )  # NOTE(review): file handle is never closed
n = 4
# NOTE(review): choice() samples WITH replacement by default, so the subset
# may contain duplicates — confirm that is acceptable.
idxs = np.random.choice(len(sample), n)
# NOTE(review): fancy indexing requires `sample` to be an ndarray — confirm
# the pickle holds an array rather than a plain list.
subset = sample[idxs]
distance_matrix = np.asmatrix(Parallel(n_jobs=6)(delayed(inner_loop)(subset, i, n) for i in range(n)))
print distance_matrix
| Parallel_DTW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import copy
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# -
def annotate_upper_left(ax, text, annotation_offset=(-50, 30)):
    """Place `text` near the upper-left corner of `ax`.

    The anchor is the axes' (0, 1) corner in axes-fraction coordinates;
    `annotation_offset` shifts the label from that corner in points.
    """
    placement = dict(xy=(0, 1), xycoords='axes fraction', fontsize=18,
                     textcoords='offset points', ha='left', va='top')
    ax.annotate(text, xytext=annotation_offset, **placement)
# +
# Load the precomputed influence-function results.
f = np.load('../output/components_results.npz')
# Indices of training points whose label was flipped.
flipped_idx = f['flipped_idx']
# Full influence values plus three ablations: dropping the train-loss term,
# the Hessian term, or both.
influences = f['influences']
influences_without_train_error = f['influences_without_train_error']
influences_without_hessian = f['influences_without_hessian']
influences_without_both = f['influences_without_both']
# Digit images shown in the side panels (reshaped to 28x28 below).
test_image = f['test_image']
harmful_train_image = f['harmful_train_image']
# +
sns.set_style('white')
# Color each training point by whether its label was flipped (red) or not (green).
color_vec = np.array(['g'] * len(flipped_idx))
color_vec[flipped_idx] = 'r'
color_vec = list(color_vec)
# Five panels: three ablation scatter plots (the first three share a y-axis)
# followed by the test image and the most harmful training image.
fig = plt.figure(figsize=(17, 3))
ax = fig.add_subplot(1, 5, 1)
axs = [ax] + [fig.add_subplot(1, 5, i, sharey=ax) for i in [2, 3]]
ax = fig.add_subplot(1, 5, 4)
axs = axs + [ax] + [fig.add_subplot(1, 5, 5)]
# Each scatter compares an ablated influence estimate (x) to the full one (y).
axs[0].scatter(influences_without_train_error, influences, c=color_vec)
axs[0].set_xlabel('$-\mathcal{I}_\mathrm{up, loss}$ (without train loss)', fontsize=14)
axs[0].set_ylabel('$-\mathcal{I}_\mathrm{up, loss}$', fontsize=14)
annotate_upper_left(axs[0], '(a)', (-40, 0))
axs[1].scatter(influences_without_hessian, influences, c=color_vec)
# axs[1].set_xlabel('Influence (no $H$)')
axs[1].set_xlabel('$-\mathcal{I}_\mathrm{up, loss}$ (without $H_{\hat\\theta}$)', fontsize=14)
axs[2].scatter(influences_without_both, influences, c=color_vec)
# axs[2].set_xlabel('Influence (no train error and $H$)')
axs[2].set_xlabel('$-\mathcal{I}_\mathrm{up, loss}$ (without train loss & $H_{\hat\\theta}$)', fontsize=14)
axs[3].imshow(np.reshape(test_image, (28, 28)), cmap='gray', interpolation='none')
axs[3].set_title('Test image', fontsize=14)
axs[3].set_xlabel('Label: 7', fontsize=14)
annotate_upper_left(axs[2], '(b)', (175, 0))
axs[4].imshow(np.reshape(harmful_train_image, (28, 28)), cmap='gray', interpolation='none')
axs[4].set_title('Harmful training image', fontsize=14)
axs[4].set_xlabel('Label: 7', fontsize=14)
# Hide tick labels on the scatter panels and nudge the image panels down/right
# so their titles clear the shared row.
for ax in axs[:3]:
    plt.setp(ax.get_yticklabels(), visible=False)
for ax in axs[3:]:
    bbox = ax.get_position()
    bbox.y0 -= 0.06
    bbox.y1 -= 0.06
    bbox.x0 += 0.01
    bbox.x1 += 0.01
    ax.set_position(bbox)
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)
# plt.savefig(
#     '../figs/fig-components.png',
#     dpi=600, bbox_inches='tight')
# -
| scripts/fig1_influence_components.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support
# %matplotlib inline
# Target column is `Label`; everything else is treated as a feature.
data=pd.read_csv('labelled_data_new.csv')
y=data.Label
X=data.drop(['Label'],axis=1)
data
data.columns
# Metric slots for the five models compared below, in order:
# [SVM linear, SVM rbf, SVM grid search, random forest, RF grid search].
prec=[0,0,0,0,0]
rec=[0,0,0,0,0]
f1=[0,0,0,0,0]
# NOTE(review): test_size=0.7 holds out 70% of the data and trains on only
# 30% — confirm this split is intentional.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.7,random_state=10)
# Keep an unscaled copy of the test features for inspection at the end.
X_test2=X_test.copy()
# copy=False makes fit_transform/transform scale the frames in place, which
# is why their return values are deliberately discarded.
sc=StandardScaler(copy=False)
sc.fit_transform(X_train)
sc.transform(X_test)
X_train
# Baseline: linear-kernel SVM on the standardized features, scored with
# macro-averaged precision/recall/F1.
clf = SVC(kernel='linear')
clf.fit(X_train, y_train)
pre = clf.predict(X_test)
prec[0], rec[0], f1[0], _ = precision_recall_fscore_support(y_test, pre, average='macro')
# Fix: the original message was a single string literal broken across two
# source lines, which is a SyntaxError; use implicit string concatenation.
print("SVM with linear kernel - Precision = %0.3f, Recall = %0.3f, "
      "F1-score = %0.3f" % (prec[0], rec[0], f1[0]))
# RBF-kernel SVM with default hyper-parameters.
clf2=SVC(kernel='rbf')
clf2.fit(X_train,y_train)
pre2=clf2.predict(X_test)
prec[1],rec[1],f1[1],_=precision_recall_fscore_support(y_test,pre2,average='macro')
print("SVM with rbf kernel - Precision = %0.3f, Recall = %0.3f, F1-score = %0.3f"
%(prec[1],rec[1],f1[1]))
# Exhaustive grid search over kernel / C / gamma, model-selected by macro-F1.
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
clf3 = GridSearchCV(SVC(), tuned_parameters,scoring='f1_macro')
clf3.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf3.best_params_)
# Mean cross-validated macro-F1 for every parameter combination tried.
means = clf3.cv_results_['mean_test_score']
for mean, params in zip(means, clf3.cv_results_['params']):
    print("%0.3f for %r"% (mean, params))
# Evaluate the refit best estimator on the held-out test set.
pre3=clf3.predict(X_test)
prec[2],rec[2],f1[2],_=precision_recall_fscore_support(y_test,pre3,average='macro')
print("Gridsearch with SVM - Precision = %0.3f, Recall = %0.3f, F1-score = %0.3f"
%(prec[2],rec[2],f1[2]))
# Random forest baseline with default hyper-parameters.
clf4 = RandomForestClassifier(random_state=0)
clf4.fit(X_train, y_train)
pre4 = clf4.predict(X_test)
prec[3], rec[3], f1[3], _ = precision_recall_fscore_support(y_test, pre4, average='macro')
# Fix: the original message was a string literal broken across two source
# lines, which is a SyntaxError; use implicit string concatenation.
print("Random forest classifier - Precision = %0.3f, Recall = %0.3f, "
      "F1-score = %0.3f" % (prec[3], rec[3], f1[3]))
# Grid search over forest size, model-selected by macro-F1.
param2 = {'n_estimators': [10, 100, 500, 1000]}
clf5 = GridSearchCV(RandomForestClassifier(random_state=0), param2, scoring='f1_macro')
clf5.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf5.best_params_)
# Mean cross-validated macro-F1 for each candidate forest size.
means1 = clf5.cv_results_['mean_test_score']
for mean1, params1 in zip(means1, clf5.cv_results_['params']):
    print("%0.3f for %r" % (mean1, params1))
pre5 = clf5.predict(X_test)
prec[4], rec[4], f1[4], _ = precision_recall_fscore_support(y_test, pre5, average='macro')
# Fixes: the original message was a string literal broken across two source
# lines (SyntaxError), and "forst" was a typo for "forest".
print("Gridsearch with Random forest classifier - Precision = %0.3f, Recall = %0.3f, "
      "F1-score = %0.3f" % (prec[4], rec[4], f1[4]))
# Compare the five models' macro precision/recall/F1 on a single chart.
plt.figure(figsize=(15,5))
sns.lineplot(x=['SVM linear ','SVM rbf','SVM grid search','RandomForest','RandomForest gridsearch'],y=prec,label='precision')
sns.lineplot(x=['SVM linear ','SVM rbf','SVM grid search','RandomForest','RandomForest gridsearch'],y=rec,label='recall')
sns.lineplot(x=['SVM linear ','SVM rbf','SVM grid search','RandomForest','RandomForest gridsearch'],y=f1,label='f1-score')
# Show the unscaled test features next to their true labels, and the
# grid-searched SVM's predictions realigned to the test-set index.
pd.concat([X_test2,y_test],axis=1)
pd.Series(pre3,index=X_test.index)
| Model-implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_is590dv-default)
# language: python
# name: conda_is590dv-default
# ---
# %matplotlib inline
import cartopy
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["figure.dpi"] = 300
# Download (or load the cached copy of) the Natural Earth 110m
# state/province boundary shapefile.
states = cartopy.io.shapereader.natural_earth(resolution='110m', category='cultural',
                                              name='admin_1_states_provinces_lakes_shp')
reader = cartopy.io.shapereader.Reader(states)
# Simple equirectangular (PlateCarree) world map with coastlines.
fig = plt.figure()
ax = fig.add_subplot(111, projection = cartopy.crs.PlateCarree())
ax.coastlines()
plt.title("Equirectangular");
# Reference point (roughly upstate New York); not used by the code below.
ny_lon, ny_lat = -75, 43
def make_proj(proj_name):
    """Render a named cartopy projection twice: plain, then with Tissot indicatrices.

    Saves images/<name>.png and images/<name>_tissot.png.

    Parameters
    ----------
    proj_name : str
        Attribute name of a projection class in cartopy.crs (e.g. "Mercator").
    """
    import os  # local import: `os` is not imported at the top of this notebook
    # Robustness fix: savefig raises FileNotFoundError if the target
    # directory does not exist yet.
    os.makedirs("images", exist_ok=True)
    fig = plt.figure()
    proj = getattr(cartopy.crs, proj_name)
    ax = fig.add_subplot(111, projection = proj())
    ax.gridlines()
    ax.coastlines()
    ax.set_global()
    plt.title(proj_name)
    plt.savefig("images/{}.png".format(proj_name.lower()))
    # Tissot's indicatrices visualize the projection's local distortion.
    ax.tissot(500, alpha=0.25, facecolor='red')
    plt.savefig("images/{}_tissot.png".format(proj_name.lower()))
# Render every projection in the list, reporting progress as we go.
for proj in ["Mercator", "PlateCarree", "Gnomonic", "TransverseMercator", "LambertCylindrical", "Mollweide", "Sinusoidal"]:
    make_proj(proj)
    print("Done with", proj)
# Compare a straight line in projected coordinates (PlateCarree transform)
# with the true great-circle route (Geodetic transform) between
# Champaign, IL and Antananarivo, Madagascar — first on a PlateCarree map,
# then on a Mollweide map.
champaign_lat, champaign_lon = 40.1164, -88.2434
antananarivo_lat, antananarivo_lon = -18.8792, 47.5079
fig = plt.figure()
ax = fig.add_subplot(111, projection = cartopy.crs.PlateCarree())
ax.gridlines()
ax.coastlines()
ax.set_global()
ax.plot([champaign_lon, antananarivo_lon], [champaign_lat, antananarivo_lat], transform = cartopy.crs.PlateCarree())
ax.plot([champaign_lon, antananarivo_lon], [champaign_lat, antananarivo_lat], transform = cartopy.crs.Geodetic())
fig = plt.figure()
ax = fig.add_subplot(111, projection = cartopy.crs.Mollweide())
ax.gridlines()
ax.coastlines()
ax.set_global()
ax.plot([champaign_lon, antananarivo_lon], [champaign_lat, antananarivo_lat], transform = cartopy.crs.PlateCarree())
ax.plot([champaign_lon, antananarivo_lon], [champaign_lat, antananarivo_lat], transform = cartopy.crs.Geodetic())
| week07/prep_notebook_week08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import matplotlib.pyplot as plt
# %matplotlib inline
def get_info_json(filename):
    """Load one BDD100k "info" JSON file from the hard-coded train directory.

    Parameters
    ----------
    filename : str
        File name (not a full path) inside the train info directory.

    Returns
    -------
    dict
        The parsed JSON payload.
    """
    # Fix: `os` is only imported in a much later cell, so calling this
    # function in top-to-bottom execution raised NameError; import locally.
    import os
    with open(os.path.join("/media/urw7rs/hdd/bdd100k/info/100k/train", filename)) as f:
        raw_json = json.load(f)
    return raw_json
# +
# NOTE(review): this cell depends on a `label` dict that is not defined in
# any earlier cell shown here; running top-to-bottom raises NameError.
# Re-shape the per-second location records into a dict of parallel lists,
# keyed by field name.
location_keys = label["locations"][0].keys()
print(location_keys)
loc = {}
for k in location_keys:
    loc[k] = [l[k] for l in label["locations"]]
# Same column-wise restructuring for the GPS records, when present.
if "gps" in label.keys():
    gps_keys = label["gps"][0].keys()
    print(gps_keys)
    gps = {}
    for k in gps_keys:
        gps[k] = [l[k] for l in label["gps"]]
# +
import json
def graph_imu(filename, gyro=True, accel=True, gps=True, locations=True, bias=0.0, delay=0):
    """Plot the IMU and GPS traces stored in one BDD100k info JSON file.

    Parameters
    ----------
    filename : str
        JSON file name inside the hard-coded train directory.
    gyro, accel, gps, locations : bool
        Enable/disable the corresponding plot groups.
    bias : float
        Constant offset (deg/s) added to the gyro x trace when it is
        overlaid on the GPS-derived yaw rate, to compensate for gyro bias.
    delay : int
        Offset in milliseconds applied to the GPS-derived yaw-rate samples
        so the two yaw-rate sources can be aligned by eye.
    """
    label = get_info_json(filename)
    if gyro:
        # Angular rates: timestamps rebased to 0, rad/s converted to deg/s.
        gyro = label["gyro"]
        gyro_t = np.array([l["timestamp"] for l in gyro])
        gyro_t -= gyro_t[0]
        gyro_x = np.array([l["x"] for l in gyro])
        gyro_y = np.array([l["y"] for l in gyro])
        gyro_z = np.array([l["z"] for l in gyro])
        fig, axs = plt.subplots(3, 1, figsize=(24, 36))
        axs[0].plot(gyro_t, 180 * gyro_x / np.pi, label="x")
        axs[1].plot(gyro_t, 180 * gyro_y / np.pi, label="y", color="orange")
        axs[2].plot(gyro_t, 180 * gyro_z / np.pi, label="z", color="green")
        axs[0].set_title("x axis gyro", fontsize=25);
        axs[1].set_title("y axis gyro", fontsize=25);
        axs[2].set_title("z axis gyro", fontsize=25);
        for i in range(3):
            axs[i].set_xlabel("ms", fontsize=18);
            axs[i].set_ylabel("degree/s", fontsize=18);
        plt.show()
    if accel:
        accel = label["accelerometer"]
        accel_t = np.array([l["timestamp"] for l in accel])
        accel_t -= accel_t[0]
        accel_x = np.array([l["x"] for l in accel])
        accel_y = np.array([l["y"] for l in accel])
        accel_z = np.array([l["z"] for l in accel])
        fig, axs = plt.subplots(3, 1, figsize=(24, 36))
        axs[0].plot(accel_t, accel_x, label="x");
        # NOTE(review): the y channel is plotted as its first difference,
        # unlike x and z — confirm this is intentional.
        accel_y = accel_y[1:] - accel_y[:-1]
        axs[1].plot(accel_t[1:], accel_y, label="y", color="orange", linewidth=1);
        axs[2].plot(accel_t, accel_z, label="z", color="green");
        axs[0].set_title("x axis accelerometer", fontsize=25);
        axs[1].set_title("y axis accelerometer", fontsize=25);
        axs[2].set_title("z axis accelerometer", fontsize=25);
        for i in range(3):
            axs[i].set_xlabel("ms", fontsize=18);
            axs[i].set_ylabel("m/s^2", fontsize=18);
        plt.show()
    if gps:
        if "gps" in label.keys():
            # Restructure the GPS records column-wise and plot speed vs time.
            gps_keys = label["gps"][0].keys()
            gps = {}
            for k in gps_keys:
                gps[k] = [l[k] for l in label["gps"]]
            vel = np.array(gps["speed"])
            vel_t = np.array(gps["timestamp"])
            vel_t -= vel_t[0]
            plt.figure(figsize=(24, 12))
            plt.plot(vel_t, vel, label="speed")
            plt.title("speed", fontsize=25)
            plt.xlabel("ms", fontsize=18)
            plt.ylabel("m/s", fontsize=18)
            plt.show()
    # Mean x acceleration approximates the projection of gravity onto the
    # device's x axis; its sign tells us whether the phone was mounted flipped.
    gravity = np.array([l["x"] for l in label["accelerometer"]]).mean()
    if locations:
        if "locations" in label.keys():
            loc_keys = label["locations"][0].keys()
            loc = {}
            for k in loc_keys:
                loc[k] = [l[k] for l in label["locations"]]
            # Yaw rate (deg/s) from the 1 Hz GPS course readings.
            course = loc["course"]
            course = np.array(course)
            yaw_rate = course[1:] - course[:-1]
            plt.figure(figsize=(24, 12))
            # Fix: these two branches were swapped in the original, so a
            # non-zero `delay` was silently ignored (and delay=0 took the
            # delay-shifted path).
            if delay:
                # NOTE(review): if `delay` is not a multiple of 1000 the range
                # length may not match len(yaw_rate) — confirm callers.
                plt.scatter(range(delay, (len(yaw_rate) + delay // 1000)* 1000, 1000), yaw_rate, label="yaw rate", color="red")
            else:
                plt.scatter(range(1000, (len(yaw_rate) + 1) * 1000, 1000), yaw_rate, label="yaw rate", color="red")
            gyro = label["gyro"]
            gyro_t = np.array([l["timestamp"] for l in gyro])
            gyro_t -= gyro_t[0]
            gyro_x = np.array([l["x"] for l in gyro])
            # Flip the gyro sign when the device was mounted upside down.
            if gravity < 0:
                gyro_x *= -1
            plt.plot(gyro_t, 180 * gyro_x / np.pi + bias, label="gyro", alpha=0.5)
            plt.legend(fontsize=25)
            plt.title("yaw rate", fontsize=25)
            plt.xlabel("s", fontsize=18)
            plt.ylabel("degrees per second", fontsize=18)
            plt.show()
            plt.figure(figsize=(24, 12))
            plt.plot(course, label="course")
            plt.title("course", fontsize=25)
            plt.xlabel("s", fontsize=18)
            plt.ylabel("degrees", fontsize=18)
            plt.show()
    print("stats")
    print(f"gravity: {gravity}")
def check_gravity(filename):
    """Return True when the recording's mean x acceleration is >= -0.5 m/s^2.

    A strongly negative mean x acceleration suggests the phone was mounted
    with its x axis pointing against gravity (a "flipped" clip), in which
    case False is returned.

    Fixes two bugs in the original: the loaded JSON was assigned to a
    misspelled name (`laebl`), and the accelerometer list (`accel`) was
    never extracted — so every call raised NameError.
    """
    label = get_info_json(filename)
    accel = label["accelerometer"]
    accel_x = np.array([l["x"] for l in accel])
    if accel_x.mean() < -0.5:
        return False
    else:
        return True
# +
import os
import numpy as np
json_list = os.listdir("/media/urw7rs/hdd/bdd100k/info/100k/train")
# Interesting indices found while exploring the data set:
#index = 222 # car doesn't move at all but yaw rate has non-zero values
#index = 223 # synchronizes at 0
index = 1532 # gyro is biased
#index = 26326 # reverses
index = 1
print(json_list[index])
graph_imu(json_list[index], gyro=True, accel=True, gps=True, locations=True, bias=-0.5, delay=1100)
# Fix: the cell originally ended with a bare `yaw` expression, but no `yaw`
# variable is ever defined anywhere, so the cell always raised NameError.
# yaw
# -
# Count "flipped" recordings (negative mean x-accel) in the remaining files.
# The immediate `break` deliberately disables the scan ("stop kernel from
# hanging"); delete it to actually run the loop.
count = 0
n = 0
for name in json_list[1549:]:
    # stop kernel from hanging
    break
    n += 1
    # NOTE(review): `if n % 1000:` prints on every iteration EXCEPT multiples
    # of 1000 — probably `n % 1000 == 0` was intended.
    if n % 1000:
        print(n)
    if not check_gravity(name):
        count += 1
        print(count)
        #print(name)
# Normalize single quotes so the payload parses as strict JSON.
with open("/media/urw7rs/hdd/bdd100k/info/100k/train/82c4aa0a-3c44e82d.json") as f:
    x = f.read().replace("\'", "\"")
# NOTE(review): this second replace is redundant — the same substitution was
# already applied on read above.
x = x.replace("\'", "\"")
# Find the first file that carries both `locations` and `gps` records.
n = 0
for name in json_list:
    label = get_info_json(name)
    n += 1
    if n % 1000 == 0:
        print(label.keys())
    if "locations" in label.keys() and "gps" in label.keys():
        print(name)
        break
# Inspect the keys of one specific recording.
label = get_info_json("3e0f1745-670fdc20.json")
print(label.keys())
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.8 64-bit
# metadata:
# interpreter:
# hash: 4cd7ab41f5fca4b9b44701077e38c5ffd31fe66a6cab21e0214b68d958d0e462
# name: python3
# ---
# # Line Plots and Filling Areas
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Years of experience and the corresponding annual salary for 30 employees.
years = [1.1, 1.3, 1.5, 2.0, 2.2, 2.9, 3.0, 3.2, 3.2, 3.7, 3.9, 4.0, 4.0, 4.1, 4.5, 4.9, 5.1, 5.3, 5.9, 6.0, 6.8, 7.1, 7.9, 8.2, 8.7, 9.0, 9.5, 9.6, 10.3, 10.5]
salary = [39343.00, 46205.00, 37731.00, 43525.00, 39891.00, 56642.00, 60150.00, 54445.00, 64445.00, 57189.00, 63218.00, 55794.00, 56957.00, 57081.00, 61111.00, 67938.00, 66029.00, 83088.00, 81363.00, 93940.00, 91738.00, 98273.00, 101302.00, 113812.00, 109431.00, 105582.00, 116969.00, 112635.00, 122391.00, 121872.00]
# -
# ## Line Plots
# +
# Salary vs experience as a solid line with circle markers; the figure is
# written out for use in the repo's assets.
plt.plot(years,
         salary,
         marker="o",
         markersize=5,
         lw=2,
         ls="-",
         )
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.title("Salary and Years of Experience")
plt.savefig("../assets/matplotlib/line_plots.jpg")
# -
# ## Filling Areas on Line Plots
#
# Documentation of [matplotlib.pyplot.fill_between](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill_between.html)
# ### Fill between zero and values
# +
# Shade the full area between the x-axis and the salary curve.
plt.fill_between(years,
                 salary,
                 alpha=.4,
                 color="green",
                 edgecolor="black",
                 lw=3)
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.title("Salary and Years of Experience")
plt.savefig("../assets/matplotlib/filling_area.jpg")
# -
# ### Fill between values and assigned value
# Reference level: the mean salary across all employees.
salary_mean = np.mean(salary)
salary_mean
# +
# Shade green where the salary exceeds the mean, red where it falls below;
# interpolate=True makes the two regions meet exactly at the crossing points.
plt.fill_between(years,
                 salary,
                 salary_mean,
                 where=(salary > salary_mean),
                 alpha=.4,
                 color="green",
                 edgecolor="black",
                 interpolate=True,
                 label="On Average"
                 )
plt.fill_between(years,
                 salary,
                 salary_mean,
                 where=(salary <= salary_mean),
                 alpha=.4,
                 color="red",
                 edgecolor="black",
                 interpolate=True,
                 label="Under Average"
                 )
plt.title("Salary and Years of Experience")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.legend()
plt.savefig("../assets/matplotlib/filling_area_with_mean.jpg")
# -
# # Reference
#
# - https://matplotlib.org/stable/gallery/lines_bars_and_markers/simple_plot.html
# - https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill_between.html
# - https://www.youtube.com/watch?v=x0Uguu7gqgk
| matplotlib/plot_and_filling_area.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import sqlite3
def load_day(day, data_dir='data'):
    """Read one day of January-2013 SIRI bus telemetry into a DataFrame.

    Parameters
    ----------
    day : int
        Day of the month (1-31); selects the file ``siri.201301<day>.csv``.
    data_dir : str, optional
        Directory containing the CSV files. Defaults to ``'data'``, matching
        the previously hard-coded layout (backward compatible).

    Returns
    -------
    pandas.DataFrame
        One row per vehicle position report; missing ``line_id``/``stop_id``
        values are replaced by 0 and cast to int32.
    """
    header = ['timestamp', 'line_id', 'direction', 'jrny_patt_id', 'time_frame', 'journey_id', 'operator',
              'congestion', 'lon', 'lat', 'delay', 'block_id', 'vehicle_id', 'stop_id', 'at_stop']
    types = {'timestamp': np.int64,
             'journey_id': np.int32,
             'congestion': np.int8,
             'lon': np.float64,
             'lat': np.float64,
             'delay': np.int8,
             'vehicle_id': np.int32,
             'at_stop': np.int8}
    file_name = '{0}/siri.201301{1:02d}.csv'.format(data_dir, day)
    # Note: `infer_datetime_format` (a pure performance hint) was dropped —
    # it is deprecated in pandas 2.x; parse_dates alone parses time_frame.
    df = pd.read_csv(file_name, header=None, names=header, dtype=types,
                     parse_dates=['time_frame'])
    # line_id / stop_id may be empty in the raw feed; store them as 0 rather
    # than NaN so they can be held as integers.
    null_replacements = {'line_id': 0, 'stop_id': 0}
    df = df.fillna(value=null_replacements)
    df['line_id'] = df['line_id'].astype(np.int32)
    df['stop_id'] = df['stop_id'].astype(np.int32)
    # `timestamp` is deliberately kept as raw int64 microseconds:
    # df['timestamp'] = pd.to_datetime(df['timestamp'], unit='us')
    return df
def haversine_np(lon1, lat1, lon2, lat2):
    """Great-circle distance in meters between points given in decimal degrees.

    All four arguments may be scalars or equal-length NumPy arrays; the
    computation is fully vectorized. Uses the haversine formula in its
    atan2 form for numerical stability near antipodal points.
    """
    lon1, lat1, lon2, lat2 = (np.deg2rad(v) for v in (lon1, lat1, lon2, lat2))
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    hav = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    central_angle = 2 * np.arctan2(np.sqrt(hav), np.sqrt(1.0 - hav))
    # Mean Earth radius of 6,372 km converts the angle to meters.
    return 6372000.0 * central_angle
def calculate_durations(data_frame, vehicle_id):
    """Per-sample elapsed time, in seconds, for one vehicle's readings.

    Selects the rows belonging to `vehicle_id` (assumed already in time
    order) and returns a float array of the same length: entry 0 is 0.0 and
    entry i is the gap between reading i-1 and reading i.
    """
    nanoseconds_per_second = np.timedelta64(1000000000, 'ns')
    timestamps = data_frame.loc[data_frame['vehicle_id'] == vehicle_id, 'timestamp'].values
    gaps = np.diff(timestamps) / nanoseconds_per_second
    return np.concatenate(([0.0], gaps))
def calculate_distances(data_frame, vehicle_id):
    """Per-sample distance travelled, in meters, for one vehicle's readings.

    Mirrors calculate_durations: entry 0 is 0.0 and entry i is the
    great-circle distance between consecutive GPS fixes (via haversine_np).
    """
    vehicle_rows = data_frame[data_frame['vehicle_id'] == vehicle_id]
    lats = vehicle_rows.lat.values
    lons = vehicle_rows.lon.values
    segment_lengths = haversine_np(lons[1:], lats[1:], lons[:-1], lats[:-1])
    return np.concatenate(([0.0], segment_lengths))
def filter_columns(df):
    """Project the raw SIRI frame down to the columns kept for the database."""
    keep = ['timestamp', 'direction', 'journey_id', 'congestion',
            'lon', 'lat', 'delay', 'vehicle_id', 'stop_id', 'at_stop']
    return df[keep]
def create_table(conn):
    """Create the `locations` table that insert_row populates.

    Note: the schema intentionally matches insert_row and omits the `delay`
    column even though filter_columns keeps it in the DataFrame.
    """
    ddl = "CREATE TABLE locations (id integer primary key, timestamp, direction, journey_id, congestion, lon, lat, vehicle_id, stop_id, at_stop)"
    conn.cursor().execute(ddl)
    conn.commit()
def insert_row(conn, row):
    """Insert one location record (dict-like `row`) and commit immediately."""
    fields = ('timestamp', 'direction', 'journey_id', 'congestion',
              'lon', 'lat', 'vehicle_id', 'stop_id', 'at_stop')
    values = tuple(row[name] for name in fields)
    conn.cursor().execute(
        "insert into locations (timestamp, direction, journey_id, congestion, lon, lat, vehicle_id, stop_id, at_stop) values (?,?,?,?,?,?,?,?,?)",
        values)
    conn.commit()
# +
# One-time initialization: create the SQLite database file and its schema.
# (Re-running this cell against an existing file raises OperationalError
# because the table already exists.)
conn = sqlite3.connect("data/dublin-bus.db")
create_table(conn)
conn.close()
# +
# Convert the whole month: for each day, load the CSV, derive per-vehicle
# time gaps (dt), distances (dx) and speeds, drop implausible fixes, and
# stream the surviving rows into SQLite.
conn = sqlite3.connect("data/dublin-bus.db")
for d in range(31):
    print("Day {0}".format(d+1))
    day = filter_columns(load_day(d+1))
    day['dt'] = 0.0
    day['dx'] = 0.0
    day['speed'] = 0.0
    vehicles = day['vehicle_id'].unique()
    for v in vehicles:
        vehicle_selector = day['vehicle_id']==v
        day.loc[vehicle_selector,'dt'] = calculate_durations(day, v)
        day.loc[vehicle_selector,'dx'] = calculate_distances(day, v)
    # Speed in km/h; rows with dt == 0 keep the default speed of 0.
    speed_selector = day['dt'] > 0
    day.loc[speed_selector,'speed'] = day[speed_selector].dx / day[speed_selector].dt * 3.6
    # Filter invalid points (speeds over 100 km/h)
    day = day[day['speed'] < 100.0]
    # Fix: DataFrame.apply defaults to axis=0 (column-wise), which would pass
    # whole COLUMNS to insert_row instead of rows; axis=1 applies it row-wise.
    day.apply(lambda row: insert_row(conn, row), axis=1)
conn.close()
# -
| convert-db.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Occupation
# ### Introduction:
#
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
import pandas as pd
from IPython.display import display
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user).
# ### Step 3. Assign it to a variable called users.
# The u.user file is pipe-delimited: user_id | age | gender | occupation | zip_code.
users = pd.read_csv("https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user", sep="|")
display(users.columns)
display(users.head())
display(users.info())
# ### Step 4. Discover what is the mean age per occupation
users.groupby("occupation").age.mean()
# ### Step 5. Discover the Male ratio per occupation and sort it from the most to the least
# For each occupation: fraction of rows whose gender is "M", sorted descending.
users.groupby("occupation").apply(lambda gg: pd.Series([len(gg[gg.gender == "M"]) / len(gg)], index=["male_ratio"])).sort_values("male_ratio",ascending=False)
# ### Step 6. For each occupation, calculate the minimum and maximum ages
users.groupby("occupation").age.agg(["min", "max"])
# ### Step 7. For each combination of occupation and gender, calculate the mean age
users.groupby(["occupation", "gender"]).age.mean()
# ### Step 8. For each occupation present the percentage of women and men
# Builds a two-column frame (male_ratio, female_ratio) per occupation; the
# two ratios sum to 1 for every group.
users.groupby("occupation").apply(lambda gg: pd.Series([len(gg[gg.gender == "M"]) / len(gg), len(gg[gg.gender=="F"])/len(gg)], index=["male_ratio", "female_ratio"]))
| other/pandas_exercises/03_Grouping/Occupation/Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="http://imgur.com/1ZcRyrc.png" style="float: left; margin: 20px; height: 55px">
#
# ## Homework: Plotting With Pandas
#
# _Authors: <NAME> (L.A.)_
#
# ---
#
#
# Welcome!
# #### Pandas: Plotting Practice Problems
#
# In this homework, you're going to write code for a few problems.
#
# You'll practice the following programming concepts we've covered in class:
# * Plotting with Pandas.
# * Determining best plot given a data set.
# #### #1. Import Pandas, `Matplotlib.pyplot`, and NumPy. Don't forget the line that makes `matplotlib` render in a Jupyter Notebook!
# #### #2. Read in the NBA players `csv` into a variable called `nba_df`.
# This is a data set of NBA players from 2015. The filename is `NBA_players_2015.csv`.
# #### #3. Look at the first five rows of the data set.
# #### #4. Create a histogram of the `age` column.
# #### #5. Create a histogram of the `age` column, but change the number of bins to `20`.
# #### #6. Discuss the difference in the two plots and the implications.
# Although both histograms show the same right-skewed age distribution, the plot with fewer bins is misleading: it suggests the bin just to the right of the tallest bin is the second largest. With 20 bins it becomes clear that the second-largest group of ages actually falls just after 22 and before 25.
# #### #7. Rename the `position` column `'pos'` with the following `C:5`, `G:1`, and `F:3`. Then create a scatter matrix plot with the `'pos'`, `'pts'`, `'age'`, and `'fg'` columns.
# #### #8. Plot the number of guards, centers, and forwards in this data set.
# The end!
# #### Great job!
| unit-6-pandas/instructor-resources/hw-10wk-13d/pandas_plotting_hw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Languages/FrenchVerbCodingConjugation/French-Verb-Conjugation.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + tags=["hide-input"] language="html"
#
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){
# code_shown=false;
# $('div.input').hide()
# });
# </script>
# <p> Code is hidden for ease of viewing. Click the Show/Hide button to see. </p>
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# + tags=["hide-input"]
import numpy as np
#import matplotlib.pyplot as plt
from IPython.display import display, Math, Latex, HTML, clear_output, Markdown, Javascript
import ipywidgets as widgets
from ipywidgets import interact, FloatSlider, IntSlider, interactive, Layout
from traitlets import traitlets
#module to conjugate
#import mlconjug
#from functools import partial
#import pickle
import plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# + [markdown] hidden=false
# # French Verb Conjugation
#
# ----
#
# ## Introduction
#
# In this Jupyter Notebook by Callysto you will learn about French verb conjugation. Mastering the basics of verb conjugation is essential to reading and writing in French. There are some basic rules (and exceptions) that we will address.
#
# Because much of conjugation is algorithmic, one can write computer code to do the task for us. If you are interested in the programming aspects, please see the related notebook [French-Verb-Coding](CC-186-French-Verb-Coding.ipynb).
#
# #### Necessary background
# - Some basic knowledge of French
# - Elementary Python syntax
#
# #### Outline of this notebook
#
# We will cover several important topics
# - a review of personal pronouns in French
# - two important verbs, Être and Avoir
# - the regular verbs, with endings "-er", "-ir" and "-re"
# - exceptions to the regular verbs
#
# #### Allons-y!
# + [markdown] hidden=false
# ## Personal pronouns
#
# Conjugation is the process of forcing the verb in a sentence to "agree" with the subject of that sentence. Typically, the subject of a sentence is a pronoun, so to start conjugating verbs, we can review the personal pronouns in French.
#
# Below is table showing the subject pronouns in French. These will be used to separate the different cases of verb conjugation.
# + tags=["hide-input"]
#table for personal pronouns using plotly
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
english = ['I','you','she, he, one','we','you (plural or formal)','they']
person = ['First','Second','Third','First (plural)','Second (plural)','Third (plural)']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Person','French','English'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [person,french,english],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(
width=750,
height=450
)
# margin=go.layout.Margin(
# l=0,
# r=0,
# b=0,
# t=0,
# pad=0
# )
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# Our verb conjugation rules will be based on these personal pronouns, so it is good to get familiar with their translations. French distinguishes between all of these different pronouns based on their person, whether or not they are masculine or feminine, and whether they are plural or singular.
#
#
# ## Two Important Verbs
#
# Let's jump right to conjugating the two (arguably) most important verbs: To Be and To Have.
#
#
#
# ## 1. Être (to be)
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
etre_conjug = ['suis','es','est','sommes','êtes','sont']
trace0 = go.Table(
columnorder = [1,2],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,etre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(
width=500,
height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# To use these in a sentence, you could write something like:
# - Je suis un garçon.
# - Elle est une fille.
# - Nous sommes tous les humaines.
#
# Notice how in each sentence, the form of the verb changes to match subject pronoun.
#
# "Être" is an irregular verb, that does not obey a certain format, if you will, for conjugating verbs in the present tense. There many examples of exceptions, which we will explore further. But first, the next most important verb:
# + [markdown] hidden=false
# ## 2. Avoir (to have)
# + tags=["hide-input"]
french = ["j'",'tu','elle, il, on','nous','vous','elles, ils']
avoir_conjug = ['ai','as','a','avons','avez','ont']
trace0 = go.Table(
columnorder = [1,2],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,avoir_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# Notice for the first person singular we have *j'* instead of *je*; this is due to the fact that the verb starts with a vowel. This rule is similar to using "a" and "an" in English.
# + [markdown] hidden=false
# ## The Regular Verbs
#
# There are three types of regular verbs, which are identified by their endings. They are:
# - the "-er" verbs, such as "parler" (to speak)
# - the "-ir" verbs, such as "finir" (to finish)
# - the "-re" verbs, such as "vendre" (to sell)
#
# Each of these three types has its own pattern for conjugation, which is shared by all other regular verbs of the same type. Let's have a look at these.
#
# ## 1. The "-er" Regular Verbs
#
# There is a general rubric for conjugating verbs that end in **er** in the present tense.
#
# We will illustrate this with the verb "parler" (to speak). The stem of the verb parler is "parl-". We conjugate it by adding on the endings "e", "es", "e", "ons", "ez" "ent" for the corresponding pronouns, as follows:
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
stem = ['parl-','parl-','parl-','parl-','parl-','parl-']
ending = ['e','es','e','ons','ez','ent']
parler_conjug = ['parle','parles','parle','parlons','parlez','parlent']
trace0 = go.Table(
columnorder = [1,2],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,parler_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# This can be taken as the general rule for conjugating **er** verbs in the present tense. All you need to do is find the stem of the verb, which was parl- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
#
# For instance, try this yourself with the verb "chanter" (to sing). The stem is "chant-", so what are the corresponding six conjugations, as in the table above?
#
# This pattern works for most "er" verbs, and there are hundreds of them. Some common ones are:
#
#
# - aimer (to like/love)
# - arriver (to arrive, to happen)
# - brosser (to brush)
# - chanter (to sing)
# - chercher (to look for)
# - danser (to dance)
# - demander (to ask for)
# - détester (to hate)
# - donner (to give)
# - écouter (to listen to)
# - étudier (to study)
# - gagner (to win, to earn)
# - habiter (to live)
# - jouer (to play)
# - manquer (to miss)
# - marcher (to walk, to function)
# - parler (to talk, to speak)
# - penser (to think)
# - regarder (to watch, to look at)
# - travailler (to work)
# - trouver (to find)
# - visiter (to visit (a place)
#
# There are also many exceptions for the **er** verbs, which we will discuss below.
# + [markdown] hidden=false
# ## 2. The "-ir" Regular Verbs
#
# There is a general rubric for conjugating verbs that end in **ir** in the present tense.
#
# We will illustrate this with the verb "finir" (to finish). The stem of the verb finir is "fin-". We conjugate it by adding on the endings "is", "is", "it", "issons", "issez" "issent" for the corresponding pronouns, as follows:
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
finir_stem = ['fin-','fin-','fin-','fin-','fin-','fin-']
# Regular -ir endings: -is, -is, -it, -issons, -issez, -issent.
# Fixed: the third-person singular ending was mistyped "eit".
ir_ending = ['is','is','it','issons','issez','issent']
# Fixed: "nous finissons" was misspelled "finisson" in the displayed table.
finir_conjug = ['finis','finis','finit','finissons','finissez','finissent']
# Two-column plotly table pairing each pronoun with its conjugated form.
trace0 = go.Table(
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,finir_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# This can be taken as the general rule for conjugating **ir** verbs in the present tense. All you need to do is find the *stem* of the verb, which was fin- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
#
# For instance, try this yourself with the verb "grandir" (to grow). The stem is "grand-", so what are the corresponding six conjugations, as in the table above?
#
# This pattern works for most "ir" verbs, and there are hundreds of them. Some common ones are:
#
# - applaudir (to applaud)
# - bâtir (to build)
# - choisir (to choose)
# - désobéir (to disobey)
# - finir (to finish)
# - grandir (to grow up)
# - grossir (to gain weight)
# - guérir (to heal, to get well)
# - maigrir (to lose weight)
# - obéir (to obey)
# - punir (to punish)
# - réfléchir (to think, to reflect)
# - remplir (to fill)
# - réussir (to succeed)
# - vieillir (to grow old)
#
# Again, though, there will be exceptions...
# + [markdown] hidden=false
# ## 3. The "-re" Regular Verbs
#
# There is a general rubric for conjugating verbs that end in **re** in the present tense.
#
# We will illustrate this with the verb "vendre" (to sell). The stem of the verb vendre is "vend-". We conjugate it by adding on the endings "s", "s", "nothing", "ons", "ez" "ent" for the corresponding pronouns, as follows:
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
vendre_stem = ['vend-','vend-','vend-','vend-','vend-','vend-']
re_ending = ['s','s','','ons','ez','ent']
vendre_conjug = ['vends','vends','vend','vendons','vendez','vendent']
trace0 = go.Table(
columnorder = [1,2],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,vendre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# This can be taken as the general rule for conjugating **re** verbs in the present tense. All you need to do is find the *stem* of the verb, which was vend- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
#
# For instance, try this yourself with the verb "attendre" (to wait). The stem is "attend-", so what are the corresponding six conjugations, as in the table above?
#
# This pattern works for most "re" verbs, and there are many of them. Some common ones are:
#
# attendre (to wait)
# défendre (to defend)
# descendre (to descend)
# entendre (to hear)
# étendre (to stretch)
# fondre (to melt)
# pendre (to hang, or suspend)
# perdre (to lose)
# prétendre (to claim)
# rendre (to give back, or return)
# répondre (to answer)
# vendre (to sell)
#
# Again, though, there will be exceptions...
# + [markdown] hidden=false
# ## 1. Exceptions to the regular er verbs
#
# French is filled with exceptions, which makes it a bit of a difficult language to master as one has to basically dedicate the exceptions to memory. An exception for a verb means that it is not (maybe just partially) conjugating using the endings given above. Most exceptions arise in an alteration of the stem of the verb.
#
# Thankfully there are not many exceptions for the **er** verbs. Here are three notable ones:
#
# ## 1a. The "-oyer" and "-uyer" exceptions:
#
# For verbs like "envoyer" (to send) or "ennuyer" (to annoy) the stem changes the "y" to an "i" for all pronouns except nous and vous:
# + tags=["hide-input"]
french = ["j'",'tu','elle, il, on','nous','vous','elles, ils']
envoyer_conjug = ['envoie', 'envoies','envoie','envoyons','envoyez','envoient']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,envoyer_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# ## 1b. The "e_er" or "é_er" exceptions:
#
# Verbs like "acheter" (to buy) or "préférer" (to prefer) also follow an exception rule. The accent aigue becomes an accent grave, that is, é becomes è, except in the nous and vous cases, where it does not change. Note this means the pronunciation of the letter changes as well.
# + tags=["hide-input"]
preferer_conjug = ['préfère','préfères','préfère','préférons','préférez','préfèrent']
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,preferer_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# ## 1c. The " –eler " and " -eter " exceptions:
#
# For verbs like "appeler" (to call) or "rejeter" (to reject) the letters "l"
# or "t" get doubled. Again, this does not hold for the nous and vous cases.
#
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
appeler_conjug = ['appelle','appelles','appelle','appelons','appelez','appellent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,appeler_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# It's important to be aware of these exceptions, as you will be able to identify patterns in verbs of these forms and the exceptions themselves, like how it doesn't apply for nous and vous. Knowledge of the exceptions is crucial to mastering the language!
# -
# ## 2. Exceptions to the regular ir verbs
#
# Unfortunately, with the **ir** verbs, there are many, many exceptions. Three important ones are as follows:
#
# ## 2a. Verbs like partir (to leave):
#
# For "partir" (to leave), the keep is to drop the "t" from the stem in the singular case, and add the endings "s", "s", "t". For the plural case, you keep the "t". The conjgations go like this:
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
partir_conjug = ['pars','pars','part','partons','partez','partent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,partir_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# Other irregular ir verbs like partir include
# - dormir (to sleep)
# - mentir (to lie)
# - partir (to leave)
# - sentir (to feel)
# - servir (to serve)
# - sortir (to go out)
#
# ## 2b. Verbs that end in -llir, -frir, or -vrir
#
# Curiously, these verbs conjugate like an "er" verb. Just take the stem and add the endings "e", "es", "e", "ons", "ez", "ent." For instance, here is the conjugation for ouvrir (to open):
#
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
ouvrir_conjug = ['ouvre','ouvres','ouvre','ouvrons','ouvrez','ouvrent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,ouvrir_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# Other ir verbs that follow this pattern include:
# - couvrir (to cover)
# - cueillir (to pick)
# - offrir (to offer)
# - ouvrir (to open)
# - souffrir (to suffer)
#
# ## 2c. Verbs that end in -enir
#
# These ones all follow a similar pattern. The stem changes in the singular cases and the third-person plural, and the endings are just like the first irregular ir case (like partir). Here is the conjugation for tenir (to hold):
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
# Fixed: "ils/elles tiennent" was mistyped "tenent" — the stem change
# (ten- -> tien-) applies to the third-person plural as well.
tenir_conjug = ['tiens','tiens','tient','tenons','tenez','tiennent']
# Two-column plotly table pairing each pronoun with its conjugated form.
trace0 = go.Table(
    columnorder = [1,2,3],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,tenir_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# Other verbs in this irregular category include:
# - appartenir (to belong)
# - contenir (to contain)
# - convenir (to suit)
# - devenir (to become)
# - maintenir (to maintain)
# - obtenir (to obtain)
# - parvenir (to reach, or achieve)
# - prévenir (to warn, or prevent)
# - retenir (to retain)
# - revenir (to come back)
# - soutenir (to support)
# - (se) souvenir (to remember)
# - tenir (to hold)
# - venir (to come)
# ## 2d. Other very irregular ir verbs
#
# There are a dozen or so irregular ir verbs that don't fit any pattern. These include many that end in oir, as well as other like acquérir, asseoir, avoir, courir, devoir, falloir, mourir, pleuvoir, pouvoir, recevoir, savoir, servir, valoir, voir. You just have to learn these conjugations individually.
#
#
#
# ## 3. Exceptions to the re verbs
#
# As with the other two regular classes, the **re** verbs also have several exceptions. In all cases, the changes involve adding or dropping a consonant in the stem, and possibly adjusting the endings. A quick summary is to say that the unusual changes have to do with making the spelling match the prononciation of the verb forms. In some sense, it is easier to learn what the verbs sound like, and then spell them to match.
#
# There are four basic exceptions, as follows:
#
# ## 3a. The verb prendre (to take) and its relatives
#
# Here, you just drop the "d" from the stem in the plural form, and add an extra "n" in the last case:
#
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
prendre_conjug = ['prends','prends','prend','prenons','prenez','prennent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,prendre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# ## 3b. The verbs battre (to fight) and mettre (to put)
#
# Here, you just drop one "t" from the stem in the singular form:
#
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
battre_conjug = ['bats','bats','bat','battons','battez','battent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,battre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# ## 3c. The verbs rompre (to break) and its relatives
# This one is such a tiny exception: an extra t in the third person singular:
#
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
rompre_conjug = ['romps','romps','rompt','rompons','rompez','rompent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,rompre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# ## 3d. Finally, Verbs Ending in –aindre, –eindre, and –oindre
#
# In this case, the dre/tre is dropped to form the stem, and in the plural cases, the letter g is inserted. Again, this is to get the pronunciation to match the spelling.
# + tags=["hide-input"]
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
craindre_conjug = ['crains','crains','craint','craignons','craignez','craignent']
joindre_conjug = ['joins','joins','joint','joignon','joignez','joignent']
peintre_conjug = ['peins','peins','peint','peignons','peignez','peignent']
trace0 = go.Table(
columnorder = [1,2,3,4,5],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Craindre','Joindre','Peintre'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center','center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,craindre_conjug,joindre_conjug,peintre_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center','center','center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
# + [markdown] hidden=false
# ## Coding Examples
#
# ---
#
# How could one write code to see if someone conjugated a verb correctly? If you are interested in the programming aspects, please see the related notebook [French-Verb-Coding](CC-186-French-Verb-Coding.ipynb).
#
#
# + hidden=false
#perhaps show how this work for a different verb and subject.
# + hidden=false
#manipulate this code for 'ir' verbs or try to write your own code to handle the exceptions above.
#remember to use the list user_answer for the user_inputs and don't forget to enter some inputs yourself ;)
# user_answer = [je.value,tu.value,elle.value,nous.value,vous.value,elles.value]
# french = ['je','tu','elle/il/on','nous','vous','elles/ils']
# endings = ['e','es','e','ons','ez','ent']
# for i in range(0,len(endings)):
# n = len(endings[i])
# #feel free to change what happens if they get it right or wrong.
# if user_answer[i] != '': #So that it doesn't print if nothing has been entered
# if user_answer[i][-n:] != endings[i]:
# print('The conjugation for "'+french[i]+'" is incorrect')
# if user_answer[i][-n:] == endings[i]:
# print('The conjugation for "'+french[i]+'" is correct!')
# + [markdown] hidden=false
# ---
# ## Conclusion
#
# In this Jupyter Notebook by Callysto you learned the basics of French verb conjugation in the present tense. In a related notebook, we see how we can expose the structure of the French verb conjugation rules to compose a program that checks if a user input the correct answers to conjugate a verb in the present tense. This is somewhat of a hallmark of coding: taking some sort of structure from the problem at hand and exposing it in the form of generalizable and applicable written code. Breaking down problems in this fashion is essential to computational thinking.
#
# Je te remercie d'avoir essayé les exercices donnés.
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/html/_sources/curriculum-notebooks/Languages/FrenchVerbCodingConjugation/french-verb-conjugation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
df = pd.read_csv('../fact/words.csv')
# -
df.sort_values('latio_pos', ascending=False).head(50)
df[df.term == 'save']
from nltk.corpus import wordnet
syns = wordnet.synsets("carried")
print(syns)
df_biterms = pd.read_csv('../fact/biterms.csv')
df_biterms.sort_values('latio_pos', ascending=False).head(50)
| notebook/adhoc_synonyms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split,GridSearchCV,RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pickle
# %matplotlib inline
# Load the loan-application data set.
df=pd.read_csv('loanstat.csv')
df.head()
# Impute missing numeric values with each column's median.
for col in df.select_dtypes(exclude='object').columns:
    if df[col].isnull().any():
        df[col].fillna(df[col].median(),inplace=True)
# Impute missing categorical values with each column's mode.
for col in df.columns:
    if df[col].isnull().any():
        if df[col].dtype == 'object':
            df[col].fillna(df[col].mode()[0],inplace=True)
# Sanity check: remaining missing values per column.
# (Fixed: the original called df.columns.isnull().sum(), which inspects
# the column *labels* rather than the data and always sums to 0.)
df.isnull().sum()
# Collapse the open-ended '3+' dependents category onto the number 3.
df.loc[df['Dependents']=='3+','Dependents']='3'
# Drop the identifier column and encode the target as 0/1.
T=df.drop("Loan_ID",axis=1)
T.loc[T['Loan_Status']=='Y','Loan_Status']=1
T.loc[T['Loan_Status']=='N','Loan_Status']=0
T['Loan_Status']=T['Loan_Status'].astype(int)
# Feature engineering: household income, debt-to-income ratio, and EMI.
T['Total']=T['ApplicantIncome']+T['CoapplicantIncome']
T['Debt/Income']=(T['LoanAmount']*1000)/T['Total']
# NOTE(review): the standard EMI denominator is (1+r)**n - 1, not
# (1+r)**(n-1), and r = 0.09 per *term* looks high — confirm the intended
# amortization formula before relying on this feature.
T['EMI']=((T['LoanAmount']*(0.09)*((1.09)**T['Loan_Amount_Term']))/((1.09)**(T['Loan_Amount_Term']-1)))
# The engineered features replace the raw columns they were derived from.
T.drop(['LoanAmount','Loan_Amount_Term','ApplicantIncome','CoapplicantIncome'],axis=1,inplace=True)
T['Dependents']=T['Dependents'].astype(int)
T
lis=[]
for col in T.columns:
if T[col].dtype=='object':
lis.append(col)
one= OneHotEncoder(handle_unknown='ignore',sparse=False)
K=pd.DataFrame(one.fit_transform(T[lis]))
K.index=T.index
L=T.drop(lis,axis=1)
M=pd.concat([K,L],axis=1)
Xt=M.drop(['Loan_Status'],axis=1)
yt=M['Loan_Status']
X_train,X_test,y_train,y_test=train_test_split(Xt,yt,train_size=0.7,test_size=0.3,random_state=1)
print(X_train.shape)
print(y_train.shape)
X_train['Total_log']=np.log(X_train['Total'])
X_train.drop(['Total'],axis=1,inplace=True)
X_test['Total_log']=np.log(X_test['Total'])
X_test.drop(['Total'],axis=1,inplace=True)
X_train
X_test
# Hyper-parameter search for logistic regression: regularization type and
# inverse regularization strength C, tuned by 5-fold cross-validation.
# NOTE(review): the default lbfgs solver does not support penalty='l1';
# those grid candidates will fail during fit — confirm whether a compatible
# solver (e.g. 'liblinear' or 'saga') was intended.
parame={'penalty':('l1','l2',None), 'C':(0.001,0.01,0.1,1,10,100,1000)}
ld=LogisticRegression(max_iter=1000)
gd=GridSearchCV(ld,param_grid=parame,cv=5,n_jobs=3)
gd.fit(X_train,y_train)
# Evaluate the best cross-validated model on the held-out test split.
gp=gd.best_estimator_
gpp=gp.predict(X_test)
print(accuracy_score(gpp,y_test))
print(gd.best_params_)
# Hyper-parameter search for a random forest: split threshold, tree depth,
# and ensemble size, tuned by 5-fold cross-validated exhaustive grid search.
param={'min_samples_split':np.arange(2,20), 'max_depth': np.arange(2,20),'n_estimators':np.arange(100,900,100)}
rd=RandomForestClassifier()
fd=GridSearchCV(rd,param_grid=param,cv=5,n_jobs=3)
fd.fit(X_train,y_train)
# Evaluate the best cross-validated model on the held-out test split.
fp=fd.best_estimator_
fpp=fp.predict(X_test)
print(accuracy_score(fpp,y_test))
print(fd.best_params_)
# Hyper-parameter search for a single decision tree: split threshold,
# depth, leaf count, and impurity criterion, with 5-fold cross-validation.
para={'min_samples_split':np.arange(2,26), 'max_depth': np.arange(2,30),'max_leaf_nodes':np.arange(2,50), 'criterion':['gini', 'entropy']}
cd=DecisionTreeClassifier()
md=GridSearchCV(cd,param_grid=para,cv=5,n_jobs=3)
md.fit(X_train,y_train)
# Evaluate the best cross-validated model on the held-out test split.
pp=md.best_estimator_
ppp=pp.predict(X_test)
print(accuracy_score(ppp,y_test))
print(md.best_params_)
| loancomp-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import netCDF4
import math
import xarray as xr
import dask
import numpy as np
import time
import scipy
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import transforms
from matplotlib.animation import PillowWriter
path_to_file = '/DFS-L/DATA/pritchard/gmooers/Workflow/MAPS/SPCAM/Small_Sample/New_SPCAM5/TimestepOutput_Neuralnet_SPCAM_216/run/Cpac_gridcell_rcat.nc'
test_ds = xr.open_dataset(path_to_file)
#test_ds.variables
T = np.squeeze(test_ds.T.values)
P = np.squeeze(test_ds.lev.values)
Q = test_ds.QBP.values
# Frozen moist static energy
#
# $FSME = \int_0^{P_s}c_p*T+g*z+L_v*q-L_f*q_{ice}$
#
# Did not create output in SPCAM for this var - will maybe add to next run?
# Potential Temperature, $\theta$
#
# $\theta = T*(\frac{p_0}{p})^{\frac{R}{c_p}}$
# +
def theta_gen(t_array, p_array):
    """Compute potential temperature theta = T * (p0 / p)**(R / cp).

    Parameters
    ----------
    t_array : ndarray, shape (time, lev)
        Temperature [K]; one column per pressure level.
    p_array : ndarray, shape (lev,)
        Pressure levels [hPa] matching the columns of ``t_array``.

    Returns
    -------
    ndarray
        Potential temperature [K], same shape as ``t_array``.

    Notes
    -----
    Fixed: the original aliased the input (``theta_array = t_array``) and
    overwrote the caller's temperature field in place; a new array is
    returned instead.
    """
    t_array = np.asarray(t_array, dtype=float)
    p_array = np.asarray(p_array, dtype=float)
    # Broadcasting applies each level's factor across all time steps.
    # p0 = 1013.25 hPa; R/cp = 287/1004 for dry air.
    return t_array * (1013.25 / p_array) ** (287.0 / 1004.0)
# -
# Potential temperature for every (time, level) sample.
theta = theta_gen(T, P)
def plotting(datas, varname, title, levels):
    """Plot one vertical profile against pressure level.

    :param datas: 1D array of values for a single profile
    :param varname: x-axis label for the plotted variable
    :param title: text inserted into the plot title
    :param levels: 1D array of pressure levels (y axis)
    """
    plt.plot(datas, levels, linewidth = 4)
    plt.ylabel('Pressure Level', fontsize = 20)
    plt.xlabel(varname, fontsize = 20)
    # Pressure decreases with altitude, so flip the y axis.
    plt.gca().invert_yaxis()
    plt.title('Snapshot of '+title+' location')
var = 'Potential Temperature (K)'
location = 'surface'
# Profile at the first timestep.
plotting(theta[0, :], var, location, P)
# Equivalent Potential Temperature, $\theta_e$
#
# $\theta_e = \theta e^{\frac{L*q}{c_p*T}}$
# +
def theta_e_gen(t_array, q_array, p_array):
    """Equivalent potential temperature: theta_e = theta * exp(Lv*q / (cp*T)).

    Lv = 2.501e6 J/kg, cp = 1004 J/kg/K; theta computed as in theta_gen.

    :param t_array: 2D temperature array, shape (samples, levels) [K]
    :param q_array: specific humidity, same shape as t_array [kg/kg]
    :param p_array: 1D array of pressure levels
    :return: new array of theta_e; inputs are NOT modified
    """
    # Compute theta on a copy. The original aliased `t_array`, so the
    # exponential was evaluated with already-overwritten temperatures and
    # the caller's array was corrupted as a side effect.
    theta_array = np.array(t_array, dtype=float)
    for i in range(len(p_array)):
        theta_array[:, i] = t_array[:, i] * (1013.25 / p_array[i]) ** (287.0 / 1004.0)
    return theta_array * np.exp((2501000.0 * q_array) / (1004.0 * t_array))
# -
# Equivalent potential temperature; plot the first-timestep profile.
theta_e = theta_e_gen(T, Q, P)
var = 'Equivelent Potential Temperature (K)'
location = 'surface'
plotting(theta_e[0, :], var, location, P)
# Integrated Sensible Heat $\frac{w}{m^2}$
#
# $SH = \int_0^{P_s} \frac{dp}{g}*c_p*T$
# Not entirely sure if I am using SciPy's built-in trapz function correctly, so for now, I will code a function for a numerical implementation of integration via the trapezoidal rule:
#
# $SH = \frac{cp}{g}\sum_{p=0}^{P_s}\frac{T_i+T_{i+1}}{2}*\delta p_i$
# Surface pressure time series and hybrid-coordinate interface coefficients.
ps = test_ds.PS.values
levs = np.squeeze(test_ds.lev.values)
hyai = test_ds.hyai.values
hybi = test_ds.hybi.values
g = 9.81       # gravity [m/s^2]
cp = 1004.0    # specific heat of dry air [J/kg/K]
# NOTE(review): a constant 1e5 Pa surface pressure is used here instead of
# the actual `ps` read above -- confirm this approximation is intended.
PS = 1e5
P0 = 1e5
# NOTE(review): this rebinds the earlier `P` (level centers) to interface
# pressures in Pa -- later cells must use the new meaning.
P = P0*hyai+PS*hybi # Total pressure [Pa]
dp = P[1:]-P[:-1] # Differential pressure [Pa]
#convert from k/s to w/m^2
def vert_integral(values, diffs):
    """Trapezoidal vertical integral of each profile, scaled by cp/g.

    Each row of *values* is a vertical profile; *diffs* holds the pressure
    thickness of each layer. The cp/g factor (1004 / 9.81) converts
    K/s-like quantities to W/m^2.

    :param values: 2D array-like, shape (n_profiles, n_levels)
    :param diffs: 1D array-like of layer thicknesses (at least n_levels - 1)
    :return: 1D array with one integral per profile
    """
    values = np.asarray(values, dtype=float)
    diffs = np.asarray(diffs, dtype=float)
    # BUGFIX: the original allocated len(values) - 1 result slots but wrote
    # len(values) results, raising IndexError on the last profile.
    layer_means = 0.5 * (values[:, :-1] + values[:, 1:])
    n_layers = values.shape[1] - 1
    return layer_means.dot(diffs[:n_layers]) * 1004.0 / 9.81
# Integrated Latent Heat $\frac{w}{m^2}$
#
# $LH = \int_0^{P_s} \frac{dp}{g}*L_v*q$
# Mass Weighted Integral w
#
# $W = \int_0^{P_s}dpw$
# CRM-resolved vertical velocity; squeeze singleton dims before use.
W = np.squeeze(test_ds.CRM_W.values)
print(W.shape)
# Integrated Vertical KE
#
# $IVKE = \int_0^{P_s} \frac{dP}{g}*\rho *\frac{w^2}{2}$
#
#
| MAPS/other/SPCAM5_Other_Visualizations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <span style="color:#336699">Trabalho Final - Introdução a Programação</span>
# <hr style="border:2px solid #0077b9;">
#
# ## Cálculo do Balanço de Radiação - Landsat 5##
# Em Sensoriamento Remoto, o cálculo do balanço de radiação permite que o usuário possa gerar conversões radiométricas das imagens inicialmente em números digitais, bem como entender as influências atmosféricas exercidas, como atenuação da radiação eletromagnética (REM) por meio de espalhamento e absorção. São diversos os parâmetros que devem ser considerados para esse tipo de processamento, portanto automatizar esse processo torna-se uma grande contribuição, visto que a probabilidade de erros é reduzida já que o usuário não precisa refazer o procedimento passo a passo, principalmente se a pesquisa for embasada em múltiplas imagens. Dessa forma, o objetivo desse trabalho é facilitar esse tipo de processamento ao usuário, o qual deverá inserir apenas informações encontradas nos metadados da imagem ou de características gerais da área de estudo. Este trabalho é baseado na apostila do Prof.Dr. <NAME> da UFCG (Universidade Federal de Campina Grande), que realiza o cálculo de balanço da radiação com imagens do sensor TM - Landsat 5 por meio do *Model Maker* do *software* Erdas.
#
# O sensor TM - Landsat 5 mede a radiância espectral dos alvos e a armazena na forma de níveis de cinza, ou intensidade do pixel, ou ainda número digital (ND), cujos valores variam de 0 a 255 (8 bits), tendo uma resolução espacial de 30 m nas bandas 1, 2, 3, 4, 5 e 7, e de 120 m na banda 6 (banda termal, que possibilita obter a temperatura da superfície). Informações sobre essas bandas estão inclusas na Figura 1. Também estão incluídos os valores da irradiância solar monocromática (Kλ,b) das bandas reflectivas (bandas 1-5 e 7), que correspondem à radiação solar incidente sobre uma superfície normal à direção dos raios solares no topo da atmosfera terrestre, à distância de uma Unidade Astronômica (1 UA = 149,5 milhões de km) do Sol.
#
# 
# *Figura 1. Descrição das bandas do Mapeador Temático (TM) do Landsat 5, com os correspondentes intervalos de comprimento de onda, coeficientes de calibração (radiância mínima – a e máxima – b para o período: antes de maio/2003 (b1), maio/2003 a abril/2007 (b2), após abril 2007(b3)) e irradiâncias espectrais no topo da atmosfera (TOA) (Chander et al., 2009).*
#
# O trabalho proposto depende de um pré processamento em que essas imagens em números digitais serão convertidas para valores de radiância e posteriormente para valores de reflectância. Com os valores de reflectância é possível a aplicação dos índices espectrais (NDVI, SAVI e IAF) e o cálculo do albedo planetário. Com a imagem termal em valores de radiância, calcula-se a emissividade e temperatura de superfície terrestre. Por fim, é possível o cômputo das radiações solar, atmosférica e de superfície para a determinação do saldo final de radiação (Figura 2).
#
# 
# *Figura 2. Diagrama ilustrativo das etapas destinadas à obtenção do saldo de radiação à superfície.*
# ### Início do programa: importação das bibliotecas e da imagem do Landsat 5
#Códigos
from osgeo import gdal
from gdalconst import *
import numpy as np
from matplotlib import pyplot as plt
import math
import datetime as dt
from osgeo import osr
# +
# Input files: Landsat 5 TM bands 1-7 (band 6 resampled to match 30 m).
b1 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND1.tif"
b2 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND2.tif"
b3 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND3.tif"
b4 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND4.tif"
b5 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND5.tif"
b6 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND6_reamostragem_2_tif.tif"
b7 = "C:/ser347/imagens_land5/LANDSAT_5_TM_20050222_220_075_L2_BAND7.tif"
try:
    dataset_b1 = gdal.Open(b1, GA_ReadOnly)
    dataset_b2 = gdal.Open(b2, GA_ReadOnly)
    dataset_b3 = gdal.Open(b3, GA_ReadOnly)
    dataset_b4 = gdal.Open(b4, GA_ReadOnly)
    dataset_b5 = gdal.Open(b5, GA_ReadOnly)
    dataset_b6 = gdal.Open(b6, GA_ReadOnly)
    dataset_b7 = gdal.Open(b7, GA_ReadOnly)
    # Print the geotransforms so mismatched grids are easy to spot.
    print (dataset_b1.GetGeoTransform(), dataset_b2.GetGeoTransform(), dataset_b3.GetGeoTransform(),
           dataset_b4.GetGeoTransform(), dataset_b5.GetGeoTransform(), dataset_b6.GetGeoTransform(),
           dataset_b7.GetGeoTransform())
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; only genuine open/read failures are reported here.
    print ("Erro na abertura de algum arquivo!")
# First (only) band of each dataset, then the raw digital numbers as arrays.
banda1 = dataset_b1.GetRasterBand(1)
banda2 = dataset_b2.GetRasterBand(1)
banda3 = dataset_b3.GetRasterBand(1)
banda4 = dataset_b4.GetRasterBand(1)
banda5 = dataset_b5.GetRasterBand(1)
banda6 = dataset_b6.GetRasterBand(1)
banda7 = dataset_b7.GetRasterBand(1)
numpy_banda1 = banda1.ReadAsArray()
numpy_banda2 = banda2.ReadAsArray()
numpy_banda3 = banda3.ReadAsArray()
numpy_banda4 = banda4.ReadAsArray()
numpy_banda5 = banda5.ReadAsArray()
numpy_banda6 = banda6.ReadAsArray()
numpy_banda7 = banda7.ReadAsArray()
# -
def salvar_banda(matriz_de_pixels, nome_do_arquivo, dataset_de_referencia):
    """Write a one-band GeoTIFF, copying geometry from a reference dataset.

    :param matriz_de_pixels: 2D array of pixel values to write
    :param nome_do_arquivo: output file path
    :param dataset_de_referencia: GDAL dataset supplying size and projection
    """
    # Dimensions come from the reference raster so outputs always align.
    rows = dataset_de_referencia.RasterYSize
    cols = dataset_de_referencia.RasterXSize
    n_bands = 1
    driver = gdal.GetDriverByName('GTiff')
    # Always store as 64-bit float (derived products are continuous values).
    data_type = gdal.GDT_Float64
    dataset_output = driver.Create(nome_do_arquivo, cols, rows, n_bands, data_type)
    # Copy georeferencing and projection from the reference dataset.
    dataset_output.SetGeoTransform(dataset_de_referencia.GetGeoTransform())
    dataset_output.SetProjection(dataset_de_referencia.GetProjectionRef())
    # Write the pixels, flush to disk and release the handle.
    dataset_output.GetRasterBand(1).WriteArray(matriz_de_pixels)
    dataset_output.FlushCache()
    dataset_output = None
# ### Calibração Radiométrica
#
# Na calibração radiométrica os números digitais (ND) de cada pixel da imagem são convertidos em radiância espectral. Essas radiâncias representam a energia solar refletida por cada pixel, por unidade de área, de tempo, de ângulo sólido e de comprimento de onda, medidas ao nível do satélite Landsat (705 Km de altitude), para as bandas 1-5 e 7. Já para a banda 6, essa radiância representa a energia eletromagnética emitida por cada pixel. A calibração é efetivada segundo a seguinte equação (Chander et al., 2009):
#
# $$L_λ= a + \left(\dfrac{b - a} {255}\right) * (ND) $$
#
# ***Onde:***
#
# ***a, b:*** Coeficientes de Calibração;
#
# ***ND:*** Números Digitais;
# +
#Códigos
# Radiometric calibration: convert digital numbers (0-255) to spectral
# radiance via L = a + ((b - a)/255) * DN (Chander et al., 2009).
# a/b are per-band min/max radiances for bands 1-5, 6 (thermal) and 7
# (coefficient epoch per Figure 1 -- confirm which period applies here).
a = [-1.52, -2.84, -1.17, -1.51, -0.37, 1.2378, -0.15]
b = [193, 365, 264, 221, 30.2, 15.303, 16.5]
radiancia_b1 = (a[0] + (((b[0] - a[0]) / 255) * numpy_banda1.astype(float)))
radiancia_b2 = (a[1] + (((b[1] - a[1]) / 255) * numpy_banda2.astype(float)))
radiancia_b3 = (a[2] + (((b[2] - a[2]) / 255) * numpy_banda3.astype(float)))
radiancia_b4 = (a[3] + (((b[3] - a[3]) / 255) * numpy_banda4.astype(float)))
radiancia_b5 = (a[4] + (((b[4] - a[4]) / 255) * numpy_banda5.astype(float)))
radiancia_b6 = (a[5] + (((b[5] - a[5]) / 255) * numpy_banda6.astype(float)))
radiancia_b7 = (a[6] + (((b[6] - a[6]) / 255) * numpy_banda7.astype(float)))
# -
# ### Conversão - Reflectância
#
# Representa o cômputo da reflectância monocromática de cada banda, definida como sendo a razão entre o fluxo da radiação solar refletido e o fluxo da radiação solar incidente, que é obtida segundo a equação:
#
# $$ ρ_λ = \dfrac{π * L_λ} {K * cos Z* d_r} $$
#
# ***Onde:***
#
# $L_λ$**:** Radiância de cada banda;
#
# ***K:*** Irradiância solar espectral de cada banda no topo da atmosfera;
#
# ***Z:*** Ângulo zenital;
#
# $d_r$**:** Fator de variação da distância Sol-Terra;
#
# ---
#
# O $d_r$ é o quadrado da razão entre a distância média Terra-Sol ($r_o$) e a distância Terra-Sol (r) em dado dia do ano (DSA), que de acordo com Iqbal (1983), é dado por:
#
# $$ d_r = 1 + 0,033*cos \left(\dfrac{DSA * 2π} {365}\right) $$
#
# ***Onde:***
#
# ***DSA:*** Dia do Ano Juliano;
elevacao = float(input("Digite o ângulo de elevação: "))
# +
# cos(Z) = sin(elevation): the zenith angle is the complement of the
# solar elevation angle from the scene metadata.
grau = math.radians(elevacao)
cosz = math.sin(grau)
print("Cosseno do Ângulo Zenital:", cosz)
# -
dia = int(input("Dia:"))
mes = int(input("Mês:"))
ano = int(input("Ano:"))
data = dt.date(ano, mes, dia)
data.timetuple()
# Julian day of year for the acquisition date.
doa = data.timetuple().tm_yday
print("Dia Juliano:",doa)
# +
# Earth-Sun distance correction factor (Iqbal, 1983).
dr = 1 + 0.033 * math.cos(doa * 2 * math.pi / 365 )
print("dr:", dr)
# -
# TOA solar irradiance for the reflective bands 1-5 and 7 [W/m2/um].
k = [1957,1796,1536,1031,220,83.44]
# Reflectance: rho = pi * L / (K * cos(Z) * dr).
r_b1 = (math.pi * radiancia_b1) / (k[0] * cosz * dr)
r_b2 = (math.pi * radiancia_b2) / (k[1] * cosz * dr)
r_b3 = (math.pi * radiancia_b3) / (k[2] * cosz * dr)
r_b4 = (math.pi * radiancia_b4) / (k[3] * cosz * dr)
r_b5 = (math.pi * radiancia_b5) / (k[4] * cosz * dr)
r_b7 = (math.pi * radiancia_b7) / (k[5] * cosz * dr)
# ### Albedo Planetário
#
# O albedo planetário é obtido no topo da atmosfera do nosso planeta, ou seja, o albedo da superfície sem correção atmosférica. Portanto, o albedo planetário é obtido pela combinação linear das reflectâncias monocromáticas:
#
# $$ α_{toa} = 0,293ρ_1 + 0,274ρ_2 + 0,233ρ_3 + 0,157ρ_4 + 0,033ρ_5 + 0,011ρ_7 $$
#
# ***Onde:***
#
# $ ρ_1 , ρ_2 , ρ_3 , ρ_4 , ρ_5 , ρ_7 $**:** Albedo Planetário das bandas 1-5, 7;
#
# ---
#
# ### Albedo da Superfície
#
# O albedo com os efeitos atmosféricos corrigidos pode ser feito mediante a equação:
#
# $$ α = \dfrac{α_{toa} - α_p} {Ƭ_{sw}^2} $$
#
# ***Onde:***
#
# $α_{toa}$**:** Albedo Planetário;
#
# $α_p$**:** Reflectância da atmosfera;
#
# $Ƭ_{sw}$**:** Transmissividade atmosférica;
#
# ---
#
# ### Transmissividade Atmosférica
#
# A transmissividade atmosférica, para condições de céu claro, pode ser obtida por (Allen et al., 2002):
#
# $$ Ƭ_{sw} = 0,75 + 2*10^{-5} * h_{alt} $$
#
# ***Onde:***
#
# $h_{alt}$ **:** Altitude de cada pixel;
#
# Planetary (top-of-atmosphere) albedo: linear combination of band reflectances.
atoa=(0.293*r_b1)+(0.274*r_b2)+(0.233*r_b3)+(0.157*r_b4)+(0.033*r_b5)+(0.011*r_b7)
# +
# Clear-sky atmospheric transmissivity: 0.75 + 2e-5 * altitude (Allen et al., 2002).
alt=int(input("Digite a altitude(metros): "))
t=0.75+((2 * 10**-5)*alt)
# Surface albedo: remove atmospheric reflectance (0.03), correct for transmissivity.
alb=(atoa-0.03)/(t**2)
nome_do_arquivo = "C:/ser347/imagens_land5/imagens_teste/albsup.tif"
salvar_banda(alb, nome_do_arquivo, dataset_b1)
# -
# ### Índices Espectrais (NDVI, SAVI, IAF):
#
# #### NDVI:
# O Índice de Vegetação da Diferença Normalizada (*Normalized Difference Vegetation Index* - NDVI) é obtido através da razão entre a diferença das refletividades do IV-próximo e do vermelho, pela soma das mesmas:
#
# $$ NDVI = \dfrac{ρ_{IV} - ρ_V} {ρ_{IV} + ρ_V} $$
#
# ***Onde:***
#
# $ρ_{IV}$ **:** Reflectância da banda do infravermelho;
#
# $ρ_V$ **:** Reflectância da banda do vermelho;
#
# ---
#
# O NDVI é um indicador sensível da quantidade e da condição da vegetação verde. Seus valores variam de –1 a +1 e para superfícies com alguma vegetação o NDVI varia de 0 e 1, já para a água e nuvens o NDVI geralmente é menor que zero.
# +
#Códigos
# NDVI = (NIR - Red) / (NIR + Red): bands 4 and 3 on Landsat 5 TM.
ndvi = (r_b4 - r_b3) / (r_b4 + r_b3)
ndvi
nome_do_arquivo_ndvi = "C:/ser347/imagens_land5/imagens_teste/ndvi.tif"
salvar_banda(ndvi, nome_do_arquivo_ndvi, dataset_b1)
# -
# #### SAVI:
#
# Para o cálculo do Índice de Vegetação Ajustado para os Efeitos do Solo (*Soil Adjusted Vegetation Index* - SAVI) que é um índice que busca amenizar os efeitos do *background* do solo, tem sido utilizada a expressão (Huete, 1988):
#
# $$ SAVI = \dfrac{(1 + F_s) *(ρ_{IV} - ρ_V)} {(F_s + ρ_{IV} + ρ_V)} $$
#
# ***Onde:***
#
# $F_s$ **:** Fator do tipo de solo;
#
# $ρ_{IV}$ **:** Reflectância da banda do infravermelho;
#
# $ρ_V$ **:** Reflectância da banda do vermelho;
#
# ---
# O fator $F_s$ é uma função do tipo de solo. Em estudo recente, utilizamos $F_s$ = 0,1.
# +
#Códigos
# SAVI with soil factor Fs = 0.1: (1 + Fs)(NIR - Red) / (Fs + NIR + Red).
savi = ((1.1) * (r_b4 - r_b3)) / (0.1 + r_b4 + r_b3)
nome_do_arquivo_savi = "C:/ser347/imagens_land5/imagens_teste/savi.tif"
salvar_banda(savi, nome_do_arquivo_savi, dataset_b1)
# -
# #### IAF:
#
# O Índice de Área Foliar (IAF) é definido pela razão entre a área foliar de toda a vegetação por unidade de área utilizada por essa vegetação. O IAF é um indicador da biomassa de cada pixel da imagem e o mesmo foi computado pela seguinte equação empírica obtida por Allen et al. (2002):
#
# $$ IAF = - \dfrac{ln\left(\dfrac{0,69 - SAVI} {0,59}\right)} {0,91} $$
#
# +
#Códigos
# Clamp SAVI below 0.69 so the log argument stays strictly positive.
teste_savi = np.where(savi>0.69, 0.689, savi)
# Leaf area index (Allen et al., 2002 empirical fit).
iaf = -(np.log((0.69-teste_savi)/0.59)/0.91)
nome_do_arquivo_iaf = "C:/ser347/imagens_land5/imagens_teste/iaf.tif"
salvar_banda(iaf, nome_do_arquivo_iaf, dataset_b1)
# -
# ### Emissividades
#
# Para a obtenção da temperatura da superfície, é utilizada a equação de Planck invertida, válida para um corpo negro. Como cada pixel não emite radiação eletromagnética como um corpo negro, há a necessidade de introduzir a emissividade de cada pixel no domínio espectral da banda termal $ε_{NB}$ , qual seja: 10,4 – 12,5 μm. Por sua vez, quando do cômputo da radiação de onda longa emitida por cada pixel, há de ser considerada a emissividade no domínio da banda larga $ε_0$ (5 – 100 μm). Segundo Allen et al. (2002), as emissividades podem ser obtidas, para NDVI > 0 e IAF < 3, segundo:
#
# $$ ε_{NB} = 0,97 + 0,0033 * IAF $$
#
# $$ ε_0 = 0,95 + 0,01 * IAF $$
#
# ***Onde:***
#
# $ε_{NB}$ **:** Emissividade de cada pixel na banda Termal;
#
# $ε_0$ **:** Emissividade no domínio da banda larga;
# +
#Códigos
# Narrow-band and broad-band emissivities (valid for NDVI > 0 and IAF < 3).
enb = 0.97 + 0.0033 * iaf
eo = 0.95 + 0.01 * iaf
# -
# ### Temperatura de Superfície
#
# Para a obtenção da temperatura da superfície ($T_s$) são utilizadas a radiância espectral da banda termal e a emissividade obtida na etapa anterior. Dessa forma, obtém-se a temperatura da superfície (K) pela seguinte expressão:
#
# $$ T_s = \dfrac{K_2} {ln\left(\dfrac{ε_{NB} * K_1} {L_{λ6}}+1\right)} $$
#
# ***Onde:***
#
# ***K1, K2*** **:** Parâmetros de calibração para a banda Termal, obtidas no metadado;
#
# $L_{λ6}$**:** Radiância da banda Termal;
#
# $ε_{NB}$ **:** Emissividade de cada pixel na banda Termal;
# +
#Códigos
# Thermal-band calibration constants K1 [W/m2/sr/um] and K2 [K] for TM.
kt1 = 607.76
kt2 = 1260.56
# Inverted Planck equation gives the surface temperature [K].
ts = kt2 / np.log(((enb*kt1)/radiancia_b6)+1)
nome_do_arquivo_ts = "C:/ser347/imagens_land5/imagens_teste/temp_sup.tif"
salvar_banda(ts, nome_do_arquivo_ts, dataset_b1)
# -
# ### Radiação de Onda Longa Emitida
#
# A radiação de onda longa emitida pela superfície $R_{ol,emi}$ (W/m²) é obtida através da equação de Stefan-Boltzman:
#
# $$ R_{ol,emi} = ε_0 * σ * T_S ^4 $$
#
# ***Onde:***
#
# $T_S$ **:** Temperatura de Superfície;
#
# ***σ :*** Constante de Stefan-Boltzman;
#
# $ε_0$ **:** Emissividade no domínio da banda larga;
#
#Códigos
# Stefan-Boltzmann constant [W/m2/K^4].
boltzman = 5.67 * 10 ** -8
# Long-wave radiation emitted by the surface [W/m2].
Rol = eo * boltzman * (ts**4)
nome_do_arquivo_Rol = "C:/ser347/imagens_land5/imagens_teste/Rol.tif"
salvar_banda(Rol, nome_do_arquivo_Rol, dataset_b1)
# ### Radiação de Onda Curta Incidente
#
# A radiação de onda curta incidente $R_{sol,inc}$ (W/m²) é o fluxo de radiação solar direta e difusa que atinge a superfície terrestre, que para condição de céu claro é dada pela seguinte expressão (Allen et al., 2002):
#
# $$ R_{sol,inc} = S * cosZ * d_r * Ƭ_{sw} $$
#
# ***Onde:***
#
# ***S :*** Constante Solar;
#
# ***Z :*** Ângulo zenital;
#
# $Ƭ_{sw}$**:** Transmissividade atmosférica;
#
# $d_r$**:** Fator de variação da distância Sol-Terra;
# +
#Códigos
# Incoming clear-sky short-wave radiation [W/m2]; S is the solar constant.
S = 1367
Rsol = S * cosz * dr * t
# -
# ### Radiação de onda longa incidente
#
# A radiação de onda longa incidente emitida pela atmosfera na direção da superfície $R_{ol,atm}$ (W/m²), pode ser computada pela equação de Stefan-Boltzmann:
#
# $$ R_{ol,atm} = ε_a * σ * T_{ar} ^4 $$
#
# ***Onde:***
#
# $ε_a$**:** Emissividade da atmosfera;
#
# $T_{ar}$**:** Temperatura do ar;
#
# ***σ :*** Constante de Stefan-Boltzman;
#
#Códigos
ta=float(input("Digite a temperatura média do ar: "))
# Atmospheric emissivity from the transmissivity: 0.85 * (-ln(tau))^0.09
# (presumably the Bastiaanssen formulation -- confirm against the reference).
ea = 0.85 * ((-math.log(t))**0.09)
# Incoming long-wave radiation [W/m2]; air temperature converted from C to K.
Rolatm = ea * boltzman * ((ta+273.15)**4)
# ### Saldo de radiação
#
# O saldo de radiação à superfície $R_n$ (W/m²) é computado utilizando-se a seguinte equação do balanço de radiação à superfície:
#
# $$ R_n = R_{sol,inc}(1 - α_{sup}) - R_{ol,emi} + R_{ol,atm} - (1 - ε_0)R_{ol,atm} $$
#
# ***Onde:***
#
# $α_{sup}$**:** Albedo de cada pixel corrigido;
#
# $ε_0$ **:** Emissividade no domínio da banda larga;
#
# $R_{sol,inc}$**:** Radiação de Onda Curta incidente;
#
# $R_{ol,emi}$**:** Radiação de Onda Longa emitida;
#
# $R_{ol,atm}$**:** Radiação de Onda Longa incidente;
#
# +
#Códigos
# Surface net radiation balance [W/m2].
Rn = (Rsol * (1 - alb)) - Rol + Rolatm - ((1-eo)*Rolatm)
nome_do_arquivo_rn = "C:/ser347/imagens_land5/imagens_teste/rn.tif"
salvar_banda(Rn, nome_do_arquivo_rn, dataset_b1)
# -
# Valores dos indices de vegetação, temperatura de superfície, saldo de radiação, radiação de onda longa emitida
# específicos por coordenadas
# +
# Geographic (WGS84 lat/long) reference for the user-supplied coordinates.
srs_wgs84 = osr.SpatialReference()
srs_wgs84.ImportFromProj4("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
# Projected reference of the image, read from the dataset itself.
srs_utm = osr.SpatialReference()
srs_utm.ImportFromWkt(dataset_b1.GetProjection())
srs_transform = osr.CoordinateTransformation(srs_wgs84, srs_utm)
lat=float(input("Digite a latitude: "))
long=float(input("Digite a longitude: "))
cols = dataset_b1.RasterXSize
rows = dataset_b1.RasterYSize
# Affine geotransform: raster origin and pixel size.
transform = dataset_b1.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
# Project the lat/long point into image coordinates and then to pixel
# indices. (The original also read the full band 1 raster here into an
# unused variable -- dropped, it only wasted time and memory.)
x, y, z = srs_transform.TransformPoint(long, lat)
col = int( (x - xOrigin) / pixelWidth )
linha = int( (y - yOrigin) / pixelHeight )
if 0 <= linha < rows and 0 <= col < cols:
    print("NDVI:", ndvi[linha, col], "SAVI:", savi[linha, col], "IAF:", iaf[linha, col],
          "TS:", ts[linha, col], "Rol", Rol[linha, col],"Rn:", Rn[linha, col])
else:
    print("Coordenada fora da imagem. Por gentileza, insira uma coordenada dentro da imagem.")
# -
# ### Referências
#
# (Chander et al., 2009)
# Iqbal (1983)
# (Allen et al., 2002)
# (Huete, 1988)
# (Huete &Warrick, 1990; Accioly et al., 2002; Boegh et al., 2002
| scripts/IOP_AOPs/scripts/Termal/LandSat/Balanco_termal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spatial subsets and averages
#
# Finch includes processes to subset and average gridded data. Subsetting processes return a rectangular portion of the original grid. This portion can either be defined as a bounding box over geographic coordinates (longitude, latitude), or as a polygon outline.
#
# When subsetting a polygon or over a curvilinear grid, the subset usually does not align with the grid dimensions. In this case, the subset functions will return data over a rectangular array, but mask grid cells that fall outside the subset.
import os
import xarray as xr
from birdy import WPSClient
# PAVICS production finch endpoint; override with the WPS_URL env var.
pavics_url = 'https://pavics.ouranos.ca/twitcher/ows/proxy/finch/wps'
url = os.environ.get('WPS_URL', pavics_url)
# SSL verification stays on unless DISABLE_VERIFY_SSL is set; a plain
# membership test replaces the redundant `True if ... else False` ternary.
verify_ssl = 'DISABLE_VERIFY_SSL' not in os.environ
wps = WPSClient(url, verify=verify_ssl)
# For the examples in this notebook, we are using a year of daily data, minimum temperature over southern Québec.
# URL to a netCDF of minimum daily temperature over southern Quebec
# (served through THREDDS/OPeNDAP, so the WPS can read it remotely).
tasmin_url = "https://pavics.ouranos.ca/twitcher/ows/proxy/thredds/dodsC/birdhouse/testdata/xclim/NRCANdaily/nrcan_canada_daily_tasmin_1990.nc"
# ## Bounding box subsetting
#
# The `subset_bbox` process returns the grid portion within longitude and latitude bounds. Western hemisphere longitudes can be given as negative numbers or positive ones, no matter what range the dataset uses (longitudes are either in `[-180, 180[` or `[0, 360[`).
#
# The box chosen here is over the city of Montréal and its surroundings.
# Subset over a lon/lat box around Montreal; one week of 'tasmin' only.
resp = wps.subset_bbox(tasmin_url, lon0=-74.2, lon1=-73.3, lat0=45.25, lat1=45.80, variable=['tasmin'],
                       start_date='1990-01-01', end_date='1990-01-08')
# NBVAL_IGNORE_OUTPUT
# asobj=True downloads the result and opens it (here as an xarray Dataset).
ds_mtl = resp.get(asobj=True).output
ds_mtl.tasmin.isel(time=0).plot();
# ## Polygon subsetting
#
# The subset processes of finch also support subsetting from a polygon input. Polygons are passed as GeoJSON or zipped-ShapeFiles files. They can be either local or remote.
#
# In the next example, we retrieve a dataset of polygons from a geospatial data server. Communication with this server uses the Web Feature Services (WFS) standard, an Open Geospatial Consortium (OGC) protocol.
#
# We then inspect and select the polygon using GeoPandas, write it down in a local file and send the subsetting request to finch.
#
# The subsetting methods are provided under-the-hood by `clisops`. Note that the grid cells returned are those whose centroid lies within the polygon. If the polygon overlays a corner of a grid cell, it will not be included in the subset, or will be masked. Thus, this algorithm might not be appropriate for small or intricate polygons relative to the underlying grid.
# +
from owslib.wfs import WebFeatureService
import json
import geopandas as gpd
from pathlib import Path
# Connect to GeoServer WFS service (protocol version pinned to 1.1.0).
wfs_url = 'https://boreas.ouranos.ca/geoserver/wfs'
wfs = WebFeatureService(wfs_url, version='1.1.0')
# -
# #### Downloading and handling polygons locally
#
# This first example shows how to download a collection of polygons from the GeoServer using WFS and `owslib`. The polygon selection is done with `geopandas`, locally, allowing for more polygon processing if needed.
# NBVAL_IGNORE_OUTPUT
# Get the json as a binary stream
# Here we select Quebec's MRCs polygons
# We select only a few properties to be returned.
data = wfs.getfeature(
    typename='public:quebec_admin_boundaries',
    #bbox=(-93.1, 41.1, -75.0, 49.6),
    outputFormat='json',
    propertyname=['the_geom', 'RES_NM_REG']
)
# Load into a GeoDataFrame by reading the json on-the-fly
all_shapes = gpd.GeoDataFrame.from_features(json.load(data))
all_shapes
# NBVAL_IGNORE_OUTPUT
# Select 3 regions around the city of Montréal (drop unused "bbox" dimension)
poly_mtl = all_shapes[all_shapes.RES_NM_REG.isin(['Montérégie', 'Montréal', 'Laval'])].drop(columns=['bbox'])
poly_mtl
# Write to a local GeoJSON; birdy requires an absolute path when uploading.
poly_file = Path('/tmp/mtl_raw.geojson')
poly_mtl.to_file(poly_file, driver='GeoJSON')
# Now that we have a (multipart) polygon, we can call the WPS subset process. When passing a local file, birdy requires the path to be absolute, our use of a `Path` object makes this easy. Before subsetting, the three polygons of our GeoJSON will be merged as one.
# Subset using the polygon file; the server merges the multipart polygon first.
resp = wps.subset_polygon(tasmin_url, poly_file.absolute(), variable=['tasmin'],
                          start_date='1990-01-01', end_date='1990-01-08')
# NBVAL_IGNORE_OUTPUT
ds_mtl = resp.get(asobj=True).output
ds_mtl.tasmin.isel(time=0).plot();
# ## Averaging over polygons
#
# Finch also provides an `average_subset` process. The call is similar, but it instead averages the data over each polygon in the GeoJSON. In this case, in opposition to `subset_shape`, the intersection between polygon and grid cells is exact: partial overlaps between the polygon and grid cell outline are considered, as well as holes within the polygon itself.
#
# The computation of these overlaps can be time-consuming, especially when the polygon is defined at high resolution. In turn, long execution time can cause time-outs on the server side. As the second example below demonstrates, a strategy to speed up execution is to simplify the polygon shape.
#
#
# ### Basic averaging
# Basic example, let's simply reuse the same geojson:
# Average 'tasmin' over each polygon in the GeoJSON.
resp = wps.average_polygon(tasmin_url, poly_file.absolute(), variable=['tasmin'],
                           start_date='1990-01-01', end_date='1990-01-08')
# NBVAL_IGNORE_OUTPUT
ds_avgmtl = resp.get(asobj=True).output
# One averaged time series per input geometry.
ds_avgmtl.tasmin.plot(hue='geom');
# NBVAL_IGNORE_OUTPUT
ds_avgmtl
# ### High-resolution polygons
#
# The pavics finch server limits the uploads to 3 MB, so our polygon file is limited to that size. In any case, a 3 MB polygon is already quite large and will take a long time to process. Here, we download another polygon (the whole Québec province) and simplify it _before_ sending it to finch.
# NBVAL_IGNORE_OUTPUT
# Get polygon dataset from GeoServer
data = wfs.getfeature(
    typename='public:canada_admin_boundaries',
    # This bbox represents our input data's coverage.
    # Only polygons touching that zone will be returned in the response.
    bbox=(-74.5, 45.2, -73, 46),
    outputFormat='JSON'
)
# Directly read into a GeoDataFrame, drop the bbox column.
all_shapes = gpd.GeoDataFrame.from_features(json.load(data)).drop(columns=['bbox'])
all_shapes
# Create a Dataframe with simplified polygons
# (tolerance is in degrees; coarser tolerance -> smaller upload, faster WPS run)
poly_simple = all_shapes.copy()
poly_simple['geometry'] = all_shapes.simplify(tolerance=0.01)
# +
# Write polygons to geojson files.
poly_file = Path('/tmp/prov_simp.geojson')
poly_simple.to_file(poly_file, driver='GeoJSON')
# Write the unsimplified version for comparison only
all_shapes.to_file('/tmp/prov_raw.geojson', driver='GeoJSON')
# -
# NBVAL_IGNORE_OUTPUT
# As we can see, the simplified version is way smaller than the raw.
# Finch will not accept inputs larger than 3 MB (raises "BrokenPipe" error).
# But even with ~2 MB, the process is too slow and might time out.
# !du -hs /tmp/*.geojson
resp = wps.average_polygon(
    resource=tasmin_url,
    shape=poly_file.absolute(),
    variable='tasmin',
    start_date='1990-01-01',
    end_date='1990-01-08'
)
# NBVAL_IGNORE_OUTPUT
avg = resp.get(asobj=True).output
avg
| docs/source/notebooks/subset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pynq.overlays.base import BaseOverlay
from pynq.lib.video import *
# Load the base overlay bitstream and grab the HDMI input/output pipelines.
base = BaseOverlay("base.bit")
hdmi_in = base.video.hdmi_in
hdmi_out = base.video.hdmi_out
# +
# Configure the output to mirror whatever mode the input source negotiated.
hdmi_in.configure()
hdmi_out.configure(hdmi_in.mode)
hdmi_in.start()
hdmi_out.start()
# -
# Tie input to output so frames flow through without per-frame Python calls.
hdmi_in.tie(hdmi_out)
# +
import time
numframes = 600
start = time.time()
# Software passthrough benchmark: read each frame and write it back out.
for _ in range(numframes):
    f = hdmi_in.readframe()
    hdmi_out.writeframe(f)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
# -
type(f)
# +
import cv2
import numpy as np
numframes = 10
# Pre-allocated single-channel buffers so OpenCV writes results in place.
grayscale = np.ndarray(shape=(hdmi_in.mode.height, hdmi_in.mode.width),
                       dtype=np.uint8)
result = np.ndarray(shape=(hdmi_in.mode.height, hdmi_in.mode.width),
                    dtype=np.uint8)
start = time.time()
for _ in range(numframes):
    inframe = hdmi_in.readframe()
    cv2.cvtColor(inframe,cv2.COLOR_BGR2GRAY,dst=grayscale)
    # Return the DMA frame buffer to the pool as soon as it is consumed.
    inframe.freebuffer()
    # Laplacian edge detection on the grayscale image.
    cv2.Laplacian(grayscale, cv2.CV_8U, dst=result)
    outframe = hdmi_out.newframe()
    cv2.cvtColor(result, cv2.COLOR_GRAY2BGR,dst=outframe)
    hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
# -
hdmi_out.close()
hdmi_in.close()
#
# +
# Re-download the bitstream and restart HDMI with uncached frame buffers
# (cacheable_frames=False keeps hardware and CPU views of frames coherent).
base.download()
hdmi_in.configure()
hdmi_out.configure(hdmi_in.mode)
hdmi_out.cacheable_frames = False
hdmi_in.cacheable_frames = False
hdmi_out.start()
hdmi_in.start()
# -
hdmi_in.tie(hdmi_out)
# +
import PIL.Image
frame = hdmi_in.readframe()
# -
type(frame)
frame
# Display the captured frame inline in the notebook.
image = PIL.Image.fromarray(frame)
image
import numpy
arr = numpy.asarray(frame)
type(arr)
hdmi_out.writeframe(frame)
import cv2
import numpy as np
# Draw a black circle onto the frame and push it to the display.
img = cv2.circle(frame, (200, 200), 100, (0, 0, 0))
hdmi_out.writeframe(frame)
len(img)
i = 0
# Animate the circle left-to-right, one pixel per frame.
# NOTE(review): the loop count is len(img), i.e. the frame height --
# presumably an arbitrary frame count was intended; confirm.
for _ in range(len(img)):
    f = hdmi_in.readframe()
    img = cv2.circle(f, (i, 200), 100, (0, 0, 0))
    hdmi_out.writeframe(img)
    i=i+1
| .ipynb_checkpoints/test HDMI-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rajeevak40/Course_AWS_Certified_Machine_Learning/blob/master/handwriting.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="iMNhJuoryWTB"
import tensorflow as tf
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="M9CymislzwPt" outputId="43b8282f-090e-47d9-d542-970572da0e7c"
# MNIST: 28x28 grayscale digits with integer labels 0-9.
handwriting = tf.keras.datasets.mnist
(train_data, train_lable), (test_data, test_lable)= handwriting.load_data()
# + id="mfEm0PFJ3uU_"
# Scale pixel intensities from [0, 255] to [0, 1].
train_data= train_data/255
test_data=test_data/255
# + id="tpDc68Ao0ctw"
# Flatten 28x28 -> 784, two hidden ReLU layers, softmax over 10 classes.
# NOTE(review): input_shape on the Dense layer is ignored because Flatten
# precedes it -- harmless, but confirm the intended input specification.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, input_shape=(28,28), activation='relu'),
    tf.keras.layers.Dense(64,activation='relu' ),
    tf.keras.layers.Dense(10, activation='softmax')
])
# + colab={"base_uri": "https://localhost:8080/"} id="tJEzvO1f5iIt" outputId="42e8f894-1b0b-4317-d602-9f0ad604977d"
# Integer labels -> sparse categorical cross-entropy.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_lable, epochs=10)
# + colab={"base_uri": "https://localhost:8080/"} id="rbXHAzWL7Ikb" outputId="9e928c81-9968-45ef-a7b6-eac3c6177c96"
model.evaluate(test_data, test_lable)
# + [markdown] id="Q4Dz3WLrJFZ8"
# # New Section
| handwriting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import glob
import json
from PIL import Image
import numpy as np
import pandas as pd
class Structure(object):
"""Structure which contains the images, masks and labels."""
    def __init__(self, label_path, image_path, mask_path):
        """Init.

        :param label_path: (str) path of json file
        :param image_path: (str) path of images
        :param mask_path: (str) path of masks
        """
        self.label_path = label_path
        self.image_path = image_path
        self.mask_path = mask_path
        self.width = 512   # target width for resized images/masks
        self.height = 512  # target height for resized images/masks
        self.base_path_masks = str()    # template "<dir>/{}.<ext>", set by later processing
        self.base_path_images = str()   # template "<dir>/{}.<ext>", set by later processing
        self.data = dict()              # {'id': {'mask': array, 'image': array}}, filled later
        self.labels_df = pd.DataFrame() # labels table, populated from the json file
@staticmethod
def find_all_files(path):
"""Find all the files in a directory.
:param path: (str) path of the images or masks
:return: (list) all paths
"""
return glob.glob(os.path.join(path, "*"))
@staticmethod
def read_json(path):
"""Read json file.
:param path: (str) path of the json file
:return: (dict) labels of each images
"""
with open(path, 'r') as f:
labels = json.load(f)
return labels
@staticmethod
def base_path(path):
"""Find the basic path for the masks & images
:param path: (str) path of an image or a mask
:return: (str) base path
"""
return "/".join(path.split("/")[:-1]) + "/{}." + path.split(".")[-1]
@staticmethod
def find_images_id(paths):
"""Find the images & masks id
:param paths: (list) all paths
:return: (list) images id
"""
return [path.split("/")[-1].split(".")[0] for path in paths]
def find_images_with_masks(self, images_id, masks_id):
"""Find all the images which contains a masks.
:param images_id: (list) images_id
:param masks_id: (list) masks_id
:return: (tuple) ids, masks_paths, images_paths
"""
ids = set(images_id).intersection(set(masks_id))
masks_paths = [self.base_path_masks.format(path) for path in ids]
images_paths = [self.base_path_images.format(path) for path in ids]
return ids, masks_paths, images_paths
@staticmethod
def read_image(path, width, height, mode="RGB"):
"""Read image as a numpy array.
:param path: (str) path of the image
:param width: (int) new width size
:param height: (int) new height size
:param mode: (str) 'RGB' or 'L' image in color or gray
:return: (np.array) image as np.array
"""
image = Image.open(path)
image = image.resize([width, height], Image.BILINEAR)
return np.array(image.convert(mode))
def read_masks_images_by_id(self, ids, masks_path, images_path):
"""Read the masks and images as array.
:param ids: (set) ids of the images
:param masks_path: (list) all masks paths
:param images_path: (list) all images paths
:return: (dict) {'id': {'mask': np.array, 'image': np.array'}}
"""
ids = sorted(ids)
masks_path = sorted(masks_path)
images_path = sorted(images_path)
data = dict()
for i in range(len(ids)):
data[ids[i]] = {
"mask": self.read_image(masks_path[i], self.height, self.width),
"image": self.read_image(images_path[i], self.height, self.width)
}
return data
@staticmethod
def transform_labels_to_df(labels, ids):
"""Transform the json into dataframe.
:param labels: (dict) labels data
:param ids: (set) ids of the images
:return: (pd.DataFrame) [bbox, iscrowd, filename, isthing, name, supercategory]
"""
df_labels = pd.DataFrame()
for label in labels["annotations"]:
filename = label["file_name"].split(".")[0]
if filename in ids:
df = pd.DataFrame(label["segments_info"])
df["filename"] = filename
df_labels = pd.concat([df_labels, df])
labels_df = pd.merge(df_labels, pd.DataFrame(labels["categories"]), left_on="category_id", right_on="id")
labels_df = labels_df.drop(["area", "category_id", "id_x", "id_y"], axis=1)
return labels_df
def prepare(self):
"""Prepare the dataset.
:return: tbd
"""
masks_path = self.find_all_files(self.mask_path)
images_path = self.find_all_files(self.image_path)
labels = self.read_json(self.label_path)
masks_id = self.find_images_id(masks_path)
images_id = self.find_images_id(images_path)
self.base_path_masks = self.base_path(masks_path[0])
self.base_path_images = self.base_path(images_path[0])
ids, masks_path, images_path = self.find_images_with_masks(images_id, masks_id)
self.data = self.read_masks_images_by_id(ids, masks_path, images_path)
self.labels_df = self.transform_labels_to_df(labels, ids)
# -
import matplotlib.pyplot as plt
# %matplotlib inline
# Build the dataset from the raw labels/images/masks directories.
struct = Structure(label_path="../data/raw/maskrcnn/labels.json", image_path="../data/raw/maskrcnn/images", mask_path="../data/raw/maskrcnn/masks")
struct.prepare()
# Show one image and its mask, then preview the flattened labels table.
plt.imshow(struct.data["000000000885"]["image"])
plt.imshow(struct.data["000000000885"]["mask"])
struct.labels_df.head()
| notebooks/MASK RCNN _ prepare data .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## GrEx 4 - Enron Email and Elasticsearch ##
# ## Part 1 ##
# <h3> Steps </h3>
# > 1. We need to retrieve some Enron email data from the DSCC for further analysis
# > 2. Next, we will load the list of 250k+ Message IDs from a pickle file
# > 3. Create a local Python elasticsearch client which connects to the remote Elasticsearch (ES) server
# > 4. Download the email data, store it as a DataFrame and pickle the DataFrame
# Let's load all the packages we will need
import pandas as pd
from pandas.io.json import json_normalize
import pickle
from elasticsearch import Elasticsearch, helpers
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import functools
import time
import re
from collections import Counter
import matplotlib
import numpy as np
# Let's get the list of Message IDs
with open ("id_lst_250k.pkl", "rb") as list_file:
    id_list = pickle.load(list_file)
# Divide id_list into lists of 1000 IDs
# (paging keeps each Elasticsearch terms query to a manageable size).
id_list_chunks = [id_list[i:i + 1000] for i in range(0, len(id_list), 1000)]
# Now we need to create low-level client that will be used to connect to enron index in the ES database
# NOTE(review): credentials are embedded in the URL (user:password@host) -- fine
# for a classroom exercise, but never commit real credentials like this.
es=Elasticsearch('http://enron:spsdata@172.16.58.3:9200')
# Create timing decorator
def timeit(fn):
    """Decorator that prints how long each call to *fn* takes.

    The wrapped function's return value is passed through unchanged;
    functools.wraps preserves the original name and docstring.
    """
    @functools.wraps(fn)
    def inner(*args,**kwargs):
        start_time = time.time()
        retval = fn(*args,**kwargs)
        duration = time.time()- start_time
        # FIX: print the function's name instead of the raw function object
        # repr ("<function get_messages at 0x...>") for a readable log line.
        print('{} took {:4.2f} sec.'.format(fn.__name__,duration))
        return retval
    return inner
# The following code will be used to get the messages one chunk at a time
results_list = []
@timeit
def get_messages():
    """Fetch all messages from the 'enron' index, one 1000-id chunk per request.

    Raw es.search responses are appended to the module-level results_list.
    """
    for lst in id_list_chunks:
        query={"query" : {"terms" : {"_id":lst}}}
        results_list.append(es.search(size=len(lst),index='enron',doc_type='email',body=query))
# Now we can run get_messages
get_messages()
# Put the messages in one list
# (each response carries its hits under ['hits']['hits']; the document body is '_source').
message_list=[msg['_source'] for i in range(len(results_list)) for msg in results_list[i]['hits']['hits']]
# See how many messages exist
# Should be 250756
len(message_list)
# We will use 'json_normalize' function to create our Enron email DataFrame - enron_email_df
# json_normalize flattens nested dicts into dotted column names (e.g. "headers.To").
enron_email_df = json_normalize(message_list)
# Let's preview the first 5 rows
enron_email_df.head()
# Remove the "headers." prefix
# NOTE(review): the '.' in the pattern is unescaped (matches any character) --
# harmless for these column names, but r'^headers\.' would be exact.
enron_email_df.rename(columns=lambda x: re.sub(r'^headers.','',x),inplace=True)
# Display the first 5 columns again to make sure headers are fixed
enron_email_df.head()
# We will pickle our DataFrame for future use
enron_email_df.to_pickle('enron_email_df.pk1')
# ## Part 2 ##
# <h3> Answer the following questions and provide code/comments on how the answer was determined </h3>
# > 1. Obtain the messages from the enron index that include a Ken Lay email address in them in a message header. How many email messages are these?
# > 2. How many different Ken Lay email addresses are there in these messages? Provide a count of how many times each one occurs in the messages
# > 3. Determine how many of the messages are "To:" Ken Lay, and how many are "From:" Ken Lay. Provde a count for each of these.
# > 4. Who did Lay send the most emails to? How many did he send to this recipient? Who did he receive the most from? How many did he receive from this sender?
# > 5. Did the volume of emails sent by Lay increase or decrease after Enron filed for bankruptcy? How many did he send before the filing? How many, after?
# > 6. How many of the email messages in 4., above, mention <NAME>, Enron’s accounting firm?
#Our first goal is to scan the message headers for any email addresses that belong to Ken Lay
#Let's start with the "From" column
#We will check the "From" column and a couple keywords to see how many emails in the "From" have those keywords in them
#There are many emails that came up from our initial query
enron_email_df[enron_email_df.From.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)].From.value_counts()
#Create a "from_emails" variable that will be a subset of the "enron_email_df" based on keyword filtering we completed earlier on the "From" column
#Exclude NaN values from the "From" column first
from_emails = enron_email_df[enron_email_df["From"].notnull()]
from_emails = enron_email_df[enron_email_df.From.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)]
#To see if an email address belongs to Ken Lay, we need to inspect the body of the emails sent from any address we suspect might belong to him
#Let's start with "<EMAIL>"-- which was listed in our "From" query
#We were told that 20 emails were sent from that address
#This code will preview the some of the body of those 20 emails
from_emails[from_emails["From"]=="<EMAIL>"].body
#In order to open the body of any email, we need to use the Message ID number listed on the left
#Let's look at Message ID 131462
from_emails[from_emails["From"]=="<EMAIL>"].body[131462]
#It appears that an Administrative Assistant had control of this email address and would respond on behalf of <NAME>!
#Before we go any further, let's check our "enron_email_df" to see if we have any missing values in our columns
enron_email_df.isnull().sum()
#That's interesting that the "To" column has missing values
#We will now check the "To" column and a couple keywords to see how many email addresses in the "To" have those keywords in them
#First, we need to exclude NaN values in the "To" column
to_emails = enron_email_df[enron_email_df["To"].notnull()]
to_emails = to_emails[to_emails.To.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)]
#Let's see how many different email addresses are in the "To" column based on our keyword filtering
to_emails[to_emails.To.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)].To.value_counts()
#As the results show, this is complicated by the fact that the "To" column contains list of strings (i.e. multiple email recipients)
#Another way to see if an email address belongs to Ken Lay, is by inspecting the body of the emails sent to any address we suspect might belong to him
#Let's use "<EMAIL>"-- which came from our "To" query
#We were told that 799 emails were sent to that address
#This code will preview the body of 20 of those emails
to_emails[to_emails["To"]=="<EMAIL>"].body.head(20)
#In order to open the body of any email, we need to use the Message ID number listed on the left
#Let's look at Message ID 75927
to_emails[to_emails["To"]=="<EMAIL>"].body[75927]
#It appears that an employee sent an email to Ken Lay asking for help resolving multiple HR issues. Yikes!
#Another way we can investigate if an email belongs to Ken Lay, is by searching the "Subject" column
#We will use the same keywords as prior queries
enron_email_df[enron_email_df.Subject.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)].Subject.value_counts()
#The "Message from Ken Lay" sounds promising which demands further inspection
#We will create a "subject_emails" variable which includes the message headers after filtering the "Subject" column by our keywords
#Let's exclude any Nan values
subject_emails = enron_email_df[enron_email_df["Subject"].notnull()]
subject_emails =subject_emails[subject_emails.Subject.str.contains('chairman|ken|kenneth|chair|klay|kennethlay',case=False)]
#As our "Subject" query showed earlier, there are 30 emails that contain the "Subject" "Message from Ken Lay"
#We need to figure out the email address that was used to send emails with that Subject header
subject_emails[subject_emails["Subject"]=="Message from Ken Lay"]
#The "<EMAIL>" email needs to be further inspected to see if it belonged to Ken Lay
#Let's display a preview of the body for 20 of those emails which come from "<EMAIL>"
subject_emails[subject_emails["Subject"]=="Message from Ken Lay"].body.head(20)
#Finally, let's open up Message ID 29847
subject_emails[subject_emails["Subject"]=="Message from Ken Lay"].body[29847]
#It appears that this email was sent from Ken Lay announcing his resignation, which is quite historic!
#The "<EMAIL>.com" definitely belongs to Ken Lay
#After crowdsourcing, web searches and self-investigation for email addresses belonging to <NAME>, we will make a variable "kenlay_email_addresses"
#that are the email address addresses belonging to Ken Lay
#The variable will be a list of strings
kenlay_email_addresses = ["<EMAIL>", "<EMAIL>","<EMAIL>","<EMAIL>","<EMAIL>", "<EMAIL>", "<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"
,"<EMAIL>"]
#Based on the "kenlay_email_addresses", we will now calculate how many messages in "enron_email_df" contained a Ken Lay email in a message header
#We will use the "From" and "To" message headers to calculate the messages to build a new DataFrame -- kenlay_df-- based on our criteria
#Let's start by using "kenlay_email_addresses_" to create a DataFrame if the "From" column contains a Ken Lay email
from_kenlay_emails= enron_email_df[enron_email_df["From"].isin(kenlay_email_addresses)]
from_kenlay_emails.head(5)
#Next, let's create a DataFrame if the "To" column contains a Ken Lay email
#For the sake of my analysis, I am only interested in learning about the emails where Ken Lay was the only recipient
#Different analysis can be done where perhaps Ken Lay was included as a recipient among others
to_kenlay_emails = enron_email_df[enron_email_df["To"].isin(kenlay_email_addresses)]
to_kenlay_emails.head(5)
#Now we will concatenate the two DataFrames we just made to create "kenlay_df"
#This DataFrame includes all email messages either "To" Ken Lay or "From" Ken Lay based on our criteria
frames= [from_kenlay_emails, to_kenlay_emails]
kenlay_df = pd.concat(frames)
kenlay_df.head(5)
#Let's remove any duplicate rows from kenlay_df, if any
#BUG FIX: keep=False deleted *every* copy of a duplicated row, silently dropping
#any email that was both "From" and "To" a Ken Lay address. keep='first' keeps
#one copy of each, which is what "remove duplicates" intends. The resulting
#count may therefore be slightly higher than the 2461 reported below.
kenlay_df.drop_duplicates(keep='first', inplace=True)
#Now, we can answer Question 1 by using the len() function on "kenlay_df"
len(kenlay_df)
#There are 2461 emails messages that contain a Ken Lay email address in the "From" or "To" columns
#We determined that there are 26 Ken Lay email addresses earlier on which answers part of Question 2
len(kenlay_email_addresses)
#Now let's answer the second part of Question 2
#Let's calculate how many times each of Ken Lay's email addresses appear in the messages
#We will start with checking the "To" column
#FIX: pass regex=True explicitly -- modern pandas (>=2.0) treats the pattern
#literally by default -- and use raw strings so the backslash escapes survive.
#Strip all whitespace/newlines so the comma-split below yields clean addresses.
to_kenlay_emails['To']=to_kenlay_emails.To.str.replace(r'\s+|\r|\n','',regex=True)
to_kenlay_list = to_kenlay_emails.To.str.cat(sep=',').split(',')
Counter(to_kenlay_list).most_common()
#Next, we will check the "From" column
from_kenlay_emails['From']=from_kenlay_emails.From.str.replace(r'\s+|\r|\n','',regex=True)
from_kenlay_list = from_kenlay_emails.From.str.cat(sep=',').split(',')
Counter(from_kenlay_list).most_common()
#Question 3 asks how many messages are "To" Ken Lay and how many are "From" Ken Lay
len(kenlay_df[kenlay_df["To"].isin(kenlay_email_addresses)])
#There are 1510 email messages "To" Ken Lay
#There are 951 email messages "From" Ken Lay
len(kenlay_df[kenlay_df["From"].isin(kenlay_email_addresses)])
#Question 4 asks who did Ken Lay send the most email messages to and how many did he send
#Let's use our 'from_kenlay_emails' DataFrame to figure that out
from_kenlay_emails['To'].value_counts().idxmax()
#Looks like he sent the most emails to "<EMAIL>" which is most likely an All Employee inbox
#He sent 412 emails to "<EMAIL>"
from_kenlay_emails['To'].value_counts().max()
#Question 4 also asks who did Ken Lay receive the most email messages from and how many did he receive
#Let's use our 'to_kenlay_emails' DataFrame to figure that out
to_kenlay_emails['From'].value_counts().idxmax()
#Looks like he received the most emails from <NAME>
#He received 26 emails from "<EMAIL>"
to_kenlay_emails['From'].value_counts().max()
#Question 5 asks if the volume of emails sent by Ken Lay increase or decrease after Enron filed for bankruptcy
#We want to know how many he sent before and after as well, therefore we will be using the "from_kenlay_emails" DataFrame
#First, we will modify how the "Date" values appear using the following code
#After executing the code, the values will be in the format - Year, Month, Day
from_kenlay_emails['Date']=from_kenlay_emails['Date'].\
apply(lambda date_str: pd.to_datetime(date_str).date())
#Let's display the first five rows of the "Date" column again to see if the changes took place
from_kenlay_emails["Date"].head()
#Now we need to create a timestamp for the date Enron filed for bankruptcy which was on December 2nd, 2001
#We will place this date in a variable called "bankruptcy_date"
bankruptcy_date=pd.to_datetime('2 December 2001').date()
bankruptcy_date
#Next, we can use the "from_kenlay_emails" DataFrame along with "bankruptcy_date" to see how if he sent more or less emails after the bankruptcy
before_bankruptcy_emails = from_kenlay_emails[from_kenlay_emails.Date<bankruptcy_date]
len(before_bankruptcy_emails)
#<NAME> sent 801 emails before Enron filed for bankruptcy
#Let's see how many he sent afterwards
after_bankruptcy_emails = from_kenlay_emails[from_kenlay_emails.Date>bankruptcy_date]
len(after_bankruptcy_emails)
#Ken Lay sent 144 emails after Enron Filed for bankruptcy, so we can conclude that sent LESS emails after the bankruptcy date
#Another item that we find interesting is to see graphically the volume of emails sent during a 30 day span that includes the bankruptcy date
#We will look at 15 days before and after the bankruptcy date
#Using "Timedelta" function will allow us to create specific dates we need as part of our range
fifteen_days = pd.Timedelta('15 days')
#Let's make our starting date 11/17/2001
start_date = bankruptcy_date-fifteen_days
start_date
#Let's make our ending date 12/17/2001
end_date = bankruptcy_date+fifteen_days
end_date
#Let's make a DataFrame that includes the emails sent within our range
bankruptcy_emails_range = from_kenlay_emails[(from_kenlay_emails.Date > start_date) & (from_kenlay_emails.Date < end_date)]
bankruptcy_emails_range.shape
#It looks like there were only 12 emails in our range
#we need to sort the email volumes by date
range_volumes=bankruptcy_emails_range.Date.value_counts().sort_index()
range_volumes
#Now we can make our plot using "matplotlib" function
# %matplotlib inline
range_volumes.plot(kind='bar',title='Volume of Emails Sent',rot=0)
#Question 6 asks to see how many of emails from Question 4 mention <NAME>
#We will first create two new DataFrames that only include emails "From" '<EMAIL>' and "To" '<EMAIL>'
stevenkean_emails = kenlay_df[kenlay_df['From']=='<EMAIL>']
worldwide_emails = kenlay_df[kenlay_df['To']=='<EMAIL>']
#Let's see how many times <NAME> was mentioned in emails from <NAME>
#Since there aren't that many emails, we could open up the body of all the emails but instead we will search the "body" column for certain keywords
stevenkean_emails=stevenkean_emails[stevenkean_emails['body'].str.contains('Arthur|Andersen', case=False)]
stevenkean_emails
#Based off searching "Arthur" and "Anderson" in the body of Steven's emails, there were no emails that mentioned the accounting firm
#Let's also search through the "body" column of the emails sent to the '<EMAIL>' address for any mention of <NAME>
worldwide_emails = worldwide_emails[worldwide_emails['body'].str.contains('Arthur|Andersen', case=False)]
worldwide_emails
#Those emails did not contain any mention of the accounting firm either
#Note, that our result for Question 6 does not provide us anything meaningful. Our result is affected by the criteria we chose
#when defining our "kenlay_df" DataFrame--we could have found more email addresses related to Ken Lay, could have included emails where a Ken Lay email
#address was part of multiple recipients and could have been more creative in filtering for <NAME> (i.e. perhaps a code name was used)
# +
#Simply put, our overall results provide a narrow view of Ken Lay's emails which could be augmented in a future analysis by using more
#advanced techniques
# -
| GrEx4_OscarHernandez.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 32-bit
# name: python37432bit21c61af867d8453f8401e7eca06b3261
# ---
# # Data preparation for automatic music generation.
# ### Created by <NAME>
# #### Github: Juanju97
# ---
# This notebook is intended to explore the data and generate some insights as well as a suitable preprocessing method which prepares data to be used by the model.
# ## Creating vocabulary and data
# ---
# In this section, the goal is to analyze all midi files and generate a vocabulary containing all different types of notes and chords.
# We also want to store all midi notes in individual arrays (data object).
# +
from music21 import converter, instrument, chord, note, pitch
import glob

# vocabulary: every distinct "<pitch(es)>_<quarter-length>" token in the corpus.
# data: one list of such tokens per midi file, in playback order.
vocabulary = set([])
data = []
i = 0
for file in glob.glob("../data/classic_dataset/*.mid"):
    midi = converter.parse(file)
    # Prefer the first instrument part when the file is multi-track;
    # otherwise fall back to the flattened note stream.
    tracks = instrument.partitionByInstrument(midi)
    if tracks:
        main_track = tracks.parts[0].recurse()
    else:
        main_track = midi.flat.notes
    file_notes = []
    for e in main_track:
        if isinstance(e, note.Note):
            # Single note: encode as "<name+octave>_<duration>", e.g. "C4_0.5".
            element = e.nameWithOctave + "_" + str(e.duration.quarterLength)
            vocabulary.add(element)
            file_notes.append(element)
        elif isinstance(e, note.Rest):
            # Rest: encode as "rest_<duration>".
            element = "rest_" + str(e.duration.quarterLength)
            vocabulary.add(element)
            file_notes.append(element)
        elif isinstance(e, chord.Chord):
            # Chord: dot-join the constituent pitches, then append the duration.
            chord_notes = '.'.join([str(p) for p in e.pitches])
            element = chord_notes + "_" + str(e.duration.quarterLength)
            vocabulary.add(element)
            file_notes.append(element)
    data.append(file_notes)
    # Progress indicator: running count of processed files.
    i += 1
    print(i)
# +
import pickle
pickle.dump(sorted(vocabulary), open("./data/classic_dataset/vocabulary.p", "wb"))
pickle.dump(data, open("./data/classic_dataset/data.p", "wb"))
# -
# ## Mapping the vocabulary and formating data
# ---
# Now we want to feed the model with this data. The model performs better with numbers, so we need to map each value of the vocabulary into a number so the model can process it.
# +
import pickle
vocabulary = pickle.load(open("./data/classic_dataset/vocabulary.p", "rb"))
# Map each vocabulary token to a unique integer id. The pickled vocabulary was
# saved sorted, so the mapping is reproducible across runs.
map_voc = dict((element, number) for number, element in enumerate(vocabulary))
# -
# Now we apply the vocabulary mapping to encode the data as integer ids.
# +
import pandas
import numpy
# Encode every file's token sequence as a sequence of integer ids.
formated_data = [[map_voc[n] for n in file] for file in data]
pickle.dump(formated_data, open("./data/classic_dataset/formated_data.p", "wb"))
| data/prepare_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
# ignore this cell, this is just a helper cell to provide the magic %highlight_file
# %run ../highlighter.py
# -
# # Initializing Nornir
#
# Easiest way of initializing nornir is with the function [InitNornir](../api/nornir/__init__.html#nornir.__init__.InitNornir).
#
# With `InitNornir` you can initialize nornir with a configuration file, with code or with a combination of both.
#
# Let's start with [a configuration file](../../configuration/index.rst):
# %highlight_file config.yaml
# Now to create the [nornir](../../ref/api/nornir.rst#nornir) object:
from nornir import InitNornir
# Everything (runner, inventory, ...) comes from the YAML configuration file.
nr = InitNornir(config_file="config.yaml")
# You can also initialize nornir programmatically without a configuration file:
from nornir import InitNornir
nr = InitNornir(
    runner={
        "plugin": "threaded",
        "options": {
            "num_workers": 100,
        },
    },
    inventory={
        "plugin": "SimpleInventory",
        "options": {
            "host_file": "inventory/hosts.yaml",
            "group_file": "inventory/groups.yaml"
        },
    },
)
# Or with a combination of both methods:
from nornir import InitNornir
nr = InitNornir(
    config_file="config.yaml",
    runner={
        "plugin": "threaded",
        "options": {
            "num_workers": 50,
        },
    },
)
# Keyword arguments passed to InitNornir override the config-file values,
# so this reads back 50 rather than whatever config.yaml specifies.
nr.config.runner.options["num_workers"]
| docs/tutorial/initializing_nornir.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import time
import re
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
from datetime import datetime
from matplotlib.ticker import StrMethodFormatter
import seaborn as sns
sns.set(color_codes=True)
from sklearn.preprocessing import MinMaxScaler
# ## Data Cleaning
rawData = pd.read_csv('data/SqlMetric_prepared.csv')
rawData.head()
# Drop identifier/constant columns that carry no signal for this analysis.
rawData.drop(['ActivityID', 'Event Name', 'criteria', 'Process Name'], axis=1, inplace=True)
rawData.isnull().sum()
rawData.dropna(inplace=True)
print(rawData.isnull().sum())
print('Dataset shape: {}'.format(rawData.shape))
rawData.head()
# +
# The raw 'metrics' column is a space-separated list of "Name:count" pairs.
rawData['db_state'] = rawData['metrics'].apply(lambda x: x.split(' '))
rawData.drop(['metrics'], axis=1, inplace=True)
print('Dataset shape: {}'.format(rawData.shape))
row = rawData.iloc[0,5]
print('Metrics: {}'.format(row))
# Seed the accumulator dict with one empty list per metric key from the first row.
# NOTE(review): keys here come from the first row's metric *names*, while the
# fill loop below appends under synthetic 'Table_<i>' keys -- this only lines
# up if the metric names are literally 'Table_1'..'Table_4'; confirm vs. data.
dic = {}
for st in row:
    splitted = st.split(':')
    key = splitted[0]
    dic[key] = []
print('Parsing sql metrics')
for index, row in rawData.iterrows():
    metrics_row = row['db_state']
    # Only the first four metrics are extracted, as columns Table_1..Table_4.
    for i in range(0,4):
        st = metrics_row[i]
        splitted = st.split(':')
        key = 'Table_{}'.format(i+1)
        val = np.NaN
        # Malformed entries (no ':') become NaN and are dropped later.
        if len(splitted)>1:
            val = int(splitted[1])
        dic[key].append(val)
print('Adding new metrics collumns')
for key in dic.keys():
    rawData[key] = dic[key]
print('Drop db_state column')
rawData.drop(['db_state'], axis=1, inplace=True)
print('Parsing sql metrics finnised')
print('Dataset shape: {}'.format(rawData.shape))
# -
print(rawData.isnull().sum())
rawData.dropna(inplace=True)
print('Dataset shape: {}'.format(rawData.shape))
# Strip thousands separators from string cells so numeric parsing works below.
# FIX: the original guard `type(x) is str or type(x) is object` is misleading --
# `type(x) is object` is never true for concrete cell values, so isinstance(x, str)
# is the correct (and equivalent-in-practice) check.
rawData = rawData.applymap(lambda x: x.replace(',','') if isinstance(x, str) else x)
# usn == 0 marks a cold start (first execution of the query).
rawData['is_cold_start'] = rawData['usn'].apply(lambda x: True if int(x) == 0 else False)
rawData.drop(['usn', 'timestamp'], axis=1, inplace=True)
rawData.rename(columns={'Time MSec': 'timestamp', 'DURATION_MSEC': 'duration'}, inplace=True)
rawData['duration'] = rawData['duration'].apply(lambda x: float(x))
# Bucket millisecond timestamps into whole seconds for per-second aggregation.
rawData.loc[:, 'second'] = rawData.loc[:, 'timestamp'].apply(lambda x: round(x/1000))
print(rawData.isnull().sum())
rawData.head()
rawData.shape
# ## Studing data
# Decalre some helpers
# +
main_color = '#2a6e52'
accent_color = '#6e4b2a'
plt.rcParams["axes.grid"] = False
plt.rcParams["axes.facecolor"] = '#fff'
plt.rcParams["axes.edgecolor"] = '#222'
plt.rcParams["lines.linewidth"] = 2
plt.rcParams["lines.color"] = main_color
def get_top_calling_procedures(df_procedures, limit):
    """Return the rows whose viewName occurs at least *limit* times.

    FIX: the original ignored the *limit* parameter and hard-coded 1000
    (callers pass 1000, so existing behavior is unchanged).

    :param df_procedures: DataFrame containing a 'viewName' column
    :param limit: (int) minimum number of occurrences for a viewName to be kept
    :return: DataFrame restricted to the frequently-called procedures
    """
    counts = df_procedures.groupby('viewName')['viewName'].count().sort_values(ascending=False)
    return df_procedures.loc[df_procedures['viewName'].isin(counts[counts >= limit].keys())]
def calculate_std(data):
    """Compute per-procedure duration spread statistics.

    FIX: the original read the global ``df`` instead of its argument,
    referenced a misspelled 'duaration' column, and discarded every result.
    It now uses *data* and returns the statistics (backward-compatible:
    no caller consumed the previous implicit None).

    :param data: DataFrame with 'viewName' and 'duration' columns
    :return: (dict) {viewName: {'median': float, 'std': float}}
    """
    stats = {}
    for name in data['viewName'].unique():
        durations = data.loc[data['viewName'] == name, 'duration']
        stats[name] = {'median': durations.median(), 'std': durations.std()}
    return stats
def draw_dependency_plot(df, tables):
    """Plot mean query duration per second for every procedure, then the
    per-second max of each table-count column, stacked vertically.

    :param df: DataFrame with 'viewName', 'second', 'duration' and the
        table-count columns named in *tables*
    :param tables: (list) names of the table-count columns to plot
    """
    procedure_names = df['viewName'].unique()
    proc_num = range(len(procedure_names))
    table_count = len(tables)
    proc_count = len(proc_num)
    # One subplot row per procedure plus one row per table column.
    rowsCount = table_count + proc_count
    fig, ax = plt.subplots(rowsCount,1,figsize=(20,rowsCount*5))
    current_row = 0
    for p in procedure_names:
        proc = df.loc[df['viewName']== p]
        # NOTE(review): unique() preserves appearance order while groupby sorts
        # its index -- x and y only line up if 'second' is already ordered in
        # the data; confirm before reuse on unsorted input.
        x = proc['second'].unique()
        y = proc[['duration','second']].groupby('second').mean()
        ax[current_row].set_title(p)
        ax[current_row].plot(x,y, color = main_color)
        ax[current_row].set_xlabel('Time (s)')
        ax[current_row].set_ylabel('Duration (ms)')
        ax[current_row].grid(True)
        current_row+= 1
    for table in tables:
        df_table = df.loc[:,[table, 'second']]
        x2 = df_table['second'].unique()
        # Max table count observed within each second bucket.
        y2 = df_table.groupby('second').max()
        ax[current_row].set_title(table)
        ax[current_row].plot(x2,y2, color= accent_color)
        ax[current_row].set_xlabel('Time (s)')
        ax[current_row].set_ylabel('Table Count')
        current_row+=1
    plt.show()
from tslearn.clustering import KShape
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
#https://towardsdatascience.com/how-to-apply-k-means-clustering-to-time-series-data-28d04a8f7da3
def clusterize(data, n_clusters=3):
    """Cluster time series with KShape and plot each cluster with its centroid.

    Generalized: the number of clusters, previously hard-coded to 3, is now a
    parameter (default 3, so existing calls behave identically).

    :param data: array-like of time series, shape (n_series, sz) or (n_series, sz, 1)
    :param n_clusters: (int) number of clusters to extract
    """
    start_time = time.time()
    # Fixed seed keeps the clustering reproducible across runs.
    seed = 0
    X_train = data
    # Normalise each series to zero mean / unit variance so shape, not scale,
    # drives the clustering.
    X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train)
    sz = X_train.shape[1]
    ks = KShape(n_clusters=n_clusters, verbose=True, random_state=seed)
    y_pred = ks.fit_predict(X_train)
    plt.figure()
    for yi in range(n_clusters):
        plt.subplot(n_clusters, 1, 1 + yi)
        # Draw every member series faintly, then the cluster centroid in red.
        for xx in X_train[y_pred == yi]:
            plt.plot(xx.ravel(), "k-", alpha=.2)
        plt.plot(ks.cluster_centers_[yi].ravel(), "r-")
        plt.xlim(0, sz)
        plt.ylim(-4, 4)
        plt.title("Cluster %d" % (yi + 1))
    plt.tight_layout()
    plt.show()
    print('Clustering completed in {:,.2f} secs'.format(time.time()-start_time))
# -
# Look at data stats
rawData.describe()
# Get only changing columns
data = rawData[['viewName','timestamp','second', 'duration', 'is_cold_start','Table_2', 'Table_3']]
normal_execution = data.loc[data['is_cold_start']==False, :]
cold_execution = data.loc[data['is_cold_start']==True, :]
cold_percentage =len(cold_execution)*100/len(data)
normal_percentage = 100 - cold_percentage
print ("Total executions {} Normal execution {}({:,.2f}% from total, Cold execution {}({:,.2f}% form total))".format(len(data), len(normal_execution),normal_percentage,len(cold_execution),cold_percentage))
top_p = get_top_calling_procedures(cold_execution, 1000)
tables = ['Table_2', 'Table_3']
draw_dependency_plot(top_p, tables)
top_p = get_top_calling_procedures(cold_execution, 1000)
top_frequency = top_p.loc[:,['viewName', 'timestamp', 'second']].groupby(['viewName','second']).count()
print(top_frequency.describe())
top_p = get_top_calling_procedures(cold_execution, 1000)
parameters = top_p[['second', 'duration']].to_numpy()
clusterize(parameters)
| SqlMetrics/Evangelist_Perf_Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ML_Contributions
# language: python
# name: ml_contributions
# ---
# # <center> Step 2.0 Data Pre-Processing and EDA </center> #
#
# In this script, we will examine issues like data distribution, collinearity, and scaling variables to aid the performance of our classification models.
# Import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from pandas_profiling import ProfileReport
# %cd ..
# %cd data/raw
# Import the dataset produced by step 1.1; drop the columns that appear with
# a ".1" suffix (duplicated names from the earlier processing step).
df = pd.read_csv('1.1 Processed Data.csv')
df = df.drop(['owner_no.1', 'order_dt.1'], axis=1)
# %cd ..\..
# %cd reports
# ### <center> EDA of Raw Dataset </center> ###
# We will create a Pandas Profiling Report for the raw dataset, then create a second report after addressing outliers, etc. This report will illustrate a heavy class imbalance in each target variable, which will affect how we evaluate model performance later on.
report1 = ProfileReport(df).to_file("Raw Data Profile - Pandas Profiling.html")
# %cd figures
# Create a pairplot in Seaborn to eyeball pairwise relationships.
sns.pairplot(df, height=2, aspect=1)
plt.savefig("Raw Data - Seaborn Pairplot.png")
plt.show()
# Convert the date columns to datetime; errors='coerce' turns unparseable
# values into NaT instead of raising.
df.order_dt=pd.to_datetime(df.order_dt, errors='coerce')
df.first_order_dt = pd.to_datetime(df.first_order_dt, errors='coerce')
df.first_cont_dt = pd.to_datetime(df.first_cont_dt, errors='coerce')
# ### <center> EDA for Numerical Variables </center> ###
# All of our numerical variables are heavily skewed, so we'll explore various methods for reducing the significance of data outliers.
# Numerical feature columns examined below.
num_cols = [
    'tot_ticket_paid_amt',
    'tot_contribution_paid_amt',
    'ltv_tkt_value',
    'Lifetime Giving',
    'days_to_donation',
    'rolling_tkt_sum'
]
# Histograms of each numerical feature (2x3 grid) to show the heavy skew.
fig = df[num_cols].hist(layout = (2,3), figsize = (20,12))
plt.savefig('Histograms of Numerical Data.png')
plt.show()
# Build a table of the max and the 96th-99th percentiles of each variable
# to quantify how heavy the upper tails are.
data = []
for col in num_cols:
    values = [
        col,
        round(df[col].max(),2),
        round(df[col].quantile(.99),2),
        round(df[col].quantile(.98),2),
        round(df[col].quantile(.97),2),
        round(df[col].quantile(.96),2)
    ]
    data.append(values)
quantile_df = pd.DataFrame(data, columns = ['cat', 'max', '99','98','97','96'])
quantile_df
# Same tail inspection for days_to_donation restricted to non-negative values.
new = df.days_to_donation[df.days_to_donation >= 0]
new.max(), new.quantile(.99), new.quantile(.98), new.quantile(.97)
# ### <center> Note on Addressing Outliers </center> ###
#
# There's no perfect solution for handling outliers in these features. In many cases, the outliers are, in reality, the outcomes we are aiming to predict. In addition, several of these variables conflict with our assumption that this data model can predict a new customer's propensity to donate - for example, 'Lifetime Giving' is data that would not be available at the time of a customer's first order.
#
# Rather than address outliers in the base dataset, we will address these features on a case-by-case basis as we model each target variable. To mitigate the impact of remaining outliers in the dataset, we will apply a logarithmic transformation to each feature except "days_to_donation".
# Apply a logarithmic transformation to all numerical variables except
# 'days_to_donation'. Zeros are replaced with 0.1 first because log(0) is
# undefined (-inf).
num_cols.remove('days_to_donation')
for col in num_cols:
    new_values = df[col].replace(0,0.1)
    df[col] = np.log(new_values)
# Re-examine the upper tail after excluding prospect/board rows.
new_df = df[df.prospect_board == 0]
data = []
for col in num_cols:
    values = [
        col,
        new_df[col].max(),
        new_df[col].quantile(.99),
        new_df[col].quantile(.98),
        new_df[col].quantile(.97),
        new_df[col].quantile(.96)
    ]
    data.append(values)
quantile_df2 = pd.DataFrame(data, columns = ['cat', 'max', '99','98','97','96'])
quantile_df2
# ### <center> Examining Collinearity of Independent Variables </center> ###
# Here, I want to confirm whether the features that reference a customer's status as a donor prospect or Board member or their Lifetime Giving amounts are collinear with any of our target: making a donation on the first order, donating after a first order, or days until making a donation.
# Calculate the Variance Inflation Factor (VIF) of the donation-related
# features to detect collinearity among them.
x1 = df[['first_cont_order', 'first_cont_after', 'days_to_donation', 'tot_contribution_paid_amt','prospect_board','Lifetime Giving']]
from statsmodels.stats.outliers_influence import variance_inflation_factor
variables = x1
vif = pd.DataFrame()
# One VIF value per column of x1.
vif["VIF"] = [variance_inflation_factor(variables.values, i) for i in range(variables.shape[1])]
vif["features"] = variables.columns
vif
# ### <center> Scaling Variables with scikit-learn's RobustScaler </center> ###
# RobustScaler centers on the median and scales by the IQR, so it is less
# sensitive to the remaining outliers than a mean/std scaler.
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
for col in num_cols:
    new_col = df[col].values.reshape(-1,1)
    df[col] = scaler.fit_transform(new_col)
df.head()
# NOTE(review): this line previously read "# cd ..\.." — without the "%" the
# jupytext magic never ran and the directory change was skipped; restored.
# %cd ..\..
# %cd data\processed
# ### <center> Export Data and Processed Pandas Profiling Report </center> ###
df.to_csv('2.0 Processed Data with Scaled and Log Transformations.csv')
# %cd ..\..
# %cd reports
report2 = ProfileReport(df).to_file("Processed Data Profile - Pandas Profiling.html")
| notebooks/2.0 Data Pre-Processing and EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
# +
############### Imports ###############
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import random
import time
import numpy as np
import pandas as pd
import datetime
from pathlib import Path
from functions import clean
import matplotlib.pyplot as plt
### Preprocessing ###
import nltk
import re
import string
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem.lancaster import LancasterStemmer
### Tensorflow ###
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
### API ###
import requests
import base64
import tweepy
# -
# %%capture
from tqdm.notebook import tqdm as tqdm
tqdm().pandas()
# %reload_ext tensorboard
tf.get_logger().setLevel('ERROR')
# ## Pipeline Parameters
# +
datasetSize = 1600000
trainingPart = 0.8
testingPart = 0.2
split = int(datasetSize*trainingPart)
tokenizer = None
embedding_dim = 100 # glove6b100
max_length = 20 # max lenght of a tweet
trunc_type='post' # it will cut the tweet if it is longer than 20
padding_type='post' # it will add zeros at the end the tweets if is smaller than 20
oov_tok = "<OOV>" # for unseen words
# -
# ## Reading Dataset
def read_dataset():
    """Load the tweet sentiment dataset.

    Prefers the pre-cleaned CSV when it exists; otherwise reads the raw
    Sentiment140 export, keeps only the label and text columns, and maps
    the positive label 4 to 1.
    """
    if Path("./data/cleaned_sentiment.csv").is_file():
        frame = pd.read_csv("./data/cleaned_sentiment.csv", encoding='latin')
        # Empty tweets load as NaN; normalise them to empty strings.
        return frame.replace(np.nan, '', regex=True)
    frame = pd.read_csv("./data/training.1600000.processed.noemoticon.csv", encoding='latin')
    frame.columns = ['Label', 'Id', 'Date', 'Query', 'Name', 'Text']
    frame = frame.drop(columns=['Date', 'Query', 'Name', 'Id'])
    # Sentiment140 encodes positive as 4; normalise to binary 0/1.
    frame['Label'] = frame['Label'].replace(4, 1)
    return frame
# ## Data Cleaning
def clean_dataset(df):
    """Build the working corpus of [text, label] pairs.

    If the cleaned CSV cache does not exist yet, clean a class-balanced
    subset of the raw tweets (datasetSize/2 rows from each end of the
    label-sorted dataframe) and write the result to
    ./data/cleaned_sentiment.csv; otherwise select the same balanced subset
    from the already-cleaned dataframe without re-cleaning.
    """
    if not Path("./data/cleaned_sentiment.csv").is_file():
        # NOTE(review): this keras Tokenizer is immediately overwritten by
        # the TweetTokenizer below and is never used.
        tokenizer = Tokenizer()
        rawData = df['Text'].to_numpy()
        rawLabels = df['Label'].to_numpy()
        tokenizer = TweetTokenizer(strip_handles=True)
        selectedData = []
        corpus = []
        print("Replacing Contractions and Clean Data :")
        with tqdm(total=datasetSize*1.5) as pbar: # The dataset is sorted by labels so we make sure to get our 2 labels in our subset
            # Take rows alternately from the start and the end of the frame
            # so both sentiment classes are equally represented.
            for i in range(int(datasetSize/2)):
                selectedData.append([rawData[i], rawLabels[i]])
                selectedData.append([rawData[len(rawData)-i-1], rawLabels[len(rawData)-i-1]])
                pbar.update(1)
            # Run the text cleaner (functions.clean) over every tweet.
            for i in range(datasetSize):
                corpus.append([clean(selectedData[i][0]), selectedData[i][1]])
                pbar.update(1)
            # Cache the cleaned corpus so subsequent runs skip this branch.
            cleaned_df = pd.DataFrame(corpus, columns =['Text', 'Label'])
            cleaned_df.to_csv('./data/cleaned_sentiment.csv', index=True)
    else:
        rawData = df['Text'].to_numpy()
        rawLabels = df['Label'].to_numpy()
        selectedData = []
        corpus = []
        # Report any non-string entries that survived loading (e.g. NaN).
        for elt in rawData:
            if not isinstance(elt, str):
                print(elt)
        # Same balanced start/end selection as above, without re-cleaning.
        for i in range(int(datasetSize/2)):
            selectedData.append([rawData[i], rawLabels[i]])
            selectedData.append([rawData[len(rawData)-i-1], rawLabels[len(rawData)-i-1]])
        for i in range(datasetSize):
            corpus.append([selectedData[i][0], selectedData[i][1]])
    return corpus
# ## Dataset Split
# +
def create_padded_sequences(corpus):
    """Shuffle the corpus, fit a tokenizer, and return padded sequences.

    Returns (padded, labels, tokenizer, word_index, vocab_size); padding
    behaviour is controlled by the module-level max_length / padding_type /
    trunc_type settings.
    """
    # Shuffle in place so the train/test split later is not label-ordered.
    random.shuffle(corpus)
    sentences = [corpus[i][0] for i in range(datasetSize)]
    labels = [corpus[i][1] for i in range(datasetSize)]
    # Fit the keras tokenizer on the selected sentences.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(sentences)
    word_index = tokenizer.word_index
    vocab_size = len(word_index)
    # Integer-encode and pad/truncate every sentence to max_length tokens.
    sequences = tokenizer.texts_to_sequences(sentences)
    padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
    return padded, labels, tokenizer, word_index, vocab_size
def split_dataset(padded, labels, split):
    """Split the aligned sequences/labels at index *split*.

    Returns (training_sequences, training_labels, test_sequences, test_labels).
    """
    training_sequences = padded[:split]
    training_labels = labels[:split]
    test_sequences = padded[split:]
    test_labels = labels[split:]
    return training_sequences, training_labels, test_sequences, test_labels
# -
# ## Import GloVe 100 Dimensions Embedding sequences
def create_embeddings_matrix():
    """Build an embedding matrix for the fitted vocabulary from GloVe 100d.

    Row i holds the GloVe vector of the word with index i in the global
    ``word_index``; words missing from GloVe keep an all-zero row. Relies on
    the globals ``vocab_size`` and ``embedding_dim``.
    """
    embeddings_index = {}
    with open('./data/glove.6B.100d.txt') as f:
        # Each line is: word followed by embedding_dim float components.
        for line in f:
            parts = line.split()
            embeddings_index[parts[0]] = np.asarray(parts[1:], dtype='float32')
    embeddings_matrix = np.zeros((vocab_size + 1, embedding_dim))
    for word, idx in word_index.items():
        vector = embeddings_index.get(word)
        if vector is not None:
            embeddings_matrix[idx] = vector
    return embeddings_matrix
# ## Training model
# +
def train_baseline_model(training_sequences, training_labels, test_sequences, test_labels, embeddings_matrix, vocab_size, num_epochs):
    """Build, (re)load and train the SimpleRNN baseline sentiment classifier.

    Uses frozen GloVe embeddings, resumes from ./data/baseline_weights.h5
    when present, logs to TensorBoard under ./logs, and saves the weights
    after training. Returns (model, history).
    """
    baseline_model = tf.keras.Sequential([
        # Frozen pre-trained embedding layer (weights are not updated).
        tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length, weights=[embeddings_matrix], trainable=False),
        tf.keras.layers.SimpleRNN(128),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    # Resume from previously saved weights if available.
    if Path("./data/baseline_weights.h5").is_file():
        baseline_model.load_weights("./data/baseline_weights.h5")
    baseline_model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
    baseline_model.summary()
    training_padded = np.array(training_sequences)
    training_labels = np.array(training_labels)
    testing_padded = np.array(test_sequences)
    testing_labels = np.array(test_labels)
    # One timestamped TensorBoard log directory per run.
    logs_base_dir = "./logs"
    os.makedirs(logs_base_dir, exist_ok=True)
    logdir = os.path.join(logs_base_dir, "baseline-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    baseline_history = baseline_model.fit(training_padded,
                        training_labels,
                        epochs=num_epochs,
                        validation_data=(testing_padded, testing_labels),
                        batch_size = 512,
                        verbose=1,
                        callbacks=[tensorboard_callback])
    print("Training Complete")
    baseline_model.save_weights("./data/baseline_weights.h5", True)
    return baseline_model, baseline_history
def train_model(training_sequences, training_labels, test_sequences, test_labels, embeddings_matrix, vocab_size, num_epochs):
    """Build, (re)load and train the bidirectional-LSTM sentiment classifier.

    Uses frozen GloVe embeddings, resumes from ./data/myWeights.h5 when
    present, logs to TensorBoard under ./logs, and saves the weights after
    training. Returns (model, history).
    """
    model = tf.keras.Sequential([
        # Frozen pre-trained embedding layer (weights are not updated).
        tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length, weights=[embeddings_matrix], trainable=False),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Bidirectional(LSTM(units=64, return_sequences=True)),
        tf.keras.layers.Bidirectional(LSTM(units=128)),
        tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    # Resume from previously saved weights if available.
    if Path("./data/myWeights.h5").is_file():
        model.load_weights("./data/myWeights.h5")
    model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
    model.summary()
    training_padded = np.array(training_sequences)
    training_labels = np.array(training_labels)
    testing_padded = np.array(test_sequences)
    testing_labels = np.array(test_labels)
    # One timestamped TensorBoard log directory per run.
    logs_base_dir = "./logs"
    os.makedirs(logs_base_dir, exist_ok=True)
    logdir = os.path.join(logs_base_dir, "LSTM-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    history = model.fit(training_padded,
                        training_labels,
                        epochs=num_epochs,
                        validation_data=(testing_padded, testing_labels),
                        batch_size = 512,
                        verbose=1,
                        callbacks=[tensorboard_callback])
    print("Training Complete")
    model.save_weights("./data/myWeights.h5", True)
    return model, history
# -
# ## Training
# End-to-end training run: load the data, clean it, tokenize/pad, split at
# the configured train/test boundary, build the GloVe embedding matrix, and
# train both models for one epoch each.
df = read_dataset()
corpus = clean_dataset(df)
padded, labels, tokenizer, word_index, vocab_size = create_padded_sequences(corpus)
training_sequences, training_labels, test_sequences, test_labels = split_dataset(padded, labels, split)
embeddings_matrix = create_embeddings_matrix()
model, history = train_model(training_sequences, training_labels, test_sequences, test_labels, embeddings_matrix, vocab_size, 1)
baseline_model, baseline_history = train_baseline_model(training_sequences, training_labels, test_sequences, test_labels, embeddings_matrix, vocab_size, 1)
# ## Pipeline Function
# +
def create_tokenizer():
    """Rebuild the tokenizer and vocabulary info from the cleaned dataset."""
    corpus = clean_dataset(read_dataset())
    _, _, tokenizer, word_index, vocab_size = create_padded_sequences(corpus)
    return tokenizer, vocab_size, word_index
def pipeline(s, tokenizer, vocab_size):
    """Clean, tokenize and classify the sentiment of string *s*.

    Rebuilds the BiLSTM model (loading the saved weights when present),
    runs the cleaned input through it, then prints and returns a coarse
    sentiment bucket ("Very Negative" .. "Very Positive").

    NOTE(review): reconstructing and recompiling the model on every call is
    expensive; consider building it once and passing it in.
    """
    tf.keras.backend.clear_session()
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length, weights=[embeddings_matrix], trainable=False),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Bidirectional(LSTM(units=64, return_sequences=True)),
        tf.keras.layers.Bidirectional(LSTM(units=128)),
        tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    if Path("./data/myWeights.h5").is_file():
        model.load_weights("./data/myWeights.h5")
    model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
    sentence = clean(s)
    sequences = tokenizer.texts_to_sequences([sentence])
    padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
    padded = np.asarray(padded)
    # Sigmoid output in [0, 1]; bucket it into five coarse labels.
    val = model.predict(padded)[0][0]
    if val > 0.9:
        res = "Very Positive"
    elif val > 0.7:
        res = "Positive"
    elif val < 0.1:
        res = "Very Negative"
    elif val < 0.3:
        res = "Negative"
    else:
        res = "Neutral"
    print(s, ":", res, val)
    # Fix: return the label so callers such as `print(pipeline(...))` no
    # longer print None.
    return res
# +
### Testing the Pipeline ###
# Rebuild the tokenizer/embeddings only if a training run above has not
# already left them in scope (tokenizer is initialised to None at the top).
if tokenizer is None:
    tokenizer, vocab_size, word_index = create_tokenizer()
    embeddings_matrix = create_embeddings_matrix()
pipeline("You were the chosen one, you were supposed to destroy siths, not join them !", tokenizer, vocab_size)
pipeline("I hate you !!!", tokenizer, vocab_size)
pipeline("You were my brother Anakin, I loved you", tokenizer, vocab_size)
# -
# ## Full Pipeline With API Call
# +
# Twitter API credentials (redacted placeholders).
api_key = '<KEY>'
api_secret_key = '<KEY>'
access_token = '<KEY>'
access_secret_token = '<KEY>'
# Authenticate with Twitter via tweepy (OAuth 1.0a).
auth = tweepy.OAuthHandler(api_key, api_secret_key)
auth.set_access_token(access_token, access_secret_token)
api = tweepy.API(auth)
# Fetch recent tweets geotagged inside the USA place id.
places = api.geo_search(query="USA", granularity="country")
place_id = places[0].id
tweets = api.search(q="place:%s" % place_id)
if tokenizer is None:
    tokenizer, vocab_size, word_index = create_tokenizer()
    embeddings_matrix = create_embeddings_matrix()
# Classify each fetched tweet with the sentiment pipeline.
for tweet in tweets:
    print(pipeline(tweet.text, tokenizer, vocab_size))
# -
# %tensorboard --logdir "./logs"
| SentimentAnalysisProject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Singapore maritime dataset frames generation
#
# With this code you can generate the frames and horizon ground truth from Singapore maritime dataset.
#
# You can find the followings topics in this notebook:
# * Read libraries and paths
# * Do some checks
# * Convert ALL frames of the videos into jpg images
# * Generate ALL GT images
#
# ## Read libraries and paths
# +
from scipy.io import loadmat
from os import listdir
from os.path import isfile, join
import cv2
from PIL import Image, ImageDraw
import PIL
import math
# Output dimensions for saved frame/GT images — 16:9, matching the
# 1920x1080 source dimensions used later in this notebook.
x_size = 400
y_size = 225
# -
# Load the videos paths for both onboard and onshore data and generate dictionaries with it.
# +
# Set the paths for the video files and ground truth files
"""
VIDEOS_PATH_ONSHORE = "../../Dataset/VIS_Onshore/Videos"
HORIZON_ANNOTATIONS_ONSHORE_PATH = "../../Dataset/VIS_Onshore/HorizonGT"
VIDEO_FRAMES_PATH_ONSHORE = '../../Dataset/VIS_Onshore_frames/'
"""
VIDEOS_PATH_ONBOARD = "../../Dataset/VIS_Onboard/Videos"
HORIZON_ANNOTATIONS_ONBOARD_PATH = "../../Dataset/VIS_Onboard/HorizonGT"
VIDEO_FRAMES_PATH_ONBOARD = '../../Dataset/VIS_Onboard/VIS_Onboard_frames/'
# +
# video_files_onshore = [join(VIDEOS_PATH_ONSHORE, f) for f in listdir(VIDEOS_PATH_ONSHORE)
# if isfile(join(VIDEOS_PATH_ONSHORE, f))]
video_files_onboard = [join(VIDEOS_PATH_ONBOARD, f) for f in listdir(VIDEOS_PATH_ONBOARD)
if isfile(join(VIDEOS_PATH_ONBOARD, f))]
# -
# Create dictionaries for each video in the form video_name:video_path
"""
video_files_onshore_dict = {}
for f in listdir(VIDEOS_PATH_ONSHORE):
if isfile(join(VIDEOS_PATH_ONSHORE, f)):
video_files_onshore_dict[f.split('.')[0]] = join(VIDEOS_PATH_ONSHORE, f)
"""
video_files_onboard_dict = {}
for f in listdir(VIDEOS_PATH_ONBOARD):
if isfile(join(VIDEOS_PATH_ONBOARD, f)):
video_files_onboard_dict[f.split('.')[0]] = join(VIDEOS_PATH_ONBOARD, f)
# Load the ground truth files paths for both onboard and onshore data and generate dictionaries with it.
"""
horizon_gt_files_onshore_dict = {}
for f in listdir(HORIZON_ANNOTATIONS_ONSHORE_PATH):
if isfile(join(HORIZON_ANNOTATIONS_ONSHORE_PATH, f)):
horizon_gt_files_onshore_dict[f.split('.')[0].replace('_HorizonGT','')] = join(HORIZON_ANNOTATIONS_ONSHORE_PATH, f)
"""
horizon_gt_files_onboard_dict = {}
for f in listdir(HORIZON_ANNOTATIONS_ONBOARD_PATH):
if isfile(join(HORIZON_ANNOTATIONS_ONBOARD_PATH, f)):
horizon_gt_files_onboard_dict[f.split('.')[0].replace('_HorizonGT','')] = join(HORIZON_ANNOTATIONS_ONBOARD_PATH, f)
# ---------------------------
#
# ## Do some checks
#
# #### Numbers of videos and ground truth files
#
# Do some sanity checks to see if there are equal numbers of videos and ground truth files.
# +
# Compare the number of videos with the number of ground-truth files.
# print('Number of onshore videos: ', len(video_files_onshore_dict))
# print('Number of onshore ground truth files: ', len(horizon_gt_files_onshore_dict))
print('Number of onboard videos: ', len(video_files_onboard_dict))
print('Number of onboard ground truth files: ', len(horizon_gt_files_onboard_dict))
# -
# So there are videos without ground truth files and ground truth files without videos. These unlabelled data might be good for testing later. Let's find these videos and ground truth files.
# +
# ground truth files are missing - find the corresponding videos
# videos are missing - find the corresponding ground truth files
"""
missing_files_onshore = []
for key in video_files_onshore_dict.keys():
    if key not in horizon_gt_files_onshore_dict:
        missing_files_onshore.append(key)
for key in horizon_gt_files_onshore_dict.keys():
    if key not in video_files_onshore_dict:
        missing_files_onshore.append(key)
print("Unlabelled onshore videos: ", missing_files_onshore)
"""
# Keys present in only one of the two onboard dictionaries.
missing_files_onboard = []
for key in video_files_onboard_dict.keys():
    if key not in horizon_gt_files_onboard_dict:
        missing_files_onboard.append(key)
for key in horizon_gt_files_onboard_dict.keys():
    if key not in video_files_onboard_dict:
        missing_files_onboard.append(key)
print("Unlabelled onboard videos: ", missing_files_onboard)
# set whether to remove or not the missing videos from the frames generation later
remove_missing_files = True
if remove_missing_files:
    #for key in missing_files_onshore:
    #    del video_files_onshore_dict[key]
    #    del horizon_gt_files_onshore_dict[key]
    for key in missing_files_onboard:
        # Fix: each unmatched key exists in only ONE of the two dicts, so
        # `del` on both raised KeyError; pop() with a default is safe and
        # leaves both dictionaries with identical key sets.
        video_files_onboard_dict.pop(key, None)
        horizon_gt_files_onboard_dict.pop(key, None)
print()
print('Size of video dictionaries after removing the videos without ground truth:')
"""
print('Number of onshore videos: ', len(video_files_onshore_dict))
print('Number of onshore ground truth files: ', len(horizon_gt_files_onshore_dict))
"""
print('Number of onboard videos: ', len(video_files_onboard_dict))
print('Number of onboard ground truth files: ', len(horizon_gt_files_onboard_dict))
# -
#
# #### Count video frame number and GT data frame number
#
# Do some sanity checks to see if there are equal numbers of videos and ground truth files.
# count video frames
for video_key in video_files_onboard_dict:
    vidcap = cv2.VideoCapture(video_files_onboard_dict.get(video_key))
    # get total frames of video
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Total frames " + video_key + " : " + str(total_frames))
# count GT data frames
for horizon_key in horizon_gt_files_onboard_dict:
    # read GT data
    data = loadmat(horizon_gt_files_onboard_dict.get(horizon_key))
    # get total frames of GT data
    total_frames = len(data['structXML'][0])
    print("Total GT frames " + horizon_key + " : " + str(total_frames))
# +
# Drop entries whose video frame count differs from the GT frame count;
# iterate over a copied key list because the dictionaries are mutated inside.
keys = list(horizon_gt_files_onboard_dict.keys())
for key in keys:
    # read GT data
    data = loadmat(horizon_gt_files_onboard_dict.get(key))
    total_frames_gt = len(data['structXML'][0])
    vidcap = cv2.VideoCapture(video_files_onboard_dict.get(key))
    total_frames_video = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    if total_frames_gt != total_frames_video:
        del video_files_onboard_dict[key]
        del horizon_gt_files_onboard_dict[key]
print()
print('Size of video dictionaries after removing the videos with different numbers of frames and ground truth:')
print('Number of onboard videos: ', len(video_files_onboard_dict))
print('Number of onboard ground truth files: ', len(horizon_gt_files_onboard_dict))
# -
print(video_files_onboard_dict.keys())
# ---------------------------
#
# ## Convert ALL frames of the videos into jpg images
# This is code to convert each video frame into a jpg image.
#
# #### Example
# This cell is for converting only one video.
# +
# convert a sample onshore video
# NOTE(review): the onshore path variables and dictionaries are disabled
# (wrapped in string literals) earlier in this file, so the onshore cells
# below raise NameError unless those definitions are re-enabled.
video_name = 'MVI_1478_VIS'
vidcap = cv2.VideoCapture(video_files_onshore_dict.get(video_name))
success,image = vidcap.read()
count = 0
success = True
# Read frame by frame until the decoder reports failure (end of stream).
while success:
    resized_image = cv2.resize(image, (x_size, y_size))
    cv2.imwrite(VIDEO_FRAMES_PATH_ONSHORE + video_name + "_frame%d.jpg" % count, resized_image) # save frame as JPEG file
    success,image = vidcap.read()
    #print('Read a new frame: ', success)
    count += 1
print("Derived %d frames" % count)
# +
# convert a sample onboard video
video_name = 'MVI_0788_VIS_OB'
vidcap = cv2.VideoCapture(video_files_onboard_dict.get(video_name))
success,image = vidcap.read()
count = 0
success = True
while success:
    resized_image = cv2.resize(image, (x_size, y_size))
    cv2.imwrite(VIDEO_FRAMES_PATH_ONBOARD + video_name + "_frame%d.jpg" % count, resized_image) # save frame as JPEG file
    success,image = vidcap.read()
    #print('Read a new frame: ', success)
    count += 1
print("Derived %d frames" % count)
# -
# #### Convert ALL frames
#
# This cell is for converting all the videos in a folder into jpg images.
# convert ALL on shore videos into images with 1 image per frame
# NOTE(review): depends on the disabled onshore definitions (see above).
for video_key in video_files_onshore_dict:
    #video_name = 'MVI_1478_VIS'
    vidcap = cv2.VideoCapture(video_files_onshore_dict.get(video_key))
    success,image = vidcap.read()
    count = 0
    success = True
    while success:
        resized_image = cv2.resize(image, (x_size, y_size))
        cv2.imwrite(VIDEO_FRAMES_PATH_ONSHORE + video_key + "_frame%d.jpg" % count, resized_image) # save frame as JPEG file
        success,image = vidcap.read()
        #print('Read a new frame: ', success)
        count += 1
    print("Derived %d frames" % count)
# convert ALL on board videos into images with 1 image per frame
for video_key in video_files_onboard_dict:
    #video_name = 'MVI_1478_VIS'
    vidcap = cv2.VideoCapture(video_files_onboard_dict.get(video_key))
    success,image = vidcap.read()
    count = 0
    success = True
    while success:
        resized_image = cv2.resize(image, (x_size, y_size))
        cv2.imwrite(VIDEO_FRAMES_PATH_ONBOARD + video_key + "_frame%d.jpg" % count, resized_image) # save frame as JPEG file
        success,image = vidcap.read()
        #print('Read a new frame: ', success)
        count += 1
    print("Derived %d frames" % count)
# ---------------------------
# ## Convert every N frame of a video into jpg image
# convert every N onshore videos into images with 1 image per frame
# NOTE(review): onshore paths/dicts are disabled earlier in this file, so
# this cell raises NameError unless they are re-enabled.
for video_key in video_files_onshore_dict:
    frame_space = 20 # keep every 20th frame
    vidcap = cv2.VideoCapture(video_files_onshore_dict.get(video_key))
    success,image = vidcap.read()
    count = 0
    frame_count = 0
    success = True
    while success:
        if count % frame_space == 0:
            resized_image = cv2.resize(image, (x_size, y_size))
            cv2.imwrite(VIDEO_FRAMES_PATH_ONSHORE + video_key + "_frame%d.jpg" % count, resized_image) # save frame as JPEG file
            frame_count += 1
        success,image = vidcap.read()
        #print('Read a new frame: ', success)
        count += 1
    print(video_key)
    print("Total %d frames" % count)
    print("Derived %d frames" % frame_count)
# convert every N onboard videos into images with 1 image per frame
for video_key in video_files_onboard_dict:
    print(video_key)
    frame_space = 20 # keep every 20th frame
    vidcap = cv2.VideoCapture(video_files_onboard_dict.get(video_key))
    success,image = vidcap.read()
    count = 0
    frame_count = 0
    success = True
    while success:
        if count % frame_space == 0:
            resized_image = cv2.resize(image, (x_size, y_size))
            # NOTE(review): "R_frame" differs from the "_frame" naming used
            # elsewhere in this notebook — confirm the "R" suffix is intended.
            cv2.imwrite(VIDEO_FRAMES_PATH_ONBOARD + video_key + "R_frame%d.jpg" % count, resized_image) # save frame as JPEG file
            frame_count += 1
        success,image = vidcap.read()
        #print('Read a new frame: ', success)
        count += 1
    print("Total %d frames" % count)
    print("Derived %d frames" % frame_count)
# ---------------------------
#
# ## Generate ALL GT images
# This is code to generate the GT images using all data from valid GT files.
#
# #### Example
# This cell is for generating only one GT image.
# +
# Example: render the horizon ground truth for a single frame.
# Read GT data
data = loadmat("../../Dataset/VIS_Onboard/HorizonGT/MVI_0788_VIS_OB_HorizonGT")
# Read de base image
frame_number = 12
#base = np.array(Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0788_VIS_OB_frame" + str(frame_number) + ".jpg"), dtype=np.uint8)
# Source image dimensions (1920x1080, the resolution assumed for the videos).
len_x, len_y = 1920, 1080
# Create GT image
# use the line below to see the horizon line in binary image ----
# PIL.Image.new(binary channel, (x dimension, y dimension))
gt = PIL.Image.new('1', (len_x, len_y))
# use the line below to see the horizon line in the sea image ----
# Image.open(image_path)
#gt = Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0788_VIS_OB_frame" + str(frame_number) + ".jpg")
# Create a draw with the image
draw = ImageDraw.Draw(gt)
# horizon = data frame <frame_number> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
horizon = data['structXML'][0][frame_number]
print(horizon)
# ------- Create the horizon line -------
# cosine and sine from GT file horizon line
# NOTE(review): this pair is immediately recomputed below; only the
# recomputed values are used.
c, s = horizon[2], horizon[3]
# horizon line angle, recovered from the stored sine
rad = math.asin(horizon[3]) - math.radians(90)
# cosine and sine to plot horizon line
c, s = math.cos(rad), math.sin(rad)
# central point of the annotated horizon
cx = float(horizon[0][0])
cy = float(horizon[1][0])
print(cx)
print(cy)
# horizontal segment through the centre point, then rotated by the angle
x1 = 0 # start point
y1 = cy
x2 = len_x # end point
y2 = cy
# rotated points
xr1 = c*(x1-cx) - s*(y1-cy) + cx
yr1 = s*(x1-cx) + c*(y1-cy) + cy
xr2 = c*(x2-cx) - s*(y2-cy) + cx
yr2 = s*(x2-cx) + c*(y2-cy) + cy
# ---------------------------------------
# Draw the horizon line (white, 6 px wide)
draw.line((xr1, yr1, xr2, yr2), fill=1, width=6)
# Show the image
#gt.show()
gt = gt.resize((x_size, y_size))
# Save the image
gt.save("GTs/MVI_0788_VIS_OB_gt" + str(frame_number) + ".jpeg", "JPEG")
# -
# #### Generate ALL GT images
#
# This cell is for generating all images using all valid GT files in a folder.
#
# To skip some GT images, choose a value for frame_space other than 1. To generate every GT image, set frame_space to 1.
# +
# Generate all GT images — shared routine for the onshore and onboard sets.
# (Deduplicated: the two original cells were identical except for the paths.)

def generate_gt_images(horizon_gt_dict, gt_base_path, len_x=1920, len_y=1080, frame_space=20):
    """Render horizon ground-truth images for every GT file in *horizon_gt_dict*.

    For each video key, loads "<gt_base_path><key>_HorizonGT" (a .mat file),
    draws the horizon line of every *frame_space*-th frame onto a binary
    (len_x, len_y) canvas, resizes to the global (x_size, y_size) and saves
    the result under "<gt_base_path>GTImages/".
    """
    for horizon_key in horizon_gt_dict:
        print(horizon_key)
        # Read GT data
        data = loadmat(gt_base_path + horizon_key + "_HorizonGT")
        count = 0        # frames seen
        frame_count = 0  # frames rendered
        # horizon = (x, y, cos alpha, sin alpha) per frame - see the dataset's GT documentation
        for horizon in data['structXML'][0]:
            if count % frame_space == 0:
                gt = PIL.Image.new('1', (len_x, len_y))
                draw = ImageDraw.Draw(gt)
                # Recover the line angle from the stored sine, then rotate a
                # horizontal segment through the annotated centre point.
                rad = math.asin(horizon[3]) - math.radians(90)
                c, s = math.cos(rad), math.sin(rad)
                cx = float(horizon[0][0])
                cy = float(horizon[1][0])
                x1, y1 = 0, cy
                x2, y2 = len_x, cy
                xr1 = c*(x1-cx) - s*(y1-cy) + cx
                yr1 = s*(x1-cx) + c*(y1-cy) + cy
                xr2 = c*(x2-cx) - s*(y2-cy) + cx
                yr2 = s*(x2-cx) + c*(y2-cy) + cy
                # Draw the horizon line (white, 6 px wide), downscale, save.
                draw.line((xr1, yr1, xr2, yr2), fill=1, width=6)
                gt = gt.resize((x_size, y_size))
                gt.save(gt_base_path + "GTImages/" + horizon_key + "_GT" + str(count) + ".jpg")
                frame_count = frame_count + 1
            count = count + 1
        print("Total %d frames" % count)
        print("Derived %d frames" % frame_count)

# Generate all onshore GT images
# NOTE(review): horizon_gt_files_onshore_dict is defined only inside a
# string literal earlier in this file; re-enable it before running this line.
generate_gt_images(horizon_gt_files_onshore_dict, "../../Dataset/VIS_Onshore/HorizonGT/")
# Generate all onboard GT images
generate_gt_images(horizon_gt_files_onboard_dict, "../../Dataset/VIS_Onboard/HorizonGT/")
# -
| Code/SMD-Frames-GT-Generation/Singapore_maritime_dataset_frames_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 4: More Matplotlib
#
# - Data Analysis of Syracuse Weather with Plotting
#
# %matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # larger figure size
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
# Historical daily weather observations for Syracuse, NY.
weather = pd.read_csv("https://raw.githubusercontent.com/mafudge/datasets/master/weather/syracuse-ny.csv")
weather.head()
weather['Events'].unique()
# Let's get just the thunderstorms!
thunder = weather[ weather['Events'].str.find('Thunderstorm') >=0 ]
thunder.head()
# The percentage of days it thunders in Syracuse, historically
thunder.EST.count() / weather.EST.count()
weather.columns
# What is the relationship between Temperature and Dewpoint?
weather.plot.scatter( x = 'Mean TemperatureF', y = 'MeanDew PointF')
# INSIGHT: Positive correlation between temperature and dewpoint. Every meteorologist knows this. ;-)
#
# What is the relationship between cloud cover and visibility?
weather.plot.scatter( x = 'CloudCover', y = 'Mean VisibilityMiles')
# As one would expect the less cloud cover the greater visibility.
#
# How about temperature and wind speed?
weather.plot.scatter( x = 'Mean TemperatureF', y = 'Mean Wind SpeedMPH')
# not much of an insight there, but...
#
# when you look at the relationship on days where it thunders:
thunder.plot.scatter( x = 'Mean TemperatureF', y = 'Mean Wind SpeedMPH')
# We see that it doesn't really thunder when it's cold out!
#
# This plot is interesting. It shows when the temperature is cold, the wind isn't coming out of the south. Makes sense for Syracuse.
weather.plot.scatter( x = 'Mean TemperatureF', y = 'WindDirDegrees')
weather['Events'].unique()
# Treat days with no recorded events as 'None' rather than NaN.
weather['Events'] = weather.Events.fillna('None')
# Daily temperature swing (max minus min).
weather['Diff TemperatureF'] = weather['Max TemperatureF'] - weather['Min TemperatureF']
import matplotlib
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # larger figure size
weather['date'] = pd.to_datetime(weather.EST) # make timeseries data
# let's plot the temperature swings For may 2015
# NOTE(review): the "2015-5" substring match assumes non-zero-padded months in
# the EST column (e.g. "2015-5-01") - confirm against the CSV's date format.
weather[weather['EST'].str.find("2015-5") >=0 ].plot.line( x = 'date', y = 'Diff TemperatureF')
w2015 = weather[ weather.date > '2015-01-01']
w2015.plot.line(x = 'date', y =['Max TemperatureF', 'Min TemperatureF'] )
| content/lessons/13/Watch-Me-Code/WMC4-More-Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''forex-bot-env'': conda)'
# name: python3
# ---
#
class Dog():
    """Small demo class showing instance methods, ``__repr__`` and classmethods."""

    def __init__(self, name, age):
        # Greet on construction, then record the two attributes.
        print("Hello")
        self.name = name
        self.age = age

    def __repr__(self):
        # Render the instance dict, e.g. "{'name': 'Figo', 'age': 99}".
        return str(self.__dict__)

    def woof(self):
        """Bark once from an instance."""
        print("Woof")

    @classmethod
    def cls_woof(cls):
        """Bark at the class level - no instance required."""
        print("CLS Woof")
# Construct a Dog (prints "Hello" from __init__).
d = Dog("Figo",99)
# Bare expression: Jupyter displays the __repr__ of the instance.
d
d.woof()
# Classmethod is callable on the class itself, no instance needed.
Dog.cls_woof()
| test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def get_state(data, t, n):
    """Return a (1, n-1) array of consecutive price differences over the
    window of n entries of ``data`` ending at index ``t``.

    When fewer than n entries exist before t, the window is left-padded by
    repeating the first element of ``data``.
    """
    start = t - n + 1
    if start >= 0:
        window = data[start : t + 1]
    else:
        # not enough history: pad on the left with copies of data[0]
        window = [data[0]] * (-start) + data[0 : t + 1]
    diffs = [window[i + 1] - window[i] for i in range(n - 1)]
    return np.array([diffs])
# Load one year of GOOG daily prices; only the 'Close' column is used below.
df = pd.read_csv('../dataset/GOOG-year.csv')
df.head()
# +
from collections import deque
import random
class Agent:
    """DQN-style trading agent built on the TensorFlow 1.x graph API.

    Actions: 0 = hold, 1 = buy, 2 = sell.
    NOTE(review): tf.placeholder / tf.layers / tf.InteractiveSession are
    TF1-only; this will not run on TensorFlow 2.x without tf.compat.v1.
    """

    def __init__(self, state_size):
        # Hyperparameters and replay memory.
        self.state_size = state_size
        self.action_size = 3
        self.memory = deque(maxlen = 1000)  # replay buffer of (s, a, r, s', done)
        self.inventory = []                 # purchase prices of units currently held
        self.gamma = 0.95                   # discount factor
        self.epsilon = 1.0                  # exploration rate, decayed in replay()
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.999
        # Build a 3-hidden-layer MLP mapping state -> one Q-value per action.
        tf.reset_default_graph()
        self.sess = tf.InteractiveSession()
        self.X = tf.placeholder(tf.float32, [None, self.state_size])
        self.Y = tf.placeholder(tf.float32, [None, self.action_size])
        feed = tf.layers.dense(self.X, 64, activation = tf.nn.relu)
        feed = tf.layers.dense(feed, 32, activation = tf.nn.relu)
        feed = tf.layers.dense(feed, 8, activation = tf.nn.relu)
        self.logits = tf.layers.dense(feed, self.action_size)
        # Mean squared error between target and predicted Q-values.
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize(
            self.cost
        )
        self.sess.run(tf.global_variables_initializer())

    def act(self, state):
        """Epsilon-greedy action selection for a single state row."""
        if random.random() <= self.epsilon:
            return random.randrange(self.action_size)
        return np.argmax(
            self.sess.run(self.logits, feed_dict = {self.X: state})[0]
        )

    def replay(self, batch_size):
        """Train on the most recent transitions and decay epsilon."""
        # Take the newest entries of the replay memory (no random sampling).
        # NOTE(review): range(l - batch_size + 1, l) yields batch_size - 1
        # items, one fewer than the name suggests - likely an off-by-one.
        mini_batch = []
        l = len(self.memory)
        for i in range(l - batch_size + 1, l):
            mini_batch.append(self.memory[i])
        replay_size = len(mini_batch)
        X = np.empty((replay_size, self.state_size))
        Y = np.empty((replay_size, self.action_size))
        # Batched forward passes for current states and successor states.
        states = np.array([a[0][0] for a in mini_batch])
        new_states = np.array([a[3][0] for a in mini_batch])
        Q = self.sess.run(self.logits, feed_dict = {self.X: states})
        Q_new = self.sess.run(self.logits, feed_dict = {self.X: new_states})
        for i in range(len(mini_batch)):
            state, action, reward, next_state, done = mini_batch[i]
            # Standard Q-learning target for the taken action only.
            target = Q[i]
            target[action] = reward
            # NOTE(review): in the training loop `done` defaults to True and is
            # only False on a sell, so bootstrapping happens on sell steps -
            # inverted relative to the usual DQN convention; confirm intent.
            if not done:
                target[action] += self.gamma * np.amax(Q_new[i])
            X[i] = state
            Y[i] = target
        cost, _ = self.sess.run(
            [self.cost, self.optimizer], feed_dict = {self.X: X, self.Y: Y}
        )
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
# -
# Extract the closing-price series and set up training hyperparameters.
close = df.Close.values.tolist()
window_size = 30    # state = differences over the last window_size+1 closes
skip = 1            # stride between trading days
l = len(close) - 1
batch_size = 32
agent = Agent(window_size)
epoch = 50
for e in range(epoch):
    state = get_state(close, 0, window_size + 1)
    total_profit = 0
    agent.inventory = []
    for t in range(0, l, skip):
        action = agent.act(state)
        next_state = get_state(close, t + 1, window_size + 1)
        # NOTE(review): done defaults to True and reward to -10; both are only
        # overridden on a successful sell below, so holds and buys are all
        # penalized with -10 - confirm this is the intended shaping.
        done = True
        reward = -10
        if action == 1:
            # buy: hold one unit bought at today's close
            agent.inventory.append(close[t])
        elif action == 2 and len(agent.inventory) > 0:
            # sell the oldest held unit (FIFO); reward is profit clipped at 0
            bought_price = agent.inventory.pop(0)
            reward = max(close[t] - bought_price, 0)
            done = False
            total_profit += close[t] - bought_price
        agent.memory.append((state, action, reward, next_state, done))
        state = next_state
        # Train once the buffer has more than one batch of transitions.
        if len(agent.memory) > batch_size:
            agent.replay(batch_size)
    print('epoch %d, total profit %f' % (e + 1, total_profit))
# +
# Greedy backtest of the trained agent starting from a $10,000 bankroll.
state = get_state(close, 0, window_size + 1)
initial_money = 10000
starting_money = initial_money
states_sell = []   # day indices where a sell happened (for plot markers)
states_buy = []    # day indices where a buy happened
agent.inventory = []
for t in range(0, l, skip):
    action = agent.act(state)
    next_state = get_state(close, t + 1, window_size + 1)
    if action == 1 and initial_money >= close[t]:
        # buy one unit only if the balance covers today's close
        agent.inventory.append(close[t])
        initial_money -= close[t]
        states_buy.append(t)
        print(
            'day %d: buy 1 unit at price %f, total balance %f'
            % (t, close[t], initial_money)
        )
    elif action == 2 and len(agent.inventory) > 0:
        # sell the oldest held unit (FIFO) and realize the return
        bought_price = agent.inventory.pop(0)
        initial_money += close[t]
        states_sell.append(t)
        try:
            invest = ((close[t] - bought_price) / bought_price) * 100
        except:
            # NOTE(review): bare except guards a zero bought_price;
            # ZeroDivisionError would be the precise exception to catch.
            invest = 0
        print(
            'day %d, sell 1 unit at price %f, investment %f %%, total balance %f,'
            % (t, close[t], invest, initial_money)
        )
    state = next_state
# Overall return relative to the starting bankroll.
invest = ((initial_money - starting_money) / starting_money) * 100
print(
    '\ntotal gained %f, total investment %f %%'
    % (initial_money - starting_money, invest)
)
# Plot the close series with buy (X) and sell (o) markers.
plt.figure(figsize = (20, 10))
plt.plot(close, label = 'true close')
plt.plot(close, 'X', label = 'predict buy', markevery = states_buy, c = 'b')
plt.plot(close, 'o', label = 'predict sell', markevery = states_sell, c = 'r')
plt.legend()
plt.show()
# -
| agent/q-learning-agent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="049dad6e-bee5-4822-9e0e-ae11b0e7f26a" _uuid="d9c4e75689c538b41aad45753aba45d601011830"
# # **Introduction**
#
# This is an initial Exploratory Data Analysis for the [Mercari Price Suggestion Challenge](https://www.kaggle.com/c/mercari-price-suggestion-challenge#description) using matplotlib, [bokeh](https://bokeh.pydata.org/en/latest/) and [Plot.ly](https://plot.ly/feed/) - visualization tools that create beautiful interactive plots and dashboards. The competition is hosted by Mercari, the biggest Japanese community-powered shopping app, with the main objective to predict an accurate price that Mercari should suggest to its sellers, given the item's information.
#
# ***Update***: The abundant amount of food from my family's Thanksgiving dinner has really energized me to continue working on this model. I decided to dive deeper into the NLP analysis and found an amazing tutorial by <NAME>. The framework below is based on his [source code](https://ahmedbesbes.com/how-to-mine-newsfeed-data-and-extract-interactive-insights-in-python.html). It provides guidance on pre-processing documents and machine learning techniques (K-means and LDA) to clustering topics. So that this kernel will be divided into 2 parts:
#
# 1. Exploratory Data Analysis
# 2. Text Processing
# 2.1. Tokenizing and tf-idf algorithm
# 2.2. K-means Clustering
# 2.3. Latent Dirichlet Allocation (LDA) / Topic Modelling
#
# + _cell_guid="4f211658-d449-4052-87c4-0ab5d10545e8" _kg_hide-input=true _kg_hide-output=true _uuid="af187b17315c15081fcdcdc99942db794844d90d"
import nltk
import string
import re
import numpy as np
import pandas as pd
import pickle
#import lda
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="white")
from nltk.stem.porter import *
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction import stop_words
from collections import Counter
from wordcloud import WordCloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
# %matplotlib inline
import bokeh.plotting as bp
from bokeh.models import HoverTool, BoxSelectTool
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show, output_notebook
#from bokeh.transform import factor_cmap
import warnings
warnings.filterwarnings('ignore')
import logging
logging.getLogger("lda").setLevel(logging.WARNING)
# + [markdown] _cell_guid="5fa6bae8-c5fc-4c1a-8339-acfa14aac199" _kg_hide-input=true _kg_hide-output=true _uuid="d82faf4720f0574d2be682880d2f662b96918ffa"
# # **Exploratory Data Analysis**
# On the first look at the data, besides the unique identifier (item_id), there are 7 variables in this model. This notebook will sequentially go through each of them with a brief statistical summary.
#
# 1. **Numerical/Continuous Features**
# 1. price: the item's final bidding price. This will be our reponse / independent variable that we need to predict in the test set
# 2. shipping cost
#
# 1. **Categorical Features**:
# 1. shipping cost: A binary indicator, 1 if shipping fee is paid by seller and 0 if it's paid by buyer
# 2. item_condition_id: The condition of the items provided by the seller
# 1. name: The item's name
# 2. brand_name: The item's producer brand name
# 2. category_name: The item's single or multiple categories that are separated by "\"
# 3. item_description: A short description on the item that may include removed words, flagged by [rm]
# + _cell_guid="fc7f8ff2-a9f2-488d-b9b8-0c47c1d64a9d" _kg_hide-input=true _uuid="122a209567af15b4422a75b50bfb8bb4c7b5fdfe"
# Kaggle input directory holding the competition's train/test files.
PATH = "../input/"
# + _cell_guid="6f1e2926-af11-4801-9df9-943162e72478" _uuid="ca7edb8850b4b2d5dfbe044e08cc709886348c2e"
# Mercari data is tab-separated, hence sep='\t'.
train = pd.read_csv(f'{PATH}train.tsv', sep='\t')
test = pd.read_csv(f'{PATH}test.tsv', sep='\t')
# + _cell_guid="5fe57507-8f62-4e90-afb4-f78d5ee174a0" _uuid="1a6310c7717bb6993ea79e4057f6d5a75e4c3c2a"
# size of training and dataset
print(train.shape)
print(test.shape)
# + _cell_guid="f58f905d-8279-42fa-8877-862c1f23aff8" _uuid="b247cf361d89fc2431adc5150d0c4533691f71b2"
# different data types in the dataset: categorical (strings) and numeric
train.dtypes
# + _cell_guid="8fd3b4d2-2bc9-47dc-8396-5980aad64a1f" _uuid="04c5b96c1de907bf9dc259a0a8d2e4df4f02375a"
train.head()
# + [markdown] _cell_guid="74b924c4-fecc-4a5b-b112-87625297d3b9" _uuid="3dc222ed04ddb74c4cf4081192150dcd7de17d89"
# ## Target Variable: **Price**
# + [markdown] _cell_guid="cb846bc6-01be-4f61-8c2b-49a6af0f8f29" _uuid="15c4c0eb2b7b138f52fba95cd11915ce4e15539f"
# The next standard check is with our response or target variables, which in this case is the `price` we are suggesting to the Mercari's marketplace sellers. The median price of all the items in the training is about \$267 but given the existence of some extreme values of over \$100 and the maximum at \$2,009, the distribution of the variables is heavily skewed to the right. So let's make log-transformation on the price (we added +1 to the value before the transformation to avoid zero and negative values).
# + _cell_guid="621d00c1-6f22-42bc-8709-fd77399b6492" _uuid="b3d3c18e99c7e49e81f3e51221ba26483f6ead81"
# Summary statistics of the target variable.
train.price.describe()
# + _cell_guid="ba824e0f-b23a-4e70-b621-fc1d979e57d6" _kg_hide-input=true _uuid="314e556a16f6810ea01536c135d62e3e39d88431"
# Left: raw price histogram (x-axis clipped at 250); right: log(price+1),
# which is far closer to symmetric and is what models typically fit.
plt.subplot(1, 2, 1)
(train['price']).plot.hist(bins=50, figsize=(20,10), edgecolor='white',range=[0,250])
plt.xlabel('price+', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.tick_params(labelsize=15)
plt.title('Price Distribution - Training Set', fontsize=17)
plt.subplot(1, 2, 2)
np.log(train['price']+1).plot.hist(bins=50, figsize=(20,10), edgecolor='white')
plt.xlabel('log(price+1)', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.tick_params(labelsize=15)
plt.title('Log(Price) Distribution - Training Set', fontsize=17)
plt.show()
# + [markdown] _cell_guid="91de0c69-7cb3-4503-aedf-9c396a1a8f19" _uuid="8ead68391b89acaa393f9f694c08addd4bd3a58a"
# ## **Shipping**
#
# The shipping cost burden is decently splitted between sellers and buyers with more than half of the items' shipping fees are paid by the sellers (55%). In addition, the average price paid by users who have to pay for shipping fees is lower than those that don't require additional shipping cost. This matches with our perception that the sellers need a lower price to compensate for the additional shipping.
# + _cell_guid="b3d38506-ebe3-4951-8fb0-9dd3de6c0851" _kg_hide-input=true _uuid="34df65c2eaa227091d1101a17ee43a99eae5e639"
# Share of items where the seller (1) vs the buyer (0) pays shipping.
train.shipping.value_counts()/len(train)
# + _cell_guid="956793f2-0e83-4de7-ac47-59a266e2a771" _kg_hide-input=true _kg_hide-output=false _uuid="938ea18ba993fd6f9296e3c1d1d541a728219622"
# Split prices by who pays the shipping fee.
prc_shipBySeller = train.loc[train.shipping==1, 'price']
prc_shipByBuyer = train.loc[train.shipping==0, 'price']
# + _cell_guid="b7c2c064-4561-4831-9cf7-02910a8cde00" _kg_hide-input=true _uuid="cc24211ac13b7d8bd88c1061e996826cfc3da597"
# Overlay the two log-price histograms to compare the distributions.
fig, ax = plt.subplots(figsize=(20,10))
ax.hist(np.log(prc_shipBySeller+1), color='#8CB4E1', alpha=1.0, bins=50,
        label='Price when Seller pays Shipping')
ax.hist(np.log(prc_shipByBuyer+1), color='#007D00', alpha=0.7, bins=50,
        label='Price when Buyer pays Shipping')
ax.set(title='Histogram Comparison', ylabel='% of Dataset in Bin')
plt.xlabel('log(price+1)', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.title('Price Distribution by Shipping Type', fontsize=17)
plt.tick_params(labelsize=15)
plt.show()
# + [markdown] _cell_guid="c19f85dd-6767-4329-88f2-75aae64623e5" _uuid="c275d349c49e2dd0fc1ec10bd416900875f083a2"
# ## **Item Category**
#
# There are about **1,287** unique categories but among each of them, we will always see a main/general category firstly, followed by two more particular subcategories (e.g. Beauty/Makeup/Face or Lips). In adidition, there are about 6,327 items that do not have a category labels. Let's split the categories into three different columns. We will see later that this information is actually quite important from the seller's point of view and how we handle the missing information in the `brand_name` column will impact the model's prediction.
# + _cell_guid="b73488c6-789e-4dda-a21c-cc661a715db7" _kg_hide-input=true _uuid="944114da84aa5c946b0d102f908c2d303350d401"
print("There are %d unique values in the category column." % train['category_name'].nunique())
# + _cell_guid="ac468824-7439-415e-b119-c24ff750e631" _kg_hide-input=false _uuid="07f9fdeb6dd4bd7e3a075a4cd5d779bbe2a64320"
# TOP 5 RAW CATEGORIES
train['category_name'].value_counts()[:5]
# + _cell_guid="a17d5bbc-f2b9-48f4-bf1e-8031000c992d" _kg_hide-input=true _uuid="e79d9bae48ea3daf13d3e83ea863d1eb51658fa8"
# missing categories (NaN rows become the "No Label" triple after splitting)
print("There are %d items that do not have a label." % train['category_name'].isnull().sum())
# + _cell_guid="ee8e9ba1-dd6b-493c-b97f-507eca5287fb" _kg_hide-input=false _kg_hide-output=false _uuid="f51ffaa9c0ffaa0cd04a5c2cfd7d76417971d530"
# reference: BuryBuryZymon at https://www.kaggle.com/maheshdadhich/i-will-sell-everything-for-free-0-55
def split_cat(text):
    """Split a "Main/Sub1/Sub2" category path into its levels.

    Missing categories arrive as NaN (a float), which has no ``.split``;
    the original bare ``except`` silently swallowed *every* error, so catch
    only AttributeError. Returns a list for labelled rows or the
    ("No Label", "No Label", "No Label") triple for missing ones - both
    unpack identically via zip() at the call sites.
    """
    try:
        return text.split("/")
    except AttributeError:  # NaN / non-string input -> unlabeled
        return ("No Label", "No Label", "No Label")
# + _cell_guid="74e722fb-4bf1-4540-b5e4-f9f7bba64dd4" _uuid="63f7d7f0f87b5599d3b970382432010a3968aee4"
# Split "Main/Sub1/Sub2" into three new columns (zip(*...) transposes rows).
train['general_cat'], train['subcat_1'], train['subcat_2'] = \
    zip(*train['category_name'].apply(lambda x: split_cat(x)))
train.head()
# + _cell_guid="e35d96ec-fa00-4f49-b5f0-e8df56f466ff" _kg_hide-input=true _kg_hide-output=false _uuid="5e5ba730355ce75bf2dce3defb1c4fe1f80ff9fb"
# repeat the same step for the test set
test['general_cat'], test['subcat_1'], test['subcat_2'] = \
    zip(*test['category_name'].apply(lambda x: split_cat(x)))
# + _cell_guid="64559f3b-b70b-45a6-ba10-975993c4926f" _kg_hide-input=false _kg_hide-output=false _uuid="d191e5c7ed101922aed6c3f9734bf34508f01912"
print("There are %d unique first sub-categories." % train['subcat_1'].nunique())
# + _cell_guid="0412ea5b-e8fc-4154-806c-043d34aae44c" _kg_hide-input=false _kg_hide-output=false _uuid="0d3250b78e86eddc7f621c1e7aabbcf7f762598d"
print("There are %d unique second sub-categories." % train['subcat_2'].nunique())
# + [markdown] _cell_guid="d6e280f0-885e-43bb-bda7-8ed9fe0987ba" _uuid="a03eb0d50c3103a7a3b2fa315254dfc9b5b6a894"
# Overall, we have **7 main categories** (114 in the first sub-categories and 871 second sub-categories): women's and beauty items as the two most popular categories (more than 50% of the observations), followed by kids and electronics.
# + _cell_guid="1977140a-f0ee-46af-8561-7fa5f3de43f2" _kg_hide-input=false _kg_hide-output=false _uuid="427123e746f47e763bb351e9abe83041985a05ca"
# Counts and percentage labels for the main-category bar chart.
x = train['general_cat'].value_counts().index.values.astype('str')
y = train['general_cat'].value_counts().values
pct = [("%.2f"%(v*100))+"%"for v in (y/len(train))]
# + _cell_guid="e40b2cfb-02e9-46fe-a8cf-75bcb3e7eec9" _kg_hide-input=false _kg_hide-output=false _uuid="95ff30e205b02024275d465d489648f0cc64cdb2"
trace1 = go.Bar(x=x, y=y, text=pct)
layout = dict(title= 'Number of Items by Main Category',
              yaxis = dict(title='Count'),
              xaxis = dict(title='Category'))
fig=dict(data=[trace1], layout=layout)
py.iplot(fig)
# + _cell_guid="ee50ae89-4496-46d0-96f4-6f9f9d5407cc" _kg_hide-input=false _kg_hide-output=false _uuid="87c01e755ab62d552d584822460ae80e58843966"
# Same chart for the 15 most frequent first-level sub-categories.
x = train['subcat_1'].value_counts().index.values.astype('str')[:15]
y = train['subcat_1'].value_counts().values[:15]
pct = [("%.2f"%(v*100))+"%"for v in (y/len(train))][:15]
# + _cell_guid="2cbdb24a-efc5-478f-b3d2-3acad54abbd1" _kg_hide-input=true _uuid="6fcb8ee80064d4c671c9e0f94d0025ad6ee4ff95"
trace1 = go.Bar(x=x, y=y, text=pct,
                marker=dict(
                color = y,colorscale='Portland',showscale=True,
                reversescale = False
                ))
layout = dict(title= 'Number of Items by Sub Category (Top 15)',
              yaxis = dict(title='Count'),
              xaxis = dict(title='SubCategory'))
fig=dict(data=[trace1], layout=layout)
py.iplot(fig)
# + [markdown] _cell_guid="59d504e8-a2cb-4c10-ae53-d5269000d937" _uuid="e66bf56801446d3c32bef84f7ef5820b331776c7"
# From the pricing (log of price) point of view, all the categories are pretty well distributed, with no category with an extraordinary pricing point
# + _cell_guid="94892e05-8c33-4ff3-84aa-24e39b4075ee" _kg_hide-input=true _kg_hide-output=false _uuid="788c3e10ed1ce263d2f2f2033e29cd01ed09454f"
# One price Series per main category, feeding the per-category box plots.
general_cats = train['general_cat'].unique()
x = [train.loc[train['general_cat']==cat, 'price'] for cat in general_cats]
# + _cell_guid="3e171144-9090-4b21-949c-a4ac42879a8e" _kg_hide-input=true _kg_hide-output=false _uuid="da641275501bfb5c5e18aa18ddb964a61fe6ad1c"
# Box plots are drawn on the log(price+1) scale, matching earlier histograms.
data = [go.Box(x=np.log(x[i]+1), name=general_cats[i]) for i in range(len(general_cats))]
# + _cell_guid="5b7dadb9-3b40-4883-bb07-942f83297364" _kg_hide-input=true _uuid="dba9498362d3b574d2d0a7bc2010f99369b99338"
layout = dict(title="Price Distribution by General Category",
              yaxis = dict(title='Frequency'),
              xaxis = dict(title='Category'))
fig = dict(data=data, layout=layout)
py.iplot(fig)
# + [markdown] _cell_guid="a2f9d3e4-9d8e-480d-8deb-465310fc8cdd" _uuid="d52355679d02f3f7d629c25384dc5751c6023de9"
# ## **Brand Name**
# + _cell_guid="352301ea-175f-435f-8b83-dad50cb390c0" _uuid="5d75b2bf16078275fbd938a1483de3b526fa4c2c"
print("There are %d unique brand names in the training dataset." % train['brand_name'].nunique())
# + _cell_guid="795e315b-5c5b-481c-8302-7b4bfca0455d" _kg_hide-input=true _kg_hide-output=true _uuid="39dfac5c98e102b2637137032d1a17c055d8b31e"
# Top-10 brands by item count (x = brand labels, y = counts).
x = train['brand_name'].value_counts().index.values.astype('str')[:10]
y = train['brand_name'].value_counts().values[:10]
# + _cell_guid="bd9a1b9b-c89d-4451-b1ca-52053cd6a611" _kg_hide-input=true _uuid="3df62861a822c99f3c42800c8e64e39f4276f9ab"
# NOTE(review): the brand bar chart below is commented out, which leaves the
# x and y computed above unused - either re-enable it or drop both cells.
# trace1 = go.Bar(x=x, y=y,
#                 marker=dict(
#                 color = y,colorscale='Portland',showscale=True,
#                 reversescale = False
#                 ))
# layout = dict(title= 'Top 10 Brand by Number of Items',
#               yaxis = dict(title='Brand Name'),
#               xaxis = dict(title='Count'))
# fig=dict(data=[trace1], layout=layout)
# py.iplot(fig)
# + [markdown] _cell_guid="19d21a67-0b3b-4ade-8dc0-15003f03a146" _uuid="d2473fadc100c0ee057ebec8252dbd861751a947"
# ## **Item Description**
# + [markdown] _cell_guid="d643eeaa-e3d7-4148-8418-2a9f9d1fa5cc" _uuid="21c36e9a9a9f4ab2d66ac1a5f193051a8e894edb"
# It will be more challenging to parse through this particular item since it's unstructured data. Does it mean a more detailed and lengthy description will result in a higher bidding price? We will strip out all punctuations, remove some english stop words (i.e. redundant words such as "a", "the", etc.) and any other words with a length less than 3:
# + _cell_guid="3d536351-69ac-41a5-ad40-3114a24bd326" _uuid="40d28e05a35b4ef4fe11c436200cc5e40f5d561e"
def wordCount(text):
    """Count informative words in *text*.

    Lower-cases the text, replaces punctuation/digits/whitespace escapes
    with spaces, then counts tokens that are not English stop words and
    are longer than 3 characters. Returns 0 for non-string input (e.g.
    NaN item descriptions). The original bare ``except`` hid every error;
    only AttributeError (no ``.lower`` on non-strings) is expected here.
    """
    try:
        # lower-case, then blank out punctuation, digits and \r\t\n
        text = text.lower()
        regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]')
        txt = regex.sub(" ", text)
        # keep tokens that are not stop words and are at least 4 chars long
        words = [w for w in txt.split(" ")
                 if w not in stop_words.ENGLISH_STOP_WORDS and len(w) > 3]
        return len(words)
    except AttributeError:
        # non-string (NaN float) input has no .lower()
        return 0
# + _cell_guid="b71db1b9-756c-4dc3-8445-94f39d697d8e" _uuid="e72483da02ea3667ba3f14db0f0996eabc980861"
# add a column of word counts to both the training and test set
train['desc_len'] = train['item_description'].apply(lambda x: wordCount(x))
test['desc_len'] = test['item_description'].apply(lambda x: wordCount(x))
# + _cell_guid="511aec70-6890-4504-8e43-742056a47733" _uuid="79ebf5e7b0e03c993a3b4c53c2abf80b2c6d17b6"
train.head()
# + _cell_guid="d935df86-7977-434f-ad53-0c1c9aa44a20" _uuid="e7e8accc896ef4a04e833fb2abca498e715e3785"
# Mean price per description length, for the line chart below.
df = train.groupby('desc_len')['price'].mean().reset_index()
# + _cell_guid="e34e4bf9-073c-4ff4-a052-1f76f4283ea3" _kg_hide-input=true _uuid="6b30d788953627a01463ae3422f890f675c87d7b"
# Average log(price+1) as a function of description word count.
trace1 = go.Scatter(
    x = df['desc_len'],
    y = np.log(df['price']+1),
    mode = 'lines+markers',
    name = 'lines+markers'
)
layout = dict(title= 'Average Log(Price) by Description Length',
              yaxis = dict(title='Average Log(Price)'),
              xaxis = dict(title='Description Length'))
fig=dict(data=[trace1], layout=layout)
py.iplot(fig)
# + [markdown] _cell_guid="cd93bed3-431f-4d42-bd98-ff287d29daa6" _uuid="52732f7292faf4e2fdd629f723d28a3bdc34ec93"
# We also need to check if there are any missing values in the item description (4 observations don't have a description) and remove those observations from our training set.
# + _cell_guid="82d38cb1-ad25-4479-bf35-7df79eef5a23" _kg_hide-input=true _kg_hide-output=true _uuid="27d17728a67f5ef8203f16164edcc5f939c5971d"
train.item_description.isnull().sum()
# + _cell_guid="eedf987b-9e21-4e7b-940a-ef0bd0e471c4" _uuid="165c1267209c1f497224690cf9f4ca2829c571a1"
# remove missing values in item description
train = train[pd.notnull(train['item_description'])]
# + _cell_guid="b90e6701-76be-476c-b720-380d631bfd03" _kg_hide-input=true _kg_hide-output=true _uuid="c12117ac987cbd79d6d7612c8e6e26e9c83e3e72"
# create a dictionary of words for each category
# NOTE(review): tokenize() is only defined further down in this notebook, so a
# top-to-bottom run of this cell raises NameError; the same aggregation is
# repeated after the tokenize definition, making this cell look like a leftover.
cat_desc = dict()
for cat in general_cats:
    text = " ".join(train.loc[train['general_cat']==cat, 'item_description'].values)
    cat_desc[cat] = tokenize(text)
# flat list of all words combined
flat_lst = [item for sublist in list(cat_desc.values()) for item in sublist]
allWordsCount = Counter(flat_lst)
all_top10 = allWordsCount.most_common(20)
x = [w[0] for w in all_top10]
y = [w[1] for w in all_top10]
# + _cell_guid="492f17f5-952e-4561-9c45-5722fce7afe3" _kg_hide-input=true _kg_hide-output=true _uuid="d2748e1e0f07eb31c68c907ddf49e96595a32e97"
# NOTE(review): `pct` here is stale - it still holds the sub-category
# percentages computed earlier, not percentages for these word counts.
trace1 = go.Bar(x=x, y=y, text=pct)
layout = dict(title= 'Word Frequency',
              yaxis = dict(title='Count'),
              xaxis = dict(title='Word'))
fig=dict(data=[trace1], layout=layout)
py.iplot(fig)
# + [markdown] _cell_guid="a95bea2e-e175-43f1-81f2-d93ee6e16d2c" _uuid="f1d170c1d5d760b485de430f3ab3a3cf4cc1aa8a"
# If we look at the most common words by category, we could also see that, ***size***, ***free*** and ***shipping*** is very commonly used by the sellers, probably with the intention to attract customers, which is contradictory to what we have shown previously that there is little correlation between the two variables `price` and `shipping` (or shipping fees do not account for a differentiation in prices). ***Brand names*** also played quite an important role - it's one of the most popular in all four categories.
# + [markdown] _cell_guid="5a657436-9c24-4fbd-aada-eaa1330a60a2" _uuid="c4c2be486e56d6d9ec662f913856fac5ea65570e"
# # **Text Processing - Item Description**
# *The following section is based on the tutorial at https://ahmedbesbes.com/how-to-mine-newsfeed-data-and-extract-interactive-insights-in-python.html*
# + [markdown] _cell_guid="30158f97-7034-4d84-8d51-c7e8ca6ac56e" _uuid="5634aee89798082ea4025145d560629e32c026aa"
# ## **Pre-processing: tokenization**
#
# Most of the time, the first steps of an NLP project is to **"tokenize"** your documents, which main purpose is to normalize our texts. The three fundamental stages will usually include:
# * break the descriptions into sentences and then break the sentences into tokens
# * remove punctuation and stop words
# * lowercase the tokens
# * herein, I will also only consider words that have length equal to or greater than 3 characters
# + _cell_guid="79b3df0f-535f-4b91-ab2e-caffbd73a315" _uuid="e23a26ab8cccc9cfdf750ddcdfcb9cc1f28475c4"
# English stop words used by the tokenizer below.
stop = set(stopwords.words('english'))
def tokenize(text):
    """
    sent_tokenize(): segment text into sentences
    word_tokenize(): break sentences into words

    Returns lower-cased alphabetic tokens of length >= 3 with punctuation,
    digits and English stop words removed.

    Fix: the original ``except TypeError`` printed and then implicitly
    returned None, which breaks any caller that iterates the result (e.g.
    TfidfVectorizer(tokenizer=tokenize)); return an empty token list instead.
    """
    try:
        regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]')
        text = regex.sub(" ", text)  # remove punctuation / digits
        tokens_ = [word_tokenize(s) for s in sent_tokenize(text)]
        tokens = []
        for token_by_sent in tokens_:
            tokens += token_by_sent
        # drop stop words, keep tokens containing at least one letter
        tokens = list(filter(lambda t: t.lower() not in stop, tokens))
        filtered_tokens = [w for w in tokens if re.search('[a-zA-Z]', w)]
        filtered_tokens = [w.lower() for w in filtered_tokens if len(w) >= 3]
        return filtered_tokens
    except TypeError as e:
        print(text, e)
        return []
# + _cell_guid="7250efc8-77ba-40c1-bbad-f716bc6eeb6d" _uuid="32ddfee0de0f5f64bdae8944e9eccfa0d31751b3"
# apply the tokenizer into the item description column
train['tokens'] = train['item_description'].map(tokenize)
test['tokens'] = test['item_description'].map(tokenize)
# + _cell_guid="b8ad8934-bbaf-4db7-8da4-fdb70fdf17cc" _kg_hide-input=true _kg_hide-output=true _uuid="98cb8b403c7ea7c47663caf98e4259fb85c7dd72"
# Reset row labels after the earlier missing-description filter.
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
# + [markdown] _cell_guid="f4fb8dcd-3f94-4fef-908a-42fc864be42c" _uuid="548e888a1839b2c3c3d93e7578357b20af095b3d"
# Let's look at the examples of if the tokenizer did a good job in cleaning up our descriptions
# + _cell_guid="74c356c2-e2b8-4dda-b6d5-ccad74d6302a" _uuid="57a867fd01f737b7a60e04a5e8ab6e59b3c4a150"
# Print a few description/token pairs to eyeball the tokenizer output.
for description, tokens in zip(train['item_description'].head(),
                               train['tokens'].head()):
    print('description:', description)
    print('tokens:', tokens)
    print()
# + [markdown] _cell_guid="643b583b-11a3-45fe-94e4-1f6dcdafb196" _kg_hide-output=true _uuid="01942e57c0fcab0894c38e31cff43109a9bce23e"
# We could aso use the package `WordCloud` to easily visualize which words has the highest frequencies within each category:
# + _cell_guid="349888f0-7ffe-44aa-8bf7-d03df78073c3" _uuid="d17f807ae37a466f5e9410d97c37560670e08a74"
# build dictionary with key=category and values as all the descriptions related.
cat_desc = dict()
for cat in general_cats:
    text = " ".join(train.loc[train['general_cat']==cat, 'item_description'].values)
    cat_desc[cat] = tokenize(text)
# find the most common words for the top 4 categories
women100 = Counter(cat_desc['Women']).most_common(100)
beauty100 = Counter(cat_desc['Beauty']).most_common(100)
kids100 = Counter(cat_desc['Kids']).most_common(100)
electronics100 = Counter(cat_desc['Electronics']).most_common(100)
# + _cell_guid="b2f2274a-d48c-43c6-8349-c1a597967f87" _uuid="15b09d32c291e35db0de4c610a61ac3bf732f4a0"
def generate_wordcloud(tup):
    """Build a 50-word word cloud from the string form of *tup*.

    The fixed random_state keeps the layout reproducible across runs.
    """
    cloud = WordCloud(background_color='white',
                      max_words=50, max_font_size=40,
                      random_state=42)
    return cloud.generate(str(tup))
# + _cell_guid="efd8b7a2-960b-47a7-a25a-a6b5b09362b8" _kg_hide-input=true _uuid="974f2954233a235ca805ea393220ac5a1fbd1bfd"
# Render the four per-category word clouds on a 2x2 grid.
# Fix: the original repeated the imshow/axis/set_title triple four times and
# only passed interpolation="bilinear" to the first panel, so the other three
# rendered with the default interpolation; loop over the panels so all four
# are drawn consistently.
fig, axes = plt.subplots(2, 2, figsize=(30, 15))
panels = [("Women Top 100", women100),
          ("Beauty Top 100", beauty100),
          ("Kids Top 100", kids100),
          ("Electronic Top 100", electronics100)]
for ax, (title, words) in zip(axes.flatten(), panels):
    ax.imshow(generate_wordcloud(words), interpolation="bilinear")
    ax.axis('off')
    ax.set_title(title, fontsize=30)
# + [markdown] _cell_guid="e77c6bc3-3a04-4b0f-8a89-31a31878f639" _uuid="190223022141a75be619b3471e5469eae329cad1"
# ## **Pre-processing: tf-idf**
# + [markdown] _cell_guid="76a54be8-3ef9-4184-b5bd-ca535d9abf75" _uuid="3b7d46cfb2279f955b639c68589fd839a46a3c4c"
# tf-idf is the acronym for **Term Frequency–inverse Document Frequency**. It quantifies the importance of a particular word in relative to the vocabulary of a collection of documents or corpus. The metric depends on two factors:
# - **Term Frequency**: the occurences of a word in a given document (i.e. bag of words)
# - **Inverse Document Frequency**: the reciprocal number of times a word occurs in a corpus of documents
#
# Think about of it this way: If the word is used extensively in all documents, its existence within a specific document will not be able to provide us much specific information about the document itself. So the second term could be seen as a penalty term that penalizes common words such as "a", "the", "and", etc. tf-idf can therefore, be seen as a weighting scheme for words relevancy in a specific document.
# + _cell_guid="11e3a9f9-e94a-45b0-90df-f452727028cd" _uuid="3152ded0c36fc76a0e60f9b6aabb0824bb9e4f13"
from sklearn.feature_extraction.text import TfidfVectorizer
# Unigram+bigram tf-idf over item descriptions:
# - min_df=10: drop tokens that appear in fewer than 10 descriptions
# - max_features=180000: cap the vocabulary size
# - tokenizer=tokenize: custom tokenizer defined earlier in the notebook
vectorizer = TfidfVectorizer(min_df=10,
                             max_features=180000,
                             tokenizer=tokenize,
                             ngram_range=(1, 2))
# + _cell_guid="7c556ca8-9969-4c5e-af24-f54411732c75" _uuid="37f44a006ca9d2f4f71081f55278759d7a473808"
# Fit on the combined train+test descriptions so both share one vocabulary;
# vz is the (n_descriptions x n_tokens) sparse tf-idf matrix used below.
all_desc = np.append(train['item_description'].values, test['item_description'].values)
vz = vectorizer.fit_transform(list(all_desc))
# + [markdown] _cell_guid="ce40e6f2-c56e-4412-815e-46852ae8e21c" _uuid="d581a19f324ec33c6bc13ae9c6197087f191f0c0"
# vz is a tfidf matrix where:
# * the number of rows is the total number of descriptions
# * the number of columns is the total number of unique tokens across the descriptions
# + _cell_guid="bd07b7e0-33cd-4326-a02c-bc0b6660f1e6" _uuid="93a3d6f38fdc6f9eb756ff2a72c422d81191d395"
# create a dictionary mapping the tokens to their tfidf values
# NOTE(review): the original built a throwaway empty DataFrame and called
# `.from_dict` on it; `from_dict` is a classmethod, so call it on the class
# and name the column directly.
tfidf = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
tfidf = pd.DataFrame.from_dict(tfidf, orient='index', columns=['tfidf'])
# + [markdown] _cell_guid="891a9d4b-c684-48e1-9ac2-72dcf84bedd7" _uuid="8c27a5ff0f5a57fd4c50c8730aca758c05558426"
# Below are the 10 tokens with the lowest tfidf score — unsurprisingly, very generic words that we could not use to distinguish one description from another.
# + _cell_guid="bac2d553-a3e8-48e6-97a8-228e75dd97ae" _uuid="7b3dafb38d33b46ddb2509cca86b7fae2d4a678f"
# Ten most generic tokens (lowest idf weight).
tfidf.sort_values(by=['tfidf'], ascending=True).head(10)
# + [markdown] _cell_guid="25b77b22-1e2c-4f37-851f-72b07d4e0c6e" _uuid="ea113f51e664f454b087db59757ad538f9632b9b"
# Below are the 10 tokens with the highest tfidf score, which includes words that are so specific that by looking at them, we could guess the categories that they belong to:
# + _cell_guid="e2c31570-8e83-4c88-bc50-dbb0727cc8a0" _uuid="68a97356bd6afaf78b441d5887f68aad53ac9170"
# Ten most distinctive tokens (highest idf weight).
tfidf.sort_values(by=['tfidf'], ascending=False).head(10)
# + [markdown] _cell_guid="2540c3fa-61cc-41f8-9542-c0274c949271" _uuid="2031b851f6edb5865308149ef18346f43b59a679"
# Given the high dimension of our tfidf matrix, we need to reduce its dimension using the Singular Value Decomposition (SVD) technique. And to visualize our vocabulary, we could next use t-SNE to reduce the dimension from 30 to 2. t-SNE is more suitable for dimensionality reduction to 2 or 3.
#
# ### **t-Distributed Stochastic Neighbor Embedding (t-SNE)**
#
# t-SNE is a technique for dimensionality reduction that is particularly well suited for the visualization of high-dimensional datasets. The goal is to take a set of points in a high-dimensional space and find a representation of those points in a lower-dimensional space, typically the 2D plane. It is based on probability distributions with random walk on neighborhood graphs to find the structure within the data. But since t-SNE complexity is significantly high, usually we'd use other high-dimension reduction techniques before applying t-SNE.
#
# First, let's take a sample from both the training and testing item descriptions, since t-SNE can take a very long time to execute. We can then reduce the dimension of each vector to n_components (here 30) using SVD.
# + _cell_guid="0e99f022-1ef1-498f-82d4-55a41de11b66" _uuid="12c465edfe5158967bd400c7bfe0318a615ccdfa"
# Draw a 15k-row sample from the combined train/test data (t-SNE is too slow
# for the full corpus) and re-fit tf-idf on just the sampled descriptions.
trn = train.copy()
tst = test.copy()
trn['is_train'] = 1
tst['is_train'] = 0
sample_sz = 15000
combined_df = pd.concat([trn, tst])
combined_sample = combined_df.sample(n=sample_sz)
vz_sample = vectorizer.fit_transform(list(combined_sample['item_description']))
# + _cell_guid="d1fdc518-d325-4fe4-b8a8-9f84a975de03" _uuid="6f6e6d1e3e5d75062dc7e844b338bf3a33afecba"
# Compress the sparse tf-idf sample down to 30 dense components before t-SNE.
from sklearn.decomposition import TruncatedSVD
n_comp=30
svd = TruncatedSVD(n_components=n_comp, random_state=42)
svd_tfidf = svd.fit_transform(vz_sample)
# + [markdown] _cell_guid="4c4eded5-ed1f-482b-a83b-02e5aa38a2cd" _uuid="92a3d8c0ba36fb567712d4d6a1470fc9ece2db73"
# Now we can reduce the dimension from 30 to 2 using t-SNE!
# + _cell_guid="894e4868-099f-47ae-aca2-b78f9ca73bcd" _uuid="e2e9aa416aa12531e14911deafbaa5d302f36c67"
# 2-D t-SNE embedding of the SVD-compressed tf-idf vectors.
from sklearn.manifold import TSNE
tsne_model = TSNE(n_components=2, verbose=1, random_state=42, n_iter=500)
# + _cell_guid="4f19da97-4d22-4831-8469-ba8b3b099989" _uuid="612ee5dcdeafc698702cb094b370827d40dd03e1"
tsne_tfidf = tsne_model.fit_transform(svd_tfidf)
# + [markdown] _cell_guid="4f8b10c5-f01d-436a-8cc2-aa5048d71234" _uuid="fde4cd911968c645202d9e165ae62417ac376622"
# It's now possible to visualize our data points. Note that the deviation as well as the size of the clusters imply little information in t-SNE.
# + _cell_guid="87aff740-4718-4321-8468-573945e6c26e" _kg_hide-input=false _uuid="70e0a5e9402a42191a3ef5336d186b386a3885c2"
# Interactive bokeh scatter of the embedding; hover shows the raw text.
output_notebook()
plot_tfidf = bp.figure(plot_width=700, plot_height=600,
                       title="tf-idf clustering of the item description",
                       tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
                       x_axis_type=None, y_axis_type=None, min_border=1)
# + _cell_guid="74ea4283-9ab8-40d6-829a-5dc01e6b31f1" _kg_hide-input=true _kg_hide-output=true _uuid="b6195acc2f32fe21fbf863f4127d7993b4f4994d"
# Re-index so positional rows line up with the t-SNE output order.
combined_sample.reset_index(inplace=True, drop=True)
# + _cell_guid="5976a500-8ec0-44f7-b7cd-cbc81a898989" _uuid="ea95c9e2c90336c2fdd2f89cf3ccffab3f9562d1"
# Hover metadata for each embedded point.
# NOTE(review): assumes combined_sample already carries 'tokens' and
# 'general_cat' columns computed earlier in the notebook — confirm upstream.
tfidf_df = pd.DataFrame(tsne_tfidf, columns=['x', 'y'])
tfidf_df['description'] = combined_sample['item_description']
tfidf_df['tokens'] = combined_sample['tokens']
tfidf_df['category'] = combined_sample['general_cat']
# + _cell_guid="f0a19e78-1db9-4f69-a951-69fe9561f7bb" _uuid="8011ce69a3ea6833ac871691ad1292ad80f62348"
plot_tfidf.scatter(x='x', y='y', source=tfidf_df, alpha=0.7)
hover = plot_tfidf.select(dict(type=HoverTool))
hover.tooltips={"description": "@description", "tokens": "@tokens", "category":"@category"}
show(plot_tfidf)
# + [markdown] _cell_guid="9baa48f4-895c-46e8-b0c6-ab3a6f719cdc" _uuid="6ddd57a9aad405e7ad03b3939fc58502697aa56e"
# ## **K-Means Clustering**
#
# K-means clustering obejctive is to minimize the average squared Euclidean distance of the document / description from their cluster centroids.
# + _cell_guid="4145bb78-54ce-44b7-9014-e0d26e932f6a" _uuid="efbd785dd4814c23ff2ab7ef2ba9dd06b21c154a"
# MiniBatchKMeans scales to the full tf-idf matrix by fitting on small
# random batches instead of the whole dataset each iteration.
from sklearn.cluster import MiniBatchKMeans
num_clusters = 30 # need to be selected wisely
kmeans_model = MiniBatchKMeans(n_clusters=num_clusters,
                               init='k-means++',
                               n_init=1,
                               init_size=1000, batch_size=1000, verbose=0, max_iter=1000)
# + _cell_guid="e1251d38-9813-4d12-bde9-6e3e8f16914b" _uuid="bf1cc88318c78e7b11e3c1ac7edbc79713661293"
# Fit on the full corpus matrix vz; keep both hard assignments and the
# per-cluster distance matrix (used later for t-SNE visualization).
kmeans = kmeans_model.fit(vz)
kmeans_clusters = kmeans.predict(vz)
kmeans_distances = kmeans.transform(vz)
# + _cell_guid="3b2a5a0a-670e-44b3-824e-bb32c3c8a849" _kg_hide-input=false _kg_hide-output=true _uuid="aadb73b5ab0a3a691610357e128bf275a6b3312d"
# Show the ten highest-weight vocabulary terms for every KMeans centroid.
sorted_centroids = kmeans.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for cluster_id in range(num_clusters):
    print("Cluster %d:" % cluster_id)
    # Build the " | "-separated line (trailing separator kept, as before).
    top_line = ''.join(terms[term_idx] + ' | '
                       for term_idx in sorted_centroids[cluster_id, :10])
    print(top_line)
    print()
# + [markdown] _cell_guid="30af7fe9-c3f4-4e0e-88cd-16a03e0e2bf0" _uuid="3763e33b915ed577f02d035028ee2c13035f2892"
# In order to plot these clusters, first we will need to reduce the dimension of the distances to 2 using tsne:
# + _cell_guid="909da1fa-d307-4a4a-a162-cc44820fd094" _kg_hide-output=true _uuid="3d14784c614bb96c0ea626c1a98d8aef706206ed"
# repeat the same steps for the sample
# (full-corpus kmeans above is too large to embed with t-SNE, so refit on
# the 15k sample; this overwrites the full-corpus kmeans variables)
kmeans = kmeans_model.fit(vz_sample)
kmeans_clusters = kmeans.predict(vz_sample)
kmeans_distances = kmeans.transform(vz_sample)
# reduce dimension to 2 using tsne
tsne_kmeans = tsne_model.fit_transform(kmeans_distances)
# + _cell_guid="dd55dce0-2b65-4547-8e83-2f3f8c1aa841" _kg_hide-input=true _kg_hide-output=true _uuid="59438d7cdb5f767b6746e6924cc6cb1a6a46a62a"
# One fixed color per cluster (30 entries to match num_clusters = 30).
colormap = np.array(["#6d8dca", "#69de53", "#723bca", "#c3e14c", "#c84dc9", "#68af4e", "#6e6cd5",
"#e3be38", "#4e2d7c", "#5fdfa8", "#d34690", "#3f6d31", "#d44427", "#7fcdd8", "#cb4053", "#5e9981",
"#803a62", "#9b9e39", "#c88cca", "#e1c37b", "#34223b", "#bdd8a3", "#6e3326", "#cfbdce", "#d07d3c",
"#52697d", "#194196", "#d27c88", "#36422b", "#b68f79"])
# + _cell_guid="aefe9f45-c060-4ef1-8f4e-72c172d7828b" _kg_hide-input=false _kg_hide-output=false _uuid="0f8ff39015f3808158878bf69e2b4382f9a06afb"
#combined_sample.reset_index(drop=True, inplace=True)
# Per-point hover metadata for the cluster plot.
kmeans_df = pd.DataFrame(tsne_kmeans, columns=['x', 'y'])
kmeans_df['cluster'] = kmeans_clusters
kmeans_df['description'] = combined_sample['item_description']
kmeans_df['category'] = combined_sample['general_cat']
#kmeans_df['cluster']=kmeans_df.cluster.astype(str).astype('category')
# + _cell_guid="21da5d15-cae0-49e7-9067-b6f6766afbbf" _uuid="52318f096f8240d65e5d342587c086b1a5307b76"
plot_kmeans = bp.figure(plot_width=700, plot_height=600,
                        title="KMeans clustering of the description",
                        tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
                        x_axis_type=None, y_axis_type=None, min_border=1)
# + _cell_guid="90e136bf-f1b1-41a0-96c1-0f58a4b1f4a5" _uuid="4fe434a55d528f21cd301d7c20c0bd8f3e4386f9"
# Color each point by its cluster id via the colormap lookup.
source = ColumnDataSource(data=dict(x=kmeans_df['x'], y=kmeans_df['y'],
                                    color=colormap[kmeans_clusters],
                                    description=kmeans_df['description'],
                                    category=kmeans_df['category'],
                                    cluster=kmeans_df['cluster']))
plot_kmeans.scatter(x='x', y='y', color='color', source=source)
hover = plot_kmeans.select(dict(type=HoverTool))
hover.tooltips={"description": "@description", "category": "@category", "cluster":"@cluster" }
show(plot_kmeans)
# + [markdown] _cell_guid="499de8b5-7be7-4e88-a7cd-ddc689be0498" _uuid="e75b6be2c799bade7aeae9027affd4f312334295"
# ## **Latent Dirichlet Allocation**
#
# Latent Dirichlet Allocation (LDA) is an algorithms used to discover the topics that are present in a corpus.
#
# > LDA starts from a fixed number of topics. Each topic is represented as a distribution over words, and each document is then represented as a distribution over topics. Although the tokens themselves are meaningless, the probability distributions over words provided by the topics provide a sense of the different ideas contained in the documents.
# >
# > Reference: https://medium.com/intuitionmachine/the-two-paths-from-natural-language-processing-to-artificial-intelligence-d5384ddbfc18
#
# Its input is a **bag of words**, i.e. each document represented as a row, with each columns containing the count of words in the corpus. We are going to use a powerful tool called pyLDAvis that gives us an interactive visualization for LDA.
# + _cell_guid="3f255cb6-11b5-43cd-986d-b16a532fbed6" _uuid="8db5a893242608e737efec9f50bda5d10a42a820"
# LDA consumes raw term counts (bag of words), not tf-idf weights.
# NOTE(review): CountVectorizer and LatentDirichletAllocation are assumed to
# be imported earlier in the notebook — confirm upstream imports.
cvectorizer = CountVectorizer(min_df=4,
                              max_features=180000,
                              tokenizer=tokenize,
                              ngram_range=(1,2))
# + _cell_guid="26d7b384-a33a-4c99-a352-4ef32bb897ed" _uuid="58ebe6357a18e5928e199fb03ed5bd41ed23c0c0"
cvz = cvectorizer.fit_transform(combined_sample['item_description'])
# + _cell_guid="9acb64ea-9bb0-4019-84df-932a78cd806f" _uuid="1382bea915784c1be5b636fddd78315fc825a38e"
# 20 topics, fit with online variational Bayes.
lda_model = LatentDirichletAllocation(n_components=20,
                                      learning_method='online',
                                      max_iter=20,
                                      random_state=42)
# + _cell_guid="68c7f202-c620-42ae-843f-472f67a0ef95" _uuid="39184ac4788791ab09ca777b0f6ec1d589516b4e"
# X_topics: per-document topic weights (n_docs x 20).
X_topics = lda_model.fit_transform(cvz)
# + _cell_guid="3f3e686d-797b-4c22-b970-68778bdc66e9" _uuid="495379112353df675a8aad1c6df7a6762dc725a1"
# Print the ten most probable words of every LDA topic and keep a
# space-joined summary string per topic for later use.
n_top_words = 10
topic_summaries = []
topic_word = lda_model.components_ # get the topic words
vocab = cvectorizer.get_feature_names()
for topic_idx, word_weights in enumerate(topic_word):
    # Reverse-order slice picks the n_top_words largest-weight vocab entries.
    top_words = np.array(vocab)[np.argsort(word_weights)][:-(n_top_words + 1):-1]
    topic_summaries.append(' '.join(top_words))
    print('Topic {}: {}'.format(topic_idx, ' | '.join(top_words)))
# + _cell_guid="635f7f3c-46c0-4f0b-8f05-1c092f81a87b" _uuid="8f0136acba090cf8cd2d9f3589087a6cb94bc5d3"
# reduce dimension to 2 using tsne
tsne_lda = tsne_model.fit_transform(X_topics)
# + _cell_guid="47df42d5-248f-4f92-aacf-5a8e86bbd1ab" _uuid="fcae4082942a7131ce3b598e485902aabf64cd17"
# Normalize each document's topic weights into a probability distribution.
# NOTE(review): the original wrapped X_topics in np.matrix (deprecated);
# plain ndarray broadcasting with keepdims does the same job.
doc_topic = X_topics / X_topics.sum(axis=1, keepdims=True)
# Dominant topic per document — vectorized argmax replaces the per-row loop.
lda_keys = doc_topic.argmax(axis=1).tolist()
lda_df = pd.DataFrame(tsne_lda, columns=['x','y'])
lda_df['description'] = combined_sample['item_description']
lda_df['category'] = combined_sample['general_cat']
lda_df['topic'] = lda_keys
lda_df['topic'] = lda_df['topic'].map(int)
# + _cell_guid="6dfecf36-44d4-49b7-9909-93a230acddb6" _kg_hide-input=true _kg_hide-output=true _uuid="b3b3f9cacc7cac86db640d5b523f3a4b4373ca9b"
# Interactive bokeh scatter of the 2-D LDA topic embedding.
plot_lda = bp.figure(plot_width=700,
                     plot_height=600,
                     title="LDA topic visualization",
                     tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
                     x_axis_type=None, y_axis_type=None, min_border=1)
# + _cell_guid="3fd253f0-a446-47db-8faf-444adf37507e" _uuid="3457a62e7a68c3cc0ed3edc15a314b256775d705"
source = ColumnDataSource(data=dict(x=lda_df['x'], y=lda_df['y'],
                                    color=colormap[lda_keys],
                                    description=lda_df['description'],
                                    topic=lda_df['topic'],
                                    category=lda_df['category']))
plot_lda.scatter(source=source, x='x', y='y', color='color')
# NOTE(review): the original first selected plot_kmeans's hover tool here and
# immediately overwrote the variable — that dead line is removed.
hover = plot_lda.select(dict(type=HoverTool))
hover.tooltips={"description":"@description",
                "topic":"@topic", "category":"@category"}
show(plot_lda)
# + _cell_guid="161db864-75a8-4580-842b-c720efacd585" _uuid="37f2c17318ce1008ebb36d1493fa8ae5ec1b1c5a"
def prepareLDAData():
    """Assemble the pyLDAvis input payload from notebook-global LDA artifacts.

    Relies on globals set earlier: vocab, doc_topic, lda_df (with a
    'len_docs' column), cvectorizer and lda_model.
    """
    return {
        'vocab': vocab,
        'doc_topic_dists': doc_topic,
        'doc_lengths': list(lda_df['len_docs']),
        'term_frequency': cvectorizer.vocabulary_,
        'topic_term_dists': lda_model.components_,
    }
# + [markdown] _cell_guid="3c2dd1c1-82fe-4d59-b7dd-4a20af223eaa" _uuid="72972bccd94dcb10fcf9930878052187d570a7f2"
# *Note: It's a shame that by putting the HTML of the visualization using pyLDAvis, it will distort the layout of the kernel, I won't upload in here. But if you follow the below code, there should be an HTML file generated with very interesting interactive bubble chart that visualizes the space of your topic clusters and the term components within each topic.*
#
# 
# + _cell_guid="6155fb2e-2677-44f0-add6-6993ff608df6" _uuid="e32eb8f082d0960ebe719e33e35fbcb3a270dd30"
import pyLDAvis
# Document lengths (in tokens) are required by pyLDAvis.prepare; this must
# run before prepareLDAData(), which reads lda_df['len_docs'].
lda_df['len_docs'] = combined_sample['tokens'].map(len)
ldadata = prepareLDAData()
pyLDAvis.enable_notebook()
prepared_data = pyLDAvis.prepare(**ldadata)
# + [markdown] _cell_guid="2d7ba29d-5f28-43c3-9920-4a787b08467e" _kg_hide-input=true _kg_hide-output=true _uuid="df165b4bc7db4fa05adb98736b51343617c5f142"
# <a data-flickr-embed="true" href="https://www.flickr.com/photos/thykhuely/38709272151/in/dateposted-public/" title="pyLDAvis"><img src="https://farm5.staticflickr.com/4536/38709272151_7128c577ee_h.jpg" width="1600" height="976" alt="pyLDAvis"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script>
# + _cell_guid="0c8f9e12-d2fe-45ae-b060-0fbba4d821a8" _uuid="a94dd585876f23074115131542a2aae956d63024"
import IPython.display
from IPython.core.display import display, HTML, Javascript
#h = IPython.display.display(HTML(html_string))
#IPython.display.display_HTML(h)
| mercari/mercari-interactive-eda-topic-modelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import Libraries
# +
import os
import boto3
import pandas as pd
import numpy as np
import sagemaker
from sagemaker import get_execution_role
import sagemaker_pyspark
# NOTE(review): SparkSession was only imported in the *next* cell of the
# original notebook, so this cell raised NameError when run top-to-bottom;
# import it here so the cell is self-contained.
from pyspark.sql import SparkSession

# IAM role of the notebook instance, used by the SageMaker APIs.
role = get_execution_role()

# Configure Spark to use the SageMaker Spark dependency jars
jars = sagemaker_pyspark.classpath_jars()
classpath = ":".join(jars)

# See the SageMaker Spark Github to learn how to connect to EMR from a notebook instance
spark = SparkSession.builder.config("spark.driver.extraClassPath", classpath)\
    .master("local[*]").getOrCreate()
spark
# +
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext
from pyspark.sql.types import *
from pyspark.sql.types import DoubleType
import pyspark.sql.functions as fn
# regexp_replace / coalesce / when / lit are used bare in later cells;
# import them explicitly so those cells do not fail with NameError.
from pyspark.sql.functions import col, udf, regexp_replace, coalesce, when, lit
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
# RandomForestClassifier is instantiated in the modelling cell below.
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier
from pyspark.ml.regression import RandomForestRegressor
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
# -
# ### Read file
# Load the CDC mortality extract; all columns come in as strings.
cdc = spark.read.csv('output.csv', header = True)
cdc.printSchema()
# ### Impute null values
# Replace missing categorical codes with each field's "unknown/unspecified"
# code from the CDC codebook.
cdc = cdc.fillna({'place_of_injury_for_causes_w00_y34_except_y06_and_y07_' : '10'})
cdc = cdc.fillna({'130_infant_cause_recode' : '000'})
cdc = cdc.fillna({'activity_code' : '10'})
cdc = cdc.fillna({'manner_of_death' : '8'})
cdc = cdc.fillna({'Place_of_death_and_decedents_status': '7'})
# +
# Missing condition codes default to a catch-all 'Other' bucket.
cdc = cdc.fillna({'record_condition_1': 'Other'})
cdc = cdc.fillna({'record_condition_2': 'Other'})
cdc = cdc.fillna({'record_condition_3': 'Other'})
cdc = cdc.fillna({'record_condition_4': 'Other'})
cdc = cdc.fillna({'entity_condition_1': 'Other'})
cdc = cdc.fillna({'entity_condition_2': 'Other'})
cdc = cdc.fillna({'entity_condition_3': 'Other'})
cdc = cdc.fillna({'entity_condition_4': 'Other'})
# -
# ### Combine other values of methods_of_disposition
# Collapse the rare disposition codes R/E/D/U into the single 'Other'
# bucket 'O' with one character-class regex instead of four sequential
# single-character passes (behavior is identical for these one-char codes).
cdc = cdc.withColumn('method_of_disposition',
                     regexp_replace('method_of_disposition', '[REDU]', 'O'))
# Merge status code '9' into '7' (same catch-all category).
cdc = cdc.withColumn('Place_of_death_and_decedents_status',
                     regexp_replace('Place_of_death_and_decedents_status', '9', '7'))
# +
# Recode 1989-revision education codes onto the 2003-revision scale.
# NOTE(review): the original chained 20 regexp_replace calls, which match
# *substrings* and depend on application order; an exact-value replacement
# map is both safer and far clearer.
education_1989_to_2003 = {
    # 0-8 years of elementary school -> "8th grade or less"
    '00': '1', '01': '1', '02': '1', '03': '1', '04': '1',
    '05': '1', '06': '1', '07': '1', '08': '1',
    # 1-3 years of high school -> "9-12th grade, no diploma"
    '09': '2', '10': '2', '11': '2',
    # 4 years of high school -> "high school graduate"
    '12': '3',
    # 1-3 years of college -> "some college credit"
    '13': '4', '14': '4',
    # 4 years of college -> bachelor's; 5+ -> graduate degrees
    '15': '5', '16': '6', '17': '7',
    # not stated
    '99': '9',
}
cdc = cdc.replace(education_1989_to_2003, subset=['education_1989_revision'])
# -
# ### Combine the two education revisions
# Prefer the 2003-revision value, falling back to the (recoded) 1989 value.
# NOTE(review): `coalesce` is used bare here but is not among the visible
# imports (only `fn`, `col`, `udf`) — verify it is imported upstream.
cdc = cdc.withColumn("education_2003_revision", coalesce(cdc.education_2003_revision,cdc.education_1989_revision))
# To remove rows coerced while combining yearly datasets
cdc = cdc.filter(cdc.education_2003_revision.isNotNull())
# Drop identifier/flag columns and redundant age/race encodings.
cdc = cdc.drop('detail_age_type','detail_age', 'age_substitution_flag','age_recode_27', 'age_recode_12',
               'infant_age_recode_22', 'icd_code_10th_revision')
cdc = cdc.drop('education_1989_revision', 'education_reporting_flag','bridged_race_flag', 'race_imputation_flag',
               'hispanic_origin')
# Drop the sparsely-populated 5th..20th condition columns.
cdc = cdc.drop('record_condition_20' , 'entity_condition_20', 'entity_condition_19', 'entity_condition_18',
               'entity_condition_17', 'record_condition_19', 'record_condition_18', 'record_condition_16',
               'record_condition_17','record_condition_15','record_condition_14','record_condition_13',
               'record_condition_12','record_condition_11','record_condition_10','record_condition_9',
               'record_condition_8','record_condition_7' , 'record_condition_6', 'record_condition_5',
               'entity_condition_16', 'entity_condition_15','entity_condition_14', 'entity_condition_13',
               'entity_condition_12', 'entity_condition_11', 'entity_condition_10', 'entity_condition_9',
               'entity_condition_8', 'entity_condition_7', 'entity_condition_6', 'entity_condition_5')
# ## Selecting data for firearms
# Keep only deaths whose 358-cause recode is one of the firearm codes.
new_cdc = cdc.filter((cdc['358_cause_recode'] == '429') | (cdc['358_cause_recode'] == '435') | (cdc['358_cause_recode'] == '446')
                     | (cdc['358_cause_recode'] == '450') | (cdc['358_cause_recode'] == '451') | (cdc['358_cause_recode'] == '407'))
new_cdc = new_cdc.drop('number_of_entity_axis_conditions','number_of_record_axis_conditions', '39_cause_recode',
                       '130_infant_cause_recode','113_cause_recode', 'month_of_death','current_data_year','day_of_week_of_death')
from pyspark.sql.functions import lit
# Binary target: suicide = 1 when manner_of_death code is '2' (suicide).
# NOTE(review): `when` is used bare below but is not among the visible
# imports — verify it is imported upstream.
new_cdc = new_cdc.withColumn("suicide", lit(0))
new_cdc = new_cdc.withColumn('suicide',
                             when(new_cdc['manner_of_death']== '2' , 1).otherwise(new_cdc['suicide']))
# Drop the label source and leakage-prone condition/cause columns.
new_cdc = new_cdc.drop('manner_of_death','entity_condition_1','entity_condition_2','entity_condition_3','entity_condition_4',
                       'record_condition_1','record_condition_2','record_condition_3','record_condition_4','358_cause_recode')
# StringIndexer below expects a string label column.
new_cdc = new_cdc.withColumn("suicide", new_cdc["suicide"].cast(StringType()))
# # Use stringindexer
# Index every categorical column into a numeric "<col>Index" column.
# NOTE(review): the original had two real bugs fixed here:
#   1. the first four indexers were fit/transformed on an undefined `new_dc`
#      (typo for `new_cdc`), which raised NameError;
#   2. midway through, the Place_of_death indexer was fit on `new_cdc` and
#      its transform re-assigned `new_df`, silently discarding the first
#      four indexed columns.
# Threading one DataFrame through a single loop removes both failure modes.
categorical_cols = [
    'resident_status',
    'education_2003_revision',
    'sex',
    'age_recode_52',
    'Place_of_death_and_decedents_status',
    'marital_status',
    'injury_at_work',
    'autopsy',
    'method_of_disposition',
    'activity_code',
    'place_of_injury_for_causes_w00_y34_except_y06_and_y07_',
    'race',
    'race_recode_3',
]
new_df = new_cdc
for cat_col in categorical_cols:
    indexer = StringIndexer(inputCol=cat_col, outputCol=cat_col + "Index").fit(new_df)
    new_df = indexer.transform(new_df)
# One-hot encode every "<col>Index" column produced by the StringIndexer
# step into a sparse "<col>Vec" column (same encoders, applied in a loop).
onehot_cols = [
    'resident_status',
    'education_2003_revision',
    'sex',
    'age_recode_52',
    'Place_of_death_and_decedents_status',
    'marital_status',
    'injury_at_work',
    'autopsy',
    'method_of_disposition',
    'activity_code',
    'place_of_injury_for_causes_w00_y34_except_y06_and_y07_',
    'race',
    'race_recode_3',
]
for base_col in onehot_cols:
    encoder = OneHotEncoder(inputCol=base_col + "Index", outputCol=base_col + "Vec")
    new_df = encoder.transform(new_df)
new_df.columns
new_df.printSchema()
# Feature columns: the one-hot vectors built above.
assemblerInputs = ['resident_statusVec',
                   'education_2003_revisionVec',
                   'sexVec',
                   'age_recode_52Vec',
                   'Place_of_death_and_decedents_statusVec',
                   'marital_statusVec',
                   'injury_at_workVec',
                   'autopsyVec',
                   'method_of_dispositionVec',
                   'activity_codeVec',
                   'place_of_injury_for_causes_w00_y34_except_y06_and_y07_Vec',
                   'raceVec',
                   'race_recode_3Vec']
assemblerInputs
# Numeric label column ("label") required by Spark ML estimators.
label_string_index = StringIndexer(inputCol="suicide", outputCol="label").fit(new_df)
new_df = label_string_index.transform(new_df)
# Concatenate all one-hot vectors into the single "features" vector column.
assembler = VectorAssembler(inputCols=assemblerInputs, outputCol="features")
new_df = assembler.transform(new_df)
# Fixed-seed 70/30 train/test split for reproducibility.
training, test = new_df.randomSplit([0.7, 0.3], seed = 11)
# +
# Random forest on the assembled features.
# NOTE(review): RandomForestClassifier is not among the visible imports
# (only RandomForestRegressor is) — confirm it is imported upstream, e.g.
# from pyspark.ml.classification.
rf = RandomForestClassifier(labelCol="label", featuresCol="features", numTrees=10)
# Train model. This also runs the indexers.
model = rf.fit(training)
# Make predictions.
predictions = model.transform(test)
# -
# Recover human-readable feature names for the model's non-zero importances
# from the VectorAssembler metadata attached to the "features" column.
meta = [f.metadata
        for f in new_df.schema.fields
        if f.name == 'features'][0]
features_name_ind = meta['ml_attr']['attrs']['binary']
feature_names = [features_name_ind[i]['name']
                 for i in model.featureImportances.indices]
# NOTE(review): pd.DataFrame.from_items was removed in pandas 1.0; build the
# frame from a column dict instead (dicts preserve insertion order).
inference_df = pd.DataFrame({'feature_names': feature_names,
                             'variable_imp': list(model.featureImportances.values)})
inference_df.sort_values(by='variable_imp', ascending=False)
# Spot-check a single importance value.
model.featureImportances[4]
# NOTE(review): `preppedDataDF` is not defined anywhere in the visible
# notebook — this cell will raise NameError; it likely meant `new_df`
# (same lookup as the previous cell). Confirm intent before relying on it.
meta = [f.metadata
        for f in preppedDataDF.schema.fields
        if f.name == 'features'][0]
features_name_ind = meta['ml_attr']['attrs']['binary']
features_name_ind
# Confusion-matrix counts and derived metrics on the test-set predictions.
# NOTE(review): the original referenced an undefined `selected` DataFrame;
# derive it from the `predictions` DataFrame produced by the model above.
selected = predictions.select("label", "prediction")
tp = selected.where(selected["label"] == 1).where(selected["prediction"] == 1).count()
tn = selected.where(selected["label"] == 0).where(selected["prediction"] == 0).count()
fp = selected.where(selected["label"] == 0).where(selected["prediction"] == 1).count()
fn = selected.where(selected["label"] == 1).where(selected["prediction"] == 0).count()
# Accuracy is reported as a percentage; precision/recall/F1 as fractions.
accuracy = (tp + tn) * 100 / (tp + tn + fp + fn)
accuracy
precision = tp / (tp + fp)
precision
recall = tp / (tp + fn)
recall
f1_score = 2 * precision * recall / (precision + recall)
f1_score
| final/Final ML on AWS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HW2: Khám phá dữ liệu, tiền xử lý, phân tích đơn giản
# **Vì đây là bài tập về Pandas nên yêu cầu là không được dùng vòng lặp**
#
# (Cập nhật lần cuối: 25/07/2021)
#
# Họ tên: <NAME>
#
# MSSV: 18127070
# ---
# ## Cách làm bài và nộp bài (bạn cần đọc kỹ)
# ⚡ Bạn lưu ý là mình sẽ dùng chương trình hỗ trợ chấm bài nên bạn cần phải tuân thủ chính xác qui định mà mình đặt ra, nếu không rõ thì hỏi, chứ không nên tự tiện làm theo ý của cá nhân.
#
# **Cách làm bài**
#
# Bạn sẽ làm trực tiếp trên file notebook này. Đầu tiên, bạn điền họ tên và MSSV vào phần đầu file ở bên trên. Trong file, bạn làm bài ở những chỗ có ghi là:
# ```python
# # YOUR CODE HERE
# raise NotImplementedError()
# ```
# hoặc đối với những phần code không bắt buộc thì là:
# ```python
# # YOUR CODE HERE (OPTION)
# ```
# hoặc đối với markdown cell thì là:
# ```markdown
# YOUR ANSWER HERE
# ```
# Tất nhiên, khi làm thì bạn xóa dòng `raise NotImplementedError()` đi.
# Đối những phần yêu cầu code thì thường ở ngay phía dưới sẽ có một (hoặc một số) cell chứa các bộ test để giúp bạn biết đã code đúng hay chưa; nếu chạy cell này không có lỗi gì thì có nghĩa là qua được các bộ test. Trong một số trường hợp, các bộ test có thể sẽ không đầy đủ; nghĩa là, nếu không qua được test thì là code sai, nhưng nếu qua được test thì chưa chắc đã đúng.
#
# Trong khi làm bài, bạn có thể cho in ra màn hình, tạo thêm các cell để test. Nhưng khi nộp bài thì bạn xóa các cell mà bạn tự tạo, xóa hoặc comment các câu lệnh in ra màn hình. Bạn lưu ý <font color=red>không được tự tiện xóa các cell hay sửa code của Thầy</font> (trừ những chỗ được phép sửa như đã nói ở trên).
#
# Trong khi làm bài, thường xuyên `Ctrl + S` để lưu lại bài làm của bạn, tránh mất mát thông tin.
#
#
# *Nên nhớ mục tiêu chính ở đây là <font color=green>học, học một cách chân thật</font>. Bạn có thể thảo luận ý tưởng với bạn khác cũng như tham khảo các nguồn trên mạng, nhưng sau cùng <font color=green>code và bài làm phải là của bạn, dựa trên sự hiểu thật sự của bạn</font>. Khi tham khảo các nguồn trên mạng thì bạn cần ghi rõ nguồn trong bài làm. Bạn không được tham khảo bài làm của các bạn năm trước (vì nếu làm vậy thì bao giờ bạn mới có thể tự mình suy nghĩ để giải quyết vấn đề); sau khi kết thúc môn học, bạn cũng không được đưa bài làm cho các bạn khóa sau hoặc public bài làm trên Github (vì nếu làm vậy thì sẽ ảnh hưởng tới việc học của các bạn khóa sau). Nếu bạn có thể làm theo những gì mình nói thì điểm của bạn có thể sẽ không cao nhưng bạn sẽ có được những bước tiến thật sự. <font color=red>Trong trường hợp bạn vi phạm những điều mình nói ở trên thì sẽ bị 0 điểm cho toàn bộ môn học.</font>*
#
# **Cách nộp bài**
#
# Khi chấm bài, đầu tiên mình sẽ chọn `Kernel` - `Restart & Run All`, để restart và chạy tất cả các cell trong notebook của bạn; do đó, trước khi nộp bài, bạn nên chạy thử `Kernel` - `Restart & Run All` để đảm bảo mọi chuyện diễn ra đúng như mong đợi.
#
# Sau đó, bạn tạo thư mục nộp bài theo cấu trúc sau:
# - Thư mục `MSSV` (vd, nếu bạn có MSSV là 1234567 thì bạn đặt tên thư mục là `1234567`)
# - File `HW2.ipynb` (không cần nộp các file khác)
#
# Cuối cùng, bạn nén thư mục `MSSV` này lại và nộp ở link trên moodle. Đuôi của file nén phải là .zip (chứ không được .rar hay gì khác).
#
# <font color=red>Bạn lưu ý tuân thủ chính xác qui định nộp bài ở trên.</font>
# + [markdown] Collapsed="false"
# ---
# -
# ## Môi trường code
# Ta thống nhất trong môn này: dùng phiên bản các package như trong file "min_ds-env.yml" (file này đã được cập nhật một số lần, file mới nhất ở đầu có dòng: "Last update: 24/06/2021"). Cách tạo ra môi trường code từ file "min_ds-env.yml" đã được nói ở file "02_BeforeClass-Notebook_Python.pdf".
# Check môi trường code:
import sys
# Path of the Python interpreter backing this kernel; it should point into
# the "min_ds-env" conda environment if the setup was done correctly.
sys.executable
# Nếu không có vấn đề gì thì file chạy python sẽ là file của môi trường code "min_ds-env".
# ---
# ## Import
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# YOUR CODE HERE (OPTION)
# Nếu cần các thư viện khác thì bạn có thể import ở đây
# ---
# ## Thu thập dữ liệu
# Dữ liệu được sử dụng trong bài tập này là dữ liệu khảo sát các lập trình viên của trang StackOverflow. Mình download dữ liệu [ở đây](https://drive.google.com/file/d/1dfGerWeWkcyQ9GX9x20rdSGj7WtEpzBB/view) và có bỏ đi một số cột để đơn giản hóa.
#
# Theo mô tả trong file "README_2020.txt" của StackOverflow:
# >The enclosed data set is the full, cleaned results of the 2020 Stack Overflow Developer Survey. Free response submissions and personally identifying information have been removed from the results to protect the privacy of respondents. There are three files besides this README:
# >
# >1. survey_results_public.csv - CSV file with main survey results, one respondent per row and one column per answer
# >2. survey_results_schema.csv - CSV file with survey schema, i.e., the questions that correspond to each column name
# >3. so_survey_2020.pdf - PDF file of survey instrument
# >
# >The survey was fielded from February 5 to February 28, 2020. The median time spent on the survey for qualified responses was 16.6 minutes.
# >
# >Respondents were recruited primarily through channels owned by Stack Overflow. The top 5 sources of respondents were onsite messaging, blog posts, email lists, Meta posts, banner ads, and social media posts. Since respondents were recruited in this way, highly engaged users on Stack Overflow were more likely to notice the links for the survey and click to begin it.
#
# File "survey_results_public-short.csv" mà mình đính kèm là phiên bản đơn giản hóa của file "survey_results_public.csv" (từ 61 cột, mình bỏ xuống còn 29 cột). Đây là file dữ liệu chính mà bạn sẽ làm trong bài tập này. Ngoài ra, mình còn đính kèm 2 file phụ: (1) file "survey_results_schema-short.csv" là file cho biết ý nghĩa của các cột, và (2) file "so_survey_2020.pdf" là file khảo sát gốc của StackOverflow.
#
# Để ý:
# - Dữ liệu này không đại diện được cho cộng đồng lập trình viên trên toàn thế giới, mà chỉ giới hạn trong tập những lập trình viên thực hiện khảo sát của StackOverflow. Những câu trả lời có được thông qua tập dữ liệu này cũng sẽ bị giới hạn trong phạm vi đó.
# - Dữ liệu có đúng không? Về cơ bản là ta không biết được. Ở đây, mục đích chính là học qui trình Khoa Học Dữ Liệu và các câu lệnh của Pandas nên ta sẽ **giả định** phần lớn dữ liệu là đúng và tiếp tục làm.
#
# Cũng theo file "README_2020.txt", dữ liệu này được StackOverflow public với license như sau:
# >This database - The Public 2020 Stack Overflow Developer Survey Results - is made available under the Open Database License (ODbL): http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/
# >
# >TLDR: You are free to share, adapt, and create derivative works from The Public 2020 Stack Overflow Developer Survey Results as long as you attribute Stack Overflow, keep the database open (if you redistribute it), and continue to share-alike any adapted database under the ODbl.
#
# ---
# ## Khám phá dữ liệu
# ### Đọc dữ liệu từ file (0.25đ)
# Đầu tiên, bạn viết code để đọc dữ liệu từ file "survey_results_public-short.csv" và lưu kết quả vào DataFrame `survey_df`; ta thống nhất là sẽ để file dữ liệu này cùng cấp với file notebook và khi đọc file thì chỉ truyền vào tên của file. Ngoài ra, bạn cũng cần cho cột `Respondent` (id của người làm khảo sát) làm cột index của `survey_df`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "0bf411943f48101b385795cb06db43f4", "grade": true, "grade_id": "cell-d46e97079096473c", "locked": false, "points": 0.25, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Load the survey; 'Respondent' (the survey taker's id) becomes the index.
survey_df = pd.read_csv('survey_results_public-short.csv', index_col='Respondent')
# -
# TEST
survey_df.head()
# ### Dữ liệu có bao nhiêu dòng và bao nhiêu cột? (0.25đ)
# Kế đến, bạn tính số dòng và số cột của DataFrame `survey_df` và lần lượt lưu vào biến `num_rows` và `num_cols`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "607dbbe4997f90db7ca9d3e0c596d38a", "grade": false, "grade_id": "cell-8338f3610f6c5002", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# DataFrame.shape is a (rows, columns) tuple — unpack it in one step.
num_rows, num_cols = survey_df.shape
print(f'{num_rows}, {num_cols}')
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "dccad1762b5cd1a5aeb4dd971539f41d", "grade": true, "grade_id": "cell-00f00d1bba235517", "locked": true, "points": 0.25, "schema_version": 3, "solution": false, "task": false}
# TEST
assert num_rows == 64461
assert num_cols == 28
# -
# ### Mỗi dòng có ý nghĩa gì? Có vấn đề các dòng có ý nghĩa khác nhau không?
# Theo file "README_2020.txt" cũng như theo quan sát sơ bộ về dữ liệu, mỗi dòng trong DataFrame `survey_df` cho biết kết quả làm khảo sát của một người. Có vẻ không có vấn đề các dòng có ý nghĩa khác nhau.
# ### Dữ liệu có các dòng bị lặp không? (0.25đ)
# Kế đến, bạn tính số dòng có index (id của người làm khảo sát) bị lặp và lưu vào biến `num_duplicated_rows`. Trong nhóm các dòng có index giống nhau thì dòng đầu tiên không tính là bị lặp.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "8562ed8052a1cfb4a1c033be002aa083", "grade": false, "grade_id": "cell-aadbfe12be205faa", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Every index label beyond its first occurrence counts as a duplicate,
# which is exactly (total labels) - (distinct labels).
num_duplicated_rows = len(survey_df.index) - survey_df.index.nunique()
print(num_duplicated_rows)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "9cf4a1f5b657704c7ff30130242e1646", "grade": true, "grade_id": "cell-7c456d5495d4904d", "locked": true, "points": 0.25, "schema_version": 3, "solution": false, "task": false}
# TEST
assert num_duplicated_rows == 0
# -
# ### Mỗi cột có ý nghĩa gì? (0.25đ)
# Để xem ý nghĩa của mỗi cột thì:
# - Trước tiên, bạn cần đọc file "survey_results_schema-short.csv" vào DataFrame `col_meaning_df`; bạn cũng cần cho cột "Column" làm cột index.
# - Sau đó, bạn chỉ cần hiển thị DataFrame `col_meaning_df` ra để xem (vụ này khó nên ở dưới mình đã làm cho bạn ở cell có dòng "# TEST" 😉). Tuy nhiên, bạn sẽ thấy ở cột "QuestionText": các chuỗi mô tả bị cắt do quá dài. Do đó, trước khi hiển thị DataFrame `col_meaning_df`, bạn cũng cần chỉnh sao đó để các chuỗi mô tả không bị cắt (vụ này bạn tự search Google, gợi ý: bạn sẽ dùng đến câu lệnh `pd.set_option`).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "bc5afa2bdb3831c9bfdc88a3d0aeff5b", "grade": true, "grade_id": "cell-5efb0517df207535", "locked": false, "points": 0.25, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Disable column-width truncation so the long question texts display in full.
pd.set_option('display.max_colwidth', None) #show full text not cut
# Schema file: one row per survey column, 'Column' is the natural index.
col_meaning_df = pd.read_csv('survey_results_schema-short.csv', index_col='Column')
# -
# TEST
col_meaning_df
# Trước khi đi tiếp, bạn nên đọc kết quả hiển thị ở trên và đảm bảo là bạn đã hiểu ý nghĩa của các cột. Để hiểu ý nghĩa của cột, có thể bạn sẽ cần xem thêm các giá trị của cột bên DataFrame `survey_df`.
# ### Mỗi cột hiện đang có kiểu dữ liệu gì? Có cột nào có kiểu dữ liệu chưa phù hợp để có thể xử lý tiếp không? (0.25đ)
# Kế đến, bạn tính kiểu dữ liệu (dtype) của mỗi cột trong DataFrame `survey_df` và lưu kết quả vào Series `dtypes` (Series này có index là tên cột).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "2ecb5b8df41e1b29c852e2e5dd13b893", "grade": false, "grade_id": "cell-206c19691b1a6c05", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Series of per-column dtypes, indexed by column name.
dtypes = survey_df.dtypes
dtypes
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "ac6951bf9ae5d429dd8c2fbeb9d17832", "grade": true, "grade_id": "cell-3ebcf8f07733605b", "locked": true, "points": 0.25, "schema_version": 3, "solution": false, "task": false}
# TEST
float_cols = set(dtypes[(dtypes==np.float32) | (dtypes==np.float64)].index)
assert float_cols == {'Age', 'ConvertedComp', 'WorkWeekHrs'}
object_cols = set(dtypes[dtypes == object].index)
assert len(object_cols) == 25
# -
# Như bạn có thể thấy, cột "YearsCode" và "YearsCodePro" nên có kiểu dữ liệu số, nhưng hiện giờ đang có kiểu dữ liệu object. Ta hãy thử xem thêm về các giá trị 2 cột này.
# Inspect the raw distinct values: besides numeric strings, the answers include
# "Less than 1 year" and "More than 50 years", which prevent a numeric dtype.
survey_df['YearsCode'].unique()
survey_df['YearsCodePro'].unique()
# Ta nên đưa 2 cột này về dạng số để có thể tiếp tục khám phá (tính min, median, max, ...).
# ---
# ## Tiền xử lý (0.5đ)
# Bạn sẽ thực hiện tiền xử lý để chuyển 2 cột "YearsCode" và "YearsCodePro" về dạng số (float). Trong đó: "Less than 1 year" $\to$ 0, "More than 50 years" $\to$ 51. Sau khi chuyển thì `survey_df.dtypes` sẽ thay đổi.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "b12c49ae100fb58e51939537b8f7af48", "grade": false, "grade_id": "cell-cfc7d3af0fb5edec", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Convert "YearsCode" / "YearsCodePro" to float.
# Per the assignment spec: "Less than 1 year" -> 0, "More than 50 years" -> 51.
list_convert = {"Less than 1 year": "0", "More than 50 years": "51"}
# Series.replace only substitutes values that match a dict key and leaves every
# other value untouched, so no apply/lambda guard is needed.
survey_df['YearsCode'] = survey_df['YearsCode'].replace(list_convert).astype(float)
survey_df['YearsCodePro'] = survey_df['YearsCodePro'].replace(list_convert).astype(float)
survey_df.dtypes
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "8a6ac7a184baa06b11ff89d9402fed23", "grade": true, "grade_id": "cell-4ee41f84604837a1", "locked": true, "points": 0.5, "schema_version": 3, "solution": false, "task": false}
# TEST
assert survey_df['YearsCode'].dtype in [np.float32, np.float64]
assert survey_df['YearsCodePro'].dtype in [np.float32, np.float64]
# -
# ---
# ## Quay lại bước khám phá dữ liệu
# ### Với mỗi cột có kiểu dữ liệu dạng số, các giá trị được phân bố như thế nào? (1đ)
#
# (Trong đó: phần tính các mô tả của mỗi cột chiếm 0.5đ, phần tính số lượng giá trị không hợp lệ của mỗi cột chiếm 0.5đ)
# Với các cột có kiểu dữ liệu số, bạn sẽ tính:
# - Tỉ lệ % (từ 0 đến 100) các giá trị thiếu
# - Giá trị min
# - Giá trị lower quartile (phân vị 25)
# - Giá trị median (phân vị 50)
# - Giá trị upper quartile (phân vị 75)
# - Giá trị max
#
# Bạn sẽ lưu kết quả vào DataFrame `nume_col_info_df`, trong đó:
# - Tên của các cột là tên của các cột số trong `survey_df`
# - Tên của các dòng là: "missing_percentage", "min", "lower_quartile", "median", "upper_quartile", "max"
#
# Để dễ nhìn, tất cả các giá trị bạn đều làm tròn với 1 chữ số thập phân bằng phương thức `.round(1)`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "442122ad4ec86e8b7f1a9c4ecca1f6e7", "grade": false, "grade_id": "cell-d1503f01712c873a", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Work only on the float columns (all numeric survey answers are floats here).
col_only_numeric = survey_df.select_dtypes(include=['float32', 'float64']) #select just column float
# describe() already yields count/min/quartiles/max; drop the unwanted mean/std.
nume_col_info_df = col_only_numeric.describe().transpose().drop(columns=['mean', 'std']) #remove 2 rows mean and std
# Overwrite 'count' with the missing-value percentage. isnull().sum() is taken
# over the whole frame, but the assignment aligns on column labels, so only the
# numeric columns present in nume_col_info_df are actually picked up.
nume_col_info_df['count'] = survey_df.isnull().sum() * 100.0 / len(survey_df) #calc % missing
my_dict_info = {'count': 'missing_percentage', '25%': 'lower_quartile', '50%': 'median', '75%': 'upper_quartile'}
# Rename to the required labels, round to 1 decimal, and put stats back on rows.
nume_col_info_df = nume_col_info_df.rename(columns=my_dict_info).round(1).transpose() #rename and round
nume_col_info_df
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "d882c48546d8d70df000f2ac28c8ce0c", "grade": true, "grade_id": "cell-b19aff83344102f8", "locked": true, "points": 0.5, "schema_version": 3, "solution": false, "task": false}
# TEST
assert nume_col_info_df.shape == (6, 5)
data = nume_col_info_df.loc[['missing_percentage', 'min', 'lower_quartile', 'median', 'upper_quartile', 'max'],
['Age', 'ConvertedComp', 'WorkWeekHrs', 'YearsCode', 'YearsCodePro']].values
correct_data = np.array([[ 29.5, 46.1, 36.2, 10.5, 28.1],
[ 1. , 0. , 1. , 0. , 0. ],
[ 24. , 24648. , 40. , 6. , 3. ],
[ 29. , 54049. , 40. , 10. , 6. ],
[ 35. , 95000. , 44. , 17. , 12. ],
[ 279. , 2000000. , 475. , 51. , 51. ]])
assert np.array_equal(data, correct_data)
# -
# **Có giá trị không hợp lệ trong mỗi cột không? (không xét giá trị thiếu)**
# - Cột "Age": bạn hãy tính số lượng giá trị không hợp lệ của cột "Age" (< giá trị tương ứng trong cột "YearsCode" HOẶC < giá trị tương ứng trong cột "YearsCodePro") và lưu kết quả vào biến `num_invalid_Age_vals`.
# - Cột "WorkWeekHrs" (số giờ làm việc trung bình một tuần): ta thấy max là 475 giờ! Trong khi đó, 7 ngày * 24 giờ = 168 giờ! Bạn hãy tính số lượng giá trị không hợp lệ của cột "WorkWeekHrs" (> 24 * 7) và lưu kết quả vào biến `num_invalid_WorkWeekHrs`.
# - Cột "YearsCode": bạn hãy tính số lượng giá trị không hợp lệ của cột "YearsCode" (< giá trị tương ứng trong cột "YearsCodePro" HOẶC > giá trị tương ứng trong cột "Age") và lưu kết quả vào biến `num_invalid_YearsCode`.
# - Cột "YearsCodePro": bạn hãy tính số lượng giá trị không hợp lệ của cột "YearsCodePro" (> giá trị tương ứng trong cột "YearsCode" HOẶC > giá trị tương ứng trong cột "Age") và lưu kết quả vào biến `num_invalid_YearsCodePro`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "90f9bb6ed444c8392723dd5aca893623", "grade": false, "grade_id": "cell-59a58bdc5f54bb25", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# A value is invalid when one of the pairwise consistency rules below is broken.
# Comparisons against NaN evaluate to False, so missing values are never flagged.
not_valid_age = (survey_df['Age'] < survey_df['YearsCode']) | (survey_df['Age'] < survey_df['YearsCodePro'])
not_valid_workweekhrs = survey_df['WorkWeekHrs'] > 24 * 7
not_valid_yearscode = (survey_df['YearsCode'] < survey_df['YearsCodePro']) | (survey_df['YearsCode'] > survey_df['Age'])
not_valid_yearscodepro = (survey_df['YearsCodePro'] > survey_df['YearsCode']) | (survey_df['YearsCodePro'] > survey_df['Age'])
# Summing a boolean mask counts its True entries.
num_invalid_Age_vals = not_valid_age.sum()
num_invalid_WorkWeekHrs_vals = not_valid_workweekhrs.sum()
num_invalid_YearsCode_vals = not_valid_yearscode.sum()
num_invalid_YearsCodePro_vals = not_valid_yearscodepro.sum()
print(num_invalid_WorkWeekHrs_vals)
print(num_invalid_Age_vals)
print(num_invalid_YearsCode_vals)
print(num_invalid_YearsCodePro_vals)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "d077cbc3ef8d27ae7380e560cf825ccf", "grade": true, "grade_id": "cell-386769471c5b4b8f", "locked": true, "points": 0.5, "schema_version": 3, "solution": false, "task": false}
# TEST
assert num_invalid_WorkWeekHrs_vals == 62
assert num_invalid_Age_vals == 16
assert num_invalid_YearsCode_vals == 499
assert num_invalid_YearsCodePro_vals == 486
# -
# Do số lượng các giá trị không hợp lệ cũng khá ít nên ta có thể tiền xử lý bằng cách xóa các dòng chứa các giá trị không hợp lệ.
# ---
# ## Tiền xử lý (0.5đ)
# Bạn sẽ thực hiện tiền xử lý để xóa đi các dòng của DataFrame `survey_df` mà chứa ít nhất là một giá trị không hợp lệ. Sau khi tiền xử lý thì `survey_df` sẽ thay đổi.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "ce7e1b5a79e5743055a026055a75c175", "grade": false, "grade_id": "cell-6a6242ef6d2c8c65", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Keep only rows where none of the four invalid-value masks (computed in the
# exploration step above) fires; ~ negates the combined boolean mask.
#use logic not to inverse
#survey_df = survey_df[~not_valid_age]
#survey_df = survey_df[~not_valid_workweekhrs]
#survey_df = survey_df[~not_valid_yearscode]
#survey_df = survey_df[~not_valid_yearscodepro]
# Combining the masks first also avoids the "boolean series will be reindexed"
# warning that chained filtering with stale masks would trigger.
survey_df = survey_df[~(not_valid_age | not_valid_workweekhrs | not_valid_yearscode | not_valid_yearscodepro)]
print(len(survey_df))
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "93bbf1f7cc93f01d800d703aeb494f04", "grade": true, "grade_id": "cell-9d04f770468db3c8", "locked": true, "points": 0.5, "schema_version": 3, "solution": false, "task": false}
# TEST
assert len(survey_df) == 63900
# -
# ---
# ## Quay lại bước khám phá dữ liệu
# ### Với mỗi cột có kiểu dữ liệu không phải dạng số, các giá trị được phân bố như thế nào? (1đ)
# Với các cột có kiểu dữ liệu không phải số, bạn sẽ tính:
# - Tỉ lệ % (từ 0 đến 100) các giá trị thiếu
# - Số lượng các giá trị (các giá trị ở đây là các giá trị khác nhau và ta không xét giá trị thiếu): với cột mà ứng với câu hỏi dạng multichoice (ví dụ, cột "DevType"), mỗi giá trị có thể chứa nhiều choice (các choice được phân tách bởi dấu chấm phẩy), và việc đếm trực tiếp các giá trị không có nhiều ý nghĩa lắm vì số lượng tổ hợp các choice là khá nhiều; một cách khác tốt hơn mà bạn sẽ làm là đếm số lượng các choice
# - Tỉ lệ % (từ 0 đến 100) của mỗi giá trị được sort theo tỉ lệ % giảm dần (ta không xét giá trị thiếu, tỉ lệ là tỉ lệ so với số lượng các giá trị không thiếu): bạn dùng dictionary để lưu, key là giá trị, value là tỉ lệ %; với cột mà ứng với câu hỏi dạng multichoice, cách làm tương tự như ở trên
#
# Bạn sẽ lưu kết quả vào DataFrame `cate_col_info_df`, trong đó:
# - Tên của các cột là tên của các cột không phải số trong `survey_df`
# - Tên của các dòng là: "missing_percentage", "num_values", "value_percentages"
#
# Để dễ nhìn, tất cả các giá trị bạn đều làm tròn với 1 chữ số thập phân bằng phương thức `.round(1)`.
#
# Gợi ý: có thể bạn sẽ muốn dùng [phương thức `explode`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.explode.html).
# Columns whose survey question is multiple-choice: a single answer can hold
# several choices separated by ';'.
multichoice_cols = ['DevType', 'Gender', 'JobFactors',
                    'LanguageWorkedWith', 'LanguageDesireNextYear',
                    'MiscTechWorkedWith', 'MiscTechDesireNextYear',
                    'NEWCollabToolsWorkedWith', 'NEWCollabToolsDesireNextYear',
                    'PlatformWorkedWith', 'PlatformDesireNextYear',
                    'NEWStuck']
# + deletable=false nbgrader={"cell_type": "code", "checksum": "84e72c4f9ef336aad46d35d52a8c5ee3", "grade": false, "grade_id": "cell-c1cfa0a88177079d", "locked": false, "schema_version": 3, "solution": true, "task": false}
pd.set_option('display.max_colwidth', 100) # for readability
pd.set_option('display.max_columns', None) # for readability
# YOUR CODE HERE
#raise NotImplementedError()
# Start from the non-numeric (categorical) columns only.
exclude_numeric = survey_df.select_dtypes(exclude=['float32', 'float64']) #exclude numeric first
# Missing-value percentage per categorical column.
missing_percentage = exclude_numeric.isnull().sum() * 100.0 / len(survey_df) #calc % missing
missing_percentage = pd.DataFrame(missing_percentage.round(1))
# Columns are survey-column names after the later transpose; this names the row.
cate_col_info_df = missing_percentage.rename(columns={0:"missing_percentage"}) #rename column
cate_col_info_df
# -
# Column names of the non-numeric columns (cate_col_info_df currently has one
# row per column name, so its transpose exposes them as columns).
get_name_cate_col_info_df = cate_col_info_df.transpose().columns
get_name_cate_col_info_df
# num_values: number of distinct values (distinct choices for multichoice cols).
# value_percentages: {value: % among non-missing answers}; value_counts() is
# already sorted by count descending, and the dict preserves that order.
num_values = {}
value_percentages = {}
for col in get_name_cate_col_info_df:
    answers = exclude_numeric[col]
    if col in multichoice_cols:
        # One answer can hold several ';'-separated choices:
        # count choices, not choice combinations.
        answers = answers.str.split(';').explode()
    counts = answers.value_counts()          # NaN is excluded automatically
    n_answered = answers.notnull().sum()     # denominator: non-missing answers
    num_values[col] = len(counts)
    value_percentages[col] = {
        value: round(count * 100.0 / n_answered, 1)
        for value, count in counts.items()
    }
# Stack the three required rows into one DataFrame (one column per survey
# column). pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
cate_col_info_df = pd.concat([
    cate_col_info_df.transpose(),
    pd.DataFrame([num_values], index=['num_values']),
    pd.DataFrame([value_percentages], index=['value_percentages']),
])
cate_col_info_df
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "fe2ab946f8702d5d5233c4746646bae7", "grade": true, "grade_id": "cell-e748038de60d2ab7", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false}
# TEST
c = cate_col_info_df['MainBranch']
assert c.loc['missing_percentage'] == 0.5
assert c.loc['num_values'] == 5
assert c.loc['value_percentages']['I am a developer by profession'] == 73.5
c = cate_col_info_df['Hobbyist']
assert c.loc['missing_percentage'] == 0.1
assert c.loc['num_values'] == 2
assert c.loc['value_percentages']['Yes'] == 78.2
c = cate_col_info_df['DevType']
assert c.loc['missing_percentage'] == 23.6
assert c.loc['num_values'] == 23
assert c.loc['value_percentages']['Academic researcher'] == 2.2
c = cate_col_info_df['PlatformWorkedWith']
assert c.loc['missing_percentage'] == 16.5
assert c.loc['num_values'] == 16
assert c.loc['value_percentages']['Docker'] == 10.6
# -
# ---
# ## Đặt câu hỏi
# Sau khi khám phá dữ liệu, ta đã hiểu hơn về dữ liệu. Bây giờ, ta hãy xem thử có câu hỏi nào có thể được trả lời bằng dữ liệu này.
#
# **Một câu hỏi có thể có là:** Platform nào (Windows, Linux, Docker, AWS, ...) được yêu thích nhất, platform nào được yêu thích nhì, platform nào được yêu thích ba, ...?
#
# Một platform được xem là được yêu thích nếu một người đã dùng platform này (cột "PlatformWorkedWith") và muốn tiếp tục dùng platform trong năm kế (cột "PlatformDesireNextYear").
#
# **Trả lời được câu hỏi này sẽ** phần nào giúp ta định hướng là nên tập trung học platform nào để có thể chuẩn bị cho tương lai (mình nói "phần nào" vì ở đây dữ liệu chỉ giới hạn trong phạm vi những người làm khảo sát của StackOverflow).
# ---
# ## Tiền xử lý
# Nếu bạn thấy cần thực hiện thêm thao tác tiền xử lý để chuẩn bị dữ liệu cho bước phân tích thì bạn làm ở đây. Bước này là không bắt buộc.
# +
# YOUR CODE HERE (OPTION)
# -
# ---
# ## Phân tích dữ liệu (2.25đ)
# Bây giờ, bạn sẽ thực hiện phân tích dữ liệu để trả lời cho câu hỏi ở trên. Cụ thể các bước như sau:
# - Bước 1: tính Series `most_loved_platforms`, trong đó:
# - Index là tên platform (ở bước khám phá dữ liệu, bạn đã thấy có tất cả 16 platform)
# - Data là tỉ lệ % (từ 0 đến 100, được làm tròn với một chữ số thập phân bằng phương thức `round(1)`) được yêu thích (được sort giảm dần)
# - Bước 2: từ Series `most_loved_platforms`, bạn vẽ bar chart:
# - Bạn cho các bar nằm ngang (cho dễ nhìn)
# - Bạn đặt tên trục hoành là "Tỉ lệ %"
# Code bước 1.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "0a9055745bab2ee00733aac0a056b17f", "grade": false, "grade_id": "cell-6d34c88e2fd46b33", "locked": false, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Pair every platform a respondent worked with against every platform they want
# next year; a pair where both sides are the same platform is one "loved" vote.
is_platform_favourite_df = survey_df[['PlatformWorkedWith', 'PlatformDesireNextYear']]
is_platform_favourite_df.loc[:, 'PlatformWorkedWith'] = is_platform_favourite_df['PlatformWorkedWith'].str.split(';') #first split by delimiter
is_platform_favourite_df = is_platform_favourite_df.explode('PlatformWorkedWith').dropna() #explode and drop rows with NaN
is_platform_favourite_df.loc[:, 'PlatformDesireNextYear'] = is_platform_favourite_df['PlatformDesireNextYear'].str.split(';') #first split by delimiter
is_platform_favourite_df = is_platform_favourite_df.explode('PlatformDesireNextYear').dropna() #explode and drop rows with NaN
# Keep the rows where worked-with platform == desired-next-year platform.
duplicate_platform_df = is_platform_favourite_df[is_platform_favourite_df['PlatformDesireNextYear'] == is_platform_favourite_df['PlatformWorkedWith']]
most_loved_platforms = duplicate_platform_df.groupby(['PlatformWorkedWith']).count()
most_loved_platforms = most_loved_platforms.sort_values(by="PlatformDesireNextYear", ascending=False)
# Percentage is relative to the total number of "loved" votes across platforms.
most_loved_platforms['PlatformDesireNextYear'] = ((most_loved_platforms['PlatformDesireNextYear']*100.0)/len(duplicate_platform_df)).round(1)
#convert to series for running test
most_loved_platforms = most_loved_platforms.iloc[:, 0]
most_loved_platforms
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "0e1af6ae8a0d65cf90e9d8d9276bf6bb", "grade": true, "grade_id": "cell-1d22896d0f3f2394", "locked": true, "points": 2, "schema_version": 3, "solution": false, "task": false}
# TEST
assert len(most_loved_platforms) == 16
assert most_loved_platforms.loc['Linux'] == 20.2
assert most_loved_platforms.loc['Windows'] == 14.6
assert most_loved_platforms.loc['Docker'] == 12.3
# -
# Code bước 2.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "e9113040c23a1b8cbfde861c4efe685f", "grade": true, "grade_id": "cell-5395c611d6065339", "locked": false, "points": 0.25, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Horizontal bar chart of the "loved" percentages (index = platform names).
fig, ax = plt.subplots(figsize=(10, 7))
ax.tick_params(labelsize=12)
ax.set_xlabel("Tỉ lệ %", fontsize=15)
ax.barh(most_loved_platforms.index, most_loved_platforms.values);
# -
# Bạn đã hiểu tại sao mình khuyên bạn là nên tập làm quen dần với các câu lệnh của Linux chưa 😉
# ---
# ## Đặt câu hỏi của bạn (1.5đ)
# Bây giờ, đến lượt bạn phải tự suy nghĩ và đưa ra câu hỏi mà có thể trả lời bằng dữ liệu. Ngoài việc đưa ra câu hỏi, bạn cũng phải giải thích để người đọc thấy nếu trả lời được câu hỏi thì sẽ có lợi ích gì. Bạn nên sáng tạo một xíu, không nên đưa ra câu hỏi cùng dạng với câu hỏi của mình ở trên.
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "8e88fb70d5a2aac41af1566276a95321", "grade": true, "grade_id": "cell-2e7a92ae99ca7449", "locked": false, "points": 1.5, "schema_version": 3, "solution": true, "task": false}
# Một câu hỏi có thể có là: Tiền lương trung bình ở các ngôn ngữ lập trình khác nhau, đất nước khác nhau, tình trạng học vấn của người đó... sẽ khác nhau như thế nào
#
# Trả lời được câu hỏi này sẽ phần nào giúp ta định hướng là nên tập trung học ngôn ngữ nào, có nên học lên cao hay không hay là đất nước nào ta nên đến đó để làm việc nếu có cơ hội... để có thể có tiền lương tốt hơn trong tương lai
# -
# ---
# ## Tiền xử lý để chuẩn bị dữ liệu cho bước phân tích để trả lời cho câu hỏi của bạn
# Phần này là không bắt buộc.
# YOUR CODE HERE (OPTION)
# Keep only the columns that could plausibly relate to salary (ConvertedComp).
all_cols = ['Country', 'DevType', 'EdLevel', 'Employment', 'LanguageWorkedWith', 'MiscTechWorkedWith',
            'NEWCollabToolsWorkedWith', 'NEWLearn', 'NEWOvertime', 'OpSys', 'PlatformWorkedWith',
            'Age', 'ConvertedComp', 'WorkWeekHrs', 'YearsCode', 'YearsCodePro']
# Drop rows without a salary, fill missing numeric answers with 0, and fill the
# remaining (categorical) missing answers with the sentinel 'unk'.
salary_df = survey_df[all_cols].copy()  # .copy() avoids SettingWithCopy warnings below
salary_df = salary_df.dropna(subset=['ConvertedComp'])
salary_df[['Age', 'WorkWeekHrs', 'YearsCode', 'YearsCodePro']] = salary_df[['Age', 'WorkWeekHrs', 'YearsCode', 'YearsCodePro']].fillna(0)
# BUG FIX: fillna returns a new DataFrame; the original call discarded the result.
salary_df = salary_df.fillna('unk')
# Các quốc gia có lương trung bình cao nhất thì Mỹ vẫn là 1 quốc gia nổi trội nhất trên thế giới
# Median salary per country. numeric_only=True keeps the aggregation working on
# pandas >= 2.0, where median() over object columns raises a TypeError.
salary_by_country = salary_df.groupby(['Country'], as_index=False).median(numeric_only=True)[['Country', 'ConvertedComp']]
salary_by_country = salary_by_country.sort_values('ConvertedComp', ascending=False)
salary_by_country
# Về trình độ học vấn thì những người có bằng thạc sĩ hoặc tiến sĩ trở lên sẽ có mức lương trung bình nổi trội hơn
# Median salary per education level; numeric_only=True for pandas >= 2.0, where
# median() over object columns raises a TypeError.
salary_by_edlevel = salary_df.groupby(['EdLevel'], as_index=False).median(numeric_only=True)[['EdLevel', 'ConvertedComp']]
salary_by_edlevel = salary_by_edlevel.sort_values('ConvertedComp', ascending=False)
salary_by_edlevel
# Về các ngôn ngữ lập trình có mức lương trung bình cao nhất thì top 5 là Perl, Scala, Go, Rust và Ruby
# Explode the ';'-separated language list so each (salary, language) pair is a row.
# .copy() + direct column assignment avoids mutating a slice of salary_df
# (the original .iloc[:, 1] = ... triggered a SettingWithCopyWarning).
salary_by_languagework = salary_df[['ConvertedComp', 'LanguageWorkedWith']].copy()
salary_by_languagework['LanguageWorkedWith'] = salary_by_languagework['LanguageWorkedWith'].str.split(';')
salary_by_languagework = salary_by_languagework.explode('LanguageWorkedWith')
# numeric_only=True: aggregate the salary column only (pandas >= 2.0 compatible).
salary_by_languagework1 = salary_by_languagework.groupby(['LanguageWorkedWith'], as_index=False).median(numeric_only=True)[['LanguageWorkedWith', 'ConvertedComp']]
salary_by_languagework1 = salary_by_languagework1.sort_values('ConvertedComp', ascending=False)
salary_by_languagework1
# ---
# ## Phân tích dữ liệu để ra câu trả lời cho câu hỏi của bạn (2đ)
# + deletable=false nbgrader={"cell_type": "code", "checksum": "57f44ba4e568b05148da80fc7161fc97", "grade": true, "grade_id": "cell-83e11892184cf811", "locked": false, "points": 2, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
#raise NotImplementedError()
# Turn the (Country, ConvertedComp) frame into a Series indexed by country,
# then draw a horizontal bar per country.
salary_by_country = salary_by_country.set_index('Country')
salary_by_country = salary_by_country.iloc[:, 0]
plt.figure(figsize = (14, 32))
plt.tick_params(labelsize=14)
plt.barh(salary_by_country.index, salary_by_country.values)
plt.xlabel('Mức lương trung bình theo đất nước', fontsize=14);
# -
# Same pattern for education level: Series indexed by EdLevel, horizontal bars.
salary_by_edlevel = salary_by_edlevel.set_index('EdLevel')
salary_by_edlevel = salary_by_edlevel.iloc[:, 0]
plt.barh(salary_by_edlevel.index, salary_by_edlevel.values)
plt.xlabel('Mức lương trung bình theo trình độ học vấn', fontsize=14);
# Same pattern for programming languages: Series indexed by language name.
salary_by_languagework1 = salary_by_languagework1.set_index('LanguageWorkedWith')
salary_by_languagework1 = salary_by_languagework1.iloc[:, 0]
plt.figure(figsize = (12, 8))
plt.barh(salary_by_languagework1.index, salary_by_languagework1.values)
plt.xlabel('Mức lương trung bình theo ngôn ngữ lập trình', fontsize=14);
| HW2/HW2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-2:429704687514:image/datascience-1.0
# ---
# # S3 util
# # バケット一覧を取得
import boto3

# Print the name of every S3 bucket in the account.
# list_buckets() returns a JSON-like dict; bucket records sit under 'Buckets'.
client = boto3.client('s3')
for bucket_info in client.list_buckets()['Buckets']:
    print(bucket_info['Name'])
# # バケットを削除(空にしてから削除実行)
### List the buckets to delete
b_list = [
    '<bucket-name>'
]
import boto3
s3 = boto3.resource('s3')
# DESTRUCTIVE: for each bucket, permanently deletes every object first
# (a bucket must be empty before it can be deleted), then the bucket itself.
for bucket_name in b_list:
    print(bucket_name)
    bucket = s3.Bucket(bucket_name)
    bucket.objects.all().delete()
    bucket.delete()
# # ディレクトリごとダウンロード
# !aws s3 cp --recursive s3://<bucket>/riiid-test-answer-prediction/input/ ../input
# ## boto3で以下のようにもできるらしい[todo]
# https://python5.com/q/fpjkexvj
# +
import boto3
import os
def downloadDirectoryFroms3(bucketName, remoteDirectoryName):
    """Download every object under *remoteDirectoryName* from S3 bucket
    *bucketName*, recreating the key hierarchy as local directories.

    Each object is saved to a local path identical to its S3 key.
    """
    s3_resource = boto3.resource('s3')
    bucket = s3_resource.Bucket(bucketName)
    for obj in bucket.objects.filter(Prefix = remoteDirectoryName):
        # Skip "directory marker" keys: they end with '/' and hold no data,
        # and download_file would fail on them.
        if obj.key.endswith('/'):
            continue
        dirname = os.path.dirname(obj.key)
        # Guard against dirname == '' for top-level keys (os.makedirs('')
        # raises); exist_ok avoids the exists()/makedirs() race of the
        # original code.
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        bucket.download_file(obj.key, obj.key) # save to same path
| s3_util.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="o5OAyd4nluKL" colab_type="text"
# # Generate a Vecsigrafo using Swivel
#
#
# In this notebook we show how to generate a Vecsigrafo based on a subset of the [UMBC corpus](https://ebiquity.umbc.edu/resource/html/id/351/UMBC-webbase-corpus).
#
# We follow the procedure described in [Towards a Vecsigrafo: Portable Semantics in Knowledge-based Text Analytics](https://pdfs.semanticscholar.org/b0d6/197940d8f1a5fa0d7474bd9a94bd9e44a0ee.pdf) and depicted in the following figure:
#
# 
#
#
#
# + [markdown] id="xLwTh2Qhs5Lt" colab_type="text"
# ## Tokenization and Word Sense Disambiguation
#
# The main difference with standard swivel is that:
# - we use word-sense disambiguation on the text as a pre-processing step (Swivel simply uses white-space tokenization)
# - each 'token' in the resulting sequences is composed of a lemma and an optional concept identifier.
#
# ### Disambiguators
# If we are going to apply WSD, we will need some disambiguator strategy. Unfortunately, there are not a lot of open-source high-performance disambiguators available. At [Expert System](https://www.expertsystem.com/), we have a [state-of-the-art disambiguator](https://www.expertsystem.com/products/cogito-cognitive-technology/semantic-technology/disambiguation/) that assigns **syncon**s (our version of synsets) to lemmas in the text.
#
# Since Expert System's disambiguator and semantic KG are proprietary, in this notebook we will be mostly using WordNet (although we may present some results and examples based on Expert System's results). We have implemented a lightweight disambiguation strategy, proposed by [<NAME>., <NAME>., <NAME>., & <NAME>. (2017). Embedding Words and Senses Together via Joint Knowledge-Enhanced Training. CoNLL.](http://arxiv.org/abs/1612.02703), which has allowed us to produce disambiguated corpora based on WordNet 3.1.
#
# To be able to inspect the disambiguated corpus, let's make sure we have access to WordNet in our environment by executing the following cell.
#
#
#
#
#
# + id="7PGZ7dicIUAI" colab_type="code" colab={}
# Fetch the WordNet data files used throughout this notebook.
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
# Sanity check: resolve one of the synset ids that appears in the corpus.
wn.synset('Maya.n.02')
# + [markdown] id="bhQ3aE4oKnIp" colab_type="text"
# ### Tokenizations
# When applying a disambiguator, the tokens are no longer (groups of) words. Each token can contain different types of information, we generally keep the following token information:
# * `t`: text, the original text (possibly normalised, i.e. lower-cased)
# * `l`: lemma, the lemma form of the word
# * `g`: grammar: the grammar type
# * `s`: syncon (or synset) identifier
#
# ### Example WordNet
#
# We have included a small sample of our disambiguated UMBC corpus as part of our [GitHub tutorial repo](https://github.com/HybridNLP2018/tutorial). Execute the following cell to clone the repo, unzip the sample corpus and print the first line of the corpus
# + id="b0s_9qVzw5bd" colab_type="code" colab={}
# %cd /content/
# !git clone https://github.com/hybridnlp/tutorial.git
# %cd /content/tutorial/datasamples/
# !unzip umbc_tlgs_wnscd_5K.zip
# Path of the 5K-line disambiguated UMBC sample unzipped above.
toked_corpus = '/content/tutorial/datasamples/umbc_tlgs_wnscd_5K'
# !head -n1 {toked_corpus}
# %cd /content/
# + [markdown] id="bILoai0BMjT0" colab_type="text"
# You should see, among others, the first line in the corpus, which starts with:
#
# ```
# the%7CGT_ART mayan%7Clem_Mayan%7CGT_ADJ%7Cwn31_Maya.n.03 image%7Clem_image%7CGT_NOU%7Cwn31_effigy.n.01
# ```
#
#
# + [markdown] id="NQ06M4Ow0Y-L" colab_type="text"
# The file included in the github repo for this tutorial is a subset of a disambiguated tokenization for the UMBC corpus, it only contains the first 5 thousand lines of that corpus (the full corpus has about 40 million lines) as we only need it to show the steps necessary to generate embeddings.
#
# The last output, from the cell above, shows the format we are using to represent the tokenized corpus. We use white space to separate the tokens, and have URL encoded each token to avoid mixing up tokens. Since this format is hard to read, we provide a library to inspect the lines in an easy manner. Execute the following cell to display the first two lines in the corpus as a table.
# + id="1NiyMpSIHDys" colab_type="code" colab={}
# %cd /content/
import tutorial.scripts.wntoken as wntoken
import pandas
# Parse the first two corpus lines into per-token dicts, then render them as
# a pandas DataFrame so the token fields (text/lemma/grammar/synset) are
# easy to read.
corpus_tokens = wntoken.open_as_token_dicts(toked_corpus, max_lines=2)
token_columns = ['line', 't', 'l', 'g', 's', 'glossa']
pandas.DataFrame(corpus_tokens, columns=token_columns)
# + [markdown] id="bRkQmHPDwzfN" colab_type="text"
# ### Example Cogito
# As a second example, analysing the original sentence:
#
# EXPERIMENTAL STUDY We conducted an empirical evaluation to assess the effectiveness
#
# using Cogito gives us
# 
#
# We filter some of the words and only keep the lemmas and the syncon ids and encode them into the next sequence of disambiguated tokens:
#
# en#86052|experimental en#2686|study en#76710|conduct en#86047|empirical en#3546|evaluation en#68903|assess
# en#25094|effectiveness
# + [markdown] id="VT1JtXgNluKN" colab_type="text"
# ## Vocabulary and Co-occurrence matrix
#
# Next, we need to count the co-occurrences in the disambiguated corpus. We can either:
# - use **standard swivel prep**: in this case each *<text>|<lemma>|<grammar>|<synset>* tuple will be treated as a separate token. For the example sentence from UMBC, presented above, we would then get that `mayan|lem_Mayan|GT_ADJ|wn31_Maya.n.03` has a co-occurrence count of 1 with `image|lem_image|GT_NOU|wn31_effigy.n.01`. This would result in a very large vocabulary.
# - use **joint-subtoken prep**: in this case, you can specify which individual subtoken information you want to take into account. In this notebook we will use **ls** information, hence each synset and each lemma are treated as separate entities in the vocabulary and will be represented with different embeddings. For the example sentence we would get that `lem_Mayan` has a co-occurrence count of 1 with `wn31_Maya.n.03`, `lem_image` and `wn31_effigy.n.01`.
#
# + id="SmDALHQlluKO" colab_type="code" colab={}
import os
import numpy as np
# + [markdown] id="2_-I2CIdluKS" colab_type="text"
# ### Standard Swivel Prep
# For the **standard swivel prep**, we can simply call `prep` using the `!python` command. In this case we have the `toked_corpus` which contains the disambiguated sequences as shown above. The output will be a set of sharded co-occurrence submatrices as explained in the notebook for creating word vectors.
#
# We set the `shard_size` to 512 since the corpus is quite small. For larger corpora we could use the standard value of 4096.
# + id="wGVS5-xbluKT" colab_type="code" colab={}
# !mkdir /content/umbc/
# !mkdir /content/umbc/coocs
# !mkdir /content/umbc/coocs/tlgs_wnscd_5k_standard
# Destination for the sharded co-occurrence matrices produced by prep.py.
coocs_path = '/content/umbc/coocs/tlgs_wnscd_5k_standard/'
# shard_size=512 because this 5K-line corpus is small (standard value: 4096).
# !python tutorial/scripts/swivel/prep.py --input={toked_corpus} --output_dir={coocs_path} --shard_size=512
# + [markdown] id="hJbMhl5lluKZ" colab_type="text"
# Expected output:
#
# ... tensorflow flags ....
#
# vocabulary contains 8192 tokens
#
# writing shard 256/256
# Wrote vocab and sum files to /content/umbc/coocs/tlgs_wnscd_5k_standard/
# Wrote vocab and sum files to /content/umbc/coocs/tlgs_wnscd_5k_standard/
# done!
#
#
# + id="9WEEg7a9068o" colab_type="code" colab={}
# !head -n15 /content/umbc/coocs/tlgs_wnscd_5k_standard/row_vocab.txt
# + [markdown] id="bRlYK0sZRYne" colab_type="text"
# As the cells above show, applying standard prep results in a vocabulary of over 8K "tokens", however each token is still represented as a URL-encoded combination of the plain text, lemma, grammar type and synset (when available).
# + [markdown] id="nTasIe8rluKb" colab_type="text"
# ### Joint-subtoken Prep
# For the **joint-subtoken prep** step, we have a Java implementation that is not open-source yet (as it is still tied to proprietary code, we are working on refactoring the code so that Cogito subtokens are just a special case). However, we ***provide pre-computed co-occurrence files***.
#
# Although not open source, we describe the steps we executed to help you implement a similar pipeline.
#
# First, we ran our implementation of subtoken prep on the corpus. Notice:
# * we are only including lemma and synset information (i.e. we are not including plain text and grammar information).
# * furthermore, we are filtering the corpus by
# 1. removing any tokens related to punctuation marks (PNT), auxiliary verbs (AUX) and articles (ART), since we think these do not contribute much to the semantics of words.
# 2. replacing tokens with grammar types `ENT` (entities) and `NPH` (proper names) with generic variants `grammar#ENT` and `grammar#NPH` respectively. The rationale is that, depending on the input corpus, names of people or organizations may appear a few times, but may be filtered out if they do not appear enough times. This ensures such tokens are kept in the vocabulary and contribute to the embeddings of words nearby. The main disadvantage is that we will not have some proper names in our final vocabulary.
#
# ```
# java $JAVA_OPTIONS net.expertsystem.word2vec.swivel.SubtokPrep \
# --input C:/hybridNLP2018/tutorial/datasamples/umbc_tlgs_wnscd_5K \
# --output_dir C:/corpora/umbc/coocs/tlgs_wnscd_5K_ls_f/ \
# --expected_seq_encoding TLGS_WN \
# --sub_tokens \
# --output_subtokens "LEMMA,SYNSET" \
# --remove_tokens_with_grammar_types "PNT,AUX,ART" \
# --generalise_tokens_with_grammar_types "ENT,NPH" \
# --shard_size 512
# ```
#
# The output log looked as follows:
#
# ```
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - expected_seq_encoding set to 'TLGS_WN'
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - remove_tokens_with_grammar_types set to PNT,AUX,ART
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - generalise_tokens_with_grammar_types set to ENT,NPH
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - Creating vocab for C:\hybridNLP2018\tutorial\datasamples\umbc_tlgs_wnscd_5K
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - read 5000 lines from C:\hybridNLP2018\tutorial\datasamples\umbc_tlgs_wnscd_5K
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - filtered 166152 tokens from a total of 427796 (38,839%)
# generalised 1899 tokens from a total of 427796 (0,444%)
# full vocab size 21321
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - Vocabulary contains 5632 tokens (21321 full count, 5913 appear > 5 times)
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - Flushing 1279235 co-occ pairs
# INFO net.expertsystem.word2vec.swivel.SubtokPrep - Wrote 121 tmpShards to disk
# ```
# + [markdown] id="PcpcqXJ_OeBk" colab_type="text"
# We have included the output of this process as part of the GitHub repo for the tutorial. We will unzip this folder to inspect the results:
# + id="RRUEmfptOuIr" colab_type="code" colab={}
# !unzip /content/tutorial/datasamples/precomp-coocs-tlgs_wnscd_5K_ls_f.zip -d /content/umbc/coocs/
# Folder holding the pre-computed joint-subtoken co-occurrence shards.
precomp_coocs_path = '/content/umbc/coocs/tlgs_wnscd_5K_ls_f'
# + [markdown] id="WwTFnSPTQBxl" colab_type="text"
# The previous cell extracts the pre-computed co-occurrence shards and defines a variable `precomp_coocs_path` that points to the folder where these shards are stored.
#
# Next, we print the first 10 elements of the vocabulary to see the format that we are using to represent the lemmas and synsets:
# + id="uf6Vas-TP6h2" colab_type="code" colab={}
# !head -n10 {precomp_coocs_path}/row_vocab.txt
# + [markdown] id="OvK8-CaBSOOO" colab_type="text"
# As the output above shows, the vocabulary we get with `subtoken prep` is smaller (5.6K elements instead of over 8K) and it contains individual lemmas and synsets (it also contains *special* elements grammar#ENT and grammar#NPH, as described above).
#
# **More importantly**, the co-occurrence counts take into account the fact that certain lemmas co-occur more frequently with certain other lemmas and synsets, which should be taken into account when learning embedding representations.
# + [markdown] id="YE0VZd9lluKf" colab_type="text"
# ## Learn embeddings from co-occurrence matrix
#
# With the sharded co-occurrence matrices created in the previous section it is now possible to learn embeddings by calling the `swivel.py` script. This launches a tensorflow application based on various parameters (most of which are self-explanatory) :
#
# - `input_base_path`: the folder with the co-occurrence matrix (protobuf files with the sparse matrix) generated above.
# - `submatrix_` rows and columns need to be the same size as the `shard_size` used in the `prep` step.
# - `num_epochs` the number of times to go through the input data (all the co-occurrences in the shards). We have found that for large corpora, the learning algorithm converges after a few epochs, while for smaller corpora you need a larger number of epochs.
#
# Execute the following cell to generate embeddings for the pre-computed co-occurrences.
# + id="Adtnl3HWluKf" colab_type="code" colab={}
# Output folder for the learned embeddings (graph, checkpoints, tsv files).
vec_path = '/content/umbc/vec/tlgs_wnscd_5k_ls_f'
# 40 epochs: per the text above, small corpora need many more passes than
# large ones to converge.
# !python /content/tutorial/scripts/swivel/swivel.py --input_base_path={precomp_coocs_path} \
#    --output_base_path={vec_path} \
#    --num_epochs=40 --dim=150 \
#    --submatrix_rows=512 --submatrix_cols=512
# + [markdown] id="5JItR-RVluKl" colab_type="text"
# This will take a few minutes, depending on your machine.
# The result is a list of files in the specified output folder, including:
# - the tensorflow graph, which defines the architecture of the model being trained
# - checkpoints of the model (intermediate snapshots of the weights)
# - `tsv` files for the final state of the column and row embeddings.
# + id="FORz_vtGluKm" colab_type="code" colab={}
# %ls {vec_path}
# + [markdown] id="IrMmRcouluKq" colab_type="text"
# ### Convert `tsv` files to `bin` file
#
# As we've seen in previous notebooks, the `tsv` files are easy to inspect, but they take too much space and they are slow to load since we need to convert the different values to floats and pack them as vectors. Swivel offers a utility to convert the `tsv` files into a `bin`ary format. At the same time it combines the column and row embeddings into a single space (it simply adds the two vectors for each word in the vocabulary).
# + id="IBtZG82kluKr" colab_type="code" colab={}
# !python /content/tutorial/scripts/swivel/text2bin.py --vocab={precomp_coocs_path}/row_vocab.txt --output={vec_path}/vecs.bin \
# {vec_path}/row_embedding.tsv \
# {vec_path}/col_embedding.tsv
# + [markdown] id="ZXQOe0SWluKw" colab_type="text"
# This adds the `vocab.txt` and `vecs.bin` to the folder with the vectors:
# + id="t1jV7uoGluKx" colab_type="code" colab={}
# %ls {vec_path}
# + [markdown] id="yL26TO9BluK4" colab_type="text"
# ## Inspect the embeddings
#
# As in previous notebooks, we can now use Swivel to inspect the vectors using the `Vecs` class. It accepts a `vocab_file` and a file for the binary serialization of the vectors (`vecs.bin`).
# + id="f3QA6U6nluK5" colab_type="code" colab={}
from tutorial.scripts.swivel import vecs
# + [markdown] id="h3cXtjfYluK7" colab_type="text"
# ...and we can load existing vectors. Here we load some pre-computed embeddings, but feel free to use the embeddings you computed by following the steps above (although, due to random initialization of weight during the training step, your results may be different).
# + id="e7VejLMMluK8" colab_type="code" colab={}
# Load vocab + binary vectors into Swivel's Vecs helper for neighbour queries.
vectors = vecs.Vecs(precomp_coocs_path + '/row_vocab.txt',
                    vec_path + '/vecs.bin')
# + [markdown] id="LJCKmEpAluLB" colab_type="text"
# Next, let's define a basic method for printing the `k` nearest neighbors for a given word:
# + [markdown] id="V8dR-NxRluLH" colab_type="text"
# And let's use the method on a few lemmas and synsets in the vocabulary:
# + id="AffhuN8SluLH" colab_type="code" colab={}
import pandas as pd
# Nearest neighbours of a lemma in the small 5K-sample embedding space.
pd.DataFrame(vectors.k_neighbors('lem_California'))
# + id="6IE6BC63j9KW" colab_type="code" colab={}
pd.DataFrame(vectors.k_neighbors('lem_semantic'))
# + id="ffWNLUVHkJfr" colab_type="code" colab={}
pd.DataFrame(vectors.k_neighbors('lem_conference'))
# + id="_K8HU-LDkjW-" colab_type="code" colab={}
# Synset ids can be queried exactly like lemmas.
pd.DataFrame(vectors.k_neighbors('wn31_conference.n.01'))
# + [markdown] id="0_Q1p9DbluLM" colab_type="text"
# Note that using the Vecsigrafo approach gets us very different results than when using standard swivel (notebook 01):
# * the results now include concepts (synsets), besides just words. Without further information, this makes interpreting the results harder since we now only have the concept id, but we can search for these concepts in the underlying KG (WordNet in this case) to explore the semantic network and get further information.
#
# Of course, results may not be very good, since these have been derived from a very small corpus (5K lines from UMBC). In the excercise below, we encourage you to download and inspect pre-computed embeddings based on the full UMBC corpus.
# + id="PCG_FvU6luLM" colab_type="code" colab={}
# Multi-word lemmas are single vocabulary entries in the Vecsigrafo.
pd.DataFrame(vectors.k_neighbors('lem_semantic web'))
# + id="7rKAc6T_luLm" colab_type="code" colab={}
pd.DataFrame(vectors.k_neighbors('lem_ontology'))
# + [markdown] id="KAZGM9PcluLr" colab_type="text"
# # Conclusion and Exercises
#
# In this notebook we generated a vecsigrafo based on a disambiguated corpus. The resulting embedding space combines concept ids and lemmas.
#
# We have seen that the resulting space:
# 1. may be harder to inspect due to the potentially opaque concept ids
# 2. clearly different than standard swivel embeddings
#
# The question is: are the resulting embeddings *better*?
#
# To get an answer, in the next notebook, we will look at **evaluation methods for embeddings**.
# + [markdown] id="oOgdJ4BBgS-E" colab_type="text"
# ## Exercise 1: Explore full precomputed embeddings
#
# We have also pre-computed embeddings for the full UMBC corpus. The provided `tar.gz` file is about 1.1GB, hence downloading it may take several minutes.
# + id="6bxeWC28gRzy" colab_type="code" colab={}
# Pre-computed embeddings for the *full* UMBC corpus (~1.1 GB download).
full_precomp_url = 'https://zenodo.org/record/1446214/files/vecsigrafo_umbc_tlgs_ls_f_6e_160d_row_embedding.tar.gz'
full_precomp_targz = '/content/umbc/vec/tlgs_wnscd_ls_f_6e_160d_row_embedding.tar.gz'
# !wget {full_precomp_url} -O {full_precomp_targz}
# + [markdown] id="Zr_9FdujqKMA" colab_type="text"
# Next, we have to unpack the vectors:
# + id="Xg1Og_PVnMZ-" colab_type="code" colab={}
# !tar -xzf {full_precomp_targz} -C /content/umbc/vec/
# Directory that the tarball unpacks into.
full_precomp_vec_path = '/content/umbc/vec/vecsi_tlgs_wnscd_ls_f_6e_160d'
# + id="H8kHn673qBDW" colab_type="code" colab={}
# %ls /content/umbc/vec/vecsi_tlgs_wnscd_ls_f_6e_160d/
# + [markdown] id="xomWQ33ZugIr" colab_type="text"
# The data only includes the `tsv` version of the vectors, so we need to convert these to the binary format that Swivel uses. And for that, we also need a `vocab.txt` file, which we can derive from the tsv as follows:
# + id="TI08XC90tJW9" colab_type="code" colab={}
# Derive vocab.txt from the first (word) column of the row-embedding tsv,
# streaming line by line instead of materialising the whole vocabulary.
with open(full_precomp_vec_path + '/row_embedding.tsv', 'r', encoding='utf_8') as vec_lines, \
        open(full_precomp_vec_path + '/vocab.txt', 'w', encoding='utf_8') as f:
    for line in vec_lines:
        print(line.split('\t')[0].strip(), file=f)
# + [markdown] id="WvstUmD18uFh" colab_type="text"
# Let's inspect the vocabulary:
# + id="lE0D6C5EuY5c" colab_type="code" colab={}
# !wc -l {full_precomp_vec_path}/vocab.txt
# + id="tfI5DsNZv3_6" colab_type="code" colab={}
# !grep 'wn31_' {full_precomp_vec_path}/vocab.txt | wc -l
# + id="hsaxAWIwv_AZ" colab_type="code" colab={}
# !grep 'lem_' {full_precomp_vec_path}/vocab.txt | wc -l
# + [markdown] id="n33cYIzvvy1P" colab_type="text"
# As we can see, the embeddings have a vocabulary of just under 1.5M entries, 56K of which are synsets and most of the rest are lemmas.
#
# Next, convert the `tsv` into the swivel's binary format. This can take a couple of minutes.
# + id="RA_Nkam8wk4r" colab_type="code" colab={}
# !python /content/tutorial/scripts/swivel/text2bin.py --vocab={full_precomp_vec_path}/vocab.txt --output={full_precomp_vec_path}/vecs.bin \
# {full_precomp_vec_path}/row_embedding.tsv
# + [markdown] id="kqTmZaMOxp6g" colab_type="text"
# Now, we are ready to load the vectors.
# + id="INgVB_9PwW7C" colab_type="code" colab={}
# Load the full-UMBC vectors (≈1.5M-entry vocabulary) for comparison.
vecsi_wn_umbc = vecs.Vecs(full_precomp_vec_path + '/vocab.txt',
                          full_precomp_vec_path + '/vecs.bin')
# + id="ELtP4P4px_ao" colab_type="code" colab={}
pd.DataFrame(vecsi_wn_umbc.k_neighbors('lem_California'))
# + id="9KVBK500yL_q" colab_type="code" colab={}
pd.DataFrame(vecsi_wn_umbc.k_neighbors('lem_semantic'))
# + id="DbgsE-ekyShq" colab_type="code" colab={}
pd.DataFrame(vecsi_wn_umbc.k_neighbors('lem_conference'))
# + id="d3nPDt0TyeOZ" colab_type="code" colab={}
# Show the WordNet gloss next to each synset's neighbours for context.
print(wn.synset('conference.n.01').definition())
pd.DataFrame(vecsi_wn_umbc.k_neighbors('wn31_conference.n.01'))
# + id="4p6Vubw5yg2K" colab_type="code" colab={}
print(wn.synset('conference.n.03').definition())
pd.DataFrame(vecsi_wn_umbc.k_neighbors('wn31_conference.n.03'))
| 03_vecsigrafo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.8 64-bit (system)
# name: python3
# ---
# # ML Semana 2 Grupo #777
# - <NAME>
# - <NAME>
# - <NAME>
# ### Configuración del ambiente:
# Imports
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.metrics import confusion_matrix
#Importamos la libreria sklearn
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from mlxtend.plotting import plot_confusion_matrix
from sklearn import tree
# ### Importo los datos
# Importo los datos usando <code>pandas.read_csv()</code>
# Load the video-game sales CSV (semicolon-separated) into DataFrame "df".
url_csv = "vgsalesv3.csv"
df = pd.read_csv(url_csv, sep=';')
df.head()
# Remove columns that are not useful for the analysis.
df.drop(columns=["Rank", "NA_Sales", "EU_Sales", "Other_Sales", "Publisher", "Year", "Name"], inplace=True)
# Drop rows missing the sales figures we rely on (Global_Sales is the
# target variable); one call over both columns replaces the two originals.
df.dropna(subset=['Global_Sales', 'JP_Sales'], inplace=True)
# +
#Factorizamos la columna genre y nos quedamos con sus valores numericos ignorando la otra columna correspondiente al valor
#df['Factor_Platform'] = df['Platform'].factorize()[0]
#df['Factor_Genre'] = df['Genre'].factorize()[0]
# Work on a copy of the frame without the categorical text columns.
df_copy = df.drop(columns=["Platform", "Genre"])
# -
# One-hot encode the platform and append the dummy columns to the copy.
df_platform = pd.get_dummies(df['Platform'])
df_copy2 = pd.concat([df_copy, df_platform], axis=1)
df_copy2.head()
###
# Target variable: the game's genre.
genre = df['Genre']
# 80/20 train/test split with a fixed seed for reproducibility.
xTrain,xTest,yTrain,yTest = train_test_split(df_copy2,genre, test_size = 0.20, random_state=0)
# +
# Random forest over the one-hot features; fixed random_state keeps runs
# reproducible, min_samples_leaf=8 constrains the minimum leaf size.
model = RandomForestClassifier(n_estimators=19, random_state = 2016, min_samples_leaf = 8)
model.fit(xTrain, yTrain)
# Print the test accuracy explicitly: a bare expression in the middle of a
# multi-statement cell is discarded by the notebook, so the original score
# was computed but never displayed.
print(model.score(xTest, yTest))
Ypred = model.predict(xTest)
# Build and plot the confusion matrix of predicted vs. true genres.
matriz = confusion_matrix(yTest, Ypred)
plot_confusion_matrix(conf_mat=matriz, figsize=(6,6), show_normed=False)
plt.tight_layout()
# -
# Build a decision-tree classifier for comparison with the random forest.
dt= tree.DecisionTreeClassifier()
# +
dt.fit(xTrain,yTrain)
# Print the accuracy: inside this multi-statement cell the bare
# `dt.score(...)` expression was evaluated but its value silently discarded.
print(dt.score(xTest, yTest))
# Plot the confusion matrix for the decision tree.
Ypred = dt.predict(xTest)
matriz = confusion_matrix(yTest,Ypred)
plot_confusion_matrix(conf_mat=matriz, figsize=(6,6), show_normed=False)
plt.tight_layout()
| Semana2/ML_Tarea2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
# -
# # Introduction to Neural Networks
# ## Live Demos
# 8x8 hand-written digit images flattened to 64 features, labels 0-9.
digits_data, digits_labels = load_digits().data, load_digits().target
digits_data.shape
digits_labels.shape
# Per-class sample counts.
pd.Series(digits_labels).groupby(digits_labels).size()
# Preview the first ten digit images together with their labels.
for image, label in zip(digits_data[:10], digits_labels[:10]):
    plt.imshow(image.reshape(8, 8), cmap="gray")
    plt.title("Label: {}".format(label))
    plt.show()
# Scale every pixel feature to [0, 1] before training the network.
digits_data = MinMaxScaler().fit_transform(digits_data)
# Stratified 80/20 split keeps the per-digit class balance in both sets.
digits_data_train, digits_data_test, digits_labels_train, digits_labels_test = train_test_split(
    digits_data,
    digits_labels,
    train_size = 0.8,
    stratify = digits_labels)
# Multi-layer perceptron with a single hidden layer of 10 units.
nn = MLPClassifier(hidden_layer_sizes = (10,))
nn.fit(digits_data_train, digits_labels_train)
def get_scores(estimator):
    """Print train- and test-set accuracy of a fitted estimator."""
    for label, X, y in (("Train: ", digits_data_train, digits_labels_train),
                        ("Test: ", digits_data_test, digits_labels_test)):
        print(label, estimator.score(X, y))
get_scores(nn)
# Weight-matrix shapes: input->hidden and hidden->output layers
# (trailing comma makes this a tuple expression).
nn.coefs_[0].shape, nn.coefs_[1].shape,
len(nn.intercepts_)
# A deeper but narrower network: three hidden layers of 3, 5 and 3 units.
deep_nn = MLPClassifier(hidden_layer_sizes = (3, 5, 3))
deep_nn.fit(digits_data_train, digits_labels_train)
get_scores(deep_nn)
| MachineLearning/07.IntroductionToNeuralNetworks/Intro to Neural Networks Demos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. 모델 성능 평가
import glob
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
from tensorflow.keras.models import load_model
import model_evaluation_utils as meu
# Load the trained basic-CNN model; alternative models are left commented.
img_basic_cnn = load_model('12-09_basic_cnn.h5')
#img_aug_cnn = load_model('cats_dogs_cnn_img_aug.h5')
#tl_img_aug_finetune_cnn = load_model('10-06-me.h5')
## Base image shape: 150x150 RGB.
IMG_DIM = (150, 150)
input_shape = (150, 150, 3)
# PEP 8 (E731): use def instead of binding a lambda to a name — the
# functions pick up proper names for tracebacks and can carry docstrings.
def num2class_label_transformer(l):
    """Map numeric predictions to class names: 0 -> 'cat', anything else -> 'dog'."""
    return ['cat' if x == 0 else 'dog' for x in l]
def class2num_label_transformer(l):
    """Map class names to numeric labels: 'cat' -> 0, anything else -> 1."""
    return [0 if x == 'cat' else 1 for x in l]
# ### 샘플 테스트 이미지로 모델 예측
# +
sample_img_path = 'my_cat.jpg'
#sample_img_path = "C:\\Users\\user\\Documents\\한국선급\\CNG_P1\\2_104449_11_11.png"
#sample_img_path = 'dog_my.jpg'
#sample_img_path = 'tiger.jpg'
# Load and resize to the model's input size, add a batch axis, and scale
# pixel values to [0, 1] to match the training preprocessing.
sample_img = load_img(sample_img_path, target_size=IMG_DIM)
sample_img_tensor = img_to_array(sample_img)
sample_img_tensor = np.expand_dims(sample_img_tensor, axis=0)
sample_img_tensor /= 255.
print(sample_img_tensor.shape)
# -
plt.imshow(sample_img_tensor[0])
# NOTE(review): Sequential.predict_classes was removed in TensorFlow 2.6;
# np.argmax(model.predict(x), axis=-1) is the modern equivalent — confirm
# the pinned TensorFlow version before upgrading.
cnn_basic_aug_prediction = num2class_label_transformer(img_basic_cnn.predict_classes(sample_img_tensor, verbose=0))
#cnn_img_aug_prediction = num2class_label_transformer(img_aug_cnn.predict_classes(sample_img_tensor, verbose=0))
#tlearn_cnn_finetune_img_aug_prediction = num2class_label_transformer(tl_img_aug_finetune_cnn.predict_classes(sample_img_tensor, verbose=0))
print('Predictions for our sample image:\n',
      '\nCNN with Img Augmentation:', cnn_basic_aug_prediction)
#      '\nPre-trained CNN with Fine-tuning & Img Augmentation (Transfer Learning):', tlearn_cnn_finetune_img_aug_prediction)
# Class probabilities for the same sample image.
img_basic_cnn.predict_proba(sample_img_tensor, verbose=0)
# +
#tl_img_aug_finetune_cnn.predict_proba(sample_img_tensor, verbose=0)
# -
| transfer_learning/(model)evaluation_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Introduction
#
# I was curious which network provider's numbers appear most often in my phone contacts log.
#
# So, I ran this analysis to find out and decide which phone call bundle I should pick!
# +
# import pandas for dataframe operations
import pandas as pd
#import matplotlib for visualizing results
import matplotlib.pyplot as plt
# +
# Read the exported phone-contacts CSV; expects a 'Phone Number' column.
contacts = pd.read_csv('Phone Contacts.csv')
# +
# Preview the first rows of the dataframe.
contacts.head()
# +
# Divide the contact numbers among the known providers in my country; each
# provider is recognised by the third digit of the phone number.
voda = []
eti = []
mob = []
other = []
# Iterate the column directly: the original `for i in range(len(contacts)-1)`
# loop was off by one and silently skipped the last contact.
for number in contacts['Phone Number']:
    digit = number[3]
    if digit == '0':
        voda.append(number)
    elif digit == '1':
        eti.append(number)
    elif digit == '2':
        mob.append(number)
    else:
        other.append(number)
# +
# Plot provider shares as a pie chart to see which one dominates my contacts.
# %matplotlib inline
labels = 'Vodafone', 'Etisalat', 'Mobinil', 'Other'
sizes = [len(voda), len(eti), len(mob), len(other)]
colors = ['red', 'yellowgreen', 'orange', 'purple']
# autopct prints each slice's percentage with one decimal place.
plt.pie(sizes, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Phone Contact Numbers Analysis")
plt.show()
# -
# # Findings
#
# As I found above, the Vodafone service provider has the biggest share of numbers on my phone!
#
# So, I suppose a Vodafone call bundle will be suitable for me.
| Phone Contacts Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''my_libs'': conda)'
# name: python3
# ---
# # Movie Recommendations using Content Based Filtering
#
# In this kernel we'll be building a baseline Movie Recommendation System using TMDB 5000 Movie Dataset. For novices like me this kernel will pretty much serve as a foundation in recommendation systems and will provide you with something to start with.
# In this kernel we would be using content based filtering to recommend movies.
# In this recommender system the content of the movie (overview, cast, crew, keyword, tagline etc) is used to find its similarity with other movies. Then the movies that are most likely to be similar are recommended.
# ## Loading the dataset
# +
import pandas as pd
import numpy as np
# NOTE(review): the trailing '/' on both paths suggests each CSV was unpacked
# into a same-named folder — verify these paths actually resolve on disk.
df1 = pd.read_csv('./Dataset/tmdb_5000_credits.csv/')
df2 = pd.read_csv('./Dataset/tmdb_5000_movies.csv/')
# -
df1.head()
df2.head()
# **The first dataset contains the following features:-**
# movie_id - A unique identifier for each movie.
#
# cast - The name of lead and supporting actors.
#
# crew - The name of Director, Editor, Composer, Writer etc.
#
# **The second dataset has the following features:-**
# budget - The budget in which the movie was made.
#
# genre - The genre of the movie, Action, Comedy ,Thriller etc.
#
# homepage - A link to the homepage of the movie.
#
# id - This is infact the movie_id as in the first dataset.
#
# keywords - The keywords or tags related to the movie.
#
# original_language - The language in which the movie was made.
#
# original_title - The title of the movie before translation or adaptation.
#
# overview - A brief description of the movie.
#
# popularity - A numeric quantity specifying the movie popularity.
#
# production_companies - The production house of the movie.
#
# production_countries - The country in which it was produced.
#
# release_date - The date on which it was released.
#
# revenue - The worldwide revenue generated by the movie.
#
# runtime - The running time of the movie in minutes.
#
# status - "Released" or "Rumored".
#
# tagline - Movie's tagline.
#
# title - Title of the movie.
#
# vote_average - average ratings the movie recieved.
#
# vote_count - the count of votes recieved.
#
# ## Now we will join the two dataframes on the 'id' column
# NOTE(review): 'tittle' is misspelt, but renaming it to 'title' would
# collide with df2's existing 'title' column on merge — left as-is.
df1.columns = ['id','tittle','cast','crew']
# Join credits onto movie metadata by the shared movie id.
df2= df2.merge(df1,on='id')
# final merged dataset
df2.head()
# ## Plot description based Recommender
# We will compute pairwise similarity scores for all movies based on their plot descriptions and recommend movies based on that similarity score. The plot description is given in the overview feature of our dataset.
df2['overview'].head(5)
# Now we will convert the word vector of each overview and compute the Term Frequency-Inverse Document Frequency (TF-IDF) vectors for each overview.
#
# Term Frequency is the relative frequency of a word in a document and is given as **(term instances/total instances)**.
#
# Inverse Document Frequency is the relative count of documents containing the term is given as **log(number of documents/documents with term)**.
#
# The overall importance of each word to the documents in which they appear is equal to **TF * IDF**
#
# This will give us a matrix where each column represents a word in the overview vocabulary (all the words that appear in at least one document) and each row represents a movie, as before.
#
# This is done to reduce the importance of words that occur frequently in plot overviews and therefore, their significance in computing the final similarity score.
#
# Scikit-learn gives you a built-in **TfIdfVectorizer** class that produces the TF-IDF matrix in a couple of lines.
# +
from sklearn.feature_extraction.text import TfidfVectorizer
# Define a TF-IDF Vectorizer Object and remove all english stop words
tfidf = TfidfVectorizer(stop_words='english')
# Replacing NaN values with an empty string
df2['overview'] = df2['overview'].fillna('')
# Constructing the required TF-IDF matrix by fitting and transforming the data
tfidf_matrix = tfidf.fit_transform(df2['overview'])
# Output the shape of tfidf_matrix
tfidf_matrix.shape
# -
# We see that over **20,000** different words were used to describe the 4800 movies in our dataset.
#
# With this matrix, we can now compute a similarity score. There are several candidates for this; such as the euclidean, the pearson and the cosine similarity scores. There is no right answer to which score is the best. Different scores work well in different scenarios and it is often a good idea to experiment with different metrics.
#
# We will be using the cosine similarity to calculate a numeric quantity that denotes the similarity between two movies. We use the cosine similarity score since it is independent of magnitude and is relatively easy and fast to calculate.
#
# **Mathematically, it is defined as follows:**
#
# 
#
# Since we have used the TF-IDF vectorizer, calculating the dot product will directly give us the cosine similarity score. Therefore, we will use sklearn's **linear_kernel()** instead of cosine_similarities() since it is faster.
# +
from sklearn.metrics.pairwise import linear_kernel
# Computing the cosine similarity matrix
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
# -
# Now we are going to define a function that takes in a movie title as an input and outputs a list of the 10 most similar movies.
#
# Firstly, for this, we need a mechanism to identify the index of a movie in our DataFrame, given its title.
# Constructing a reverse map of indices and movie titles
indices = pd.Series(df2.index, index=df2['title']).drop_duplicates()
# These are the following steps we'll follow to define our recommendation function:-
#
# 1] Get the index of the movie given its title.
#
# 2] Get the list of cosine similarity scores for that particular movie with all movies. Convert it into a list of tuples where the first element is its position and the second is the similarity score.
#
# 3] Sort the list of tuples based on the similarity scores.
#
# 4] Get the top 10 elements of this list. Ignore the first element as it refers to self (the movie most similar to a particular movie is the movie itself).
#
# 5] Return the titles corresponding to the indices of the top elements.
def get_recommendations(title, cosine_sim=cosine_sim):
    """Return the titles of the 10 movies most similar to `title`.

    Looks the movie up in the module-level `indices` map, ranks every
    movie by its cosine-similarity score against it, and returns the top
    ten titles from `df2`, excluding the movie itself (which is always
    its own best match).
    """
    # Row of the similarity matrix belonging to this movie
    idx = indices[title]
    # Pair each movie's position with its similarity score, then rank
    # from most to least similar.
    ranked = sorted(enumerate(cosine_sim[idx]), key=lambda pair: pair[1], reverse=True)
    # Drop rank 0 (the movie itself) and keep the next ten.
    top_ten = ranked[1:11]
    picks = [position for position, _ in top_ten]
    return df2['title'].iloc[picks]
# Now, get recommendations for movies based on the plot description
get_recommendations('The Dark Knight Rises')
get_recommendations('The Avengers')
# While our system has done a decent job of finding movies with similar plot descriptions, the quality of recommendations is not that great.
#
# "The Dark Knight Rises" returns all Batman movies while it is more likely that the people who liked that movie are more inclined to enjoy other Christopher Nolan movies.
#
# **This is something that cannot be captured by the present system.**
# ## Credits, Genres and Keywords Based Recommender
#
# The quality of our recommender would be increased with the usage of better dataframes. So, we are going to build a recommender based on the following metadata: the 3 top actors, the director, related genres and the movie plot keywords.
#
# From the cast, crew and keywords features, we need to extract the three most important actors, the director and the keywords associated with that movie. Our data is present in the form of "stringified" lists, so we need to convert it into a safe and usable structure.
# +
# Parsing the stringified features into their corresponding python objects
from ast import literal_eval
features = ['cast', 'crew', 'keywords', 'genres']
for feature in features:
df2[feature] = df2[feature].apply(literal_eval)
# -
# Now we'll write functions that will help us to extract the required information from each feature.
# Get the director's name from the crew feature. If director is not listed it will return NaN
def get_director(x):
    """Return the director's name from a crew list, or NaN if no crew
    member has the job 'Director'.

    Parameters
    ----------
    x : list of dict
        Crew entries, each with at least 'job' and 'name' keys.
    """
    # First matching crew member wins; NaN marks a missing director.
    return next((member['name'] for member in x if member['job'] == 'Director'), np.nan)
# Returns the list top 3 elements or entire list; whichever is more.
def get_list(x):
    """Return the 'name' values of at most the first three entries of x.

    Non-list inputs (e.g. NaN cells from missing data) yield an empty list.
    """
    if not isinstance(x, list):
        # Missing cells arrive as floats (NaN); treat them as no data.
        return []
    names = [entry['name'] for entry in x]
    # Keep only the top three when more exist.
    return names[:3] if len(names) > 3 else names
# +
# Define new director, cast, genres and keywords features that are in a suitable form.
df2['director'] = df2['crew'].apply(get_director)
features = ['cast', 'keywords', 'genres']
for feature in features:
df2[feature] = df2[feature].apply(get_list)
# -
# Print the new features of the first 3 films
df2[['title', 'cast', 'director', 'keywords', 'genres']].head(3)
# The next step would be to convert the names and keyword instances into lowercase and strip all the spaces between them. This is done so that our vectorizer doesn't count the Johnny of "<NAME>" and "<NAME>" as the same.
# Function to convert all strings to lower case and strip names of spaces
def clean_data(x):
    """Lowercase and strip spaces from a string, or from each string in a list.

    Anything else (e.g. NaN for a missing director) becomes an empty string,
    so the downstream vectorizer never sees a non-string value.
    """
    def squash(s):
        # "Tom Hanks" -> "tomhanks": keeps distinct people as distinct tokens.
        return s.replace(" ", "").lower()

    if isinstance(x, list):
        return [squash(item) for item in x]
    if isinstance(x, str):
        return squash(x)
    return ''
# +
# Apply clean_data function to your features.
features = ['cast', 'keywords', 'director', 'genres']
for feature in features:
df2[feature] = df2[feature].apply(clean_data)
# -
# Now we will create our **"metadata soup"**, which is a string that contains all the metadata that we want to feed to our vectorizer (namely actors, director and keywords).
def create_soup(x):
    """Concatenate a row's keywords, cast, director and genres into one
    space-separated token string for the CountVectorizer."""
    parts = [
        ' '.join(x['keywords']),
        ' '.join(x['cast']),
        x['director'],
        ' '.join(x['genres']),
    ]
    return ' '.join(parts)
df2['soup'] = df2.apply(create_soup, axis=1)
# The next steps are the same as what we did with our plot description based recommender.
#
# One important difference is that we use the CountVectorizer() instead of TF-IDF. This is because we do not want to down-weight the presence of an actor/director if he or she has acted or directed in relatively more movies.
# +
from sklearn.feature_extraction.text import CountVectorizer
# Creating the count_matrix
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(df2['soup'])
# +
# Computing the Cosine Similarity matrix based on the count_matrix
from sklearn.metrics.pairwise import cosine_similarity
cosine_sim2 = cosine_similarity(count_matrix, count_matrix)
# -
# Reset index of our main DataFrame and construct reverse mapping as before
df2 = df2.reset_index()
indices = pd.Series(df2.index, index=df2['title'])
# We can now reuse our **get_recommendations()** function by passing in the new cosine_sim2 matrix as your second argument.
# Now, get recommendations for movies based on the keywords.
get_recommendations('The Dark Knight Rises', cosine_sim2)
get_recommendations('The Godfather', cosine_sim2)
get_recommendations('The Avengers', cosine_sim2)
# We see that our recommender has been successful in capturing more information due to more metadata and has given us better recommendations.
#
# It is more likely that Marvels or DC comics fans will like the movies of the same production house. Therefore, to our features above we can add production_company.
#
# We can also increase the weight of the director, by adding the feature multiple times in the soup.
# ## Conclusion
#
# We created recommenders using demographic, content-based and collaborative filtering. While demographic filtering is very elementary and cannot be used practically, **Hybrid Systems** can take advantage of content-based and collaborative filtering, as the two approaches have proved to be almost complementary.
| content_based_filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div align="center"><h3>Remembering Python...</h3></div>
#
# Python boots up with __builtins__ already in the namespace and checked as a part of the name resolution protocol...
#
# Using different slices, we can check portions of a long list.
# +
from pprint import pprint
# I, Python am built from types, such as builtin types:
the_builtins = dir(__builtins__) # always here
pprint(the_builtins[-10:]) # no need to import
# -
# Lets check our understanding that the native types -- the ones we count on to build more complex types -- live in __builtins__:
for the_string in ["list", "tuple", "dict", "int", "float"]:
if the_string in the_builtins:
print("Yes I am a native type: ", the_string)
assert type(eval(the_string)) == type # all types in this club
else:
print("No, I'm not native: ", the_string)
# And now for something completely different, lets define a class that does substitution based on a permutation of lower-case ascii letters plus space. Such a type is given more substantial implementation in the form of our px_class.py, which allows permutations to multiply, giving more permuations.
# +
# usually up top
from string import ascii_lowercase as all_lowers
from random import shuffle
class P:
    """A random substitution cipher over lowercase ASCII letters plus space.

    class Px is the more sophisticated version of this class.
    """
    def __init__(self, p=None):
        """Build a permutation; random over 'a'-'z' plus space when p is None,
        otherwise use the supplied mapping dict directly."""
        if not p:
            original = all_lowers + ' '
            scrambled = list(original)
            shuffle(scrambled)
            self.perm = dict(zip(original, scrambled))
        else:
            self.perm = p

    def __invert__(self):
        """Reverse my perm, make a new me (the symmetric decryption key)."""
        reverse = dict(zip(self.perm.values(), self.perm.keys()))
        return P(reverse)  # <-- new P instance

    def encrypt(self, s):
        """Substitute each character of s through the permutation.

        Characters outside the permutation (uppercase, punctuation, digits)
        are passed through unchanged; the original raised KeyError on them.
        """
        return "".join(self.perm.get(c, c) for c in s)

    def decrypt(self, s):
        """Decrypt by encrypting with the inverted permutation."""
        rev = ~self            # <-- new P instance
        return rev.encrypt(s)  # <-- symmetric key
p = P()
m = "i like python so much because it does everything" # sample plaintext (lowercase letters and spaces only)
c = p.encrypt(m)
print(m) # plaintext
print(c) # ciphertext
d = p.decrypt(c)
print(d) # round trip: should match the original plaintext
# -
# In the code below, we use a context manager to connect and disconnect from a SQLite database. The context manager is developed from a simple generator with precisely one yield statement, using the @contextmanager decorator.
# +
import sqlite3 as sql
import os.path
import json
import time
from contextlib import contextmanager
PATH = "/Users/kurner/Documents/classroom_labs/session10"
DB1 = os.path.join(PATH, 'periodic_table.db')
def mod_date():
    """Return the current time as a POSIX timestamp in UTC (seconds).

    Bug fix: the original used time.mktime(time.gmtime()), but mktime()
    interprets its struct_time argument as *local* time, so the result was
    off by the machine's UTC offset. calendar.timegm() is the correct
    inverse of gmtime().
    """
    import calendar
    return calendar.timegm(time.gmtime())
@contextmanager
def Connector(db):
    """Attach an open SQLite connection and cursor to *db* for a with-block.

    Sets db.conn and db.curs, yields db, and always closes the connection on
    exit. Bug fixes versus the original: the connection was never closed on
    the no-exception path (close only happened inside the except handler),
    and `oops[0]` raised TypeError on Python 3, where exceptions are not
    subscriptable. Exceptions from the body now propagate unchanged.
    """
    db.conn = sql.connect(db.db_name)  # connection
    db.curs = db.conn.cursor()         # cursor
    try:
        yield db
    finally:
        db.conn.close()
class elemsDB:
    """Tiny query wrapper around the Elements table of a SQLite database.

    The connection/cursor attributes (conn, curs) are attached externally by
    the Connector context manager before seek() is called.
    """

    def __init__(self, db_name):
        self.db_name = db_name  # path to the SQLite database file

    def seek(self, elem):
        """Return JSON for one element row, for all rows keyed by symbol
        (when elem == "all"), or the string "NOT FOUND".

        Parameters
        ----------
        elem : str
            An element symbol (e.g. "C"), or "all" for every row.
        """
        if self.conn:
            if elem != "all":
                # Parameterized query: `elem` may come straight from a GET
                # request, so never interpolate it into the SQL text
                # (the original str.format() version was SQL-injectable).
                query = ("SELECT * FROM Elements "
                         "WHERE elem_symbol = ?")
                self.curs.execute(query, (elem,))
                result = self.curs.fetchone()
                if result:
                    return json.dumps(list(result))
            else:
                query = "SELECT * FROM Elements ORDER BY elem_protons"
                self.curs.execute(query)
                result = {}
                for row in self.curs.fetchall():
                    result[row[1]] = list(row)  # keyed by the symbol column
                return json.dumps(result)
        return "NOT FOUND"
# -
# At this point, we're able to seek a specific row from the Elements table, or request all of them. In a Flask web application, the controlling argument might come from a GET request, i.e. a URL such as /api/elements?elem=H
# +
output = ""
with Connector(elemsDB(DB1)) as dbx:
output = dbx.seek("C")
print(output)
# -
# To be continued...
# 
# +
import requests
data = {}
data["protons"]=100
data["symbol"]="Kr"
data["long_name"]="Kirbium"
data["mass"]=300
data["series"]="Dunno"
data["secret"]="DADA" # <--- primitive authentication
the_url = 'http://localhost:5000/api/elements'
r = requests.post(the_url, data=data)
print(r.status_code)
print(r.content)
| Remembering1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# %pylab inline
import random
# +
#First visit Monte Carlo
# -
# parameters
gamma = 0.4 # discounting rate
rewardSize = -1
gridSize = 4
terminationStates = [[0,0], [gridSize-1, gridSize-1]]
actions = [[-1, 0], [1, 0], [0, 1], [0, -1]]
numIterations = 10000
# initialization
V = np.zeros((gridSize, gridSize))
returns = {(i, j):list() for i in range(gridSize) for j in range(gridSize)}
deltas = {(i, j):list() for i in range(gridSize) for j in range(gridSize)}
states = [[i, j] for i in range(gridSize) for j in range(gridSize)]
print(states)
# utils
def generateEpisode():
    """Roll out one random-policy episode on the gridworld.

    Returns a list of [state, action, reward, next_state] steps; the
    rollout stops as soon as a termination state is reached (the terminal
    state itself contributes no step).
    """
    state = random.choice(states[1:-1])  # never start on a terminal cell
    trajectory = []
    while list(state) not in terminationStates:
        action = random.choice(actions)
        nxt = np.array(state) + np.array(action)
        # Moves that would leave the grid keep the agent where it is.
        if -1 in list(nxt) or gridSize in list(nxt):
            nxt = state
        trajectory.append([list(state), action, rewardSize, list(nxt)])
        state = nxt
    return trajectory
generateEpisode()
# Monte Carlo prediction: average discounted returns per state over episodes.
for it in tqdm(range(numIterations)):
    episode = generateEpisode()
    G = 0  # discounted return, accumulated while walking the episode backwards
    #print(episode)
    for i, step in enumerate(episode[::-1]):
        G = gamma*G + step[2]  # step = [state, action, reward, next_state]
        # Intended as a first-visit filter: only update when this state was
        # not already visited. NOTE(review): the slice inspects forward steps
        # 0..i-1 while the current step sits at forward index len(episode)-1-i,
        # so this may not be a strict first-visit test -- verify.
        if step[0] not in [x[0] for x in episode[::-1][len(episode)-i:]]:
            idx = (step[0][0], step[0][1])
            returns[idx].append(G)
            newValue = np.average(returns[idx])  # running mean of returns
            # Track the magnitude of each value update for the convergence plot.
            deltas[idx[0], idx[1]].append(np.abs(V[idx[0], idx[1]]-newValue))
            V[idx[0], idx[1]] = newValue
V
# using gamma = .4
plt.figure(figsize=(20,10))
all_series = [list(x)[:50] for x in deltas.values()]
for series in all_series:
xlabel("episodes")
ylabel("delta")
plt.plot(series)
| monte_carlo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Working with local coordinates
#
# <img align="right" src="https://anitagraser.github.io/movingpandas/assets/img/movingpandas.png">
#
# [](https://mybinder.org/v2/gh/anitagraser/movingpandas/master?filepath=tutorials/5-local-coordinates.ipynb)
#
# **<p style="color:#e31883">This notebook demonstrates the current development version of MovingPandas.</p>**
#
# For tutorials using the latest release visit https://github.com/anitagraser/movingpandas-examples.
#
#
#
# This tutorial uses data extracted from video footage of a soccer game that was published in https://github.com/Friends-of-Tracking-Data-FoTD/Last-Row
#
# +
import urllib
import os
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame, read_file
from shapely.geometry import Point, LineString, Polygon
from fiona.crs import from_epsg
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
import holoviews as hv
import sys
sys.path.append("..")
import movingpandas as mpd
mpd.show_versions()
import warnings
warnings.simplefilter("ignore")
# +
from os.path import exists
from urllib.request import urlretrieve
def get_df_from_gh_url(url):
    """Download *url* into a local 'soccer.csv' cache (first call only)
    and load it as a pandas DataFrame."""
    cache = 'soccer.csv'
    if not exists(cache):
        urlretrieve(url, cache)  # fetch only when the cache is missing
    return pd.read_csv(cache)
# -
input_file = "https://raw.githubusercontent.com/anitagraser/Last-Row/master/datasets/positional_data/liverpool_2019.csv"
df = get_df_from_gh_url(input_file)
df.drop(columns=['Unnamed: 0'], inplace=True)
# +
plays = list(df.play.unique())
def to_timestamp(row):
    """Map a (play, frame) row to a synthetic datetime.

    Each play gets its own day in January 2019 starting at noon; within a
    play, frames advance the clock at the footage rate of 20 frames/second.
    """
    play_day = plays.index(row.play) + 1
    kickoff = datetime(2019, 1, play_day, 12, 0, 0)
    frame_ms = 1000 / 20 * row.frame  # 50 ms per frame
    return kickoff + timedelta(milliseconds=frame_ms)
# frame: the frame number for the current location. Data provided has 20 frames per second
df['time'] = df.apply(to_timestamp, axis=1)
df.set_index('time', inplace=True)
# the preferred size for many professional teams' stadiums is 105 by 68 metres, according to https://en.wikipedia.org/wiki/Football_pitch
pitch_length = 105
pitch_width = 68
df.x = df.x / 100 * pitch_length
df.y = df.y / 100 * pitch_width
df
# -
# %%time
CRS = None
traj_collection = mpd.TrajectoryCollection(df, 'player', x='x', y='y', crs=CRS)
mpd.TemporalSplitter(traj_collection).split(mode="day")
print(f"Finished creating {len(traj_collection)} trajectories")
pitch = Polygon([(0, 0), (0, pitch_width), (pitch_length, pitch_width), (pitch_length, 0), (0, 0)])
plotted_pitch = GeoDataFrame(pd.DataFrame([{'geometry': pitch, 'id': 1}]), crs=CRS).hvplot(color='white', alpha=0.5)
PLAY = 2
title = f'Play {PLAY} {plays[PLAY]}'
play_trajs = traj_collection.filter('play', plays[PLAY])
play_trajs
play_trajs.plot(column='team', colormap={'attack':'hotpink', 'defense':'turquoise'})
type(play_trajs)
generalized = mpd.MinTimeDeltaGeneralizer(play_trajs).generalize(tolerance=timedelta(seconds=0.5))
hvplot_defaults = {'line_width':5, 'frame_height':350, 'frame_width':700, 'colorbar':True, 'tiles':None, 'geo':False,}
generalized.hvplot(title=title, c='speed', hover_cols=['player'], **hvplot_defaults)
(
plotted_pitch *
generalized.hvplot(title=title, c='speed', hover_cols=['player'], cmap='Viridis', **hvplot_defaults)
)
pitch_img = hv.RGB.load_image(f'./data/soccer_field.png', bounds=(0,0,pitch_length,pitch_width))
(
pitch_img *
generalized.hvplot(title=title, c='team', colormap={'attack':'limegreen', 'defense':'purple'},
hover_cols=['player'],**hvplot_defaults) *
generalized.get_start_locations().hvplot(label='start', color='orange')
)
# ## Continue exploring MovingPandas
#
# 1. [Getting started](1-getting-started.ipynb)
# 1. [Handling trajectory data files (reading & writing)](2-reading-data-from-files.ipynb)
# 1. [TrajectoryCollection aggregation (flow maps)](3-generalization-and-aggregation.ipynb)
# 1. [Stop detection](4-stop-detection.ipynb)
# 1. [Working with local coordinates](5-local-coordinates.ipynb)
| tutorials/5-local-coordinates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from logicqubit.logic import *
from cmath import *
import numpy as np
import sympy as sp
from scipy.optimize import *
import matplotlib.pyplot as plt
# +
def qft(qr):
    """Apply the quantum Fourier transform to the register qr in place."""
    for target in range(len(qr)):
        # Controlled phase rotations from every earlier qubit, then Hadamard.
        for control in range(target):
            angle = pi / float(2 ** (target - control))
            qr[target].CU1(qr[control], angle)
        qr[target].H()
def iqft(qr):  # inverse quantum Fourier transform
    """Apply the inverse quantum Fourier transform to register qr in place."""
    for target in range(len(qr)):
        # Negative-angle rotations undo the forward QFT phases.
        for control in range(target):
            angle = -pi / float(2 ** (target - control))
            qr[target].CU1(qr[control], angle)
        qr[target].H()
def swap(s1, s2):
    """Swap two qubits with the standard three-CNOT construction."""
    for target, control in ((s2, s1), (s1, s2), (s2, s1)):
        target.CX(control)
#def ansatz(q, params):
# return q.RY(params[0])
def _ansatz(reg, params):
    """Alternative variational ansatz over a qubit register (unused below).

    NOTE(review): inside the j-loop the rotation is applied to reg[i] with
    angle params[j], so qubit i is rotated n_qubits times per layer;
    reg[j].RY(params[j]) may have been intended -- confirm before reuse.
    """
    n_qubits = len(reg)
    depth = n_qubits
    for i in range(depth):
        reg[1].CNOT(reg[0])  # entangle the first two qubits each layer
        for j in range(n_qubits):
            reg[i].RY(params[j])
def ansatz(reg, params):
    """Variational ansatz: a chain of nearest-neighbour CNOTs plus RY
    rotations, repeated for n_qubits layers.

    NOTE(review): the RY inside the j-loop targets reg[i], so qubit i is
    rotated n_qubits times per layer while the others are not;
    reg[j].RY(params[j]) (one rotation per qubit) may have been intended --
    verify against the intended circuit.
    """
    n_qubits = len(reg)
    depth = n_qubits
    for i in range(depth):
        for j in range(n_qubits):
            if(j < n_qubits-1):
                reg[j+1].CNOT(reg[j])
            reg[i].RY(params[j])
def ansatz_2(q1, q2, params):
    """Two-qubit ansatz: three alternating CNOT layers, each followed by the
    same pair of RY rotations (params[0] on q1, params[1] on q2)."""
    for control, target in ((q1, q2), (q2, q1), (q1, q2)):
        target.CNOT(control)
        q1.RY(params[0])
        q2.RY(params[1])
# -
def expectation(params):
    """VQE energy objective: prepare the 2-qubit ansatz state with the given
    rotation angles and return the Hamiltonian expectation value computed
    from the measured basis-state probabilities.
    """
    # H = [[1,0,0,0],[0,-1,0,0],[0,0,-5,0],[0,0,0,1]]
    # <psi|H|psi> = <psi|00><00|psi> - <psi|01><01|psi> - 5*<psi|10><10|psi> + <psi|11><11|psi>
    logicQuBit = LogicQuBit(2, first_left=True)
    #reg = QubitRegister(2)
    q1 = Qubit()
    q2 = Qubit()
    #_ansatz(reg.getQubits(), params)
    #ansatz([q2,q1], params)
    ansatz_2(q1,q2,params)
    # res holds the probabilities of |00>, |01>, |10>, |11> in order.
    res = logicQuBit.Measure([q1,q2])
    # NOTE(review): these weights (-5, 1, -4, -10) do not match the diagonal
    # matrix in the comment above -- confirm which Hamiltonian is intended.
    return -5*res[0]+res[1]-4*res[2]-10*res[3]
expectation([0,0])
minimum = minimize(expectation, [0,0], method='Nelder-Mead')
print(minimum)
| vqe_h_2q.ipynb |
/* --- */
/* jupyter: */
/* jupytext: */
/* text_representation: */
/* extension: .mac */
/* format_name: light */
/* format_version: '1.5' */
/* jupytext_version: 1.14.4 */
/* kernelspec: */
/* display_name: Maxima */
/* language: maxima */
/* name: maxima */
/* --- */
/* # Finding Numerical Fluxes for DG */
/* */
/* Copyright (C) 2020 <NAME> */
/* */
/* <details> */
/* <summary>MIT License</summary> */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in */
/* all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN */
/* THE SOFTWARE. */
/* </details> */
/* */
/* ---- */
/* NB: This note book uses the [Maxima-Jupyter kernel](https://github.com/robert-dodier/maxima-jupyter) to interface with [Maxima](http://maxima.sourceforge.net/), an open-source computer algebra system. I have found that the [Docker image](https://hub.docker.com/r/calyau/maxima-jupyter) is a fairly convenient way of using this kernel. */
/* */
/* Some Maxima version info for reproducibility: */
build_info();
/* Load some packages: */
/* +
kill(all);
load("itensor");
/* Abort with an error unless condition holds (poor man's assert). */
assert(condition):=if not condition then error("Assertion violated") else true$
/* Squared Euclidean norm via the dot product. */
norm_2_squared(v):=v.v;
/* Build a 3-vector whose i-th entry is sum_{j,k} eps_{ijk} f(j,k):
   the generic cross-product pattern for a bilinear f. */
crossfunc(f):=makelist(
    sum(sum(
        levi_civita([i,j,k])*f(j,k),
        j,1,3),k,1,3),i,1,3)$
/* Ordinary cross product a x b, expressed through crossfunc. */
crossprod(a,b):=crossfunc(lambda([j,k], a[j]*b[k]));
/* -
/* ## Simplification Utilities */
/* */
/* These function simplify expressions coming from the (symbolic) simultaneous diagonalization, by letting Maxima realize that $n \cdot n =1$. */
/* +
/* ------------------------------------------------------------------------- */
/* Simplification for expressions stemming from hyperbolic systems */
/* ------------------------------------------------------------------------- */
/* Simplify using the unit-normal identity n.n = 1. */
hypsimp(x):=ratsimp(ratsubst(1,n.n,x))$
/* Stronger variant: additionally eliminate the last normal component via
   n_last^2 = 1 - (sum of the other squared components). */
fullhypsimp(x):=hypsimp(
    ratsubst(
        last(n)^2,
        1-sum(n[i]^2,i,1,length(n)-1),
        x)
    )$
/* ------------------------------------------------------------------------- */
/* diagonalize a given hyperbolic operator A */
/* ------------------------------------------------------------------------- */
/* Returns [V, D, invV] with A = V.D.invV; asserts that V.invV reduces to
   the identity under the n.n = 1 relation. */
hypdiagonalize(A):=block([evA, V, invV,D],
    evA:hypsimp(apply(append, eigenvectors(A)[2])),
    V:transpose(apply(matrix, evA)),
    invV:hypsimp(invert(V)),
    assert(hypsimp(V.invV)=ident(length(A))),
    D:hypsimp(invV.A.V),
    [V, D, invV])$
/* -
/* ## Wave Equation */
/* redefine this to change dimensionality: */
n:[nx,ny];
/* +
dims:length(n);
assume(c>0);
if dims = 1 then n:[1];
/* -
/* Define the flux jacobian for the first-order form of the wave-equation: */
/* */
/* $$ */
/* \begin{align*} */
/* \partial_t u &= \nabla \cdot \boldsymbol v\\ */
/* \partial_t \boldsymbol v &= \nabla u */
/* \end{align*} */
/* $$ */
/* projected onto a line with normal `n`: */
esymmatrix(n, v, i,j):=ematrix(n,n,v,i,j)+ematrix(n,n,v,j,i);
wave_A:sum(n[i]*esymmatrix(dims+1, -c, 1+i,1),i,1,dims);
/* Find the eigenvalues of the flux Jacobian: */
[wave_V, wave_D, wave_invV]:hypdiagonalize(wave_A);
/* ### Finding the Numerical Flux */
/* */
/* This function sets up a system of Rankine-Hugoniot conditions across the flux fan and solves for the stagnation state: */
/* ------------------------------------------------------------------------- */
/* compute upwind flux for a given operator with eigenvalues evs, sorted
* in ascending order.
 * Sign assumptions for all variables occurring in evs must be in place.
*/
/* ------------------------------------------------------------------------- */
/* Solve the Rankine-Hugoniot jump conditions across the wave fan of a
   hyperbolic operator and return the upwind flux: the flux evaluated in
   the fan region that contains speed zero. evs are the sorted eigenvalues
   and D the diagonalized flux matrix; every variable appearing in evs is
   given an m/p suffix for the minus/plus side of the interface. */
hyp_upwind_flux(evs, D):=block([evvars, Dp, Dm, n, midstates, states, unknowns],
    evvars:listofvars(evs),
    add_evvars_suffix(suffix, x):=subst(makelist(v=concat(''v, suffix), v, evvars), x),
    /* eigenvalues and diagonal flux as seen from the minus/plus sides */
    evsm:add_evvars_suffix(m, evs),
    evsp:add_evvars_suffix(p, evs),
    Dm:add_evvars_suffix(m, D),
    Dp:add_evvars_suffix(p, D),
    /* one vector of unknowns per intermediate region between adjacent waves */
    midstates:makelist(makelist(concat(s,state,i), i, 1, length(D)),
        state, 1, length(evs)-1),
    states:append(
        [makelist(concat(sm, i), i, 1, length(D))],
        midstates,
        [makelist(concat(sp,i), i, 1, length(D))]),
    unknowns:apply(append, midstates),
    /* Two cases: a standing (zero-speed) wave, or a fan straddling zero. */
    result:if member(0, evs) then
        block([biasedD, veceqns, eqns, soln],
            biasedD:makelist(
                if evs[i] = 0 then [Dp,Dm]
                else if evs[i] > 0 then [Dp,Dp]
                else [Dm,Dm],
                i, 1, length(evs)),
            veceqns:apply(append, makelist(
                -(if evs[i] > 0 then evsp[i] else evsm[i]) *(states[i+1]-states[i])
                +(biasedD[i][1].states[i+1]-biasedD[i][2].states[i]),
                i,1,length(evs))),
            eqns:makelist(veceqns[i,1], i, 1, length(veceqns)),
            soln:solve(eqns, unknowns),
            assert(length(soln)=1),
            for i: 1 thru length(evs) do
                if evs[i] = 0 then return(Dp.subst(soln[1], midstates[i]))
            )
    else
        block([straddle_idx, Dstates, veceqns, eqns, soln],
            /* find the region whose bounding wave speeds change sign */
            straddle_idx:for i: 1 thru length(evs)-1 do
                if (evs[i] < 0) and (evs[i+1] > 0) then return(i),
            flux:makelist(concat(flux,i),i,1,length(D)),
            unknowns:append(unknowns, flux),
            Dstates:append(
                [Dm.first(states)],
                makelist(
                    if i = straddle_idx then flux
                    else if evs[i] > 0 then Dp.midstates[i]
                    else Dm.midstates[i],
                    i, 1, length(midstates)),
                [Dp.last(states)]),
            veceqns:apply(append, makelist(
                -(if evs[i] > 0 then evsp[i] else evsm[i]) *(states[i+1]-states[i])
                +(Dstates[i+1]-Dstates[i]),
                i,1,length(evs))),
            eqns:makelist(veceqns[i,1], i, 1, length(veceqns)),
            print(covect(eqns)),
            soln:solve(eqns, unknowns),
            assert(length(soln)=1),
            subst(soln[1], flux)
            ),
    /* rewrite the suffixed scalars back to components of the vectors sm, sp */
    subst(
        append(
            makelist(concat(sm, i)=sm[i,1], i, 1, length(D)),
            makelist(concat(sp, i)=sp[i,1], i, 1, length(D))
        ),
        result)
    )$
/* Find an expression for the flux in characteristic variables. */
/* */
/* Note the `p` and `m` suffixes for the $+$ and $-$ sides of the interface. */
/* +
wave_eigenvalues:makelist(wave_D[i,i], i, 1, length(wave_D));
if member(0, wave_eigenvalues) then
wave_sflux:hyp_upwind_flux([-c,0,c], wave_D)
else
wave_sflux:hyp_upwind_flux([-c,c], wave_D);
/* -
/* Convert back to conserved variables: */
wave_wflux:ratsimp(wave_V.ev(wave_sflux, [sm=wave_sminw,sp=wave_spinw]));
/* ## Maxwell's Equations */
/* */
/* First, set up some parameter assumptions: */
assume(c>0);
assume(mu>0);
assume(epsilon>0);
assume(epsinv>0);
assume(muinv>0);
/* Some helper functions for matrix creation: */
/* +
/* A hyperbolic system matrix resulting from a curl */
/* 3x3 matrix of the curl's action along the given coordinate direction,
   built from the Levi-Civita symbol. */
curlmat(coord):=genmatrix(
    lambda ([i,j], levi_civita([coord,j,i])),
    3,3)$
/* Stack matrices vertically (rows appended) ... */
vstack:append$
/* ... and horizontally (columns appended). */
hstack(a,b):=transpose(append(transpose(a),transpose(b)))$
/* Assemble a 2x2 block matrix from its four blocks. */
blockmat(a11,a12,a21,a22):=vstack(hstack(a11,a12),hstack(a21,a22))$
/* -
n:[nx,ny,nz];
/* Next, write down the flux Jacobian on a line with normal `n`: */
/* +
/* Flux Jacobian contribution of Maxwell's equations along coordinate i:
   off-diagonal curl blocks coupling the E and H fields. */
max_submat(i):=blockmat(
    zeromatrix(3,3),
    -epsinv*curlmat(i), /* epsinv = 1/epsilon */
    muinv*curlmat(i), /* muinv = 1/mu */
    zeromatrix(3,3)
    )$
max_Asimp:sum(n[i]*max_submat(i),i,1,3);
max_A:subst([epsinv=1/epsilon,muinv=1/mu], max_Asimp);
/* -
/* Next, diagonalize to obtain the transformation to/from characteristic variables: */
/* +
/* Substitute the shorthand symbols back: epsinv -> 1/epsilon, muinv -> 1/mu. */
max_invsubst(x):=subst([epsinv=1/epsilon, muinv=1/mu], x)$
[max_V, max_D, max_invV]:max_invsubst(hypdiagonalize(max_Asimp));
/* -
/* Now find the flux in characteristic variables: */
/* +
max_Dinc:subst([1/(sqrt(epsilon)*sqrt(mu))=c], max_D);
max_sflux:hyp_upwind_flux([-c,0,c], max_Dinc);
/* FIXME: max_V should not depend on epsilon and mu, but it does
For now, make cp and cm equal. */
max_sflux:subst(
[cp=1/(sqrt(epsilon)*sqrt(mu)), cm=1/(sqrt(epsilon)*sqrt(mu))],
max_sflux);
/* -
/* And in conserved variables: */
/* +
max_Em:makelist(Em[i],i,1,3)$
max_Ep:makelist(Ep[i],i,1,3)$
max_Hm:makelist(Hm[i],i,1,3)$
max_Hp:makelist(Hp[i],i,1,3)$
max_wm:vstack(max_Em,max_Hm)$
max_wp:vstack(max_Ep,max_Hp)$
max_sminw:hypsimp(max_invV.max_wm)$
max_spinw:hypsimp(max_invV.max_wp)$
max_wflux:fullhypsimp(max_V.ev(max_sflux, [sm=max_sminw,sp=max_spinw]));
/* +
max_stronglocalpart:max_A.max_wm;
max_strongwflux:max_stronglocalpart-max_wflux;
/* -
/* Check against value from [the literature](https://doi.org/10.1016/0010-4655(91)90199-U): */
/* +
max_Z:sqrt(mu/epsilon)$
max_Y:sqrt(epsilon/mu)$
max_knownstrongwflux:ratsimp(vstack(
-1/(2*epsilon)
*(crossprod(n,(max_Hm-max_Hp)-1/max_Z*crossprod(n,max_Em-max_Ep))),
1/(2*mu)
*(crossprod(n,(max_Em-max_Ep)+1/max_Y*crossprod(n,max_Hm-max_Hp)))
));
assert(norm_2_squared(hypsimp(
(max_strongwflux)
-max_knownstrongwflux))=0);
/* -
| demos/dg-hyperbolic/Finding Numerical Fluxes for DG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small>
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# </small>
#
#
#
# # Deep Learning From Basics to Practice
# ## by <NAME>, https://dlbasics.com, http://glassner.com
# ------
# ## Chapter 27: Applications
# ### Notebook 4: Style Transfer
#
# This notebook is provided as a “behind-the-scenes” look at code used to make some of the figures in this chapter. It is still in the hacked-together form used to develop the figures, and is only lightly commented.
# ### How to run this code:
# - Find the cell marked Constants.
# - Set the variables there - in particular, set the input and output files
# - Save the notebook
# - Choose the Kernel menu, then Restart & Run All
# - Wait a while!
# ### About this code:
# This notebook is a minor restructuring of code from
# https://github.com/titu1994/Neural-Style-Transfer
# by <NAME> (titu1994).
#
# See License E in LICENSE.txt
# +
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
# Just in case the Keras defaults aren't as we expect
# Force TensorFlow-style (batch, height, width, channels) tensor layout globally.
K.set_image_data_format('channels_last')
def about():
    """Describe this neural style transfer implementation.

    Neural Style Transfer with Keras 2.0.5, based on
    https://github.com/fchollet/keras/blob/master/examples/neural_style_transfer.py
    and incorporating a few of the improvements suggested in the paper
    "Improving the Neural Algorithm of Artistic Style"
    (http://arxiv.org/abs/1605.04603).
    """
# +
# Whether generated images should be written to the book's output directory.
save_files = True
import os, sys, inspect
# Locate this notebook's directory and put its parent on sys.path so the
# book-wide utility package (DLBasics_Utilities) can be imported.
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)
# Make sure the output directory exists before the optimization loop writes to it.
file_helper.check_for_directory(file_helper.get_saved_output_dir())
# -
# # Constants
# ## Set up the transfer here, then reset and run the whole notebook
# +
# CONSTANTS
#
# SET UP THE TRANSFER HERE
#
base_image_path = 'input_data/waters-3038803_1280-crop.jpg'   # content image
style_image_paths = [ 'input_data/HR-Self-Portrait-1907-Picasso.jpg' ]  # one or more style images
content_weight = 0.025          # NOTE(review): content loss is commented out below, so this appears unused — confirm intent
style_weights = [1]             # one weight per entry in style_image_paths
image_size = 400                # working width in pixels during optimization
total_variation_weight = 8.5e-5 # smoothness regularization strength
num_iter = 10                   # number of L-BFGS rounds
model_name = 'vgg16'            # 'vgg16' or 'vgg19'
content_loss_type = 0           # 0, 1, or 2; selects the content-loss scaling
rescale_image = True            # rescale the result back to the original size
rescale_method = 'bicubic'
maintain_aspect_ratio = True
result_prefix = file_helper.get_saved_output_dir()+'/style-xfer-'
content_layer = 'block1_conv2'  # layer used for the (currently disabled) content loss
num_style_layers = 13           # how many VGG layers contribute to the style loss
init_image ='content' # try 'noise'
pool_type_name = 'ave'          # 'ave' for average pooling, anything else for max
preserve_color = False          # keep the content image's chrominance (YCbCr)
# The mask-related options below are disabled in this notebook run.
style_masks = None
content_mask = None
color_mask = None
mask_path = None
content_mask_path = None
style_masks_present = False
content_mask_present = False
color_mask_present = False
style_scale = 1.0
min_improvement = 0             # early-stop threshold in percent; 0 disables it
# -
# +
# Derived settings and module-level state shared by the helper functions below.
pooltype = 1 if pool_type_name == "ave" else 0
read_mode = "color"
# dimensions of the generated picture.
img_width = img_height = 0      # working size, set by preprocess_image(load_dims=True)
img_WIDTH = img_HEIGHT = 0      # original content-image size
aspect_ratio = 0
# -
# globals
# Filled in by get_input_tensor(): tensor count, style-image count, and the
# placeholder holding the image being optimized.
nb_tensors = None
nb_style_images = None
combination_image = None
# +
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path, load_dims=False, read_mode="color"):
    """Load an image and convert it into a float32 tensor suitable for VGG.

    Steps: optional grayscale->3-channel expansion, optional recording of the
    original dimensions into module globals, resize to the working size,
    RGB->BGR channel swap, and subtraction of the ImageNet channel means.

    When load_dims is True the module globals img_width/img_height (working
    size) and img_WIDTH/img_HEIGHT/aspect_ratio (original size) are updated.
    Returns an array of shape (1, img_width, img_height, 3) for channels-last.
    NOTE: imread/imresize are deprecated scipy.misc helpers (removed in
    SciPy >= 1.2); this notebook targets the older pinned SciPy.
    """
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
    mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode) # Prevents crashes due to PNG images (ARGB)
    if mode == "L":
        # Expand the 1 channel grayscale to 3 channel grayscale image
        temp = np.zeros(img.shape + (3,), dtype=np.uint8)
        temp[:, :, 0] = img
        temp[:, :, 1] = img.copy()
        temp[:, :, 2] = img.copy()
        img = temp
    if load_dims:
        # Remember the original size so the result can be rescaled at the end.
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH
        img_width = image_size
        if maintain_aspect_ratio:
            img_height = int(img_width * aspect_ratio)
        else:
            img_height = image_size
    img = imresize(img, (img_width, img_height)).astype('float32')
    # RGB -> BGR
    img = img[:, :, ::-1]
    # Subtract the ImageNet per-channel mean pixel values (BGR order).
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68
    if K.image_dim_ordering() == "th":
        img = img.transpose((2, 0, 1)).astype('float32')
    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a flat optimizer array back into a displayable RGB uint8 image.

    Undoes preprocess_image: reshape to the working size, add back the
    per-channel VGG mean pixel values, flip BGR back to RGB, and clip to
    [0, 255].
    """
    if K.image_dim_ordering() == "th":
        img = x.reshape((3, img_width, img_height)).transpose((1, 2, 0))
    else:
        img = x.reshape((img_width, img_height, 3))
    # Re-add the ImageNet channel means that preprocessing subtracted.
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        img[:, :, channel] += mean
    # Channels were stored BGR for VGG; restore RGB order.
    img = img[:, :, ::-1]
    return np.clip(img, 0, 255).astype('uint8')
# util function to preserve image color
def original_color_transform(content, generated, mask=None):
generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr') # Convert to YCbCr color space
if mask is None:
generated[:, :, 1:] = content[:, :, 1:] # Generated CbCr = Content CbCr
else:
width, height, channels = generated.shape
for i in range(width):
for j in range(height):
if mask[i, j] == 1:
generated[i, j, 1:] = content[i, j, 1:]
generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB') # Convert to RGB color space
return generated
# -
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask image, binarize it, and fit it to `shape`.

    Args:
        mask_path: path of the grayscale mask image to load.
        shape: 4-tuple layer shape; interpreted as (batch, C, W, H) for
            Theano ordering and (batch, W, H, C) otherwise.
        return_mask_img: when True, return the 2-D 0/1 mask directly instead
            of broadcasting it across the channel dimension.

    Returns:
        A (width, height) float mask of 0s and 1s, or a tensor of shape[1:]
        with that mask replicated on every channel.
    """
    is_th = K.image_dim_ordering() == "th"
    if is_th:
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape
    mask = imread(mask_path, mode="L") # Grayscale mask load
    mask = imresize(mask, (width, height)).astype('float32')
    # Perform binarization of mask.
    # BUG FIX: the original used `mask <= 127` and `mask > 128`, which left
    # pixels exactly equal to 128 untouched, so the mask was not truly binary.
    # A single threshold maps every pixel to exactly 0 or 255.
    mask[mask <= 127] = 0
    mask[mask > 127] = 255
    mask_max = np.amax(mask)  # renamed from `max` to avoid shadowing the builtin
    mask /= mask_max
    if return_mask_img: return mask
    # Replicate the 2-D mask on every channel of the layer shape.
    mask_shape = shape[1:]
    mask_tensor = np.empty(mask_shape)
    for i in range(channels):
        if is_th:
            mask_tensor[i, :, :] = mask
        else:
            mask_tensor[:, :, i] = mask
    return mask_tensor
def pooling_func(x):
    """Apply 2x2 stride-2 pooling: average when the global `pooltype` is 1
    (i.e. pool_type_name == 'ave'), max pooling otherwise."""
    pool_layer = AveragePooling2D if pooltype == 1 else MaxPooling2D
    return pool_layer((2, 2), strides=(2, 2))(x)
def get_input_tensor():
    """Stack content, style, and generated-image tensors into one VGG input.

    Returns a Keras Input wrapping the concatenation
    [content, style_1..style_n, generated] along the batch axis.
    Side effects: sets the module globals nb_tensors, nb_style_images, and
    combination_image (the placeholder that the optimizer updates).
    """
    global nb_tensors, nb_style_images
    global combination_image
    # get tensor representations of our images
    base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))
    style_reference_images = []
    for style_path in style_image_paths:
        style_reference_images.append(K.variable(preprocess_image(style_path)))
    # this will contain our generated image
    combination_image = K.placeholder((1, img_width, img_height, 3))
    image_tensors = [base_image]
    for style_image_tensor in style_reference_images:
        image_tensors.append(style_image_tensor)
    image_tensors.append(combination_image)
    nb_tensors = len(image_tensors)
    nb_style_images = nb_tensors - 2 # Content and Output image not considered
    # combine the various images into a single Keras tensor
    input_tensor = K.concatenate(image_tensors, axis=0)
    shape = (nb_tensors, img_width, img_height, 3)
    ip = Input(tensor=input_tensor, batch_shape=shape)
    return ip
def get_model_and_feature_layers():
    """Build VGG16 or VGG19 over the stacked image tensor and load weights.

    The network is rebuilt layer by layer (rather than using keras.applications)
    so that the pooling type can be swapped via pooling_func and the input can
    be the concatenated content/style/generated tensor from get_input_tensor().
    Pretrained no-top ImageNet weights are downloaded and loaded by layer name.

    Returns:
        (model, feature_layers): the Keras Model and the list of conv-layer
        names used for style feature extraction.
    """
    ip = get_input_tensor()
    # build the VGG16 network with our 3 images as input
    x = Convolution2D(64, (3, 3), activation='relu', name='block1_conv1', padding='same')(ip)
    x = Convolution2D(64, (3, 3), activation='relu', name='block1_conv2', padding='same')(x)
    x = pooling_func(x)
    x = Convolution2D(128, (3, 3), activation='relu', name='block2_conv1', padding='same')(x)
    x = Convolution2D(128, (3, 3), activation='relu', name='block2_conv2', padding='same')(x)
    x = pooling_func(x)
    x = Convolution2D(256, (3, 3), activation='relu', name='block3_conv1', padding='same')(x)
    x = Convolution2D(256, (3, 3), activation='relu', name='block3_conv2', padding='same')(x)
    x = Convolution2D(256, (3, 3), activation='relu', name='block3_conv3', padding='same')(x)
    # VGG19 has one extra conv layer in each of blocks 3, 4, and 5.
    if model_name == "vgg19":
        x = Convolution2D(256, (3, 3), activation='relu', name='block3_conv4', padding='same')(x)
    x = pooling_func(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block4_conv1', padding='same')(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block4_conv2', padding='same')(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block4_conv3', padding='same')(x)
    if model_name == "vgg19":
        x = Convolution2D(512, (3, 3), activation='relu', name='block4_conv4', padding='same')(x)
    x = pooling_func(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block5_conv1', padding='same')(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block5_conv2', padding='same')(x)
    x = Convolution2D(512, (3, 3), activation='relu', name='block5_conv3', padding='same')(x)
    if model_name == "vgg19":
        x = Convolution2D(512, (3, 3), activation='relu', name='block5_conv4', padding='same')(x)
    x = pooling_func(x)
    model = Model(ip, x)
    # Official no-top pretrained weight files for each architecture.
    TF_16_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
    TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
    if model_name == "vgg19":
        weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
        feature_layers = ['block1_conv1', 'block1_conv2',
                          'block2_conv1', 'block2_conv2',
                          'block3_conv1', 'block3_conv2', 'block3_conv3', 'block3_conv4',
                          'block4_conv1', 'block4_conv2', 'block4_conv3', 'block4_conv4',
                          'block5_conv1', 'block5_conv2', 'block5_conv3', 'block5_conv4']
    else:
        weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_16_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
        feature_layers = ['block1_conv1', 'block1_conv2',
                          'block2_conv1', 'block2_conv2',
                          'block3_conv1', 'block3_conv2', 'block3_conv3',
                          'block4_conv1', 'block4_conv2', 'block4_conv3',
                          'block5_conv1', 'block5_conv2', 'block5_conv3' ]
    model.load_weights(weights)
    print('Model loaded.')
    return (model, feature_layers)
# +
# compute the neural style loss
# first we need to define 4 util functions
# Improvement 1
# the gram matrix of an image tensor (feature-wise outer product) using shifted activations
def gram_matrix(x):
    """Gram matrix of a 3-D feature tensor using shifted activations.

    Flattens each channel to a row vector, then computes the feature-wise
    outer product. The `- 1` shift is deliberate ("Improvement 1" from
    http://arxiv.org/abs/1605.04603), not a bug.
    """
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == "th":
        features = K.batch_flatten(x)
    else:
        # Move channels first so each row of `features` is one channel.
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features - 1, K.transpose(features - 1))
    return gram
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination, mask_path=None, nb_channels=None):
    """Style loss between one style image's features and the generated image's.

    Squared Frobenius distance between Gram matrices, with the classic
    normalization. Optional masks restrict the region that contributes.
    Reads the module global content_mask_path.
    NOTE(review): callers pass a full layer *shape* tuple as nb_channels,
    which load_mask unpacks — the parameter name is misleading; confirm.
    """
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    if content_mask_path is not None:
        content_mask = K.variable(load_mask(content_mask_path, nb_channels))
        combination = combination * K.stop_gradient(content_mask)
        del content_mask
    if mask_path is not None:
        style_mask = K.variable(load_mask(mask_path, nb_channels))
        style = style * K.stop_gradient(style_mask)
        if content_mask_path is None:
            combination = combination * K.stop_gradient(style_mask)
        del style_mask
    S = gram_matrix(style)
    C = gram_matrix(combination)
    # Standard Gatys-style normalization: 1 / (4 * channels^2 * size^2).
    channels = 3
    size = img_width * img_height
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """Content loss: scaled sum of squared differences between the content
    image's features and the generated image's features at one layer.

    The global content_loss_type selects the scaling variant (0 = none,
    1 and 2 = the normalizations from http://arxiv.org/abs/1605.04603).
    """
    axis = 0 if K.image_dim_ordering() == "th" else -1
    try:
        n_channels = K.int_shape(base)[axis]
    except TypeError:
        # Static shape unavailable; fall back to the symbolic shape.
        n_channels = K.shape(base)[axis]
    n_pixels = img_width * img_height
    if content_loss_type == 1:
        scale = 1. / (2. * (n_channels ** 0.5) * (n_pixels ** 0.5))
    elif content_loss_type == 2:
        scale = 1. / (n_channels * n_pixels)
    else:
        scale = 1.
    return scale * K.sum(K.square(combination - base))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total-variation regularizer: penalizes differences between neighboring
    pixels so the generated image stays locally coherent."""
    assert K.ndim(x) == 4
    core = x[:, :img_width - 1, :img_height - 1, :]
    shifted_rows = x[:, 1:, :img_height - 1, :]
    shifted_cols = x[:, :img_width - 1, 1:, :]
    row_diff = K.square(core - shifted_rows)
    col_diff = K.square(core - shifted_cols)
    return K.sum(K.pow(row_diff + col_diff, 1.25))
# -
def eval_loss_and_grads(x):
    """Evaluate loss and gradients for a flattened image in one backend call.

    Uses the module globals f_outputs (the compiled Keras function returning
    [loss, grads...]) and img_width/img_height. Returns (loss, flat float64
    gradient array) as scipy's L-BFGS expects.
    """
    x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Caches loss and gradients from a single network evaluation.

    scipy.optimize.fmin_l_bfgs_b wants separate callbacks for the loss and
    its gradient, but computing them separately would run the network twice.
    loss(x) computes and caches both; grads(x) returns (and clears) the
    cached gradient from the preceding loss(x) call.
    """
    def __init__(self):
        self.loss_value = None
        # BUG FIX: the original initialized `self.grads_values`, but every
        # other method reads/writes `self.grad_values`; use one name.
        self.grad_values = None
    def loss(self, x):
        """Compute and cache loss and gradients; return the loss."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value
    def grads(self, x):
        """Return the gradient cached by the preceding loss() call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
# Build the network and collect per-layer symbolic outputs and shapes.
model, feature_layers = get_model_and_feature_layers()
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
evaluator = Evaluator()
# combine the loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[content_layer]
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
# NOTE(review): the content loss is deliberately disabled below, so only
# style and total-variation terms drive the optimization — confirm intent.
#loss += content_weight * content_loss(base_image_features, combination_features)
# +
# Improvement 2
# Use all layers for style feature extraction and reconstruction
# NOTE(review): nb_layers and channel_index are assigned but not used below.
nb_layers = len(feature_layers) - 1
style_masks = []
if style_masks_present:
    style_masks = mask_paths # If mask present, pass dictionary of masks to style loss
else:
    style_masks = [None for _ in range(nb_style_images)] # If masks not present, pass None to the style loss
channel_index = 1 if K.image_dim_ordering() == "th" else -1
# -
# Improvement 3 : Chained Inference without blurring
#AG print("len feature_layers = ",len(feature_layers))
#print("summing up this many style layers: ",(min(args.num_style_layers, len(feature_layers)-1)))
#print("nb_style_image = ",nb_style_images)
layers_to_use = min(num_style_layers, len(feature_layers))
# "Chained" style loss: each term is the difference between style losses at
# consecutive feature layers, weighted geometrically by depth (Improvement 4).
for i in range(layers_to_use-1):
    layer_features = outputs_dict[feature_layers[i]]
    shape = shape_dict[feature_layers[i]]
    combination_features = layer_features[nb_tensors - 1, :, :, :]
    style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
    sl1 = []
    for j in range(nb_style_images):
        #sl1.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
        sl1.append(style_loss(style_reference_features[j], combination_features, None, shape)) # AG
    layer_features = outputs_dict[feature_layers[i + 1]]
    shape = shape_dict[feature_layers[i + 1]]
    combination_features = layer_features[nb_tensors - 1, :, :, :]
    style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
    sl2 = []
    for j in range(nb_style_images):
        # sl2.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
        sl2.append(style_loss(style_reference_features[j], combination_features, None, shape)) # AG
    for j in range(nb_style_images):
        sl = sl1[j] - sl2[j]
        # Improvement 4
        # Geometric weighted scaling of style loss
        loss += (style_weights[j] / (2 ** (layers_to_use- (i + 1)))) * sl
# +
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)
# Compiled backend function returning [loss, grads...] for eval_loss_and_grads.
f_outputs = K.function([combination_image], outputs)
# -
# -
# +
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
# Initialize the image being optimized from the content image or from noise.
if init_image == 'content':
    x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif init_image == 'noise':
    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
else:
    print("Hey! Don't know init_image = ",init_image)
# We require original image if we are to preserve color in YCbCr mode
if preserve_color:
    content = imread(base_image_path, mode="YCbCr")
    content = imresize(content, (img_width, img_height))
    if color_mask_present:
        color_mask_shape = (None, img_width, img_height, None)
        # NOTE(review): load_mask expects a file path as its first argument,
        # but color_mask is None here (set in the constants cell) — this
        # branch looks broken and is unreachable with the current settings;
        # the intended argument is presumably a color-mask path. Confirm.
        color_mask = load_mask(color_mask, color_mask_shape, return_mask_img=True)
    else:
        color_mask = None
else:
    color_mask = None
num_iter = num_iter  # no-op kept from the original argparse-driven script
prev_min_val = -1    # sentinel: no previous loss value yet
improvement_threshold = float(min_improvement)
# -
# Main optimization loop: one L-BFGS round per iteration, saving the image
# after each round and optionally stopping early when progress stalls.
for i in range(num_iter):
    print("Starting iteration %d of %d" % ((i + 1), num_iter))
    start_time = time.time()
    # One L-BFGS round (up to 20 function evaluations) over the image pixels.
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
    if prev_min_val == -1:
        prev_min_val = min_val
    # Percentage improvement of the loss relative to the previous iteration.
    improvement = (prev_min_val - min_val) / prev_min_val * 100
    print("Current loss value:", min_val, " Improvement : %0.3f" % improvement, "%")
    prev_min_val = min_val
    # save current generated image
    img = deprocess_image(x.copy())
    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)
    if not rescale_image:
        # Restore only the aspect ratio at the working width.
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=rescale_method)
    if rescale_image:
        # Restore the original content-image dimensions.
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=rescale_method)
    fname = result_prefix + "at_iteration_%d.png" % (i + 1)
    imsave(fname, img)
    end_time = time.time()
    print("Image saved as", fname)
    print("Iteration %d completed in %ds" % (i + 1, end_time - start_time))
    # BUG FIX: the original compared floats with "is not 0.0", which tests
    # object identity rather than value (and raises a SyntaxWarning on modern
    # Python). Use value comparison so min_improvement == 0 really disables
    # early stopping as intended.
    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." %
                  (improvement, improvement_threshold))
            exit()
| Chapter28-CreativeApplications/Creative-Applications-Notebook-4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Example 3 - Isotropic Bearings, asymmetrical rotor.
# ===========
#
# In this example, we use the rotor seen in Example 5.9.1 from 'Dynamics of Rotating Machinery' by <NAME>, <NAME>, <NAME> & <NAME>, published by Cambridge University Press, 2010.
# The rotor consists of a 1.5-m-long shaft with a diameter of $0.05 m$. The disks are keyed to the shaft at $0.5$ and $1 m$ from
# one end. The left disk is $0.07 m$ thick with a diameter of $0.28 m$; the right disk
# is $0.07 m$ thick with a diameter of $0.35 m$. For the shaft, $E = 211 GN/m^2$ and
# $G = 81.2 GN/m^2$. There is no internal shaft damping. For both the shaft and the
# disks, $\rho = 7,810 kg/m^3$. The shaft is supported by identical bearings at its ends.
#
# These bearings are isotropic and have a stiffness of $1 MN/m$ in both the x and
# y directions. The bearings contribute no additional stiffness to the rotational
# degrees of freedom and there is no damping or cross-coupling in the bearings.
#
from bokeh.io import output_notebook, show
import ross as rs
import numpy as np
output_notebook()
# +
#Classic Instantiation of the rotor
# Build the shaft from six 0.25 m elements (1.5 m total, 0.05 m diameter),
# add the two disks at nodes 2 and 4 (0.5 m and 1 m from the left end),
# and support the ends with identical isotropic 1 MN/m bearings.
shaft_elements = []
bearing_seal_elements = []
disk_elements = []
Steel = rs.Material.use_material('Steel')
for i in range(6):
    shaft_elements.append(rs.ShaftElement(L=0.25, material=Steel, n=i, i_d=0, o_d=0.05))
disk_elements.append(rs.DiskElement.from_geometry(n=2,
                                                  material=Steel,
                                                  width=0.07,
                                                  i_d=0.05,
                                                  o_d=0.28
                                                  )
                     )
disk_elements.append(rs.DiskElement.from_geometry(n=4,
                                                  material=Steel,
                                                  width=0.07,
                                                  i_d=0.05,
                                                  o_d=0.35
                                                  )
                     )
bearing_seal_elements.append(rs.BearingElement(n=0, kxx=1e6, kyy=1e6, cxx=0, cyy=0))
bearing_seal_elements.append(rs.BearingElement(n=6, kxx=1e6, kyy=1e6, cxx=0, cyy=0))
rotor591c = rs.Rotor(shaft_elements=shaft_elements,
                     bearing_seal_elements=bearing_seal_elements,
                     disk_elements=disk_elements,n_eigen = 12)
show(rotor591c.plot_rotor())
# +
#From_section class method instantiation.
# Same rotor defined by sections: three 0.5 m sections, disks at the two
# internal section boundaries (nodes 1 and 2), bearings at nodes 0 and 3.
bearing_seal_elements = []
disk_elements = []
shaft_length_data = 3*[0.5]
i_d = 3*[0]
o_d = 3*[0.05]
disk_elements.append(rs.DiskElement.from_geometry(n=1,
                                                  material=Steel,
                                                  width=0.07,
                                                  i_d=0.05,
                                                  o_d=0.28
                                                  )
                     )
disk_elements.append(rs.DiskElement.from_geometry(n=2,
                                                  material=Steel,
                                                  width=0.07,
                                                  i_d=0.05,
                                                  o_d=0.35
                                                  )
                     )
bearing_seal_elements.append(rs.BearingElement(n=0, kxx=1e6, kyy=1e6, cxx=0, cyy=0))
bearing_seal_elements.append(rs.BearingElement(n=3, kxx=1e6, kyy=1e6, cxx=0, cyy=0))
rotor591fs = rs.Rotor.from_section(brg_seal_data=bearing_seal_elements,
                                   disk_data=disk_elements,leng_data=shaft_length_data,
                                   i_ds_data=i_d,o_ds_data=o_d
                                   )
show(rotor591fs.plot_rotor(plot_type='bokeh'))
# +
#Obtaining results (wn is in rad/s)
# Campbell diagram from 0 to 4000 RPM (converted to rad/s) at 250 points.
show(rotor591c.run_campbell(np.linspace(0,4000*np.pi/30,250)).plot(plot_type='bokeh'))
print('Normal Instantiation =', rotor591c.wn)
print('\n')
print('From Section Instantiation =', rotor591fs.wn)
# +
#Obtaining modal results for w=4000RPM (wn is in rad/s)
speed = 4000*np.pi/30
modal591c = rotor591c.run_modal(speed)
print('Normal Instantiation =', modal591c.wn)
| docs/examples/example_05_09_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Design of a 2D spin Hamiltonian with topological properties
#
# In this notebook, we show how to use QOSY to find spin-$1/2$ Hamiltonians on a 2D kagome lattice that commute with particular Wilson loop operators.
#
# ## The lattice
#
# First, we use QOSY's `Lattice` class to construct a 2D kagome lattice:
# +
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import qosy as qy
# Lattice spacing
a = 1.0
# Lattice vectors
a1 = a * np.array([1.0, 0.0])
a2 = a * np.array([1.0/2.0, np.sqrt(3.0)/2.0])
# Positions of the three sites in a primitive unit cell.
r1 = np.zeros(2)
r2 = a1 / 2.0
r3 = a2 / 2.0
positions = [r3, r2, a1]
# Recenter the unit cell about the center of mass.
center_pos = -r2+a1+r3
positions = [pos-center_pos for pos in positions]
unit_cell = qy.UnitCell([a1, a2])
for pos in positions:
    unit_cell.add_atom(pos)
# Create a lattice with a single unit cell.
lattice = qy.Lattice(unit_cell, (1,1), periodic_boundaries=(True,True))
# Rotation by 2.0*pi/6
theta = 2.0*np.pi/6.0
Rmat = np.array([[np.cos(theta), -np.sin(theta)],\
                 [np.sin(theta), np.cos(theta)]])
# Reflection about x=0
# NOTE(review): permutation2 is computed but never used below — confirm
# whether it is leftover scaffolding.
permutation2 = [lattice.index(np.array([-r[0], r[1]]), orbital_name) for (r, orbital_name,_) in lattice]
Smat = np.array([[-1,0],[0,1]],dtype=float)
# D6 point-group generators: 60-degree rotation and x -> -x reflection.
point_group_generators = [Rmat, Smat]
# Number of times to expand unit cell.
num_expansions = 3 # 1
# Symmetrized and expanded lattice.
lattice = qy.symmetrize_lattice(lattice, point_group_generators, num_expansions=num_expansions)
print('num_orbitals = {}'.format(len(lattice)))
# Plot the lattice for reference.
qy.plot(lattice, with_labels=True)
qy.show()
# -
# ## Choose a basis of operators
#
# Next, we build our basis of quantum operators that we would like to build Hamiltonians from. The `distance_basis` function provides a convenient way to consider a basis of $k$-local operator strings with support on orbitals up to a distance $R$ away on a lattice.
# +
num_orbitals = len(lattice)
orbitals = np.arange(num_orbitals)
# Consider k-local operators.
# Operator strings acting on exactly 3 or 6 sites.
k = [3,6]
# Consider operator strings on orbitals separated
# up to a distance of R away from one another.
R = 2.0/np.sqrt(3.0)
basis = qy.distance_basis(lattice, k, R, 'Pauli')
print(len(basis))
# -
# ## Specify the desired spatial symmetries
# Then, we specify our spatial symmetries. We use the generators of those symmetries to directly symmetrize the basis of operators. For this lattice, we have translational and $D_6$ symmetries.
# +
# Translations
# Translations by the two primitive lattice vectors (identity point-group part).
T1 = qy.space_group_symmetry(lattice, np.eye(2), a1)
T2 = qy.space_group_symmetry(lattice, np.eye(2), a2)
# Rotation
theta = 2.0*np.pi/6.0
Rmatp = np.array([[np.cos(theta), -np.sin(theta)],\
                  [np.sin(theta), np.cos(theta)]])
Rt = qy.space_group_symmetry(lattice, Rmatp, np.zeros(2))
# Reflection
St = qy.space_group_symmetry(lattice, Smat, np.zeros(2))
# Symmetrize the operator basis under the full generated group.
group_generators = [T1, T2, Rt, St]
sym_basis = qy.symmetrize_basis(basis, group_generators)
print(len(sym_basis))
# -
# ## Specify the desired non-spatial symmetries
#
# Then, we specify integrals of motion that we would like our Hamiltonian to commute with. We will consider Pauli string operators (or Wilson loops) so that our Hamiltonians will have topological properties.
#
# ### String operator function
#
# This is a function we used to define our Wilson loops.
# +
import numpy.linalg as nla
def string_operator(lattice, op_positions, op_names):
    """Build a coefficient-1.0 Pauli-string Operator on the lattice orbitals
    at `op_positions`, with the orbital labels sorted in increasing order."""
    labels = [lattice.index(pos, '') for pos in op_positions]
    # Pair each label with its Pauli name and order the pairs by label.
    ordered = sorted(zip(labels, op_names), key=lambda pair: pair[0])
    sorted_labels = [label for label, _ in ordered]
    sorted_names = [name for _, name in ordered]
    pauli_string = qy.OperatorString(sorted_names, sorted_labels, 'Pauli')
    return qy.Operator([1.0], [pauli_string])
# -
# ### Wilson loops
#
# Here we define $X, Y, Z$ Wilson loops that loop non-trivially around the torus.
# +
# %matplotlib notebook
# Starting orbital for the loop, and the six half-lattice-spacing steps that
# trace a path across the kagome lattice back to an equivalent site.
pos0 = lattice._orbitals[0][0]
delta1 = 0.5 * a * np.array([1.0, 0.0])
delta2 = 0.5 * a * np.array([1.0/2.0, np.sqrt(3.0)/2.0])
delta3 = 0.5 * a * np.array([1.0/2.0, np.sqrt(3.0)/2.0])
delta4 = 0.5 * a * np.array([1.0, 0.0])
delta5 = 0.5 * a * np.array([1.0/2.0, -np.sqrt(3.0)/2.0])
delta6 = 0.5 * a * np.array([1.0/2.0, -np.sqrt(3.0)/2.0])
# These define two different strings across the Kagome lattice
deltasA = [delta1, delta2, delta3, delta4, delta5, delta6]
deltasB = [delta1]*6
deltas = deltasB
theta2 = np.pi/3
Rmatp2 = np.array([[np.cos(theta2), -np.sin(theta2)],\
                   [np.sin(theta2), np.cos(theta2)]])
# Walk along the chosen step pattern until we wrap back to pos0 (periodic
# boundaries), collecting the positions the string passes through.
op_positions = [np.copy(pos0)]
ind_step = 1
current_pos = pos0 + delta1
while lattice.distance(current_pos, pos0) > 1e-12:
    op_positions.append(np.copy(current_pos))
    current_pos += deltas[(ind_step % 6)]
    ind_step += 1
lattice_positions = [lattice._orbitals[ind_orb][0] for ind_orb in range(len(lattice))]
qy.plot(lattice, with_labels=False, with_lattice_vectors=False)
# Build one Wilson loop per Pauli flavor along the collected positions.
wilson_loops = []
for op_name in ['X', 'Y', 'Z']:
    op_names = [op_name] * len(op_positions)
    op = string_operator(lattice, op_positions, op_names)
    print(op)
    wilson_loops.append(op)
# Snap each loop position to the matching lattice orbital for plotting.
plot_op_positions = []
for pos1 in op_positions:
    for pos2 in lattice_positions:
        if lattice.distance(pos1, pos2) < 1e-14:
            plot_op_positions.append(pos2)
            break
xs = [pos[0] for pos in plot_op_positions]
ys = [pos[1] for pos in plot_op_positions]
for (x,y) in zip(xs,ys):
    plt.text(x,y,'Z', fontsize=26, fontweight='heavy', color='r', horizontalalignment='center', verticalalignment='center_baseline')
#plt.plot(xs, ys, 'gX', markersize=14)
# A second rotated parity string
# Rotate the first string by 60 degrees to get a second, inequivalent loop.
op_positions2 = [np.dot(Rmatp2, pos) for pos in plot_op_positions]
op = string_operator(lattice, op_positions2, op_names)
print(op)
wilson_loops.append(op)
plot_op_positions2 = []
for pos1 in op_positions2:
    for pos2 in lattice_positions:
        if lattice.distance(pos1, pos2) < 1e-14:
            plot_op_positions2.append(pos2)
            break
xs = [pos[0] for pos in plot_op_positions2]
ys = [pos[1] for pos in plot_op_positions2]
for (x,y) in zip(xs,ys):
    plt.text(x,y,'X', fontsize=26, fontweight='heavy', color='b', horizontalalignment='center', verticalalignment='center_baseline')
#plt.plot(xs, ys, 'bX', markersize=14)
plt.axis('off')
plt.show()
# -
# ## Generate the symmetric operators
#
# Finally, we generate Hamiltonians with the desired symmetries!
#
# All we need to do is input the basis and desired symmetries into the `SymmetricOperatorGenerator` then call the `generate` method and examine the output stored in the object.
# +
# Specify the symmetries.
# The Wilson loops are the non-spatial integrals of motion the Hamiltonian
# must commute with.
symmetries = wilson_loops
# Define the generator using a basis of operator.
generator = qy.SymmetricOperatorGenerator(sym_basis)
# Add the symmetries to the generator.
for symmetry in symmetries:
    generator.add_symmetry(symmetry)
# Generate the Hamiltonians.
generator.generate()
# -
# We found six operators that have the desired spatial symmetries and commute with the Wilson loops.
#
# These symmetric Hamiltonians are:
# +
# Take the operators satisfying all symmetries (last projection stage) and
# normalize each by its largest coefficient for readability.
result_ops = generator.projected_output_operators[-1]
result_ops = [op*(1.0/op.norm(order=np.inf)) for op in result_ops]
qy.print_operators(result_ops, norm_order=np.inf, keywords=[' 0 '])
# +
# %matplotlib notebook
# Plot one of these operators for reference.
inds_plot = [5]
qy.plot(lattice, with_labels=False, with_lattice_vectors=False)
for ind_plot in inds_plot:
    op = result_ops[ind_plot]
    qy.plot_operator(op, lattice, distance_cutoff=R)
plt.axis('off')
qy.show()
# -
# Three of these Hamiltonians are equal sums of three-site $X$, $Y$, or $Z$ operators on the triangle plaquettes of the lattice. The other three are equal sums of six-site $X$, $Y$, or $Z$ operators on the hexagon plaquettes of the lattice.
| tutorials/inverse_spin_kagome_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction To GradCAM (Part 1) - Lecture Notebook
# In this lecture notebook we'll be looking at an introduction to Grad-CAM, a powerful technique for interpreting Convolutional Neural Networks. Grad-CAM stands for Gradient-weighted Class Activation Mapping.
#
# CNN's are very flexible models and their great predictive power comes at the cost of losing interpretability (something that is true for all Artificial Neural Networks). Grad-CAM attempts to solve this by giving us a graphical visualisation of parts of an image that are the most relevant for the CNN when predicting a particular class.
#
# Aside from working on some Grad-CAM concepts we'll also look at how we can use Keras to access some concrete information of our model. Let's dive into it!
import keras
from keras import backend as K
from util import *
# The `load_C3M3_model()` function has been taken care of and its internals are out of the scope of this notebook. But if it intrigues you, you can take a look at it in `util.py`
# Load the model we are going to be using
model = load_C3M3_model()
# As you may already know, we can check the architecture of our model using the `summary()` method.
#
# After running the code block below we’ll see that this model has a lot of layers. One advantage of Grad-CAM over previous attempts of interpreting CNN's (such as CAM) is that it is architecture agnostic. This means it can be used for CNN's with complex architectures such as this one:
# Print all of the model's layers
model.summary()
# Keras models include abundant information about the elements that make them up. You can check all of the available methods and attributes of this class by using the `dir()` method:
# Printing out methods and attributes for Keras model
print(f"Keras' models have the following methods and attributes: \n\n{dir(model)}")
# Wow, this certainly is a lot! These models are indeed very complex.
#
# What we are interested in are the layers of the model which can be easily accessed as an attribute using the dot notation. They are a list of layers, which can be confirmed by checking its type:
# Check the type of the model's layers
type(model.layers)
# Print 5 first layers along with their names
for i in range(5):
l = model.layers[i]
print(f"Layer number {i}: \n{l} \nWith name: {l.name} \n")
# Let's check how many layers our model has:
# Print number of layers in our model
print(f"The model has {len(model.layers)} layers")
# Our main goal is interpreting the representations which the neural net is creating for classifying our images. But as you can see this architecture has many layers.
#
# Actually we are really interested in the representations that the convolutional layers produce because these are the layers that (hopefully) recognize concrete elements within the images. We are also interested in the "concatenate" layers because in our model's architecture they concatenate convolutional layers.
#
# Let's check how many of those we have:
# Number of layers that are of type "Convolutional" or "Concatenate"
len([l for l in model.layers if ("conv" in str(type(l))) or ("Concatenate" in str(type(l)))])
# This number is still very big to try to interpret each one of these layers individually.
#
# One characteristic of CNN's is that the earlier layers capture low-level features such as edges in an image while the deeper layers capture high-level concepts such as physical features of a "Cat".
#
# Because of this **Grad-CAM usually focuses on the last layers, as they provide a better picture of what the network is paying attention to when classifying a particular class**. Let's grab the last concatenate layer of our model. Luckily Keras API makes this quite easy:
# +
# Save the desired layer in a variable
layer = model.layers[424]
# Print layer
layer
# -
# This approach is not the best since we will need to know the exact index of the desired layer. Luckily we can use the `get_layer()` method in conjunction with the layer's name to get the same result.
#
# Remember you can get the name from the information displayed earlier with the `summary()` method.
# +
# Save the desired layer in a variable
layer = model.get_layer("conv5_block16_concat")
# Print layer
layer
# -
# Let's check what methods and attributes we have available when working with this layer:
# Printing out methods and attributes for Keras' layer
print(f"Keras' layers have the following methods and attributes: \n\n{dir(layer)}")
# Since we want to know the representations which this layer is abstracting from the images we should be interested in the output from this layer. Luckily we have this attribute available:
# Print layer's output
layer.output
# Do you notice something odd? The shape of this tensor is undefined for some dimensions. This is because this tensor is just a placeholder and it doesn't really contain information about the activations that occurred in this layer.
#
# To compute the actual activation values given an input we will need to use a **Keras function**.
#
# This function accepts lists of input and output placeholders and can be used with an actual input to compute the respective output of the layer associated to the placeholder for that given input.
#
# Before jumping onto the Keras function we should rewind a little bit to get the placeholder tensor associated with the input. You can get this from the model’s input:
# Print model's input tensor placeholder
model.input
# We can see that this is a placeholder as well. Now let's instantiate our Keras function using Keras backend. Please be aware that this **function expects its arguments as lists or tuples**:
# +
# Instantiate the function to compute the activations of the last convolutional layer
last_layer_activations_function = K.function([model.input], [layer.output])
# Print the Keras function
last_layer_activations_function
# -
# Let's test the functions for computing the last layer activation which we just defined on a particular image. Don't worry about the code to load the image, this has been taken care of for you. You should only care that an image ready to be processed will be saved in the x variable:
# +
# Load dataframe that contains information about the dataset of images
df = pd.read_csv("nih_new/train-small.csv")
# Path to the actual image
im_path = 'nih_new/images-small/00000599_000.png'
# Load the image and save it to a variable
x = load_image(im_path, df, preprocess=False)
# Display the image
plt.imshow(x, cmap = 'gray')
plt.show()
# -
# We should normalize this image before going forward, this has also been taken care of:
# +
# Calculate mean and standard deviation of a batch of images
mean, std = get_mean_std_per_batch(df)
# Normalize image
x = load_image_normalize(im_path, mean, std)
# -
# Now we have everything we need to compute the actual values of the last layer activations. In this case we should also **provide the input as a list or tuple**:
# Run the function on the image and save it in a variable
actual_activations = last_layer_activations_function([x])
# An important intermediary step is to trim the batch dimension which can be done like this. This is necessary because we are applying Grad-CAM to a single image rather than to a batch of images:
# Remove batch dimension
actual_activations = actual_activations[0][0, :]
# +
# Print shape of the activation array
print(f"Activations of last convolutional layer have shape: {actual_activations.shape}")
# Print activation array
actual_activations
# -
# Looks like everything worked out nicely! This is all for this lecture notebook (Grad-CAM Part 1). In Part 2 we will see how to calculate the gradients of the model's output with respect to the activations in this layer. This is the "Grad" part of Grad-CAM.
# **Congratulations on finishing this lecture notebook!** Hopefully you will now have a better understanding of how to leverage Keras's API power for computing activations in specific layers. Keep it up!
| AI for Medical Treatment/Week 3/lecture notebooks/AI4M_C3_M3_lecture_notebook_gradcam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data prep for image recognition
#
# The purpose of this short notebook is to introduce the most basic features of the OpenCV library, focusing on features that will make it possible to use intelligent APIs on image data. We'll then see how to use a pretrained object detection model to find real-world objects in images.
import cv2
import numpy as np
# The first thing we'll try is reading an image from a file. OpenCV makes it easy to decode popular image formats, and this notebook has access to an image file we can read.
# +
def loadImage(f):
    """Read the image file *f* and return it as an RGB pixel array.

    OpenCV decodes images into BGR channel order, while pyplot's
    ``imshow`` expects RGB, so the channels are swapped after decoding.
    """
    bgr = cv2.imread(f, cv2.IMREAD_COLOR)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
img = loadImage("otto.jpg")
# -
# ## Working with images as arrays
#
# This will get us a `numpy` array containing the pixels from a picture of a confused schnauzer who did not expect to wind up unable to get out of the clothes basket.
#
# We can look at the size of the array:
img.shape
# We can examine the image itself by plotting it.
# %matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(img)
# While our focus is on using pretrained models, if we were training a model, it may be useful to transform, blur, or resize images in order to generate more training data from a few images. Since our images are `numpy` arrays, this is relatively straightforward in general, but OpenCV provides functions to make these tasks even easier. We'll see how to
#
# - blur an input image with a 15x15 box blur,
# - resize an image and interpolate between pixels in the source data, and
# - rotate an image without calculating a transformation matrix
#
# First, let's look at box blur:
plt.imshow(cv2.blur(img, (15,15)))
# We can also scale the image by a factor of 3 on both axes (notice the difference in the axes on the plotted image, even though the size doesn't change).
plt.imshow(cv2.resize(img, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC))
# It's also possible to stretch the image by scaling along axes differently:
plt.imshow(cv2.resize(img, None, fx=2.5, fy=3, interpolation=cv2.INTER_CUBIC))
# We can also rotate the image. Recall that rotation is an affine tranformation on image matrices. OpenCV provides a function to calculate the transformation matrix, given a point to rotate around, an angle of rotation, and a scaling factor. Here we'll rotate the image around its center by 15 degrees while scaling by 1.3x.
rows, cols, _ = img.shape
center = (cols / 2, rows / 2)
angle = 15 # degrees
scale = 1.3
rotationMatrix = cv2.getRotationMatrix2D(center, angle, scale)
plt.imshow(cv2.warpAffine(img, rotationMatrix, (cols, rows)))
# ## Working with image data in byte arrays
#
# In many non-batch applications, we won't be actually processing _files_; instead, we'll be dealing with binary data, whether passed as a base64-encoded string to a HTTP request or stored in a blob as part of structured data on a stream. OpenCV is able to decode this raw binary data just as it is able to decode files; this last part of the notebook will show you how to do it.
#
# We'll start by getting a Python `bytearray` with the contents of a file. Notice that, while we have a JPEG file, we aren't storing the file type anywhere.
with open("otto.jpg", "rb") as f:
img_bytes = bytearray(f.read())
# Now that we have a `bytearray` of the file's contents, we'll convert that into a flat NumPy array:
imgarr = np.asarray(img_bytes, dtype=np.uint8)
imgarr
# The OpenCV `imdecode` function will inspect this flat array and parse it as an image, inferring the right type and dimensions and returning a multidimensional array with an appropriate shape.
# +
# decode byte array as image
img2 = cv2.imdecode(imgarr, cv2.IMREAD_COLOR)
# convert BGR to RGB
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# -
# We then have a multidimensional array that we can use just as we did the image we read from a file.
plt.imshow(img2)
# ## Image intensities
#
# We can also plot histograms for each channel of the image. (This example code is taken from the [OpenCV documentation](https://docs.opencv.org/3.1.0/d1/db7/tutorial_py_histogram_begins.html).) You can see that the image of the dog is underexposed.
# Compute and plot a 256-bin intensity histogram for each RGB channel,
# drawing each curve in its channel's own color.
for i, color in enumerate(["r", "g", "b"]):
    # cv2.calcHist(images, channels, mask, histSize, ranges): channel i of
    # img, no mask, 256 bins covering pixel values [0, 256).
    histogram = cv2.calcHist([img], [i], None, [256], [0, 256])
    plt.plot(histogram, color=color)
plt.xlim([0, 256])
plt.show()
# # Object detection with pretrained models
#
# Now that we've seen how to use some of the basic capabilities of OpenCV to parse image data into a matrix of pixels -- and then to perform useful image transformations and analyses on this matrix -- we're ready to see how to use a pretrained model to identify objects in real images.
#
# We'll use a pretrained [YOLO](https://pjreddie.com/darknet/yolo/) ("you only look once") model and we'll load and score that model with the [darkflow](https://github.com/thtrieu/darkflow/) library, which is built on TensorFlow.
#
# One of the key themes of this workshop is that you don't need a deep understanding of the techniques behind off-the-shelf models for language processing or image recognition in order to make use of them in your applications, but YOLO is a cool technique, so if you want to learn more about it, here's where to get started:
#
# - [this paper](https://pjreddie.com/media/files/papers/yolo_1.pdf) explains the first version of YOLO and the basic technique,
# - [this presentation](https://www.youtube.com/watch?v=NM6lrxy0bxs) presents the basics of the paper in a thirteen-minute video, and
# - [this paper](http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc09.pdf) provides a deeper dive into object detection (including some details on the mAP metric for evaluating classifier quality)
#
# YOLO is so-called because previous object-detection techniques repeatedly ran image classifiers on multiple overlapping windows of an image; by contrast, YOLO "only looks once," identifying image regions that might contain an interesting object and then identifying which objects those regions might contain in a single pass. It can be much faster than classic approaches; indeed, it can run in real time or faster with GPU acceleration.
#
# ## Loading our model
#
# We'll start by loading a pretrained model architecture and model weights from files:
from darkflow.net.build import TFNet
options = {"model": "cfg/yolo.cfg", "load": "/data/yolo.weights", "threshold" : 0.1}
yolo = TFNet(options)
# Our next step is to use the model to identify some objects in an image. We'll start with the dog image. The `return_predict` method will return a list of predictions, each with a visual object class, a confidence score, and a bounding box.
predictions = yolo.return_predict(img)
predictions
# To be fair, most dogs spend a lot of time on sofas.
#
# It is often useful to visualize what parts of the image were identified as objects. We can use OpenCV to annotate the bounding boxes of each identified object in the image with the `cv2.rectangle` function. Since this is destructive, we'll work on a copy of the image.
def annotate(img, predictions, thickness=None):
    """Return a copy of ``img`` with a white bounding box drawn around
    each identified object.

    Parameters:
        img: image as a numpy array; left unmodified (we draw on a copy).
        predictions: list of darkflow prediction dicts, each carrying
            "topleft" and "bottomright" corner coordinate dicts.
        thickness: rectangle line thickness in pixels; if None, it is
            scaled to roughly 1% of the image's larger dimension.
    """
    annotated_img = np.copy(img)
    if thickness is None:
        # Clamp to at least 1: cv2.rectangle rejects thickness == 0, which
        # the integer scaling would produce for images under 100 px.
        thickness = max(1, int(max(img.shape[0], img.shape[1]) / 100))
    for prediction in predictions:
        tl = prediction["topleft"]
        topleft = (tl["x"], tl["y"])
        br = prediction["bottomright"]
        bottomright = (br["x"], br["y"])
        # draw a white rectangle around the identified object
        white = (255, 255, 255)
        cv2.rectangle(annotated_img, topleft, bottomright, color=white, thickness=thickness)
    return annotated_img
plt.imshow(annotate(img, predictions))
# ## Trying it out with other images
#
# We can try this technique out with other images as well. The test images we have are from the [Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html) and are licensed under CC-BY-SA. Some of these results are impressive and some are unintentionally hilarious! Try it out and see if you can figure out why certain false positives show up.
# +
from ipywidgets import interact
from os import listdir
def predict(imageFile):
    """Run the YOLO detector on one image from /data/images/, display the
    annotated result inline, and return the raw prediction list."""
    path = "/data/images/" + imageFile
    frame = loadImage(path)
    detections = yolo.return_predict(frame)
    plt.imshow(annotate(frame, detections, thickness=5))
    return detections
interact(predict, imageFile = listdir("/data/images/"))
# -
# # Training custom models
#
# It's outside the scope of this workshop (both in terms of time and content), but you've actually learned a lot of skills in this notebook that are applicable to training custom object detection models (e.g., to identify new kinds of objects). Here's how you can get started.
#
# 1. You'll need some labeled data; for object detection, this is going to be image files annotated with the bounding boxes and object classes of real-world object pictured in those images. Good places to start are the [Pascal VOC 2012 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#data) or the [COCO dataset](http://cocodataset.org/).
# 2. If you want to identify new object classes (for example, a corporate logo), you'll need to add labeled images that contain these object classes. Since you may not have many example images for the new object classes, you may want to generate synthetic images to augment your training set; there are [many approaches ranging from rotating and scaling input data to using neural networks to generate new examples](https://arxiv.org/pdf/1712.04621.pdf). You already know how to transform and rotate images, of course!
# 3. Actually training the model will depend on what framework you ultimately want to use for the project; [here are the instructions for Darkflow](https://github.com/thtrieu/darkflow#training-on-your-own-dataset).
| notebooks/opencv-basics.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # TikzGraphs
# This library uses the LaTeX package [pgf/tikz](http://www.ctan.org/pkg/pgf) to produce graphs. It integrates with IJulia, outputting SVG images to the notebook.
# ## Installation
using Pkg
Pkg.add("TikzGraphs")
# In addition, you will need to install the following dependencies if you do not already have them on your system.
# * Pdf2svg. This is required by TikzPictures. On Ubuntu, you can get this by running `sudo apt-get install pdf2svg`. On Windows, you can download the binaries from http://www.cityinthesky.co.uk/opensource/pdf2svg/. Be sure to add pdf2svg to your path (and restart).
# * Pgf (version 3.0 or later). Install using your latex package manager (e.g., texlive or miktex).
#
# Once these things are installed, you should be able to run the following:
using TikzGraphs
# ## Examples
using TikzGraphs
using LightGraphs
g = DiGraph(4)
add_edge!(g, 1, 2)
add_edge!(g, 2, 3)
TikzGraphs.plot(g)
add_edge!(g, 3, 4)
add_edge!(g, 1, 4)
TikzGraphs.plot(g)
# You can save your graphs to PDF, SVG, and TEX.
t = TikzGraphs.plot(g)
using TikzPictures # this is required for saving
TikzPictures.save(PDF("graph"), t)
TikzPictures.save(SVG("graph"), t)
TikzPictures.save(TEX("graph"), t)
# ## Labels
# You can also specify the node labels.
TikzGraphs.plot(g, ["A", "B", "C", "D"])
# You can even use unicode.
TikzGraphs.plot(g, ["α", "β", "γ", "δ"])
# You can also have latex labels.
using LaTeXStrings
TikzGraphs.plot(g, [L"\int_0^\infty f(x) dx", L"\sqrt{2}", L"x^2", L"\frac{1}{2}"])
# You can have repeated labels.
TikzGraphs.plot(g, ["α", "β", "γ", "α"])
# You can specify the style for the nodes.
TikzGraphs.plot(g, ["α", "β", "γ", "α"], node_style="draw, rounded corners, fill=blue!10")
# You can override the styles of selected nodes using a dictionary.
TikzGraphs.plot(g, ["α", "β", "γ", "α"], node_style="draw, rounded corners, fill=blue!10", node_styles=Dict(1=>"fill=green!10",3=>"fill=yellow!10"))
# ## Edges
# You can set edge labels using a dictionary.
TikzGraphs.plot(g, ["α", "β", "γ", "α"], edge_labels=Dict((1,2)=>"x", (1,4)=>"y"))
# You can set the style for the edges.
TikzGraphs.plot(g, ["α", "β", "γ", "α"], edge_labels=Dict((1,2)=>"x", (1,4)=>"y"), edge_style="green")
# You can override the style for specific edges as specified by a dictionary.
TikzGraphs.plot(g, edge_labels=Dict((1,2)=>"x", (1,4)=>"y"), edge_style="green", edge_styles=Dict((1,2)=>"blue"))
# You can draw self-loop edges by specifying the edge style.
loop_g = deepcopy(g)
add_edge!(loop_g, (4,4))
TikzGraphs.plot(loop_g, edge_labels=Dict((1,2)=>"x", (1,4)=>"y", (4,4)=>"loop"), edge_styles=Dict((4,4)=>"loop right"))
# ## Layouts
# You can use different layouts (currently just Layered [default], Spring, and SimpleNecklace are supported).
TikzGraphs.plot(g, Layouts.Layered())
TikzGraphs.plot(g, Layouts.Spring())
# You can set the random seed for the Spring layout.
TikzGraphs.plot(g, Layouts.Spring(randomSeed=52))
TikzGraphs.plot(g, Layouts.SimpleNecklace())
# ## Options
TikzGraphs.plot(g, node_style="draw", options="scale=2, font=\\huge\\sf")
# ## Future Plans
# Gradually, more functionality from pgf/tikz will be migrated into this package.
| doc/TikzGraphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The importance of constraints
#
# Constraints determine which potential adversarial examples are valid inputs to the model. When determining the efficacy of an attack, constraints are everything. After all, an attack that looks very powerful may just be generating nonsense. Or, perhaps more nefariously, an attack may generate a real-looking example that changes the original label of the input. That's why you should always clearly define the *constraints* your adversarial examples must meet.
#
# ### Classes of constraints
#
# TextAttack evaluates constraints using methods from three groups:
#
# - **Overlap constraints** determine if a perturbation is valid based on character-level analysis. For example, some attacks are constrained by edit distance: a perturbation is only valid if it perturbs some small number of characters (or fewer).
#
# - **Grammaticality constraints** filter inputs based on syntactical information. For example, an attack may require that adversarial perturbations do not introduce grammatical errors.
#
# - **Semantic constraints** try to ensure that the perturbation is semantically similar to the original input. For example, we may design a constraint that uses a sentence encoder to encode the original and perturbed inputs, and enforce that the sentence encodings be within some fixed distance of one another. (This is what happens in subclasses of `textattack.constraints.semantics.sentence_encoders`.)
# ### A new constraint
#
# To add our own constraint, we need to create a subclass of `textattack.constraints.Constraint`. We can implement one of two functions, either `_check_constraint` or `_check_constraint_many`:
#
# - `_check_constraint` determines whether candidate `TokenizedText` `transformed_text`, transformed from `current_text`, fulfills a desired constraint. It returns either `True` or `False`.
# - `_check_constraint_many` determines whether each of a list of candidates `transformed_texts` fulfill the constraint relative to `current_text`. This is here in case your constraint can be vectorized. If not, just implement `_check_constraint`, and `_check_constraint` will be executed for each `(transformed_text, current_text)` pair.
# ### A custom constraint
#
#
# For fun, we're going to see what happens when we constrain an attack to only allow perturbations that substitute out a named entity for another. In linguistics, a **named entity** is a proper noun, the name of a person, organization, location, product, etc. Named Entity Recognition is a popular NLP task (and one that state-of-the-art models can perform quite well).
#
#
# ### NLTK and Named Entity Recognition
#
# **NLTK**, the Natural Language Toolkit, is a Python package that helps developers write programs that process natural language. NLTK comes with predefined algorithms for lots of linguistic tasks– including Named Entity Recognition.
#
# First, we're going to write a constraint class. In the `_check_constraints` method, we're going to use NLTK to find the named entities in both `current_text` and `transformed_text`. We will only return `True` (that is, our constraint is met) if `transformed_text` has substituted one named entity in `current_text` for another.
#
# Let's import NLTK and download the required modules:
import nltk
nltk.download('punkt') # The NLTK tokenizer
nltk.download('maxent_ne_chunker') # NLTK named-entity chunker
nltk.download('words') # NLTK list of words
# ### NLTK NER Example
#
# Here's an example of using NLTK to find the named entities in a sentence:
# +
sentence = ('In 2017, star quarterback <NAME> led the Patriots to the Super Bowl, '
'but lost to the Philadelphia Eagles.')
# 1. Tokenize using the NLTK tokenizer.
tokens = nltk.word_tokenize(sentence)
# 2. Tag parts of speech using the NLTK part-of-speech tagger.
tagged = nltk.pos_tag(tokens)
# 3. Extract entities from tagged sentence.
entities = nltk.chunk.ne_chunk(tagged)
print(entities)
# -
# It looks like `nltk.chunk.ne_chunk` gives us an `nltk.tree.Tree` object where named entities are also `nltk.tree.Tree` objects within that tree. We can take this a step further and grab the named entities from the tree of entities:
# 4. Filter entities to just named entities.
named_entities = [entity for entity in entities if isinstance(entity, nltk.tree.Tree)]
print(named_entities)
# ### Caching with `@functools.lru_cache`
#
# A little-known feature of Python 3 is `functools.lru_cache`, a decorator that allows users to easily cache the results of a function in an LRU cache. We're going to be using the NLTK library quite a bit to tokenize, parse, and detect named entities in sentences. These sentences might repeat themselves. As such, we'll use this decorator to cache named entities so that we don't have to perform this expensive computation multiple times.
# ### Putting it all together: getting a list of Named Entity Labels from a sentence
#
# Now that we know how to tokenize, parse, and detect named entities using NLTK, let's put it all together into a single helper function. Later, when we implement our constraint, we can query this function to easily get the entity labels from a sentence. We can even use `@functools.lru_cache` to try and speed this process up.
# +
import functools
@functools.lru_cache(maxsize=2**14)
def get_entities(sentence):
    """Tokenize *sentence*, POS-tag it, run NLTK's binary named-entity
    chunker, and return the flattened list of ``(word, tag)`` leaves.

    Results are LRU-cached because the same sentence may be analyzed
    many times, and NER is comparatively expensive.
    """
    words = nltk.word_tokenize(sentence)
    pos_tagged = nltk.pos_tag(words)
    # binary=True collapses the detailed categories ('Organization',
    # 'Geo-Political Entity', ...) into a single generic NE label.
    chunked = nltk.chunk.ne_chunk(pos_tagged, binary=True)
    return chunked.leaves()
# -
# And let's test our function to make sure it works:
sentence = '<NAME> starred in the 2003 film classic "School of Rock".'
get_entities(sentence)
# We flattened the tree of entities, so the return format is a list of `(word, entity type)` tuples. For non-entities, the `entity_type` is just the part of speech of the word. `'NNP'` is the indicator of a named entity (a proper noun, according to NLTK). Looks like we identified three named entities here: 'Jack' and 'Black', 'School', and 'Rock'. (Seems that the labeler thinks Rock is the name of a place, a city or something.) Whatever technique NLTK uses for named entity recognition may be a bit rough, but it did a pretty decent job here!
# ### Creating our NamedEntityConstraint
#
# Now that we know how to detect named entities using NLTK, let's create our custom constraint.
# +
from textattack.constraints import Constraint
class NamedEntityConstraint(Constraint):
    """A constraint that ensures ``transformed_text`` only substitutes named
    entities from ``current_text`` with other named entities.

    Entity labels come from :func:`get_entities`, so a swapped word is
    accepted only if both the original and the replacement are tagged
    ``'NNP'`` or ``'NE'`` by NLTK.
    """
    def _check_constraint(self, transformed_text, current_text):
        """Return True iff every word that differs between the two texts is
        a named-entity-for-named-entity substitution."""
        transformed_entities = get_entities(transformed_text.text)
        current_entities = get_entities(current_text.text)
        # If there aren't named entities, let's return False (the attack
        # will eventually fail).
        if len(current_entities) == 0:
            return False
        if len(current_entities) != len(transformed_entities):
            # If the two sentences have a different number of entities, then
            # they definitely don't have the same labels. In this case, the
            # constraint is violated, and we return False.
            return False
        # Compare all of the words, in order. Any position where the words
        # differ is a substitution made between `current_text` and
        # `transformed_text`, and both words there must be named entities
        # to fulfill our constraint.
        for (word_1, label_1), (word_2, label_2) in zip(current_entities, transformed_entities):
            if word_1 != word_2:
                if (label_1 not in ['NNP', 'NE']) or (label_2 not in ['NNP', 'NE']):
                    return False
        # If we get here, all of the labels match up. Return True!
        return True
# -
# ### Testing our constraint
#
# We need to create an attack and a dataset to test our constraint on. We went over all of this in the transformations tutorial, so let's gloss over this part for now.
# +
# Import the model
import transformers
from textattack.models.tokenizers import AutoTokenizer
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/albert-base-v2-yelp-polarity")
model.tokenizer = AutoTokenizer("textattack/albert-base-v2-yelp-polarity")
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model)
# Import the dataset
from textattack.datasets import HuggingFaceNlpDataset
dataset = HuggingFaceNlpDataset("yelp_polarity", None, "test")
# +
from textattack.transformations import WordSwapEmbedding
from textattack.search_methods import GreedySearch
from textattack.shared import Attack
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
# We're going to the `WordSwapEmbedding` transformation. Using the default settings, this
# will try substituting words with their neighbors in the counter-fitted embedding space.
transformation = WordSwapEmbedding(max_candidates=15)
# We'll use the greedy search method again
search_method = GreedySearch()
# Our constraints will be the same as Tutorial 1, plus the named entity constraint
constraints = [RepeatModification(),
StopwordModification(),
NamedEntityConstraint(False)]
# Now, let's make the attack using these parameters.
attack = Attack(goal_function, constraints, transformation, search_method)
print(attack)
# -
# Now, let's use our attack. We're going to attack samples until we achieve 5 successes. (There's a lot to check here, and since we're using a greedy search over all potential word swap positions, each sample will take a few minutes. This will take a few hours to run on a single core.)
# +
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
# Pull results from the (lazy) attack iterator until we have logged five
# successful adversarial examples; failed/skipped results are discarded.
num_successes = 0
while num_successes < 5:
    result = next(results_iterable)
    # Only log attacks that actually flipped the model's prediction.
    if isinstance(result, SuccessfulAttackResult):
        logger.log_attack_result(result)
        num_successes += 1
        print(f'{num_successes} of 5 successes complete.')
# -
# Now let's visualize our 5 successes in color:
# +
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
# -
# ### Conclusion
#
# Our constraint seems to have done its job: it filtered out attacks that did not swap out a named entity for another, according to the NLTK named entity detector. However, we can see some problems inherent in the detector: it often thinks the first word of a given sentence is a named entity, probably due to capitalization.
#
# We did manage to produce some nice adversarial examples! "Sigh" became "Inahles" and the prediction shifted from negative to positive.
| docs/examples/2_Constraints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="I_SqGquuT7K0"
# # GPU-accelerated image processing using CUPY and CUCIM
# Processing large images with python can take time. In order to accelerate processing, graphics processing units (GPUs) can be exploited, for example using [NVidia CUDA](https://en.wikipedia.org/wiki/CUDA). For processing images with CUDA, there are a couple of libraries available. We will take a closer look at [cupy](https://cupy.dev/), which brings more general computing capabilities for CUDA compatible GPUs, and [cucim](https://github.com/rapidsai/cucim), a library of image processing specific operations using CUDA. Both together can serve as GPU-surrogate for [scikit-image](https://scikit-image.org/).
#
# See also
# * [StackOverflow: Is it possible to install cupy on google colab?](https://stackoverflow.com/questions/49135065/is-it-possible-to-install-cupy-on-google-colab)
# * [Cucim example notebooks](https://github.com/rapidsai/cucim/blob/branch-0.20/notebooks/Welcome.ipynb)
#
# Before we start, we need to install CUDA and CUCIM properly. The following commands make this notebook run in Google Colab.
# + colab={"base_uri": "https://localhost:8080/"} id="zY7ZNaKWQnqQ" outputId="a2695d46-70ad-46a9-afc9-fd657fb3bfb0"
# !curl https://colab.chainer.org/install | sh -
# !pip install cucim
# !pip install scipy scikit-image cupy-cuda100
# + id="44Iz1nuLQWqG"
import numpy as np
import cupy as cp
import cucim
from skimage.io import imread, imshow
import pandas as pd
# + [markdown] id="zpcCXV7yPtsc"
# In the following, we are using image data from Paci et al shared under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. See also: https://doi.org/10.17867/10000140
#
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="OgExavuOOJKx" outputId="d2c7f566-9fe8-436b-877d-004ca7a8ed2f"
image = imread('https://idr.openmicroscopy.org/webclient/render_image_download/9844418/?format=tif')
imshow(image)
# + [markdown] id="1bMdYN2xR40Y"
# In order to process an image using CUDA on the GPU, we need to convert it. Under the hood of this conversion, the image data is sent from computer random access memory (RAM) to the GPUs memory.
# + colab={"base_uri": "https://localhost:8080/"} id="X6iCh2YqQHkT" outputId="2bd39abd-ee16-40d3-9fb6-af021759758b"
image_gpu = cp.asarray(image)
image_gpu.shape
# + [markdown] id="1_ar9AyLR5al"
# Extracting a single channel out of the three-channel image works like if we were working with numpy. Showing the image does not work, because the CUDA image is not available in memory. In order to get it back from GPU memory, we need to convert it to a numpy array.
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="noWzAc8JRmcM" outputId="77262cbb-56d6-45da-ddfb-a4015f3dd812"
single_channel_gpu = image_gpu[:,:,1]
# the following line would fail
# imshow(single_channel_gpu)
# get single channel image back from GPU memory and show it
single_channel = cp.asnumpy(single_channel_gpu)
imshow(single_channel)
# we can also do this with a convenience function
def gpu_imshow(image_gpu):
    """Copy a GPU-resident array back to host memory and display it with skimage's imshow."""
    imshow(np.asarray(image_gpu))
# + [markdown] id="y3eApJ2VYo3t"
# ## Image filtering and segmentation
#
# The cucim developers have re-implemented many functions from sckit image, e.g. the [Gaussian blur filter](https://docs.rapids.ai/api/cucim/stable/api.html#cucim.skimage.filters.gaussian), [Otsu Thresholding](https://docs.rapids.ai/api/cucim/stable/api.html#cucim.skimage.filters.threshold_otsu) after [Otsu et al. 1979](https://ieeexplore.ieee.org/document/4310076), [binary erosion](https://docs.rapids.ai/api/cucim/stable/api.html#cucim.skimage.morphology.binary_erosion) and [connected component labeling](https://docs.rapids.ai/api/cucim/stable/api.html#cucim.skimage.measure.label).
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="H6X0UdUIRsoO" outputId="e7f507e0-1759-460c-d9f5-e01922d4d2bb"
from cucim.skimage.filters import gaussian
blurred_gpu = gaussian(single_channel_gpu, sigma=5)
gpu_imshow(blurred_gpu)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ZdHr80deSye7" outputId="740689ff-7a68-4b1e-bee2-3197c0034471"
from cucim.skimage.filters import threshold_otsu
# determine threshold
threshold = threshold_otsu(blurred_gpu)
# binarize image by apply the threshold
binary_gpu = blurred_gpu > threshold
gpu_imshow(binary_gpu)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="I8yw3rJITj6T" outputId="7aba486d-9e1c-49ef-9427-4e51b1306fb0"
from cucim.skimage.morphology import binary_erosion, disk
eroded_gpu = binary_erosion(binary_gpu, selem=disk(2))
gpu_imshow(eroded_gpu)
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="gCvy1x2NVti-" outputId="c8a0901e-dd56-46e9-92bc-502ef6951d44"
from cucim.skimage.measure import label
labels_gpu = label(eroded_gpu)
gpu_imshow(labels_gpu)
# + [markdown] id="17b8WZqWaIMn"
# For visualization purposes, it is recommended to turn the label image into an RGB image, especially if you want to save it to disk.
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="OoUxrrWuZ2D7" outputId="985835b2-4bdd-4224-ad9d-236d1efebc4c"
from cucim.skimage.color import label2rgb
labels_rgb_gpu = label2rgb(labels_gpu)
gpu_imshow(labels_rgb_gpu)
# + [markdown] id="QYzUSmEmaT2B"
# ## Quantitative measurements
#
# Also quantitative measurements using [regionprops_table](https://docs.rapids.ai/api/cucim/stable/api.html#cucim.skimage.measure.regionprops_table) have been implemented in cucim. A major difference is that you need to convert its result back to numpy if you want to continue processing on the CPU, e.g. using [pandas](https://pandas.pydata.org/).
# + colab={"base_uri": "https://localhost:8080/"} id="Sl5EK8R2WEuI" outputId="920ba7f8-4d00-4ad8-9cc5-1cf72b51c1c9"
from cucim.skimage.measure import regionprops_table
table_gpu = regionprops_table(labels_gpu, intensity_image=single_channel_gpu, properties=('mean_intensity', 'area', 'solidity'))
table_gpu
# + colab={"base_uri": "https://localhost:8080/", "height": 576} id="vOi8JINbXGe3" outputId="cfb15bcc-df16-4eda-8dff-c99e240b9ef9"
# The following line would fail.
# pd.DataFrame(table_gpu)
# We need to convert that table to numpy before we can pass it to pandas.
table = {item[0] : cp.asnumpy(item[1]) for item in table_gpu.items()}
pd.DataFrame(table)
# + id="BNUDnC1yX1B0"
| robert_haase/cupy_cucim/cupy_cucim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting Wine Type and Quality with Matplotlib
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
% matplotlib inline
import seaborn as sns
sns.set_style('darkgrid')
wine_df = pd.read_csv('winequality_edited.csv')
# -
# ### Create arrays for red bar heights white bar heights
# Remember, there's a bar for each combination of color and quality rating. Each bar's height is based on the proportion of samples of that color with that quality rating.
# 1. Red bar proportions = counts for each quality rating / total # of red samples
# 2. White bar proportions = counts for each quality rating / total # of white samples
# get counts for each rating and color
color_counts = wine_df.groupby(['color', 'quality']).count()['pH']
color_counts
# get total counts for each color
color_totals = wine_df.groupby('color').count()['pH']
color_totals
# get proportions by dividing red rating counts by total # of red samples
red_proportions = color_counts['red'] / color_totals['red']
red_proportions
# get proportions by dividing white rating counts by total # of white samples
white_proportions = color_counts['white'] / color_totals['white']
white_proportions
# ### Plot proportions on a bar chart
# Set the x coordinate location for each rating group and the width of each bar.
ind = np.arange(len(red_proportions)) # the x locations for the groups
width = 0.35 # the width of the bars
# Now let’s create the plot.
# +
# plot bars
red_bars = plt.bar(ind, red_proportions, width, color='r', alpha=.7, label='Red Wine')
white_bars = plt.bar(ind + width, white_proportions, width, color='w', alpha=.7, label='White Wine')
# title and labels
plt.ylabel('Proportion')
plt.xlabel('Quality')
plt.title('Proportion by Wine Color and Quality')
locations = ind + width / 2 # xtick locations
labels = ['3', '4', '5', '6', '7', '8', '9'] # xtick labels
plt.xticks(locations, labels)
# legend
plt.legend()
# -
# Oh, that didn't work because we're missing a red wine value for the 9 rating. Even though this number is a 0, we need it for our plot. Run the last two cells after running the cell below.
red_proportions['9'] = 0
red_proportions
| plotting_type_quality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Random Forest Model on KKBox's Music Recommendation System
# In this notebook, we will fit a Random Forest model to the data, calculate the accuracies, run a grid search to extract the best model and build evaluation metrics to validate how well our model fits the data.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from IPython.display import display
from sklearn import metrics
import re
import random
from sklearn import preprocessing
import gc
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings('ignore')
from summary_fn import *
# # 1. Loading data
# Import data
data = pd.read_csv('features_train_data.csv')
# Display the transpose of the rows to visualize all columns
def display_all(df):
    """Render *df* with relaxed pandas display limits so wide/long frames show fully."""
    # Temporarily lift the row/column truncation only for this display call.
    with pd.option_context("display.max_rows", 1000,
                           "display.max_columns", 1000):
        display(df)
display_all(data.head().T)
# # 2. Data Processing for RandomForest
# #### 2.1 Data Normalization
# We will convert few columns to percentage of total songs per user to normalize the data and improve the feature selection.
# +
# Normalize data into percentage for following columns
hnorm_cols = ['msno_genre_count', 'source_system_tab_user_lev_c',
'source_screen_name_user_lev_c',
'source_type_user_lev_c','composer_user_lev_c',
'lyricist_user_lev_c', 'artist_name_user_lev_c' ]
for i in hnorm_cols:
data[i] = data[i]/data['msno_appear_count']
# -
# Dropping less relevant columns
data.drop(columns=['lyricist_count_y','composer_count_x','gener_count'], inplace= True)
# #### 2.2 Data Imputation
# Since we have a lot of data missing for a few columns and RandomForest cannot handle NA values, we will replace continuous columns with -1 and categorical variables with 'NA' to treat them as features and levels.
# Imputing continuous columns
na_continuous_col = ['msno_genre_count', 'source_system_tab_user_lev_c',
'source_screen_name_user_lev_c',
'source_type_user_lev_c', 'artist_name_user_lev_c',
'composer_user_lev_c',
'lyricist_user_lev_c']
for col in na_continuous_col:
data[col] = data[col].fillna(-1)
# Imputing categorical columns
data = data.fillna('Not')
# #### 2.3 Label encoding
# +
# Label encoding for the categorical variables
from sklearn import preprocessing
def encoder(x_train):
    """Label-encode every categorical/object column of *x_train*, mutating it in place."""
    label_enc = preprocessing.LabelEncoder()
    categorical_cols = [col for col in x_train.columns
                        if x_train[col].dtype.name in ('category', 'object')]
    for col in categorical_cols:
        # Each column is re-fit independently, exactly as the per-column loop did.
        x_train[col] = label_enc.fit_transform(x_train[col].astype(str))
# Encode the data
encoder(data)
# -
# # 3. Model Fitting
# #### 3.1 Train Test Split
# +
# Features and target varaibles
X = data.drop(columns='target',axis=1)
y = data['target']
# Train and test split
X_train_all, X_test, y_train_all, y_test = train_test_split(X, y,
test_size=0.2, random_state=1)
# Train and validation split
X_train, X_val, y_train, y_val = train_test_split(X_train_all, y_train_all,
test_size=0.2, random_state=1)
# -
del data
del X_train_all
del y_train_all
gc.collect()
# #### 3.2 Taking a small sample of data for our first fit
# Sampled train data
random.seed(130)
idx = random.sample(range(0,X_train.shape[0]), 100000)
X_train_sampled = X_train.iloc[idx]
y_train_sampled = y_train.iloc[idx]
# Model fitting
rf = RandomForestClassifier(n_estimators=100, oob_score=False,
random_state=1,n_jobs=-1)
rf.fit(X_train_sampled, y_train_sampled)
predicted = rf.predict(X_val)
accuracy = accuracy_score(y_val, predicted)
print(f'Mean accuracy score on validation: {accuracy:.3}')
from sklearn.metrics import accuracy_score
predicted = rf.predict(X_test)
accuracy = accuracy_score(y_test, predicted)
print(f'Mean accuracy score on test: {accuracy:.3}')
# #### 3.3 Feature selection
# +
def rf_feat_importance(m, df):
    """Pair each column of *df* with the fitted model's importance, sorted descending."""
    importance_table = pd.DataFrame({'cols': df.columns,
                                     'imp': m.feature_importances_})
    return importance_table.sort_values('imp', ascending=False)
fi = rf_feat_importance(rf, X_train_sampled ); fi[:10]
# -
def plot_fi(fi):
    # Horizontal bar chart of feature importances produced by rf_feat_importance.
    # NOTE(review): passing 'barh' positionally relies on DataFrame.plot accepting
    # (x, y, kind) as positional arguments -- verify against the installed pandas.
    return fi.plot('cols', 'imp', 'barh',
                   figsize=(12,7), legend=False)
plot_fi(fi[:30]);
to_keep = fi[fi.imp>0.015].cols; len(to_keep)
X_train_sampled = X_train_sampled[to_keep]
X_val = X_val[to_keep]
X_test = X_test[to_keep]
X_train = X_train[to_keep]
# #### 3.4 Grid Search
# +
# Pipeline and grid search
pipe_dt = Pipeline([('clf', RandomForestClassifier(n_jobs=-1))])
grid_params = dict(clf__n_estimators = [50,100,150,200],
clf__max_features = ['sqrt', 'log2'],
clf__min_samples_split = [2,10,20])
gs = GridSearchCV(estimator=pipe_dt,
param_grid=grid_params,
scoring='accuracy',
cv=5)
gs.fit(X_train_sampled, y_train_sampled)
f"{gs.score(X_test, y_test):.4f}"
# +
# Best algorithm with best hyperparameters
# need to fit it to find specific model parameters
print(gs.best_estimator_)
# Best model with specific model parameters
gs.best_estimator_.get_params()['clf']
# -
gs.best_estimator_.get_params()['clf']
m = RandomForestClassifier(n_estimators=100, n_jobs=-1,oob_score=False,
max_features='sqrt', min_samples_split= 2)
m.fit(X_train, y_train)
predicted = m.predict(X_val)
accuracy = accuracy_score(y_val, predicted)
print(f'Mean accuracy score validation: {accuracy:.3}')
# Test accuracy
predicted = m.predict(X_test)
accuracy = accuracy_score(y_test, predicted)
print(f'Mean accuracy score test: {accuracy:.3}')
# # 4. Evaluation Metric
# #### 4.1 Prediction Score
from sklearn.metrics import precision_recall_fscore_support as score
precision, recall, fscore, support = score(y_test, predicted)
print('precision: {}'.format(precision))
print('recall: {}'.format(recall))
print('fscore: {}'.format(fscore))
print('support: {}'.format(support))
# #### 4.2 Confusion Matrix
# +
import itertools
from sklearn.metrics import confusion_matrix
class_names = ['0','1']
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Convert raw counts to per-true-class proportions (each row sums to 1).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # NOTE(review): tick positions are hard-coded for 2 classes; use
    # np.arange(len(classes)) if this is ever reused for more classes.
    tick_marks = np.arange(2)
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Counts are formatted as integers, normalized proportions with 2 decimals.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]),
                                  range(cm.shape[1])):
        # White text on dark cells, black on light cells, for readability.
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, predicted)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
# -
pd.DataFrame(predicted, columns=['rf']
).to_csv('rf_test_score.csv',index=False)
# #### 4.3 ROC Curve
# Predict probability for ROC curve
predicted = m.predict_proba(X_test)
# +
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import random
false_positive_rate, true_positive_rate, thresholds = roc_curve(
y_test, [i[1] for i in predicted])
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate,
'b',label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| Models/Random_Forest_featured_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mindspore] *
# language: python
# name: conda-env-mindspore-py
# ---
# + [markdown] slideshow={"slide_type": "-"}
# # 注意力汇聚:Nadaraya-Watson 核回归
#
#
# -
import sys
sys.path.append('..')
# + origin_pos=2 tab=["pytorch"]
import mindspore
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.numpy as mnp
from d2l import mindspore as d2l
# + [markdown] slideshow={"slide_type": "slide"}
# 生成数据集
# + origin_pos=9 tab=["pytorch"]
n_train = 50
# x_train, _ = mindspore.sort(mindspore.rand(n_train) * 5)
x_train = np.sort(np.random.rand(n_train) * 5)
x_train = mindspore.Tensor(x_train, mindspore.float32)
def f(x):
    # Ground-truth function the kernel regressor is trying to recover.
    return 2 * mnp.sin(x) + x**0.8
y_train = f(x_train) + mindspore.Tensor(np.random.normal(0.0, 0.5, (n_train,)), mindspore.float32)
x_test = mnp.arange(0, 5, 0.1)
y_truth = f(x_test)
n_test = len(x_test)
n_test
# + origin_pos=15 tab=["pytorch"]
def plot_kernel_reg(y_hat):
    """Plot predictions *y_hat* against the ground-truth curve over x_test, plus training points."""
    d2l.plot(x_test.asnumpy(), [y_truth.asnumpy(), y_hat.asnumpy()], 'x', 'y', legend=['Truth', 'Pred'],
             xlim=[0, 5], ylim=[-1, 5])
    # Overlay the noisy training samples as semi-transparent circles.
    d2l.plt.plot(x_train.asnumpy(), y_train.asnumpy(), 'o', alpha=0.5);
y_hat = mnp.repeat(y_train.mean(), n_test)
plot_kernel_reg(y_hat)
# + [markdown] slideshow={"slide_type": "slide"}
# 非参数注意力汇聚
# + origin_pos=19 tab=["pytorch"]
X_repeat = x_test.repeat(n_train).reshape((-1, n_train))
attention_weights = nn.Softmax(axis=1)(-(X_repeat - x_train)**2 / 2)
y_hat = mnp.matmul(attention_weights, y_train)
plot_kernel_reg(y_hat)
# + [markdown] slideshow={"slide_type": "slide"}
# 注意力权重
# + origin_pos=23 tab=["pytorch"]
attention_weights = mnp.expand_dims(mnp.expand_dims(attention_weights, 0), 0)
d2l.show_heatmaps(attention_weights,
xlabel='Sorted training inputs',
ylabel='Sorted testing inputs')
# + [markdown] slideshow={"slide_type": "slide"}
# 带参数注意力汇聚
# 假定两个张量的形状分别是$(n,a,b)$和$(n,b,c)$,
# 它们的批量矩阵乘法输出的形状为$(n,a,c)$
# + origin_pos=27 tab=["pytorch"]
X = mnp.ones((2, 1, 4))
Y = mnp.ones((2, 4, 6))
ops.BatchMatMul()(X, Y).shape
# + [markdown] slideshow={"slide_type": "slide"}
# 使用小批量矩阵乘法来计算小批量数据中的加权平均值
# + origin_pos=31 tab=["pytorch"]
weights = mnp.ones((2, 10)) * 0.1
values = mnp.arange(20.0).reshape((2, 10))
ops.BatchMatMul()(mnp.expand_dims(weights, 1), mnp.expand_dims(values, -1))
# + [markdown] slideshow={"slide_type": "slide"}
# 带参数的注意力汇聚
# + origin_pos=35 tab=["pytorch"]
class NWKernelRegression(nn.Cell):
    """Nadaraya-Watson kernel regression with one learnable bandwidth parameter ``w``."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Learnable scalar that scales the Gaussian kernel width.
        self.w = mindspore.Parameter(mindspore.Tensor(np.random.rand(1,), mindspore.float32))
    def construct(self, queries, keys, values):
        """Return (predictions, attention weights) for *queries* against *keys*/*values*."""
        # Tile each query so it can be compared against every key:
        # resulting shape is (number of queries, number of key-value pairs).
        queries = queries.repeat(keys.shape[1]).reshape((-1, keys.shape[1]))
        # Gaussian kernel attention, scaled by the learned parameter w.
        attention_weights = ops.Softmax(axis=1)(
            -((queries - keys) * self.w)**2 / 2)
        # Weighted average of values computed with a batch matmul.
        output = ops.BatchMatMul()(mnp.expand_dims(attention_weights, 1), \
            mnp.expand_dims(values, -1)).reshape(-1)
        return output, attention_weights
# + [markdown] slideshow={"slide_type": "slide"}
# 将训练数据集变换为键和值
# + origin_pos=39 tab=["pytorch"]
X_tile = mnp.tile(x_train, (n_train, 1))
Y_tile = mnp.tile(y_train, (n_train, 1))
keys = X_tile[(1 - mnp.eye(n_train)).astype(mindspore.int32)].reshape((n_train, -1))
values = Y_tile[(1 - mnp.eye(n_train)).astype(mindspore.int32)].reshape((n_train, -1))
# -
class NetWithLoss(nn.Cell):
    """Wrap a network and a loss so a forward pass returns the scalar training loss."""
    def __init__(self, network, loss):
        super().__init__()
        self.network = network
        self.loss = loss
    def construct(self, *inputs):
        # All inputs except the last feed the network; the last input is the label.
        y_hat, _ = self.network(*inputs[:-1])
        loss = self.loss(y_hat, inputs[-1])
        # Halve and reduce to a scalar (matches the 1/2 * MSE convention).
        loss = (loss / 2).sum()
        return loss
# + [markdown] slideshow={"slide_type": "slide"}
# 训练带参数的注意力汇聚模型
# + origin_pos=43 tab=["pytorch"]
net = NWKernelRegression()
loss = nn.MSELoss(reduction='none')
optim = nn.SGD(net.trainable_params(), 0.5)
net_with_loss = NetWithLoss(net, loss)
trainer = d2l.Train(net_with_loss, optim)
animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[1, 5])
for epoch in range(5):
l = trainer(x_train, keys, values, y_train) / 2
print(f'epoch {epoch + 1}, loss {float(l.sum().asnumpy()):.6f}')
animator.add(epoch + 1, float(l.sum().asnumpy()))
# + [markdown] slideshow={"slide_type": "slide"}
# 预测结果绘制
# + origin_pos=47 tab=["pytorch"]
keys = mnp.tile(x_train, (n_test, 1))
values = mnp.tile(y_train, (n_test, 1))
y_hat, attention_weights = net(x_test, keys, values)
y_hat = mnp.expand_dims(y_hat, 1)
plot_kernel_reg(y_hat)
# + [markdown] slideshow={"slide_type": "slide"}
# 曲线在注意力权重较大的区域变得更不平滑
# + origin_pos=51 tab=["pytorch"]
attention_weights = mnp.expand_dims(mnp.expand_dims(attention_weights, 0), 0)
d2l.show_heatmaps(attention_weights,
xlabel='Sorted training inputs',
ylabel='Sorted testing inputs')
| chapter_10_attention_mechanisms/0_nadaraya-waston.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp proxy
# -
# %reload_ext autoreload
# %autoreload 2
# hide
# !nbdev_build_lib --fname 11_Proxy_Request.ipynb
# # TODO
# - [x] 2020-03-15 抽象成Class
# # 代理爬虫
# > 很多网站都有反爬虫机制,一个IP频繁访问一个网站,就会出现访问被拒绝的情况,所以换IP可以解决这个问题。(运用技术请克制,避免过度浪费服务器资源)
# ## 测试代理
# > 有专用的付费代理IP稳定可靠,也有免费的代理IP可能会随时失效
# +
# export
import requests,json,re,random,sys,time,os
from bs4 import BeautifulSoup,Tag,NavigableString
from crawler_from_scratch.utils import *
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
# -
# 先从 https://www.freeip.top/ 随便拿个ip来测试
# hide
url = 'https://www.baidu.com/'
headers={'user-agent':'Mozilla/5.0'}
proxies = {'https': 'https://192.168.3.11:8080'}
res = requests.get(url,proxies=proxies,headers=headers,timeout=5)
res
# ## 制作代理池
# > 网上有专门整理的[代理池](https://github.com/jhao104/proxy_pool),但需要配置数据库,所以不在这里演示,而是自己写个爬虫,通过[这个网站的API](https://github.com/jiangxianli/ProxyIpLib)获取IP
# +
# export
class Proxy():
    """A proxy rotator: crawls free proxy IPs and uses them to fetch pages.

    The pool lives in ``self.db`` (keyed by the provider's ``unique_id``)
    and is persisted as JSON at ``self.path``.  Every entry carries a
    ``health`` score rewarded (+1) on success and halved on failure.
    """
    def __init__(self):
        self.db = {}
        self.path = './data/11_Proxy.json'
        self.debug = True
        if os.path.exists(self.path):
            with open(self.path, 'r') as f:
                self.db = json.loads(f.read())
            print('加载成功',len(self.db.keys()))
        else:
            # BUG FIX: was `self.update(self)`, which passed an extra
            # positional argument to the bound method and raised TypeError.
            self.update()
    def update(self):
        """Re-crawl the IPs, reset health values, store them in `self.db` and save to `self.path`."""
        data = []
        next_page_url = 'https://www.freeip.top/api/proxy_ips?page=1'
        while next_page_url:
            if self.debug: print('start:',next_page_url)
            res = requests.get(next_page_url)
            if res.status_code == 200:
                payload = res.json()['data']  # parse the JSON body once
                data += payload['data']
                next_page_url = payload['next_page_url']
            else:
                # BUG FIX: the original spun forever on a non-200 reply
                # because next_page_url was never updated on that path.
                break
            time.sleep(1)  # be polite to the free API
        # Convert the ip list into a dict and attach an initial health score.
        self.db = {}
        for d in data:
            _id = d['unique_id']
            self.db[_id] = d
            self.db[_id]['health'] = 50
        self.save()
        self.validate('http://www.baidu.com/')
        self.validate('https://www.baidu.com/')
    def save(self):
        """Persist the current pool to `self.path` as JSON."""
        with open(self.path, 'w') as f:
            json.dump(self.db,f)
        print('更新成功',len(self.db.keys()))
    def validate(self,url,max_workers=50):
        """Batch-test every pooled IP whose protocol matches *url* by fetching it concurrently."""
        protocol = url.split(':')[0]
        db_with_protocol = [self.db[k] for k in self.db if self.db[k]['protocol'] == protocol]
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            executor.map(lambda ip_obj : self._get(url,ip_obj), db_with_protocol)
    def choose_healthy_ip(self,protocol):
        """Randomly pick one of the ten healthiest IPs for *protocol*."""
        db_with_protocol = [self.db[k] for k in self.db if self.db[k]['protocol'] == protocol]
        sorted_db = sorted(db_with_protocol,
                           key = lambda item : item['health'],
                           reverse=True)
        return random.choice(sorted_db[:10])
    def update_ip_health(self,res,obj):
        """Update *obj*'s health from *res*: +1 on HTTP 200, halved otherwise."""
        if res.status_code == 200:
            obj['health'] += 1
        else:
            obj['health'] = int(obj['health']/2)
        if self.debug: print(obj['ip'],'健康值变为:',obj['health'])
    def _get(self,url,ip_obj=None):
        """Fetch *url* through *ip_obj* (or, if omitted, the best pooled IP) and update its health."""
        protocol = url.split(':')[0]
        # FIX: default used to be a mutable `{}`; None is the safe sentinel
        # (falsy, so the same `if not ip_obj` branch still fires).
        if not ip_obj: ip_obj = self.choose_healthy_ip(protocol)
        ip = f"{ip_obj['protocol']}://{ip_obj['ip']}:{ip_obj['port']}"
        try:
            res = requests.get(url,
                               proxies={protocol: ip},
                               headers={'user-agent':'Mozilla/5.0'},
                               timeout=5)
        except requests.RequestException:  # narrowed from a bare `except:`
            if self.debug: print(f'error: {ip}\n{sys.exc_info()}\n')
            # A fresh Response has status_code None, which counts as a failure.
            res = requests.Response()
        self.update_ip_health(res,ip_obj)
        return res
    def get(self,url):
        """Fetch *url*, switching to another proxy for up to 10 attempts."""
        try_times = 1
        while try_times < 11:
            if self.debug : print('\n',try_times,url)
            res = self._get(url)
            if res.status_code == 200:
                print('访问成功:',url)
                return res
            else:
                try_times += 1
        print('访问失败:',url)
        return res
# -
# hide
px = Proxy()
px.update()
# ## 自动切换代理
# > 优先选择健康值高的ip,一次请求,成功健康值+1,失败则减半
# hide
px = Proxy()
px._get('https://www.baidu.com/')
# ## 校验&更替代理
# > 用百度批量测试网站的有效性
# hide
px = Proxy()
px.validate('http://www.baidu.com/')
# ## 用代理爬取豆瓣页面
# > 昨天用豆瓣页面测试爬虫功能的时候,就发现了访问频率过高的被拒的问题,今天就用爬虫来抓取整个互联网类目下的图书信息
# Build the 50 paginated listing URLs (offsets 0, 20, ..., 980) for the Douban tag.
url_list = [
    f'https://book.douban.com/tag/%E4%BA%92%E8%81%94%E7%BD%91?start={i}&type=T'
    for i in range(0, 1000, 20)
]
len(url_list)
def get_douban_data(url,px,data):
    """Fetch one Douban tag listing page through proxy *px* and collect book items into *data*."""
    res = px.get(url)
    if res.status_code == 200:
        soup = BeautifulSoup(res.text)
        # The book list lives in <ul class="subject-list"> inside the page body.
        main_content = soup.body.find('ul',class_='subject-list')
        for c in get_children(main_content):
            item_data = get_data(c)
            # NOTE(review): assumes get_data returns a dict containing an
            # 'a_nbg_url' link with the numeric Douban id in its path --
            # confirm against crawler_from_scratch.utils.
            a_nbg_url = item_data['a_nbg_url']
            _id = re.search(r'/(\d+)/',a_nbg_url).group(1)
            # write into the shared dict, keyed by the Douban item id
            data[_id] = item_data
    else:
        print(res,res.text)
# +
# hide
px = Proxy()
data ={}
px.debug = True
for i in range(3):
px.validate('https://book.douban.com')
px.save()
# -
#hide
px.debug = False
with ThreadPoolExecutor(max_workers=10) as executor:
executor.map(lambda url : get_douban_data(url,px,data), url_list)
# hide
dataframe = pd.DataFrame.from_dict(data,orient='index')
dataframe.head()
# 得到了这样的规范的数据结构的数据,就可以进行很多数据分析的工作,比如评价数分布,评分分布,热门作者等等
# hide
dataframe.describe()
| 11_Proxy_Request.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
cas, descriptors, data = dravnieks.get_data()
drav = pd.DataFrame.from_dict(data).T.round(2)
# Turn CAS into CIDs
cas_list = list(data.keys())
results = odorants.get_cids(cas_list, kind='name', verbose=False)
drav = pd.Series(results, name='CID').to_frame().join(drav)
drav.head()
# Save the CID-annotated Dravnieks data to a new CSV file
file_path = os.path.join(pyrfume.DATA, 'dravnieks', 'dravnieks.csv')
drav.to_csv(file_path)
| notebooks/dravnieks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ассимптотика алгоритмов и зачем она нужна
#
# Нужно как-то уметь сравнивать алгоритмы между собой, особенно хочется понимать какой из алгоритмов работает быстрее (вне зависимости от железа) И какой из них потребляет больеше памяти.
# Для этого придумали О-нотацию.
#
# Есть математическое определение: для двух функций f(x) и g(x) можно сказать что f является O(g) если существует такая константа C > 0, что при устремлении x к бесконечности (или какой-то точке $x_0$) выполняется неравенство $|f(x)| < C * |g(x)|$
#
# Если говорить человеческим языком, то если мы говорим что алгоритм работает за O(f(n)), где n - размер входных параметров алгоритма
# это значит что скорость работы алгоритма с увеличением n будет расти не быстрее чем f(n) на какую-то константу
#
# Вычислять ассимптотику можно просто отбрасывая все константы при вычислении количества операций, которое делате алгоритм, но некоторые случаи сложнее чем другие
#
# Вот тут можно почитать еще https://bit.ly/3khIrsy
# ### Пример алгоритма с ассимптотикой (по времени) O($n^2$)
def n_square_algo(array):
    """Illustrative example of an algorithm whose time complexity is O(n^2)."""
    # Here n is len(array)
    # This double loop performs O(n^2) operations
    for elem in array:
        for elem2 in array:
            print(elem * elem2)
    # Does not affect the asymptotics, since we drop all constants
    # (anything that does not depend on n)
    for i in range(100000000000000000):
        print(i)
    # One can take on faith that sorting a list runs in O(n log n),
    # which is less than O(n^2)
    array.sort()
# # Задача 1 (В которой мы узнаем про трюк с префиксными суммами и что можно обменивать время на память)
#
# Вам дан список чисел (как отрицательных так и положительных), задача найти в нем помассив с максимальной суммой элементов и при том из всех таких - с наибольшей длиной
#
# Пример:
#
# array = [1, 2, -4, 5, 2, -1, 3, -10, 7, 1, -1, 2]
#
# ОТВЕТ: [1, 2, -4, <font color='green'>5, 2, -1, 3,</font> -10, 7, 1, -1, 2]
#
# ## Наивное решение с ассимптотикой O($n^3$) по времени и O($1$) по памяти:
def max_sum_subarray(array):
    """Return (length, sum) of the maximum-sum subarray of *array*.

    Among subarrays with equal maximal sums the longest one wins, per the
    task statement.  Deliberately naive O(n^3): enumerates every
    (start, finish) pair and re-sums the slice from scratch.
    Returns (-1, -inf) for an empty input.
    """
    n = len(array)
    ans = -1
    max_sum = float("-inf")
    for start in range(n):
        # BUG FIX: finish now starts at `start`, so single-element
        # subarrays are considered too (the original skipped them and
        # e.g. returned 4 instead of 5 for [5, -1]).
        for finish in range(start, n):
            current_sum = 0
            for elem in array[start:finish + 1]:
                current_sum += elem
            length = finish - start + 1
            # Tie-break equal sums in favour of the longer subarray.
            if current_sum > max_sum or (current_sum == max_sum and length > ans):
                max_sum, ans = current_sum, length
    return ans, max_sum
import numpy as np
# %%timeit
max_sum_subarray(np.random.rand(100)-0.5)
# ## Чуть более близкое к оптимальному решение с ассимптотикой O($n^2$) по времени и O($n$) по памяти:
def max_sum_subarray(array):
    """Return (length, sum) of the maximum-sum subarray of *array* in O(n^2) time.

    Among subarrays with equal maximal sums the longest one wins.
    Uses O(n) extra memory for prefix sums so any subarray sum is an O(1)
    query, removing the innermost loop of the naive O(n^3) version.
    Returns (-1, -inf) for an empty input.
    """
    n = len(array)
    # prefix_sum[i] holds sum(array[:i]); the sum of array[start:finish+1]
    # is then prefix_sum[finish + 1] - prefix_sum[start].
    prefix_sum = [0]
    for i in range(n):
        prefix_sum.append(prefix_sum[-1] + array[i])
    ans = -1
    max_sum = -float("inf")
    for start in range(n):
        # BUG FIX: finish starts at `start`, so single-element subarrays
        # are included (the original began at start + 1 and missed them).
        for finish in range(start, n):
            current_sum = prefix_sum[finish + 1] - prefix_sum[start]
            length = finish - start + 1
            # Tie-break equal sums in favour of the longer subarray.
            if current_sum > max_sum or (current_sum == max_sum and length > ans):
                max_sum, ans = current_sum, length
    return ans, max_sum
# %%timeit
max_sum_subarray(np.random.rand(1000)-0.5)
49.6/1.32
# в 38 раз быстрее
# # Задача 2 (Где мы узнаем про трюк с двумя указателями и что ассимптотику по времени можно сократить если итерироваться в каком-то специальном порядке):
# Вам дано 2 массива с числами, отсортированные по возрастанию, задача найти по элементу в каждом изз массивов ($x_1$ и $x_2$ соответственно) таких что $|x_1 - x_2|$ минимально
#
# Пример:
#
# <font color='green'>array1</font> = [-10, -3, 0, 5, 13, 58, 91, 200, 356, 1000, 25000]
#
# <font color='red'>array2</font> = [-9034, -574, -300, -29, 27, 100, 250, 340, 900, 60000]
#
# ОТВЕТ: <font color='green'>91</font> и <font color='red'>100</font>
#
# # Наивное решение с ассимптотикой O($n * m$) по времени и O(1) по памяти
# где n и m это размеры массивов
def min_difference_pair(arr1, arr2):
    """Brute force: compare every pair of elements (O(n * m) time, O(1) memory).

    Parameters:
        arr1, arr2: non-empty sequences of numbers (sorting is not required
            by this naive version).

    Returns:
        (min_diff, (i, j)) where abs(arr1[i] - arr2[j]) is minimal; the first
        such pair in scan order is reported.

    Bug fix: the original iterated ``range(len(arr) - 1)`` for both arrays,
    so the last element of each array was never considered (e.g. for
    ([1, 5], [4]) it returned (3, (0, 0)) instead of (1, (1, 0))).
    """
    min_diff, ans = abs(arr1[0] - arr2[0]), (0, 0)
    for i in range(len(arr1)):
        for j in range(len(arr2)):
            diff = abs(arr1[i] - arr2[j])
            if diff < min_diff:
                min_diff, ans = diff, (i, j)
    return min_diff, ans
# # Оптимальное решение с асимптотикой O($n + m$) по времени и O(1) по памяти
def min_difference_pair(arr1, arr2):
    """Two-pointer merge walk over two ascending-sorted arrays.

    O(n + m) time, O(1) memory.  At every step the pointer sitting on the
    smaller current value advances (the other branch fires when one pointer
    has already reached its last index), and the freshly formed pair is
    compared against the best difference seen so far.

    Returns (min_diff, (i, j)) with abs(arr1[i] - arr2[j]) minimal.
    """
    last1, last2 = len(arr1) - 1, len(arr2) - 1
    i = j = 0
    best, best_pair = abs(arr1[0] - arr2[0]), (0, 0)
    while i + j != last1 + last2:
        if j == last2 or (i < last1 and arr1[i] <= arr2[j]):
            i += 1
        elif i == last1 or (j < last2 and arr1[i] >= arr2[j]):
            j += 1
        gap = abs(arr1[i] - arr2[j])
        if gap < best:
            best, best_pair = gap, (i, j)
    return best, best_pair
# # Домашнее Задание
#
# 1) Потренироваться в рекурсии, например, здесь: https://informatics.mccme.ru/mod/statements/view.php?id=2543#1 (задачи в менюшке справа, нужно зарегаться чтобы решать)
#
# 2) Задачи на метод двух указателей:
#
# #### Простые:
#
# https://leetcode.com/problems/longest-substring-without-repeating-characters/
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/
# https://leetcode.com/problems/merge-sorted-array/
#
# #### Посложнее:
#
# https://leetcode.com/problems/long-pressed-name/
# https://leetcode.com/problems/trapping-rain-water/
#
# 3) Придумать решение первой задачи с асимптотикой по времени O(n) (например, используя метод двух указателей)
#
def max_sum_subarray(array):
    """Return (length, sum) of the maximum-sum subarray of length >= 2.

    Two-pointer attempt at an O(n)-time solution; on np.random.rand(1000)-0.5
    it runs ~37x faster than the O(n^2) algorithm from the lecture.
    NOTE(review): correctness of this greedy pointer advance is not proven
    here — it is only validated empirically against the slower versions.
    """
    n = len(array)
    # Precompute prefix sums so any subarray sum costs O(1).
    prefix_sum = [0]
    for i in range(n):
        prefix_sum.append(prefix_sum[-1] + array[i])
    # (start, finish) is the current window; *_last / *_old remember the two
    # previous states so a repeated state can be detected and broken out of.
    start, finish, start_last, finish_last = 0, 0, -1, -1
    start_old = -1
    finish_old = -1
    ans = -1
    max_sum = -float("inf")
    while start != n - 1:
        # Stop criterion: the start pointer has reached the next-to-last slot.
        if finish == n or ((finish == finish_old) and (start == start_old)):
            # If the finish pointer hit the end, or we are revisiting the same
            # (start, finish) state, advance the start pointer instead.
            start += 1
        current_sum = prefix_sum[finish + 1] - prefix_sum[start]
        if current_sum > max_sum:
            max_sum, ans = current_sum, finish - start + 1
        try:
            # Lookahead: would shrinking from the left be worse than growing to
            # the right?  Near the right edge the lookahead index overflows and
            # raises IndexError, which we treat as "do not advance finish".
            sum_check = (prefix_sum[finish] - prefix_sum[start + 1]) < (prefix_sum[finish + 2] - prefix_sum[start])
        except IndexError:
            sum_check = False
        if ((finish - start) < 1 or sum_check) and (finish + 2 < len(prefix_sum)):
            finish += 1
            current_sum = prefix_sum[finish + 1] - prefix_sum[start]
            if current_sum > max_sum:
                max_sum, ans = current_sum, finish - start + 1
        # Shift the two-deep state history used by the repeat-state check above.
        start_old = start_last
        start_last = start
        finish_old = finish_last
        finish_last = finish
    return ans, max_sum
# Stray scratch expression; evaluates to False and is discarded.
not(True)
# +
# %%timeit
# Sanity-check + timing on the worked example from the problem statement.
X = [1, 2, -4, 5, 2, -1, 3, -10, 7, 1, -1, 2]
max_sum_subarray(X)
# -
# %%timeit
max_sum_subarray(np.random.rand(1000)-0.5)
# goal: under 1 ms
X = np.random.rand(1000)-0.5
# Speed-up ratio taken by hand from the %%timeit outputs (ms / ms).
133/3.54
max_sum_subarray(X)
max_sum_subarray(X)
| 02 Algorithms/Day 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Granero0011/AB-Demo/blob/master/Monte_Carlo_Simulation_Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="NZeb4VpGhe3s" colab_type="code" colab={}
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
# + id="ogLvGkFYl-OQ" colab_type="code" colab={}
# Model parameters: each rep's percent-to-target ~ Normal(avg, std_dev);
# the whole experiment is repeated num_simulations times further below.
avg = 1
std_dev=.1
num_reps= 500
num_simulations= 1000
# + id="I_37m1ium6BX" colab_type="code" colab={}
# One draw of percent-to-target per sales rep, rounded to 2 decimals.
pct_to_target = np.random.normal(avg, std_dev, num_reps).round(2)
# + id="Vr1wagOvnEql" colab_type="code" colab={}
# Sales targets sampled from a discrete distribution over six levels.
sales_target_values = [75_000, 100_000, 200_000, 300_000, 400_000, 500_000]
sales_target_prob = [.3, .3, .2, .1, .05, .05]
sales_target = np.random.choice(sales_target_values, num_reps, p=sales_target_prob)
# + id="81LDH9bpp_nJ" colab_type="code" colab={}
# Assemble one simulated period: actual sales = pct-to-target * target.
df = pd.DataFrame(index=range(num_reps), data={'Pct_To_Target': pct_to_target,
                                               'Sales_Target': sales_target})
df['Sales'] = df['Pct_To_Target'] * df['Sales_Target']
def calc_commission_rate(x):
    """Map percent-to-target to a commission rate.

        x <= 0.90          -> 2%
        0.90 < x <= 0.99   -> 3%
        x > 0.99           -> 4%
    """
    if x > .99:
        return .04
    return .02 if x <= .90 else .03
# + id="oS9ktOuaq0wB" colab_type="code" colab={}
# Per-rep commission: rate depends on percent-to-target (see calc_commission_rate).
df['Commission_Rate'] = df['Pct_To_Target'].apply(calc_commission_rate)
df['Commission_Amount'] = df['Commission_Rate'] * df['Sales']
# + id="O6aff9BLq3Vz" colab_type="code" colab={}
# Define a list to keep all the results from each simulation that we want to analyze
all_stats = []
# Loop through many simulations
for i in range(num_simulations):
    # Choose random inputs for the sales targets and percent to target
    sales_target = np.random.choice(sales_target_values, num_reps, p=sales_target_prob)
    pct_to_target = np.random.normal(avg, std_dev, num_reps).round(2)
    # Build the dataframe based on the inputs and number of reps
    df = pd.DataFrame(index=range(num_reps), data={'Pct_To_Target': pct_to_target,
                                                   'Sales_Target': sales_target})
    # Back into the sales number using the percent to target rate
    df['Sales'] = df['Pct_To_Target'] * df['Sales_Target']
    # Determine the commission rate and calculate the commission amount
    df['Commission_Rate'] = df['Pct_To_Target'].apply(calc_commission_rate)
    df['Commission_Amount'] = df['Commission_Rate'] * df['Sales']
    # Track total sales, commission amounts and sales targets for this simulation
    all_stats.append([df['Sales'].sum().round(0),
                      df['Commission_Amount'].sum().round(0),
                      df['Sales_Target'].sum().round(0)])
# + id="eQ0nzbr_r_mP" colab_type="code" colab={}
# One row per simulation: totals for sales, commissions and targets.
results_df = pd.DataFrame.from_records(all_stats, columns=['Sales',
                                                           'Commission_Amount',
                                                           'Sales_Target'])
# + id="CbHT2d9DshFj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 208} outputId="c743b885-ed8e-453c-b41b-948658aceb8b"
# Summary statistics across all simulations, formatted with thousands separators.
results_df.describe().style.format('{:,}')
| Monte_Carlo_Simulation_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# 
# + [markdown] tags=[]
# # Intro to Python
#
# ### Access this notebook on [GITHUB](https://github.com/chisomloius/iLearnPy/) or [COLAB](https://colab.com/ilearnPy/)
# -
# 
#
# # Table of Contents
#
# ### Click on the links to go directly to specific sections on the notebook.
#
#
# 1. [Import Dependencies](#dependencies)
# <br>
# 2. [Python Syntax](#python-syntax)
# <br>
# 3. [Variables](#variables)
# <br>
# 4. [Strings](#strings)
# <br>
# 5. [Numbers](#numbers)
# <br>
# 6. [Type Casting](#type-casting)
# <br>
# 7. [Assignment Link](#assignment-link)
# <br>
# 8. [After Thoughts](#after-thoughts)
# <br>
# 9. [About Author](#about)
# <br>
# 10. [More Info](#more-info)
# <br>
# <br>
# <p>Estimated time needed: <strong>25 min</strong></p>
#
# ----
#
# ### Python - Let's get you writing some Python Code now!</h1>
# <p><strong>Welcome!</strong> This notebook will teach you the basics of the Python programming language. Although the information presented here is quite basic, it is an important foundation that will help you read and write Python code. By the end of this notebook, you'll know the basics of Python, including how to write basic commands, understand some basic types, and how to perform simple operations on them.</p>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='dependencies'></a>
#
# ### Import Dependencies
# </div>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='python-syntax'></a>
#
# ### Python Syntax
# </div>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='variables'></a>
#
# ### Variables
# </div>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='strings'></a>
#
# ### Strings
# </div>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='numbers'></a>
#
# ### Numbers
# </div>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='type-casting'></a>
#
# ### Type Casting
# </div>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='assignment-link'></a>
#
# ### Assignment Link
#
# </div>
# Now let's put what we have learnt so far into practice with some examples! Try out this [notebook](https://typei.com)
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='after-thoughts'></a>
#
# ### After Thoughts ??
# </div>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='about'></a>
#
# ### About this Instructor:
# </div>
# <p><a href="https://github.com/chisomloius/" target= "_blank"> ChisomLoius</a> is very passionate about Data Analysis and Machine Learning and does a lot of freelance teaching and learning. He holds a B.Eng. in Petroleum Engineering and focuses on leveraging Data Science and Machine Learning to help build solutions in Education and High-Tech Security. He currently works as a Petrochemist.
# </p>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a id='more-info'></a>
#
# ### More Info
# </div>
#
# <p> Visit our <a href="https://techorigin.alisutechnology.com" target= "_blank">website</a>, or request more information via our <a href="mailto:<EMAIL>">email</a>.
# <hr>
# <p>Copyright © 2021 TechOrigin. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| iLearn/py/practice/1-basic/notes/.ipynb_checkpoints/3.Operators_and_Conditionals-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity # display as density curves
import time
import torch
from geomloss import SamplesLoss
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# +
# Evaluation grid for the density plots, slightly wider than [0, 1].
t_plot = np.linspace(-0.1, 1.1, 1000)[:, np.newaxis]
def display_samples(ax, x, color):
    """Draw a filled kernel-density curve for the 1-D samples ``x`` on ``ax``."""
    samples = x.data.cpu().numpy()
    estimator = KernelDensity(kernel="gaussian", bandwidth=0.005).fit(samples)
    density = np.exp(estimator.score_samples(t_plot))
    # Zero the endpoints so the filled polygon closes on the x-axis.
    density[0] = density[-1] = 0
    ax.fill(t_plot, density, color=color)
# +
# Problem setup: N source and M target samples on the unit interval.
N, M = (50, 50) if not use_cuda else (10000, 10000)
t_i = torch.linspace(0, 1, N).type(dtype).view(-1, 1)
t_j = torch.linspace(0, 1, M).type(dtype).view(-1, 1)
# Source measure lives on [0, 0.2]; target measure lives on [0.6, 1.0].
X_i, Y_j = 0.2 * t_i, 0.4 * t_j + 0.6
# -
def gradient_flow(loss, lr=0.01):
    """Flows along the gradient of the cost function, using a simple Euler scheme.

    Moves the source samples X_i (module-level globals X_i, Y_j) to minimise
    loss(x_i, y_j), plotting six snapshots of the flow along the way.

    Parameters:
        loss ((x_i,y_j) -> torch float number):
            Real-valued loss function.
        lr (float, default = .01):
            Learning rate, i.e. time step.
    """
    # Parameters for the gradient descent: integrate up to t = 5.
    Nsteps = int(5 / lr) + 1
    display_its = [int(t / lr) for t in [0, 0.25, 0.50, 1.0, 2.0, 5.0]]
    # Make sure that we won't modify the reference samples
    x_i, y_j = X_i.clone(), Y_j.clone()
    # We're going to perform gradient descent on Loss(α, β)
    # wrt. the positions x_i of the diracs masses that make up α:
    x_i.requires_grad = True
    t_0 = time.time()
    plt.figure(figsize=(12, 8))
    k = 1
    for i in range(Nsteps):  # Euler scheme ===============
        # Compute cost and gradient
        L_αβ = loss(x_i, y_j)
        [g] = torch.autograd.grad(L_αβ, [x_i])
        if i in display_its:  # display
            ax = plt.subplot(2, 3, k)
            k = k + 1
            display_samples(ax, y_j, (0.55, 0.55, 0.95))
            display_samples(ax, x_i, (0.95, 0.55, 0.55))
            ax.set_title("t = {:1.2f}".format(lr * i))
            plt.axis([-0.1, 1.1, -0.1, 5.5])
            plt.xticks([], [])
            plt.yticks([], [])
            plt.tight_layout()
        # In-place Euler step on the tensor's values (bypasses autograd);
        # the len(x_i) factor rescales the gradient of the sample average.
        x_i.data -= lr * len(x_i) * g
    # NOTE(review): this title is set AFTER the loop, so it reuses the last
    # loop index `i` and overwrites the title of the final subplot only —
    # possibly intended to be inside the `if i in display_its` block; confirm.
    plt.title(
        "t = {:1.2f}, elapsed time: {:.2f}s/it".format(
            lr * i, (time.time() - t_0) / Nsteps
        )
    )
# Run the flow with a wide, then a narrow Gaussian kernel.
gradient_flow(SamplesLoss("gaussian", blur=0.5))
gradient_flow(SamplesLoss("gaussian", blur=0.1))
# +
# Wider evaluation grid for the second experiment (samples may leave [0, 1]).
t_plot = np.linspace(-0.5, 1.5, 1000)[:, np.newaxis]
def display_samples(ax, x, color, label=None):
    """Draw a filled kernel-density curve for samples ``x``, optionally labelled."""
    samples = x.data.cpu().numpy()
    estimator = KernelDensity(kernel="gaussian", bandwidth=0.005).fit(samples)
    density = np.exp(estimator.score_samples(t_plot))
    density[0] = density[-1] = 0  # close the filled polygon on the x-axis
    ax.fill(t_plot, density, color=color, label=label)
# +
def rweight():
    """Draw one uniform-[0, 1) random weight as a 1-element tensor of type ``dtype``."""
    sample = torch.rand(1)
    return sample.type(dtype)
N = 100 if not use_cuda else 10 ** 3  # Number of samples per measure
C = 100 if not use_cuda else 10000  # number of copies for the Gaussian blur
for _ in range(5):  # Repeat the experiment 5 times
    K = 5  # Generate random 1D measures as the superposition of K=5 intervals
    t = torch.linspace(0, 1, N // K).type(dtype).view(-1, 1)
    X_i = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
    Y_j = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
    # Compute the limits when blur = 0: compare the two samples after sorting,
    # element by element.
    x_, _ = X_i.sort(dim=0)
    y_, _ = Y_j.sort(dim=0)
    true_wass = (0.5 / len(X_i)) * ((x_ - y_) ** 2).sum()
    # and when blur = +infinity: squared difference of the sample means.
    mean_diff = 0.5 * ((X_i.mean(0) - Y_j.mean(0)) ** 2).sum()
    blurs = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
    sink, bwass = [], []
    for blur in blurs:
        # Compute the Sinkhorn divergence:
        # N.B.: To be super-precise, we use the well-tested "online" backend
        # with a very large 'scaling' coefficient
        loss = SamplesLoss("sinkhorn", p=2, blur=blur, scaling=0.99, backend="online")
        sink.append(loss(X_i, Y_j).item())
        # Compute the blurred Wasserstein distance: replicate each sample C
        # times, add Gaussian noise of std 0.5*blur, then compare the sorted
        # noisy samples as above.
        x_i = torch.cat([X_i] * C, dim=0)
        y_j = torch.cat([Y_j] * C, dim=0)
        x_i = x_i + 0.5 * blur * torch.randn(x_i.shape).type(dtype)
        y_j = y_j + 0.5 * blur * torch.randn(y_j.shape).type(dtype)
        x_, _ = x_i.sort(dim=0)
        y_, _ = y_j.sort(dim=0)
        wass = (0.5 / len(x_i)) * ((x_ - y_) ** 2).sum()
        bwass.append(wass.item())
    # Fancy display: densities on the left (skipped for very large N),
    # divergence-vs-blur curves on the right.
    plt.figure(figsize=(12, 5))
    if N < 10 ** 5:
        ax = plt.subplot(1, 2, 1)
        display_samples(ax, X_i, (1.0, 0, 0, 0.5), label="$\\alpha$")
        display_samples(ax, Y_j, (0, 0, 1.0, 0.5), label="$\\beta$")
        plt.axis([-0.5, 1.5, -0.1, 5.5])
        plt.ylabel("density")
        ax.legend()
        plt.tight_layout()
    ax = plt.subplot(1, 2, 2)
    plt.plot([0.01, 10], [true_wass, true_wass], "g", label="True Wasserstein")
    plt.plot(blurs, sink, "r-o", label="Sinkhorn divergence")
    plt.plot(blurs, bwass, "b-o", label="Blurred Wasserstein")
    plt.plot(
        [0.01, 10], [mean_diff, mean_diff], "m", label="Squared difference of means"
    )
    ax.set_xscale("log")
    ax.legend()
    plt.axis([0.01, 10.0, 0.0, 1.5 * bwass[0]])
    plt.xlabel("blur $\\sqrt{\\varepsilon}$")
    plt.tight_layout()
    plt.show()
# -
| notebooks/OT_distll.ipynb |